Mirror of https://github.com/huggingface/transformers.git (synced 2025-11-04 12:04:37 +08:00)

Compare commits

445 Commits
| SHA1 | | | |
|---|---|---|---|
| b832d5bb8a | |||
| e6cf62d499 | |||
| 1cc1c3c344 | |||
| dee8af4e46 | |||
| 56a47ce2b7 | |||
| 331a46ff04 | |||
| 704037ad51 | |||
| d76a57b0ba | |||
| 80f995a141 | |||
| d94c6b0144 | |||
| c36cca075a | |||
| 99e02c3415 | |||
| 98cb7b2c51 | |||
| b8e2a9c584 | |||
| af8a0384fc | |||
| 14b1f719f4 | |||
| 69850b4011 | |||
| bb7557d3ab | |||
| 34ccc8ebf4 | |||
| bfd6f6b257 | |||
| ae4c9fee73 | |||
| 68a889ee43 | |||
| 34ae5bf838 | |||
| 23d4554ec0 | |||
| 265550ec34 | |||
| fa76520240 | |||
| bcde2c61cb | |||
| 929579f3b5 | |||
| 31d387604c | |||
| 8407429d74 | |||
| 2e153930cf | |||
| 46078e1b46 | |||
| b8686130ca | |||
| 5afa497cbf | |||
| bc70779bf0 | |||
| 87677fcc4d | |||
| 9e666aaa29 | |||
| 07154dadb4 | |||
| bdaba1897c | |||
| 18a8a15f78 | |||
| 3d78e226e6 | |||
| 3571187ef6 | |||
| 64b6ef4db0 | |||
| d616022455 | |||
| df5d9c3551 | |||
| 2499b0a5fc | |||
| 7816f7921f | |||
| 1135f2384a | |||
| cc43307023 | |||
| 60ea6c59d2 | |||
| 179a2c2ff6 | |||
| b3c6ee0ac1 | |||
| 20577d8a7c | |||
| 9761aa4845 | |||
| b17963d82f | |||
| e8568a3b17 | |||
| 870b734bfd | |||
| 3e65f255dc | |||
| 6b35cfd28f | |||
| aff44f0c08 | |||
| 7e7e4753c8 | |||
| bb61b747df | |||
| 7873d76464 | |||
| 38ba7b439b | |||
| fe2756ff41 | |||
| 34cf67fd6c | |||
| dbbd6c7500 | |||
| b509bf7655 | |||
| 1d203a34c0 | |||
| 616743330e | |||
| 2cdfb8b254 | |||
| c49ce3c722 | |||
| 074c869bbe | |||
| 724eb45cef | |||
| 4bc4c69af9 | |||
| a05fad8dce | |||
| 4a82f4f856 | |||
| 991b8e65f4 | |||
| e99b2014cc | |||
| 8fffba5f47 | |||
| fd8a3556f0 | |||
| f4fc9c6152 | |||
| 6c4c7be282 | |||
| 4d3cf0d602 | |||
| 0d6a882f63 | |||
| fc7693adc3 | |||
| 20686b78fc | |||
| 1b4ce76c38 | |||
| 5fed5bb3d6 | |||
| 23bd2eebf5 | |||
| 91a073f804 | |||
| b64cc63a77 | |||
| d164867d90 | |||
| 1758c8fc72 | |||
| 725a56329d | |||
| 94980b529f | |||
| 9ca25ce828 | |||
| db4dccd1b5 | |||
| 19666dcb3b | |||
| 1d8c232324 | |||
| 846b1fd6f8 | |||
| 404adcdabf | |||
| f26ce6992e | |||
| 2f80dbbc0d | |||
| 94adad6be3 | |||
| 8b5c63e4de | |||
| d07db28f52 | |||
| 60005f464d | |||
| 4d3721f9bc | |||
| ec5c1d6134 | |||
| b588ff362a | |||
| f872eb98c2 | |||
| 694e2117f3 | |||
| 01520d5412 | |||
| f7c9dc8c99 | |||
| cc8c2d2332 | |||
| bbff03fbfc | |||
| 2fb8ddeeff | |||
| 34561e61a5 | |||
| 361aff6de5 | |||
| cea8ba1d59 | |||
| 0401317b23 | |||
| 24e67fbf75 | |||
| 8d1d1ffde2 | |||
| fda2f62395 | |||
| 0dd796e359 | |||
| 472857c47f | |||
| 2e6f5ffb96 | |||
| 5938f31fa7 | |||
| 7797d21b8d | |||
| f471979167 | |||
| abb7d1ff6d | |||
| 06a30cfdf3 | |||
| 7d1ae644ef | |||
| 2bba7f810e | |||
| 8733ffcb5e | |||
| 8a861048dd | |||
| a8a577ba93 | |||
| 0ae59e662d | |||
| 6a9038ba53 | |||
| 77944d1b31 | |||
| d52f914e24 | |||
| 29a392fbcf | |||
| 832b2b0058 | |||
| 934d3f4d2f | |||
| f19ba35b2b | |||
| 7de5c6aa5e | |||
| 1798e98e5a | |||
| c64c2fc4c2 | |||
| 0540d360f2 | |||
| 976554a472 | |||
| 262a9992d7 | |||
| 19cc2c084e | |||
| 2283dcca5e | |||
| b6c1cae67b | |||
| ef28b2c747 | |||
| 90430ae7ec | |||
| bed6408dcc | |||
| e5b63fb542 | |||
| 8a4e90ff40 | |||
| e0bf01d9a9 | |||
| 4c721c6b6a | |||
| f3e5404880 | |||
| 83857ffeaa | |||
| d5c037c3ed | |||
| d1e4fa98a9 | |||
| 59e2bdd086 | |||
| 3d6452163d | |||
| 76906372b0 | |||
| a98dfe4ced | |||
| e5f2d9122c | |||
| 043c8781ef | |||
| eecaaa734a | |||
| 20e652209c | |||
| 22a465a91f | |||
| eac039d21f | |||
| 471daf1b6c | |||
| 9024613337 | |||
| baf66d1419 | |||
| 9b03d67b83 | |||
| 8435d78f0c | |||
| 80790705e0 | |||
| 13aa13dbc0 | |||
| c0660df5dd | |||
| f91ce0b803 | |||
| 51efde54a9 | |||
| f113a2dfdc | |||
| 90a41dbe14 | |||
| d648a02203 | |||
| 88874f6cf0 | |||
| 66d8206809 | |||
| 72fa8d03a7 | |||
| 6190e8ce4c | |||
| 7cc35c3104 | |||
| 906b638efa | |||
| 994d86609b | |||
| 2dd8f524f5 | |||
| 5c85fc3977 | |||
| 8e36da7acb | |||
| 21c88a07b7 | |||
| 3c01dfb775 | |||
| 477ec4b6cc | |||
| 7b9e5a54b5 | |||
| 4784b04f47 | |||
| 4a49c22584 | |||
| e99bc87e4d | |||
| 0f96d4b1f7 | |||
| 0c970caa4a | |||
| 4b4b079272 | |||
| 9775b2eb27 | |||
| c0cf0a04d5 | |||
| 4d1ad83236 | |||
| 35410da758 | |||
| 4d79e0d386 | |||
| 66a84b63b0 | |||
| 070f3b21d8 | |||
| 46ef646016 | |||
| 9bc3773c84 | |||
| 60a372387f | |||
| e14c6b52e3 | |||
| da2d8ca265 | |||
| e04bab59e1 | |||
| 2152bfeae8 | |||
| 8722e9eb3b | |||
| 33aa7a80ca | |||
| a5b3a89545 | |||
| ff22b3acc0 | |||
| cbb7fad319 | |||
| 09efcece75 | |||
| 97c815dae2 | |||
| 8607233679 | |||
| f50b82af04 | |||
| 2fdab323d1 | |||
| 813e4d18ba | |||
| 8337740754 | |||
| 5b0e0b61f0 | |||
| 3ca35b99ba | |||
| 0ae8eece55 | |||
| 07ebe0fd06 | |||
| 1cb9c76ec5 | |||
| a25d056b7a | |||
| 517d7c8624 | |||
| ada22a1c9e | |||
| 522733f6cb | |||
| 0202da0271 | |||
| 8f46cd1057 | |||
| e0855e8929 | |||
| 0856a231c0 | |||
| ab7f5d2943 | |||
| b450a7faf2 | |||
| d44db1145c | |||
| 690a0dbf36 | |||
| fbb248a2e4 | |||
| 5ff0c60505 | |||
| 210d407245 | |||
| b65f07d8c0 | |||
| 009ee86a19 | |||
| ffd623823d | |||
| 3a2f97db6f | |||
| 434d15da8e | |||
| 5faf386652 | |||
| 8efaf8f176 | |||
| 0e774e57a6 | |||
| c35d9d48d9 | |||
| 65df0d78ed | |||
| 4e56da38d9 | |||
| cdcb206e10 | |||
| 321d70a7a9 | |||
| 67376c02e2 | |||
| c6bea08448 | |||
| e7cfc46fc1 | |||
| e1b3cfb504 | |||
| 3c33499f87 | |||
| 03cdb2a390 | |||
| 1e71f11dec | |||
| d38caba169 | |||
| af62cc5f20 | |||
| eebc8abbe2 | |||
| 81c7e3ec9f | |||
| e8fe6b7140 | |||
| 884ca81d87 | |||
| 32fea876bb | |||
| b31ba23913 | |||
| 0a9860daa7 | |||
| 2071a9b86e | |||
| 8197eb9f10 | |||
| 525eba68ab | |||
| b514a60c36 | |||
| 9bdcba53fd | |||
| f0bf81e141 | |||
| 9f9909ea2f | |||
| 6cd769957e | |||
| 1320e4ec0c | |||
| f4a07a392c | |||
| 43b9af0cac | |||
| cfcb95417c | |||
| 0c1a6f9b1d | |||
| 1756b5e956 | |||
| dadd0c1b13 | |||
| 102c6b238c | |||
| b80684b23f | |||
| 80607874c1 | |||
| 7b4b0cf966 | |||
| 4bbb9f2d68 | |||
| 5d7e845712 | |||
| eccb2f0163 | |||
| 5adc20723b | |||
| 5ee4f17234 | |||
| 2dfaf2f227 | |||
| 777459b471 | |||
| edcb56fd96 | |||
| 6bc082da0a | |||
| eb8fda51f4 | |||
| e77721e4fe | |||
| 009b581316 | |||
| f99f2fb661 | |||
| 438db43d46 | |||
| c306869ea2 | |||
| d482e3d79d | |||
| 9c3c24800b | |||
| 2df41663f1 | |||
| 9aebc711c9 | |||
| 4a450b25d5 | |||
| 58f0a2745c | |||
| 7ac3311e48 | |||
| ed47cb6cba | |||
| 973926431e | |||
| ba9e4eb354 | |||
| 34bdb7f9cb | |||
| 848aae49e1 | |||
| 448937c00d | |||
| ba37ddc5ce | |||
| 822915142b | |||
| bd74632687 | |||
| fd223374f0 | |||
| d609ba24cb | |||
| bde1eeebe0 | |||
| 3ea3b00e59 | |||
| d8e3bdbb4c | |||
| 64ce900974 | |||
| 0ad9b239a1 | |||
| e9e77cd3c4 | |||
| 1579c53635 | |||
| f3bda2352a | |||
| 6179f537a3 | |||
| 850da1cc36 | |||
| 01a3966bc6 | |||
| 05f961840b | |||
| aa90e0c36a | |||
| 8f8bbd4a4c | |||
| e2d53d95b0 | |||
| 7e0b415ab4 | |||
| ce75b169bd | |||
| 9bf528877e | |||
| af2b78601b | |||
| 0dd2b750ca | |||
| 5169069997 | |||
| 3a848111e6 | |||
| 98c96fb1a7 | |||
| 5456d82311 | |||
| 9b2540b5a7 | |||
| bd3b3aee9c | |||
| a45a9cc0e1 | |||
| b12616fd8e | |||
| d77dd62ff8 | |||
| 9c6a48c8c3 | |||
| 01ff4f82ba | |||
| 4eb2a49d41 | |||
| 0a9d7c7edb | |||
| be9fa192f0 | |||
| 9c35c132fa | |||
| b9c77b98d5 | |||
| f040a43cb3 | |||
| 35115eaf93 | |||
| 009101de12 | |||
| fea15cc9f5 | |||
| a28dfc8659 | |||
| c03c12687f | |||
| 8831c68803 | |||
| bcd4aa8fe0 | |||
| a69ec2c722 | |||
| 7d03c53718 | |||
| 3a9c88377f | |||
| 647c983530 | |||
| 4e0cba1053 | |||
| c94455651e | |||
| 25eae7b0ae | |||
| cd30565aed | |||
| 8edc898f63 | |||
| 6c65cb2492 | |||
| a2da2b4109 | |||
| 35becc6d84 | |||
| 506e5bb0c8 | |||
| e485829a41 | |||
| 7e60205bd3 | |||
| 64326dccfb | |||
| e5c78c6684 | |||
| fa5222c296 | |||
| 0dd5f55ac8 | |||
| b3628f117e | |||
| ab90d4cddd | |||
| dc5df92fa8 | |||
| 3cf12b235a | |||
| eed51c5bdf | |||
| 3f60a60eed | |||
| 751beb9e73 | |||
| 793dcd236b | |||
| 2e4db64cab | |||
| c9fd350567 | |||
| 93f563b8a8 | |||
| e048c7f1c8 | |||
| d3d56f9a0b | |||
| 766c6b2ce3 | |||
| 77966a43a4 | |||
| bcd607542c | |||
| 2e8c5c00ec | |||
| 2860377021 | |||
| c18bdb4433 | |||
| d0d9b384f2 | |||
| ca4e7aaa72 | |||
| 193e2df8ba | |||
| c64de50ea4 | |||
| b96149a19b | |||
| be3b9bcf4d | |||
| 186f75342e | |||
| e626eecc25 | |||
| 99709ee61d | |||
| 8da280ebbe | |||
| e5fc98c542 | |||
| 7176674849 | |||
| 7fb94ab934 | |||
| 2feb29c0ff | |||
| 2c9991496b | |||
| 17595ef2de | |||
| 67f4dd56a3 | |||
| ecf3ea197e | |||
| 87c1244c7d | |||
| b3d86162b0 | |||
| d57763f582 | |||
| 78cf7b4ab4 | |||
| a58361f197 | |||
| 786cc41299 | |||
| ecc0b54bec | |||
| 8b1b93947f | |||
| 8809eb6c93 | |||
							
								
								
									
29  .circleci/config.yml  Normal file

@@ -0,0 +1,29 @@
```yaml
version: 2
jobs:
    build_py3:
        working_directory: ~/pytorch-pretrained-BERT
        docker:
            - image: circleci/python:3.5
        steps:
            - checkout
            - run: sudo pip install --progress-bar off .
            - run: sudo pip install pytest ftfy spacy
            - run: sudo python -m spacy download en
            - run: python -m pytest -sv tests/ --runslow
    build_py2:
        working_directory: ~/pytorch-pretrained-BERT
        docker:
            - image: circleci/python:2.7
        steps:
            - checkout
            - run: sudo pip install --progress-bar off .
            - run: sudo pip install pytest spacy
            - run: sudo pip install ftfy==4.4.3
            - run: sudo python -m spacy download en
            - run: python -m pytest -sv tests/ --runslow
workflows:
  version: 2
  build_and_test:
    jobs:
      - build_py3
      - build_py2
```
							
								
								
									
17  .github/stale.yml  vendored  Normal file

@@ -0,0 +1,17 @@
```yaml
# Number of days of inactivity before an issue becomes stale
daysUntilStale: 60
# Number of days of inactivity before a stale issue is closed
daysUntilClose: 7
# Issues with these labels will never be considered stale
exemptLabels:
  - pinned
  - security
# Label to use when marking an issue as stale
staleLabel: wontfix
# Comment to post when marking an issue as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Thank you
  for your contributions.
# Comment to post when closing a stale issue. Set to `false` to disable
closeComment: false
```
							
								
								
									
5  .gitignore  vendored

@@ -119,4 +119,7 @@ dmypy.json
```diff
 .vscode
 
 # TF code
 tensorflow_code
+
+# Models
+models
```
							
								
								
									
1  MANIFEST.in  Normal file

@@ -0,0 +1 @@
```
include LICENSE
```

@@ -1,2 +0,0 @@

```diff
-#!/bin/sh
-python -m pytorch_pretrained_bert "$@"
```
							
								
								
									
										
BIN  docs/imgs/warmup_constant_schedule.png  Normal file
Binary file not shown. (After: 9.7 KiB)
							
								
								
									
										
BIN  docs/imgs/warmup_cosine_hard_restarts_schedule.png  Normal file
Binary file not shown. (After: 22 KiB)
							
								
								
									
										
BIN  docs/imgs/warmup_cosine_schedule.png  Normal file
Binary file not shown. (After: 17 KiB)
							
								
								
									
										
BIN  docs/imgs/warmup_cosine_warm_restarts_schedule.png  Normal file
Binary file not shown. (After: 22 KiB)
							
								
								
									
										
BIN  docs/imgs/warmup_linear_schedule.png  Normal file
Binary file not shown. (After: 16 KiB)
```diff
@@ -1,5 +1,5 @@
 # coding=utf-8
-# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -57,7 +57,7 @@ class InputFeatures(object):
 
 
 def convert_examples_to_features(examples, seq_length, tokenizer):
-    """Loads a data file into a list of `InputBatch`s."""
+    """Loads a data file into a list of `InputFeature`s."""
 
     features = []
     for (ex_index, example) in enumerate(examples):
@@ -80,10 +80,10 @@ def convert_examples_to_features(examples, seq_length, tokenizer):
         # The convention in BERT is:
         # (a) For sequence pairs:
         #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
-        #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
+        #  type_ids:   0   0  0    0    0     0      0   0    1  1  1   1  1   1
         # (b) For single sequences:
         #  tokens:   [CLS] the dog is hairy . [SEP]
-        #  type_ids: 0   0   0   0  0     0 0
+        #  type_ids:   0   0   0   0  0     0   0
         #
         # Where "type_ids" are used to indicate whether this is the first
         # sequence or the second sequence. The embedding vectors for `type=0` and
@@ -199,7 +199,7 @@ def main():
                              "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
 
     ## Other parameters
-    parser.add_argument("--do_lower_case", default=False, action='store_true', help="Set this flag if you are using an uncased model.")
+    parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.")
     parser.add_argument("--layers", default="-1,-2,-3,-4", type=str)
     parser.add_argument("--max_seq_length", default=128, type=int,
                         help="The maximum total input sequence length after WordPiece tokenization. Sequences longer "
@@ -210,7 +210,6 @@ def main():
                         default=-1,
                         help = "local_rank for distributed training on gpus")
     parser.add_argument("--no_cuda",
-                        default=False,
                         action='store_true',
                         help="Whether not to use CUDA when available")
 
```
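As a side note (not part of the diff above), the corrected `type_ids` comment can be sanity-checked with a tiny sketch; the token lists below are just the example sentence pair from the comment, written out by hand and purely illustrative.

```python
# Illustrative only: reproduce the type_ids convention from the comment above
# for the "[CLS] A [SEP] B [SEP]" example sentence pair.
tokens_a = ["is", "this", "jack", "##son", "##ville", "?"]
tokens_b = ["no", "it", "is", "not", "."]

tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
type_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)

# 14 tokens, 14 segment ids: eight 0s for "[CLS] A [SEP]", six 1s for "B [SEP]"
assert len(tokens) == len(type_ids) == 14
```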
							
								
								
									
64  examples/lm_finetuning/README.md  Normal file

@@ -0,0 +1,64 @@
# BERT Model Finetuning using Masked Language Modeling objective

## Introduction

The three example scripts in this folder can be used to **fine-tune** a pre-trained BERT model using the pretraining objective (combination of masked language modeling and next sentence prediction loss). In general, pretrained models like BERT are first trained with a pretraining objective (masked language modeling and next sentence prediction for BERT) on a large and general natural language corpus. A classifier head is then added on top of the pre-trained architecture and the model is quickly fine-tuned on a target task, while still (hopefully) retaining its general language understanding. This greatly reduces overfitting and yields state-of-the-art results, especially when training data for the target task are limited.

The [ULMFiT paper](https://arxiv.org/abs/1801.06146) took a slightly different approach, however, and added an intermediate step in which the model is fine-tuned on text **from the same domain as the target task and using the pretraining objective** before the final stage in which the classifier head is added and the model is trained on the target task itself. This paper reported significantly improved results from this step, and found that they could get high-quality classifications even with only tiny numbers (<1000) of labelled training examples, as long as they had a lot of unlabelled data from the target domain.

The BERT model has more capacity than the LSTM models used in the ULMFiT work, but the [BERT paper](https://arxiv.org/abs/1810.04805) did not test finetuning using the pretraining objective, and at the present stage there aren't many examples of this approach being used for Transformer-based language models. As such, it's hard to predict what effect this step will have on final model performance, but it's reasonable to conjecture that this approach can improve the final classification performance, especially when a large unlabelled corpus from the target domain is available, labelled data is limited, or the target domain is very unusual and different from 'normal' English text. If you are aware of any literature on this subject, please feel free to add it in here, or open an issue and tag me (@Rocketknight1) and I'll include it.

## Input format

The scripts in this folder expect a single file as input, consisting of untokenized text, with one **sentence** per line, and one blank line between documents. The reason for the sentence splitting is that part of BERT's training involves a _next sentence_ objective in which the model must predict whether two sequences of text are contiguous text from the same document or not, and to avoid making the task _too easy_, the split point between the sequences is always at the end of a sentence. The linebreaks in the file are therefore necessary to mark the points where the text can be split.

## Usage

There are two ways to fine-tune a language model using these scripts. The first _quick_ approach is to use [`simple_lm_finetuning.py`](./simple_lm_finetuning.py). This script does everything in a single script, but generates training instances that consist of just two sentences. This is quite different from the BERT paper, where (confusingly) the NextSentence task concatenated sentences together from each document to form two long multi-sentences, which the paper just referred to as _sentences_. The difference between this simple approach and the original paper approach can have a significant effect for long sequences, since two sentences will be much shorter than the max sequence length. In this case, most of each training example will just consist of blank padding characters, which wastes a lot of computation and results in a model that isn't really training on long sequences.

As such, the preferred approach (assuming you have documents containing multiple contiguous sentences from your target domain) is to use [`pregenerate_training_data.py`](./pregenerate_training_data.py) to pre-process your data into training examples following the methodology used for LM training in the original BERT paper and repository. Since there is a significant random component to training data generation for BERT, this script includes an option to generate multiple _epochs_ of pre-processed data, to avoid training on the same random splits each epoch. Generating an epoch of data for each training epoch should result in a better final model, and so we recommend doing so.

You can then train on the pregenerated data using [`finetune_on_pregenerated.py`](./finetune_on_pregenerated.py), pointing it to the folder created by [`pregenerate_training_data.py`](./pregenerate_training_data.py). Note that you should use the same `bert_model` and case options for both! Also note that `max_seq_len` does not need to be specified for the [`finetune_on_pregenerated.py`](./finetune_on_pregenerated.py) script, as it is inferred from the training examples.

There are various options that can be tweaked, but they are mostly set to the values from the BERT paper/repository and the default values should make sense. The most relevant ones are:

- `--max_seq_len`: Controls the length of training examples (in wordpiece tokens) seen by the model. Defaults to 128 but can be set as high as 512. Higher values may yield stronger language models at the cost of slower and more memory-intensive training.
- `--fp16`: Enables fast half-precision training on recent GPUs.

In addition, if memory usage is an issue, especially when training on a single GPU, reducing `--train_batch_size` from the default 32 to a lower number (4-16) can be helpful, or leaving `--train_batch_size` at the default and increasing `--gradient_accumulation_steps` to 2-8. Changing `--gradient_accumulation_steps` may be preferable as alterations to the batch size may require corresponding changes in the learning rate to compensate. There is also a `--reduce_memory` option for both the `pregenerate_training_data.py` and `finetune_on_pregenerated.py` scripts that spills data to disc in shelf objects or numpy memmaps rather than retaining it in memory, which significantly reduces memory usage with little performance impact.

## Examples

### Simple fine-tuning

```
python3 simple_lm_finetuning.py \
    --train_corpus my_corpus.txt \
    --bert_model bert-base-uncased \
    --do_lower_case \
    --output_dir finetuned_lm/ \
    --do_train
```

### Pregenerating training data

```
python3 pregenerate_training_data.py \
    --train_corpus my_corpus.txt \
    --bert_model bert-base-uncased \
    --do_lower_case \
    --output_dir training/ \
    --epochs_to_generate 3 \
    --max_seq_len 256
```

### Training on pregenerated data

```
python3 finetune_on_pregenerated.py \
    --pregenerated_data training/ \
    --bert_model bert-base-uncased \
    --do_lower_case \
    --output_dir finetuned_lm/ \
    --epochs 3
```
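As a side note (not part of the README above), here is a minimal sketch of building a corpus file in the input format the README describes: one sentence per line, with a blank line between documents. The file name `my_corpus.txt` simply matches the usage examples; how you split raw text into sentences is up to you.

```python
# Illustrative only: write a toy corpus in the expected format
# (one sentence per line, blank line between documents).
documents = [
    ["This is the first sentence of document one.",
     "Here is a second sentence from the same document."],
    ["Document two starts here.",
     "It also contains more than one sentence."],
]

with open("my_corpus.txt", "w", encoding="utf-8") as f:
    for doc in documents:
        for sentence in doc:
            f.write(sentence + "\n")
        f.write("\n")  # blank line marks the document boundary
```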
							
								
								
									
334  examples/lm_finetuning/finetune_on_pregenerated.py  Normal file

@@ -0,0 +1,334 @@
```python
from argparse import ArgumentParser
from pathlib import Path
import torch
import logging
import json
import random
import numpy as np
from collections import namedtuple
from tempfile import TemporaryDirectory

from torch.utils.data import DataLoader, Dataset, RandomSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm

from pytorch_pretrained_bert.modeling import BertForPreTraining
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule

InputFeatures = namedtuple("InputFeatures", "input_ids input_mask segment_ids lm_label_ids is_next")

log_format = '%(asctime)-10s: %(message)s'
logging.basicConfig(level=logging.INFO, format=log_format)


def convert_example_to_features(example, tokenizer, max_seq_length):
    tokens = example["tokens"]
    segment_ids = example["segment_ids"]
    is_random_next = example["is_random_next"]
    masked_lm_positions = example["masked_lm_positions"]
    masked_lm_labels = example["masked_lm_labels"]

    assert len(tokens) == len(segment_ids) <= max_seq_length  # The preprocessed data should be already truncated
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    masked_label_ids = tokenizer.convert_tokens_to_ids(masked_lm_labels)

    input_array = np.zeros(max_seq_length, dtype=np.int)
    input_array[:len(input_ids)] = input_ids

    mask_array = np.zeros(max_seq_length, dtype=np.bool)
    mask_array[:len(input_ids)] = 1

    segment_array = np.zeros(max_seq_length, dtype=np.bool)
    segment_array[:len(segment_ids)] = segment_ids

    lm_label_array = np.full(max_seq_length, dtype=np.int, fill_value=-1)
    lm_label_array[masked_lm_positions] = masked_label_ids

    features = InputFeatures(input_ids=input_array,
                             input_mask=mask_array,
                             segment_ids=segment_array,
                             lm_label_ids=lm_label_array,
                             is_next=is_random_next)
    return features


class PregeneratedDataset(Dataset):
    def __init__(self, training_path, epoch, tokenizer, num_data_epochs, reduce_memory=False):
        self.vocab = tokenizer.vocab
        self.tokenizer = tokenizer
        self.epoch = epoch
        self.data_epoch = epoch % num_data_epochs
        data_file = training_path / f"epoch_{self.data_epoch}.json"
        metrics_file = training_path / f"epoch_{self.data_epoch}_metrics.json"
        assert data_file.is_file() and metrics_file.is_file()
        metrics = json.loads(metrics_file.read_text())
        num_samples = metrics['num_training_examples']
        seq_len = metrics['max_seq_len']
        self.temp_dir = None
        self.working_dir = None
        if reduce_memory:
            self.temp_dir = TemporaryDirectory()
            self.working_dir = Path(self.temp_dir.name)
            input_ids = np.memmap(filename=self.working_dir/'input_ids.memmap',
                                  mode='w+', dtype=np.int32, shape=(num_samples, seq_len))
            input_masks = np.memmap(filename=self.working_dir/'input_masks.memmap',
                                    shape=(num_samples, seq_len), mode='w+', dtype=np.bool)
            segment_ids = np.memmap(filename=self.working_dir/'segment_ids.memmap',
                                    shape=(num_samples, seq_len), mode='w+', dtype=np.bool)
            lm_label_ids = np.memmap(filename=self.working_dir/'lm_label_ids.memmap',
                                     shape=(num_samples, seq_len), mode='w+', dtype=np.int32)
            lm_label_ids[:] = -1
            is_nexts = np.memmap(filename=self.working_dir/'is_nexts.memmap',
                                 shape=(num_samples,), mode='w+', dtype=np.bool)
        else:
            input_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.int32)
            input_masks = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)
            segment_ids = np.zeros(shape=(num_samples, seq_len), dtype=np.bool)
            lm_label_ids = np.full(shape=(num_samples, seq_len), dtype=np.int32, fill_value=-1)
            is_nexts = np.zeros(shape=(num_samples,), dtype=np.bool)
        logging.info(f"Loading training examples for epoch {epoch}")
        with data_file.open() as f:
            for i, line in enumerate(tqdm(f, total=num_samples, desc="Training examples")):
                line = line.strip()
                example = json.loads(line)
                features = convert_example_to_features(example, tokenizer, seq_len)
                input_ids[i] = features.input_ids
                segment_ids[i] = features.segment_ids
                input_masks[i] = features.input_mask
                lm_label_ids[i] = features.lm_label_ids
                is_nexts[i] = features.is_next
        assert i == num_samples - 1  # Assert that the sample count metric was true
        logging.info("Loading complete!")
        self.num_samples = num_samples
        self.seq_len = seq_len
        self.input_ids = input_ids
        self.input_masks = input_masks
        self.segment_ids = segment_ids
        self.lm_label_ids = lm_label_ids
        self.is_nexts = is_nexts

    def __len__(self):
        return self.num_samples

    def __getitem__(self, item):
        return (torch.tensor(self.input_ids[item].astype(np.int64)),
                torch.tensor(self.input_masks[item].astype(np.int64)),
                torch.tensor(self.segment_ids[item].astype(np.int64)),
                torch.tensor(self.lm_label_ids[item].astype(np.int64)),
                torch.tensor(self.is_nexts[item].astype(np.int64)))


def main():
    parser = ArgumentParser()
    parser.add_argument('--pregenerated_data', type=Path, required=True)
    parser.add_argument('--output_dir', type=Path, required=True)
    parser.add_argument("--bert_model", type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--do_lower_case", action="store_true")
    parser.add_argument("--reduce_memory", action="store_true",
                        help="Store training data as on-disc memmaps to massively reduce memory usage")

    parser.add_argument("--epochs", type=int, default=3, help="Number of epochs to train for")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                        "0 (default value): dynamic loss scaling.\n"
                        "Positive power of 2: static loss scaling value.\n")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--learning_rate",
                        default=3e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    args = parser.parse_args()

    assert args.pregenerated_data.is_dir(), \
        "--pregenerated_data should point to the folder of files made by pregenerate_training_data.py!"

    samples_per_epoch = []
    for i in range(args.epochs):
        epoch_file = args.pregenerated_data / f"epoch_{i}.json"
        metrics_file = args.pregenerated_data / f"epoch_{i}_metrics.json"
        if epoch_file.is_file() and metrics_file.is_file():
            metrics = json.loads(metrics_file.read_text())
            samples_per_epoch.append(metrics['num_training_examples'])
        else:
            if i == 0:
                exit("No training data was found!")
            print(f"Warning! There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs}).")
            print("This script will loop over the available data, but training diversity may be negatively impacted.")
            num_data_epochs = i
            break
    else:
        num_data_epochs = args.epochs

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logging.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
        logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
    args.output_dir.mkdir(parents=True, exist_ok=True)

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    total_train_examples = 0
    for i in range(args.epochs):
        # The modulo takes into account the fact that we may loop over limited epochs of data
        total_train_examples += samples_per_epoch[i % len(samples_per_epoch)]

    num_train_optimization_steps = int(
        total_train_examples / args.train_batch_size / args.gradient_accumulation_steps)
    if args.local_rank != -1:
        num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()

    # Prepare model
    model = BertForPreTraining.from_pretrained(args.bert_model)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]

    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
        warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
                                             t_total=num_train_optimization_steps)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=num_train_optimization_steps)

    global_step = 0
    logging.info("***** Running training *****")
    logging.info(f"  Num examples = {total_train_examples}")
    logging.info("  Batch size = %d", args.train_batch_size)
    logging.info("  Num steps = %d", num_train_optimization_steps)
    model.train()
    for epoch in range(args.epochs):
        epoch_dataset = PregeneratedDataset(epoch=epoch, training_path=args.pregenerated_data, tokenizer=tokenizer,
                                            num_data_epochs=num_data_epochs, reduce_memory=args.reduce_memory)
        if args.local_rank == -1:
            train_sampler = RandomSampler(epoch_dataset)
        else:
            train_sampler = DistributedSampler(epoch_dataset)
        train_dataloader = DataLoader(epoch_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
        tr_loss = 0
        nb_tr_examples, nb_tr_steps = 0, 0
        with tqdm(total=len(train_dataloader), desc=f"Epoch {epoch}") as pbar:
            for step, batch in enumerate(train_dataloader):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
                loss = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next)
                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                pbar.update(1)
                mean_loss = tr_loss * args.gradient_accumulation_steps / nb_tr_steps
                pbar.set_postfix_str(f"Loss: {mean_loss:.5f}")
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used that handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,
                                                                                 args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

    # Save a trained model
    logging.info("** ** * Saving fine-tuned model ** ** * ")
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
    output_model_file = args.output_dir / "pytorch_model.bin"
    torch.save(model_to_save.state_dict(), str(output_model_file))


if __name__ == '__main__':
    main()
```
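As a side note (not part of the file above), the on-disk layout that `finetune_on_pregenerated.py` reads can be inferred from `convert_example_to_features` and `PregeneratedDataset`: each `epoch_N.json` holds one JSON object per line with the keys shown below, and `epoch_N_metrics.json` records the example count and sequence length. The values here are toy data for illustration only; real files come from `pregenerate_training_data.py`.

```python
# Illustrative only: emit one pregenerated example plus its metrics file,
# using the keys the training script reads above.
import json

example = {
    "tokens": ["[CLS]", "a", "toy", "[MASK]", "[SEP]", "another", "one", "[SEP]"],
    "segment_ids": [0, 0, 0, 0, 0, 1, 1, 1],
    "is_random_next": False,
    "masked_lm_positions": [3],
    "masked_lm_labels": ["sentence"],
}
metrics = {"num_training_examples": 1, "max_seq_len": 128}

with open("epoch_0.json", "w") as f:
    f.write(json.dumps(example) + "\n")
with open("epoch_0_metrics.json", "w") as f:
    json.dump(metrics, f)
```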
							
								
								
									
302  examples/lm_finetuning/pregenerate_training_data.py  Normal file

@@ -0,0 +1,302 @@
 | 
			
		||||
from argparse import ArgumentParser
 | 
			
		||||
from pathlib import Path
 | 
			
		||||
from tqdm import tqdm, trange
 | 
			
		||||
from tempfile import TemporaryDirectory
 | 
			
		||||
import shelve
 | 
			
		||||
 | 
			
		||||
from random import random, randrange, randint, shuffle, choice, sample
 | 
			
		||||
from pytorch_pretrained_bert.tokenization import BertTokenizer
 | 
			
		||||
import numpy as np
 | 
			
		||||
import json
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class DocumentDatabase:
 | 
			
		||||
    def __init__(self, reduce_memory=False):
 | 
			
		||||
        if reduce_memory:
 | 
			
		||||
            self.temp_dir = TemporaryDirectory()
 | 
			
		||||
            self.working_dir = Path(self.temp_dir.name)
 | 
			
		||||
            self.document_shelf_filepath = self.working_dir / 'shelf.db'
 | 
			
		||||
            self.document_shelf = shelve.open(str(self.document_shelf_filepath),
 | 
			
		||||
                                              flag='n', protocol=-1)
 | 
			
		||||
            self.documents = None
 | 
			
		||||
        else:
 | 
			
		||||
            self.documents = []
 | 
			
		||||
            self.document_shelf = None
 | 
			
		||||
            self.document_shelf_filepath = None
 | 
			
		||||
            self.temp_dir = None
 | 
			
		||||
        self.doc_lengths = []
 | 
			
		||||
        self.doc_cumsum = None
 | 
			
		||||
        self.cumsum_max = None
 | 
			
		||||
        self.reduce_memory = reduce_memory
 | 
			
		||||
 | 
			
		||||
    def add_document(self, document):
 | 
			
		||||
        if not document:
 | 
			
		||||
            return
 | 
			
		||||
        if self.reduce_memory:
 | 
			
		||||
            current_idx = len(self.doc_lengths)
 | 
			
		||||
            self.document_shelf[str(current_idx)] = document
 | 
			
		||||
        else:
 | 
			
		||||
            self.documents.append(document)
 | 
			
		||||
        self.doc_lengths.append(len(document))
 | 
			
		||||
 | 
			
		||||
    def _precalculate_doc_weights(self):
 | 
			
		||||
        self.doc_cumsum = np.cumsum(self.doc_lengths)
 | 
			
		||||
        self.cumsum_max = self.doc_cumsum[-1]
 | 
			
		||||
 | 
			
		||||
    def sample_doc(self, current_idx, sentence_weighted=True):
 | 
			
		||||
        # Uses the current iteration counter to ensure we don't sample the same doc twice
 | 
			
		||||
        if sentence_weighted:
 | 
			
		||||
            # With sentence weighting, we sample docs proportionally to their sentence length
 | 
			
		||||
            if self.doc_cumsum is None or len(self.doc_cumsum) != len(self.doc_lengths):
 | 
			
		||||
                self._precalculate_doc_weights()
 | 
			
		||||
            rand_start = self.doc_cumsum[current_idx]
 | 
			
		||||
            rand_end = rand_start + self.cumsum_max - self.doc_lengths[current_idx]
 | 
			
		||||
            sentence_index = randrange(rand_start, rand_end) % self.cumsum_max
 | 
			
		||||
            sampled_doc_index = np.searchsorted(self.doc_cumsum, sentence_index, side='right')
 | 
			
		||||
        else:
 | 
			
		||||
            # If we don't use sentence weighting, then every doc has an equal chance to be chosen
 | 
			
		||||
            sampled_doc_index = (current_idx + randrange(1, len(self.doc_lengths))) % len(self.doc_lengths)
 | 
			
		||||
        assert sampled_doc_index != current_idx
 | 
			
		||||
        if self.reduce_memory:
 | 
			
		||||
            return self.document_shelf[str(sampled_doc_index)]
 | 
			
		||||
        else:
 | 
			
		||||
            return self.documents[sampled_doc_index]
 | 
			
		||||
 | 
			
		||||
    def __len__(self):
 | 
			
		||||
        return len(self.doc_lengths)
 | 
			
		||||
 | 
			
		||||
    def __getitem__(self, item):
 | 
			
		||||
        if self.reduce_memory:
 | 
			
		||||
            return self.document_shelf[str(item)]
 | 
			
		||||
        else:
 | 
			
		||||
            return self.documents[item]
 | 
			
		||||
 | 
			
		||||
    def __enter__(self):
 | 
			
		||||
        return self
 | 
			
		||||
 | 
			
		||||
    def __exit__(self, exc_type, exc_val, traceback):
 | 
			
		||||
        if self.document_shelf is not None:
 | 
			
		||||
            self.document_shelf.close()
 | 
			
		||||
        if self.temp_dir is not None:
 | 
			
		||||
            self.temp_dir.cleanup()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens):
 | 
			
		||||
    """Truncates a pair of sequences to a maximum sequence length. Lifted from Google's BERT repo."""
 | 
			
		||||
    while True:
 | 
			
		||||
        total_length = len(tokens_a) + len(tokens_b)
 | 
			
		||||
        if total_length <= max_num_tokens:
 | 
			
		||||
            break
 | 
			
		||||
 | 
			
		||||
        trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
 | 
			
		||||
        assert len(trunc_tokens) >= 1
 | 
			
		||||
 | 
			
		||||
        # We want to sometimes truncate from the front and sometimes from the
 | 
			
		||||
        # back to add more randomness and avoid biases.
 | 
			
		||||
        if random() < 0.5:
 | 
			
		||||
            del trunc_tokens[0]
 | 
			
		||||
        else:
 | 
			
		||||
            trunc_tokens.pop()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_list):
 | 
			
		||||
    """Creates the predictions for the masked LM objective. This is mostly copied from the Google BERT repo, but
 | 
			
		||||
    with several refactors to clean it up and remove a lot of unnecessary variables."""
 | 
			
		||||
    cand_indices = []
 | 
			
		||||
    for (i, token) in enumerate(tokens):
 | 
			
		||||
        if token == "[CLS]" or token == "[SEP]":
 | 
			
		||||
            continue
 | 
			
		||||
        cand_indices.append(i)
 | 
			
		||||
 | 
			
		||||
    num_to_mask = min(max_predictions_per_seq,
 | 
			
		||||
                      max(1, int(round(len(tokens) * masked_lm_prob))))
 | 
			
		||||
    shuffle(cand_indices)
 | 
			
		||||
    mask_indices = sorted(sample(cand_indices, num_to_mask))
 | 
			
		||||
    masked_token_labels = []
 | 
			
		||||
    for index in mask_indices:
 | 
			
		||||
        # 80% of the time, replace with [MASK]
 | 
			
		||||
        if random() < 0.8:
 | 
			
		||||
            masked_token = "[MASK]"
 | 
			
		||||
        else:
 | 
			
		||||
            # 10% of the time, keep original
 | 
			
		||||
            if random() < 0.5:
 | 
			
		||||
                masked_token = tokens[index]
 | 
			
		||||
            # 10% of the time, replace with random word
 | 
			
		||||
            else:
 | 
			
		||||
                masked_token = choice(vocab_list)
 | 
			
		||||
        masked_token_labels.append(tokens[index])
 | 
			
		||||
        # Once we've saved the true label for that token, we can overwrite it with the masked version
 | 
			
		||||
        tokens[index] = masked_token
 | 
			
		||||
 | 
			
		||||
    return tokens, mask_indices, masked_token_labels
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def create_instances_from_document(
        doc_database, doc_idx, max_seq_length, short_seq_prob,
        masked_lm_prob, max_predictions_per_seq, vocab_list):
    """This code is mostly a duplicate of the equivalent function from Google BERT's repo.
    However, we make some changes and improvements. Sampling is improved and no longer requires a loop in this function.
    Also, documents are sampled proportionally to the number of sentences they contain, which means each sentence
    (rather than each document) has an equal chance of being sampled as a false example for the NextSentence task."""
    document = doc_database[doc_idx]
    # Account for [CLS], [SEP], [SEP]
    max_num_tokens = max_seq_length - 3

    # We *usually* want to fill up the entire sequence since we are padding
    # to `max_seq_length` anyways, so short sequences are generally wasted
    # computation. However, we *sometimes*
    # (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
    # sequences to minimize the mismatch between pre-training and fine-tuning.
    # The `target_seq_length` is just a rough target however, whereas
    # `max_seq_length` is a hard limit.
    target_seq_length = max_num_tokens
    if random() < short_seq_prob:
        target_seq_length = randint(2, max_num_tokens)

    # We DON'T just concatenate all of the tokens from a document into a long
    # sequence and choose an arbitrary split point because this would make the
    # next sentence prediction task too easy. Instead, we split the input into
    # segments "A" and "B" based on the actual "sentences" provided by the user
    # input.
    instances = []
    current_chunk = []
    current_length = 0
    i = 0
    while i < len(document):
        segment = document[i]
        current_chunk.append(segment)
        current_length += len(segment)
        if i == len(document) - 1 or current_length >= target_seq_length:
            if current_chunk:
                # `a_end` is how many segments from `current_chunk` go into the `A`
                # (first) sentence.
                a_end = 1
                if len(current_chunk) >= 2:
                    a_end = randrange(1, len(current_chunk))

                tokens_a = []
                for j in range(a_end):
                    tokens_a.extend(current_chunk[j])

                tokens_b = []

                # Random next
                if len(current_chunk) == 1 or random() < 0.5:
                    is_random_next = True
                    target_b_length = target_seq_length - len(tokens_a)

                    # Sample a random document, with longer docs being sampled more frequently
                    random_document = doc_database.sample_doc(current_idx=doc_idx, sentence_weighted=True)

                    random_start = randrange(0, len(random_document))
                    for j in range(random_start, len(random_document)):
                        tokens_b.extend(random_document[j])
                        if len(tokens_b) >= target_b_length:
                            break
                    # We didn't actually use these segments so we "put them back" so
                    # they don't go to waste.
                    num_unused_segments = len(current_chunk) - a_end
                    i -= num_unused_segments
                # Actual next
                else:
                    is_random_next = False
                    for j in range(a_end, len(current_chunk)):
                        tokens_b.extend(current_chunk[j])
                truncate_seq_pair(tokens_a, tokens_b, max_num_tokens)

                assert len(tokens_a) >= 1
                assert len(tokens_b) >= 1

                tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
                # The segment IDs are 0 for the [CLS] token, the A tokens and the first [SEP]
                # They are 1 for the B tokens and the final [SEP]
                segment_ids = [0 for _ in range(len(tokens_a) + 2)] + [1 for _ in range(len(tokens_b) + 1)]

                tokens, masked_lm_positions, masked_lm_labels = create_masked_lm_predictions(
                    tokens, masked_lm_prob, max_predictions_per_seq, vocab_list)

                instance = {
                    "tokens": tokens,
                    "segment_ids": segment_ids,
                    "is_random_next": is_random_next,
                    "masked_lm_positions": masked_lm_positions,
                    "masked_lm_labels": masked_lm_labels}
                instances.append(instance)
            current_chunk = []
            current_length = 0
        i += 1

    return instances

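
# Illustrative sketch (not part of the original script): each element returned by
# create_instances_from_document is a plain dict that main() serialises as one JSON
# line of the epoch file. `_demo_instance_line` and its hand-written values are
# hypothetical, shown only to document the expected keys and shapes.
def _demo_instance_line():
    instance = {
        "tokens": ["[CLS]", "the", "[MASK]", "[SEP]", "it", "barks", "[SEP]"],
        "segment_ids": [0, 0, 0, 0, 1, 1, 1],       # len(tokens_a)+2 zeros, len(tokens_b)+1 ones
        "is_random_next": False,                     # True if segment B came from another document
        "masked_lm_positions": [2],                  # indices replaced by create_masked_lm_predictions
        "masked_lm_labels": ["dog"]}                 # original tokens at those indices
    return json.dumps(instance)

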
def main():
    parser = ArgumentParser()
    parser.add_argument('--train_corpus', type=Path, required=True)
    parser.add_argument("--output_dir", type=Path, required=True)
    parser.add_argument("--bert_model", type=str, required=True,
                        choices=["bert-base-uncased", "bert-large-uncased", "bert-base-cased",
                                 "bert-base-multilingual", "bert-base-chinese"])
    parser.add_argument("--do_lower_case", action="store_true")

    parser.add_argument("--reduce_memory", action="store_true",
                        help="Reduce memory usage for large datasets by keeping data on disc rather than in memory")

    parser.add_argument("--epochs_to_generate", type=int, default=3,
                        help="Number of epochs of data to pregenerate")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--short_seq_prob", type=float, default=0.1,
                        help="Probability of making a short sentence as a training example")
    parser.add_argument("--masked_lm_prob", type=float, default=0.15,
                        help="Probability of masking each token for the LM task")
    parser.add_argument("--max_predictions_per_seq", type=int, default=20,
                        help="Maximum number of tokens to mask in each sequence")

    args = parser.parse_args()

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
    vocab_list = list(tokenizer.vocab.keys())
    with DocumentDatabase(reduce_memory=args.reduce_memory) as docs:
        with args.train_corpus.open() as f:
            doc = []
            for line in tqdm(f, desc="Loading Dataset", unit=" lines"):
                line = line.strip()
                if line == "":
                    docs.add_document(doc)
                    doc = []
                else:
                    tokens = tokenizer.tokenize(line)
                    doc.append(tokens)
            if doc:
                docs.add_document(doc)  # If the last doc didn't end on a newline, make sure it still gets added
        if len(docs) <= 1:
            exit("ERROR: No document breaks were found in the input file! These are necessary to allow the script to "
                 "ensure that random NextSentences are not sampled from the same document. Please add blank lines to "
                 "indicate breaks between documents in your input file. If your dataset does not contain multiple "
                 "documents, blank lines can be inserted at any natural boundary, such as the ends of chapters, "
                 "sections or paragraphs.")

        args.output_dir.mkdir(exist_ok=True)
        for epoch in trange(args.epochs_to_generate, desc="Epoch"):
            epoch_filename = args.output_dir / f"epoch_{epoch}.json"
            num_instances = 0
            with epoch_filename.open('w') as epoch_file:
                for doc_idx in trange(len(docs), desc="Document"):
                    doc_instances = create_instances_from_document(
                        docs, doc_idx, max_seq_length=args.max_seq_len, short_seq_prob=args.short_seq_prob,
                        masked_lm_prob=args.masked_lm_prob, max_predictions_per_seq=args.max_predictions_per_seq,
                        vocab_list=vocab_list)
                    doc_instances = [json.dumps(instance) for instance in doc_instances]
                    for instance in doc_instances:
                        epoch_file.write(instance + '\n')
                        num_instances += 1
            metrics_file = args.output_dir / f"epoch_{epoch}_metrics.json"
            with metrics_file.open('w') as metrics_file:
                metrics = {
                    "num_training_examples": num_instances,
                    "max_seq_len": args.max_seq_len
                }
                metrics_file.write(json.dumps(metrics))


if __name__ == '__main__':
    main()
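
# Example invocation (illustrative; paths and flag values are placeholders, not from the diff):
#   python <this_script>.py --train_corpus corpus.txt --output_dir training_data/ \
#          --bert_model bert-base-uncased --do_lower_case --epochs_to_generate 3 --max_seq_len 128
# For each epoch this writes one epoch_N.json file (one JSON instance per line, as sketched
# in _demo_instance_line above) plus an epoch_N_metrics.json file into --output_dir.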
645	examples/lm_finetuning/simple_lm_finetuning.py	Normal file
@ -0,0 +1,645 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
"""BERT finetuning runner."""
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import, division, print_function, unicode_literals
 | 
			
		||||
 | 
			
		||||
import argparse
 | 
			
		||||
import logging
 | 
			
		||||
import os
 | 
			
		||||
import random
 | 
			
		||||
from io import open
 | 
			
		||||
 | 
			
		||||
import numpy as np
 | 
			
		||||
import torch
 | 
			
		||||
from torch.utils.data import DataLoader, Dataset, RandomSampler
 | 
			
		||||
from torch.utils.data.distributed import DistributedSampler
 | 
			
		||||
from tqdm import tqdm, trange
 | 
			
		||||
 | 
			
		||||
from pytorch_pretrained_bert.modeling import BertForPreTraining
 | 
			
		||||
from pytorch_pretrained_bert.tokenization import BertTokenizer
 | 
			
		||||
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
 | 
			
		||||
 | 
			
		||||
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
 | 
			
		||||
                    datefmt='%m/%d/%Y %H:%M:%S',
 | 
			
		||||
                    level=logging.INFO)
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BERTDataset(Dataset):
 | 
			
		||||
    def __init__(self, corpus_path, tokenizer, seq_len, encoding="utf-8", corpus_lines=None, on_memory=True):
 | 
			
		||||
        self.vocab = tokenizer.vocab
 | 
			
		||||
        self.tokenizer = tokenizer
 | 
			
		||||
        self.seq_len = seq_len
 | 
			
		||||
        self.on_memory = on_memory
 | 
			
		||||
        self.corpus_lines = corpus_lines  # number of non-empty lines in input corpus
 | 
			
		||||
        self.corpus_path = corpus_path
 | 
			
		||||
        self.encoding = encoding
 | 
			
		||||
        self.current_doc = 0  # to avoid random sentence from same doc
 | 
			
		||||
 | 
			
		||||
        # for loading samples directly from file
 | 
			
		||||
        self.sample_counter = 0  # used to keep track of full epochs on file
 | 
			
		||||
        self.line_buffer = None  # keep second sentence of a pair in memory and use as first sentence in next pair
 | 
			
		||||
 | 
			
		||||
        # for loading samples in memory
 | 
			
		||||
        self.current_random_doc = 0
 | 
			
		||||
        self.num_docs = 0
 | 
			
		||||
        self.sample_to_doc = [] # map sample index to doc and line
 | 
			
		||||
 | 
			
		||||
        # load samples into memory
 | 
			
		||||
        if on_memory:
 | 
			
		||||
            self.all_docs = []
 | 
			
		||||
            doc = []
 | 
			
		||||
            self.corpus_lines = 0
 | 
			
		||||
            with open(corpus_path, "r", encoding=encoding) as f:
 | 
			
		||||
                for line in tqdm(f, desc="Loading Dataset", total=corpus_lines):
 | 
			
		||||
                    line = line.strip()
 | 
			
		||||
                    if line == "":
 | 
			
		||||
                        self.all_docs.append(doc)
 | 
			
		||||
                        doc = []
 | 
			
		||||
                        #remove last added sample because there won't be a subsequent line anymore in the doc
 | 
			
		||||
                        self.sample_to_doc.pop()
 | 
			
		||||
                    else:
 | 
			
		||||
                        #store as one sample
 | 
			
		||||
                        sample = {"doc_id": len(self.all_docs),
 | 
			
		||||
                                  "line": len(doc)}
 | 
			
		||||
                        self.sample_to_doc.append(sample)
 | 
			
		||||
                        doc.append(line)
 | 
			
		||||
                        self.corpus_lines = self.corpus_lines + 1
 | 
			
		||||
 | 
			
		||||
            # if last row in file is not empty
 | 
			
		||||
            if self.all_docs[-1] != doc:
 | 
			
		||||
                self.all_docs.append(doc)
 | 
			
		||||
                self.sample_to_doc.pop()
 | 
			
		||||
 | 
			
		||||
            self.num_docs = len(self.all_docs)
 | 
			
		||||
 | 
			
		||||
        # load samples later lazily from disk
 | 
			
		||||
        else:
 | 
			
		||||
            if self.corpus_lines is None:
 | 
			
		||||
                with open(corpus_path, "r", encoding=encoding) as f:
 | 
			
		||||
                    self.corpus_lines = 0
 | 
			
		||||
                    for line in tqdm(f, desc="Loading Dataset", total=corpus_lines):
 | 
			
		||||
                        if line.strip() == "":
 | 
			
		||||
                            self.num_docs += 1
 | 
			
		||||
                        else:
 | 
			
		||||
                            self.corpus_lines += 1
 | 
			
		||||
 | 
			
		||||
                    # if doc does not end with empty line
 | 
			
		||||
                    if line.strip() != "":
 | 
			
		||||
                        self.num_docs += 1
 | 
			
		||||
 | 
			
		||||
            self.file = open(corpus_path, "r", encoding=encoding)
 | 
			
		||||
            self.random_file = open(corpus_path, "r", encoding=encoding)
 | 
			
		||||
 | 
			
		||||
    def __len__(self):
 | 
			
		||||
        # last line of doc won't be used, because there's no "nextSentence". Additionally, we start counting at 0.
 | 
			
		||||
        return self.corpus_lines - self.num_docs - 1
 | 
			
		||||
 | 
			
		||||
    def __getitem__(self, item):
 | 
			
		||||
        cur_id = self.sample_counter
 | 
			
		||||
        self.sample_counter += 1
 | 
			
		||||
        if not self.on_memory:
 | 
			
		||||
            # after one epoch we start again from beginning of file
 | 
			
		||||
            if cur_id != 0 and (cur_id % len(self) == 0):
 | 
			
		||||
                self.file.close()
 | 
			
		||||
                self.file = open(self.corpus_path, "r", encoding=self.encoding)
 | 
			
		||||
 | 
			
		||||
        t1, t2, is_next_label = self.random_sent(item)
 | 
			
		||||
 | 
			
		||||
        # tokenize
 | 
			
		||||
        tokens_a = self.tokenizer.tokenize(t1)
 | 
			
		||||
        tokens_b = self.tokenizer.tokenize(t2)
 | 
			
		||||
 | 
			
		||||
        # combine to one sample
 | 
			
		||||
        cur_example = InputExample(guid=cur_id, tokens_a=tokens_a, tokens_b=tokens_b, is_next=is_next_label)
 | 
			
		||||
 | 
			
		||||
        # transform sample to features
 | 
			
		||||
        cur_features = convert_example_to_features(cur_example, self.seq_len, self.tokenizer)
 | 
			
		||||
 | 
			
		||||
        cur_tensors = (torch.tensor(cur_features.input_ids),
 | 
			
		||||
                       torch.tensor(cur_features.input_mask),
 | 
			
		||||
                       torch.tensor(cur_features.segment_ids),
 | 
			
		||||
                       torch.tensor(cur_features.lm_label_ids),
 | 
			
		||||
                       torch.tensor(cur_features.is_next))
 | 
			
		||||
 | 
			
		||||
        return cur_tensors
 | 
			
		||||
 | 
			
		||||
    def random_sent(self, index):
 | 
			
		||||
        """
 | 
			
		||||
        Get one sample from corpus consisting of two sentences. With prob. 50% these are two subsequent sentences
 | 
			
		||||
        from one doc. With 50% the second sentence will be a random one from another doc.
 | 
			
		||||
        :param index: int, index of sample.
 | 
			
		||||
        :return: (str, str, int), sentence 1, sentence 2, isNextSentence Label
 | 
			
		||||
        """
 | 
			
		||||
        t1, t2 = self.get_corpus_line(index)
 | 
			
		||||
        if random.random() > 0.5:
 | 
			
		||||
            label = 0
 | 
			
		||||
        else:
 | 
			
		||||
            t2 = self.get_random_line()
 | 
			
		||||
            label = 1
 | 
			
		||||
 | 
			
		||||
        assert len(t1) > 0
 | 
			
		||||
        assert len(t2) > 0
 | 
			
		||||
        return t1, t2, label
 | 
			
		||||
 | 
			
		||||
    def get_corpus_line(self, item):
 | 
			
		||||
        """
 | 
			
		||||
        Get one sample from corpus consisting of a pair of two subsequent lines from the same doc.
 | 
			
		||||
        :param item: int, index of sample.
 | 
			
		||||
        :return: (str, str), two subsequent sentences from corpus
 | 
			
		||||
        """
 | 
			
		||||
        t1 = ""
 | 
			
		||||
        t2 = ""
 | 
			
		||||
        assert item < self.corpus_lines
 | 
			
		||||
        if self.on_memory:
 | 
			
		||||
            sample = self.sample_to_doc[item]
 | 
			
		||||
            t1 = self.all_docs[sample["doc_id"]][sample["line"]]
 | 
			
		||||
            t2 = self.all_docs[sample["doc_id"]][sample["line"]+1]
 | 
			
		||||
            # used later to avoid random nextSentence from same doc
 | 
			
		||||
            self.current_doc = sample["doc_id"]
 | 
			
		||||
            return t1, t2
 | 
			
		||||
        else:
 | 
			
		||||
            if self.line_buffer is None:
 | 
			
		||||
                # read first non-empty line of file
 | 
			
		||||
                while t1 == "" :
 | 
			
		||||
                    t1 = next(self.file).strip()
 | 
			
		||||
                    t2 = next(self.file).strip()
 | 
			
		||||
            else:
 | 
			
		||||
                # use t2 from previous iteration as new t1
 | 
			
		||||
                t1 = self.line_buffer
 | 
			
		||||
                t2 = next(self.file).strip()
 | 
			
		||||
                # skip empty rows that are used for separating documents and keep track of current doc id
 | 
			
		||||
                while t2 == "" or t1 == "":
 | 
			
		||||
                    t1 = next(self.file).strip()
 | 
			
		||||
                    t2 = next(self.file).strip()
 | 
			
		||||
                    self.current_doc = self.current_doc+1
 | 
			
		||||
            self.line_buffer = t2
 | 
			
		||||
 | 
			
		||||
        assert t1 != ""
 | 
			
		||||
        assert t2 != ""
 | 
			
		||||
        return t1, t2
 | 
			
		||||
 | 
			
		||||
    def get_random_line(self):
 | 
			
		||||
        """
 | 
			
		||||
        Get random line from another document for nextSentence task.
 | 
			
		||||
        :return: str, content of one line
 | 
			
		||||
        """
 | 
			
		||||
        # Similar to original tf repo: This outer loop should rarely go for more than one iteration for large
 | 
			
		||||
        # corpora. However, just to be careful, we try to make sure that
 | 
			
		||||
        # the random document is not the same as the document we're processing.
 | 
			
		||||
        for _ in range(10):
 | 
			
		||||
            if self.on_memory:
 | 
			
		||||
                rand_doc_idx = random.randint(0, len(self.all_docs)-1)
 | 
			
		||||
                rand_doc = self.all_docs[rand_doc_idx]
 | 
			
		||||
                line = rand_doc[random.randrange(len(rand_doc))]
 | 
			
		||||
            else:
 | 
			
		||||
                rand_index = random.randint(1, self.corpus_lines if self.corpus_lines < 1000 else 1000)
 | 
			
		||||
                #pick random line
 | 
			
		||||
                for _ in range(rand_index):
 | 
			
		||||
                    line = self.get_next_line()
 | 
			
		||||
            #check if our picked random line is really from another doc like we want it to be
 | 
			
		||||
            if self.current_random_doc != self.current_doc:
 | 
			
		||||
                break
 | 
			
		||||
        return line
 | 
			
		||||
 | 
			
		||||
    def get_next_line(self):
 | 
			
		||||
        """ Gets next line of random_file and starts over when reaching end of file"""
 | 
			
		||||
        try:
 | 
			
		||||
            line = next(self.random_file).strip()
 | 
			
		||||
            #keep track of which document we are currently looking at to later avoid having the same doc as t1
 | 
			
		||||
            if line == "":
 | 
			
		||||
                self.current_random_doc = self.current_random_doc + 1
 | 
			
		||||
                line = next(self.random_file).strip()
 | 
			
		||||
        except StopIteration:
 | 
			
		||||
            self.random_file.close()
 | 
			
		||||
            self.random_file = open(self.corpus_path, "r", encoding=self.encoding)
 | 
			
		||||
            line = next(self.random_file).strip()
 | 
			
		||||
        return line
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class InputExample(object):
 | 
			
		||||
    """A single training/test example for the language model."""
 | 
			
		||||
 | 
			
		||||
    def __init__(self, guid, tokens_a, tokens_b=None, is_next=None, lm_labels=None):
 | 
			
		||||
        """Constructs a InputExample.
 | 
			
		||||
 | 
			
		||||
        Args:
 | 
			
		||||
            guid: Unique id for the example.
 | 
			
		||||
            tokens_a: string. The untokenized text of the first sequence. For single
 | 
			
		||||
            sequence tasks, only this sequence must be specified.
 | 
			
		||||
            tokens_b: (Optional) string. The untokenized text of the second sequence.
 | 
			
		||||
            Must only be specified for sequence pair tasks.
 | 
			
		||||
            label: (Optional) string. The label of the example. This should be
 | 
			
		||||
            specified for train and dev examples, but not for test examples.
 | 
			
		||||
        """
 | 
			
		||||
        self.guid = guid
 | 
			
		||||
        self.tokens_a = tokens_a
 | 
			
		||||
        self.tokens_b = tokens_b
 | 
			
		||||
        self.is_next = is_next  # nextSentence
 | 
			
		||||
        self.lm_labels = lm_labels  # masked words for language model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class InputFeatures(object):
 | 
			
		||||
    """A single set of features of data."""
 | 
			
		||||
 | 
			
		||||
    def __init__(self, input_ids, input_mask, segment_ids, is_next, lm_label_ids):
 | 
			
		||||
        self.input_ids = input_ids
 | 
			
		||||
        self.input_mask = input_mask
 | 
			
		||||
        self.segment_ids = segment_ids
 | 
			
		||||
        self.is_next = is_next
 | 
			
		||||
        self.lm_label_ids = lm_label_ids
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def random_word(tokens, tokenizer):
 | 
			
		||||
    """
 | 
			
		||||
    Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
 | 
			
		||||
    :param tokens: list of str, tokenized sentence.
 | 
			
		||||
    :param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
 | 
			
		||||
    :return: (list of str, list of int), masked tokens and related labels for LM prediction
 | 
			
		||||
    """
 | 
			
		||||
    output_label = []
 | 
			
		||||
 | 
			
		||||
    for i, token in enumerate(tokens):
 | 
			
		||||
        prob = random.random()
 | 
			
		||||
        # mask token with 15% probability
 | 
			
		||||
        if prob < 0.15:
 | 
			
		||||
            prob /= 0.15
 | 
			
		||||
 | 
			
		||||
            # 80% randomly change token to mask token
 | 
			
		||||
            if prob < 0.8:
 | 
			
		||||
                tokens[i] = "[MASK]"
 | 
			
		||||
 | 
			
		||||
            # 10% randomly change token to random token
 | 
			
		||||
            elif prob < 0.9:
 | 
			
		||||
                tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]
 | 
			
		||||
 | 
			
		||||
            # -> rest 10% randomly keep current token
 | 
			
		||||
 | 
			
		||||
            # append current token to output (we will predict these later)
 | 
			
		||||
            try:
 | 
			
		||||
                output_label.append(tokenizer.vocab[token])
 | 
			
		||||
            except KeyError:
 | 
			
		||||
                # For unknown words (should not occur with BPE vocab)
 | 
			
		||||
                output_label.append(tokenizer.vocab["[UNK]"])
 | 
			
		||||
                logger.warning("Cannot find token '{}' in vocab. Using [UNK] insetad".format(token))
 | 
			
		||||
        else:
 | 
			
		||||
            # no masking token (will be ignored by loss function later)
 | 
			
		||||
            output_label.append(-1)
 | 
			
		||||
 | 
			
		||||
    return tokens, output_label
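
# Illustrative sketch (not part of the original script): random_word mutates the token
# list in place and returns per-token label ids, with -1 meaning "not masked, ignore in
# the loss". `_demo_random_word` is a hypothetical helper added only for illustration.
def _demo_random_word(tokenizer):
    tokens = tokenizer.tokenize("the dog barks")
    tokens, labels = random_word(tokens, tokenizer)
    # Roughly 15% of positions get a label other than -1; those labels are the vocab ids
    # of the original tokens, while tokens[i] now holds "[MASK]", a random vocab token,
    # or the unchanged original token.
    return tokens, labels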
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def convert_example_to_features(example, max_seq_length, tokenizer):
 | 
			
		||||
    """
 | 
			
		||||
    Convert a raw sample (pair of sentences as tokenized strings) into a proper training sample with
 | 
			
		||||
    IDs, LM labels, input_mask, CLS and SEP tokens etc.
 | 
			
		||||
    :param example: InputExample, containing sentence input as strings and is_next label
 | 
			
		||||
    :param max_seq_length: int, maximum length of sequence.
 | 
			
		||||
    :param tokenizer: Tokenizer
 | 
			
		||||
    :return: InputFeatures, containing all inputs and labels of one sample as IDs (as used for model training)
 | 
			
		||||
    """
 | 
			
		||||
    tokens_a = example.tokens_a
 | 
			
		||||
    tokens_b = example.tokens_b
 | 
			
		||||
    # Modifies `tokens_a` and `tokens_b` in place so that the total
 | 
			
		||||
    # length is less than the specified length.
 | 
			
		||||
    # Account for [CLS], [SEP], [SEP] with "- 3"
 | 
			
		||||
    _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
 | 
			
		||||
 | 
			
		||||
    tokens_a, t1_label = random_word(tokens_a, tokenizer)
 | 
			
		||||
    tokens_b, t2_label = random_word(tokens_b, tokenizer)
 | 
			
		||||
    # concatenate lm labels and account for CLS, SEP, SEP
 | 
			
		||||
    lm_label_ids = ([-1] + t1_label + [-1] + t2_label + [-1])
 | 
			
		||||
 | 
			
		||||
    # The convention in BERT is:
 | 
			
		||||
    # (a) For sequence pairs:
 | 
			
		||||
    #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
 | 
			
		||||
    #  type_ids: 0   0  0    0    0     0       0 0    1  1  1  1   1 1
 | 
			
		||||
    # (b) For single sequences:
 | 
			
		||||
    #  tokens:   [CLS] the dog is hairy . [SEP]
 | 
			
		||||
    #  type_ids: 0   0   0   0  0     0 0
 | 
			
		||||
    #
 | 
			
		||||
    # Where "type_ids" are used to indicate whether this is the first
 | 
			
		||||
    # sequence or the second sequence. The embedding vectors for `type=0` and
 | 
			
		||||
    # `type=1` were learned during pre-training and are added to the wordpiece
 | 
			
		||||
    # embedding vector (and position vector). This is not *strictly* necessary
 | 
			
		||||
    # since the [SEP] token unambiguously separates the sequences, but it makes
 | 
			
		||||
    # it easier for the model to learn the concept of sequences.
 | 
			
		||||
    #
 | 
			
		||||
    # For classification tasks, the first vector (corresponding to [CLS]) is
 | 
			
		||||
    # used as the "sentence vector". Note that this only makes sense because
 | 
			
		||||
    # the entire model is fine-tuned.
 | 
			
		||||
    tokens = []
 | 
			
		||||
    segment_ids = []
 | 
			
		||||
    tokens.append("[CLS]")
 | 
			
		||||
    segment_ids.append(0)
 | 
			
		||||
    for token in tokens_a:
 | 
			
		||||
        tokens.append(token)
 | 
			
		||||
        segment_ids.append(0)
 | 
			
		||||
    tokens.append("[SEP]")
 | 
			
		||||
    segment_ids.append(0)
 | 
			
		||||
 | 
			
		||||
    assert len(tokens_b) > 0
 | 
			
		||||
    for token in tokens_b:
 | 
			
		||||
        tokens.append(token)
 | 
			
		||||
        segment_ids.append(1)
 | 
			
		||||
    tokens.append("[SEP]")
 | 
			
		||||
    segment_ids.append(1)
 | 
			
		||||
 | 
			
		||||
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
 | 
			
		||||
 | 
			
		||||
    # The mask has 1 for real tokens and 0 for padding tokens. Only real
 | 
			
		||||
    # tokens are attended to.
 | 
			
		||||
    input_mask = [1] * len(input_ids)
 | 
			
		||||
 | 
			
		||||
    # Zero-pad up to the sequence length.
 | 
			
		||||
    while len(input_ids) < max_seq_length:
 | 
			
		||||
        input_ids.append(0)
 | 
			
		||||
        input_mask.append(0)
 | 
			
		||||
        segment_ids.append(0)
 | 
			
		||||
        lm_label_ids.append(-1)
 | 
			
		||||
 | 
			
		||||
    assert len(input_ids) == max_seq_length
 | 
			
		||||
    assert len(input_mask) == max_seq_length
 | 
			
		||||
    assert len(segment_ids) == max_seq_length
 | 
			
		||||
    assert len(lm_label_ids) == max_seq_length
 | 
			
		||||
 | 
			
		||||
    if example.guid < 5:
 | 
			
		||||
        logger.info("*** Example ***")
 | 
			
		||||
        logger.info("guid: %s" % (example.guid))
 | 
			
		||||
        logger.info("tokens: %s" % " ".join(
 | 
			
		||||
                [str(x) for x in tokens]))
 | 
			
		||||
        logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
 | 
			
		||||
        logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
 | 
			
		||||
        logger.info(
 | 
			
		||||
                "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
 | 
			
		||||
        logger.info("LM label: %s " % (lm_label_ids))
 | 
			
		||||
        logger.info("Is next sentence label: %s " % (example.is_next))
 | 
			
		||||
 | 
			
		||||
    features = InputFeatures(input_ids=input_ids,
 | 
			
		||||
                             input_mask=input_mask,
 | 
			
		||||
                             segment_ids=segment_ids,
 | 
			
		||||
                             lm_label_ids=lm_label_ids,
 | 
			
		||||
                             is_next=example.is_next)
 | 
			
		||||
    return features
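
# Illustrative sketch (not part of the original script): every feature list is padded to
# max_seq_length, so a batch of examples can be stacked into fixed-size tensors.
# `_demo_features` is a hypothetical helper added only for illustration.
def _demo_features(tokenizer, max_seq_length=16):
    example = InputExample(guid=0, tokens_a=tokenizer.tokenize("the dog barks"),
                           tokens_b=tokenizer.tokenize("it is loud"), is_next=0)
    features = convert_example_to_features(example, max_seq_length, tokenizer)
    assert len(features.input_ids) == len(features.input_mask) == max_seq_length
    assert len(features.segment_ids) == len(features.lm_label_ids) == max_seq_length
    return features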
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def main():
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
 | 
			
		||||
    ## Required parameters
 | 
			
		||||
    parser.add_argument("--train_corpus",
 | 
			
		||||
                        default=None,
 | 
			
		||||
                        type=str,
 | 
			
		||||
                        required=True,
 | 
			
		||||
                        help="The input train corpus.")
 | 
			
		||||
    parser.add_argument("--bert_model", default=None, type=str, required=True,
 | 
			
		||||
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
 | 
			
		||||
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
 | 
			
		||||
    parser.add_argument("--output_dir",
 | 
			
		||||
                        default=None,
 | 
			
		||||
                        type=str,
 | 
			
		||||
                        required=True,
 | 
			
		||||
                        help="The output directory where the model checkpoints will be written.")
 | 
			
		||||
 | 
			
		||||
    ## Other parameters
 | 
			
		||||
    parser.add_argument("--max_seq_length",
 | 
			
		||||
                        default=128,
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
 | 
			
		||||
                             "Sequences longer than this will be truncated, and sequences shorter \n"
 | 
			
		||||
                             "than this will be padded.")
 | 
			
		||||
    parser.add_argument("--do_train",
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to run training.")
 | 
			
		||||
    parser.add_argument("--train_batch_size",
 | 
			
		||||
                        default=32,
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        help="Total batch size for training.")
 | 
			
		||||
    parser.add_argument("--learning_rate",
 | 
			
		||||
                        default=3e-5,
 | 
			
		||||
                        type=float,
 | 
			
		||||
                        help="The initial learning rate for Adam.")
 | 
			
		||||
    parser.add_argument("--num_train_epochs",
 | 
			
		||||
                        default=3.0,
 | 
			
		||||
                        type=float,
 | 
			
		||||
                        help="Total number of training epochs to perform.")
 | 
			
		||||
    parser.add_argument("--warmup_proportion",
 | 
			
		||||
                        default=0.1,
 | 
			
		||||
                        type=float,
 | 
			
		||||
                        help="Proportion of training to perform linear learning rate warmup for. "
 | 
			
		||||
                             "E.g., 0.1 = 10%% of training.")
 | 
			
		||||
    parser.add_argument("--no_cuda",
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether not to use CUDA when available")
 | 
			
		||||
    parser.add_argument("--on_memory",
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to load train samples into memory or use disk")
 | 
			
		||||
    parser.add_argument("--do_lower_case",
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
 | 
			
		||||
    parser.add_argument("--local_rank",
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        default=-1,
 | 
			
		||||
                        help="local_rank for distributed training on gpus")
 | 
			
		||||
    parser.add_argument('--seed',
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        default=42,
 | 
			
		||||
                        help="random seed for initialization")
 | 
			
		||||
    parser.add_argument('--gradient_accumulation_steps',
 | 
			
		||||
                        type=int,
 | 
			
		||||
                        default=1,
 | 
			
		||||
                        help="Number of updates steps to accumualte before performing a backward/update pass.")
 | 
			
		||||
    parser.add_argument('--fp16',
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to use 16-bit float precision instead of 32-bit")
 | 
			
		||||
    parser.add_argument('--loss_scale',
 | 
			
		||||
                        type = float, default = 0,
 | 
			
		||||
                        help = "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
 | 
			
		||||
                        "0 (default value): dynamic loss scaling.\n"
 | 
			
		||||
                        "Positive power of 2: static loss scaling value.\n")
 | 
			
		||||
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
 | 
			
		||||
    if args.local_rank == -1 or args.no_cuda:
 | 
			
		||||
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
 | 
			
		||||
        n_gpu = torch.cuda.device_count()
 | 
			
		||||
    else:
 | 
			
		||||
        torch.cuda.set_device(args.local_rank)
 | 
			
		||||
        device = torch.device("cuda", args.local_rank)
 | 
			
		||||
        n_gpu = 1
 | 
			
		||||
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
 | 
			
		||||
        torch.distributed.init_process_group(backend='nccl')
 | 
			
		||||
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
 | 
			
		||||
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
 | 
			
		||||
 | 
			
		||||
    if args.gradient_accumulation_steps < 1:
 | 
			
		||||
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
 | 
			
		||||
                            args.gradient_accumulation_steps))
 | 
			
		||||
 | 
			
		||||
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
 | 
			
		||||
 | 
			
		||||
    random.seed(args.seed)
 | 
			
		||||
    np.random.seed(args.seed)
 | 
			
		||||
    torch.manual_seed(args.seed)
 | 
			
		||||
    if n_gpu > 0:
 | 
			
		||||
        torch.cuda.manual_seed_all(args.seed)
 | 
			
		||||
 | 
			
		||||
    if not args.do_train:
 | 
			
		||||
        raise ValueError("Training is currently the only implemented execution option. Please set `do_train`.")
 | 
			
		||||
 | 
			
		||||
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
 | 
			
		||||
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
 | 
			
		||||
    if not os.path.exists(args.output_dir):
 | 
			
		||||
        os.makedirs(args.output_dir)
 | 
			
		||||
 | 
			
		||||
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
 | 
			
		||||
 | 
			
		||||
    #train_examples = None
 | 
			
		||||
    num_train_optimization_steps = None
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        print("Loading Train Dataset", args.train_corpus)
 | 
			
		||||
        train_dataset = BERTDataset(args.train_corpus, tokenizer, seq_len=args.max_seq_length,
 | 
			
		||||
                                    corpus_lines=None, on_memory=args.on_memory)
 | 
			
		||||
        num_train_optimization_steps = int(
 | 
			
		||||
            len(train_dataset) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
 | 
			
		||||
        if args.local_rank != -1:
 | 
			
		||||
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
 | 
			
		||||
 | 
			
		||||
    # Prepare model
 | 
			
		||||
    model = BertForPreTraining.from_pretrained(args.bert_model)
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        model.half()
 | 
			
		||||
    model.to(device)
 | 
			
		||||
    if args.local_rank != -1:
 | 
			
		||||
        try:
 | 
			
		||||
            from apex.parallel import DistributedDataParallel as DDP
 | 
			
		||||
        except ImportError:
 | 
			
		||||
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
 | 
			
		||||
        model = DDP(model)
 | 
			
		||||
    elif n_gpu > 1:
 | 
			
		||||
        model = torch.nn.DataParallel(model)
 | 
			
		||||
 | 
			
		||||
    # Prepare optimizer
 | 
			
		||||
    param_optimizer = list(model.named_parameters())
 | 
			
		||||
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
 | 
			
		||||
    optimizer_grouped_parameters = [
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
 | 
			
		||||
        ]
 | 
			
		||||
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        try:
 | 
			
		||||
            from apex.optimizers import FP16_Optimizer
 | 
			
		||||
            from apex.optimizers import FusedAdam
 | 
			
		||||
        except ImportError:
 | 
			
		||||
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
 | 
			
		||||
 | 
			
		||||
        optimizer = FusedAdam(optimizer_grouped_parameters,
 | 
			
		||||
                              lr=args.learning_rate,
 | 
			
		||||
                              bias_correction=False,
 | 
			
		||||
                              max_grad_norm=1.0)
 | 
			
		||||
        if args.loss_scale == 0:
 | 
			
		||||
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
 | 
			
		||||
        else:
 | 
			
		||||
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
 | 
			
		||||
        warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
 | 
			
		||||
                                             t_total=num_train_optimization_steps)
 | 
			
		||||
 | 
			
		||||
    else:
 | 
			
		||||
        optimizer = BertAdam(optimizer_grouped_parameters,
 | 
			
		||||
                             lr=args.learning_rate,
 | 
			
		||||
                             warmup=args.warmup_proportion,
 | 
			
		||||
                             t_total=num_train_optimization_steps)
 | 
			
		||||
 | 
			
		||||
    global_step = 0
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        logger.info("***** Running training *****")
 | 
			
		||||
        logger.info("  Num examples = %d", len(train_dataset))
 | 
			
		||||
        logger.info("  Batch size = %d", args.train_batch_size)
 | 
			
		||||
        logger.info("  Num steps = %d", num_train_optimization_steps)
 | 
			
		||||
 | 
			
		||||
        if args.local_rank == -1:
 | 
			
		||||
            train_sampler = RandomSampler(train_dataset)
 | 
			
		||||
        else:
 | 
			
		||||
            #TODO: check if this works with current data generator from disk that relies on next(file)
 | 
			
		||||
            # (it doesn't return item back by index)
 | 
			
		||||
            train_sampler = DistributedSampler(train_dataset)
 | 
			
		||||
        train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
 | 
			
		||||
 | 
			
		||||
        model.train()
 | 
			
		||||
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
 | 
			
		||||
            tr_loss = 0
 | 
			
		||||
            nb_tr_examples, nb_tr_steps = 0, 0
 | 
			
		||||
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
 | 
			
		||||
                batch = tuple(t.to(device) for t in batch)
 | 
			
		||||
                input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
 | 
			
		||||
                loss = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next)
 | 
			
		||||
                if n_gpu > 1:
 | 
			
		||||
                    loss = loss.mean() # mean() to average on multi-gpu.
 | 
			
		||||
                if args.gradient_accumulation_steps > 1:
 | 
			
		||||
                    loss = loss / args.gradient_accumulation_steps
 | 
			
		||||
                if args.fp16:
 | 
			
		||||
                    optimizer.backward(loss)
 | 
			
		||||
                else:
 | 
			
		||||
                    loss.backward()
 | 
			
		||||
                tr_loss += loss.item()
 | 
			
		||||
                nb_tr_examples += input_ids.size(0)
 | 
			
		||||
                nb_tr_steps += 1
 | 
			
		||||
                if (step + 1) % args.gradient_accumulation_steps == 0:
 | 
			
		||||
                    if args.fp16:
 | 
			
		||||
                        # modify learning rate with special warm up BERT uses
 | 
			
		||||
                        # if args.fp16 is False, BertAdam is used that handles this automatically
 | 
			
		||||
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,
 | 
			
		||||
                                                                                 args.warmup_proportion)
 | 
			
		||||
                        for param_group in optimizer.param_groups:
 | 
			
		||||
                            param_group['lr'] = lr_this_step
 | 
			
		||||
                    optimizer.step()
 | 
			
		||||
                    optimizer.zero_grad()
 | 
			
		||||
                    global_step += 1
 | 
			
		||||
 | 
			
		||||
        # Save a trained model
 | 
			
		||||
        logger.info("** ** * Saving fine - tuned model ** ** * ")
 | 
			
		||||
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
 | 
			
		||||
        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
 | 
			
		||||
        if args.do_train:
 | 
			
		||||
            torch.save(model_to_save.state_dict(), output_model_file)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
 | 
			
		||||
    """Truncates a sequence pair in place to the maximum length."""
 | 
			
		||||
 | 
			
		||||
    # This is a simple heuristic which will always truncate the longer sequence
 | 
			
		||||
    # one token at a time. This makes more sense than truncating an equal percent
 | 
			
		||||
    # of tokens from each, since if one sequence is very short then each token
 | 
			
		||||
    # that's truncated likely contains more information than a longer sequence.
 | 
			
		||||
    while True:
 | 
			
		||||
        total_length = len(tokens_a) + len(tokens_b)
 | 
			
		||||
        if total_length <= max_length:
 | 
			
		||||
            break
 | 
			
		||||
        if len(tokens_a) > len(tokens_b):
 | 
			
		||||
            tokens_a.pop()
 | 
			
		||||
        else:
 | 
			
		||||
            tokens_b.pop()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def accuracy(out, labels):
 | 
			
		||||
    outputs = np.argmax(out, axis=1)
 | 
			
		||||
    return np.sum(outputs == labels)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    main()
 | 
			
		||||
@ -1,5 +1,5 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
 | 
			
		||||
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
@ -15,30 +15,31 @@
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
"""BERT finetuning runner."""
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import
 | 
			
		||||
from __future__ import division
 | 
			
		||||
from __future__ import print_function
 | 
			
		||||
from __future__ import absolute_import, division, print_function
 | 
			
		||||
 | 
			
		||||
import csv
 | 
			
		||||
import os
 | 
			
		||||
import logging
 | 
			
		||||
import argparse
 | 
			
		||||
import csv
 | 
			
		||||
import logging
 | 
			
		||||
import os
 | 
			
		||||
import random
 | 
			
		||||
from tqdm import tqdm, trange
 | 
			
		||||
import sys
 | 
			
		||||
 | 
			
		||||
import numpy as np
 | 
			
		||||
import torch
 | 
			
		||||
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
 | 
			
		||||
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
 | 
			
		||||
                              TensorDataset)
 | 
			
		||||
from torch.utils.data.distributed import DistributedSampler
 | 
			
		||||
from tqdm import tqdm, trange
 | 
			
		||||
 | 
			
		||||
from torch.nn import CrossEntropyLoss, MSELoss
 | 
			
		||||
from scipy.stats import pearsonr, spearmanr
 | 
			
		||||
from sklearn.metrics import matthews_corrcoef, f1_score
 | 
			
		||||
 | 
			
		||||
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
 | 
			
		||||
from pytorch_pretrained_bert.modeling import BertForSequenceClassification, BertConfig
 | 
			
		||||
from pytorch_pretrained_bert.tokenization import BertTokenizer
 | 
			
		||||
from pytorch_pretrained_bert.modeling import BertForSequenceClassification
 | 
			
		||||
from pytorch_pretrained_bert.optimization import BertAdam
 | 
			
		||||
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
 | 
			
		||||
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
 | 
			
		||||
 | 
			
		||||
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
 | 
			
		||||
                    datefmt = '%m/%d/%Y %H:%M:%S',
 | 
			
		||||
                    level = logging.INFO)
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -91,10 +92,12 @@ class DataProcessor(object):
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def _read_tsv(cls, input_file, quotechar=None):
 | 
			
		||||
        """Reads a tab separated value file."""
 | 
			
		||||
        with open(input_file, "r", encoding='utf-8') as f:
 | 
			
		||||
        with open(input_file, "r", encoding="utf-8") as f:
 | 
			
		||||
            reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
 | 
			
		||||
            lines = []
 | 
			
		||||
            for line in reader:
 | 
			
		||||
                if sys.version_info[0] == 2:
 | 
			
		||||
                    line = list(unicode(cell, 'utf-8') for cell in line)
 | 
			
		||||
                lines.append(line)
 | 
			
		||||
            return lines
 | 
			
		||||
 | 
			
		||||
@ -165,6 +168,16 @@ class MnliProcessor(DataProcessor):
 | 
			
		||||
        return examples
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class MnliMismatchedProcessor(MnliProcessor):
 | 
			
		||||
    """Processor for the MultiNLI Mismatched data set (GLUE version)."""
 | 
			
		||||
 | 
			
		||||
    def get_dev_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return self._create_examples(
 | 
			
		||||
            self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
 | 
			
		||||
            "dev_matched")
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class ColaProcessor(DataProcessor):
 | 
			
		||||
    """Processor for the CoLA data set (GLUE version)."""
 | 
			
		||||
 | 
			
		||||
@ -194,13 +207,212 @@ class ColaProcessor(DataProcessor):
 | 
			
		||||
        return examples
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
 | 
			
		||||
class Sst2Processor(DataProcessor):
 | 
			
		||||
    """Processor for the SST-2 data set (GLUE version)."""
 | 
			
		||||
 | 
			
		||||
    def get_train_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return self._create_examples(
 | 
			
		||||
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
 | 
			
		||||
 | 
			
		||||
    def get_dev_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return self._create_examples(
 | 
			
		||||
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
 | 
			
		||||
 | 
			
		||||
    def get_labels(self):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return ["0", "1"]
 | 
			
		||||
 | 
			
		||||
    def _create_examples(self, lines, set_type):
 | 
			
		||||
        """Creates examples for the training and dev sets."""
 | 
			
		||||
        examples = []
 | 
			
		||||
        for (i, line) in enumerate(lines):
 | 
			
		||||
            if i == 0:
 | 
			
		||||
                continue
 | 
			
		||||
            guid = "%s-%s" % (set_type, i)
 | 
			
		||||
            text_a = line[0]
 | 
			
		||||
            label = line[1]
 | 
			
		||||
            examples.append(
 | 
			
		||||
                InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
 | 
			
		||||
        return examples
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class StsbProcessor(DataProcessor):
 | 
			
		||||
    """Processor for the STS-B data set (GLUE version)."""
 | 
			
		||||
 | 
			
		||||
    def get_train_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return self._create_examples(
 | 
			
		||||
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
 | 
			
		||||
 | 
			
		||||
    def get_dev_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return self._create_examples(
 | 
			
		||||
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
 | 
			
		||||
 | 
			
		||||
    def get_labels(self):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return [None]
 | 
			
		||||
 | 
			
		||||
    def _create_examples(self, lines, set_type):
 | 
			
		||||
        """Creates examples for the training and dev sets."""
 | 
			
		||||
        examples = []
 | 
			
		||||
        for (i, line) in enumerate(lines):
 | 
			
		||||
            if i == 0:
 | 
			
		||||
                continue
 | 
			
		||||
            guid = "%s-%s" % (set_type, line[0])
 | 
			
		||||
            text_a = line[7]
 | 
			
		||||
            text_b = line[8]
 | 
			
		||||
            label = line[-1]
 | 
			
		||||
            examples.append(
 | 
			
		||||
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
 | 
			
		||||
        return examples
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class QqpProcessor(DataProcessor):
 | 
			
		||||
    """Processor for the STS-B data set (GLUE version)."""
 | 
			
		||||
 | 
			
		||||
    def get_train_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return self._create_examples(
 | 
			
		||||
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
 | 
			
		||||
 | 
			
		||||
    def get_dev_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return self._create_examples(
 | 
			
		||||
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
 | 
			
		||||
 | 
			
		||||
    def get_labels(self):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return ["0", "1"]
 | 
			
		||||
 | 
			
		||||
    def _create_examples(self, lines, set_type):
 | 
			
		||||
        """Creates examples for the training and dev sets."""
 | 
			
		||||
        examples = []
 | 
			
		||||
        for (i, line) in enumerate(lines):
 | 
			
		||||
            if i == 0:
 | 
			
		||||
                continue
 | 
			
		||||
            guid = "%s-%s" % (set_type, line[0])
 | 
			
		||||
            try:
 | 
			
		||||
                text_a = line[3]
 | 
			
		||||
                text_b = line[4]
 | 
			
		||||
                label = line[5]
 | 
			
		||||
            except IndexError:
 | 
			
		||||
                continue
 | 
			
		||||
            examples.append(
 | 
			
		||||
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
 | 
			
		||||
        return examples
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class QnliProcessor(DataProcessor):
 | 
			
		||||
    """Processor for the STS-B data set (GLUE version)."""
 | 
			
		||||
 | 
			
		||||
    def get_train_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return self._create_examples(
 | 
			
		||||
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
 | 
			
		||||
 | 
			
		||||
    def get_dev_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return self._create_examples(
 | 
			
		||||
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), 
 | 
			
		||||
            "dev_matched")
 | 
			
		||||
 | 
			
		||||
    def get_labels(self):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return ["entailment", "not_entailment"]
 | 
			
		||||
 | 
			
		||||
    def _create_examples(self, lines, set_type):
 | 
			
		||||
        """Creates examples for the training and dev sets."""
 | 
			
		||||
        examples = []
 | 
			
		||||
        for (i, line) in enumerate(lines):
 | 
			
		||||
            if i == 0:
 | 
			
		||||
                continue
 | 
			
		||||
            guid = "%s-%s" % (set_type, line[0])
 | 
			
		||||
            text_a = line[1]
 | 
			
		||||
            text_b = line[2]
 | 
			
		||||
            label = line[-1]
 | 
			
		||||
            examples.append(
 | 
			
		||||
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
 | 
			
		||||
        return examples
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class RteProcessor(DataProcessor):
 | 
			
		||||
    """Processor for the RTE data set (GLUE version)."""
 | 
			
		||||
 | 
			
		||||
    def get_train_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return self._create_examples(
 | 
			
		||||
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
 | 
			
		||||
 | 
			
		||||
    def get_dev_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return self._create_examples(
 | 
			
		||||
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
 | 
			
		||||
 | 
			
		||||
    def get_labels(self):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return ["entailment", "not_entailment"]
 | 
			
		||||
 | 
			
		||||
    def _create_examples(self, lines, set_type):
 | 
			
		||||
        """Creates examples for the training and dev sets."""
 | 
			
		||||
        examples = []
 | 
			
		||||
        for (i, line) in enumerate(lines):
 | 
			
		||||
            if i == 0:
 | 
			
		||||
                continue
 | 
			
		||||
            guid = "%s-%s" % (set_type, line[0])
 | 
			
		||||
            text_a = line[1]
 | 
			
		||||
            text_b = line[2]
 | 
			
		||||
            label = line[-1]
 | 
			
		||||
            examples.append(
 | 
			
		||||
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
 | 
			
		||||
        return examples
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class WnliProcessor(DataProcessor):
 | 
			
		||||
    """Processor for the WNLI data set (GLUE version)."""
 | 
			
		||||
 | 
			
		||||
    def get_train_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return self._create_examples(
 | 
			
		||||
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
 | 
			
		||||
 | 
			
		||||
    def get_dev_examples(self, data_dir):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return self._create_examples(
 | 
			
		||||
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
 | 
			
		||||
 | 
			
		||||
    def get_labels(self):
 | 
			
		||||
        """See base class."""
 | 
			
		||||
        return ["0", "1"]
 | 
			
		||||
 | 
			
		||||
    def _create_examples(self, lines, set_type):
 | 
			
		||||
        """Creates examples for the training and dev sets."""
 | 
			
		||||
        examples = []
 | 
			
		||||
        for (i, line) in enumerate(lines):
 | 
			
		||||
            if i == 0:
 | 
			
		||||
                continue
 | 
			
		||||
            guid = "%s-%s" % (set_type, line[0])
 | 
			
		||||
            text_a = line[1]
 | 
			
		||||
            text_b = line[2]
 | 
			
		||||
            label = line[-1]
 | 
			
		||||
            examples.append(
 | 
			
		||||
                InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
 | 
			
		||||
        return examples
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def convert_examples_to_features(examples, label_list, max_seq_length,
 | 
			
		||||
                                 tokenizer, output_mode):
 | 
			
		||||
    """Loads a data file into a list of `InputBatch`s."""
 | 
			
		||||
 | 
			
		||||
    label_map = {label : i for i, label in enumerate(label_list)}
 | 
			
		||||
 | 
			
		||||
    features = []
 | 
			
		||||
    for (ex_index, example) in enumerate(examples):
 | 
			
		||||
        if ex_index % 10000 == 0:
 | 
			
		||||
            logger.info("Writing example %d of %d" % (ex_index, len(examples)))
 | 
			
		||||
 | 
			
		||||
        tokens_a = tokenizer.tokenize(example.text_a)
 | 
			
		||||
 | 
			
		||||
        tokens_b = None
 | 
			
		||||
@@ -227,7 +439,7 @@ def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer
 | 
			
		||||
        # sequence or the second sequence. The embedding vectors for `type=0` and
 | 
			
		||||
        # `type=1` were learned during pre-training and are added to the wordpiece
 | 
			
		||||
        # embedding vector (and position vector). This is not *strictly* necessary
 | 
			
		||||
        # since the [SEP] token unambigiously separates the sequences, but it makes
 | 
			
		||||
        # since the [SEP] token unambiguously separates the sequences, but it makes
 | 
			
		||||
        # it easier for the model to learn the concept of sequences.
 | 
			
		||||
        #
 | 
			
		||||
        # For classification tasks, the first vector (corresponding to [CLS]) is
 | 
			
		||||
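To make the segment-embedding comment above concrete, here is a minimal sketch (the sentence pair is made up) of how a pair is laid out before padding:

# Illustrative only: assembling a pair into tokens / segment_ids as described above.
tokens_a = ["the", "dog", "barks"]
tokens_b = ["it", "is", "loud"]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
# tokens:      [CLS] the dog barks [SEP] it is loud [SEP]
# segment_ids:   0    0   0    0     0    1  1   1    1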
@@ -256,7 +468,13 @@ def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer
 | 
			
		||||
        assert len(input_mask) == max_seq_length
 | 
			
		||||
        assert len(segment_ids) == max_seq_length
 | 
			
		||||
 | 
			
		||||
        label_id = label_map[example.label]
 | 
			
		||||
        if output_mode == "classification":
 | 
			
		||||
            label_id = label_map[example.label]
 | 
			
		||||
        elif output_mode == "regression":
 | 
			
		||||
            label_id = float(example.label)
 | 
			
		||||
        else:
 | 
			
		||||
            raise KeyError(output_mode)
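In other words, the new output_mode switch interprets the same label field two ways (values below are illustrative):

# classification (e.g. CoLA, label_map = {"0": 0, "1": 1}):
#     example.label == "1"   ->  label_id = 1
# regression (e.g. STS-B similarity scores):
#     example.label == "3.8" ->  label_id = 3.8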
 | 
			
		||||
 | 
			
		||||
        if ex_index < 5:
 | 
			
		||||
            logger.info("*** Example ***")
 | 
			
		||||
            logger.info("guid: %s" % (example.guid))
 | 
			
		||||
@@ -292,14 +510,56 @@ def _truncate_seq_pair(tokens_a, tokens_b, max_length):
 | 
			
		||||
        else:
 | 
			
		||||
            tokens_b.pop()
 | 
			
		||||
 | 
			
		||||
def accuracy(out, labels):
 | 
			
		||||
    outputs = np.argmax(out, axis=1)
 | 
			
		||||
    return np.sum(outputs == labels)
 | 
			
		||||
 | 
			
		||||
def warmup_linear(x, warmup=0.002):
 | 
			
		||||
    if x < warmup:
 | 
			
		||||
        return x/warmup
 | 
			
		||||
    return 1.0 - x
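For context, a tiny worked example of how the removed helper above scaled the learning rate (the base rate and progress values are made up):

base_lr = 5e-5
warmup = 0.1
for progress in (0.05, 0.1, 0.5, 1.0):
    factor = progress / warmup if progress < warmup else 1.0 - progress
    print(progress, base_lr * factor)
# 0.05 -> 2.5e-05 (warm-up ramp), 0.1 -> 4.5e-05, 0.5 -> 2.5e-05, 1.0 -> 0.0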
 | 
			
		||||
def simple_accuracy(preds, labels):
 | 
			
		||||
    return (preds == labels).mean()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def acc_and_f1(preds, labels):
 | 
			
		||||
    acc = simple_accuracy(preds, labels)
 | 
			
		||||
    f1 = f1_score(y_true=labels, y_pred=preds)
 | 
			
		||||
    return {
 | 
			
		||||
        "acc": acc,
 | 
			
		||||
        "f1": f1,
 | 
			
		||||
        "acc_and_f1": (acc + f1) / 2,
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def pearson_and_spearman(preds, labels):
 | 
			
		||||
    pearson_corr = pearsonr(preds, labels)[0]
 | 
			
		||||
    spearman_corr = spearmanr(preds, labels)[0]
 | 
			
		||||
    return {
 | 
			
		||||
        "pearson": pearson_corr,
 | 
			
		||||
        "spearmanr": spearman_corr,
 | 
			
		||||
        "corr": (pearson_corr + spearman_corr) / 2,
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def compute_metrics(task_name, preds, labels):
 | 
			
		||||
    assert len(preds) == len(labels)
 | 
			
		||||
    if task_name == "cola":
 | 
			
		||||
        return {"mcc": matthews_corrcoef(labels, preds)}
 | 
			
		||||
    elif task_name == "sst-2":
 | 
			
		||||
        return {"acc": simple_accuracy(preds, labels)}
 | 
			
		||||
    elif task_name == "mrpc":
 | 
			
		||||
        return acc_and_f1(preds, labels)
 | 
			
		||||
    elif task_name == "sts-b":
 | 
			
		||||
        return pearson_and_spearman(preds, labels)
 | 
			
		||||
    elif task_name == "qqp":
 | 
			
		||||
        return acc_and_f1(preds, labels)
 | 
			
		||||
    elif task_name == "mnli":
 | 
			
		||||
        return {"acc": simple_accuracy(preds, labels)}
 | 
			
		||||
    elif task_name == "mnli-mm":
 | 
			
		||||
        return {"acc": simple_accuracy(preds, labels)}
 | 
			
		||||
    elif task_name == "qnli":
 | 
			
		||||
        return {"acc": simple_accuracy(preds, labels)}
 | 
			
		||||
    elif task_name == "rte":
 | 
			
		||||
        return {"acc": simple_accuracy(preds, labels)}
 | 
			
		||||
    elif task_name == "wnli":
 | 
			
		||||
        return {"acc": simple_accuracy(preds, labels)}
 | 
			
		||||
    else:
 | 
			
		||||
        raise KeyError(task_name)
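A quick sanity check of the new metric helpers (a sketch; the arrays are made up and the import is hypothetical):

import numpy as np
# from run_classifier import compute_metrics  # assuming the helpers above live in this module

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
# simple_accuracy(preds, labels) == (preds == labels).mean() == 0.75
# compute_metrics("sst-2", preds, labels) -> {"acc": 0.75}
# compute_metrics("sts-b", ...) reports Pearson/Spearman correlations for float predictions instead.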
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def main():
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
@@ -312,7 +572,8 @@ def main():
 | 
			
		||||
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
 | 
			
		||||
    parser.add_argument("--bert_model", default=None, type=str, required=True,
 | 
			
		||||
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
 | 
			
		||||
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
 | 
			
		||||
                        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
 | 
			
		||||
                        "bert-base-multilingual-cased, bert-base-chinese.")
 | 
			
		||||
    parser.add_argument("--task_name",
 | 
			
		||||
                        default=None,
 | 
			
		||||
                        type=str,
 | 
			
		||||
@@ -325,6 +586,10 @@ def main():
 | 
			
		||||
                        help="The output directory where the model predictions and checkpoints will be written.")
 | 
			
		||||
 | 
			
		||||
    ## Other parameters
 | 
			
		||||
    parser.add_argument("--cache_dir",
 | 
			
		||||
                        default="",
 | 
			
		||||
                        type=str,
 | 
			
		||||
                        help="Where do you want to store the pre-trained models downloaded from s3")
 | 
			
		||||
    parser.add_argument("--max_seq_length",
 | 
			
		||||
                        default=128,
 | 
			
		||||
                        type=int,
 | 
			
		||||
@@ -332,15 +597,12 @@ def main():
 | 
			
		||||
                             "Sequences longer than this will be truncated, and sequences shorter \n"
 | 
			
		||||
                             "than this will be padded.")
 | 
			
		||||
    parser.add_argument("--do_train",
 | 
			
		||||
                        default=False,
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to run training.")
 | 
			
		||||
    parser.add_argument("--do_eval",
 | 
			
		||||
                        default=False,
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to run eval on the dev set.")
 | 
			
		||||
    parser.add_argument("--do_lower_case",
 | 
			
		||||
                        default=False,
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Set this flag if you are using an uncased model.")
 | 
			
		||||
    parser.add_argument("--train_batch_size",
 | 
			
		||||
@@ -365,7 +627,6 @@ def main():
 | 
			
		||||
                        help="Proportion of training to perform linear learning rate warmup for. "
 | 
			
		||||
                             "E.g., 0.1 = 10%% of training.")
 | 
			
		||||
    parser.add_argument("--no_cuda",
 | 
			
		||||
                        default=False,
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether not to use CUDA when available")
 | 
			
		||||
    parser.add_argument("--local_rank",
 | 
			
		||||
@@ -381,7 +642,6 @@ def main():
 | 
			
		||||
                        default=1,
 | 
			
		||||
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
 | 
			
		||||
    parser.add_argument('--fp16',
 | 
			
		||||
                        default=False,
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to use 16-bit float precision instead of 32-bit")
 | 
			
		||||
    parser.add_argument('--loss_scale',
 | 
			
		||||
@@ -389,19 +649,40 @@ def main():
 | 
			
		||||
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
 | 
			
		||||
                             "0 (default value): dynamic loss scaling.\n"
 | 
			
		||||
                             "Positive power of 2: static loss scaling value.\n")
 | 
			
		||||
 | 
			
		||||
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
 | 
			
		||||
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
 | 
			
		||||
    if args.server_ip and args.server_port:
 | 
			
		||||
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
 | 
			
		||||
        import ptvsd
 | 
			
		||||
        print("Waiting for debugger attach")
 | 
			
		||||
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
 | 
			
		||||
        ptvsd.wait_for_attach()
 | 
			
		||||
 | 
			
		||||
    processors = {
 | 
			
		||||
        "cola": ColaProcessor,
 | 
			
		||||
        "mnli": MnliProcessor,
 | 
			
		||||
        "mnli-mm": MnliMismatchedProcessor,
 | 
			
		||||
        "mrpc": MrpcProcessor,
 | 
			
		||||
        "sst-2": Sst2Processor,
 | 
			
		||||
        "sts-b": StsbProcessor,
 | 
			
		||||
        "qqp": QqpProcessor,
 | 
			
		||||
        "qnli": QnliProcessor,
 | 
			
		||||
        "rte": RteProcessor,
 | 
			
		||||
        "wnli": WnliProcessor,
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    num_labels_task = {
 | 
			
		||||
        "cola": 2,
 | 
			
		||||
        "mnli": 3,
 | 
			
		||||
        "mrpc": 2,
 | 
			
		||||
    output_modes = {
 | 
			
		||||
        "cola": "classification",
 | 
			
		||||
        "mnli": "classification",
 | 
			
		||||
        "mrpc": "classification",
 | 
			
		||||
        "sst-2": "classification",
 | 
			
		||||
        "sts-b": "regression",
 | 
			
		||||
        "qqp": "classification",
 | 
			
		||||
        "qnli": "classification",
 | 
			
		||||
        "rte": "classification",
 | 
			
		||||
        "wnli": "classification",
 | 
			
		||||
    }
 | 
			
		||||
 | 
			
		||||
    if args.local_rank == -1 or args.no_cuda:
 | 
			
		||||
@@ -413,6 +694,11 @@ def main():
 | 
			
		||||
        n_gpu = 1
 | 
			
		||||
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
 | 
			
		||||
        torch.distributed.init_process_group(backend='nccl')
 | 
			
		||||
 | 
			
		||||
    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
 | 
			
		||||
                        datefmt = '%m/%d/%Y %H:%M:%S',
 | 
			
		||||
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
 | 
			
		||||
 | 
			
		||||
    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
 | 
			
		||||
        device, n_gpu, bool(args.local_rank != -1), args.fp16))
 | 
			
		||||
 | 
			
		||||
@@ -420,7 +706,7 @@ def main():
 | 
			
		||||
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
 | 
			
		||||
                            args.gradient_accumulation_steps))
 | 
			
		||||
 | 
			
		||||
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
 | 
			
		||||
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
 | 
			
		||||
 | 
			
		||||
    random.seed(args.seed)
 | 
			
		||||
    np.random.seed(args.seed)
 | 
			
		||||
@@ -431,9 +717,10 @@ def main():
 | 
			
		||||
    if not args.do_train and not args.do_eval:
 | 
			
		||||
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
 | 
			
		||||
 | 
			
		||||
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
 | 
			
		||||
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
 | 
			
		||||
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
 | 
			
		||||
    os.makedirs(args.output_dir, exist_ok=True)
 | 
			
		||||
    if not os.path.exists(args.output_dir):
 | 
			
		||||
        os.makedirs(args.output_dir)
 | 
			
		||||
 | 
			
		||||
    task_name = args.task_name.lower()
 | 
			
		||||
 | 
			
		||||
@@ -441,22 +728,27 @@ def main():
 | 
			
		||||
        raise ValueError("Task not found: %s" % (task_name))
 | 
			
		||||
 | 
			
		||||
    processor = processors[task_name]()
 | 
			
		||||
    num_labels = num_labels_task[task_name]
 | 
			
		||||
    output_mode = output_modes[task_name]
 | 
			
		||||
 | 
			
		||||
    label_list = processor.get_labels()
 | 
			
		||||
    num_labels = len(label_list)
 | 
			
		||||
 | 
			
		||||
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
 | 
			
		||||
 | 
			
		||||
    train_examples = None
 | 
			
		||||
    num_train_steps = None
 | 
			
		||||
    num_train_optimization_steps = None
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        train_examples = processor.get_train_examples(args.data_dir)
 | 
			
		||||
        num_train_steps = int(
 | 
			
		||||
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
 | 
			
		||||
        num_train_optimization_steps = int(
 | 
			
		||||
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
 | 
			
		||||
        if args.local_rank != -1:
 | 
			
		||||
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
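The arithmetic behind num_train_optimization_steps is worth spelling out once (illustrative numbers):

# 8,000 training examples, train_batch_size 32, gradient_accumulation_steps 1, 3 epochs:
#   8000 / 32 / 1 = 250 optimizer updates per epoch  ->  250 * 3 = 750 optimization steps
# With distributed training the count is further divided by the world size,
# since each process only sees its own shard of the data.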
 | 
			
		||||
 | 
			
		||||
    # Prepare model
 | 
			
		||||
    cache_dir = args.cache_dir if args.cache_dir else os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank))
 | 
			
		||||
    model = BertForSequenceClassification.from_pretrained(args.bert_model,
 | 
			
		||||
              cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),
 | 
			
		||||
              num_labels = num_labels)
 | 
			
		||||
              cache_dir=cache_dir,
 | 
			
		||||
              num_labels=num_labels)
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        model.half()
 | 
			
		||||
    model.to(device)
 | 
			
		||||
@@ -477,9 +769,6 @@ def main():
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
 | 
			
		||||
        ]
 | 
			
		||||
    t_total = num_train_steps
 | 
			
		||||
    if args.local_rank != -1:
 | 
			
		||||
        t_total = t_total // torch.distributed.get_world_size()
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        try:
 | 
			
		||||
            from apex.optimizers import FP16_Optimizer
 | 
			
		||||
@@ -495,25 +784,34 @@ def main():
 | 
			
		||||
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
 | 
			
		||||
        else:
 | 
			
		||||
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
 | 
			
		||||
        warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
 | 
			
		||||
                                             t_total=num_train_optimization_steps)
 | 
			
		||||
 | 
			
		||||
    else:
 | 
			
		||||
        optimizer = BertAdam(optimizer_grouped_parameters,
 | 
			
		||||
                             lr=args.learning_rate,
 | 
			
		||||
                             warmup=args.warmup_proportion,
 | 
			
		||||
                             t_total=t_total)
 | 
			
		||||
                             t_total=num_train_optimization_steps)
 | 
			
		||||
 | 
			
		||||
    global_step = 0
 | 
			
		||||
    nb_tr_steps = 0
 | 
			
		||||
    tr_loss = 0
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        train_features = convert_examples_to_features(
 | 
			
		||||
            train_examples, label_list, args.max_seq_length, tokenizer)
 | 
			
		||||
            train_examples, label_list, args.max_seq_length, tokenizer, output_mode)
 | 
			
		||||
        logger.info("***** Running training *****")
 | 
			
		||||
        logger.info("  Num examples = %d", len(train_examples))
 | 
			
		||||
        logger.info("  Batch size = %d", args.train_batch_size)
 | 
			
		||||
        logger.info("  Num steps = %d", num_train_steps)
 | 
			
		||||
        logger.info("  Num steps = %d", num_train_optimization_steps)
 | 
			
		||||
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
 | 
			
		||||
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
 | 
			
		||||
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
 | 
			
		||||
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
 | 
			
		||||
 | 
			
		||||
        if output_mode == "classification":
 | 
			
		||||
            all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
 | 
			
		||||
        elif output_mode == "regression":
 | 
			
		||||
            all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.float)
 | 
			
		||||
 | 
			
		||||
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
 | 
			
		||||
        if args.local_rank == -1:
 | 
			
		||||
            train_sampler = RandomSampler(train_data)
 | 
			
		||||
@@ -528,7 +826,17 @@ def main():
 | 
			
		||||
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
 | 
			
		||||
                batch = tuple(t.to(device) for t in batch)
 | 
			
		||||
                input_ids, input_mask, segment_ids, label_ids = batch
 | 
			
		||||
                loss = model(input_ids, segment_ids, input_mask, label_ids)
 | 
			
		||||
 | 
			
		||||
                # define a new function to compute loss values for both output_modes
 | 
			
		||||
                logits = model(input_ids, segment_ids, input_mask, labels=None)
 | 
			
		||||
 | 
			
		||||
                if output_mode == "classification":
 | 
			
		||||
                    loss_fct = CrossEntropyLoss()
 | 
			
		||||
                    loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
 | 
			
		||||
                elif output_mode == "regression":
 | 
			
		||||
                    loss_fct = MSELoss()
 | 
			
		||||
                    loss = loss_fct(logits.view(-1), label_ids.view(-1))
 | 
			
		||||
 | 
			
		||||
                if n_gpu > 1:
 | 
			
		||||
                    loss = loss.mean() # mean() to average on multi-gpu.
 | 
			
		||||
                if args.gradient_accumulation_steps > 1:
 | 
			
		||||
@@ -543,70 +851,99 @@ def main():
 | 
			
		||||
                nb_tr_examples += input_ids.size(0)
 | 
			
		||||
                nb_tr_steps += 1
 | 
			
		||||
                if (step + 1) % args.gradient_accumulation_steps == 0:
 | 
			
		||||
                    # modify learning rate with special warm up BERT uses
 | 
			
		||||
                    lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
 | 
			
		||||
                    for param_group in optimizer.param_groups:
 | 
			
		||||
                        param_group['lr'] = lr_this_step
 | 
			
		||||
                    if args.fp16:
 | 
			
		||||
                        # modify learning rate with special warm up BERT uses
 | 
			
		||||
                        # if args.fp16 is False, BertAdam is used that handles this automatically
 | 
			
		||||
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,
 | 
			
		||||
                                                                                 args.warmup_proportion)
 | 
			
		||||
                        for param_group in optimizer.param_groups:
 | 
			
		||||
                            param_group['lr'] = lr_this_step
 | 
			
		||||
                    optimizer.step()
 | 
			
		||||
                    optimizer.zero_grad()
 | 
			
		||||
                    global_step += 1
 | 
			
		||||
 | 
			
		||||
    # Save a trained model
 | 
			
		||||
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
 | 
			
		||||
    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
 | 
			
		||||
    torch.save(model_to_save.state_dict(), output_model_file)
 | 
			
		||||
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
 | 
			
		||||
        # Save a trained model, configuration and tokenizer
 | 
			
		||||
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
 | 
			
		||||
 | 
			
		||||
    # Load a trained model that you have fine-tuned
 | 
			
		||||
    model_state_dict = torch.load(output_model_file)
 | 
			
		||||
    model = BertForSequenceClassification.from_pretrained(args.bert_model, state_dict=model_state_dict)
 | 
			
		||||
        # If we save using the predefined names, we can load using `from_pretrained`
 | 
			
		||||
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
 | 
			
		||||
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
 | 
			
		||||
 | 
			
		||||
        torch.save(model_to_save.state_dict(), output_model_file)
 | 
			
		||||
        model_to_save.config.to_json_file(output_config_file)
 | 
			
		||||
        tokenizer.save_vocabulary(args.output_dir)
 | 
			
		||||
 | 
			
		||||
        # Load a trained model and vocabulary that you have fine-tuned
 | 
			
		||||
        model = BertForSequenceClassification.from_pretrained(args.output_dir, num_labels=num_labels)
 | 
			
		||||
        tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
 | 
			
		||||
    else:
 | 
			
		||||
        model = BertForSequenceClassification.from_pretrained(args.bert_model, num_labels=num_labels)
 | 
			
		||||
    model.to(device)
 | 
			
		||||
 | 
			
		||||
    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
 | 
			
		||||
        eval_examples = processor.get_dev_examples(args.data_dir)
 | 
			
		||||
        eval_features = convert_examples_to_features(
 | 
			
		||||
            eval_examples, label_list, args.max_seq_length, tokenizer)
 | 
			
		||||
            eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
 | 
			
		||||
        logger.info("***** Running evaluation *****")
 | 
			
		||||
        logger.info("  Num examples = %d", len(eval_examples))
 | 
			
		||||
        logger.info("  Batch size = %d", args.eval_batch_size)
 | 
			
		||||
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
 | 
			
		||||
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
 | 
			
		||||
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
 | 
			
		||||
        all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
 | 
			
		||||
 | 
			
		||||
        if output_mode == "classification":
 | 
			
		||||
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
 | 
			
		||||
        elif output_mode == "regression":
 | 
			
		||||
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.float)
 | 
			
		||||
 | 
			
		||||
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
 | 
			
		||||
        # Run prediction for full data
 | 
			
		||||
        eval_sampler = SequentialSampler(eval_data)
 | 
			
		||||
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
 | 
			
		||||
 | 
			
		||||
        model.eval()
 | 
			
		||||
        eval_loss, eval_accuracy = 0, 0
 | 
			
		||||
        nb_eval_steps, nb_eval_examples = 0, 0
 | 
			
		||||
        for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
 | 
			
		||||
        eval_loss = 0
 | 
			
		||||
        nb_eval_steps = 0
 | 
			
		||||
        preds = []
 | 
			
		||||
 | 
			
		||||
        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
 | 
			
		||||
            input_ids = input_ids.to(device)
 | 
			
		||||
            input_mask = input_mask.to(device)
 | 
			
		||||
            segment_ids = segment_ids.to(device)
 | 
			
		||||
            label_ids = label_ids.to(device)
 | 
			
		||||
 | 
			
		||||
            with torch.no_grad():
 | 
			
		||||
                tmp_eval_loss = model(input_ids, segment_ids, input_mask, label_ids)
 | 
			
		||||
                logits = model(input_ids, segment_ids, input_mask)
 | 
			
		||||
 | 
			
		||||
            logits = logits.detach().cpu().numpy()
 | 
			
		||||
            label_ids = label_ids.to('cpu').numpy()
 | 
			
		||||
            tmp_eval_accuracy = accuracy(logits, label_ids)
 | 
			
		||||
                logits = model(input_ids, segment_ids, input_mask, labels=None)
 | 
			
		||||
 | 
			
		||||
            # create eval loss and other metric required by the task
 | 
			
		||||
            if output_mode == "classification":
 | 
			
		||||
                loss_fct = CrossEntropyLoss()
 | 
			
		||||
                tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
 | 
			
		||||
            elif output_mode == "regression":
 | 
			
		||||
                loss_fct = MSELoss()
 | 
			
		||||
                tmp_eval_loss = loss_fct(logits.view(-1), label_ids.view(-1))
 | 
			
		||||
            
 | 
			
		||||
            eval_loss += tmp_eval_loss.mean().item()
 | 
			
		||||
            eval_accuracy += tmp_eval_accuracy
 | 
			
		||||
 | 
			
		||||
            nb_eval_examples += input_ids.size(0)
 | 
			
		||||
            nb_eval_steps += 1
 | 
			
		||||
            if len(preds) == 0:
 | 
			
		||||
                preds.append(logits.detach().cpu().numpy())
 | 
			
		||||
            else:
 | 
			
		||||
                preds[0] = np.append(
 | 
			
		||||
                    preds[0], logits.detach().cpu().numpy(), axis=0)
 | 
			
		||||
 | 
			
		||||
        eval_loss = eval_loss / nb_eval_steps
 | 
			
		||||
        eval_accuracy = eval_accuracy / nb_eval_examples
 | 
			
		||||
        preds = preds[0]
 | 
			
		||||
        if output_mode == "classification":
 | 
			
		||||
            preds = np.argmax(preds, axis=1)
 | 
			
		||||
        elif output_mode == "regression":
 | 
			
		||||
            preds = np.squeeze(preds)
 | 
			
		||||
        result = compute_metrics(task_name, preds, all_label_ids.numpy())
 | 
			
		||||
        loss = tr_loss/nb_tr_steps if args.do_train else None
 | 
			
		||||
 | 
			
		||||
        result = {'eval_loss': eval_loss,
 | 
			
		||||
                  'eval_accuracy': eval_accuracy,
 | 
			
		||||
                  'global_step': global_step,
 | 
			
		||||
                  'loss': tr_loss/nb_tr_steps}
 | 
			
		||||
        result['eval_loss'] = eval_loss
 | 
			
		||||
        result['global_step'] = global_step
 | 
			
		||||
        result['loss'] = loss
 | 
			
		||||
 | 
			
		||||
        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
 | 
			
		||||
        with open(output_eval_file, "w") as writer:
 | 
			
		||||
@@ -615,5 +952,73 @@ def main():
 | 
			
		||||
                logger.info("  %s = %s", key, str(result[key]))
 | 
			
		||||
                writer.write("%s = %s\n" % (key, str(result[key])))
 | 
			
		||||
 | 
			
		||||
        # hack for MNLI-MM
 | 
			
		||||
        if task_name == "mnli":
 | 
			
		||||
            task_name = "mnli-mm"
 | 
			
		||||
            processor = processors[task_name]()
 | 
			
		||||
 | 
			
		||||
            if os.path.exists(args.output_dir + '-MM') and os.listdir(args.output_dir + '-MM') and args.do_train:
 | 
			
		||||
                raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
 | 
			
		||||
            if not os.path.exists(args.output_dir + '-MM'):
 | 
			
		||||
                os.makedirs(args.output_dir + '-MM')
 | 
			
		||||
 | 
			
		||||
            eval_examples = processor.get_dev_examples(args.data_dir)
 | 
			
		||||
            eval_features = convert_examples_to_features(
 | 
			
		||||
                eval_examples, label_list, args.max_seq_length, tokenizer, output_mode)
 | 
			
		||||
            logger.info("***** Running evaluation *****")
 | 
			
		||||
            logger.info("  Num examples = %d", len(eval_examples))
 | 
			
		||||
            logger.info("  Batch size = %d", args.eval_batch_size)
 | 
			
		||||
            all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
 | 
			
		||||
            all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
 | 
			
		||||
            all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
 | 
			
		||||
            all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
 | 
			
		||||
 | 
			
		||||
            eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
 | 
			
		||||
            # Run prediction for full data
 | 
			
		||||
            eval_sampler = SequentialSampler(eval_data)
 | 
			
		||||
            eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
 | 
			
		||||
 | 
			
		||||
            model.eval()
 | 
			
		||||
            eval_loss = 0
 | 
			
		||||
            nb_eval_steps = 0
 | 
			
		||||
            preds = []
 | 
			
		||||
 | 
			
		||||
            for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
 | 
			
		||||
                input_ids = input_ids.to(device)
 | 
			
		||||
                input_mask = input_mask.to(device)
 | 
			
		||||
                segment_ids = segment_ids.to(device)
 | 
			
		||||
                label_ids = label_ids.to(device)
 | 
			
		||||
 | 
			
		||||
                with torch.no_grad():
 | 
			
		||||
                    logits = model(input_ids, segment_ids, input_mask, labels=None)
 | 
			
		||||
            
 | 
			
		||||
                loss_fct = CrossEntropyLoss()
 | 
			
		||||
                tmp_eval_loss = loss_fct(logits.view(-1, num_labels), label_ids.view(-1))
 | 
			
		||||
            
 | 
			
		||||
                eval_loss += tmp_eval_loss.mean().item()
 | 
			
		||||
                nb_eval_steps += 1
 | 
			
		||||
                if len(preds) == 0:
 | 
			
		||||
                    preds.append(logits.detach().cpu().numpy())
 | 
			
		||||
                else:
 | 
			
		||||
                    preds[0] = np.append(
 | 
			
		||||
                        preds[0], logits.detach().cpu().numpy(), axis=0)
 | 
			
		||||
 | 
			
		||||
            eval_loss = eval_loss / nb_eval_steps
 | 
			
		||||
            preds = preds[0]
 | 
			
		||||
            preds = np.argmax(preds, axis=1)
 | 
			
		||||
            result = compute_metrics(task_name, preds, all_label_ids.numpy())
 | 
			
		||||
            loss = tr_loss/nb_tr_steps if args.do_train else None
 | 
			
		||||
 | 
			
		||||
            result['eval_loss'] = eval_loss
 | 
			
		||||
            result['global_step'] = global_step
 | 
			
		||||
            result['loss'] = loss
 | 
			
		||||
 | 
			
		||||
            output_eval_file = os.path.join(args.output_dir + '-MM', "eval_results.txt")
 | 
			
		||||
            with open(output_eval_file, "w") as writer:
 | 
			
		||||
                logger.info("***** Eval results *****")
 | 
			
		||||
                for key in sorted(result.keys()):
 | 
			
		||||
                    logger.info("  %s = %s", key, str(result[key]))
 | 
			
		||||
                    writer.write("%s = %s\n" % (key, str(result[key])))
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    main()
 | 
			
		||||
 | 
			
		||||
							
								
								
									
examples/run_gpt2.py (new file, 131 lines)
@@ -0,0 +1,131 @@
 | 
			
		||||
#!/usr/bin/env python3
 | 
			
		||||
 | 
			
		||||
import argparse
 | 
			
		||||
import logging
 | 
			
		||||
from tqdm import trange
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
import torch.nn.functional as F
 | 
			
		||||
import numpy as np
 | 
			
		||||
 | 
			
		||||
from pytorch_pretrained_bert import GPT2LMHeadModel, GPT2Tokenizer
 | 
			
		||||
 | 
			
		||||
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
 | 
			
		||||
                    datefmt = '%m/%d/%Y %H:%M:%S',
 | 
			
		||||
                    level = logging.INFO)
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
def top_k_logits(logits, k):
 | 
			
		||||
    """
 | 
			
		||||
    Masks everything but the k top entries as -infinity (1e10).
 | 
			
		||||
    Used to mask logits such that e^-infinity -> 0 won't contribute to the
 | 
			
		||||
    sum of the denominator.
 | 
			
		||||
    """
 | 
			
		||||
    if k == 0:
 | 
			
		||||
        return logits
 | 
			
		||||
    else:
 | 
			
		||||
        values = torch.topk(logits, k)[0]
 | 
			
		||||
        batch_mins = values[:, -1].view(-1, 1).expand_as(logits)
 | 
			
		||||
        return torch.where(logits < batch_mins, torch.ones_like(logits) * -1e10, logits)
 | 
			
		||||
 | 
			
		||||
def sample_sequence(model, length, start_token=None, batch_size=None, context=None, temperature=1, top_k=0, device='cuda', sample=True):
 | 
			
		||||
    if start_token is None:
 | 
			
		||||
        assert context is not None, 'Specify exactly one of start_token and context!'
 | 
			
		||||
        context = torch.tensor(context, device=device, dtype=torch.long).unsqueeze(0).repeat(batch_size, 1)
 | 
			
		||||
    else:
 | 
			
		||||
        assert context is None, 'Specify exactly one of start_token and context!'
 | 
			
		||||
        context = torch.full((batch_size, 1), start_token, device=device, dtype=torch.long)
 | 
			
		||||
    prev = context
 | 
			
		||||
    output = context
 | 
			
		||||
    past = None
 | 
			
		||||
    with torch.no_grad():
 | 
			
		||||
        for i in trange(length):
 | 
			
		||||
            logits, past = model(prev, past=past)
 | 
			
		||||
            logits = logits[:, -1, :] / temperature
 | 
			
		||||
            logits = top_k_logits(logits, k=top_k)
 | 
			
		||||
            log_probs = F.softmax(logits, dim=-1)
 | 
			
		||||
            if sample:
 | 
			
		||||
                prev = torch.multinomial(log_probs, num_samples=1)
 | 
			
		||||
            else:
 | 
			
		||||
                _, prev = torch.topk(log_probs, k=1, dim=-1)
 | 
			
		||||
            output = torch.cat((output, prev), dim=1)
 | 
			
		||||
    return output
 | 
			
		||||
 | 
			
		||||
def run_model():
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
    parser.add_argument('--model_name_or_path', type=str, default='gpt2', help='pretrained model name or path to local checkpoint')
 | 
			
		||||
    parser.add_argument("--seed", type=int, default=0)
 | 
			
		||||
    parser.add_argument("--nsamples", type=int, default=1)
 | 
			
		||||
    parser.add_argument("--batch_size", type=int, default=-1)
 | 
			
		||||
    parser.add_argument("--length", type=int, default=-1)
 | 
			
		||||
    parser.add_argument("--temperature", type=float, default=1.0)
 | 
			
		||||
    parser.add_argument("--top_k", type=int, default=0)
 | 
			
		||||
    parser.add_argument('--unconditional', action='store_true', help='If true, unconditional generation.')
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
    print(args)
 | 
			
		||||
 | 
			
		||||
    if args.batch_size == -1:
 | 
			
		||||
        args.batch_size = 1
 | 
			
		||||
    assert args.nsamples % args.batch_size == 0
 | 
			
		||||
 | 
			
		||||
    np.random.seed(args.seed)
 | 
			
		||||
    torch.random.manual_seed(args.seed)
 | 
			
		||||
    torch.cuda.manual_seed(args.seed)
 | 
			
		||||
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 | 
			
		||||
 | 
			
		||||
    enc = GPT2Tokenizer.from_pretrained(args.model_name_or_path)
 | 
			
		||||
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
 | 
			
		||||
    model.to(device)
 | 
			
		||||
    model.eval()
 | 
			
		||||
 | 
			
		||||
    if args.length == -1:
 | 
			
		||||
        args.length = model.config.n_ctx // 2
 | 
			
		||||
    elif args.length > model.config.n_ctx:
 | 
			
		||||
        raise ValueError("Can't get samples longer than window size: %s" % model.config.n_ctx)
 | 
			
		||||
 | 
			
		||||
    while True:
 | 
			
		||||
        context_tokens = []
 | 
			
		||||
        if not args.unconditional:
 | 
			
		||||
            raw_text = input("Model prompt >>> ")
 | 
			
		||||
            while not raw_text:
 | 
			
		||||
                print('Prompt should not be empty!')
 | 
			
		||||
                raw_text = input("Model prompt >>> ")
 | 
			
		||||
            context_tokens = enc.encode(raw_text)
 | 
			
		||||
            generated = 0
 | 
			
		||||
            for _ in range(args.nsamples // args.batch_size):
 | 
			
		||||
                out = sample_sequence(
 | 
			
		||||
                    model=model, length=args.length,
 | 
			
		||||
                    context=context_tokens,
 | 
			
		||||
                    start_token=None,
 | 
			
		||||
                    batch_size=args.batch_size,
 | 
			
		||||
                    temperature=args.temperature, top_k=args.top_k, device=device
 | 
			
		||||
                )
 | 
			
		||||
                out = out[:, len(context_tokens):].tolist()
 | 
			
		||||
                for i in range(args.batch_size):
 | 
			
		||||
                    generated += 1
 | 
			
		||||
                    text = enc.decode(out[i])
 | 
			
		||||
                    print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
 | 
			
		||||
                    print(text)
 | 
			
		||||
            print("=" * 80)
 | 
			
		||||
        else:
 | 
			
		||||
            generated = 0
 | 
			
		||||
            for _ in range(args.nsamples // args.batch_size):
 | 
			
		||||
                out = sample_sequence(
 | 
			
		||||
                    model=model, length=args.length,
 | 
			
		||||
                    context=None,
 | 
			
		||||
                    start_token=enc.encoder['<|endoftext|>'],
 | 
			
		||||
                    batch_size=args.batch_size,
 | 
			
		||||
                    temperature=args.temperature, top_k=args.top_k, device=device
 | 
			
		||||
                )
 | 
			
		||||
                out = out[:,1:].tolist()
 | 
			
		||||
                for i in range(args.batch_size):
 | 
			
		||||
                    generated += 1
 | 
			
		||||
                    text = enc.decode(out[i])
 | 
			
		||||
                    print("=" * 40 + " SAMPLE " + str(generated) + " " + "=" * 40)
 | 
			
		||||
                    print(text)
 | 
			
		||||
            print("=" * 80)
 | 
			
		||||
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
    run_model()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
							
								
								
									
examples/run_openai_gpt.py (new file, 274 lines)
@@ -0,0 +1,274 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
""" OpenAI GPT model fine-tuning script.
 | 
			
		||||
    Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py
 | 
			
		||||
    It is itself adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py
 | 
			
		||||
 | 
			
		||||
    This script with default values fine-tunes and evaluates a pretrained OpenAI GPT on the RocStories dataset:
 | 
			
		||||
        python run_openai_gpt.py \
 | 
			
		||||
          --model_name openai-gpt \
 | 
			
		||||
          --do_train \
 | 
			
		||||
          --do_eval \
 | 
			
		||||
          --train_dataset $ROC_STORIES_DIR/cloze_test_val__spring2016\ -\ cloze_test_ALL_val.csv \
 | 
			
		||||
          --eval_dataset $ROC_STORIES_DIR/cloze_test_test__spring2016\ -\ cloze_test_ALL_test.csv \
 | 
			
		||||
          --output_dir ../log \
 | 
			
		||||
          --train_batch_size 16 \
 | 
			
		||||
"""
 | 
			
		||||
import argparse
 | 
			
		||||
import os
 | 
			
		||||
import csv
 | 
			
		||||
import random
 | 
			
		||||
import logging
 | 
			
		||||
from tqdm import tqdm, trange
 | 
			
		||||
 | 
			
		||||
import numpy as np
 | 
			
		||||
import torch
 | 
			
		||||
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
 | 
			
		||||
                              TensorDataset)
 | 
			
		||||
 | 
			
		||||
from pytorch_pretrained_bert import (OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer,
 | 
			
		||||
                                     OpenAIAdam, cached_path, WEIGHTS_NAME, CONFIG_NAME)
 | 
			
		||||
 | 
			
		||||
ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz"
 | 
			
		||||
 | 
			
		||||
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
 | 
			
		||||
                    datefmt = '%m/%d/%Y %H:%M:%S',
 | 
			
		||||
                    level = logging.INFO)
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
def accuracy(out, labels):
 | 
			
		||||
    outputs = np.argmax(out, axis=1)
 | 
			
		||||
    return np.sum(outputs == labels)
 | 
			
		||||
 | 
			
		||||
def load_rocstories_dataset(dataset_path):
 | 
			
		||||
    """ Output a list of tuples(story, 1st continuation, 2nd continuation, label) """
 | 
			
		||||
    with open(dataset_path, encoding='utf_8') as f:
 | 
			
		||||
        f = csv.reader(f)
 | 
			
		||||
        output = []
 | 
			
		||||
        next(f) # skip the first line
 | 
			
		||||
        for line in tqdm(f):
 | 
			
		||||
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1])-1))
 | 
			
		||||
    return output
 | 
			
		||||
 | 
			
		||||
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
 | 
			
		||||
    """ Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)
 | 
			
		||||
 | 
			
		||||
        To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
 | 
			
		||||
        input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
 | 
			
		||||
    """
 | 
			
		||||
    tensor_datasets = []
 | 
			
		||||
    for dataset in encoded_datasets:
 | 
			
		||||
        n_batch = len(dataset)
 | 
			
		||||
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
 | 
			
		||||
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
 | 
			
		||||
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-1, dtype=np.int64)
 | 
			
		||||
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
 | 
			
		||||
        for i, (story, cont1, cont2, mc_label), in enumerate(dataset):
 | 
			
		||||
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
 | 
			
		||||
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
 | 
			
		||||
            input_ids[i, 0, :len(with_cont1)] = with_cont1
 | 
			
		||||
            input_ids[i, 1, :len(with_cont2)] = with_cont2
 | 
			
		||||
            mc_token_ids[i, 0] = len(with_cont1) - 1
 | 
			
		||||
            mc_token_ids[i, 1] = len(with_cont2) - 1
 | 
			
		||||
            lm_labels[i, 0, :len(with_cont1)-1] = with_cont1[1:]
 | 
			
		||||
            lm_labels[i, 1, :len(with_cont2)-1] = with_cont2[1:]
 | 
			
		||||
            mc_labels[i] = mc_label
 | 
			
		||||
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
 | 
			
		||||
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
 | 
			
		||||
    return tensor_datasets
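A toy illustration of the layout built above (token ids and special-token ids are made up):

start_token, delimiter_token, clf_token = 0, 1, 2
story, cont1 = [11, 12, 13], [21, 22]
with_cont1 = [start_token] + story + [delimiter_token] + cont1 + [clf_token]
# -> [0, 11, 12, 13, 1, 21, 22, 2]
# mc_token_ids points at the [clf] position: len(with_cont1) - 1 == 7
# lm_labels holds the same ids shifted left by one, with padded positions left at -1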
 | 
			
		||||
 | 
			
		||||
def main():
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
    parser.add_argument('--model_name', type=str, default='openai-gpt',
 | 
			
		||||
                        help='pretrained model name')
 | 
			
		||||
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
 | 
			
		||||
    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
 | 
			
		||||
    parser.add_argument("--output_dir", default=None, type=str, required=True,
 | 
			
		||||
                        help="The output directory where the model predictions and checkpoints will be written.")
 | 
			
		||||
    parser.add_argument('--train_dataset', type=str, default='')
 | 
			
		||||
    parser.add_argument('--eval_dataset', type=str, default='')
 | 
			
		||||
    parser.add_argument('--seed', type=int, default=42)
 | 
			
		||||
    parser.add_argument('--num_train_epochs', type=int, default=3)
 | 
			
		||||
    parser.add_argument('--train_batch_size', type=int, default=8)
 | 
			
		||||
    parser.add_argument('--eval_batch_size', type=int, default=16)
 | 
			
		||||
    parser.add_argument('--max_grad_norm', type=int, default=1)
 | 
			
		||||
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
 | 
			
		||||
    parser.add_argument('--warmup_proportion', type=float, default=0.002)
 | 
			
		||||
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
 | 
			
		||||
    parser.add_argument('--weight_decay', type=float, default=0.01)
 | 
			
		||||
    parser.add_argument('--lm_coef', type=float, default=0.9)
 | 
			
		||||
    parser.add_argument('--n_valid', type=int, default=374)
 | 
			
		||||
 | 
			
		||||
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
 | 
			
		||||
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
    print(args)
 | 
			
		||||
 | 
			
		||||
    if args.server_ip and args.server_port:
 | 
			
		||||
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
 | 
			
		||||
        import ptvsd
 | 
			
		||||
        print("Waiting for debugger attach")
 | 
			
		||||
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
 | 
			
		||||
        ptvsd.wait_for_attach()
 | 
			
		||||
 | 
			
		||||
    random.seed(args.seed)
 | 
			
		||||
    np.random.seed(args.seed)
 | 
			
		||||
    torch.manual_seed(args.seed)
 | 
			
		||||
    torch.cuda.manual_seed_all(args.seed)
 | 
			
		||||
 | 
			
		||||
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 | 
			
		||||
    n_gpu = torch.cuda.device_count()
 | 
			
		||||
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))
 | 
			
		||||
 | 
			
		||||
    if not args.do_train and not args.do_eval:
 | 
			
		||||
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")
 | 
			
		||||
 | 
			
		||||
    if not os.path.exists(args.output_dir):
 | 
			
		||||
        os.makedirs(args.output_dir)
 | 
			
		||||
 | 
			
		||||
    # Load tokenizer and model
 | 
			
		||||
    # This loading functions also add new tokens and embeddings called `special tokens`
 | 
			
		||||
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name, special_tokens=special_tokens)
    special_tokens_ids = list(tokenizer.convert_tokens_to_ids(token) for token in special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name, num_special_tokens=len(special_tokens))
    model.to(device)

    # Load and encode the datasets
    if not args.train_dataset and not args.eval_dataset:
        roc_stories = cached_path(ROCSTORIES_URL)
    def tokenize_and_encode(obj):
        """ Tokenize and encode a nested object """
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return list(tokenize_and_encode(o) for o in obj)
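    # For illustration only: tokenize_and_encode recurses through arbitrarily
    # nested containers, so a hypothetical call tokenize_and_encode((["Hello world", 1],))
    # would return [[<token ids of "Hello world">, 1]] -- strings become id lists
    # and integer labels pass through unchanged.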
    logger.info("Encoding dataset...")
 | 
			
		||||
    train_dataset = load_rocstories_dataset(args.train_dataset)
 | 
			
		||||
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
 | 
			
		||||
    datasets = (train_dataset, eval_dataset)
 | 
			
		||||
    encoded_datasets = tokenize_and_encode(datasets)
 | 
			
		||||
 | 
			
		||||
    # Compute the max input length for the Transformer
 | 
			
		||||
    max_length = model.config.n_positions // 2 - 2
 | 
			
		||||
    input_length = max(len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3  \
 | 
			
		||||
                           for dataset in encoded_datasets for story, cont1, cont2, _ in dataset)
 | 
			
		||||
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
 | 
			
		||||
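    # For illustration: with the 512-position OpenAI GPT, max_length = 512 // 2 - 2 = 254,
    # so the story and each continuation are clipped to 254 tokens; the "+ 3" reserves
    # room for the _start_, _delimiter_ and _classify_ tokens, and input_length is
    # finally capped at n_positions.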

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
    num_train_optimization_steps = len(train_data) * args.num_train_epochs // args.train_batch_size
    optimizer = OpenAIAdam(optimizer_grouped_parameters,
                           lr=args.learning_rate,
                           warmup=args.warmup_proportion,
                           max_grad_norm=args.max_grad_norm,
                           weight_decay=args.weight_decay,
                           t_total=num_train_optimization_steps)

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids, lm_labels, mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = loss.item() if exp_average_loss is None else 0.7*exp_average_loss+0.3*loss.item()
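                # exp_average_loss is an exponential moving average of the batch
                # loss (decay 0.7), used only to smooth the progress-bar readout.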
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, optimizer.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss = model(input_ids, mc_token_ids, lm_labels, mc_labels)
                _, mc_logits = model(input_ids, mc_token_ids)

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss/nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss,
                  'eval_accuracy': eval_accuracy,
                  'train_loss': train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

if __name__ == '__main__':
    main()
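Since the script saves the fine-tuned weights, configuration and vocabulary under --output_dir using the predefined file names, that directory can later be reloaded directly with `from_pretrained`. A minimal sketch (the directory name is hypothetical, and the imports assume the pytorch_pretrained_bert package used throughout these scripts):

# Reload a fine-tuned RocStories model for inference (illustrative sketch only).
from pytorch_pretrained_bert import OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("./rocstories_output")
model = OpenAIGPTDoubleHeadsModel.from_pretrained("./rocstories_output")
model.eval()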
@@ -1,5 +1,5 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,38 +15,45 @@
# limitations under the License.
"""Run BERT on SQuAD."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import, division, print_function

import argparse
import collections
import logging
import json
import logging
import math
import os
import random
import pickle
from tqdm import tqdm, trange
import sys
from io import open

import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange

from pytorch_pretrained_bert.tokenization import whitespace_tokenize, BasicTokenizer, BertTokenizer
from pytorch_pretrained_bert.modeling import BertForQuestionAnswering
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForQuestionAnswering, BertConfig
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
from pytorch_pretrained_bert.tokenization import (BasicTokenizer,
                                                  BertTokenizer,
                                                  whitespace_tokenize)

if sys.version_info[0] == 2:
    import cPickle as pickle
else:
    import pickle

logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
logger = logging.getLogger(__name__)


class SquadExample(object):
    """A single training/test example for the Squad dataset."""
    """
    A single training/test example for the Squad dataset.
    For examples without an answer, the start and end position are -1.
    """

    def __init__(self,
                 qas_id,
@@ -54,13 +61,15 @@ class SquadExample(object):
                 doc_tokens,
                 orig_answer_text=None,
                 start_position=None,
                 end_position=None):
                 end_position=None,
                 is_impossible=None):
        self.qas_id = qas_id
        self.question_text = question_text
        self.doc_tokens = doc_tokens
        self.orig_answer_text = orig_answer_text
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible

    def __str__(self):
        return self.__repr__()
@@ -73,8 +82,10 @@ class SquadExample(object):
        s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
        if self.start_position:
            s += ", start_position: %d" % (self.start_position)
        if self.start_position:
        if self.end_position:
            s += ", end_position: %d" % (self.end_position)
        if self.is_impossible:
            s += ", is_impossible: %r" % (self.is_impossible)
        return s


@@ -92,7 +103,8 @@ class InputFeatures(object):
                 input_mask,
                 segment_ids,
                 start_position=None,
                 end_position=None):
                 end_position=None,
                 is_impossible=None):
        self.unique_id = unique_id
        self.example_index = example_index
        self.doc_span_index = doc_span_index
@@ -104,9 +116,10 @@ class InputFeatures(object):
        self.segment_ids = segment_ids
        self.start_position = start_position
        self.end_position = end_position
        self.is_impossible = is_impossible


def read_squad_examples(input_file, is_training):
def read_squad_examples(input_file, is_training, version_2_with_negative):
    """Read a SQuAD json file into a list of SquadExample."""
    with open(input_file, "r", encoding='utf-8') as reader:
        input_data = json.load(reader)["data"]
@@ -140,29 +153,37 @@ def read_squad_examples(input_file, is_training):
                start_position = None
                end_position = None
                orig_answer_text = None
                is_impossible = False
                if is_training:
                    if len(qa["answers"]) != 1:
                    if version_2_with_negative:
                        is_impossible = qa["is_impossible"]
                    if (len(qa["answers"]) != 1) and (not is_impossible):
                        raise ValueError(
                            "For training, each question should have exactly 1 answer.")
                    answer = qa["answers"][0]
                    orig_answer_text = answer["text"]
                    answer_offset = answer["answer_start"]
                    answer_length = len(orig_answer_text)
                    start_position = char_to_word_offset[answer_offset]
                    end_position = char_to_word_offset[answer_offset + answer_length - 1]
                    # Only add answers where the text can be exactly recovered from the
                    # document. If this CAN'T happen it's likely due to weird Unicode
                    # stuff so we will just skip the example.
                    #
                    # Note that this means for training mode, every example is NOT
                    # guaranteed to be preserved.
                    actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
                    cleaned_answer_text = " ".join(
                        whitespace_tokenize(orig_answer_text))
                    if actual_text.find(cleaned_answer_text) == -1:
                        logger.warning("Could not find answer: '%s' vs. '%s'",
                    if not is_impossible:
                        answer = qa["answers"][0]
                        orig_answer_text = answer["text"]
                        answer_offset = answer["answer_start"]
                        answer_length = len(orig_answer_text)
                        start_position = char_to_word_offset[answer_offset]
                        end_position = char_to_word_offset[answer_offset + answer_length - 1]
                        # Only add answers where the text can be exactly recovered from the
                        # document. If this CAN'T happen it's likely due to weird Unicode
                        # stuff so we will just skip the example.
                        #
                        # Note that this means for training mode, every example is NOT
                        # guaranteed to be preserved.
                        actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
                        cleaned_answer_text = " ".join(
                            whitespace_tokenize(orig_answer_text))
                        if actual_text.find(cleaned_answer_text) == -1:
                            logger.warning("Could not find answer: '%s' vs. '%s'",
                                           actual_text, cleaned_answer_text)
                        continue
                            continue
                    else:
                        start_position = -1
                        end_position = -1
                        orig_answer_text = ""

                example = SquadExample(
                    qas_id=qas_id,
@@ -170,7 +191,8 @@ def read_squad_examples(input_file, is_training):
                    doc_tokens=doc_tokens,
                    orig_answer_text=orig_answer_text,
                    start_position=start_position,
                    end_position=end_position)
                    end_position=end_position,
                    is_impossible=is_impossible)
                examples.append(example)
    return examples

@@ -200,7 +222,10 @@ def convert_examples_to_features(examples, tokenizer, max_seq_length,

        tok_start_position = None
        tok_end_position = None
        if is_training:
        if is_training and example.is_impossible:
            tok_start_position = -1
            tok_end_position = -1
        if is_training and not example.is_impossible:
            tok_start_position = orig_to_tok_index[example.start_position]
            if example.end_position < len(example.doc_tokens) - 1:
                tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
@@ -272,20 +297,25 @@ def convert_examples_to_features(examples, tokenizer, max_seq_length,

            start_position = None
            end_position = None
            if is_training:
            if is_training and not example.is_impossible:
                # For training, if our document chunk does not contain an annotation
                # we throw it out, since there is nothing to predict.
                doc_start = doc_span.start
                doc_end = doc_span.start + doc_span.length - 1
                if (example.start_position < doc_start or
                        example.end_position < doc_start or
                        example.start_position > doc_end or example.end_position > doc_end):
                    continue

                doc_offset = len(query_tokens) + 2
                start_position = tok_start_position - doc_start + doc_offset
                end_position = tok_end_position - doc_start + doc_offset

                out_of_span = False
                if not (tok_start_position >= doc_start and
                        tok_end_position <= doc_end):
                    out_of_span = True
                if out_of_span:
                    start_position = 0
                    end_position = 0
                else:
                    doc_offset = len(query_tokens) + 2
                    start_position = tok_start_position - doc_start + doc_offset
                    end_position = tok_end_position - doc_start + doc_offset
            if is_training and example.is_impossible:
                start_position = 0
                end_position = 0
            if example_index < 20:
                logger.info("*** Example ***")
                logger.info("unique_id: %s" % (unique_id))
@@ -302,7 +332,9 @@ def convert_examples_to_features(examples, tokenizer, max_seq_length,
                    "input_mask: %s" % " ".join([str(x) for x in input_mask]))
                logger.info(
                    "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
                if is_training:
                if is_training and example.is_impossible:
                    logger.info("impossible example")
                if is_training and not example.is_impossible:
                    answer_text = " ".join(tokens[start_position:(end_position + 1)])
                    logger.info("start_position: %d" % (start_position))
                    logger.info("end_position: %d" % (end_position))
@@ -321,7 +353,8 @@ def convert_examples_to_features(examples, tokenizer, max_seq_length,
                    input_mask=input_mask,
                    segment_ids=segment_ids,
                    start_position=start_position,
                    end_position=end_position))
                    end_position=end_position,
                    is_impossible=example.is_impossible))
            unique_id += 1

    return features
@@ -401,15 +434,15 @@ def _check_is_max_context(doc_spans, cur_span_index, position):
    return cur_span_index == best_span_index



RawResult = collections.namedtuple("RawResult",
                                   ["unique_id", "start_logits", "end_logits"])

def write_predictions(all_examples, all_features, all_results, n_best_size,
                      max_answer_length, do_lower_case, output_prediction_file,
                      output_nbest_file, verbose_logging):
    """Write final predictions to the json file."""
                      output_nbest_file, output_null_log_odds_file, verbose_logging,
                      version_2_with_negative, null_score_diff_threshold):
    """Write final predictions to the json file and log-odds of null if needed."""
    logger.info("Writing predictions to: %s" % (output_prediction_file))
    logger.info("Writing nbest to: %s" % (output_nbest_file))

@@ -427,15 +460,29 @@ def write_predictions(all_examples, all_features, all_results, n_best_size,

    all_predictions = collections.OrderedDict()
    all_nbest_json = collections.OrderedDict()
    scores_diff_json = collections.OrderedDict()

    for (example_index, example) in enumerate(all_examples):
        features = example_index_to_features[example_index]

        prelim_predictions = []
        # keep track of the minimum score of null start+end of position 0
        score_null = 1000000  # large and positive
        min_null_feature_index = 0  # the paragraph slice with min null score
        null_start_logit = 0  # the start logit at the slice with min null score
        null_end_logit = 0  # the end logit at the slice with min null score
        for (feature_index, feature) in enumerate(features):
            result = unique_id_to_result[feature.unique_id]

            start_indexes = _get_best_indexes(result.start_logits, n_best_size)
            end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            # if we could have irrelevant answers, get the min score of irrelevant
            if version_2_with_negative:
                feature_null_score = result.start_logits[0] + result.end_logits[0]
                if feature_null_score < score_null:
                    score_null = feature_null_score
                    min_null_feature_index = feature_index
                    null_start_logit = result.start_logits[0]
                    null_end_logit = result.end_logits[0]
            for start_index in start_indexes:
                for end_index in end_indexes:
                    # We could hypothetically create invalid predictions, e.g., predict
@@ -463,7 +510,14 @@ def write_predictions(all_examples, all_features, all_results, n_best_size,
                            end_index=end_index,
                            start_logit=result.start_logits[start_index],
                            end_logit=result.end_logits[end_index]))

        if version_2_with_negative:
            prelim_predictions.append(
                _PrelimPrediction(
                    feature_index=min_null_feature_index,
                    start_index=0,
                    end_index=0,
                    start_logit=null_start_logit,
                    end_logit=null_end_logit))
        prelim_predictions = sorted(
            prelim_predictions,
            key=lambda x: (x.start_logit + x.end_logit),
@@ -478,33 +532,51 @@ def write_predictions(all_examples, all_features, all_results, n_best_size,
            if len(nbest) >= n_best_size:
                break
            feature = features[pred.feature_index]
            if pred.start_index > 0:  # this is a non-null prediction
                tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
                orig_doc_start = feature.token_to_orig_map[pred.start_index]
                orig_doc_end = feature.token_to_orig_map[pred.end_index]
                orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
                tok_text = " ".join(tok_tokens)

            tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
            orig_doc_start = feature.token_to_orig_map[pred.start_index]
            orig_doc_end = feature.token_to_orig_map[pred.end_index]
            orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
            tok_text = " ".join(tok_tokens)
                # De-tokenize WordPieces that have been split off.
                tok_text = tok_text.replace(" ##", "")
                tok_text = tok_text.replace("##", "")

            # De-tokenize WordPieces that have been split off.
            tok_text = tok_text.replace(" ##", "")
            tok_text = tok_text.replace("##", "")
                # Clean whitespace
                tok_text = tok_text.strip()
                tok_text = " ".join(tok_text.split())
                orig_text = " ".join(orig_tokens)

            # Clean whitespace
            tok_text = tok_text.strip()
            tok_text = " ".join(tok_text.split())
            orig_text = " ".join(orig_tokens)
                final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
                if final_text in seen_predictions:
                    continue

            final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
            if final_text in seen_predictions:
                continue
                seen_predictions[final_text] = True
            else:
                final_text = ""
                seen_predictions[final_text] = True

            seen_predictions[final_text] = True
            nbest.append(
                _NbestPrediction(
                    text=final_text,
                    start_logit=pred.start_logit,
                    end_logit=pred.end_logit))

        # if we didn't include the empty option in the n-best, include it
        if version_2_with_negative:
            if "" not in seen_predictions:
                nbest.append(
                    _NbestPrediction(
                        text="",
                        start_logit=null_start_logit,
                        end_logit=null_end_logit))

            # In very rare edge cases we could only have single null prediction.
            # So we just create a nonce prediction in this case to avoid failure.
            if len(nbest)==1:
                nbest.insert(0,
                    _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))

        # In very rare edge cases we could have no valid predictions. So we
        # just create a nonce prediction in this case to avoid failure.
        if not nbest:
@@ -514,8 +586,12 @@ def write_predictions(all_examples, all_features, all_results, n_best_size,
        assert len(nbest) >= 1

        total_scores = []
        best_non_null_entry = None
        for entry in nbest:
            total_scores.append(entry.start_logit + entry.end_logit)
            if not best_non_null_entry:
                if entry.text:
                    best_non_null_entry = entry

        probs = _compute_softmax(total_scores)

@@ -530,8 +606,18 @@ def write_predictions(all_examples, all_features, all_results, n_best_size,

        assert len(nbest_json) >= 1

        all_predictions[example.qas_id] = nbest_json[0]["text"]
        all_nbest_json[example.qas_id] = nbest_json
        if not version_2_with_negative:
            all_predictions[example.qas_id] = nbest_json[0]["text"]
        else:
            # predict "" iff the null score - the score of best non-null > threshold
            score_diff = score_null - best_non_null_entry.start_logit - (
                best_non_null_entry.end_logit)
            scores_diff_json[example.qas_id] = score_diff
            if score_diff > null_score_diff_threshold:
                all_predictions[example.qas_id] = ""
            else:
                all_predictions[example.qas_id] = best_non_null_entry.text
            all_nbest_json[example.qas_id] = nbest_json
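            # Worked example (illustrative numbers): if score_null = 1.2 and the best
            # non-null span has start_logit + end_logit = 3.0, then score_diff = -1.8;
            # with the default threshold of 0.0 the span answer is kept, whereas a
            # score_diff above the threshold would make the prediction "".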

    with open(output_prediction_file, "w") as writer:
        writer.write(json.dumps(all_predictions, indent=4) + "\n")
@@ -539,6 +625,10 @@ def write_predictions(all_examples, all_features, all_results, n_best_size,
    with open(output_nbest_file, "w") as writer:
        writer.write(json.dumps(all_nbest_json, indent=4) + "\n")

    if version_2_with_negative:
        with open(output_null_log_odds_file, "w") as writer:
            writer.write(json.dumps(scores_diff_json, indent=4) + "\n")


def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    """Project the tokenized prediction back to the original text."""
@@ -564,8 +654,8 @@ def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    #
    # What we really want to return is "Steve Smith".
    #
    # Therefore, we have to apply a semi-complicated alignment heruistic between
    # `pred_text` and `orig_text` to get a character-to-charcter alignment. This
    # Therefore, we have to apply a semi-complicated alignment heuristic between
    # `pred_text` and `orig_text` to get a character-to-character alignment. This
    # can fail in certain cases in which case we just return `orig_text`.

    def _strip_spaces(text):
@@ -601,7 +691,7 @@ def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
    if len(orig_ns_text) != len(tok_ns_text):
        if verbose_logging:
            logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
                            orig_ns_text, tok_ns_text)
                        orig_ns_text, tok_ns_text)
        return orig_text

    # We then project the characters in `pred_text` back to `orig_text` using
@@ -670,18 +760,14 @@ def _compute_softmax(scores):
        probs.append(score / total_sum)
    return probs

def warmup_linear(x, warmup=0.002):
    if x < warmup:
        return x/warmup
    return 1.0 - x

def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
                        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                        "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model checkpoints and predictions will be written.")

@@ -697,15 +783,15 @@ def main():
    parser.add_argument("--max_query_length", default=64, type=int,
                        help="The maximum number of tokens for the question. Questions longer than this will "
                             "be truncated to this length.")
    parser.add_argument("--do_train", default=False, action='store_true', help="Whether to run training.")
    parser.add_argument("--do_predict", default=False, action='store_true', help="Whether to run eval on the dev set.")
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_predict", action='store_true', help="Whether to run eval on the dev set.")
    parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.")
    parser.add_argument("--predict_batch_size", default=8, type=int, help="Total batch size for predictions.")
    parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10% "
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10%% "
                             "of training.")
    parser.add_argument("--n_best_size", default=20, type=int,
                        help="The total number of n-best predictions to generate in the nbest_predictions.json "
@@ -713,11 +799,10 @@ def main():
    parser.add_argument("--max_answer_length", default=30, type=int,
                        help="The maximum length of an answer that can be generated. This is needed because the start "
                             "and end predictions are not conditioned on one another.")
    parser.add_argument("--verbose_logging", default=False, action='store_true',
    parser.add_argument("--verbose_logging", action='store_true',
                        help="If true, all of the warnings related to data processing will be printed. "
                             "A number of warnings are expected for a normal SQuAD evaluation.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--seed',
@@ -729,7 +814,6 @@ def main():
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--do_lower_case",
                        default=True,
                        action='store_true',
                        help="Whether to lower case the input text. True for uncased models, False for cased models.")
    parser.add_argument("--local_rank",
@@ -737,7 +821,6 @@ def main():
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--fp16',
                        default=False,
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
@@ -745,8 +828,23 @@ def main():
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                             "0 (default value): dynamic loss scaling.\n"
                             "Positive power of 2: static loss scaling value.\n")

    parser.add_argument('--version_2_with_negative',
                        action='store_true',
                        help='If true, the SQuAD examples contain some that do not have an answer.')
    parser.add_argument('--null_score_diff_threshold',
                        type=float, default=0.0,
                        help="If null_score - best_non_null is greater than the threshold predict null.")
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
@@ -757,6 +855,11 @@ def main():
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')

    logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                        datefmt = '%m/%d/%Y %H:%M:%S',
                        level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)

    logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

@@ -764,7 +867,7 @@ def main():
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
@@ -784,23 +887,26 @@ def main():
            raise ValueError(
                "If `do_predict` is True, then `predict_file` must be specified.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError("Output directory () already exists and is not empty.")
    os.makedirs(args.output_dir, exist_ok=True)
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    tokenizer = BertTokenizer.from_pretrained(args.bert_model)
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    train_examples = None
    num_train_steps = None
    num_train_optimization_steps = None
    if args.do_train:
        train_examples = read_squad_examples(
            input_file=args.train_file, is_training=True)
        num_train_steps = int(
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
            input_file=args.train_file, is_training=True, version_2_with_negative=args.version_2_with_negative)
        num_train_optimization_steps = int(
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
        if args.local_rank != -1:
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()

    # Prepare model
    model = BertForQuestionAnswering.from_pretrained(args.bert_model,
                cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank))
                cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)))

    if args.fp16:
        model.half()
@@ -828,9 +934,6 @@ def main():
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]

    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
@@ -846,16 +949,18 @@ def main():
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
        warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
                                             t_total=num_train_optimization_steps)
    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)
                             t_total=num_train_optimization_steps)

    global_step = 0
    if args.do_train:
        cached_train_features_file = args.train_file+'_{0}_{1}_{2}_{3}'.format(
            args.bert_model, str(args.max_seq_length), str(args.doc_stride), str(args.max_query_length))
            list(filter(None, args.bert_model.split('/'))).pop(), str(args.max_seq_length), str(args.doc_stride), str(args.max_query_length))
        train_features = None
        try:
            with open(cached_train_features_file, "rb") as reader:
@@ -876,7 +981,7 @@ def main():
        logger.info("  Num orig examples = %d", len(train_examples))
        logger.info("  Num split examples = %d", len(train_features))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        logger.info("  Num steps = %d", num_train_optimization_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
@@ -892,7 +997,7 @@ def main():

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration")):
            for step, batch in enumerate(tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
                if n_gpu == 1:
                    batch = tuple(t.to(device) for t in batch) # multi-gpu does scattering it-self
                input_ids, input_mask, segment_ids, start_positions, end_positions = batch
@@ -907,27 +1012,40 @@ def main():
                else:
                    loss.backward()
                if (step + 1) % args.gradient_accumulation_steps == 0:
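                    # An optimizer update happens only every gradient_accumulation_steps
                    # batches, so the effective batch size is
                    # train_batch_size * gradient_accumulation_steps
                    # (train_batch_size was divided by that factor above).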
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    if args.fp16:
                        # modify learning rate with special warm up BERT uses
                        # if args.fp16 is False, BertAdam is used and handles this automatically
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,
                                                                                 args.warmup_proportion)
                        for param_group in optimizer.param_groups:
                            param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

    # Save a trained model
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self
    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
    torch.save(model_to_save.state_dict(), output_model_file)
    if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model it-self

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = BertForQuestionAnswering.from_pretrained(args.output_dir)
        tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
    else:
        model = BertForQuestionAnswering.from_pretrained(args.bert_model)

    # Load a trained model that you have fine-tuned
    model_state_dict = torch.load(output_model_file)
    model = BertForQuestionAnswering.from_pretrained(args.bert_model, state_dict=model_state_dict)
    model.to(device)

    if args.do_predict and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        eval_examples = read_squad_examples(
            input_file=args.predict_file, is_training=False)
            input_file=args.predict_file, is_training=False, version_2_with_negative=args.version_2_with_negative)
        eval_features = convert_examples_to_features(
            examples=eval_examples,
            tokenizer=tokenizer,
@@ -953,7 +1071,7 @@ def main():
        model.eval()
        all_results = []
        logger.info("Start evaluating")
        for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"):
        for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating", disable=args.local_rank not in [-1, 0]):
            if len(all_results) % 1000 == 0:
                logger.info("Processing example: %d" % (len(all_results)))
            input_ids = input_ids.to(device)
@@ -971,10 +1089,12 @@ def main():
                                             end_logits=end_logits))
        output_prediction_file = os.path.join(args.output_dir, "predictions.json")
        output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json")
        output_null_log_odds_file = os.path.join(args.output_dir, "null_odds.json")
        write_predictions(eval_examples, eval_features, all_results,
                          args.n_best_size, args.max_answer_length,
                          args.do_lower_case, output_prediction_file,
                          output_nbest_file, args.verbose_logging)
                          output_nbest_file, output_null_log_odds_file, args.verbose_logging,
                          args.version_2_with_negative, args.null_score_diff_threshold)


if __name__ == "__main__":

@@ -1,5 +1,5 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,22 +15,27 @@
# limitations under the License.
"""BERT finetuning runner."""

from __future__ import absolute_import

import argparse
import csv
import logging
import os
import argparse
import random
from tqdm import tqdm, trange
import csv
import sys
from io import open

import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange

from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE, WEIGHTS_NAME, CONFIG_NAME
from pytorch_pretrained_bert.modeling import BertForMultipleChoice, BertConfig
from pytorch_pretrained_bert.optimization import BertAdam, WarmupLinearSchedule
from pytorch_pretrained_bert.tokenization import BertTokenizer
from pytorch_pretrained_bert.modeling import BertForMultipleChoice
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.file_utils import PYTORCH_PRETRAINED_BERT_CACHE

logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
@@ -65,17 +70,17 @@ class SwagExample(object):

    def __repr__(self):
        l = [
            f"swag_id: {self.swag_id}",
            f"context_sentence: {self.context_sentence}",
            f"start_ending: {self.start_ending}",
            f"ending_0: {self.endings[0]}",
            f"ending_1: {self.endings[1]}",
            f"ending_2: {self.endings[2]}",
            f"ending_3: {self.endings[3]}",
            "swag_id: {}".format(self.swag_id),
            "context_sentence: {}".format(self.context_sentence),
            "start_ending: {}".format(self.start_ending),
            "ending_0: {}".format(self.endings[0]),
            "ending_1: {}".format(self.endings[1]),
            "ending_2: {}".format(self.endings[2]),
            "ending_3: {}".format(self.endings[3]),
        ]

        if self.label is not None:
            l.append(f"label: {self.label}")
            l.append("label: {}".format(self.label))

        return ", ".join(l)

@@ -102,7 +107,11 @@ class InputFeatures(object):
def read_swag_examples(input_file, is_training):
    with open(input_file, 'r', encoding='utf-8') as f:
        reader = csv.reader(f)
        lines = list(reader)
        lines = []
        for line in reader:
            if sys.version_info[0] == 2:
 | 
			
		||||
                line = list(unicode(cell, 'utf-8') for cell in line)
 | 
			
		||||
            lines.append(line)
 | 
			
		||||
 | 
			
		||||
    if is_training and lines[0][-1] != 'label':
 | 
			
		||||
        raise ValueError(
 | 
			
		||||
@ -184,15 +193,15 @@ def convert_examples_to_features(examples, tokenizer, max_seq_length,
 | 
			
		||||
        label = example.label
 | 
			
		||||
        if example_index < 5:
 | 
			
		||||
            logger.info("*** Example ***")
 | 
			
		||||
            logger.info(f"swag_id: {example.swag_id}")
 | 
			
		||||
            logger.info("swag_id: {}".format(example.swag_id))
 | 
			
		||||
            for choice_idx, (tokens, input_ids, input_mask, segment_ids) in enumerate(choices_features):
 | 
			
		||||
                logger.info(f"choice: {choice_idx}")
 | 
			
		||||
                logger.info(f"tokens: {' '.join(tokens)}")
 | 
			
		||||
                logger.info(f"input_ids: {' '.join(map(str, input_ids))}")
 | 
			
		||||
                logger.info(f"input_mask: {' '.join(map(str, input_mask))}")
 | 
			
		||||
                logger.info(f"segment_ids: {' '.join(map(str, segment_ids))}")
 | 
			
		||||
                logger.info("choice: {}".format(choice_idx))
 | 
			
		||||
                logger.info("tokens: {}".format(' '.join(tokens)))
 | 
			
		||||
                logger.info("input_ids: {}".format(' '.join(map(str, input_ids))))
 | 
			
		||||
                logger.info("input_mask: {}".format(' '.join(map(str, input_mask))))
 | 
			
		||||
                logger.info("segment_ids: {}".format(' '.join(map(str, segment_ids))))
 | 
			
		||||
            if is_training:
 | 
			
		||||
                logger.info(f"label: {label}")
 | 
			
		||||
                logger.info("label: {}".format(label))
 | 
			
		||||
 | 
			
		||||
        features.append(
 | 
			
		||||
            InputFeatures(
 | 
			
		||||
@ -233,11 +242,6 @@ def select_field(features, field):
 | 
			
		||||
        for feature in features
 | 
			
		||||
    ]
 | 
			
		||||
 | 
			
		||||
def warmup_linear(x, warmup=0.002):
 | 
			
		||||
    if x < warmup:
 | 
			
		||||
        return x/warmup
 | 
			
		||||
    return 1.0 - x
 | 
			
		||||
 | 
			
		||||
def main():
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
 | 
			
		||||
@ -249,7 +253,8 @@ def main():
 | 
			
		||||
                        help="The input data dir. Should contain the .csv files (or other data files) for the task.")
 | 
			
		||||
    parser.add_argument("--bert_model", default=None, type=str, required=True,
 | 
			
		||||
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
 | 
			
		||||
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
 | 
			
		||||
                        "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
 | 
			
		||||
                        "bert-base-multilingual-cased, bert-base-chinese.")
 | 
			
		||||
    parser.add_argument("--output_dir",
 | 
			
		||||
                        default=None,
 | 
			
		||||
                        type=str,
 | 
			
		||||
@ -264,15 +269,12 @@ def main():
 | 
			
		||||
                             "Sequences longer than this will be truncated, and sequences shorter \n"
 | 
			
		||||
                             "than this will be padded.")
 | 
			
		||||
    parser.add_argument("--do_train",
 | 
			
		||||
                        default=False,
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to run training.")
 | 
			
		||||
    parser.add_argument("--do_eval",
 | 
			
		||||
                        default=False,
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to run eval on the dev set.")
 | 
			
		||||
    parser.add_argument("--do_lower_case",
 | 
			
		||||
                        default=False,
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Set this flag if you are using an uncased model.")
 | 
			
		||||
    parser.add_argument("--train_batch_size",
 | 
			
		||||
@ -297,7 +299,6 @@ def main():
 | 
			
		||||
                        help="Proportion of training to perform linear learning rate warmup for. "
 | 
			
		||||
                             "E.g., 0.1 = 10%% of training.")
 | 
			
		||||
    parser.add_argument("--no_cuda",
 | 
			
		||||
                        default=False,
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether not to use CUDA when available")
 | 
			
		||||
    parser.add_argument("--local_rank",
 | 
			
		||||
@ -313,7 +314,6 @@ def main():
 | 
			
		||||
                        default=1,
 | 
			
		||||
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
 | 
			
		||||
    parser.add_argument('--fp16',
 | 
			
		||||
                        default=False,
 | 
			
		||||
                        action='store_true',
 | 
			
		||||
                        help="Whether to use 16-bit float precision instead of 32-bit")
 | 
			
		||||
    parser.add_argument('--loss_scale',
 | 
			
		||||
@ -340,7 +340,7 @@ def main():
 | 
			
		||||
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
 | 
			
		||||
                            args.gradient_accumulation_steps))
 | 
			
		||||
 | 
			
		||||
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
 | 
			
		||||
    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
 | 
			
		||||
 | 
			
		||||
    random.seed(args.seed)
 | 
			
		||||
    np.random.seed(args.seed)
 | 
			
		||||
@ -353,20 +353,23 @@ def main():
 | 
			
		||||
 | 
			
		||||
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
 | 
			
		||||
        raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
 | 
			
		||||
    os.makedirs(args.output_dir, exist_ok=True)
 | 
			
		||||
    if not os.path.exists(args.output_dir):
 | 
			
		||||
        os.makedirs(args.output_dir)
 | 
			
		||||
 | 
			
		||||
    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)
 | 
			
		||||
 | 
			
		||||
    train_examples = None
 | 
			
		||||
    num_train_steps = None
 | 
			
		||||
    num_train_optimization_steps = None
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        train_examples = read_swag_examples(os.path.join(args.data_dir, 'train.csv'), is_training = True)
 | 
			
		||||
        num_train_steps = int(
 | 
			
		||||
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)
 | 
			
		||||
        num_train_optimization_steps = int(
 | 
			
		||||
            len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
 | 
			
		||||
        if args.local_rank != -1:
 | 
			
		||||
            num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()
 | 
			
		||||
 | 
			
		||||
    # Prepare model
 | 
			
		||||
    model = BertForMultipleChoice.from_pretrained(args.bert_model,
 | 
			
		||||
        cache_dir=PYTORCH_PRETRAINED_BERT_CACHE / 'distributed_{}'.format(args.local_rank),
 | 
			
		||||
        cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), 'distributed_{}'.format(args.local_rank)),
 | 
			
		||||
        num_choices=4)
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        model.half()
 | 
			
		||||
@ -393,9 +396,6 @@ def main():
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.01},
 | 
			
		||||
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
 | 
			
		||||
        ]
 | 
			
		||||
    t_total = num_train_steps
 | 
			
		||||
    if args.local_rank != -1:
 | 
			
		||||
        t_total = t_total // torch.distributed.get_world_size()
 | 
			
		||||
    if args.fp16:
 | 
			
		||||
        try:
 | 
			
		||||
            from apex.optimizers import FP16_Optimizer
 | 
			
		||||
@ -411,11 +411,13 @@ def main():
 | 
			
		||||
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
 | 
			
		||||
        else:
 | 
			
		||||
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
 | 
			
		||||
        warmup_linear = WarmupLinearSchedule(warmup=args.warmup_proportion,
 | 
			
		||||
                                             t_total=num_train_optimization_steps)
 | 
			
		||||
    else:
 | 
			
		||||
        optimizer = BertAdam(optimizer_grouped_parameters,
 | 
			
		||||
                             lr=args.learning_rate,
 | 
			
		||||
                             warmup=args.warmup_proportion,
 | 
			
		||||
                             t_total=t_total)
 | 
			
		||||
                             t_total=num_train_optimization_steps)
 | 
			
		||||
 | 
			
		||||
    global_step = 0
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
@ -424,7 +426,7 @@ def main():
 | 
			
		||||
        logger.info("***** Running training *****")
 | 
			
		||||
        logger.info("  Num examples = %d", len(train_examples))
 | 
			
		||||
        logger.info("  Batch size = %d", args.train_batch_size)
 | 
			
		||||
        logger.info("  Num steps = %d", num_train_steps)
 | 
			
		||||
        logger.info("  Num steps = %d", num_train_optimization_steps)
 | 
			
		||||
        all_input_ids = torch.tensor(select_field(train_features, 'input_ids'), dtype=torch.long)
 | 
			
		||||
        all_input_mask = torch.tensor(select_field(train_features, 'input_mask'), dtype=torch.long)
 | 
			
		||||
        all_segment_ids = torch.tensor(select_field(train_features, 'segment_ids'), dtype=torch.long)
 | 
			
		||||
@ -461,26 +463,38 @@ def main():
 | 
			
		||||
                else:
 | 
			
		||||
                    loss.backward()
 | 
			
		||||
                if (step + 1) % args.gradient_accumulation_steps == 0:
 | 
			
		||||
                    # modify learning rate with special warm up BERT uses
 | 
			
		||||
                    lr_this_step = args.learning_rate * warmup_linear(global_step/t_total, args.warmup_proportion)
 | 
			
		||||
                    for param_group in optimizer.param_groups:
 | 
			
		||||
                        param_group['lr'] = lr_this_step
 | 
			
		||||
                    if args.fp16:
 | 
			
		||||
                        # modify learning rate with special warm up BERT uses
 | 
			
		||||
                        # if args.fp16 is False, BertAdam is used that handles this automatically
 | 
			
		||||
                        lr_this_step = args.learning_rate * warmup_linear.get_lr(global_step/num_train_optimization_steps,
 | 
			
		||||
                                                                                 args.warmup_proportion)
 | 
			
		||||
                        for param_group in optimizer.param_groups:
 | 
			
		||||
                            param_group['lr'] = lr_this_step
 | 
			
		||||
                    optimizer.step()
 | 
			
		||||
                    optimizer.zero_grad()
 | 
			
		||||
                    global_step += 1
 | 
			
		||||
 | 
			
		||||
    # Save a trained model
 | 
			
		||||
    model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
 | 
			
		||||
    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
 | 
			
		||||
    torch.save(model_to_save.state_dict(), output_model_file)
 | 
			
		||||
 | 
			
		||||
    # Load a trained model that you have fine-tuned
 | 
			
		||||
    model_state_dict = torch.load(output_model_file)
 | 
			
		||||
    model = BertForMultipleChoice.from_pretrained(args.bert_model,
 | 
			
		||||
        state_dict=model_state_dict,
 | 
			
		||||
        num_choices=4)
 | 
			
		||||
    if args.do_train:
 | 
			
		||||
        # Save a trained model, configuration and tokenizer
 | 
			
		||||
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
 | 
			
		||||
 | 
			
		||||
        # If we save using the predefined names, we can load using `from_pretrained`
 | 
			
		||||
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
 | 
			
		||||
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)
 | 
			
		||||
 | 
			
		||||
        torch.save(model_to_save.state_dict(), output_model_file)
 | 
			
		||||
        model_to_save.config.to_json_file(output_config_file)
 | 
			
		||||
        tokenizer.save_vocabulary(args.output_dir)
 | 
			
		||||
 | 
			
		||||
        # Load a trained model and vocabulary that you have fine-tuned
 | 
			
		||||
        model = BertForMultipleChoice.from_pretrained(args.output_dir, num_choices=4)
 | 
			
		||||
        tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
 | 
			
		||||
    else:
 | 
			
		||||
        model = BertForMultipleChoice.from_pretrained(args.bert_model, num_choices=4)
 | 
			
		||||
    model.to(device)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    if args.do_eval and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
 | 
			
		||||
        eval_examples = read_swag_examples(os.path.join(args.data_dir, 'val.csv'), is_training = True)
 | 
			
		||||
        eval_features = convert_examples_to_features(
 | 
			
		||||
@ -500,7 +514,7 @@ def main():
 | 
			
		||||
        model.eval()
 | 
			
		||||
        eval_loss, eval_accuracy = 0, 0
 | 
			
		||||
        nb_eval_steps, nb_eval_examples = 0, 0
 | 
			
		||||
        for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
 | 
			
		||||
        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader, desc="Evaluating"):
 | 
			
		||||
            input_ids = input_ids.to(device)
 | 
			
		||||
            input_mask = input_mask.to(device)
 | 
			
		||||
            segment_ids = segment_ids.to(device)
 | 
			
		||||
 | 
			
		||||
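Earlier in this hunk the module-level warmup_linear helper is deleted: BertAdam now applies the warmup schedule internally, and the fp16 path uses WarmupLinearSchedule instead. Both implement the same linear warmup-then-decay multiplier on the learning rate; a minimal sketch of that factor, where x is global_step / num_train_optimization_steps:

    # Sketch of the linear warmup/decay factor applied to the learning rate
    # (equivalent to the removed warmup_linear helper above).
    def linear_warmup_factor(x, warmup=0.002):
        if x < warmup:
            return x / warmup   # ramp up linearly during the warmup fraction
        return 1.0 - x          # then decay linearly towards zero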
							
								
								
									
examples/run_transfo_xl.py (new file, 153 lines)
@ -0,0 +1,153 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
""" PyTorch Transformer XL model evaluation script.
 | 
			
		||||
    Adapted from https://github.com/kimiyoung/transformer-xl.
 | 
			
		||||
    In particular https://github.com/kimiyoung/transformer-xl/blob/master/pytorch/eval.py
 | 
			
		||||
 | 
			
		||||
    This script with default values evaluates a pretrained Transformer-XL on WikiText 103
 | 
			
		||||
"""
 | 
			
		||||
from __future__ import absolute_import, division, print_function, unicode_literals
 | 
			
		||||
 | 
			
		||||
import argparse
 | 
			
		||||
import logging
 | 
			
		||||
import time
 | 
			
		||||
import math
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
 | 
			
		||||
from pytorch_pretrained_bert import TransfoXLLMHeadModel, TransfoXLCorpus, TransfoXLTokenizer
 | 
			
		||||
 | 
			
		||||
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
 | 
			
		||||
                    datefmt = '%m/%d/%Y %H:%M:%S',
 | 
			
		||||
                    level = logging.INFO)
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
def main():
 | 
			
		||||
    parser = argparse.ArgumentParser(description='PyTorch Transformer Language Model')
 | 
			
		||||
    parser.add_argument('--model_name', type=str, default='transfo-xl-wt103',
 | 
			
		||||
                        help='pretrained model name')
 | 
			
		||||
    parser.add_argument('--split', type=str, default='test',
 | 
			
		||||
                        choices=['all', 'valid', 'test'],
 | 
			
		||||
                        help='which split to evaluate')
 | 
			
		||||
    parser.add_argument('--batch_size', type=int, default=10,
 | 
			
		||||
                        help='batch size')
 | 
			
		||||
    parser.add_argument('--tgt_len', type=int, default=128,
 | 
			
		||||
                        help='number of tokens to predict')
 | 
			
		||||
    parser.add_argument('--ext_len', type=int, default=0,
 | 
			
		||||
                        help='length of the extended context')
 | 
			
		||||
    parser.add_argument('--mem_len', type=int, default=1600,
 | 
			
		||||
                        help='length of the retained previous heads')
 | 
			
		||||
    parser.add_argument('--clamp_len', type=int, default=1000,
 | 
			
		||||
                        help='max positional embedding index')
 | 
			
		||||
    parser.add_argument('--no_cuda', action='store_true',
 | 
			
		||||
                        help='Do not use CUDA even though CUDA is available')
 | 
			
		||||
    parser.add_argument('--work_dir', type=str, required=True,
 | 
			
		||||
                        help='path to the work_dir')
 | 
			
		||||
    parser.add_argument('--no_log', action='store_true',
 | 
			
		||||
                        help='do not log the eval result')
 | 
			
		||||
    parser.add_argument('--same_length', action='store_true',
 | 
			
		||||
                        help='set same length attention with masking')
 | 
			
		||||
    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
 | 
			
		||||
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
    assert args.ext_len >= 0, 'extended context length must be non-negative'
 | 
			
		||||
 | 
			
		||||
    if args.server_ip and args.server_port:
 | 
			
		||||
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
 | 
			
		||||
        import ptvsd
 | 
			
		||||
        print("Waiting for debugger attach")
 | 
			
		||||
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
 | 
			
		||||
        ptvsd.wait_for_attach()
 | 
			
		||||
 | 
			
		||||
    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
 | 
			
		||||
    logger.info("device: {}".format(device))
 | 
			
		||||
 | 
			
		||||
    # Load a pre-processed dataset
 | 
			
		||||
    # You can also build the corpus yourself using TransfoXLCorpus methods
 | 
			
		||||
    # The pre-processing involves computing word frequencies to prepare the Adaptive input and SoftMax
 | 
			
		||||
    # and tokenizing the dataset
 | 
			
		||||
    # The pre-processed corpus is a conversion (using the conversion script)
 | 
			
		||||
    tokenizer = TransfoXLTokenizer.from_pretrained(args.model_name)
 | 
			
		||||
    corpus = TransfoXLCorpus.from_pretrained(args.model_name)
 | 
			
		||||
    ntokens = len(corpus.vocab)
 | 
			
		||||
 | 
			
		||||
    va_iter = corpus.get_iterator('valid', args.batch_size, args.tgt_len,
 | 
			
		||||
        device=device, ext_len=args.ext_len)
 | 
			
		||||
    te_iter = corpus.get_iterator('test', args.batch_size, args.tgt_len,
 | 
			
		||||
        device=device, ext_len=args.ext_len)
 | 
			
		||||
 | 
			
		||||
    # Load a pre-trained model
 | 
			
		||||
    model = TransfoXLLMHeadModel.from_pretrained(args.model_name)
 | 
			
		||||
    model = model.to(device)
 | 
			
		||||
 | 
			
		||||
    logger.info('Evaluating with bsz {} tgt_len {} ext_len {} mem_len {} clamp_len {}'.format(
 | 
			
		||||
        args.batch_size, args.tgt_len, args.ext_len, args.mem_len, args.clamp_len))
 | 
			
		||||
 | 
			
		||||
    model.reset_length(args.tgt_len, args.ext_len, args.mem_len)
 | 
			
		||||
    if args.clamp_len > 0:
 | 
			
		||||
        model.clamp_len = args.clamp_len
 | 
			
		||||
    if args.same_length:
 | 
			
		||||
        model.same_length = True
 | 
			
		||||
 | 
			
		||||
    ###############################################################################
 | 
			
		||||
    # Evaluation code
 | 
			
		||||
    ###############################################################################
 | 
			
		||||
    def evaluate(eval_iter):
 | 
			
		||||
        # Turn on evaluation mode which disables dropout.
 | 
			
		||||
        model.eval()
 | 
			
		||||
        total_len, total_loss = 0, 0.
 | 
			
		||||
        start_time = time.time()
 | 
			
		||||
        with torch.no_grad():
 | 
			
		||||
            mems = None
 | 
			
		||||
            for idx, (data, target, seq_len) in enumerate(eval_iter):
 | 
			
		||||
                ret = model(data, target, mems)
 | 
			
		||||
                loss, mems = ret
 | 
			
		||||
                loss = loss.mean()
 | 
			
		||||
                total_loss += seq_len * loss.item()
 | 
			
		||||
                total_len += seq_len
 | 
			
		||||
            total_time = time.time() - start_time
 | 
			
		||||
        logger.info('Time : {:.2f}s, {:.2f}ms/segment'.format(
 | 
			
		||||
                total_time, 1000 * total_time / (idx+1)))
 | 
			
		||||
        return total_loss / total_len
 | 
			
		||||
 | 
			
		||||
    # Run on test data.
 | 
			
		||||
    if args.split == 'all':
 | 
			
		||||
        test_loss = evaluate(te_iter)
 | 
			
		||||
        valid_loss = evaluate(va_iter)
 | 
			
		||||
    elif args.split == 'valid':
 | 
			
		||||
        valid_loss = evaluate(va_iter)
 | 
			
		||||
        test_loss = None
 | 
			
		||||
    elif args.split == 'test':
 | 
			
		||||
        test_loss = evaluate(te_iter)
 | 
			
		||||
        valid_loss = None
 | 
			
		||||
 | 
			
		||||
    def format_log(loss, split):
 | 
			
		||||
        log_str = '| {0} loss {1:5.2f} | {0} ppl {2:9.3f} '.format(
 | 
			
		||||
            split, loss, math.exp(loss))
 | 
			
		||||
        return log_str
 | 
			
		||||
 | 
			
		||||
    log_str = ''
 | 
			
		||||
    if valid_loss is not None:
 | 
			
		||||
        log_str += format_log(valid_loss, 'valid')
 | 
			
		||||
    if test_loss is not None:
 | 
			
		||||
        log_str += format_log(test_loss, 'test')
 | 
			
		||||
 | 
			
		||||
    logger.info('=' * 100)
 | 
			
		||||
    logger.info(log_str)
 | 
			
		||||
    logger.info('=' * 100)
 | 
			
		||||
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
    main()
 | 
			
		||||
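The evaluate helper above returns the average per-token loss, and format_log reports perplexity as exp(loss); with default arguments the script evaluates transfo-xl-wt103 on the WikiText-103 test split (a --work_dir path must still be supplied). A tiny worked example of the loss-to-perplexity relation, with an illustrative number:

    import math
    avg_loss = 3.2                 # hypothetical average token loss returned by evaluate()
    ppl = math.exp(avg_loss)       # ~24.5, the value format_log would report as ppl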
							
								
								
									
hubconf.py (new file, 187 lines)
@ -0,0 +1,187 @@
 | 
			
		||||
from pytorch_pretrained_bert.tokenization import BertTokenizer
 | 
			
		||||
from pytorch_pretrained_bert.modeling import (
 | 
			
		||||
        BertModel,
 | 
			
		||||
        BertForNextSentencePrediction,
 | 
			
		||||
        BertForMaskedLM,
 | 
			
		||||
        BertForMultipleChoice,
 | 
			
		||||
        BertForPreTraining,
 | 
			
		||||
        BertForQuestionAnswering,
 | 
			
		||||
        BertForSequenceClassification,
 | 
			
		||||
        BertForTokenClassification,
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
dependencies = ['torch', 'tqdm', 'boto3', 'requests', 'regex']
 | 
			
		||||
 | 
			
		||||
# A lot of models share the same param doc. Use a decorator
 | 
			
		||||
# to save typing
 | 
			
		||||
bert_docstring = """
 | 
			
		||||
    Params:
 | 
			
		||||
        pretrained_model_name_or_path: either:
 | 
			
		||||
            - a str with the name of a pre-trained model to load
 | 
			
		||||
                . `bert-base-uncased`
 | 
			
		||||
                . `bert-large-uncased`
 | 
			
		||||
                . `bert-base-cased`
 | 
			
		||||
                . `bert-large-cased`
 | 
			
		||||
                . `bert-base-multilingual-uncased`
 | 
			
		||||
                . `bert-base-multilingual-cased`
 | 
			
		||||
                . `bert-base-chinese`
 | 
			
		||||
            - a path or url to a pretrained model archive containing:
 | 
			
		||||
                . `bert_config.json` a configuration file for the model
 | 
			
		||||
                . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining
 | 
			
		||||
                  instance
 | 
			
		||||
            - a path or url to a pretrained model archive containing:
 | 
			
		||||
                . `bert_config.json` a configuration file for the model
 | 
			
		||||
                . `model.chkpt` a TensorFlow checkpoint
 | 
			
		||||
        from_tf: should we load the weights from a locally saved TensorFlow
 | 
			
		||||
                 checkpoint
 | 
			
		||||
        cache_dir: an optional path to a folder in which the pre-trained models
 | 
			
		||||
                   will be cached.
 | 
			
		||||
        state_dict: an optional state dictionary
 | 
			
		||||
                    (collections.OrderedDict object) to use instead of Google
 | 
			
		||||
                    pre-trained models
 | 
			
		||||
        *inputs, **kwargs: additional input for the specific Bert class
 | 
			
		||||
            (ex: num_labels for BertForSequenceClassification)
 | 
			
		||||
"""
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def _append_from_pretrained_docstring(docstr):
 | 
			
		||||
    def docstring_decorator(fn):
 | 
			
		||||
        fn.__doc__ = fn.__doc__ + docstr
 | 
			
		||||
        return fn
 | 
			
		||||
    return docstring_decorator
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def bertTokenizer(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    Instantiate a BertTokenizer from a pre-trained/customized vocab file
 | 
			
		||||
    Args:
 | 
			
		||||
    pretrained_model_name_or_path: Path to pretrained model archive
 | 
			
		||||
                                   or one of pre-trained vocab configs below.
 | 
			
		||||
                                       * bert-base-uncased
 | 
			
		||||
                                       * bert-large-uncased
 | 
			
		||||
                                       * bert-base-cased
 | 
			
		||||
                                       * bert-large-cased
 | 
			
		||||
                                       * bert-base-multilingual-uncased
 | 
			
		||||
                                       * bert-base-multilingual-cased
 | 
			
		||||
                                       * bert-base-chinese
 | 
			
		||||
    Keyword args:
 | 
			
		||||
    cache_dir: an optional path to a specific directory to download and cache
 | 
			
		||||
               the pre-trained model weights.
 | 
			
		||||
               Default: None
 | 
			
		||||
    do_lower_case: Whether to lower case the input.
 | 
			
		||||
                   Only has an effect when do_wordpiece_only=False
 | 
			
		||||
                   Default: True
 | 
			
		||||
    do_basic_tokenize: Whether to do basic tokenization before wordpiece.
 | 
			
		||||
                       Default: True
 | 
			
		||||
    max_len: An artificial maximum length to truncate tokenized sequences to;
 | 
			
		||||
             Effective maximum length is always the minimum of this
 | 
			
		||||
             value (if specified) and the underlying BERT model's
 | 
			
		||||
             sequence length.
 | 
			
		||||
             Default: None
 | 
			
		||||
    never_split: List of tokens which will never be split during tokenization.
 | 
			
		||||
                 Only has an effect when do_wordpiece_only=False
 | 
			
		||||
                 Default: ["[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]"]
 | 
			
		||||
 | 
			
		||||
    Example:
 | 
			
		||||
        >>> sentence = 'Hello, World!'
 | 
			
		||||
        >>> tokenizer = torch.hub.load('ailzhang/pytorch-pretrained-BERT:hubconf', 'bertTokenizer', 'bert-base-cased', do_basic_tokenize=False, force_reload=False)
 | 
			
		||||
        >>> toks = tokenizer.tokenize(sentence)
 | 
			
		||||
        ['Hello', '##,', 'World', '##!']
 | 
			
		||||
        >>> ids = tokenizer.convert_tokens_to_ids(toks)
 | 
			
		||||
        [8667, 28136, 1291, 28125]
 | 
			
		||||
    """
 | 
			
		||||
    tokenizer = BertTokenizer.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return tokenizer
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertModel(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BertModel is the basic BERT Transformer model with a layer of summed token,
 | 
			
		||||
    position and sequence embeddings followed by a series of identical
 | 
			
		||||
    self-attention blocks (12 for BERT-base, 24 for BERT-large).
 | 
			
		||||
    """
 | 
			
		||||
    model = BertModel.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForNextSentencePrediction(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BERT model with next sentence prediction head.
 | 
			
		||||
    This module comprises the BERT model followed by the next sentence
 | 
			
		||||
    classification head.
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForNextSentencePrediction.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForPreTraining(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BERT model with pre-training heads.
 | 
			
		||||
    This module comprises the BERT model followed by the two pre-training heads
 | 
			
		||||
        - the masked language modeling head, and
 | 
			
		||||
        - the next sentence classification head.
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForPreTraining.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForMaskedLM(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BertForMaskedLM includes the BertModel Transformer followed by the
 | 
			
		||||
    (possibly) pre-trained masked language modeling head.
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForMaskedLM.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForSequenceClassification(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BertForSequenceClassification is a fine-tuning model that includes
 | 
			
		||||
    BertModel and a sequence-level (sequence or pair of sequences) classifier
 | 
			
		||||
    on top of the BertModel.
 | 
			
		||||
 | 
			
		||||
    The sequence-level classifier is a linear layer that takes as input the
 | 
			
		||||
    last hidden state of the first token in the input sequence
 | 
			
		||||
    (see Figures 3a and 3b in the BERT paper).
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForSequenceClassification.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForMultipleChoice(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BertForMultipleChoice is a fine-tuning model that includes BertModel and a
 | 
			
		||||
    linear layer on top of the BertModel.
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForMultipleChoice.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForQuestionAnswering(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BertForQuestionAnswering is a fine-tuning model that includes BertModel
 | 
			
		||||
    with token-level classifiers on top of the full sequence of last hidden
 | 
			
		||||
    states.
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForQuestionAnswering.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@_append_from_pretrained_docstring(bert_docstring)
 | 
			
		||||
def bertForTokenClassification(*args, **kwargs):
 | 
			
		||||
    """
 | 
			
		||||
    BertForTokenClassification is a fine-tuning model that includes BertModel
 | 
			
		||||
    and a token-level classifier on top of the BertModel.
 | 
			
		||||
 | 
			
		||||
    The token-level classifier is a linear layer that takes as input the last
 | 
			
		||||
    hidden state of the sequence.
 | 
			
		||||
    """
 | 
			
		||||
    model = BertForTokenClassification.from_pretrained(*args, **kwargs)
 | 
			
		||||
    return model
 | 
			
		||||
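Mirroring the bertTokenizer docstring example above, the model entry points defined in hubconf.py can be pulled in through torch.hub as well; a sketch (the repository reference is copied from that docstring example and may differ from the final hub location):

    import torch
    # Hedged sketch: load one of the entry points defined above via torch.hub.
    model = torch.hub.load('ailzhang/pytorch-pretrained-BERT:hubconf',
                           'bertForMaskedLM', 'bert-base-cased')
    model.eval()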
@ -1,8 +1,24 @@
__version__ = "0.4.0"
__version__ = "0.6.2"
from .tokenization import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from .tokenization_openai import OpenAIGPTTokenizer
from .tokenization_transfo_xl import (TransfoXLTokenizer, TransfoXLCorpus)
from .tokenization_gpt2 import GPT2Tokenizer

from .modeling import (BertConfig, BertModel, BertForPreTraining,
                       BertForMaskedLM, BertForNextSentencePrediction,
                       BertForSequenceClassification, BertForMultipleChoice,
                       BertForTokenClassification, BertForQuestionAnswering)
                       BertForTokenClassification, BertForQuestionAnswering,
                       load_tf_weights_in_bert)
from .modeling_openai import (OpenAIGPTConfig, OpenAIGPTModel,
                              OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel,
                              load_tf_weights_in_openai_gpt)
from .modeling_transfo_xl import (TransfoXLConfig, TransfoXLModel, TransfoXLLMHeadModel,
                                  load_tf_weights_in_transfo_xl)
from .modeling_gpt2 import (GPT2Config, GPT2Model,
                            GPT2LMHeadModel, GPT2DoubleHeadsModel,
                            load_tf_weights_in_gpt2)

from .optimization import BertAdam
from .file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from .optimization_openai import OpenAIAdam

from .file_utils import PYTORCH_PRETRAINED_BERT_CACHE, cached_path, WEIGHTS_NAME, CONFIG_NAME
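With the expanded export list above, downstream code can import the new GPT-2/OpenAI GPT classes and the weight/config file-name constants directly from the package root; a small sketch (the 'gpt2' shortcut name is an assumption and does not appear in this diff):

    from pytorch_pretrained_bert import (GPT2Tokenizer, GPT2LMHeadModel,
                                         OpenAIAdam, WEIGHTS_NAME, CONFIG_NAME)

    # 'gpt2' is a hypothetical pretrained shortcut here; any valid name or path works.
    tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
    model = GPT2LMHeadModel.from_pretrained('gpt2')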
@ -1,19 +1,83 @@
 | 
			
		||||
# coding: utf8
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
def main():
 | 
			
		||||
    import sys
 | 
			
		||||
    try:
 | 
			
		||||
        from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
 | 
			
		||||
    except ModuleNotFoundError:
 | 
			
		||||
        print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
 | 
			
		||||
              "In that case, it requires TensorFlow to be installed. Please see "
 | 
			
		||||
              "https://www.tensorflow.org/install/ for installation instructions.")
 | 
			
		||||
        raise
 | 
			
		||||
 | 
			
		||||
    if len(sys.argv) != 5:
 | 
			
		||||
        # pylint: disable=line-too-long
 | 
			
		||||
        print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
 | 
			
		||||
    if (len(sys.argv) != 4 and len(sys.argv) != 5) or sys.argv[1] not in [
 | 
			
		||||
        "convert_tf_checkpoint_to_pytorch",
 | 
			
		||||
        "convert_openai_checkpoint",
 | 
			
		||||
        "convert_transfo_xl_checkpoint",
 | 
			
		||||
        "convert_gpt2_checkpoint",
 | 
			
		||||
    ]:
 | 
			
		||||
        print(
 | 
			
		||||
        "Should be used as one of: \n"
 | 
			
		||||
        ">> `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`, \n"
 | 
			
		||||
        ">> `pytorch_pretrained_bert convert_openai_checkpoint OPENAI_GPT_CHECKPOINT_FOLDER_PATH PYTORCH_DUMP_OUTPUT [OPENAI_GPT_CONFIG]`, \n"
 | 
			
		||||
        ">> `pytorch_pretrained_bert convert_transfo_xl_checkpoint TF_CHECKPOINT_OR_DATASET PYTORCH_DUMP_OUTPUT [TF_CONFIG]` or \n"
 | 
			
		||||
        ">> `pytorch_pretrained_bert convert_gpt2_checkpoint TF_CHECKPOINT PYTORCH_DUMP_OUTPUT [GPT2_CONFIG]`")
 | 
			
		||||
    else:
 | 
			
		||||
        PYTORCH_DUMP_OUTPUT = sys.argv.pop()
 | 
			
		||||
        TF_CONFIG = sys.argv.pop()
 | 
			
		||||
        TF_CHECKPOINT = sys.argv.pop()
 | 
			
		||||
        convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
 | 
			
		||||
        if sys.argv[1] == "convert_tf_checkpoint_to_pytorch":
 | 
			
		||||
            try:
 | 
			
		||||
                from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
 | 
			
		||||
            except ImportError:
 | 
			
		||||
                print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
 | 
			
		||||
                    "In that case, it requires TensorFlow to be installed. Please see "
 | 
			
		||||
                    "https://www.tensorflow.org/install/ for installation instructions.")
 | 
			
		||||
                raise
 | 
			
		||||
 | 
			
		||||
            if len(sys.argv) != 5:
 | 
			
		||||
                # pylint: disable=line-too-long
 | 
			
		||||
                print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
 | 
			
		||||
            else:
 | 
			
		||||
                PYTORCH_DUMP_OUTPUT = sys.argv.pop()
 | 
			
		||||
                TF_CONFIG = sys.argv.pop()
 | 
			
		||||
                TF_CHECKPOINT = sys.argv.pop()
 | 
			
		||||
                convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
 | 
			
		||||
        elif sys.argv[1] == "convert_openai_checkpoint":
 | 
			
		||||
            from .convert_openai_checkpoint_to_pytorch import convert_openai_checkpoint_to_pytorch
 | 
			
		||||
            OPENAI_GPT_CHECKPOINT_FOLDER_PATH = sys.argv[2]
 | 
			
		||||
            PYTORCH_DUMP_OUTPUT = sys.argv[3]
 | 
			
		||||
            if len(sys.argv) == 5:
 | 
			
		||||
                OPENAI_GPT_CONFIG = sys.argv[4]
 | 
			
		||||
            else:
 | 
			
		||||
                OPENAI_GPT_CONFIG = ""
 | 
			
		||||
            convert_openai_checkpoint_to_pytorch(OPENAI_GPT_CHECKPOINT_FOLDER_PATH,
 | 
			
		||||
                                                 OPENAI_GPT_CONFIG,
 | 
			
		||||
                                                 PYTORCH_DUMP_OUTPUT)
 | 
			
		||||
        elif sys.argv[1] == "convert_transfo_xl_checkpoint":
 | 
			
		||||
            try:
 | 
			
		||||
                from .convert_transfo_xl_checkpoint_to_pytorch import convert_transfo_xl_checkpoint_to_pytorch
 | 
			
		||||
            except ImportError:
 | 
			
		||||
                print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
 | 
			
		||||
                    "In that case, it requires TensorFlow to be installed. Please see "
 | 
			
		||||
                    "https://www.tensorflow.org/install/ for installation instructions.")
 | 
			
		||||
                raise
 | 
			
		||||
 | 
			
		||||
            if 'ckpt' in sys.argv[2].lower():
 | 
			
		||||
                TF_CHECKPOINT = sys.argv[2]
 | 
			
		||||
                TF_DATASET_FILE = ""
 | 
			
		||||
            else:
 | 
			
		||||
                TF_DATASET_FILE = sys.argv[2]
 | 
			
		||||
                TF_CHECKPOINT = ""
 | 
			
		||||
            PYTORCH_DUMP_OUTPUT = sys.argv[3]
 | 
			
		||||
            if len(sys.argv) == 5:
 | 
			
		||||
                TF_CONFIG = sys.argv[4]
 | 
			
		||||
            else:
 | 
			
		||||
                TF_CONFIG = ""
 | 
			
		||||
            convert_transfo_xl_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT, TF_DATASET_FILE)
 | 
			
		||||
        else:
 | 
			
		||||
            try:
 | 
			
		||||
                from .convert_gpt2_checkpoint_to_pytorch import convert_gpt2_checkpoint_to_pytorch
 | 
			
		||||
            except ImportError:
 | 
			
		||||
                print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models in PyTorch, "
 | 
			
		||||
                    "In that case, it requires TensorFlow to be installed. Please see "
 | 
			
		||||
                    "https://www.tensorflow.org/install/ for installation instructions.")
 | 
			
		||||
                raise
 | 
			
		||||
 | 
			
		||||
            TF_CHECKPOINT = sys.argv[2]
 | 
			
		||||
            PYTORCH_DUMP_OUTPUT = sys.argv[3]
 | 
			
		||||
            if len(sys.argv) == 5:
 | 
			
		||||
                TF_CONFIG = sys.argv[4]
 | 
			
		||||
            else:
 | 
			
		||||
                TF_CONFIG = ""
 | 
			
		||||
            convert_gpt2_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)
 | 
			
		||||
if __name__ == '__main__':
 | 
			
		||||
    main()
 | 
			
		||||
 | 
			
		||||
							
								
								
									
pytorch_pretrained_bert/convert_gpt2_checkpoint_to_pytorch.py (new executable file, 72 lines)
@ -0,0 +1,72 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The HuggingFace Inc. team.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
"""Convert OpenAI GPT checkpoint."""
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import, division, print_function
 | 
			
		||||
 | 
			
		||||
import argparse
 | 
			
		||||
from io import open
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
 | 
			
		||||
from pytorch_pretrained_bert.modeling_gpt2 import (CONFIG_NAME, WEIGHTS_NAME,
 | 
			
		||||
                                                     GPT2Config,
 | 
			
		||||
                                                     GPT2Model,
 | 
			
		||||
                                                     load_tf_weights_in_gpt2)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
 | 
			
		||||
    # Construct model
 | 
			
		||||
    if gpt2_config_file == "":
 | 
			
		||||
        config = GPT2Config()
 | 
			
		||||
    else:
 | 
			
		||||
        config = GPT2Config(gpt2_config_file)
 | 
			
		||||
    model = GPT2Model(config)
 | 
			
		||||
 | 
			
		||||
    # Load weights from numpy
 | 
			
		||||
    load_tf_weights_in_gpt2(model, gpt2_checkpoint_path)
 | 
			
		||||
 | 
			
		||||
    # Save pytorch-model
 | 
			
		||||
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
 | 
			
		||||
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
 | 
			
		||||
    print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
 | 
			
		||||
    torch.save(model.state_dict(), pytorch_weights_dump_path)
 | 
			
		||||
    print("Save configuration file to {}".format(pytorch_config_dump_path))
 | 
			
		||||
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
 | 
			
		||||
        f.write(config.to_json_string())
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
    ## Required parameters
 | 
			
		||||
    parser.add_argument("--gpt2_checkpoint_path",
 | 
			
		||||
                        default = None,
 | 
			
		||||
                        type = str,
 | 
			
		||||
                        required = True,
 | 
			
		||||
                        help = "Path the TensorFlow checkpoint path.")
 | 
			
		||||
    parser.add_argument("--pytorch_dump_folder_path",
 | 
			
		||||
                        default = None,
 | 
			
		||||
                        type = str,
 | 
			
		||||
                        required = True,
 | 
			
		||||
                        help = "Path to the output PyTorch model.")
 | 
			
		||||
    parser.add_argument("--gpt2_config_file",
 | 
			
		||||
                        default = "",
 | 
			
		||||
                        type = str,
 | 
			
		||||
                        help = "An optional config json file corresponding to the pre-trained OpenAI model. \n"
 | 
			
		||||
                            "This specifies the model architecture.")
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path,
 | 
			
		||||
                                         args.gpt2_config_file,
 | 
			
		||||
                                         args.pytorch_dump_folder_path)
 | 
			
		||||
							
								
								
									
pytorch_pretrained_bert/convert_openai_checkpoint_to_pytorch.py (new executable file, 72 lines)
@ -0,0 +1,72 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The HuggingFace Inc. team.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
"""Convert OpenAI GPT checkpoint."""
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import, division, print_function
 | 
			
		||||
 | 
			
		||||
import argparse
 | 
			
		||||
from io import open
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
 | 
			
		||||
from pytorch_pretrained_bert.modeling_openai import (CONFIG_NAME, WEIGHTS_NAME,
 | 
			
		||||
                                                     OpenAIGPTConfig,
 | 
			
		||||
                                                     OpenAIGPTModel,
 | 
			
		||||
                                                     load_tf_weights_in_openai_gpt)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
 | 
			
		||||
    # Construct model
 | 
			
		||||
    if openai_config_file == "":
 | 
			
		||||
        config = OpenAIGPTConfig()
 | 
			
		||||
    else:
 | 
			
		||||
        config = OpenAIGPTConfig(openai_config_file)
 | 
			
		||||
    model = OpenAIGPTModel(config)
 | 
			
		||||
 | 
			
		||||
    # Load weights from numpy
 | 
			
		||||
    load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path)
 | 
			
		||||
 | 
			
		||||
    # Save pytorch-model
 | 
			
		||||
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
 | 
			
		||||
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
 | 
			
		||||
    print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
 | 
			
		||||
    torch.save(model.state_dict(), pytorch_weights_dump_path)
 | 
			
		||||
    print("Save configuration file to {}".format(pytorch_config_dump_path))
 | 
			
		||||
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
 | 
			
		||||
        f.write(config.to_json_string())
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
    ## Required parameters
 | 
			
		||||
    parser.add_argument("--openai_checkpoint_folder_path",
 | 
			
		||||
                        default = None,
 | 
			
		||||
                        type = str,
 | 
			
		||||
                        required = True,
 | 
			
		||||
                        help = "Path the TensorFlow checkpoint path.")
 | 
			
		||||
    parser.add_argument("--pytorch_dump_folder_path",
 | 
			
		||||
                        default = None,
 | 
			
		||||
                        type = str,
 | 
			
		||||
                        required = True,
 | 
			
		||||
                        help = "Path to the output PyTorch model.")
 | 
			
		||||
    parser.add_argument("--openai_config_file",
 | 
			
		||||
                        default = "",
 | 
			
		||||
                        type = str,
 | 
			
		||||
                        help = "An optional config json file corresponding to the pre-trained OpenAI model. \n"
 | 
			
		||||
                            "This specifies the model architecture.")
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
    convert_openai_checkpoint_to_pytorch(args.openai_checkpoint_folder_path,
 | 
			
		||||
                                         args.openai_config_file,
 | 
			
		||||
                                         args.pytorch_dump_folder_path)
 | 
			
		||||
@ -1,5 +1,5 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The HugginFace Inc. team.
 | 
			
		||||
# Copyright 2018 The HuggingFace Inc. team.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
@ -25,62 +25,16 @@ import tensorflow as tf
 | 
			
		||||
import torch
 | 
			
		||||
import numpy as np
 | 
			
		||||
 | 
			
		||||
from .modeling import BertConfig, BertForPreTraining
 | 
			
		||||
from pytorch_pretrained_bert.modeling import BertConfig, BertForPreTraining, load_tf_weights_in_bert
 | 
			
		||||
 | 
			
		||||
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
 | 
			
		||||
    config_path = os.path.abspath(bert_config_file)
 | 
			
		||||
    tf_path = os.path.abspath(tf_checkpoint_path)
 | 
			
		||||
    print("Converting TensorFlow checkpoint from {} with config at {}".format(tf_path, config_path))
 | 
			
		||||
    # Load weights from TF model
 | 
			
		||||
    init_vars = tf.train.list_variables(tf_path)
 | 
			
		||||
    names = []
 | 
			
		||||
    arrays = []
 | 
			
		||||
    for name, shape in init_vars:
 | 
			
		||||
        print("Loading TF weight {} with shape {}".format(name, shape))
 | 
			
		||||
        array = tf.train.load_variable(tf_path, name)
 | 
			
		||||
        names.append(name)
 | 
			
		||||
        arrays.append(array)
 | 
			
		||||
 | 
			
		||||
    # Initialise PyTorch model
 | 
			
		||||
    config = BertConfig.from_json_file(bert_config_file)
 | 
			
		||||
    print("Building PyTorch model from configuration: {}".format(str(config)))
 | 
			
		||||
    model = BertForPreTraining(config)
 | 
			
		||||
 | 
			
		||||
    for name, array in zip(names, arrays):
 | 
			
		||||
        name = name.split('/')
 | 
			
		||||
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
 | 
			
		||||
        # which are not required for using pretrained model
 | 
			
		||||
        if any(n in ["adam_v", "adam_m"] for n in name):
 | 
			
		||||
            print("Skipping {}".format("/".join(name)))
 | 
			
		||||
            continue
 | 
			
		||||
        pointer = model
 | 
			
		||||
        for m_name in name:
 | 
			
		||||
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
 | 
			
		||||
                l = re.split(r'_(\d+)', m_name)
 | 
			
		||||
            else:
 | 
			
		||||
                l = [m_name]
 | 
			
		||||
            if l[0] == 'kernel' or l[0] == 'gamma':
 | 
			
		||||
                pointer = getattr(pointer, 'weight')
 | 
			
		||||
            elif l[0] == 'output_bias' or l[0] == 'beta':
 | 
			
		||||
                pointer = getattr(pointer, 'bias')
 | 
			
		||||
            elif l[0] == 'output_weights':
 | 
			
		||||
                pointer = getattr(pointer, 'weight')
 | 
			
		||||
            else:
 | 
			
		||||
                pointer = getattr(pointer, l[0])
 | 
			
		||||
            if len(l) >= 2:
 | 
			
		||||
                num = int(l[1])
 | 
			
		||||
                pointer = pointer[num]
 | 
			
		||||
        if m_name[-11:] == '_embeddings':
 | 
			
		||||
            pointer = getattr(pointer, 'weight')
 | 
			
		||||
        elif m_name == 'kernel':
 | 
			
		||||
            array = np.transpose(array)
 | 
			
		||||
        try:
 | 
			
		||||
            assert pointer.shape == array.shape
 | 
			
		||||
        except AssertionError as e:
 | 
			
		||||
            e.args += (pointer.shape, array.shape)
 | 
			
		||||
            raise
 | 
			
		||||
        print("Initialize PyTorch weight {}".format(name))
 | 
			
		||||
        pointer.data = torch.from_numpy(array)
 | 
			
		||||
    # Load weights from tf checkpoint
 | 
			
		||||
    load_tf_weights_in_bert(model, tf_checkpoint_path)
 | 
			
		||||
 | 
			
		||||
    # Save pytorch-model
 | 
			
		||||
    print("Save PyTorch model to {}".format(pytorch_dump_path))
 | 
			
		||||
pytorch_pretrained_bert/convert_transfo_xl_checkpoint_to_pytorch.py (new executable file, +116 lines)
@ -0,0 +1,116 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The HuggingFace Inc. team.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
"""Convert Transformer XL checkpoint and datasets."""
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import, division, print_function
 | 
			
		||||
 | 
			
		||||
import argparse
 | 
			
		||||
import os
 | 
			
		||||
import sys
 | 
			
		||||
from io import open
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
 | 
			
		||||
import pytorch_pretrained_bert.tokenization_transfo_xl as data_utils
 | 
			
		||||
from pytorch_pretrained_bert.modeling_transfo_xl import (CONFIG_NAME,
 | 
			
		||||
                                                         WEIGHTS_NAME,
 | 
			
		||||
                                                         TransfoXLConfig,
 | 
			
		||||
                                                         TransfoXLLMHeadModel,
 | 
			
		||||
                                                         load_tf_weights_in_transfo_xl)
 | 
			
		||||
from pytorch_pretrained_bert.tokenization_transfo_xl import (CORPUS_NAME,
 | 
			
		||||
                                                             VOCAB_NAME)
 | 
			
		||||
 | 
			
		||||
if sys.version_info[0] == 2:
 | 
			
		||||
    import cPickle as pickle
 | 
			
		||||
else:
 | 
			
		||||
    import pickle
 | 
			
		||||
 | 
			
		||||
# We do this to be able to load Python 2 dataset pickles
 | 
			
		||||
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
 | 
			
		||||
data_utils.Vocab = data_utils.TransfoXLTokenizer
 | 
			
		||||
data_utils.Corpus = data_utils.TransfoXLCorpus
 | 
			
		||||
sys.modules['data_utils'] = data_utils
 | 
			
		||||
sys.modules['vocabulary'] = data_utils
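As a small illustration of why the `sys.modules` aliasing above is needed (a sketch, not part of the diff): pickles written by the original Python 2 codebase record classes by their old module names, so unpickling only succeeds if those module names resolve to something importable. The corpus file name below is a placeholder.

```python
# Sketch: pickle looks classes up by the "module.ClassName" stored in the file, so a corpus
# pickled as data_utils.Corpus / vocabulary.Vocab can only be restored if 'data_utils' and
# 'vocabulary' resolve to a module exposing those attributes.
import pickle
import sys

import pytorch_pretrained_bert.tokenization_transfo_xl as data_utils

data_utils.Vocab = data_utils.TransfoXLTokenizer   # old class name -> new class
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules['data_utils'] = data_utils             # old module names -> this module
sys.modules['vocabulary'] = data_utils

with open("corpus-cached.pkl", "rb") as fp:        # placeholder path
    corpus = pickle.load(fp, encoding="latin1")
```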
 | 
			
		||||
 | 
			
		||||
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path,
 | 
			
		||||
                                             transfo_xl_config_file,
 | 
			
		||||
                                             pytorch_dump_folder_path,
 | 
			
		||||
                                             transfo_xl_dataset_file):
 | 
			
		||||
    if transfo_xl_dataset_file:
 | 
			
		||||
        # Convert a pre-processed corpus (see original TensorFlow repo)
 | 
			
		||||
        with open(transfo_xl_dataset_file, "rb") as fp:
 | 
			
		||||
            corpus = pickle.load(fp, encoding="latin1")
 | 
			
		||||
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
 | 
			
		||||
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_NAME
 | 
			
		||||
        print("Save vocabulary to {}".format(pytorch_vocab_dump_path))
 | 
			
		||||
        corpus_vocab_dict = corpus.vocab.__dict__
 | 
			
		||||
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
 | 
			
		||||
 | 
			
		||||
        corpus_dict_no_vocab = corpus.__dict__
 | 
			
		||||
        corpus_dict_no_vocab.pop('vocab', None)
 | 
			
		||||
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
 | 
			
		||||
        print("Save dataset to {}".format(pytorch_dataset_dump_path))
 | 
			
		||||
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
 | 
			
		||||
 | 
			
		||||
    if tf_checkpoint_path:
 | 
			
		||||
        # Convert a pre-trained TensorFlow model
 | 
			
		||||
        config_path = os.path.abspath(transfo_xl_config_file)
 | 
			
		||||
        tf_path = os.path.abspath(tf_checkpoint_path)
 | 
			
		||||
 | 
			
		||||
        print("Converting Transformer XL checkpoint from {} with config at {}".format(tf_path, config_path))
 | 
			
		||||
        # Initialise PyTorch model
 | 
			
		||||
        if transfo_xl_config_file == "":
 | 
			
		||||
            config = TransfoXLConfig()
 | 
			
		||||
        else:
 | 
			
		||||
            config = TransfoXLConfig(transfo_xl_config_file)
 | 
			
		||||
        print("Building PyTorch model from configuration: {}".format(str(config)))
 | 
			
		||||
        model = TransfoXLLMHeadModel(config)
 | 
			
		||||
 | 
			
		||||
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
 | 
			
		||||
        # Save pytorch-model
 | 
			
		||||
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
 | 
			
		||||
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
 | 
			
		||||
        print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
 | 
			
		||||
        torch.save(model.state_dict(), pytorch_weights_dump_path)
 | 
			
		||||
        print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
 | 
			
		||||
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
 | 
			
		||||
            f.write(config.to_json_string())
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
if __name__ == "__main__":
 | 
			
		||||
    parser = argparse.ArgumentParser()
 | 
			
		||||
    parser.add_argument("--pytorch_dump_folder_path",
 | 
			
		||||
                        default = None,
 | 
			
		||||
                        type = str,
 | 
			
		||||
                        required = True,
 | 
			
		||||
                        help = "Path to the folder to store the PyTorch model or dataset/vocab.")
 | 
			
		||||
    parser.add_argument("--tf_checkpoint_path",
 | 
			
		||||
                        default = "",
 | 
			
		||||
                        type = str,
 | 
			
		||||
                        help = "An optional path to a TensorFlow checkpoint path to be converted.")
 | 
			
		||||
    parser.add_argument("--transfo_xl_config_file",
 | 
			
		||||
                        default = "",
 | 
			
		||||
                        type = str,
 | 
			
		||||
                        help = "An optional config json file corresponding to the pre-trained BERT model. \n"
 | 
			
		||||
                            "This specifies the model architecture.")
 | 
			
		||||
    parser.add_argument("--transfo_xl_dataset_file",
 | 
			
		||||
                        default = "",
 | 
			
		||||
                        type = str,
 | 
			
		||||
                        help = "An optional dataset file to be converted in a vocabulary.")
 | 
			
		||||
    args = parser.parse_args()
 | 
			
		||||
    convert_transfo_xl_checkpoint_to_pytorch(args.tf_checkpoint_path,
 | 
			
		||||
                                     args.transfo_xl_config_file,
 | 
			
		||||
                                     args.pytorch_dump_folder_path,
 | 
			
		||||
                                     args.transfo_xl_dataset_file)
 | 
			
		||||
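A hedged usage sketch of the script above, converting only a pre-processed corpus pickle (no TF checkpoint); the module path and file names are placeholders for illustration.

```python
# Hypothetical call converting only the dataset/vocabulary branch (tf_checkpoint_path left empty).
from pytorch_pretrained_bert.convert_transfo_xl_checkpoint_to_pytorch import (
    convert_transfo_xl_checkpoint_to_pytorch,
)

convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path="",                          # skip the model-weights branch
    transfo_xl_config_file="",
    pytorch_dump_folder_path="./transfo-xl-data/",  # receives the vocab and corpus torch files
    transfo_xl_dataset_file="./cache.pkl",          # pickled corpus from the original TF repo
)
```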
@ -3,31 +3,45 @@ Utilities for working with the local dataset cache.
 | 
			
		||||
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
 | 
			
		||||
Copyright by the AllenNLP authors.
 | 
			
		||||
"""
 | 
			
		||||
from __future__ import (absolute_import, division, print_function, unicode_literals)
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import sys
 | 
			
		||||
import json
 | 
			
		||||
import logging
 | 
			
		||||
import os
 | 
			
		||||
import shutil
 | 
			
		||||
import tempfile
 | 
			
		||||
import json
 | 
			
		||||
from urllib.parse import urlparse
 | 
			
		||||
from pathlib import Path
 | 
			
		||||
from typing import Optional, Tuple, Union, IO, Callable, Set
 | 
			
		||||
from hashlib import sha256
 | 
			
		||||
import fnmatch
 | 
			
		||||
from functools import wraps
 | 
			
		||||
 | 
			
		||||
from tqdm import tqdm
 | 
			
		||||
from hashlib import sha256
 | 
			
		||||
import sys
 | 
			
		||||
from io import open
 | 
			
		||||
 | 
			
		||||
import boto3
 | 
			
		||||
from botocore.exceptions import ClientError
 | 
			
		||||
import requests
 | 
			
		||||
from botocore.exceptions import ClientError
 | 
			
		||||
from tqdm import tqdm
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    from urllib.parse import urlparse
 | 
			
		||||
except ImportError:
 | 
			
		||||
    from urlparse import urlparse
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    from pathlib import Path
 | 
			
		||||
    PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
 | 
			
		||||
                                                   Path.home() / '.pytorch_pretrained_bert'))
 | 
			
		||||
except (AttributeError, ImportError):
 | 
			
		||||
    PYTORCH_PRETRAINED_BERT_CACHE = os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
 | 
			
		||||
                                              os.path.join(os.path.expanduser("~"), '.pytorch_pretrained_bert'))
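As an aside (an assumption about intended usage, not stated in the diff), the cache location read above can be redirected through the `PYTORCH_PRETRAINED_BERT_CACHE` environment variable; the path below is a placeholder.

```python
import os

# Must be set before pytorch_pretrained_bert.file_utils is imported,
# since the default cache path is computed at import time.
os.environ['PYTORCH_PRETRAINED_BERT_CACHE'] = '/data/bert_cache'  # placeholder path
```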
 | 
			
		||||
 | 
			
		||||
CONFIG_NAME = "config.json"
 | 
			
		||||
WEIGHTS_NAME = "pytorch_model.bin"
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name
 | 
			
		||||
 | 
			
		||||
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
 | 
			
		||||
                                               Path.home() / '.pytorch_pretrained_bert'))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def url_to_filename(url: str, etag: str = None) -> str:
 | 
			
		||||
def url_to_filename(url, etag=None):
 | 
			
		||||
    """
 | 
			
		||||
    Convert `url` into a hashed filename in a repeatable way.
 | 
			
		||||
    If `etag` is specified, append its hash to the url's, delimited
 | 
			
		||||
@ -45,25 +59,25 @@ def url_to_filename(url: str, etag: str = None) -> str:
 | 
			
		||||
    return filename
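The body of `url_to_filename` is elided by the diff; as a sketch of the scheme its docstring describes (a repeatable sha256 of the URL, with the etag hash appended when present), an equivalent computation might look like this:

```python
from hashlib import sha256

def url_to_filename_sketch(url, etag=None):
    # Repeatable filename: hash of the url, plus '.' + hash of the etag if one is given.
    filename = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        filename += '.' + sha256(etag.encode('utf-8')).hexdigest()
    return filename
```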
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
 | 
			
		||||
def filename_to_url(filename, cache_dir=None):
 | 
			
		||||
    """
 | 
			
		||||
    Return the url and etag (which may be ``None``) stored for `filename`.
 | 
			
		||||
    Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
 | 
			
		||||
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
 | 
			
		||||
    """
 | 
			
		||||
    if cache_dir is None:
 | 
			
		||||
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
 | 
			
		||||
    if isinstance(cache_dir, Path):
 | 
			
		||||
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
 | 
			
		||||
        cache_dir = str(cache_dir)
 | 
			
		||||
 | 
			
		||||
    cache_path = os.path.join(cache_dir, filename)
 | 
			
		||||
    if not os.path.exists(cache_path):
 | 
			
		||||
        raise FileNotFoundError("file {} not found".format(cache_path))
 | 
			
		||||
        raise EnvironmentError("file {} not found".format(cache_path))
 | 
			
		||||
 | 
			
		||||
    meta_path = cache_path + '.json'
 | 
			
		||||
    if not os.path.exists(meta_path):
 | 
			
		||||
        raise FileNotFoundError("file {} not found".format(meta_path))
 | 
			
		||||
        raise EnvironmentError("file {} not found".format(meta_path))
 | 
			
		||||
 | 
			
		||||
    with open(meta_path) as meta_file:
 | 
			
		||||
    with open(meta_path, encoding="utf-8") as meta_file:
 | 
			
		||||
        metadata = json.load(meta_file)
 | 
			
		||||
    url = metadata['url']
 | 
			
		||||
    etag = metadata['etag']
 | 
			
		||||
@ -71,7 +85,7 @@ def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[
 | 
			
		||||
    return url, etag
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:
 | 
			
		||||
def cached_path(url_or_filename, cache_dir=None):
 | 
			
		||||
    """
 | 
			
		||||
    Given something that might be a URL (or might be a local path),
 | 
			
		||||
    determine which. If it's a URL, download the file and cache it, and
 | 
			
		||||
@ -80,9 +94,9 @@ def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] =
 | 
			
		||||
    """
 | 
			
		||||
    if cache_dir is None:
 | 
			
		||||
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
 | 
			
		||||
    if isinstance(url_or_filename, Path):
 | 
			
		||||
    if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
 | 
			
		||||
        url_or_filename = str(url_or_filename)
 | 
			
		||||
    if isinstance(cache_dir, Path):
 | 
			
		||||
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
 | 
			
		||||
        cache_dir = str(cache_dir)
 | 
			
		||||
 | 
			
		||||
    parsed = urlparse(url_or_filename)
 | 
			
		||||
@ -95,13 +109,13 @@ def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] =
 | 
			
		||||
        return url_or_filename
 | 
			
		||||
    elif parsed.scheme == '':
 | 
			
		||||
        # File, but it doesn't exist.
 | 
			
		||||
        raise FileNotFoundError("file {} not found".format(url_or_filename))
 | 
			
		||||
        raise EnvironmentError("file {} not found".format(url_or_filename))
 | 
			
		||||
    else:
 | 
			
		||||
        # Something unknown
 | 
			
		||||
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
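A minimal usage sketch of `cached_path` (the archive URL is one that appears later in this diff; `cache_dir=None` falls back to the default cache, and the import path is assumed from the package layout):

```python
from pytorch_pretrained_bert.file_utils import cached_path

# Downloads on first use, then returns the cached local path on subsequent calls.
local_archive = cached_path(
    "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
    cache_dir=None,
)
print(local_archive)
```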
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def split_s3_path(url: str) -> Tuple[str, str]:
 | 
			
		||||
def split_s3_path(url):
 | 
			
		||||
    """Split a full s3 path into the bucket name and path."""
 | 
			
		||||
    parsed = urlparse(url)
 | 
			
		||||
    if not parsed.netloc or not parsed.path:
 | 
			
		||||
@ -114,19 +128,19 @@ def split_s3_path(url: str) -> Tuple[str, str]:
 | 
			
		||||
    return bucket_name, s3_path
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def s3_request(func: Callable):
 | 
			
		||||
def s3_request(func):
 | 
			
		||||
    """
 | 
			
		||||
    Wrapper function for s3 requests in order to create more helpful error
 | 
			
		||||
    messages.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    @wraps(func)
 | 
			
		||||
    def wrapper(url: str, *args, **kwargs):
 | 
			
		||||
    def wrapper(url, *args, **kwargs):
 | 
			
		||||
        try:
 | 
			
		||||
            return func(url, *args, **kwargs)
 | 
			
		||||
        except ClientError as exc:
 | 
			
		||||
            if int(exc.response["Error"]["Code"]) == 404:
 | 
			
		||||
                raise FileNotFoundError("file {} not found".format(url))
 | 
			
		||||
                raise EnvironmentError("file {} not found".format(url))
 | 
			
		||||
            else:
 | 
			
		||||
                raise
 | 
			
		||||
 | 
			
		||||
@ -134,7 +148,7 @@ def s3_request(func: Callable):
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@s3_request
 | 
			
		||||
def s3_etag(url: str) -> Optional[str]:
 | 
			
		||||
def s3_etag(url):
 | 
			
		||||
    """Check ETag on S3 object."""
 | 
			
		||||
    s3_resource = boto3.resource("s3")
 | 
			
		||||
    bucket_name, s3_path = split_s3_path(url)
 | 
			
		||||
@ -143,14 +157,14 @@ def s3_etag(url: str) -> Optional[str]:
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@s3_request
 | 
			
		||||
def s3_get(url: str, temp_file: IO) -> None:
 | 
			
		||||
def s3_get(url, temp_file):
 | 
			
		||||
    """Pull a file directly from S3."""
 | 
			
		||||
    s3_resource = boto3.resource("s3")
 | 
			
		||||
    bucket_name, s3_path = split_s3_path(url)
 | 
			
		||||
    s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def http_get(url: str, temp_file: IO) -> None:
 | 
			
		||||
def http_get(url, temp_file):
 | 
			
		||||
    req = requests.get(url, stream=True)
 | 
			
		||||
    content_length = req.headers.get('Content-Length')
 | 
			
		||||
    total = int(content_length) if content_length is not None else None
 | 
			
		||||
@ -162,33 +176,47 @@ def http_get(url: str, temp_file: IO) -> None:
 | 
			
		||||
    progress.close()
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
 | 
			
		||||
def get_from_cache(url, cache_dir=None):
 | 
			
		||||
    """
 | 
			
		||||
    Given a URL, look for the corresponding dataset in the local cache.
 | 
			
		||||
    If it's not there, download it. Then return the path to the cached file.
 | 
			
		||||
    """
 | 
			
		||||
    if cache_dir is None:
 | 
			
		||||
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
 | 
			
		||||
    if isinstance(cache_dir, Path):
 | 
			
		||||
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
 | 
			
		||||
        cache_dir = str(cache_dir)
 | 
			
		||||
 | 
			
		||||
    os.makedirs(cache_dir, exist_ok=True)
 | 
			
		||||
    if not os.path.exists(cache_dir):
 | 
			
		||||
        os.makedirs(cache_dir)
 | 
			
		||||
 | 
			
		||||
    # Get eTag to add to filename, if it exists.
 | 
			
		||||
    if url.startswith("s3://"):
 | 
			
		||||
        etag = s3_etag(url)
 | 
			
		||||
    else:
 | 
			
		||||
        response = requests.head(url, allow_redirects=True)
 | 
			
		||||
        if response.status_code != 200:
 | 
			
		||||
            raise IOError("HEAD request failed for url {} with status code {}"
 | 
			
		||||
                          .format(url, response.status_code))
 | 
			
		||||
        etag = response.headers.get("ETag")
 | 
			
		||||
        try:
 | 
			
		||||
            response = requests.head(url, allow_redirects=True)
 | 
			
		||||
            if response.status_code != 200:
 | 
			
		||||
                etag = None
 | 
			
		||||
            else:
 | 
			
		||||
                etag = response.headers.get("ETag")
 | 
			
		||||
        except EnvironmentError:
 | 
			
		||||
            etag = None
 | 
			
		||||
 | 
			
		||||
    if sys.version_info[0] == 2 and etag is not None:
 | 
			
		||||
        etag = etag.decode('utf-8')
 | 
			
		||||
    filename = url_to_filename(url, etag)
 | 
			
		||||
 | 
			
		||||
    # get cache path to put the file
 | 
			
		||||
    cache_path = os.path.join(cache_dir, filename)
 | 
			
		||||
 | 
			
		||||
    # If we don't have a connection (etag is None) and can't identify the file
 | 
			
		||||
    # try to get the last downloaded one
 | 
			
		||||
    if not os.path.exists(cache_path) and etag is None:
 | 
			
		||||
        matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
 | 
			
		||||
        matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
 | 
			
		||||
        if matching_files:
 | 
			
		||||
            cache_path = os.path.join(cache_dir, matching_files[-1])
 | 
			
		||||
 | 
			
		||||
    if not os.path.exists(cache_path):
 | 
			
		||||
        # Download to temporary file, then copy to cache dir once finished.
 | 
			
		||||
        # Otherwise you get corrupt cache entries if the download gets interrupted.
 | 
			
		||||
@ -214,14 +242,17 @@ def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
 | 
			
		||||
            meta = {'url': url, 'etag': etag}
 | 
			
		||||
            meta_path = cache_path + '.json'
 | 
			
		||||
            with open(meta_path, 'w') as meta_file:
 | 
			
		||||
                json.dump(meta, meta_file)
 | 
			
		||||
                output_string = json.dumps(meta)
 | 
			
		||||
                if sys.version_info[0] == 2 and isinstance(output_string, str):
 | 
			
		||||
                    output_string = unicode(output_string, 'utf-8')  # The beauty of python 2
 | 
			
		||||
                meta_file.write(output_string)
 | 
			
		||||
 | 
			
		||||
            logger.info("removing temp file %s", temp_file.name)
 | 
			
		||||
 | 
			
		||||
    return cache_path
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def read_set_from_file(filename: str) -> Set[str]:
 | 
			
		||||
def read_set_from_file(filename):
 | 
			
		||||
    '''
 | 
			
		||||
    Extract a de-duped collection (set) of text from a file.
 | 
			
		||||
    Expected file format is one item per line.
 | 
			
		||||
@ -233,7 +264,7 @@ def read_set_from_file(filename: str) -> Set[str]:
 | 
			
		||||
    return collection
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_file_extension(path: str, dot=True, lower: bool = True):
 | 
			
		||||
def get_file_extension(path, dot=True, lower=True):
 | 
			
		||||
    ext = os.path.splitext(path)[1]
 | 
			
		||||
    ext = ext if dot else ext[1:]
 | 
			
		||||
    return ext.lower() if lower else ext
 | 
			
		||||
 | 
			
		||||
@ -1,5 +1,5 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
 | 
			
		||||
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
@ -15,24 +15,24 @@
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
"""PyTorch BERT model."""
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import
 | 
			
		||||
from __future__ import division
 | 
			
		||||
from __future__ import print_function
 | 
			
		||||
from __future__ import absolute_import, division, print_function, unicode_literals
 | 
			
		||||
 | 
			
		||||
import os
 | 
			
		||||
import copy
 | 
			
		||||
import json
 | 
			
		||||
import math
 | 
			
		||||
import logging
 | 
			
		||||
import math
 | 
			
		||||
import os
 | 
			
		||||
import shutil
 | 
			
		||||
import tarfile
 | 
			
		||||
import tempfile
 | 
			
		||||
import shutil
 | 
			
		||||
import sys
 | 
			
		||||
from io import open
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
from torch import nn
 | 
			
		||||
from torch.nn import CrossEntropyLoss
 | 
			
		||||
 | 
			
		||||
from .file_utils import cached_path
 | 
			
		||||
from .file_utils import cached_path, WEIGHTS_NAME, CONFIG_NAME
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
@ -45,13 +45,81 @@ PRETRAINED_MODEL_ARCHIVE_MAP = {
 | 
			
		||||
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
 | 
			
		||||
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
 | 
			
		||||
}
 | 
			
		||||
CONFIG_NAME = 'bert_config.json'
 | 
			
		||||
WEIGHTS_NAME = 'pytorch_model.bin'
 | 
			
		||||
BERT_CONFIG_NAME = 'bert_config.json'
 | 
			
		||||
TF_WEIGHTS_NAME = 'model.ckpt'
 | 
			
		||||
 | 
			
		||||
def load_tf_weights_in_bert(model, tf_checkpoint_path):
 | 
			
		||||
    """ Load tf checkpoints in a pytorch model
 | 
			
		||||
    """
 | 
			
		||||
    try:
 | 
			
		||||
        import re
 | 
			
		||||
        import numpy as np
 | 
			
		||||
        import tensorflow as tf
 | 
			
		||||
    except ImportError:
 | 
			
		||||
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
 | 
			
		||||
            "https://www.tensorflow.org/install/ for installation instructions.")
 | 
			
		||||
        raise
 | 
			
		||||
    tf_path = os.path.abspath(tf_checkpoint_path)
 | 
			
		||||
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
 | 
			
		||||
    # Load weights from TF model
 | 
			
		||||
    init_vars = tf.train.list_variables(tf_path)
 | 
			
		||||
    names = []
 | 
			
		||||
    arrays = []
 | 
			
		||||
    for name, shape in init_vars:
 | 
			
		||||
        print("Loading TF weight {} with shape {}".format(name, shape))
 | 
			
		||||
        array = tf.train.load_variable(tf_path, name)
 | 
			
		||||
        names.append(name)
 | 
			
		||||
        arrays.append(array)
 | 
			
		||||
 | 
			
		||||
    for name, array in zip(names, arrays):
 | 
			
		||||
        name = name.split('/')
 | 
			
		||||
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
 | 
			
		||||
        # which are not required for using pretrained model
 | 
			
		||||
        if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
 | 
			
		||||
            print("Skipping {}".format("/".join(name)))
 | 
			
		||||
            continue
 | 
			
		||||
        pointer = model
 | 
			
		||||
        for m_name in name:
 | 
			
		||||
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
 | 
			
		||||
                l = re.split(r'_(\d+)', m_name)
 | 
			
		||||
            else:
 | 
			
		||||
                l = [m_name]
 | 
			
		||||
            if l[0] == 'kernel' or l[0] == 'gamma':
 | 
			
		||||
                pointer = getattr(pointer, 'weight')
 | 
			
		||||
            elif l[0] == 'output_bias' or l[0] == 'beta':
 | 
			
		||||
                pointer = getattr(pointer, 'bias')
 | 
			
		||||
            elif l[0] == 'output_weights':
 | 
			
		||||
                pointer = getattr(pointer, 'weight')
 | 
			
		||||
            elif l[0] == 'squad':
 | 
			
		||||
                pointer = getattr(pointer, 'classifier')
 | 
			
		||||
            else:
 | 
			
		||||
                try:
 | 
			
		||||
                    pointer = getattr(pointer, l[0])
 | 
			
		||||
                except AttributeError:
 | 
			
		||||
                    print("Skipping {}".format("/".join(name)))
 | 
			
		||||
                    continue
 | 
			
		||||
            if len(l) >= 2:
 | 
			
		||||
                num = int(l[1])
 | 
			
		||||
                pointer = pointer[num]
 | 
			
		||||
        if m_name[-11:] == '_embeddings':
 | 
			
		||||
            pointer = getattr(pointer, 'weight')
 | 
			
		||||
        elif m_name == 'kernel':
 | 
			
		||||
            array = np.transpose(array)
 | 
			
		||||
        try:
 | 
			
		||||
            assert pointer.shape == array.shape
 | 
			
		||||
        except AssertionError as e:
 | 
			
		||||
            e.args += (pointer.shape, array.shape)
 | 
			
		||||
            raise
 | 
			
		||||
        print("Initialize PyTorch weight {}".format(name))
 | 
			
		||||
        pointer.data = torch.from_numpy(array)
 | 
			
		||||
    return model
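To make the name-mapping loop above more concrete, here is a tiny sketch (the TF variable name is made up for illustration) of how a variable name is split into attribute lookups:

```python
import re

tf_name = "bert/encoder/layer_3/attention/self/query/kernel"  # illustrative only
for m_name in tf_name.split('/'):
    if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
        l = re.split(r'_(\d+)', m_name)   # e.g. 'layer_3' -> ['layer', '3', '']
    else:
        l = [m_name]
    print(l)
# 'kernel' maps to the (transposed) 'weight', so this variable would end up in
# model.bert.encoder.layer[3].attention.self.query.weight
```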
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def gelu(x):
 | 
			
		||||
    """Implementation of the gelu activation function.
 | 
			
		||||
        For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
 | 
			
		||||
        0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
 | 
			
		||||
        Also see https://arxiv.org/abs/1606.08415
 | 
			
		||||
    """
 | 
			
		||||
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
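As a quick, illustrative check of the docstring's remark that OpenAI GPT's tanh-based gelu differs only slightly from the erf form used here:

```python
import math
import torch

x = torch.linspace(-3.0, 3.0, steps=13)
gelu_erf = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
gelu_tanh = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
print((gelu_erf - gelu_tanh).abs().max())  # small everywhere (well below 1e-2)
```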
 | 
			
		||||
 | 
			
		||||
@ -102,7 +170,8 @@ class BertConfig(object):
 | 
			
		||||
            initializer_range: The stddev of the truncated_normal_initializer for
 | 
			
		||||
                initializing all weight matrices.
 | 
			
		||||
        """
 | 
			
		||||
        if isinstance(vocab_size_or_config_json_file, str):
 | 
			
		||||
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
 | 
			
		||||
                        and isinstance(vocab_size_or_config_json_file, unicode)):
 | 
			
		||||
            with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
 | 
			
		||||
                json_config = json.loads(reader.read())
 | 
			
		||||
            for key, value in json_config.items():
 | 
			
		||||
@ -150,10 +219,15 @@ class BertConfig(object):
 | 
			
		||||
        """Serializes this instance to a JSON string."""
 | 
			
		||||
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
 | 
			
		||||
 | 
			
		||||
    def to_json_file(self, json_file_path):
 | 
			
		||||
        """ Save this instance to a json file."""
 | 
			
		||||
        with open(json_file_path, "w", encoding='utf-8') as writer:
 | 
			
		||||
            writer.write(self.to_json_string())
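A short usage sketch for the new `to_json_file` helper (the vocab size and file name are illustrative, and assume the integer-vocab-size constructor path of `BertConfig`):

```python
config = BertConfig(vocab_size_or_config_json_file=30522)
config.to_json_file("bert_config_copy.json")              # writes the to_json_string() output
roundtrip = BertConfig.from_json_file("bert_config_copy.json")
```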
 | 
			
		||||
 | 
			
		||||
try:
 | 
			
		||||
    from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
 | 
			
		||||
except ImportError:
 | 
			
		||||
    print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
 | 
			
		||||
    logger.info("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex .")
 | 
			
		||||
    class BertLayerNorm(nn.Module):
 | 
			
		||||
        def __init__(self, hidden_size, eps=1e-12):
 | 
			
		||||
            """Construct a layernorm module in the TF style (epsilon inside the square root).
 | 
			
		||||
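The diff truncates the fallback `BertLayerNorm` at this point; as a sketch of what a TF-style layer norm (epsilon inside the square root) computes, under the constructor shown above:

```python
import torch
import torch.nn as nn

class TFStyleLayerNormSketch(nn.Module):
    # Illustrative re-statement of the idea, not the file's exact code.
    def __init__(self, hidden_size, eps=1e-12):
        super(TFStyleLayerNormSketch, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)  # epsilon inside the sqrt
        return self.weight * x + self.bias
```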
@ -174,7 +248,7 @@ class BertEmbeddings(nn.Module):
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(BertEmbeddings, self).__init__()
 | 
			
		||||
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
 | 
			
		||||
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
 | 
			
		||||
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
 | 
			
		||||
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
 | 
			
		||||
 | 
			
		||||
@ -281,8 +355,10 @@ class BertIntermediate(nn.Module):
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(BertIntermediate, self).__init__()
 | 
			
		||||
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
 | 
			
		||||
        self.intermediate_act_fn = ACT2FN[config.hidden_act] \
 | 
			
		||||
            if isinstance(config.hidden_act, str) else config.hidden_act
 | 
			
		||||
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
 | 
			
		||||
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
 | 
			
		||||
        else:
 | 
			
		||||
            self.intermediate_act_fn = config.hidden_act
 | 
			
		||||
 | 
			
		||||
    def forward(self, hidden_states):
 | 
			
		||||
        hidden_states = self.dense(hidden_states)
 | 
			
		||||
@ -354,8 +430,10 @@ class BertPredictionHeadTransform(nn.Module):
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(BertPredictionHeadTransform, self).__init__()
 | 
			
		||||
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
 | 
			
		||||
        self.transform_act_fn = ACT2FN[config.hidden_act] \
 | 
			
		||||
            if isinstance(config.hidden_act, str) else config.hidden_act
 | 
			
		||||
        if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
 | 
			
		||||
            self.transform_act_fn = ACT2FN[config.hidden_act]
 | 
			
		||||
        else:
 | 
			
		||||
            self.transform_act_fn = config.hidden_act
 | 
			
		||||
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
 | 
			
		||||
 | 
			
		||||
    def forward(self, hidden_states):
 | 
			
		||||
@ -416,12 +494,12 @@ class BertPreTrainingHeads(nn.Module):
 | 
			
		||||
        return prediction_scores, seq_relationship_score
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class PreTrainedBertModel(nn.Module):
 | 
			
		||||
class BertPreTrainedModel(nn.Module):
 | 
			
		||||
    """ An abstract class to handle weights initialization and
 | 
			
		||||
        a simple interface for downloading and loading pretrained models.
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self, config, *inputs, **kwargs):
 | 
			
		||||
        super(PreTrainedBertModel, self).__init__()
 | 
			
		||||
        super(BertPreTrainedModel, self).__init__()
 | 
			
		||||
        if not isinstance(config, BertConfig):
 | 
			
		||||
            raise ValueError(
 | 
			
		||||
                "Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
 | 
			
		||||
@ -439,46 +517,59 @@ class PreTrainedBertModel(nn.Module):
 | 
			
		||||
            # cf https://github.com/pytorch/pytorch/pull/5617
 | 
			
		||||
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
 | 
			
		||||
        elif isinstance(module, BertLayerNorm):
 | 
			
		||||
            module.bias.data.normal_(mean=0.0, std=self.config.initializer_range)
 | 
			
		||||
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
 | 
			
		||||
            module.bias.data.zero_()
 | 
			
		||||
            module.weight.data.fill_(1.0)
 | 
			
		||||
        if isinstance(module, nn.Linear) and module.bias is not None:
 | 
			
		||||
            module.bias.data.zero_()
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, *inputs, **kwargs):
 | 
			
		||||
    def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
 | 
			
		||||
        """
 | 
			
		||||
        Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
 | 
			
		||||
        Instantiate a BertPreTrainedModel from a pre-trained model file or a pytorch state dict.
 | 
			
		||||
        Download and cache the pre-trained model file if needed.
 | 
			
		||||
 | 
			
		||||
        Params:
 | 
			
		||||
            pretrained_model_name: either:
 | 
			
		||||
            pretrained_model_name_or_path: either:
 | 
			
		||||
                - a str with the name of a pre-trained model to load selected in the list of:
 | 
			
		||||
                    . `bert-base-uncased`
 | 
			
		||||
                    . `bert-large-uncased`
 | 
			
		||||
                    . `bert-base-cased`
 | 
			
		||||
                    . `bert-base-multilingual`
 | 
			
		||||
                    . `bert-large-cased`
 | 
			
		||||
                    . `bert-base-multilingual-uncased`
 | 
			
		||||
                    . `bert-base-multilingual-cased`
 | 
			
		||||
                    . `bert-base-chinese`
 | 
			
		||||
                - a path or url to a pretrained model archive containing:
 | 
			
		||||
                    . `bert_config.json` a configuration file for the model
 | 
			
		||||
                    . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
 | 
			
		||||
                - a path or url to a pretrained model archive containing:
 | 
			
		||||
                    . `bert_config.json` a configuration file for the model
 | 
			
		||||
                    . `model.ckpt` a TensorFlow checkpoint
 | 
			
		||||
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
 | 
			
		||||
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
 | 
			
		||||
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
 | 
			
		||||
            *inputs, **kwargs: additional input for the specific Bert class
 | 
			
		||||
                (ex: num_labels for BertForSequenceClassification)
 | 
			
		||||
        """
 | 
			
		||||
        if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP:
 | 
			
		||||
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name]
 | 
			
		||||
        state_dict = kwargs.get('state_dict', None)
 | 
			
		||||
        kwargs.pop('state_dict', None)
 | 
			
		||||
        cache_dir = kwargs.get('cache_dir', None)
 | 
			
		||||
        kwargs.pop('cache_dir', None)
 | 
			
		||||
        from_tf = kwargs.get('from_tf', False)
 | 
			
		||||
        kwargs.pop('from_tf', None)
 | 
			
		||||
 | 
			
		||||
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
 | 
			
		||||
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
 | 
			
		||||
        else:
 | 
			
		||||
            archive_file = pretrained_model_name
 | 
			
		||||
            archive_file = pretrained_model_name_or_path
 | 
			
		||||
        # redirect to the cache, if necessary
 | 
			
		||||
        try:
 | 
			
		||||
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
 | 
			
		||||
        except FileNotFoundError:
 | 
			
		||||
        except EnvironmentError:
 | 
			
		||||
            logger.error(
 | 
			
		||||
                "Model name '{}' was not found in model name list ({}). "
 | 
			
		||||
                "We assumed '{}' was a path or url but couldn't find any file "
 | 
			
		||||
                "associated to this path or url.".format(
 | 
			
		||||
                    pretrained_model_name,
 | 
			
		||||
                    pretrained_model_name_or_path,
 | 
			
		||||
                    ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()),
 | 
			
		||||
                    archive_file))
 | 
			
		||||
            return None
 | 
			
		||||
@ -488,7 +579,7 @@ class PreTrainedBertModel(nn.Module):
 | 
			
		||||
            logger.info("loading archive file {} from cache at {}".format(
 | 
			
		||||
                archive_file, resolved_archive_file))
 | 
			
		||||
        tempdir = None
 | 
			
		||||
        if os.path.isdir(resolved_archive_file):
 | 
			
		||||
        if os.path.isdir(resolved_archive_file) or from_tf:
 | 
			
		||||
            serialization_dir = resolved_archive_file
 | 
			
		||||
        else:
 | 
			
		||||
            # Extract archive to temp dir
 | 
			
		||||
@ -500,14 +591,24 @@ class PreTrainedBertModel(nn.Module):
 | 
			
		||||
            serialization_dir = tempdir
 | 
			
		||||
        # Load config
 | 
			
		||||
        config_file = os.path.join(serialization_dir, CONFIG_NAME)
 | 
			
		||||
        if not os.path.exists(config_file):
 | 
			
		||||
            # Backward compatibility with old naming format
 | 
			
		||||
            config_file = os.path.join(serialization_dir, BERT_CONFIG_NAME)
 | 
			
		||||
        config = BertConfig.from_json_file(config_file)
 | 
			
		||||
        logger.info("Model config {}".format(config))
 | 
			
		||||
        # Instantiate model.
 | 
			
		||||
        model = cls(config, *inputs, **kwargs)
 | 
			
		||||
        if state_dict is None:
 | 
			
		||||
        if state_dict is None and not from_tf:
 | 
			
		||||
            weights_path = os.path.join(serialization_dir, WEIGHTS_NAME)
 | 
			
		||||
            state_dict = torch.load(weights_path)
 | 
			
		||||
 | 
			
		||||
            state_dict = torch.load(weights_path, map_location='cpu')
 | 
			
		||||
        if tempdir:
 | 
			
		||||
            # Clean up temp dir
 | 
			
		||||
            shutil.rmtree(tempdir)
 | 
			
		||||
        if from_tf:
 | 
			
		||||
            # Directly load from a TensorFlow checkpoint
 | 
			
		||||
            weights_path = os.path.join(serialization_dir, TF_WEIGHTS_NAME)
 | 
			
		||||
            return load_tf_weights_in_bert(model, weights_path)
 | 
			
		||||
        # Load from a PyTorch state_dict
 | 
			
		||||
        old_keys = []
 | 
			
		||||
        new_keys = []
 | 
			
		||||
        for key in state_dict.keys():
 | 
			
		||||
@ -538,20 +639,23 @@ class PreTrainedBertModel(nn.Module):
 | 
			
		||||
            for name, child in module._modules.items():
 | 
			
		||||
                if child is not None:
 | 
			
		||||
                    load(child, prefix + name + '.')
 | 
			
		||||
        load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
 | 
			
		||||
        start_prefix = ''
 | 
			
		||||
        if not hasattr(model, 'bert') and any(s.startswith('bert.') for s in state_dict.keys()):
 | 
			
		||||
            start_prefix = 'bert.'
 | 
			
		||||
        load(model, prefix=start_prefix)
 | 
			
		||||
        if len(missing_keys) > 0:
 | 
			
		||||
            logger.info("Weights of {} not initialized from pretrained model: {}".format(
 | 
			
		||||
                model.__class__.__name__, missing_keys))
 | 
			
		||||
        if len(unexpected_keys) > 0:
 | 
			
		||||
            logger.info("Weights from pretrained model not used in {}: {}".format(
 | 
			
		||||
                model.__class__.__name__, unexpected_keys))
 | 
			
		||||
        if tempdir:
 | 
			
		||||
            # Clean up temp dir
 | 
			
		||||
            shutil.rmtree(tempdir)
 | 
			
		||||
        if len(error_msgs) > 0:
 | 
			
		||||
            raise RuntimeError('Error(s) in loading state_dict for {}:\n\t{}'.format(
 | 
			
		||||
                               model.__class__.__name__, "\n\t".join(error_msgs)))
 | 
			
		||||
        return model
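A usage sketch for `from_pretrained` with the keyword arguments the new signature pulls out of `**kwargs` (the cache directory and checkpoint directory are placeholders):

```python
# Load by shortcut name, overriding the cache location; num_labels is passed through
# to the task-specific constructor (here BertForSequenceClassification).
model = BertForSequenceClassification.from_pretrained(
    'bert-base-uncased',
    cache_dir='/tmp/bert_cache',   # placeholder
    num_labels=2,
)

# Or load weights converted on the fly from a local TensorFlow checkpoint directory:
model_tf = BertForPreTraining.from_pretrained('./bert-tf-checkpoint-dir/', from_tf=True)
```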
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BertModel(PreTrainedBertModel):
 | 
			
		||||
class BertModel(BertPreTrainedModel):
 | 
			
		||||
    """BERT model ("Bidirectional Embedding Representations from a Transformer").
 | 
			
		||||
 | 
			
		||||
    Params:
 | 
			
		||||
@ -579,7 +683,7 @@ class BertModel(PreTrainedBertModel):
 | 
			
		||||
                to the last attention block of shape [batch_size, sequence_length, hidden_size],
 | 
			
		||||
        `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
 | 
			
		||||
            classifier pretrained on top of the hidden state associated to the first character of the
 | 
			
		||||
            input (`CLF`) to train on the Next-Sentence task (see BERT's paper).
 | 
			
		||||
            input (`CLS`) to train on the Next-Sentence task (see BERT's paper).
 | 
			
		||||
 | 
			
		||||
    Example usage:
 | 
			
		||||
    ```python
 | 
			
		||||
@ -634,7 +738,7 @@ class BertModel(PreTrainedBertModel):
 | 
			
		||||
        return encoded_layers, pooled_output
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BertForPreTraining(PreTrainedBertModel):
 | 
			
		||||
class BertForPreTraining(BertPreTrainedModel):
 | 
			
		||||
    """BERT model with pre-training heads.
 | 
			
		||||
    This module comprises the BERT model followed by the two pre-training heads:
 | 
			
		||||
        - the masked language modeling head, and
 | 
			
		||||
@ -654,10 +758,10 @@ class BertForPreTraining(PreTrainedBertModel):
 | 
			
		||||
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
 | 
			
		||||
            input sequence length in the current batch. It's the mask that we typically use for attention when
 | 
			
		||||
            a batch has varying length sentences.
 | 
			
		||||
        `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
 | 
			
		||||
        `masked_lm_labels`: optional masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
 | 
			
		||||
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
 | 
			
		||||
            is only computed for the labels set in [0, ..., vocab_size]
 | 
			
		||||
        `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
 | 
			
		||||
        `next_sentence_label`: optional next sentence classification loss: torch.LongTensor of shape [batch_size]
 | 
			
		||||
            with indices selected in [0, 1].
 | 
			
		||||
            0 => next sentence is the continuation, 1 => next sentence is a random sentence.
 | 
			
		||||
 | 
			
		||||
@ -705,7 +809,7 @@ class BertForPreTraining(PreTrainedBertModel):
 | 
			
		||||
            return prediction_scores, seq_relationship_score
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BertForMaskedLM(PreTrainedBertModel):
 | 
			
		||||
class BertForMaskedLM(BertPreTrainedModel):
 | 
			
		||||
    """BERT model with the masked language modeling head.
 | 
			
		||||
    This module comprises the BERT model followed by the masked language modeling head.
 | 
			
		||||
 | 
			
		||||
@ -728,7 +832,7 @@ class BertForMaskedLM(PreTrainedBertModel):
 | 
			
		||||
            is only computed for the labels set in [0, ..., vocab_size]
 | 
			
		||||
 | 
			
		||||
    Outputs:
 | 
			
		||||
        if `masked_lm_labels` is `None`:
 | 
			
		||||
        if `masked_lm_labels` is  not `None`:
 | 
			
		||||
            Outputs the masked language modeling loss.
 | 
			
		||||
        if `masked_lm_labels` is `None`:
 | 
			
		||||
            Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size].
 | 
			
		||||
@ -766,7 +870,7 @@ class BertForMaskedLM(PreTrainedBertModel):
 | 
			
		||||
            return prediction_scores
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BertForNextSentencePrediction(PreTrainedBertModel):
 | 
			
		||||
class BertForNextSentencePrediction(BertPreTrainedModel):
 | 
			
		||||
    """BERT model with next sentence prediction head.
 | 
			
		||||
    This module comprises the BERT model followed by the next sentence classification head.
 | 
			
		||||
 | 
			
		||||
@ -828,7 +932,7 @@ class BertForNextSentencePrediction(PreTrainedBertModel):
 | 
			
		||||
            return seq_relationship_score
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BertForSequenceClassification(PreTrainedBertModel):
 | 
			
		||||
class BertForSequenceClassification(BertPreTrainedModel):
 | 
			
		||||
    """BERT model for classification.
 | 
			
		||||
    This module is composed of the BERT model with a linear layer on top of
 | 
			
		||||
    the pooled output.
 | 
			
		||||
@ -839,7 +943,7 @@ class BertForSequenceClassification(PreTrainedBertModel):
 | 
			
		||||
 | 
			
		||||
    Inputs:
 | 
			
		||||
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
 | 
			
		||||
            with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts
 | 
			
		||||
            with the word token indices in the vocabulary. Items in the batch should begin with the special "CLS" token. (see the tokens preprocessing logic in the scripts
 | 
			
		||||
            `extract_features.py`, `run_classifier.py` and `run_squad.py`)
 | 
			
		||||
        `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
 | 
			
		||||
            types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
 | 
			
		||||
@ -873,7 +977,7 @@ class BertForSequenceClassification(PreTrainedBertModel):
 | 
			
		||||
    logits = model(input_ids, token_type_ids, input_mask)
 | 
			
		||||
    ```
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self, config, num_labels=2):
 | 
			
		||||
    def __init__(self, config, num_labels):
 | 
			
		||||
        super(BertForSequenceClassification, self).__init__(config)
 | 
			
		||||
        self.num_labels = num_labels
 | 
			
		||||
        self.bert = BertModel(config)
 | 
			
		||||
@ -894,7 +998,7 @@ class BertForSequenceClassification(PreTrainedBertModel):
 | 
			
		||||
            return logits
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BertForMultipleChoice(PreTrainedBertModel):
 | 
			
		||||
class BertForMultipleChoice(BertPreTrainedModel):
 | 
			
		||||
    """BERT model for multiple choice tasks.
 | 
			
		||||
    This module is composed of the BERT model with a linear layer on top of
 | 
			
		||||
    the pooled output.
 | 
			
		||||
@ -938,7 +1042,7 @@ class BertForMultipleChoice(PreTrainedBertModel):
 | 
			
		||||
    logits = model(input_ids, token_type_ids, input_mask)
 | 
			
		||||
    ```
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self, config, num_choices=2):
 | 
			
		||||
    def __init__(self, config, num_choices):
 | 
			
		||||
        super(BertForMultipleChoice, self).__init__(config)
 | 
			
		||||
        self.num_choices = num_choices
 | 
			
		||||
        self.bert = BertModel(config)
 | 
			
		||||
@ -948,8 +1052,8 @@ class BertForMultipleChoice(PreTrainedBertModel):
 | 
			
		||||
 | 
			
		||||
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None):
 | 
			
		||||
        flat_input_ids = input_ids.view(-1, input_ids.size(-1))
 | 
			
		||||
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
 | 
			
		||||
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1))
 | 
			
		||||
        flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
 | 
			
		||||
        flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
 | 
			
		||||
        _, pooled_output = self.bert(flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False)
 | 
			
		||||
        pooled_output = self.dropout(pooled_output)
 | 
			
		||||
        logits = self.classifier(pooled_output)
 | 
			
		||||
@ -963,7 +1067,7 @@ class BertForMultipleChoice(PreTrainedBertModel):
 | 
			
		||||
            return reshaped_logits
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BertForTokenClassification(PreTrainedBertModel):
 | 
			
		||||
class BertForTokenClassification(BertPreTrainedModel):
 | 
			
		||||
    """BERT model for token-level classification.
 | 
			
		||||
    This module is composed of the BERT model with a linear layer on top of
 | 
			
		||||
    the full hidden state of the last layer.
 | 
			
		||||
@ -983,7 +1087,7 @@ class BertForTokenClassification(PreTrainedBertModel):
 | 
			
		||||
            selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
 | 
			
		||||
            input sequence length in the current batch. It's the mask that we typically use for attention when
 | 
			
		||||
            a batch has varying length sentences.
 | 
			
		||||
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size]
 | 
			
		||||
        `labels`: labels for the classification output: torch.LongTensor of shape [batch_size, sequence_length]
 | 
			
		||||
            with indices selected in [0, ..., num_labels].
 | 
			
		||||
 | 
			
		||||
    Outputs:
 | 
			
		||||
@ -1008,7 +1112,7 @@ class BertForTokenClassification(PreTrainedBertModel):
 | 
			
		||||
    logits = model(input_ids, token_type_ids, input_mask)
 | 
			
		||||
    ```
 | 
			
		||||
    """
 | 
			
		||||
    def __init__(self, config, num_labels=2):
 | 
			
		||||
    def __init__(self, config, num_labels):
 | 
			
		||||
        super(BertForTokenClassification, self).__init__(config)
 | 
			
		||||
        self.num_labels = num_labels
 | 
			
		||||
        self.bert = BertModel(config)
 | 
			
		||||
@ -1023,27 +1127,26 @@ class BertForTokenClassification(PreTrainedBertModel):
 | 
			
		||||
 | 
			
		||||
        if labels is not None:
 | 
			
		||||
            loss_fct = CrossEntropyLoss()
 | 
			
		||||
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
 | 
			
		||||
            # Only keep active parts of the loss
 | 
			
		||||
            if attention_mask is not None:
 | 
			
		||||
                active_loss = attention_mask.view(-1) == 1
 | 
			
		||||
                active_logits = logits.view(-1, self.num_labels)[active_loss]
 | 
			
		||||
                active_labels = labels.view(-1)[active_loss]
 | 
			
		||||
                loss = loss_fct(active_logits, active_labels)
 | 
			
		||||
            else:
 | 
			
		||||
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
 | 
			
		||||
            return loss
 | 
			
		||||
        else:
 | 
			
		||||
            return logits
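A toy illustration of the "active parts of the loss" masking introduced above (shapes and values are made up):

```python
import torch
from torch.nn import CrossEntropyLoss

num_labels = 3
logits = torch.randn(2, 4, num_labels)                  # [batch, seq_len, num_labels]
labels = torch.randint(0, num_labels, (2, 4))
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])

active_loss = attention_mask.view(-1) == 1              # keep only real (non-padding) tokens
active_logits = logits.view(-1, num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = CrossEntropyLoss()(active_logits, active_labels)
```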
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class BertForQuestionAnswering(PreTrainedBertModel):
 | 
			
		||||
class BertForQuestionAnswering(BertPreTrainedModel):
 | 
			
		||||
    """BERT model for Question Answering (span extraction).
 | 
			
		||||
    This module is composed of the BERT model with a linear layer on top of
 | 
			
		||||
    the sequence output that computes start_logits and end_logits
 | 
			
		||||
 | 
			
		||||
    Params:
 | 
			
		||||
        `config`: either
 | 
			
		||||
            - a BertConfig class instance with the configuration to build a new model, or
 | 
			
		||||
            - a str with the name of a pre-trained model to load selected in the list of:
 | 
			
		||||
                . `bert-base-uncased`
 | 
			
		||||
                . `bert-large-uncased`
 | 
			
		||||
                . `bert-base-cased`
 | 
			
		||||
                . `bert-base-multilingual`
 | 
			
		||||
                . `bert-base-chinese`
 | 
			
		||||
                The pre-trained model will be downloaded and cached if needed.
 | 
			
		||||
        `config`: a BertConfig class instance with the configuration to build a new model.
 | 
			
		||||
 | 
			
		||||
    Inputs:
 | 
			
		||||
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
 | 
			
		||||
 | 
			
		||||
							
								
								
									
711  pytorch_pretrained_bert/modeling_gpt2.py  (new file)
@@ -0,0 +1,711 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
"""PyTorch OpenAI GPT-2 model."""
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import, division, print_function, unicode_literals
 | 
			
		||||
 | 
			
		||||
import collections
 | 
			
		||||
import copy
 | 
			
		||||
import json
 | 
			
		||||
import logging
 | 
			
		||||
import math
 | 
			
		||||
import os
 | 
			
		||||
import shutil
 | 
			
		||||
import tarfile
 | 
			
		||||
import tempfile
 | 
			
		||||
import sys
 | 
			
		||||
from io import open
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
import torch.nn as nn
 | 
			
		||||
from torch.nn import CrossEntropyLoss
 | 
			
		||||
from torch.nn.parameter import Parameter
 | 
			
		||||
 | 
			
		||||
from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
 | 
			
		||||
from .modeling import BertLayerNorm as LayerNorm
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
PRETRAINED_MODEL_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-pytorch_model.bin"}
 | 
			
		||||
PRETRAINED_CONFIG_ARCHIVE_MAP = {"gpt2": "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-config.json"}
 | 
			
		||||
 | 
			
		||||
def load_tf_weights_in_gpt2(model, gpt2_checkpoint_path):
 | 
			
		||||
    """ Load tf checkpoints in a pytorch model
 | 
			
		||||
    """
 | 
			
		||||
    try:
 | 
			
		||||
        import re
 | 
			
		||||
        import numpy as np
 | 
			
		||||
        import tensorflow as tf
 | 
			
		||||
    except ImportError:
 | 
			
		||||
        print("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
 | 
			
		||||
            "https://www.tensorflow.org/install/ for installation instructions.")
 | 
			
		||||
        raise
 | 
			
		||||
    tf_path = os.path.abspath(gpt2_checkpoint_path)
 | 
			
		||||
    print("Converting TensorFlow checkpoint from {}".format(tf_path))
 | 
			
		||||
    # Load weights from TF model
 | 
			
		||||
    init_vars = tf.train.list_variables(tf_path)
 | 
			
		||||
    names = []
 | 
			
		||||
    arrays = []
 | 
			
		||||
    for name, shape in init_vars:
 | 
			
		||||
        print("Loading TF weight {} with shape {}".format(name, shape))
 | 
			
		||||
        array = tf.train.load_variable(tf_path, name)
 | 
			
		||||
        names.append(name)
 | 
			
		||||
        arrays.append(array.squeeze())
 | 
			
		||||
 | 
			
		||||
    for name, array in zip(names, arrays):
 | 
			
		||||
        name = name[6:]  # skip "model/"
 | 
			
		||||
        name = name.split('/')
 | 
			
		||||
        pointer = model
 | 
			
		||||
        for m_name in name:
 | 
			
		||||
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
 | 
			
		||||
                l = re.split(r'(\d+)', m_name)
 | 
			
		||||
            else:
 | 
			
		||||
                l = [m_name]
 | 
			
		||||
            if l[0] == 'w' or l[0] == 'g':
 | 
			
		||||
                pointer = getattr(pointer, 'weight')
 | 
			
		||||
            elif l[0] == 'b':
 | 
			
		||||
                pointer = getattr(pointer, 'bias')
 | 
			
		||||
            elif l[0] == 'wpe' or l[0] == 'wte':
 | 
			
		||||
                pointer = getattr(pointer, l[0])
 | 
			
		||||
                pointer = getattr(pointer, 'weight')
 | 
			
		||||
            else:
 | 
			
		||||
                pointer = getattr(pointer, l[0])
 | 
			
		||||
            if len(l) >= 2:
 | 
			
		||||
                num = int(l[1])
 | 
			
		||||
                pointer = pointer[num]
 | 
			
		||||
        try:
 | 
			
		||||
            assert pointer.shape == array.shape
 | 
			
		||||
        except AssertionError as e:
 | 
			
		||||
            e.args += (pointer.shape, array.shape)
 | 
			
		||||
            raise
 | 
			
		||||
        print("Initialize PyTorch weight {}".format(name))
 | 
			
		||||
        pointer.data = torch.from_numpy(array)
 | 
			
		||||
    return model
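# Conversion usage sketch (the checkpoint path below is a placeholder assumption):
# >>> model = GPT2Model(GPT2Config())
# >>> model = load_tf_weights_in_gpt2(model, "/path/to/gpt2/model.ckpt")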
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def gelu(x):
 | 
			
		||||
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2Config(object):
 | 
			
		||||
    """Configuration class to store the configuration of a `GPT2Model`.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self,
 | 
			
		||||
        vocab_size_or_config_json_file=50257,
 | 
			
		||||
        n_positions=1024,
 | 
			
		||||
        n_ctx=1024,
 | 
			
		||||
        n_embd=768,
 | 
			
		||||
        n_layer=12,
 | 
			
		||||
        n_head=12,
 | 
			
		||||
        layer_norm_epsilon=1e-5,
 | 
			
		||||
        initializer_range=0.02,
 | 
			
		||||
    ):
 | 
			
		||||
        """Constructs GPT2Config.
 | 
			
		||||
 | 
			
		||||
        Args:
 | 
			
		||||
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `GPT2Model` or a configuration json file.
 | 
			
		||||
            n_positions: Number of positional embeddings.
 | 
			
		||||
            n_ctx: Size of the causal mask (usually same as n_positions).
 | 
			
		||||
            n_embd: Dimensionality of the embeddings and hidden states.
 | 
			
		||||
            n_layer: Number of hidden layers in the Transformer encoder.
 | 
			
		||||
            n_head: Number of attention heads for each attention layer in
 | 
			
		||||
                the Transformer encoder.
 | 
			
		||||
            layer_norm_epsilon: epsilon to use in the layer norm layers
 | 
			
		||||
            initializer_range: The stddev of the truncated_normal_initializer for
 | 
			
		||||
                initializing all weight matrices.
 | 
			
		||||
        """
 | 
			
		||||
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
 | 
			
		||||
                        and isinstance(vocab_size_or_config_json_file, unicode)):
 | 
			
		||||
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
 | 
			
		||||
                json_config = json.loads(reader.read())
 | 
			
		||||
            for key, value in json_config.items():
 | 
			
		||||
                self.__dict__[key] = value
 | 
			
		||||
        elif isinstance(vocab_size_or_config_json_file, int):
 | 
			
		||||
            self.vocab_size = vocab_size_or_config_json_file
 | 
			
		||||
            self.n_ctx = n_ctx
 | 
			
		||||
            self.n_positions = n_positions
 | 
			
		||||
            self.n_embd = n_embd
 | 
			
		||||
            self.n_layer = n_layer
 | 
			
		||||
            self.n_head = n_head
 | 
			
		||||
            self.layer_norm_epsilon = layer_norm_epsilon
 | 
			
		||||
            self.initializer_range = initializer_range
 | 
			
		||||
        else:
 | 
			
		||||
            raise ValueError(
 | 
			
		||||
                "First argument must be either a vocabulary size (int)"
 | 
			
		||||
                "or the path to a pretrained model config file (str)"
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_dict(cls, json_object):
 | 
			
		||||
        """Constructs a `GPT2Config` from a Python dictionary of parameters."""
 | 
			
		||||
        config = GPT2Config(vocab_size_or_config_json_file=-1)
 | 
			
		||||
        for key, value in json_object.items():
 | 
			
		||||
            config.__dict__[key] = value
 | 
			
		||||
        return config
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_json_file(cls, json_file):
 | 
			
		||||
        """Constructs a `GPT2Config` from a json file of parameters."""
 | 
			
		||||
        with open(json_file, "r", encoding="utf-8") as reader:
 | 
			
		||||
            text = reader.read()
 | 
			
		||||
        return cls.from_dict(json.loads(text))
 | 
			
		||||
 | 
			
		||||
    def __repr__(self):
 | 
			
		||||
        return str(self.to_json_string())
 | 
			
		||||
 | 
			
		||||
    def to_dict(self):
 | 
			
		||||
        """Serializes this instance to a Python dictionary."""
 | 
			
		||||
        output = copy.deepcopy(self.__dict__)
 | 
			
		||||
        return output
 | 
			
		||||
 | 
			
		||||
    def to_json_string(self):
 | 
			
		||||
        """Serializes this instance to a JSON string."""
 | 
			
		||||
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
 | 
			
		||||
 | 
			
		||||
    def to_json_file(self, json_file_path):
 | 
			
		||||
        """ Save this instance to a json file."""
 | 
			
		||||
        with open(json_file_path, "w", encoding='utf-8') as writer:
 | 
			
		||||
            writer.write(self.to_json_string())
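# Minimal GPT2Config round-trip sketch (the file name is an assumption):
# >>> config = GPT2Config()                          # default GPT-2 hyper-parameters
# >>> config.to_json_file("gpt2-config.json")
# >>> restored = GPT2Config.from_json_file("gpt2-config.json")
# >>> assert restored.n_embd == config.n_embd == 768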
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Conv1D(nn.Module):
 | 
			
		||||
    def __init__(self, nf, nx):
 | 
			
		||||
        super(Conv1D, self).__init__()
 | 
			
		||||
        self.nf = nf
 | 
			
		||||
        w = torch.empty(nx, nf)
 | 
			
		||||
        nn.init.normal_(w, std=0.02)
 | 
			
		||||
        self.weight = Parameter(w)
 | 
			
		||||
        self.bias = Parameter(torch.zeros(nf))
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        size_out = x.size()[:-1] + (self.nf,)
 | 
			
		||||
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
 | 
			
		||||
        x = x.view(*size_out)
 | 
			
		||||
        return x
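# Conv1D above is a Linear layer whose weight is stored transposed ([nx, nf]
# rather than [nf, nx]) and applied over the last input dimension. Equivalence
# sketch (tensor shapes are assumptions):
# >>> c = Conv1D(nf=6, nx=4)
# >>> x = torch.randn(2, 3, 4)
# >>> torch.allclose(c(x), torch.nn.functional.linear(x, c.weight.t(), c.bias), atol=1e-6)
# True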
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Attention(nn.Module):
 | 
			
		||||
    def __init__(self, nx, n_ctx, config, scale=False):
 | 
			
		||||
        super(Attention, self).__init__()
 | 
			
		||||
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
 | 
			
		||||
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
 | 
			
		||||
        assert n_state % config.n_head == 0
 | 
			
		||||
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
 | 
			
		||||
        self.n_head = config.n_head
 | 
			
		||||
        self.split_size = n_state
 | 
			
		||||
        self.scale = scale
 | 
			
		||||
        self.c_attn = Conv1D(n_state * 3, nx)
 | 
			
		||||
        self.c_proj = Conv1D(n_state, nx)
 | 
			
		||||
 | 
			
		||||
    def _attn(self, q, k, v):
 | 
			
		||||
        w = torch.matmul(q, k)
 | 
			
		||||
        if self.scale:
 | 
			
		||||
            w = w / math.sqrt(v.size(-1))
 | 
			
		||||
        nd, ns = w.size(-2), w.size(-1)
 | 
			
		||||
        b = self.bias[:, :, ns-nd:ns, :ns]
 | 
			
		||||
        w = w * b - 1e4 * (1 - b)
 | 
			
		||||
 | 
			
		||||
        w = nn.Softmax(dim=-1)(w)
 | 
			
		||||
        return torch.matmul(w, v)
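    # The slice self.bias[:, :, ns-nd:ns, :ns] above keeps the last `nd` rows of
    # the causal mask, so that when a cached `layer_past` makes ns > nd each new
    # query position can attend to every earlier key position, including the cache.
    # Sketch with n_ctx=5 and 2 new tokens after 3 cached ones (assumed sizes):
    # >>> bias = torch.tril(torch.ones(5, 5)).view(1, 1, 5, 5)
    # >>> bias[:, :, 3:5, :5]
    # tensor([[[[1., 1., 1., 1., 0.],
    #           [1., 1., 1., 1., 1.]]]])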
 | 
			
		||||
 | 
			
		||||
    def merge_heads(self, x):
 | 
			
		||||
        x = x.permute(0, 2, 1, 3).contiguous()
 | 
			
		||||
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
 | 
			
		||||
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states
 | 
			
		||||
 | 
			
		||||
    def split_heads(self, x, k=False):
 | 
			
		||||
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
 | 
			
		||||
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
 | 
			
		||||
        if k:
 | 
			
		||||
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
 | 
			
		||||
        else:
 | 
			
		||||
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)
 | 
			
		||||
 | 
			
		||||
    def forward(self, x, layer_past=None):
 | 
			
		||||
        x = self.c_attn(x)
 | 
			
		||||
        query, key, value = x.split(self.split_size, dim=2)
 | 
			
		||||
        query = self.split_heads(query)
 | 
			
		||||
        key = self.split_heads(key, k=True)
 | 
			
		||||
        value = self.split_heads(value)
 | 
			
		||||
        if layer_past is not None:
 | 
			
		||||
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
 | 
			
		||||
            key = torch.cat((past_key, key), dim=-1)
 | 
			
		||||
            value = torch.cat((past_value, value), dim=-2)
 | 
			
		||||
        present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking
 | 
			
		||||
        a = self._attn(query, key, value)
 | 
			
		||||
        a = self.merge_heads(a)
 | 
			
		||||
        a = self.c_proj(a)
 | 
			
		||||
        return a, present
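    # `present` stores the key transposed back to (batch, head, seq, head_features)
    # so it can be stacked with the value; forward() undoes that transpose before
    # concatenating along the sequence axis on the next call. Shape sketch
    # (batch=1, head=12, head_features=64, seq=3 are assumptions):
    # >>> key = torch.randn(1, 12, 64, 3)
    # >>> value = torch.randn(1, 12, 3, 64)
    # >>> torch.stack((key.transpose(-2, -1), value)).shape
    # torch.Size([2, 1, 12, 3, 64])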
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class MLP(nn.Module):
 | 
			
		||||
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
 | 
			
		||||
        super(MLP, self).__init__()
 | 
			
		||||
        nx = config.n_embd
 | 
			
		||||
        self.c_fc = Conv1D(n_state, nx)
 | 
			
		||||
        self.c_proj = Conv1D(nx, n_state)
 | 
			
		||||
        self.act = gelu
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        h = self.act(self.c_fc(x))
 | 
			
		||||
        h2 = self.c_proj(h)
 | 
			
		||||
        return h2
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Block(nn.Module):
 | 
			
		||||
    def __init__(self, n_ctx, config, scale=False):
 | 
			
		||||
        super(Block, self).__init__()
 | 
			
		||||
        nx = config.n_embd
 | 
			
		||||
        self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
 | 
			
		||||
        self.attn = Attention(nx, n_ctx, config, scale)
 | 
			
		||||
        self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
 | 
			
		||||
        self.mlp = MLP(4 * nx, config)
 | 
			
		||||
 | 
			
		||||
    def forward(self, x, layer_past=None):
 | 
			
		||||
        a, present = self.attn(self.ln_1(x), layer_past=layer_past)
 | 
			
		||||
        x = x + a
 | 
			
		||||
        m = self.mlp(self.ln_2(x))
 | 
			
		||||
        x = x + m
 | 
			
		||||
        return x, present
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2LMHead(nn.Module):
 | 
			
		||||
    """ Language Model Head for the transformer """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, model_embeddings_weights, config):
 | 
			
		||||
        super(GPT2LMHead, self).__init__()
 | 
			
		||||
        self.n_embd = config.n_embd
 | 
			
		||||
        self.set_embeddings_weights(model_embeddings_weights)
 | 
			
		||||
 | 
			
		||||
    def set_embeddings_weights(self, model_embeddings_weights):
 | 
			
		||||
        embed_shape = model_embeddings_weights.shape
 | 
			
		||||
        self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
 | 
			
		||||
        self.decoder.weight = model_embeddings_weights  # Tied weights
 | 
			
		||||
 | 
			
		||||
    def forward(self, hidden_state):
 | 
			
		||||
        # Truncated Language modeling logits (we remove the last token)
 | 
			
		||||
        # h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
 | 
			
		||||
        lm_logits = self.decoder(hidden_state)
 | 
			
		||||
        return lm_logits
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2MultipleChoiceHead(nn.Module):
 | 
			
		||||
    """ Classifier Head for the transformer """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(GPT2MultipleChoiceHead, self).__init__()
 | 
			
		||||
        self.n_embd = config.n_embd
 | 
			
		||||
        self.linear = nn.Linear(config.n_embd, 1)
 | 
			
		||||
 | 
			
		||||
        nn.init.normal_(self.linear.weight, std=0.02)
 | 
			
		||||
        nn.init.normal_(self.linear.bias, 0)
 | 
			
		||||
 | 
			
		||||
    def forward(self, hidden_states, mc_token_ids):
 | 
			
		||||
        # Classification logits
 | 
			
		||||
        # hidden_states (bsz, num_choices, seq_length, hidden_size)
 | 
			
		||||
        # mc_token_ids (bsz, num_choices)
 | 
			
		||||
        mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
 | 
			
		||||
        # (bsz, num_choices, 1, hidden_size)
 | 
			
		||||
        multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
 | 
			
		||||
        # (bsz, num_choices, hidden_size)
 | 
			
		||||
        multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
 | 
			
		||||
        # (bsz, num_choices)
 | 
			
		||||
        return multiple_choice_logits
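    # Gather sketch for the classification head above (shapes are assumptions:
    # bsz=2, num_choices=3, seq_len=5, hidden_size=8):
    # >>> h = torch.randn(2, 3, 5, 8)
    # >>> mc_token_ids = torch.LongTensor([[4, 2, 0], [1, 3, 4]])
    # >>> idx = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, 8)
    # >>> h.gather(2, idx).squeeze(2).shape        # hidden state of the selected token
    # torch.Size([2, 3, 8])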
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2PreTrainedModel(nn.Module):
 | 
			
		||||
    """ An abstract class to handle weights initialization and
 | 
			
		||||
        a simple interface for downloading and loading pretrained models.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config, *inputs, **kwargs):
 | 
			
		||||
        super(GPT2PreTrainedModel, self).__init__()
 | 
			
		||||
        if not isinstance(config, GPT2Config):
 | 
			
		||||
            raise ValueError(
 | 
			
		||||
                "Parameter config in `{}(config)` should be an instance of class `GPT2Config`. "
 | 
			
		||||
                "To create a model from a pretrained model use "
 | 
			
		||||
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
 | 
			
		||||
                    self.__class__.__name__, self.__class__.__name__
 | 
			
		||||
                )
 | 
			
		||||
            )
 | 
			
		||||
        self.config = config
 | 
			
		||||
 | 
			
		||||
    def set_tied(self):
 | 
			
		||||
        pass
 | 
			
		||||
 | 
			
		||||
    def init_weights(self, module):
 | 
			
		||||
        """ Initialize the weights.
 | 
			
		||||
        """
 | 
			
		||||
        if isinstance(module, (nn.Linear, nn.Embedding)):
 | 
			
		||||
            # Slightly different from the TF version which uses truncated_normal for initialization
 | 
			
		||||
            # cf https://github.com/pytorch/pytorch/pull/5617
 | 
			
		||||
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
 | 
			
		||||
        elif isinstance(module, LayerNorm):
 | 
			
		||||
            module.bias.data.zero_()
 | 
			
		||||
            module.weight.data.fill_(1.0)
 | 
			
		||||
        if isinstance(module, nn.Linear) and module.bias is not None:
 | 
			
		||||
            module.bias.data.zero_()
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_pretrained(
 | 
			
		||||
        cls, pretrained_model_name_or_path, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs
 | 
			
		||||
    ):
 | 
			
		||||
        """
 | 
			
		||||
        Instantiate a GPT2PreTrainedModel from a pre-trained model file or a pytorch state dict.
 | 
			
		||||
        Download and cache the pre-trained model file if needed.
 | 
			
		||||
 | 
			
		||||
        Params:
 | 
			
		||||
            pretrained_model_name_or_path: either:
 | 
			
		||||
                - a str with the name of a pre-trained model to load selected in the list of:
 | 
			
		||||
                    . `gpt2`
 | 
			
		||||
                - a path or url to a pretrained model archive containing:
 | 
			
		||||
                    . `gpt2_config.json` a configuration file for the model
 | 
			
		||||
                    . `pytorch_model.bin` a PyTorch dump of a GPT2Model instance
 | 
			
		||||
                - a path or url to a pretrained model archive containing:
 | 
			
		||||
                    . `gpt2_config.json` a configuration file for the model
 | 
			
		||||
                    . a TensorFlow checkpoint with trained weights
 | 
			
		||||
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
 | 
			
		||||
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
 | 
			
		||||
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
 | 
			
		||||
            *inputs, **kwargs: additional input for the specific GPT class
 | 
			
		||||
        """
 | 
			
		||||
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
 | 
			
		||||
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
 | 
			
		||||
            config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
 | 
			
		||||
        else:
 | 
			
		||||
            archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
 | 
			
		||||
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
 | 
			
		||||
        # redirect to the cache, if necessary
 | 
			
		||||
        try:
 | 
			
		||||
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
 | 
			
		||||
            resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
 | 
			
		||||
        except EnvironmentError:
 | 
			
		||||
            logger.error(
 | 
			
		||||
                "Model name '{}' was not found in model name list ({}). "
 | 
			
		||||
                "We assumed '{}' was a path or url but couldn't find files {} and {} "
 | 
			
		||||
                "at this path or url.".format(
 | 
			
		||||
                    pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
 | 
			
		||||
                    archive_file, config_file
 | 
			
		||||
                )
 | 
			
		||||
            )
 | 
			
		||||
            return None
 | 
			
		||||
        if resolved_archive_file == archive_file and resolved_config_file == config_file:
 | 
			
		||||
            logger.info("loading weights file {}".format(archive_file))
 | 
			
		||||
            logger.info("loading configuration file {}".format(config_file))
 | 
			
		||||
        else:
 | 
			
		||||
            logger.info("loading weights file {} from cache at {}".format(
 | 
			
		||||
                archive_file, resolved_archive_file))
 | 
			
		||||
            logger.info("loading configuration file {} from cache at {}".format(
 | 
			
		||||
                config_file, resolved_config_file))
 | 
			
		||||
        # Load config
 | 
			
		||||
        config = GPT2Config.from_json_file(resolved_config_file)
 | 
			
		||||
        logger.info("Model config {}".format(config))
 | 
			
		||||
        # Instantiate model.
 | 
			
		||||
        model = cls(config, *inputs, **kwargs)
 | 
			
		||||
        if state_dict is None and not from_tf:
 | 
			
		||||
            state_dict = torch.load(resolved_archive_file, map_location='cpu')
 | 
			
		||||
        if from_tf:
 | 
			
		||||
            # Directly load from a TensorFlow checkpoint (stored as NumPy array)
 | 
			
		||||
            return load_tf_weights_in_gpt2(model, resolved_archive_file)
 | 
			
		||||
 | 
			
		||||
        old_keys = []
 | 
			
		||||
        new_keys = []
 | 
			
		||||
        for key in state_dict.keys():
 | 
			
		||||
            new_key = None
 | 
			
		||||
            if key.endswith(".g"):
 | 
			
		||||
                new_key = key[:-2] + ".weight"
 | 
			
		||||
            elif key.endswith(".b"):
 | 
			
		||||
                new_key = key[:-2] + ".bias"
 | 
			
		||||
            elif key.endswith(".w"):
 | 
			
		||||
                new_key = key[:-2] + ".weight"
 | 
			
		||||
            if new_key:
 | 
			
		||||
                old_keys.append(key)
 | 
			
		||||
                new_keys.append(new_key)
 | 
			
		||||
        for old_key, new_key in zip(old_keys, new_keys):
 | 
			
		||||
            state_dict[new_key] = state_dict.pop(old_key)
 | 
			
		||||
 | 
			
		||||
        missing_keys = []
 | 
			
		||||
        unexpected_keys = []
 | 
			
		||||
        error_msgs = []
 | 
			
		||||
        # copy state_dict so _load_from_state_dict can modify it
 | 
			
		||||
        metadata = getattr(state_dict, "_metadata", None)
 | 
			
		||||
        state_dict = state_dict.copy()
 | 
			
		||||
        if metadata is not None:
 | 
			
		||||
            state_dict._metadata = metadata
 | 
			
		||||
 | 
			
		||||
        def load(module, prefix=""):
 | 
			
		||||
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
 | 
			
		||||
            module._load_from_state_dict(
 | 
			
		||||
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
 | 
			
		||||
            )
 | 
			
		||||
            for name, child in module._modules.items():
 | 
			
		||||
                if child is not None:
 | 
			
		||||
                    load(child, prefix + name + ".")
 | 
			
		||||
 | 
			
		||||
        start_model = model
 | 
			
		||||
        if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
 | 
			
		||||
            start_model = model.transformer
 | 
			
		||||
        load(start_model, prefix="")
 | 
			
		||||
 | 
			
		||||
        if len(missing_keys) > 0:
 | 
			
		||||
            logger.info(
 | 
			
		||||
                "Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
 | 
			
		||||
            )
 | 
			
		||||
        if len(unexpected_keys) > 0:
 | 
			
		||||
            logger.info(
 | 
			
		||||
                "Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
 | 
			
		||||
            )
 | 
			
		||||
        if len(error_msgs) > 0:
 | 
			
		||||
            raise RuntimeError(
 | 
			
		||||
                "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
        # Make sure we are still sharing the output and input embeddings after loading weights
 | 
			
		||||
        model.set_tied()
 | 
			
		||||
        return model
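    # Typical call, a minimal sketch (the cache directory is an assumption):
    # >>> model = GPT2LMHeadModel.from_pretrained("gpt2", cache_dir="/tmp/gpt2_cache")
    # The "gpt2" shortcut resolves through PRETRAINED_MODEL_ARCHIVE_MAP and
    # PRETRAINED_CONFIG_ARCHIVE_MAP above; a local directory containing files named
    # WEIGHTS_NAME and CONFIG_NAME is handled the same way.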
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2Model(GPT2PreTrainedModel):
 | 
			
		||||
    """OpenAI GPT-2 model ("Language Models are Unsupervised Multitask Learners").
 | 
			
		||||
 | 
			
		||||
    Params:
 | 
			
		||||
        config: a GPT2Config class instance with the configuration to build a new model
 | 
			
		||||
 | 
			
		||||
    Inputs:
 | 
			
		||||
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
 | 
			
		||||
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
 | 
			
		||||
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            with the position indices selected in the range [0, config.n_positions - 1[.
 | 
			
		||||
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            You can use it to add a third type of embedding to each input token in the sequence
 | 
			
		||||
            (the previous two being the word and position embeddings).
 | 
			
		||||
            The input, position and token_type embeddings are summed inside the Transformer before the first
 | 
			
		||||
            self-attention block.
 | 
			
		||||
        `past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
 | 
			
		||||
            (key and values in the attention blocks) to speed up sequential decoding
 | 
			
		||||
            (this is the presents output of the model, cf. below).
 | 
			
		||||
 | 
			
		||||
    Outputs a tuple consisting of:
 | 
			
		||||
        `hidden_states`: the encoded-hidden-states at the top of the model
 | 
			
		||||
            as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
 | 
			
		||||
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)
 | 
			
		||||
        `presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
 | 
			
		||||
            torch.FloatTensors. They can be reused to speed up sequential decoding.
 | 
			
		||||
 | 
			
		||||
    Example usage:
 | 
			
		||||
    ```python
 | 
			
		||||
    # Already been converted into BPE token ids
 | 
			
		||||
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
 | 
			
		||||
 | 
			
		||||
    config = modeling_gpt2.GPT2Config()
 | 
			
		||||
 | 
			
		||||
    model = modeling_gpt2.GPT2Model(config)
 | 
			
		||||
    hidden_states, presents = model(input_ids)
 | 
			
		||||
    ```
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(GPT2Model, self).__init__(config)
 | 
			
		||||
        self.wte = nn.Embedding(config.vocab_size, config.n_embd)
 | 
			
		||||
        self.wpe = nn.Embedding(config.n_positions, config.n_embd)
 | 
			
		||||
        block = Block(config.n_ctx, config, scale=True)
 | 
			
		||||
        self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
 | 
			
		||||
        self.ln_f = LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
 | 
			
		||||
 | 
			
		||||
        self.apply(self.init_weights)
 | 
			
		||||
 | 
			
		||||
    def forward(self, input_ids, position_ids=None, token_type_ids=None, past=None):
 | 
			
		||||
        if past is None:
 | 
			
		||||
            past_length = 0
 | 
			
		||||
            past = [None] * len(self.h)
 | 
			
		||||
        else:
 | 
			
		||||
            past_length = past[0][0].size(-2)
 | 
			
		||||
        if position_ids is None:
 | 
			
		||||
            position_ids = torch.arange(past_length, input_ids.size(-1) + past_length, dtype=torch.long, device=input_ids.device)
 | 
			
		||||
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
 | 
			
		||||
 | 
			
		||||
        input_shape = input_ids.size()
 | 
			
		||||
        input_ids = input_ids.view(-1, input_ids.size(-1))
 | 
			
		||||
        position_ids = position_ids.view(-1, position_ids.size(-1))
 | 
			
		||||
 | 
			
		||||
        inputs_embeds = self.wte(input_ids)
 | 
			
		||||
        position_embeds = self.wpe(position_ids)
 | 
			
		||||
        if token_type_ids is not None:
 | 
			
		||||
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
 | 
			
		||||
            token_type_embeds = self.wte(token_type_ids)
 | 
			
		||||
        else:
 | 
			
		||||
            token_type_embeds = 0
 | 
			
		||||
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
 | 
			
		||||
        presents = []
 | 
			
		||||
        for block, layer_past in zip(self.h, past):
 | 
			
		||||
            hidden_states, present = block(hidden_states, layer_past)
 | 
			
		||||
            presents.append(present)
 | 
			
		||||
        hidden_states = self.ln_f(hidden_states)
 | 
			
		||||
        output_shape = input_shape + (hidden_states.size(-1),)
 | 
			
		||||
        return hidden_states.view(*output_shape), presents
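    # Incremental decoding sketch using `past` (token ids are arbitrary assumptions):
    # >>> model = GPT2Model(GPT2Config())
    # >>> hidden, presents = model(torch.LongTensor([[31, 51, 99]]))          # full prompt
    # >>> hidden, presents = model(torch.LongTensor([[17]]), past=presents)   # one new token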
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2LMHeadModel(GPT2PreTrainedModel):
 | 
			
		||||
    """OpenAI GPT-2 model with a Language Modeling head ("Language Models are Unsupervised Multitask Learners").
 | 
			
		||||
 | 
			
		||||
    Params:
 | 
			
		||||
        config: a GPT2Config class instance with the configuration to build a new model
 | 
			
		||||
 | 
			
		||||
    Inputs:
 | 
			
		||||
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
 | 
			
		||||
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, config.vocab_size[
 | 
			
		||||
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            with the position indices selected in the range [0, config.n_positions - 1[.
 | 
			
		||||
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            You can use it to add a third type of embedding to each input token in the sequence
 | 
			
		||||
            (the previous two being the word and position embeddings).
 | 
			
		||||
            The input, position and token_type embeddings are summed inside the Transformer before the first
 | 
			
		||||
            self-attention block.
 | 
			
		||||
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
 | 
			
		||||
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
 | 
			
		||||
            is only computed for the labels set in [0, ..., vocab_size]
 | 
			
		||||
        `past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
 | 
			
		||||
            (key and values in the attention blocks) to speed up sequential decoding
 | 
			
		||||
            (this is the presents output of the model, cf. below).
 | 
			
		||||
 | 
			
		||||
    Outputs:
 | 
			
		||||
        if `lm_labels` is not `None`:
 | 
			
		||||
            Outputs the language modeling loss.
 | 
			
		||||
        else a tuple:
 | 
			
		||||
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, config.vocab_size]
 | 
			
		||||
                (or more generally [d_1, ..., d_n, config.vocab_size] where d_1 ... d_n are the dimensions of input_ids)
 | 
			
		||||
            `presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
 | 
			
		||||
                torch.FloatTensors. They can be reused to speed up sequential decoding.
 | 
			
		||||
 | 
			
		||||
    Example usage:
 | 
			
		||||
    ```python
 | 
			
		||||
    # Already been converted into BPE token ids
 | 
			
		||||
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
 | 
			
		||||
 | 
			
		||||
    config = modeling_gpt2.GPT2Config()
 | 
			
		||||
 | 
			
		||||
    model = modeling_gpt2.GPT2LMHeadModel(config)
 | 
			
		||||
    lm_logits, presents = model(input_ids)
 | 
			
		||||
    ```
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(GPT2LMHeadModel, self).__init__(config)
 | 
			
		||||
        self.transformer = GPT2Model(config)
 | 
			
		||||
        self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
 | 
			
		||||
        self.apply(self.init_weights)
 | 
			
		||||
 | 
			
		||||
    def set_tied(self):
 | 
			
		||||
        """ Make sure we are sharing the embeddings
 | 
			
		||||
        """
 | 
			
		||||
        self.lm_head.set_embeddings_weights(self.transformer.wte.weight)
 | 
			
		||||
 | 
			
		||||
    def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None, past=None):
 | 
			
		||||
        hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)
 | 
			
		||||
        lm_logits = self.lm_head(hidden_states)
 | 
			
		||||
        if lm_labels is not None:
 | 
			
		||||
            # Shift so that tokens < n predict n
 | 
			
		||||
            shift_logits = lm_logits[:, :-1].contiguous()
 | 
			
		||||
            shift_labels = lm_labels[:, 1:].contiguous()
 | 
			
		||||
 | 
			
		||||
            # Flatten the tokens
 | 
			
		||||
            loss_fct = CrossEntropyLoss(ignore_index=-1)
 | 
			
		||||
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
 | 
			
		||||
                            shift_labels.view(-1))
 | 
			
		||||
            return loss
 | 
			
		||||
        return lm_logits, presents
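    # Greedy decoding sketch built on the `presents` cache (prompt ids are assumptions):
    # >>> model = GPT2LMHeadModel(GPT2Config())
    # >>> tokens = torch.LongTensor([[31, 51, 99]])
    # >>> inp, past = tokens, None
    # >>> for _ in range(5):
    # ...     lm_logits, past = model(inp, past=past)
    # ...     inp = lm_logits[:, -1].argmax(dim=-1, keepdim=True)   # (batch, 1)
    # ...     tokens = torch.cat([tokens, inp], dim=1)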
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class GPT2DoubleHeadsModel(GPT2PreTrainedModel):
 | 
			
		||||
    """OpenAI GPT-2 model with a Language Modeling and a Multiple Choice head ("Language Models are Unsupervised Multitask Learners").
 | 
			
		||||
 | 
			
		||||
    Params:
 | 
			
		||||
        config: a GPT2Config class instance with the configuration to build a new model
 | 
			
		||||
 | 
			
		||||
    Inputs:
 | 
			
		||||
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
 | 
			
		||||
            indices selected in the range [0, config.vocab_size[
 | 
			
		||||
        `mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
 | 
			
		||||
            which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
 | 
			
		||||
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            with the position indices selected in the range [0, config.n_positions - 1[.
 | 
			
		||||
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            You can use it to add a third type of embedding to each input token in the sequence
 | 
			
		||||
            (the previous two being the word and position embeddings).
 | 
			
		||||
            The input, position and token_type embeddings are summed inside the Transformer before the first
 | 
			
		||||
            self-attention block.
 | 
			
		||||
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
 | 
			
		||||
            with indices selected in [-1, 0, ..., config.vocab_size]. All labels set to -1 are ignored (masked), the loss
 | 
			
		||||
            is only computed for the labels set in [0, ..., config.vocab_size]
 | 
			
		||||
        `multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
 | 
			
		||||
            with indices selected in [0, ..., num_choices].
 | 
			
		||||
        `past`: an optional list of torch.LongTensor that contains pre-computed hidden-states
 | 
			
		||||
            (key and values in the attention blocks) to speed up sequential decoding
 | 
			
		||||
            (this is the presents output of the model, cf. below).
 | 
			
		||||
 | 
			
		||||
    Outputs:
 | 
			
		||||
        if `lm_labels` and `multiple_choice_labels` are not `None`:
 | 
			
		||||
            Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
 | 
			
		||||
        else: a tuple with
 | 
			
		||||
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, config.vocab_size]
 | 
			
		||||
            `multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
 | 
			
		||||
            `presents`: a list of pre-computed hidden-states (key and values in each attention blocks) as
 | 
			
		||||
                torch.FloatTensors. They can be reused to speed up sequential decoding.
 | 
			
		||||
 | 
			
		||||
    Example usage:
 | 
			
		||||
    ```python
 | 
			
		||||
    # Already been converted into BPE token ids
 | 
			
		||||
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, number of choice, seq length)
 | 
			
		||||
    mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choice)
 | 
			
		||||
 | 
			
		||||
    config = modeling_gpt2.GPT2Config()
 | 
			
		||||
 | 
			
		||||
    model = modeling_gpt2.GPT2DoubleHeadsModel(config)
 | 
			
		||||
    lm_logits, multiple_choice_logits, presents = model(input_ids, mc_token_ids)
 | 
			
		||||
    ```
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(GPT2DoubleHeadsModel, self).__init__(config)
 | 
			
		||||
        self.transformer = GPT2Model(config)
 | 
			
		||||
        self.lm_head = GPT2LMHead(self.transformer.wte.weight, config)
 | 
			
		||||
        self.multiple_choice_head = GPT2MultipleChoiceHead(config)
 | 
			
		||||
        self.apply(self.init_weights)
 | 
			
		||||
 | 
			
		||||
    def set_tied(self):
 | 
			
		||||
        """ Make sure we are sharing the embeddings
 | 
			
		||||
        """
 | 
			
		||||
        self.lm_head.set_embeddings_weights(self.transformer.wte.weight)
 | 
			
		||||
 | 
			
		||||
    def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None, past=None):
 | 
			
		||||
        hidden_states, presents = self.transformer(input_ids, position_ids, token_type_ids, past)
 | 
			
		||||
        lm_logits = self.lm_head(hidden_states)
 | 
			
		||||
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
 | 
			
		||||
        losses = []
 | 
			
		||||
        if lm_labels is not None:
 | 
			
		||||
            shift_logits = lm_logits[:, :-1].contiguous()
 | 
			
		||||
            shift_labels = lm_labels[:, 1:].contiguous()
 | 
			
		||||
            loss_fct = CrossEntropyLoss(ignore_index=-1)
 | 
			
		||||
            losses.append(loss_fct(shift_logits.view(-1,
 | 
			
		||||
                          shift_logits.size(-1)), shift_labels.view(-1)))
 | 
			
		||||
        if mc_labels is not None:
 | 
			
		||||
            loss_fct = CrossEntropyLoss()
 | 
			
		||||
            losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
 | 
			
		||||
        if losses:
 | 
			
		||||
            return losses
 | 
			
		||||
        return lm_logits, mc_logits, presents
 | 
			
		||||
							
								
								
									
822  pytorch_pretrained_bert/modeling_openai.py  (new file)
@@ -0,0 +1,822 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
"""PyTorch OpenAI GPT model."""
 | 
			
		||||
 | 
			
		||||
from __future__ import absolute_import, division, print_function, unicode_literals
 | 
			
		||||
 | 
			
		||||
import collections
 | 
			
		||||
import copy
 | 
			
		||||
import json
 | 
			
		||||
import logging
 | 
			
		||||
import math
 | 
			
		||||
import os
 | 
			
		||||
import shutil
 | 
			
		||||
import tarfile
 | 
			
		||||
import tempfile
 | 
			
		||||
import sys
 | 
			
		||||
from io import open
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
import torch.nn as nn
 | 
			
		||||
from torch.nn import CrossEntropyLoss
 | 
			
		||||
from torch.nn.parameter import Parameter
 | 
			
		||||
 | 
			
		||||
from .file_utils import cached_path, CONFIG_NAME, WEIGHTS_NAME
 | 
			
		||||
from .modeling import BertLayerNorm as LayerNorm
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
PRETRAINED_MODEL_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-pytorch_model.bin"}
 | 
			
		||||
PRETRAINED_CONFIG_ARCHIVE_MAP = {"openai-gpt": "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-config.json"}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def load_tf_weights_in_openai_gpt(model, openai_checkpoint_folder_path):
 | 
			
		||||
    """ Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
 | 
			
		||||
    """
 | 
			
		||||
    import re
 | 
			
		||||
    import numpy as np
 | 
			
		||||
    print("Loading weights...")
 | 
			
		||||
    names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8'))
 | 
			
		||||
    shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8'))
 | 
			
		||||
    offsets = np.cumsum([np.prod(shape) for shape in shapes])
 | 
			
		||||
    init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
 | 
			
		||||
    init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
 | 
			
		||||
    init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
 | 
			
		||||
 | 
			
		||||
    # This was used when we had a single embedding matrix for positions and tokens
 | 
			
		||||
    # init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
 | 
			
		||||
    # del init_params[1]
 | 
			
		||||
    init_params = [arr.squeeze() for arr in init_params]
 | 
			
		||||
 | 
			
		||||
    try:
 | 
			
		||||
        assert model.tokens_embed.weight.shape == init_params[1].shape
 | 
			
		||||
        assert model.positions_embed.weight.shape == init_params[0].shape
 | 
			
		||||
    except AssertionError as e:
 | 
			
		||||
        e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
 | 
			
		||||
        e.args += (model.positions_embed.weight.shape, init_params[0].shape)
 | 
			
		||||
        raise
 | 
			
		||||
 | 
			
		||||
    model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
 | 
			
		||||
    model.positions_embed.weight.data = torch.from_numpy(init_params[0])
 | 
			
		||||
    names.pop(0)
 | 
			
		||||
    # Pop position and token embedding arrays
 | 
			
		||||
    init_params.pop(0)
 | 
			
		||||
    init_params.pop(0)
 | 
			
		||||
 | 
			
		||||
    for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
 | 
			
		||||
        name = name[6:]  # skip "model/"
 | 
			
		||||
        assert name[-2:] == ":0"
 | 
			
		||||
        name = name[:-2]
 | 
			
		||||
        name = name.split('/')
 | 
			
		||||
        pointer = model
 | 
			
		||||
        for m_name in name:
 | 
			
		||||
            if re.fullmatch(r'[A-Za-z]+\d+', m_name):
 | 
			
		||||
                l = re.split(r'(\d+)', m_name)
 | 
			
		||||
            else:
 | 
			
		||||
                l = [m_name]
 | 
			
		||||
            if l[0] == 'g':
 | 
			
		||||
                pointer = getattr(pointer, 'weight')
 | 
			
		||||
            elif l[0] == 'b':
 | 
			
		||||
                pointer = getattr(pointer, 'bias')
 | 
			
		||||
            elif l[0] == 'w':
 | 
			
		||||
                pointer = getattr(pointer, 'weight')
 | 
			
		||||
            else:
 | 
			
		||||
                pointer = getattr(pointer, l[0])
 | 
			
		||||
            if len(l) >= 2:
 | 
			
		||||
                num = int(l[1])
 | 
			
		||||
                pointer = pointer[num]
 | 
			
		||||
        try:
 | 
			
		||||
            assert pointer.shape == array.shape
 | 
			
		||||
        except AssertionError as e:
 | 
			
		||||
            e.args += (pointer.shape, array.shape)
 | 
			
		||||
            raise
 | 
			
		||||
        print("Initialize PyTorch weight {}".format(name))
 | 
			
		||||
        pointer.data = torch.from_numpy(array)
 | 
			
		||||
    return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def gelu(x):
 | 
			
		||||
    return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def swish(x):
 | 
			
		||||
    return x * torch.sigmoid(x)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
ACT_FNS = {"relu": nn.ReLU, "swish": swish, "gelu": gelu}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class OpenAIGPTConfig(object):
 | 
			
		||||
    """Configuration class to store the configuration of a `OpenAIGPTModel`.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self,
 | 
			
		||||
        vocab_size_or_config_json_file=40478,
 | 
			
		||||
        n_special=0,
 | 
			
		||||
        n_positions=512,
 | 
			
		||||
        n_ctx=512,
 | 
			
		||||
        n_embd=768,
 | 
			
		||||
        n_layer=12,
 | 
			
		||||
        n_head=12,
 | 
			
		||||
        afn="gelu",
 | 
			
		||||
        resid_pdrop=0.1,
 | 
			
		||||
        embd_pdrop=0.1,
 | 
			
		||||
        attn_pdrop=0.1,
 | 
			
		||||
        layer_norm_epsilon=1e-5,
 | 
			
		||||
        initializer_range=0.02,
 | 
			
		||||
    ):
 | 
			
		||||
        """Constructs OpenAIGPTConfig.
 | 
			
		||||
 | 
			
		||||
        Args:
 | 
			
		||||
            vocab_size_or_config_json_file: Vocabulary size of `input_ids` in `OpenAIGPTModel` or a configuration json file.
 | 
			
		||||
            n_special: The number of special tokens to learn during fine-tuning ('[SEP]', '[CLF]', ...)
 | 
			
		||||
            n_positions: Number of positional embeddings.
 | 
			
		||||
            n_ctx: Size of the causal mask (usually same as n_positions).
 | 
			
		||||
            n_embd: Dimensionality of the embeddings and hidden states.
 | 
			
		||||
            n_layer: Number of hidden layers in the Transformer encoder.
 | 
			
		||||
            n_head: Number of attention heads for each attention layer in
 | 
			
		||||
                the Transformer encoder.
 | 
			
		||||
            afn: The non-linear activation function (function or string) in the
 | 
			
		||||
                encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
 | 
			
		||||
            resid_pdrop: The dropout probability for all fully connected
 | 
			
		||||
                layers in the embeddings, encoder, and pooler.
 | 
			
		||||
            attn_pdrop: The dropout ratio for the attention
 | 
			
		||||
                probabilities.
 | 
			
		||||
            embd_pdrop: The dropout ratio for the embeddings.
 | 
			
		||||
            layer_norm_epsilon: epsilon to use in the layer norm layers
 | 
			
		||||
            initializer_range: The stddev of the truncated_normal_initializer for
 | 
			
		||||
                initializing all weight matrices.
 | 
			
		||||
        """
 | 
			
		||||
        if isinstance(vocab_size_or_config_json_file, str) or (sys.version_info[0] == 2
 | 
			
		||||
                        and isinstance(vocab_size_or_config_json_file, unicode)):
 | 
			
		||||
            with open(vocab_size_or_config_json_file, "r", encoding="utf-8") as reader:
 | 
			
		||||
                json_config = json.loads(reader.read())
 | 
			
		||||
            for key, value in json_config.items():
 | 
			
		||||
                self.__dict__[key] = value
 | 
			
		||||
        elif isinstance(vocab_size_or_config_json_file, int):
 | 
			
		||||
            self.vocab_size = vocab_size_or_config_json_file
 | 
			
		||||
            self.n_special = n_special
 | 
			
		||||
            self.n_ctx = n_ctx
 | 
			
		||||
            self.n_positions = n_positions
 | 
			
		||||
            self.n_embd = n_embd
 | 
			
		||||
            self.n_layer = n_layer
 | 
			
		||||
            self.n_head = n_head
 | 
			
		||||
            self.afn = afn
 | 
			
		||||
            self.resid_pdrop = resid_pdrop
 | 
			
		||||
            self.embd_pdrop = embd_pdrop
 | 
			
		||||
            self.attn_pdrop = attn_pdrop
 | 
			
		||||
            self.layer_norm_epsilon = layer_norm_epsilon
 | 
			
		||||
            self.initializer_range = initializer_range
 | 
			
		||||
        else:
 | 
			
		||||
            raise ValueError(
 | 
			
		||||
                "First argument must be either a vocabulary size (int)"
 | 
			
		||||
                "or the path to a pretrained model config file (str)"
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
    @property
 | 
			
		||||
    def total_tokens_embeddings(self):
 | 
			
		||||
        return self.vocab_size + self.n_special
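    # The embedding matrix covers the BPE vocabulary plus the fine-tuning special
    # tokens, e.g.:
    # >>> OpenAIGPTConfig(n_special=3).total_tokens_embeddings
    # 40481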
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_dict(cls, json_object):
 | 
			
		||||
        """Constructs a `OpenAIGPTConfig` from a Python dictionary of parameters."""
 | 
			
		||||
        config = OpenAIGPTConfig(vocab_size_or_config_json_file=-1)
 | 
			
		||||
        for key, value in json_object.items():
 | 
			
		||||
            config.__dict__[key] = value
 | 
			
		||||
        return config
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_json_file(cls, json_file):
 | 
			
		||||
        """Constructs a `OpenAIGPTConfig` from a json file of parameters."""
 | 
			
		||||
        with open(json_file, "r", encoding="utf-8") as reader:
 | 
			
		||||
            text = reader.read()
 | 
			
		||||
        return cls.from_dict(json.loads(text))
 | 
			
		||||
 | 
			
		||||
    def __repr__(self):
 | 
			
		||||
        return str(self.to_json_string())
 | 
			
		||||
 | 
			
		||||
    def to_dict(self):
 | 
			
		||||
        """Serializes this instance to a Python dictionary."""
 | 
			
		||||
        output = copy.deepcopy(self.__dict__)
 | 
			
		||||
        return output
 | 
			
		||||
 | 
			
		||||
    def to_json_string(self):
 | 
			
		||||
        """Serializes this instance to a JSON string."""
 | 
			
		||||
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
 | 
			
		||||
 | 
			
		||||
    def to_json_file(self, json_file_path):
 | 
			
		||||
        """ Save this instance to a json file."""
 | 
			
		||||
        with open(json_file_path, "w", encoding='utf-8') as writer:
 | 
			
		||||
            writer.write(self.to_json_string())
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Conv1D(nn.Module):
 | 
			
		||||
    def __init__(self, nf, rf, nx):
 | 
			
		||||
        super(Conv1D, self).__init__()
 | 
			
		||||
        self.rf = rf
 | 
			
		||||
        self.nf = nf
 | 
			
		||||
        if rf == 1:  # faster 1x1 conv
 | 
			
		||||
            w = torch.empty(nx, nf)
 | 
			
		||||
            nn.init.normal_(w, std=0.02)
 | 
			
		||||
            self.weight = Parameter(w)
 | 
			
		||||
            self.bias = Parameter(torch.zeros(nf))
 | 
			
		||||
        else:  # was used to train LM
 | 
			
		||||
            raise NotImplementedError
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        if self.rf == 1:
 | 
			
		||||
            size_out = x.size()[:-1] + (self.nf,)
 | 
			
		||||
            x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
 | 
			
		||||
            x = x.view(*size_out)
 | 
			
		||||
        else:
 | 
			
		||||
            raise NotImplementedError
 | 
			
		||||
        return x
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
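# Illustrative sketch (not from the original file): with rf == 1 the Conv1D above is
# an affine map over the last dimension, i.e. an nn.Linear whose weight is stored
# transposed. The helper name below is hypothetical.
def _example_conv1d_equals_linear():
    conv = Conv1D(nf=6, rf=1, nx=4)                     # projects 4 features to 6
    linear = nn.Linear(4, 6)
    linear.weight.data.copy_(conv.weight.data.t())      # Conv1D keeps its weight as (nx, nf)
    linear.bias.data.copy_(conv.bias.data)
    x = torch.randn(2, 3, 4)                            # (batch, seq, nx)
    assert torch.allclose(conv(x), linear(x), atol=1e-6)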
class Attention(nn.Module):
 | 
			
		||||
    def __init__(self, nx, n_ctx, config, scale=False):
 | 
			
		||||
        super(Attention, self).__init__()
 | 
			
		||||
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
 | 
			
		||||
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
 | 
			
		||||
        assert n_state % config.n_head == 0
 | 
			
		||||
        self.register_buffer("bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx))
 | 
			
		||||
        self.n_head = config.n_head
 | 
			
		||||
        self.split_size = n_state
 | 
			
		||||
        self.scale = scale
 | 
			
		||||
        self.c_attn = Conv1D(n_state * 3, 1, nx)
 | 
			
		||||
        self.c_proj = Conv1D(n_state, 1, nx)
 | 
			
		||||
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
 | 
			
		||||
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
 | 
			
		||||
 | 
			
		||||
    def _attn(self, q, k, v):
 | 
			
		||||
        w = torch.matmul(q, k)
 | 
			
		||||
        if self.scale:
 | 
			
		||||
            w = w / math.sqrt(v.size(-1))
 | 
			
		||||
        # w = w * self.bias + -1e9 * (1 - self.bias)  # TF implem method: mask_attn_weights
 | 
			
		||||
        # XD: self.b may be larger than w, so we need to crop it
 | 
			
		||||
        b = self.bias[:, :, : w.size(-2), : w.size(-1)]
 | 
			
		||||
        w = w * b + -1e9 * (1 - b)
 | 
			
		||||
 | 
			
		||||
        w = nn.Softmax(dim=-1)(w)
 | 
			
		||||
        w = self.attn_dropout(w)
 | 
			
		||||
        return torch.matmul(w, v)
 | 
			
		||||
 | 
			
		||||
    def merge_heads(self, x):
 | 
			
		||||
        x = x.permute(0, 2, 1, 3).contiguous()
 | 
			
		||||
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
 | 
			
		||||
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states
 | 
			
		||||
 | 
			
		||||
    def split_heads(self, x, k=False):
 | 
			
		||||
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
 | 
			
		||||
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
 | 
			
		||||
        if k:
 | 
			
		||||
            return x.permute(0, 2, 3, 1)
 | 
			
		||||
        else:
 | 
			
		||||
            return x.permute(0, 2, 1, 3)
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        x = self.c_attn(x)
 | 
			
		||||
        query, key, value = x.split(self.split_size, dim=2)
 | 
			
		||||
        query = self.split_heads(query)
 | 
			
		||||
        key = self.split_heads(key, k=True)
 | 
			
		||||
        value = self.split_heads(value)
 | 
			
		||||
        a = self._attn(query, key, value)
 | 
			
		||||
        a = self.merge_heads(a)
 | 
			
		||||
        a = self.c_proj(a)
 | 
			
		||||
        a = self.resid_dropout(a)
 | 
			
		||||
        return a
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
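# Illustrative sketch (not from the original file): the `bias` buffer registered in
# Attention is a lower-triangular causal mask. After `w * b + -1e9 * (1 - b)` and the
# softmax, each position can only attend to itself and to earlier positions.
def _example_causal_mask(n_ctx=5):
    b = torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx)
    w = torch.zeros(1, 1, n_ctx, n_ctx)                 # pretend raw attention scores
    w = w * b + -1e9 * (1 - b)
    probs = nn.Softmax(dim=-1)(w)
    expected_first_row = torch.zeros(n_ctx)
    expected_first_row[0] = 1.0                         # position 0 attends only to itself
    assert torch.allclose(probs[0, 0, 0], expected_first_row)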
class MLP(nn.Module):
 | 
			
		||||
    def __init__(self, n_state, config):  # in MLP: n_state=3072 (4 * n_embd)
 | 
			
		||||
        super(MLP, self).__init__()
 | 
			
		||||
        nx = config.n_embd
 | 
			
		||||
        self.c_fc = Conv1D(n_state, 1, nx)
 | 
			
		||||
        self.c_proj = Conv1D(nx, 1, n_state)
 | 
			
		||||
        self.act = ACT_FNS[config.afn]
 | 
			
		||||
        self.dropout = nn.Dropout(config.resid_pdrop)
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        h = self.act(self.c_fc(x))
 | 
			
		||||
        h2 = self.c_proj(h)
 | 
			
		||||
        return self.dropout(h2)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class Block(nn.Module):
 | 
			
		||||
    def __init__(self, n_ctx, config, scale=False):
 | 
			
		||||
        super(Block, self).__init__()
 | 
			
		||||
        nx = config.n_embd
 | 
			
		||||
        self.attn = Attention(nx, n_ctx, config, scale)
 | 
			
		||||
        self.ln_1 = LayerNorm(nx, eps=config.layer_norm_epsilon)
 | 
			
		||||
        self.mlp = MLP(4 * nx, config)
 | 
			
		||||
        self.ln_2 = LayerNorm(nx, eps=config.layer_norm_epsilon)
 | 
			
		||||
 | 
			
		||||
    def forward(self, x):
 | 
			
		||||
        a = self.attn(x)
 | 
			
		||||
        n = self.ln_1(x + a)
 | 
			
		||||
        m = self.mlp(n)
 | 
			
		||||
        h = self.ln_2(n + m)
 | 
			
		||||
        return h
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class OpenAIGPTLMHead(nn.Module):
 | 
			
		||||
    """ Language Model Head for the transformer """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, model_embeddings_weights, config):
 | 
			
		||||
        super(OpenAIGPTLMHead, self).__init__()
 | 
			
		||||
        self.n_embd = config.n_embd
 | 
			
		||||
        self.set_embeddings_weights(model_embeddings_weights)
 | 
			
		||||
 | 
			
		||||
    def set_embeddings_weights(self, model_embeddings_weights):
 | 
			
		||||
        embed_shape = model_embeddings_weights.shape
 | 
			
		||||
        self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
 | 
			
		||||
        self.decoder.weight = model_embeddings_weights  # Tied weights
 | 
			
		||||
 | 
			
		||||
    def forward(self, hidden_state):
 | 
			
		||||
        # Truncated Language modeling logits (we remove the last token)
 | 
			
		||||
        # h_trunc = h[:, :-1].contiguous().view(-1, self.n_embd)
 | 
			
		||||
        lm_logits = self.decoder(hidden_state)
 | 
			
		||||
        return lm_logits
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
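# Illustrative sketch (not from the original file): the LM head above shares its
# decoder weight with the token embedding matrix, so one logit is produced per
# embedding row. The helper name and tensor sizes are hypothetical.
def _example_tied_lm_head():
    config = OpenAIGPTConfig(vocab_size_or_config_json_file=40478)
    embeddings = nn.Embedding(config.vocab_size, config.n_embd)
    lm_head = OpenAIGPTLMHead(embeddings.weight, config)
    assert lm_head.decoder.weight.data_ptr() == embeddings.weight.data_ptr()  # tied storage
    hidden = torch.randn(2, 7, config.n_embd)
    assert lm_head(hidden).shape == (2, 7, config.vocab_size)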
class OpenAIGPTMultipleChoiceHead(nn.Module):
 | 
			
		||||
    """ Classifier Head for the transformer """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(OpenAIGPTMultipleChoiceHead, self).__init__()
 | 
			
		||||
        self.n_embd = config.n_embd
 | 
			
		||||
        # self.multiple_choice_token = multiple_choice_token
 | 
			
		||||
        self.dropout = nn.Dropout2d(config.resid_pdrop)  # To reproduce the noise_shape parameter of TF implementation
 | 
			
		||||
        self.linear = nn.Linear(config.n_embd, 1)
 | 
			
		||||
 | 
			
		||||
        nn.init.normal_(self.linear.weight, std=0.02)
 | 
			
		||||
        nn.init.constant_(self.linear.bias, 0)
 | 
			
		||||
 | 
			
		||||
    def forward(self, hidden_states, mc_token_ids):
 | 
			
		||||
        # Classification logits
 | 
			
		||||
        # hidden_state (bsz, num_choices, seq_length, hidden_size)
 | 
			
		||||
        # mc_token_ids (bsz, num_choices)
 | 
			
		||||
        mc_token_ids = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_states.size(-1))
 | 
			
		||||
        # (bsz, num_choices, 1, hidden_size)
 | 
			
		||||
        multiple_choice_h = hidden_states.gather(2, mc_token_ids).squeeze(2)
 | 
			
		||||
        # (bsz, num_choices, hidden_size)
 | 
			
		||||
        multiple_choice_h = self.dropout(multiple_choice_h.transpose(1, 2)).transpose(1, 2)
 | 
			
		||||
        multiple_choice_logits = self.linear(multiple_choice_h).squeeze(-1)
 | 
			
		||||
        # (bsz, num_choices)
 | 
			
		||||
        return multiple_choice_logits
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
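# Illustrative sketch (not from the original file): how the multiple choice head
# picks one hidden state per choice at the position given by mc_token_ids.
# The helper name and tensor sizes are hypothetical.
def _example_multiple_choice_gather():
    bsz, num_choices, seq_len, hidden_size = 2, 3, 5, 8
    hidden_states = torch.randn(bsz, num_choices, seq_len, hidden_size)
    mc_token_ids = torch.full((bsz, num_choices), seq_len - 1, dtype=torch.long)
    index = mc_token_ids.unsqueeze(-1).unsqueeze(-1).expand(-1, -1, -1, hidden_size)
    picked = hidden_states.gather(2, index).squeeze(2)  # (bsz, num_choices, hidden_size)
    assert torch.equal(picked, hidden_states[:, :, -1, :])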
class OpenAIGPTPreTrainedModel(nn.Module):
 | 
			
		||||
    """ An abstract class to handle weights initialization and
 | 
			
		||||
        a simple interface for downloading and loading pretrained models.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config, *inputs, **kwargs):
 | 
			
		||||
        super(OpenAIGPTPreTrainedModel, self).__init__()
 | 
			
		||||
        if not isinstance(config, OpenAIGPTConfig):
 | 
			
		||||
            raise ValueError(
 | 
			
		||||
                "Parameter config in `{}(config)` should be an instance of class `OpenAIGPTConfig`. "
 | 
			
		||||
                "To create a model from a pretrained model use "
 | 
			
		||||
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
 | 
			
		||||
                    self.__class__.__name__, self.__class__.__name__
 | 
			
		||||
                )
 | 
			
		||||
            )
 | 
			
		||||
        self.config = config
 | 
			
		||||
 | 
			
		||||
    def init_weights(self, module):
 | 
			
		||||
        """ Initialize the weights.
 | 
			
		||||
        """
 | 
			
		||||
        if isinstance(module, (nn.Linear, nn.Embedding)):
 | 
			
		||||
            # Slightly different from the TF version which uses truncated_normal for initialization
 | 
			
		||||
            # cf https://github.com/pytorch/pytorch/pull/5617
 | 
			
		||||
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
 | 
			
		||||
        elif isinstance(module, LayerNorm):
 | 
			
		||||
            module.bias.data.zero_()
 | 
			
		||||
            module.weight.data.fill_(1.0)
 | 
			
		||||
        if isinstance(module, nn.Linear) and module.bias is not None:
 | 
			
		||||
            module.bias.data.zero_()
 | 
			
		||||
 | 
			
		||||
    def set_num_special_tokens(self, num_special_tokens):
 | 
			
		||||
        pass
 | 
			
		||||
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_pretrained(
 | 
			
		||||
        cls, pretrained_model_name_or_path, num_special_tokens=None, state_dict=None, cache_dir=None, from_tf=False, *inputs, **kwargs
 | 
			
		||||
    ):
 | 
			
		||||
        """
 | 
			
		||||
        Instantiate an OpenAIGPTPreTrainedModel from a pre-trained model file or a PyTorch state dict.
 | 
			
		||||
        Download and cache the pre-trained model file if needed.
 | 
			
		||||
 | 
			
		||||
        Params:
 | 
			
		||||
            pretrained_model_name_or_path: either:
 | 
			
		||||
                - a str with the name of a pre-trained model to load selected in the list of:
 | 
			
		||||
                    . `openai-gpt`
 | 
			
		||||
                - a path or url to a pretrained model archive containing:
 | 
			
		||||
                    . `openai_gpt_config.json` a configuration file for the model
 | 
			
		||||
                    . `pytorch_model.bin` a PyTorch dump of a OpenAIGPTModel instance
 | 
			
		||||
                - a path or url to a pretrained model archive containing:
 | 
			
		||||
                    . `bert_config.json` a configuration file for the model
 | 
			
		||||
                    . a series of NumPy files containing OpenAI TensorFlow trained weights
 | 
			
		||||
            from_tf: should we load the weights from a locally saved TensorFlow checkpoint
 | 
			
		||||
            cache_dir: an optional path to a folder in which the pre-trained models will be cached.
 | 
			
		||||
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of pre-trained models
 | 
			
		||||
            *inputs, **kwargs: additional inputs for the specific OpenAI GPT subclass
 | 
			
		||||
                (these are forwarded to the subclass constructor, i.e. cls(config, *inputs, **kwargs))
 | 
			
		||||
        """
 | 
			
		||||
        if pretrained_model_name_or_path in PRETRAINED_MODEL_ARCHIVE_MAP:
 | 
			
		||||
            archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name_or_path]
 | 
			
		||||
            config_file = PRETRAINED_CONFIG_ARCHIVE_MAP[pretrained_model_name_or_path]
 | 
			
		||||
        else:
 | 
			
		||||
            archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
 | 
			
		||||
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
 | 
			
		||||
        # redirect to the cache, if necessary
 | 
			
		||||
        try:
 | 
			
		||||
            resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)
 | 
			
		||||
            resolved_config_file = cached_path(config_file, cache_dir=cache_dir)
 | 
			
		||||
        except EnvironmentError:
 | 
			
		||||
            logger.error(
 | 
			
		||||
                "Model name '{}' was not found in model name list ({}). "
 | 
			
		||||
                "We assumed '{}' was a path or url but couldn't find files {} and {} "
 | 
			
		||||
                "at this path or url.".format(
 | 
			
		||||
                    pretrained_model_name_or_path, ", ".join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), pretrained_model_name_or_path,
 | 
			
		||||
                    archive_file, config_file
 | 
			
		||||
                )
 | 
			
		||||
            )
 | 
			
		||||
            return None
 | 
			
		||||
        if resolved_archive_file == archive_file and resolved_config_file == config_file:
 | 
			
		||||
            logger.info("loading weights file {}".format(archive_file))
 | 
			
		||||
            logger.info("loading configuration file {}".format(config_file))
 | 
			
		||||
        else:
 | 
			
		||||
            logger.info("loading weights file {} from cache at {}".format(
 | 
			
		||||
                archive_file, resolved_archive_file))
 | 
			
		||||
            logger.info("loading configuration file {} from cache at {}".format(
 | 
			
		||||
                config_file, resolved_config_file))
 | 
			
		||||
        # Load config
 | 
			
		||||
        config = OpenAIGPTConfig.from_json_file(resolved_config_file)
 | 
			
		||||
        logger.info("Model config {}".format(config))
 | 
			
		||||
        # Instantiate model.
 | 
			
		||||
        model = cls(config, *inputs, **kwargs)
 | 
			
		||||
        if state_dict is None and not from_tf:
 | 
			
		||||
            state_dict = torch.load(resolved_archive_file, map_location='cpu')
 | 
			
		||||
        if from_tf:
 | 
			
		||||
            # Directly load from a TensorFlow checkpoint (stored as NumPy array)
 | 
			
		||||
            return load_tf_weights_in_openai_gpt(model, resolved_archive_file)
 | 
			
		||||
 | 
			
		||||
        old_keys = []
 | 
			
		||||
        new_keys = []
 | 
			
		||||
        for key in state_dict.keys():
 | 
			
		||||
            new_key = None
 | 
			
		||||
            if key.endswith(".g"):
 | 
			
		||||
                new_key = key[:-2] + ".weight"
 | 
			
		||||
            elif key.endswith(".b"):
 | 
			
		||||
                new_key = key[:-2] + ".bias"
 | 
			
		||||
            elif key.endswith(".w"):
 | 
			
		||||
                new_key = key[:-2] + ".weight"
 | 
			
		||||
            if new_key:
 | 
			
		||||
                old_keys.append(key)
 | 
			
		||||
                new_keys.append(new_key)
 | 
			
		||||
        for old_key, new_key in zip(old_keys, new_keys):
 | 
			
		||||
            state_dict[new_key] = state_dict.pop(old_key)
 | 
			
		||||
 | 
			
		||||
        missing_keys = []
 | 
			
		||||
        unexpected_keys = []
 | 
			
		||||
        error_msgs = []
 | 
			
		||||
        # copy state_dict so _load_from_state_dict can modify it
 | 
			
		||||
        metadata = getattr(state_dict, "_metadata", None)
 | 
			
		||||
        state_dict = state_dict.copy()
 | 
			
		||||
        if metadata is not None:
 | 
			
		||||
            state_dict._metadata = metadata
 | 
			
		||||
 | 
			
		||||
        def load(module, prefix=""):
 | 
			
		||||
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
 | 
			
		||||
            module._load_from_state_dict(
 | 
			
		||||
                state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
 | 
			
		||||
            )
 | 
			
		||||
            for name, child in module._modules.items():
 | 
			
		||||
                if child is not None:
 | 
			
		||||
                    load(child, prefix + name + ".")
 | 
			
		||||
 | 
			
		||||
        start_model = model
 | 
			
		||||
        if hasattr(model, "transformer") and all(not s.startswith('transformer.') for s in state_dict.keys()):
 | 
			
		||||
            start_model = model.transformer
 | 
			
		||||
        load(start_model, prefix="")
 | 
			
		||||
 | 
			
		||||
        if len(missing_keys) > 0:
 | 
			
		||||
            logger.info(
 | 
			
		||||
                "Weights of {} not initialized from pretrained model: {}".format(model.__class__.__name__, missing_keys)
 | 
			
		||||
            )
 | 
			
		||||
        if len(unexpected_keys) > 0:
 | 
			
		||||
            logger.info(
 | 
			
		||||
                "Weights from pretrained model not used in {}: {}".format(model.__class__.__name__, unexpected_keys)
 | 
			
		||||
            )
 | 
			
		||||
        if len(error_msgs) > 0:
 | 
			
		||||
            raise RuntimeError(
 | 
			
		||||
                "Error(s) in loading state_dict for {}:\n\t{}".format(model.__class__.__name__, "\n\t".join(error_msgs))
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
        # Add additional embeddings for special tokens if needed
 | 
			
		||||
        # This step also makes sure we are still sharing the output and input embeddings after loading weights
 | 
			
		||||
        model.set_num_special_tokens(num_special_tokens if num_special_tokens is not None else config.n_special)
 | 
			
		||||
        return model
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
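# Illustrative usage (not from the original file) of the loader above. The shortcut
# name `openai-gpt` is the one listed in the docstring, the cache path is hypothetical,
# and the call downloads weights, so it is shown as a comment:
#
#   model = OpenAIGPTLMHeadModel.from_pretrained(
#       "openai-gpt",
#       num_special_tokens=2,              # grow the embedding matrix for two special tokens
#       cache_dir="/tmp/openai_gpt_cache",
#   )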
class OpenAIGPTModel(OpenAIGPTPreTrainedModel):
 | 
			
		||||
    """OpenAI GPT model ("Improving Language Understanding by Generative Pre-Training").
 | 
			
		||||
 | 
			
		||||
    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
 | 
			
		||||
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
 | 
			
		||||
    Special tokens need to be trained during the fine-tuning if you use them.
 | 
			
		||||
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
 | 
			
		||||
 | 
			
		||||
    The embeddings are ordered as follows in the token embedding matrix:
 | 
			
		||||
        [0,                                                         ----------------------
 | 
			
		||||
         ...                                                        -> word embeddings
 | 
			
		||||
         config.vocab_size - 1,                                     ______________________
 | 
			
		||||
         config.vocab_size,
 | 
			
		||||
         ...                                                        -> special embeddings
 | 
			
		||||
         config.vocab_size + config.n_special - 1]                  ______________________
 | 
			
		||||
 | 
			
		||||
    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
 | 
			
		||||
        total_tokens_embeddings = config.vocab_size + config.n_special
 | 
			
		||||
    You should use the associated indices to index the embeddings.
 | 
			
		||||
 | 
			
		||||
    Params:
 | 
			
		||||
        config: a OpenAIGPTConfig class instance with the configuration to build a new model
 | 
			
		||||
 | 
			
		||||
    Inputs:
 | 
			
		||||
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
 | 
			
		||||
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
 | 
			
		||||
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            with the position indices (selected in the range [0, config.n_positions - 1[.
 | 
			
		||||
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            You can use it to add a third type of embedding to each input token in the sequence
 | 
			
		||||
            (the previous two being the word and position embeddings).
 | 
			
		||||
            The input, position and token_type embeddings are summed inside the Transformer before the first
 | 
			
		||||
            self-attention block.
 | 
			
		||||
 | 
			
		||||
    Outputs:
 | 
			
		||||
        `hidden_states`: the encoded-hidden-states at the top of the model
 | 
			
		||||
            as a torch.FloatTensor of size [batch_size, sequence_length, hidden_size]
 | 
			
		||||
            (or more generally [d_1, ..., d_n, hidden_size] where d_1 ... d_n are the dimensions of input_ids)
 | 
			
		||||
 | 
			
		||||
    Example usage:
 | 
			
		||||
    ```python
 | 
			
		||||
    # Already been converted into BPE token ids
 | 
			
		||||
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
 | 
			
		||||
 | 
			
		||||
    config = modeling_openai.OpenAIGPTConfig()
 | 
			
		||||
 | 
			
		||||
    model = modeling_openai.OpenAIGPTModel(config)
 | 
			
		||||
    hidden_states = model(input_ids)
 | 
			
		||||
    ```
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(OpenAIGPTModel, self).__init__(config)
 | 
			
		||||
        num_tokens = config.vocab_size + config.n_special
 | 
			
		||||
        self.tokens_embed = nn.Embedding(num_tokens, config.n_embd)
 | 
			
		||||
        self.positions_embed = nn.Embedding(config.n_positions, config.n_embd)
 | 
			
		||||
        self.drop = nn.Dropout(config.embd_pdrop)
 | 
			
		||||
        block = Block(config.n_ctx, config, scale=True)
 | 
			
		||||
        self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(config.n_layer)])
 | 
			
		||||
 | 
			
		||||
        self.apply(self.init_weights)
 | 
			
		||||
        # nn.init.normal_(self.embed.weight, std=0.02)
 | 
			
		||||
 | 
			
		||||
    def set_num_special_tokens(self, num_special_tokens):
 | 
			
		||||
        " Update input embeddings with new embedding matrice if needed "
 | 
			
		||||
        if self.config.n_special == num_special_tokens:
 | 
			
		||||
            return
 | 
			
		||||
        # Update config
 | 
			
		||||
        self.config.n_special = num_special_tokens
 | 
			
		||||
        # Build new embeddings and initialize all new embeddings (in particular the special tokens)
 | 
			
		||||
        old_embed = self.tokens_embed
 | 
			
		||||
        self.tokens_embed = nn.Embedding(self.config.total_tokens_embeddings, self.config.n_embd)
 | 
			
		||||
        self.tokens_embed.to(old_embed.weight.device)
 | 
			
		||||
        self.init_weights(self.tokens_embed)
 | 
			
		||||
        # Copy word embeddings from the previous weights
 | 
			
		||||
        self.tokens_embed.weight.data[:self.config.vocab_size, :] = old_embed.weight.data[:self.config.vocab_size, :]
 | 
			
		||||
 | 
			
		||||
    def forward(self, input_ids, position_ids=None, token_type_ids=None):
 | 
			
		||||
        if position_ids is None:
 | 
			
		||||
            # This was used when we had a single embedding matrix for position and token embeddings
 | 
			
		||||
            # start = self.config.vocab_size + self.config.n_special
 | 
			
		||||
            # end = start + input_ids.size(-1)
 | 
			
		||||
            # position_ids = torch.arange(start, end, dtype=torch.long, device=input_ids.device)
 | 
			
		||||
            position_ids = torch.arange(input_ids.size(-1), dtype=torch.long, device=input_ids.device)
 | 
			
		||||
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
 | 
			
		||||
 | 
			
		||||
        input_shape = input_ids.size()
 | 
			
		||||
        input_ids = input_ids.view(-1, input_ids.size(-1))
 | 
			
		||||
        position_ids = position_ids.view(-1, position_ids.size(-1))
 | 
			
		||||
 | 
			
		||||
        inputs_embeds = self.tokens_embed(input_ids)
 | 
			
		||||
        position_embeds = self.positions_embed(position_ids)
 | 
			
		||||
        if token_type_ids is not None:
 | 
			
		||||
            token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1))
 | 
			
		||||
            token_type_embeds = self.tokens_embed(token_type_ids)
 | 
			
		||||
        else:
 | 
			
		||||
            token_type_embeds = 0
 | 
			
		||||
        # Add the position information to the input embeddings
 | 
			
		||||
        # h = e.sum(dim=2)
 | 
			
		||||
        hidden_states = inputs_embeds + position_embeds + token_type_embeds
 | 
			
		||||
        for block in self.h:
 | 
			
		||||
            hidden_states = block(hidden_states)
 | 
			
		||||
        output_shape = input_shape + (hidden_states.size(-1),)
 | 
			
		||||
        return hidden_states.view(*output_shape)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
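# Illustrative sketch (not from the original file): growing the token embedding
# matrix before fine-tuning with special tokens. The small configuration values are
# hypothetical and assume the OpenAIGPTConfig constructor accepts keyword arguments
# mirroring the attributes it sets.
def _example_set_num_special_tokens():
    config = OpenAIGPTConfig(vocab_size_or_config_json_file=100, n_special=0,
                             n_positions=16, n_ctx=16, n_embd=32, n_layer=2, n_head=4)
    model = OpenAIGPTModel(config)
    old_word_rows = model.tokens_embed.weight.data[:config.vocab_size].clone()
    model.set_num_special_tokens(3)
    assert model.tokens_embed.num_embeddings == config.total_tokens_embeddings == 100 + 3
    # The pretrained word rows are preserved; only the 3 new rows are re-initialized.
    assert torch.equal(model.tokens_embed.weight.data[:config.vocab_size], old_word_rows)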
class OpenAIGPTLMHeadModel(OpenAIGPTPreTrainedModel):
 | 
			
		||||
    """OpenAI GPT model with a Language Modeling head ("Improving Language Understanding by Generative Pre-Training").
 | 
			
		||||
 | 
			
		||||
    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
 | 
			
		||||
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
 | 
			
		||||
    Special tokens need to be trained during the fine-tuning if you use them.
 | 
			
		||||
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
 | 
			
		||||
 | 
			
		||||
    The embeddings are ordered as follows in the token embedding matrix:
 | 
			
		||||
        [0,                                                         ----------------------
 | 
			
		||||
         ...                                                        -> word embeddings
 | 
			
		||||
         config.vocab_size - 1,                                     ______________________
 | 
			
		||||
         config.vocab_size,
 | 
			
		||||
         ...                                                        -> special embeddings
 | 
			
		||||
         config.vocab_size + config.n_special - 1]                  ______________________
 | 
			
		||||
 | 
			
		||||
    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
 | 
			
		||||
        total_tokens_embeddings = config.vocab_size + config.n_special
 | 
			
		||||
    You should use the associated indices to index the embeddings.
 | 
			
		||||
 | 
			
		||||
    Params:
 | 
			
		||||
        config: a OpenAIGPTConfig class instance with the configuration to build a new model
 | 
			
		||||
 | 
			
		||||
    Inputs:
 | 
			
		||||
        `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] (or more generally [d_1, ..., d_n, sequence_length]
 | 
			
		||||
            where d_1 ... d_n are arbitrary dimensions) with the word BPE token indices selected in the range [0, total_tokens_embeddings[
 | 
			
		||||
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            with the position indices (selected in the range [0, config.n_positions - 1[.
 | 
			
		||||
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            You can use it to add a third type of embedding to each input token in the sequence
 | 
			
		||||
            (the previous two being the word and position embeddings).
 | 
			
		||||
            The input, position and token_type embeddings are summed inside the Transformer before the first
 | 
			
		||||
            self-attention block.
 | 
			
		||||
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
 | 
			
		||||
            with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss
 | 
			
		||||
            is only computed for the labels set in [0, ..., vocab_size]
 | 
			
		||||
 | 
			
		||||
    Outputs:
 | 
			
		||||
        if `lm_labels` is not `None`:
 | 
			
		||||
            Outputs the language modeling loss.
 | 
			
		||||
        else:
 | 
			
		||||
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, sequence_length, total_tokens_embeddings]
 | 
			
		||||
                (or more generally [d_1, ..., d_n, total_tokens_embeddings] where d_1 ... d_n are the dimensions of input_ids)
 | 
			
		||||
 | 
			
		||||
    Example usage:
 | 
			
		||||
    ```python
 | 
			
		||||
    # Already been converted into BPE token ids
 | 
			
		||||
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
 | 
			
		||||
 | 
			
		||||
    config = modeling_openai.OpenAIGPTConfig()
 | 
			
		||||
 | 
			
		||||
    model = modeling_openai.OpenAIGPTLMHeadModel(config)
 | 
			
		||||
    lm_logits = model(input_ids)
 | 
			
		||||
    ```
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(OpenAIGPTLMHeadModel, self).__init__(config)
 | 
			
		||||
        self.transformer = OpenAIGPTModel(config)
 | 
			
		||||
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
 | 
			
		||||
        self.apply(self.init_weights)
 | 
			
		||||
 | 
			
		||||
    def set_num_special_tokens(self, num_special_tokens):
 | 
			
		||||
        """ Update input and output embeddings with new embedding matrice
 | 
			
		||||
            Make sure we are sharing the embeddings
 | 
			
		||||
        """
 | 
			
		||||
        self.transformer.set_num_special_tokens(num_special_tokens)
 | 
			
		||||
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)
 | 
			
		||||
 | 
			
		||||
    def forward(self, input_ids, position_ids=None, token_type_ids=None, lm_labels=None):
 | 
			
		||||
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
 | 
			
		||||
        lm_logits = self.lm_head(hidden_states)
 | 
			
		||||
        if lm_labels is not None:
 | 
			
		||||
            # Shift so that tokens < n predict n
 | 
			
		||||
            shift_logits = lm_logits[..., :-1, :].contiguous()
 | 
			
		||||
            shift_labels = lm_labels[..., 1:].contiguous()
 | 
			
		||||
            # Flatten the tokens
 | 
			
		||||
            loss_fct = CrossEntropyLoss(ignore_index=-1)
 | 
			
		||||
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)),
 | 
			
		||||
                            shift_labels.view(-1))
 | 
			
		||||
            return loss
 | 
			
		||||
        return lm_logits
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
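# Illustrative sketch (not from the original file) of the `lm_labels` branch above:
# the logits at position i are scored against the token at position i + 1, and
# positions labelled -1 are ignored by the loss. The helper name is hypothetical.
def _example_shifted_lm_loss():
    vocab = 11
    lm_logits = torch.randn(2, 4, vocab)
    lm_labels = torch.tensor([[5, 3, 9, 1], [2, -1, -1, 7]])
    shift_logits = lm_logits[..., :-1, :].contiguous()
    shift_labels = lm_labels[..., 1:].contiguous()
    loss_fct = CrossEntropyLoss(ignore_index=-1)
    loss = loss_fct(shift_logits.view(-1, vocab), shift_labels.view(-1))
    assert loss.dim() == 0        # a single scalar averaged over the non-ignored positions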
class OpenAIGPTDoubleHeadsModel(OpenAIGPTPreTrainedModel):
 | 
			
		||||
    """OpenAI GPT model with a Language Modeling and a Multiple Choice head ("Improving Language Understanding by Generative Pre-Training").
 | 
			
		||||
 | 
			
		||||
    OpenAI GPT uses a single embedding matrix to store the word and special embeddings.
 | 
			
		||||
    Special tokens embeddings are additional tokens that are not pre-trained: [SEP], [CLS]...
 | 
			
		||||
    Special tokens need to be trained during the fine-tuning if you use them.
 | 
			
		||||
    The number of special embeddings can be controlled using the `set_num_special_tokens(num_special_tokens)` function.
 | 
			
		||||
 | 
			
		||||
    The embeddings are ordered as follows in the token embedding matrix:
 | 
			
		||||
        [0,                                                         ----------------------
 | 
			
		||||
         ...                                                        -> word embeddings
 | 
			
		||||
         config.vocab_size - 1,                                     ______________________
 | 
			
		||||
         config.vocab_size,
 | 
			
		||||
         ...                                                        -> special embeddings
 | 
			
		||||
         config.vocab_size + config.n_special - 1]                  ______________________
 | 
			
		||||
 | 
			
		||||
    where total_tokens_embeddings can be obtained as config.total_tokens_embeddings and is:
 | 
			
		||||
        total_tokens_embeddings = config.vocab_size + config.n_special
 | 
			
		||||
    You should use the associated indices to index the embeddings.
 | 
			
		||||
 | 
			
		||||
    Params:
 | 
			
		||||
        config: a OpenAIGPTConfig class instance with the configuration to build a new model
 | 
			
		||||
 | 
			
		||||
    Inputs:
 | 
			
		||||
        `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the BPE token
 | 
			
		||||
            indices selected in the range [0, total_tokens_embeddings[
 | 
			
		||||
        `mc_token_ids`: a torch.LongTensor of shape [batch_size, num_choices] with the index of the token from
 | 
			
		||||
            which we should take the hidden state to feed the multiple choice classifier (usually last token of the sequence)
 | 
			
		||||
        `position_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            with the position indices (selected in the range [0, config.n_positions - 1[.
 | 
			
		||||
        `token_type_ids`: an optional torch.LongTensor with the same shape as input_ids
 | 
			
		||||
            You can use it to add a third type of embedding to each input token in the sequence
 | 
			
		||||
            (the previous two being the word and position embeddings).
 | 
			
		||||
            The input, position and token_type embeddings are summed inside the Transformer before the first
 | 
			
		||||
            self-attention block.
 | 
			
		||||
        `lm_labels`: optional language modeling labels: torch.LongTensor of shape [batch_size, num_choices, sequence_length]
 | 
			
		||||
            with indices selected in [-1, 0, ..., total_tokens_embeddings]. All labels set to -1 are ignored (masked), the loss
 | 
			
		||||
            is only computed for the labels set in [0, ..., total_tokens_embeddings]
 | 
			
		||||
        `multiple_choice_labels`: optional multiple choice labels: torch.LongTensor of shape [batch_size]
 | 
			
		||||
            with indices selected in [0, ..., num_choices - 1].
 | 
			
		||||
 | 
			
		||||
    Outputs:
 | 
			
		||||
        if `lm_labels` and `multiple_choice_labels` are not `None`:
 | 
			
		||||
            Outputs a tuple of losses with the language modeling loss and the multiple choice loss.
 | 
			
		||||
        else: a tuple with
 | 
			
		||||
            `lm_logits`: the language modeling logits as a torch.FloatTensor of size [batch_size, num_choices, sequence_length, total_tokens_embeddings]
 | 
			
		||||
            `multiple_choice_logits`: the multiple choice logits as a torch.FloatTensor of size [batch_size, num_choices]
 | 
			
		||||
 | 
			
		||||
    Example usage:
 | 
			
		||||
    ```python
 | 
			
		||||
    # Already been converted into BPE token ids
 | 
			
		||||
    input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, number of choice, seq length)
 | 
			
		||||
    mc_token_ids = torch.LongTensor([[2, 1]])  # (bsz, number of choices)
 | 
			
		||||
 | 
			
		||||
    config = modeling_openai.OpenAIGPTConfig()
 | 
			
		||||
 | 
			
		||||
    model = modeling_openai.OpenAIGPTDoubleHeadsModel(config)
 | 
			
		||||
    lm_logits, multiple_choice_logits = model(input_ids, mc_token_ids)
 | 
			
		||||
    ```
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super(OpenAIGPTDoubleHeadsModel, self).__init__(config)
 | 
			
		||||
        self.transformer = OpenAIGPTModel(config)
 | 
			
		||||
        self.lm_head = OpenAIGPTLMHead(self.transformer.tokens_embed.weight, config)
 | 
			
		||||
        self.multiple_choice_head = OpenAIGPTMultipleChoiceHead(config)
 | 
			
		||||
        self.apply(self.init_weights)
 | 
			
		||||
 | 
			
		||||
    def set_num_special_tokens(self, num_special_tokens):
 | 
			
		||||
        """ Update input and output embeddings with new embedding matrice
 | 
			
		||||
            Make sure we are sharing the embeddings
 | 
			
		||||
        """
 | 
			
		||||
        self.transformer.set_num_special_tokens(num_special_tokens)
 | 
			
		||||
        self.lm_head.set_embeddings_weights(self.transformer.tokens_embed.weight)
 | 
			
		||||
 | 
			
		||||
    def forward(self, input_ids, mc_token_ids, lm_labels=None, mc_labels=None, token_type_ids=None, position_ids=None):
 | 
			
		||||
        hidden_states = self.transformer(input_ids, position_ids, token_type_ids)
 | 
			
		||||
        lm_logits = self.lm_head(hidden_states)
 | 
			
		||||
        mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids)
 | 
			
		||||
        losses = []
 | 
			
		||||
        if lm_labels is not None:
 | 
			
		||||
            shift_logits = lm_logits[..., :-1, :].contiguous()
 | 
			
		||||
            shift_labels = lm_labels[..., 1:].contiguous()
 | 
			
		||||
            loss_fct = CrossEntropyLoss(ignore_index=-1)
 | 
			
		||||
            losses.append(loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)))
 | 
			
		||||
        if mc_labels is not None:
 | 
			
		||||
            loss_fct = CrossEntropyLoss()
 | 
			
		||||
            losses.append(loss_fct(mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1)))
 | 
			
		||||
        if losses:
 | 
			
		||||
            return losses
 | 
			
		||||
        return lm_logits, mc_logits
 | 
			
		||||
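# Illustrative shapes (not from the original file) for the double-heads forward above,
# with a batch of 1 example and 2 choices of 3 BPE tokens each:
#
#   input_ids    = torch.LongTensor([[[31, 51, 99], [15, 5, 0]]])  # (bsz, num_choices, seq_len) = (1, 2, 3)
#   mc_token_ids = torch.LongTensor([[2, 2]])                      # (bsz, num_choices): index of the last token
#   lm_logits, mc_logits = model(input_ids, mc_token_ids)
#   # lm_logits: (1, 2, 3, total_tokens_embeddings)   mc_logits: (1, 2)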
							
								
								
									
 1387  pytorch_pretrained_bert/modeling_transfo_xl.py  (new file; file diff suppressed because it is too large)
  402  pytorch_pretrained_bert/modeling_transfo_xl_utilities.py  (new file)
@@ -0,0 +1,402 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
""" Utilities for PyTorch Transformer XL model.
 | 
			
		||||
    Directly adapted from https://github.com/kimiyoung/transformer-xl.
 | 
			
		||||
"""
 | 
			
		||||
 | 
			
		||||
from collections import defaultdict
 | 
			
		||||
 | 
			
		||||
import numpy as np
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
import torch.nn as nn
 | 
			
		||||
import torch.nn.functional as F
 | 
			
		||||
 | 
			
		||||
# CUDA_MAJOR = int(torch.version.cuda.split('.')[0])
 | 
			
		||||
# CUDA_MINOR = int(torch.version.cuda.split('.')[1])
 | 
			
		||||
 | 
			
		||||
class ProjectedAdaptiveLogSoftmax(nn.Module):
 | 
			
		||||
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
 | 
			
		||||
                 keep_order=False):
 | 
			
		||||
        super(ProjectedAdaptiveLogSoftmax, self).__init__()
 | 
			
		||||
 | 
			
		||||
        self.n_token = n_token
 | 
			
		||||
        self.d_embed = d_embed
 | 
			
		||||
        self.d_proj = d_proj
 | 
			
		||||
 | 
			
		||||
        self.cutoffs = cutoffs + [n_token]
 | 
			
		||||
        self.cutoff_ends = [0] + self.cutoffs
 | 
			
		||||
        self.div_val = div_val
 | 
			
		||||
 | 
			
		||||
        self.shortlist_size = self.cutoffs[0]
 | 
			
		||||
        self.n_clusters = len(self.cutoffs) - 1
 | 
			
		||||
        self.head_size = self.shortlist_size + self.n_clusters
 | 
			
		||||
 | 
			
		||||
        if self.n_clusters > 0:
 | 
			
		||||
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
 | 
			
		||||
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
 | 
			
		||||
 | 
			
		||||
        self.out_layers = nn.ModuleList()
 | 
			
		||||
        self.out_projs = nn.ParameterList()
 | 
			
		||||
 | 
			
		||||
        if div_val == 1:
 | 
			
		||||
            for i in range(len(self.cutoffs)):
 | 
			
		||||
                if d_proj != d_embed:
 | 
			
		||||
                    self.out_projs.append(
 | 
			
		||||
                        nn.Parameter(torch.Tensor(d_proj, d_embed))
 | 
			
		||||
                    )
 | 
			
		||||
                else:
 | 
			
		||||
                    self.out_projs.append(None)
 | 
			
		||||
 | 
			
		||||
            self.out_layers.append(nn.Linear(d_embed, n_token))
 | 
			
		||||
        else:
 | 
			
		||||
            for i in range(len(self.cutoffs)):
 | 
			
		||||
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
 | 
			
		||||
                d_emb_i = d_embed // (div_val ** i)
 | 
			
		||||
 | 
			
		||||
                self.out_projs.append(
 | 
			
		||||
                    nn.Parameter(torch.Tensor(d_proj, d_emb_i))
 | 
			
		||||
                )
 | 
			
		||||
 | 
			
		||||
                self.out_layers.append(nn.Linear(d_emb_i, r_idx-l_idx))
 | 
			
		||||
 | 
			
		||||
        self.keep_order = keep_order
 | 
			
		||||
 | 
			
		||||
    def _compute_logit(self, hidden, weight, bias, proj):
 | 
			
		||||
        if proj is None:
 | 
			
		||||
            logit = F.linear(hidden, weight, bias=bias)
 | 
			
		||||
        else:
 | 
			
		||||
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
 | 
			
		||||
            proj_hid = F.linear(hidden, proj.t().contiguous())
 | 
			
		||||
            logit = F.linear(proj_hid, weight, bias=bias)
 | 
			
		||||
            # else:
 | 
			
		||||
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
 | 
			
		||||
            #     if bias is not None:
 | 
			
		||||
            #         logit = logit + bias
 | 
			
		||||
 | 
			
		||||
        return logit
 | 
			
		||||
 | 
			
		||||
    def forward(self, hidden, target=None, keep_order=False):
 | 
			
		||||
        '''
 | 
			
		||||
            Params:
 | 
			
		||||
                hidden :: [len*bsz x d_proj]
 | 
			
		||||
                target :: [len*bsz]
 | 
			
		||||
            Return:
 | 
			
		||||
                if target is not None:
 | 
			
		||||
                    out :: [len*bsz] Negative log likelihood
 | 
			
		||||
                else:
 | 
			
		||||
                    out :: [len*bsz x n_tokens] log probabilities of tokens over the vocabulary
 | 
			
		||||
            We could replace this implementation with the native PyTorch one
 | 
			
		||||
            if theirs had an option to set the bias on all clusters.
 | 
			
		||||
            here: https://github.com/pytorch/pytorch/blob/dbe6a7a9ff1a364a8706bf5df58a1ca96d2fd9da/torch/nn/modules/adaptive.py#L138
 | 
			
		||||
        '''
 | 
			
		||||
 | 
			
		||||
        if target is not None:
 | 
			
		||||
            target = target.view(-1)
 | 
			
		||||
            if hidden.size(0) != target.size(0):
 | 
			
		||||
                raise RuntimeError('Input and target should have the same size '
 | 
			
		||||
                                'in the batch dimension.')
 | 
			
		||||
 | 
			
		||||
        if self.n_clusters == 0:
 | 
			
		||||
            logit = self._compute_logit(hidden, self.out_layers[0].weight,
 | 
			
		||||
                                        self.out_layers[0].bias, self.out_projs[0])
 | 
			
		||||
            if target is not None:
 | 
			
		||||
                output = -F.log_softmax(logit, dim=-1) \
 | 
			
		||||
                        .gather(1, target.unsqueeze(1)).squeeze(1)
 | 
			
		||||
            else:
 | 
			
		||||
                output = F.log_softmax(logit, dim=-1)
 | 
			
		||||
        else:
 | 
			
		||||
            # construct weights and biases
 | 
			
		||||
            weights, biases = [], []
 | 
			
		||||
            for i in range(len(self.cutoffs)):
 | 
			
		||||
                if self.div_val == 1:
 | 
			
		||||
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
 | 
			
		||||
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
 | 
			
		||||
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
 | 
			
		||||
                else:
 | 
			
		||||
                    weight_i = self.out_layers[i].weight
 | 
			
		||||
                    bias_i = self.out_layers[i].bias
 | 
			
		||||
 | 
			
		||||
                if i == 0:
 | 
			
		||||
                    weight_i = torch.cat(
 | 
			
		||||
                        [weight_i, self.cluster_weight], dim=0)
 | 
			
		||||
                    bias_i = torch.cat(
 | 
			
		||||
                        [bias_i, self.cluster_bias], dim=0)
 | 
			
		||||
 | 
			
		||||
                weights.append(weight_i)
 | 
			
		||||
                biases.append(bias_i)
 | 
			
		||||
 | 
			
		||||
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
 | 
			
		||||
 | 
			
		||||
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
 | 
			
		||||
            head_logprob = F.log_softmax(head_logit, dim=1)
 | 
			
		||||
 | 
			
		||||
            if target is None:
 | 
			
		||||
                out = hidden.new_empty((head_logit.size(0), self.n_token))
 | 
			
		||||
            else:
 | 
			
		||||
                out = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
 | 
			
		||||
 | 
			
		||||
            offset = 0
 | 
			
		||||
            cutoff_values = [0] + self.cutoffs
 | 
			
		||||
            for i in range(len(cutoff_values) - 1):
 | 
			
		||||
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
 | 
			
		||||
 | 
			
		||||
                if target is not None:
 | 
			
		||||
                    mask_i = (target >= l_idx) & (target < r_idx)
 | 
			
		||||
                    indices_i = mask_i.nonzero().squeeze()
 | 
			
		||||
 | 
			
		||||
                    if indices_i.numel() == 0:
 | 
			
		||||
                        continue
 | 
			
		||||
 | 
			
		||||
                    target_i = target.index_select(0, indices_i) - l_idx
 | 
			
		||||
                    head_logprob_i = head_logprob.index_select(0, indices_i)
 | 
			
		||||
                    hidden_i = hidden.index_select(0, indices_i)
 | 
			
		||||
                else:
 | 
			
		||||
                    hidden_i = hidden
 | 
			
		||||
 | 
			
		||||
                if i == 0:
 | 
			
		||||
                    if target is not None:
 | 
			
		||||
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
 | 
			
		||||
                    else:
 | 
			
		||||
                        out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
 | 
			
		||||
                else:
 | 
			
		||||
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
 | 
			
		||||
 | 
			
		||||
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
 | 
			
		||||
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
 | 
			
		||||
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
 | 
			
		||||
                    if target is not None:
 | 
			
		||||
                        logprob_i = head_logprob_i[:, cluster_prob_idx] \
 | 
			
		||||
                                + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
 | 
			
		||||
                    else:
 | 
			
		||||
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
 | 
			
		||||
                        out[:, l_idx:r_idx] = logprob_i
 | 
			
		||||
 | 
			
		||||
                if target is not None:
 | 
			
		||||
                    if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
 | 
			
		||||
                        out.index_copy_(0, indices_i, -logprob_i)
 | 
			
		||||
                    else:
 | 
			
		||||
                        out[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
 | 
			
		||||
                    offset += logprob_i.size(0)
 | 
			
		||||
 | 
			
		||||
        return out
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    def log_prob(self, hidden):
 | 
			
		||||
        r""" Computes log probabilities for all :math:`n\_classes`
 | 
			
		||||
        From: https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/adaptive.py
 | 
			
		||||
        Args:
 | 
			
		||||
            hidden (Tensor): a minibatch of examples
 | 
			
		||||
        Returns:
 | 
			
		||||
            log-probabilities for each class :math:`c`
 | 
			
		||||
            in range :math:`0 <= c <= n\_classes`, where :math:`n\_classes` is a
 | 
			
		||||
            parameter passed to ``AdaptiveLogSoftmaxWithLoss`` constructor.
 | 
			
		||||
        Shape:
 | 
			
		||||
            - Input: :math:`(N, in\_features)`
 | 
			
		||||
            - Output: :math:`(N, n\_classes)`
 | 
			
		||||
        """
 | 
			
		||||
        if self.n_clusters == 0:
 | 
			
		||||
            logit = self._compute_logit(hidden, self.out_layers[0].weight,
 | 
			
		||||
                                        self.out_layers[0].bias, self.out_projs[0])
 | 
			
		||||
            return F.log_softmax(logit, dim=-1)
 | 
			
		||||
        else:
 | 
			
		||||
            # construct weights and biases
 | 
			
		||||
            weights, biases = [], []
 | 
			
		||||
            for i in range(len(self.cutoffs)):
 | 
			
		||||
                if self.div_val == 1:
 | 
			
		||||
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
 | 
			
		||||
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
 | 
			
		||||
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
 | 
			
		||||
                else:
 | 
			
		||||
                    weight_i = self.out_layers[i].weight
 | 
			
		||||
                    bias_i = self.out_layers[i].bias
 | 
			
		||||
 | 
			
		||||
                if i == 0:
 | 
			
		||||
                    weight_i = torch.cat(
 | 
			
		||||
                        [weight_i, self.cluster_weight], dim=0)
 | 
			
		||||
                    bias_i = torch.cat(
 | 
			
		||||
                        [bias_i, self.cluster_bias], dim=0)
 | 
			
		||||
 | 
			
		||||
                weights.append(weight_i)
 | 
			
		||||
                biases.append(bias_i)
 | 
			
		||||
 | 
			
		||||
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
 | 
			
		||||
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
 | 
			
		||||
 | 
			
		||||
            out = hidden.new_empty((head_logit.size(0), self.n_token))
 | 
			
		||||
            head_logprob = F.log_softmax(head_logit, dim=1)
 | 
			
		||||
 | 
			
		||||
            cutoff_values = [0] + self.cutoffs
 | 
			
		||||
            for i in range(len(cutoff_values) - 1):
 | 
			
		||||
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
 | 
			
		||||
 | 
			
		||||
                if i == 0:
 | 
			
		||||
                    out[:, :self.cutoffs[0]] = head_logprob[:, :self.cutoffs[0]]
 | 
			
		||||
                else:
 | 
			
		||||
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
 | 
			
		||||
 | 
			
		||||
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
 | 
			
		||||
                    tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
 | 
			
		||||
 | 
			
		||||
                    logprob_i = head_logprob[:, self.cutoffs[0] + i - 1, None] + tail_logprob_i
 | 
			
		||||
                    out[:, start_idx:stop_idx] = logprob_i
 | 
			
		||||
 | 
			
		||||
            return out
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
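# Illustrative sketch (not from the original file): minimal usage of the class above.
# Sizes, cutoffs and targets are hypothetical; d_proj is kept different from d_embed so
# every cluster gets a real projection parameter, which is then initialized explicitly
# (the surrounding model normally takes care of initialization).
def _example_projected_adaptive_softmax():
    n_token, d_embed, d_proj = 1000, 32, 16
    crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_proj,
                                       cutoffs=[100, 500], div_val=1)
    for proj in crit.out_projs:
        nn.init.normal_(proj, std=0.02)
    hidden = torch.randn(6, d_proj)                           # (len * bsz, d_proj)
    target = torch.tensor([3, 40, 150, 420, 600, 900])        # two targets per cluster
    nll = crit(hidden, target)                                # per-position negative log likelihood
    log_probs = crit(hidden)                                  # full distribution over the vocabulary
    assert nll.shape == (6,) and log_probs.shape == (6, n_token)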
class LogUniformSampler(object):
 | 
			
		||||
    def __init__(self, range_max, n_sample):
 | 
			
		||||
        """
 | 
			
		||||
        Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
 | 
			
		||||
            `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
 | 
			
		||||
 | 
			
		||||
        expected count can be approximated by 1 - (1 - p)^n
 | 
			
		||||
        and we use a numerically stable version -expm1(num_tries * log1p(-p))
 | 
			
		||||
 | 
			
		||||
        Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run
 | 
			
		||||
        """
 | 
			
		||||
        with torch.no_grad():
 | 
			
		||||
            self.range_max = range_max
 | 
			
		||||
            log_indices = torch.arange(1., range_max+2., 1.).log_()
 | 
			
		||||
            self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]
 | 
			
		||||
            # print('P', self.dist.numpy().tolist()[-30:])
 | 
			
		||||
 | 
			
		||||
            self.log_q = (- (-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float()
 | 
			
		||||
 | 
			
		||||
        self.n_sample = n_sample
 | 
			
		||||
 | 
			
		||||
    def sample(self, labels):
 | 
			
		||||
        """
 | 
			
		||||
            labels: [b1, b2]
 | 
			
		||||
        Return
 | 
			
		||||
            true_log_probs: [b1, b2]
 | 
			
		||||
            samp_log_probs: [n_sample]
 | 
			
		||||
            neg_samples: [n_sample]
 | 
			
		||||
        """
 | 
			
		||||
 | 
			
		||||
        # neg_samples = torch.empty(0).long()
 | 
			
		||||
        n_sample = self.n_sample
 | 
			
		||||
        n_tries = 2 * n_sample
 | 
			
		||||
 | 
			
		||||
        with torch.no_grad():
 | 
			
		||||
            neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique()
 | 
			
		||||
            device = labels.device
 | 
			
		||||
            neg_samples = neg_samples.to(device)
 | 
			
		||||
            true_log_probs = self.log_q[labels].to(device)
 | 
			
		||||
            samp_log_probs = self.log_q[neg_samples].to(device)
 | 
			
		||||
            return true_log_probs, samp_log_probs, neg_samples
 | 
			
		||||
 | 
			
		||||
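# Illustrative sketch (not from the original file): a quick check of the log-uniform
# distribution described in the docstring above; P(class) follows
# (log(class + 2) - log(class + 1)) / log(range_max + 1) and sums to one.
def _example_log_uniform_dist(range_max=20):
    import math
    sampler = LogUniformSampler(range_max, n_sample=4)
    p_class0 = (math.log(0 + 2) - math.log(0 + 1)) / math.log(range_max + 1)
    assert abs(sampler.dist[0].item() - p_class0) < 1e-5
    assert abs(sampler.dist.sum().item() - 1.0) < 1e-5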
def sample_logits(embedding, bias, labels, inputs, sampler):
 | 
			
		||||
    """
 | 
			
		||||
        embedding: an nn.Embedding layer
 | 
			
		||||
        bias: [n_vocab]
 | 
			
		||||
        labels: [b1, b2]
 | 
			
		||||
        inputs: [b1, b2, n_emb]
 | 
			
		||||
        sampler: you may use a LogUniformSampler
 | 
			
		||||
    Return
 | 
			
		||||
        logits: [b1, b2, 1 + n_sample]
 | 
			
		||||
    """
 | 
			
		||||
    true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
 | 
			
		||||
    n_sample = neg_samples.size(0)
 | 
			
		||||
    b1, b2 = labels.size(0), labels.size(1)
 | 
			
		||||
    all_ids = torch.cat([labels.view(-1), neg_samples])
 | 
			
		||||
    all_w = embedding(all_ids)
 | 
			
		||||
    true_w = all_w[: -n_sample].view(b1, b2, -1)
 | 
			
		||||
    sample_w = all_w[- n_sample:].view(n_sample, -1)
 | 
			
		||||
 | 
			
		||||
    all_b = bias[all_ids]
 | 
			
		||||
    true_b = all_b[: -n_sample].view(b1, b2)
 | 
			
		||||
    sample_b = all_b[- n_sample:]
 | 
			
		||||
 | 
			
		||||
    hit = (labels[:, :, None] == neg_samples).detach()
 | 
			
		||||
 | 
			
		||||
    true_logits = torch.einsum('ijk,ijk->ij',
 | 
			
		||||
        [true_w, inputs]) + true_b - true_log_probs
 | 
			
		||||
    sample_logits = torch.einsum('lk,ijk->ijl',
 | 
			
		||||
        [sample_w, inputs]) + sample_b - samp_log_probs
 | 
			
		||||
    sample_logits.masked_fill_(hit, -1e30)
 | 
			
		||||
    logits = torch.cat([true_logits[:, :, None], sample_logits], -1)
 | 
			
		||||
 | 
			
		||||
    return logits
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
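# Not part of the diff: a minimal sketch checking the expected-count formula quoted in the
# docstring above. For a proposal probability p and num_tries draws with replacement, the
# expected count 1 - (1 - p)**num_tries equals the numerically stable -expm1(num_tries * log1p(-p)).
# The helper name below is illustrative only.
import torch

def _check_expected_count_formula(range_max=1000, num_tries=64):
    log_indices = torch.arange(1., range_max + 2., 1.).log_()
    p = ((log_indices[1:] - log_indices[:-1]) / log_indices[-1]).double()
    naive = 1. - (1. - p) ** num_tries
    stable = -torch.expm1(num_tries * torch.log1p(-p))
    assert torch.allclose(naive, stable)
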
# class LogUniformSampler(object):
#     def __init__(self, range_max, unique=False):
#         """
#         Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py
#             `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)`
#         """
#         self.range_max = range_max
#         log_indices = torch.arange(1., range_max+2., 1.).log_()
#         self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1]

#         self.unique = unique

#         if self.unique:
#             self.exclude_mask = torch.ByteTensor(range_max).fill_(0)

#     def sample(self, n_sample, labels):
#         pos_sample, new_labels = labels.unique(return_inverse=True)
#         n_pos_sample = pos_sample.size(0)
#         n_neg_sample = n_sample - n_pos_sample

#         if self.unique:
#             self.exclude_mask.index_fill_(0, pos_sample, 1)
#             sample_dist = self.dist.clone().masked_fill_(self.exclude_mask, 0)
#             self.exclude_mask.index_fill_(0, pos_sample, 0)
#         else:
#             sample_dist = self.dist

#         neg_sample = torch.multinomial(sample_dist, n_neg_sample)

#         sample = torch.cat([pos_sample, neg_sample])
#         sample_prob = self.dist[sample]

#         return new_labels, sample, sample_prob

if __name__ == '__main__':
    S, B = 3, 4
    n_vocab = 10000
    n_sample = 5
    H = 32

    labels = torch.LongTensor(S, B).random_(0, n_vocab)

    # sampler = LogUniformSampler(n_vocab, unique=False)
    # new_labels, sample, sample_prob = sampler.sample(n_sample, labels)

    sampler = LogUniformSampler(n_vocab, n_sample)#, unique=True)
    # true_probs, samp_probs, neg_samples = sampler.sample(n_sample, labels)

    # print('true_probs', true_probs.numpy().tolist())
    # print('samp_probs', samp_probs.numpy().tolist())
    # print('neg_samples', neg_samples.numpy().tolist())

    # print('sum', torch.sum(sampler.dist).item())

    # assert torch.all(torch.sort(sample.unique())[0].eq(torch.sort(sample)[0])).item()

    embedding = nn.Embedding(n_vocab, H)
    bias = torch.zeros(n_vocab)
    inputs = torch.Tensor(S, B, H).normal_()

    # sample_logits takes (embedding, bias, labels, inputs, sampler) and returns a single
    # tensor of shape [S, B, 1 + n_sample]; column 0 holds the true-class logit.
    logits = sample_logits(embedding, bias, labels, inputs, sampler)
    print('logits', logits.detach().numpy().tolist())
    print('logits shape', logits.size())
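# Not part of the diff: one common way to turn the sampled logits above into a training loss.
# Because sample_logits puts the true class in column 0 for every position, the cross-entropy
# target is simply all zeros. The function name is illustrative, not from the repository.
import torch
import torch.nn.functional as F

def sampled_softmax_loss(logits):
    # logits: [b1, b2, 1 + n_sample]; column 0 is the true class
    flat = logits.reshape(-1, logits.size(-1))
    target = flat.new_zeros(flat.size(0), dtype=torch.long)
    return F.cross_entropy(flat, target)
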
@@ -1,5 +1,5 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -19,26 +19,164 @@ import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
import abc
import sys

def warmup_cosine(x, warmup=0.002):
    if x < warmup:
        return x/warmup
    return 0.5 * (1.0 + torch.cos(math.pi * x))
logger = logging.getLogger(__name__)

def warmup_constant(x, warmup=0.002):
    if x < warmup:
        return x/warmup
    return 1.0

def warmup_linear(x, warmup=0.002):
    if x < warmup:
        return x/warmup
    return 1.0 - x
if sys.version_info >= (3, 4):
    ABC = abc.ABC
else:
    ABC = abc.ABCMeta('ABC', (), {})


class _LRSchedule(ABC):
    """ Parent of all LRSchedules here. """
    warn_t_total = False        # is set to True for schedules where progressing beyond t_total steps doesn't make sense
    def __init__(self, warmup=0.002, t_total=-1, **kw):
        """
        :param warmup:  what fraction of t_total steps will be used for linear warmup
        :param t_total: how many training steps (updates) are planned
        :param kw:
        """
        super(_LRSchedule, self).__init__(**kw)
        if t_total < 0:
            logger.warning("t_total value of {} results in schedule not being applied".format(t_total))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        warmup = max(warmup, 0.)
        self.warmup, self.t_total = float(warmup), float(t_total)
        self.warned_for_t_total_at_progress = -1

    def get_lr(self, step, nowarn=False):
        """
        :param step:    which of t_total steps we're on
        :param nowarn:  set to True to suppress warning regarding training beyond specified 't_total' steps
        :return:        learning rate multiplier for current update
        """
        if self.t_total < 0:
            return 1.
        progress = float(step) / self.t_total
        ret = self.get_lr_(progress)
        # warning for exceeding t_total (only active with warmup_linear
        if not nowarn and self.warn_t_total and progress > 1. and progress > self.warned_for_t_total_at_progress:
            logger.warning(
                "Training beyond specified 't_total'. Learning rate multiplier set to {}. Please set 't_total' of {} correctly."
                    .format(ret, self.__class__.__name__))
            self.warned_for_t_total_at_progress = progress
        # end warning
        return ret

    @abc.abstractmethod
    def get_lr_(self, progress):
        """
        :param progress:    value between 0 and 1 (unless going beyond t_total steps) specifying training progress
        :return:            learning rate multiplier for current update
        """
        return 1.


class ConstantLR(_LRSchedule):
    def get_lr_(self, progress):
        return 1.


class WarmupCosineSchedule(_LRSchedule):
    """
    Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    Decreases learning rate from 1. to 0. over remaining `1 - warmup` steps following a cosine curve.
    If `cycles` (default=0.5) is different from default, learning rate follows cosine function after warmup.
    """
    warn_t_total = True
    def __init__(self, warmup=0.002, t_total=-1, cycles=.5, **kw):
        """
        :param warmup:      see LRSchedule
        :param t_total:     see LRSchedule
        :param cycles:      number of cycles. Default: 0.5, corresponding to cosine decay from 1. at progress==warmup and 0 at progress==1.
        :param kw:
        """
        super(WarmupCosineSchedule, self).__init__(warmup=warmup, t_total=t_total, **kw)
        self.cycles = cycles

    def get_lr_(self, progress):
        if progress < self.warmup:
            return progress / self.warmup
        else:
            progress = (progress - self.warmup) / (1 - self.warmup)   # progress after warmup
            return 0.5 * (1. + math.cos(math.pi * self.cycles * 2 * progress))


class WarmupCosineWithHardRestartsSchedule(WarmupCosineSchedule):
    """
    Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    If `cycles` (default=1.) is different from default, learning rate follows `cycles` times a cosine decaying
    learning rate (with hard restarts).
    """
    def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
        super(WarmupCosineWithHardRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)
        assert(cycles >= 1.)

    def get_lr_(self, progress):
        if progress < self.warmup:
            return progress / self.warmup
        else:
            progress = (progress - self.warmup) / (1 - self.warmup)     # progress after warmup
            ret = 0.5 * (1. + math.cos(math.pi * ((self.cycles * progress) % 1)))
            return ret


class WarmupCosineWithWarmupRestartsSchedule(WarmupCosineWithHardRestartsSchedule):
    """
    All training progress is divided in `cycles` (default=1.) parts of equal length.
    Every part follows a schedule with the first `warmup` fraction of the training steps linearly increasing from 0. to 1.,
    followed by a learning rate decreasing from 1. to 0. following a cosine curve.
    """
    def __init__(self, warmup=0.002, t_total=-1, cycles=1., **kw):
        assert(warmup * cycles < 1.)
        warmup = warmup * cycles if warmup >= 0 else warmup
        super(WarmupCosineWithWarmupRestartsSchedule, self).__init__(warmup=warmup, t_total=t_total, cycles=cycles, **kw)

    def get_lr_(self, progress):
        progress = progress * self.cycles % 1.
        if progress < self.warmup:
            return progress / self.warmup
        else:
            progress = (progress - self.warmup) / (1 - self.warmup)     # progress after warmup
            ret = 0.5 * (1. + math.cos(math.pi * progress))
            return ret


class WarmupConstantSchedule(_LRSchedule):
    """
    Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    Keeps learning rate equal to 1. after warmup.
    """
    def get_lr_(self, progress):
        if progress < self.warmup:
            return progress / self.warmup
        return 1.


class WarmupLinearSchedule(_LRSchedule):
    """
    Linearly increases learning rate from 0 to 1 over `warmup` fraction of training steps.
    Linearly decreases learning rate from 1. to 0. over remaining `1 - warmup` steps.
    """
    warn_t_total = True
    def get_lr_(self, progress):
        if progress < self.warmup:
            return progress / self.warmup
        return max((progress - 1.) / (self.warmup - 1.), 0.)


SCHEDULES = {
    'warmup_cosine':warmup_cosine,
    'warmup_constant':warmup_constant,
    'warmup_linear':warmup_linear,
    None:       ConstantLR,
    "none":     ConstantLR,
    "warmup_cosine": WarmupCosineSchedule,
    "warmup_constant": WarmupConstantSchedule,
    "warmup_linear": WarmupLinearSchedule
}


@@ -48,8 +186,11 @@ class BertAdam(Optimizer):
        lr: learning rate
        warmup: portion of t_total for the warmup, -1  means no warmup. Default: -1
        t_total: total number of training steps for the learning
            rate schedule, -1  means constant learning rate. Default: -1
        schedule: schedule to use for the warmup (see above). Default: 'warmup_linear'
            rate schedule, -1  means constant learning rate of 1. (no warmup regardless of warmup setting). Default: -1
        schedule: schedule to use for the warmup (see above).
            Can be `'warmup_linear'`, `'warmup_constant'`, `'warmup_cosine'`, `'none'`, `None` or a `_LRSchedule` object (see below).
            If `None` or `'none'`, learning rate is always kept constant.
            Default : `'warmup_linear'`
        b1: Adams b1. Default: 0.9
        b2: Adams b2. Default: 0.999
        e: Adams epsilon. Default: 1e-6
@@ -57,21 +198,26 @@ class BertAdam(Optimizer):
        max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0
    """
    def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear',
                 b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01,
                 max_grad_norm=1.0):
                 b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0, **kwargs):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if schedule not in SCHEDULES:
        if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= warmup < 1.0 and not warmup == -1:
            raise ValueError("Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
        defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total,
        # initialize schedule object
        if not isinstance(schedule, _LRSchedule):
            schedule_type = SCHEDULES[schedule]
            schedule = schedule_type(warmup=warmup, t_total=t_total)
        else:
            if warmup != -1 or t_total != -1:
                logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
                               "Please specify custom warmup and t_total in _LRSchedule object.")
        defaults = dict(lr=lr, schedule=schedule,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay,
                        max_grad_norm=max_grad_norm)
        super(BertAdam, self).__init__(params, defaults)
@@ -83,11 +229,8 @@ class BertAdam(Optimizer):
                state = self.state[p]
                if len(state) == 0:
                    return [0]
                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                lr.append(lr_scheduled)
        return lr

@@ -143,11 +286,8 @@ class BertAdam(Optimizer):
                if group['weight_decay'] > 0.0:
                    update += group['weight_decay'] * p.data

                if group['t_total'] != -1:
                    schedule_fct = SCHEDULES[group['schedule']]
                    lr_scheduled = group['lr'] * schedule_fct(state['step']/group['t_total'], group['warmup'])
                else:
                    lr_scheduled = group['lr']
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])

                update_with_lr = lr_scheduled * update
                p.data.add_(-update_with_lr)
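# Not part of the diff: a usage sketch of the schedule objects introduced above, assuming they
# are importable from pytorch_pretrained_bert.optimization after this change. A _LRSchedule
# instance can be queried directly for the learning-rate multiplier, or passed to BertAdam via
# the `schedule` argument instead of a schedule name string.
import torch
from pytorch_pretrained_bert.optimization import BertAdam, WarmupCosineWithHardRestartsSchedule

schedule = WarmupCosineWithHardRestartsSchedule(warmup=0.1, t_total=1000, cycles=2.)
print([round(schedule.get_lr(step), 3) for step in (0, 50, 100, 500, 999)])

model = torch.nn.Linear(8, 2)   # stand-in for a real model
optimizer = BertAdam(model.parameters(), lr=5e-5, schedule=schedule)
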
pytorch_pretrained_bert/optimization_openai.py (new file, 127 lines)
@@ -0,0 +1,127 @@
# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch optimization for OpenAI GPT model."""

import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
import logging
from .optimization import SCHEDULES, _LRSchedule, WarmupCosineWithWarmupRestartsSchedule, \
    WarmupCosineWithHardRestartsSchedule, WarmupCosineSchedule, WarmupLinearSchedule, WarmupConstantSchedule

logger = logging.getLogger(__name__)


class OpenAIAdam(Optimizer):
    """Implements Open AI version of Adam algorithm with weight decay fix.
    """
    def __init__(self, params, lr=required, schedule='warmup_linear', warmup=-1, t_total=-1,
                 b1=0.9, b2=0.999, e=1e-8, weight_decay=0,
                 vector_l2=False, max_grad_norm=-1, **kwargs):
        if lr is not required and lr < 0.0:
            raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr))
        if not isinstance(schedule, _LRSchedule) and schedule not in SCHEDULES:
            raise ValueError("Invalid schedule parameter: {}".format(schedule))
        if not 0.0 <= b1 < 1.0:
            raise ValueError("Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1))
        if not 0.0 <= b2 < 1.0:
            raise ValueError("Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2))
        if not e >= 0.0:
            raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(e))
        # initialize schedule object
        if not isinstance(schedule, _LRSchedule):
            schedule_type = SCHEDULES[schedule]
            schedule = schedule_type(warmup=warmup, t_total=t_total)
        else:
            if warmup != -1 or t_total != -1:
                logger.warning("warmup and t_total on the optimizer are ineffective when _LRSchedule object is provided as schedule. "
                               "Please specify custom warmup and t_total in _LRSchedule object.")
        defaults = dict(lr=lr, schedule=schedule,
                        b1=b1, b2=b2, e=e, weight_decay=weight_decay, vector_l2=vector_l2,
                        max_grad_norm=max_grad_norm)
        super(OpenAIAdam, self).__init__(params, defaults)

    def get_lr(self):
        lr = []
        for group in self.param_groups:
            for p in group['params']:
                state = self.state[p]
                if len(state) == 0:
                    return [0]
                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])
                lr.append(lr_scheduled)
        return lr

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['b1'], group['b2']

                state['step'] += 1

                # Add grad clipping
                if group['max_grad_norm'] > 0:
                    clip_grad_norm_(p, group['max_grad_norm'])

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                denom = exp_avg_sq.sqrt().add_(group['e'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                lr_scheduled = group['lr']
                lr_scheduled *= group['schedule'].get_lr(state['step'])

                step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1

                p.data.addcdiv_(-step_size, exp_avg, denom)

                # Add weight decay at the end (fixed version)
                if (len(p.size()) > 1 or group['vector_l2']) and group['weight_decay'] > 0:
                    p.data.add_(-lr_scheduled * group['weight_decay'], p.data)

        return loss
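# Not part of the diff: a minimal sketch of driving OpenAIAdam through one step with the
# string-based schedule interface, assuming the import path pytorch_pretrained_bert.optimization_openai
# introduced by this file. The model here is a stand-in, not from the repository.
import torch
from pytorch_pretrained_bert.optimization_openai import OpenAIAdam

model = torch.nn.Linear(8, 8)
optimizer = OpenAIAdam(model.parameters(), lr=6.25e-5, schedule='warmup_linear',
                       warmup=0.002, t_total=10000, max_grad_norm=1.0, weight_decay=0.01)
loss = model(torch.randn(4, 8)).sum()
loss.backward()
optimizer.step()
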
@@ -1,5 +1,5 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,14 +14,13 @@
# limitations under the License.
"""Tokenization classes."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import, division, print_function, unicode_literals

import collections
import unicodedata
import os
import logging
import os
import unicodedata
from io import open

from .file_utils import cached_path

@@ -36,6 +35,15 @@ PRETRAINED_VOCAB_ARCHIVE_MAP = {
    'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
    'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
    'bert-base-uncased': 512,
    'bert-large-uncased': 512,
    'bert-base-cased': 512,
    'bert-large-cased': 512,
    'bert-base-multilingual-uncased': 512,
    'bert-base-multilingual-cased': 512,
    'bert-base-chinese': 512,
}
VOCAB_NAME = 'vocab.txt'


@@ -55,7 +63,7 @@ def load_vocab(vocab_file):


def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a peice of text."""
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    text = text.strip()
    if not text:
        return []
@@ -65,7 +73,23 @@ def whitespace_tokenize(text):

class BertTokenizer(object):
    """Runs end-to-end tokenization: punctuation splitting + wordpiece"""
    def __init__(self, vocab_file, do_lower_case=True):

    def __init__(self, vocab_file, do_lower_case=True, max_len=None, do_basic_tokenize=True,
                 never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
        """Constructs a BertTokenizer.

        Args:
          vocab_file: Path to a one-wordpiece-per-line vocabulary file
          do_lower_case: Whether to lower case the input
                         Only has an effect when do_wordpiece_only=False
          do_basic_tokenize: Whether to do basic tokenization before wordpiece.
          max_len: An artificial maximum length to truncate tokenized sequences to;
                         Effective maximum length is always the minimum of this
                         value (if specified) and the underlying BERT model's
                         sequence length.
          never_split: List of tokens which will never be split during tokenization.
                         Only has an effect when do_wordpiece_only=False
        """
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
@@ -73,14 +97,21 @@ class BertTokenizer(object):
        self.vocab = load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict(
            [(ids, tok) for tok, ids in self.vocab.items()])
        self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
        self.do_basic_tokenize = do_basic_tokenize
        if do_basic_tokenize:
          self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
                                                never_split=never_split)
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
        self.max_len = max_len if max_len is not None else int(1e12)

    def tokenize(self, text):
        split_tokens = []
        for token in self.basic_tokenizer.tokenize(text):
            for sub_token in self.wordpiece_tokenizer.tokenize(token):
                split_tokens.append(sub_token)
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(text):
                for sub_token in self.wordpiece_tokenizer.tokenize(token):
                    split_tokens.append(sub_token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def convert_tokens_to_ids(self, tokens):
@@ -88,6 +119,12 @@ class BertTokenizer(object):
        ids = []
        for token in tokens:
            ids.append(self.vocab[token])
        if len(ids) > self.max_len:
            logger.warning(
                "Token indices sequence length is longer than the specified maximum "
                " sequence length for this BERT model ({} > {}). Running this"
                " sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
            )
        return ids

    def convert_ids_to_tokens(self, ids):
@@ -97,27 +134,52 @@ class BertTokenizer(object):
            tokens.append(self.ids_to_tokens[i])
        return tokens

    def save_vocabulary(self, vocab_path):
        """Save the tokenizer vocabulary to a directory or file."""
        index = 0
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_NAME)
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning("Saving vocabulary to {}: vocabulary indices are not consecutive."
                                   " Please check that the vocabulary is not corrupted!".format(vocab_file))
                    index = token_index
                writer.write(token + u'\n')
                index += 1
        return vocab_file

    @classmethod
    def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs):
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedBertModel from a pre-trained model file.
        Download and cache the pre-trained model file if needed.
        """
        if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP:
            vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name]
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
            vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
            if '-cased' in pretrained_model_name_or_path and kwargs.get('do_lower_case', True):
                logger.warning("The pre-trained model you are loading is a cased model but you have not set "
                               "`do_lower_case` to False. We are setting `do_lower_case=False` for you but "
                               "you may want to check this behavior.")
                kwargs['do_lower_case'] = False
            elif '-cased' not in pretrained_model_name_or_path and not kwargs.get('do_lower_case', True):
                logger.warning("The pre-trained model you are loading is an uncased model but you have set "
                               "`do_lower_case` to False. We are setting `do_lower_case=True` for you "
                               "but you may want to check this behavior.")
                kwargs['do_lower_case'] = True
        else:
            vocab_file = pretrained_model_name
            vocab_file = pretrained_model_name_or_path
        if os.path.isdir(vocab_file):
            vocab_file = os.path.join(vocab_file, VOCAB_NAME)
        # redirect to the cache, if necessary
        try:
            resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
        except FileNotFoundError:
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find any file "
                "associated to this path or url.".format(
                    pretrained_model_name,
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
                    vocab_file))
            return None
@@ -126,6 +188,11 @@ class BertTokenizer(object):
        else:
            logger.info("loading vocabulary file {} from cache at {}".format(
                vocab_file, resolved_vocab_file))
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer wont index sequences longer
            # than the number of positional embeddings
            max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
            kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
        # Instantiate tokenizer.
        tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
        return tokenizer
@@ -134,13 +201,16 @@ class BertTokenizer(object):
class BasicTokenizer(object):
    """Runs basic tokenization (punctuation splitting, lower casing, etc.)."""

    def __init__(self, do_lower_case=True):
    def __init__(self,
                 do_lower_case=True,
                 never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
        """Constructs a BasicTokenizer.

        Args:
          do_lower_case: Whether to lower case the input.
        """
        self.do_lower_case = do_lower_case
        self.never_split = never_split

    def tokenize(self, text):
        """Tokenizes a piece of text."""
@@ -155,7 +225,7 @@ class BasicTokenizer(object):
        orig_tokens = whitespace_tokenize(text)
        split_tokens = []
        for token in orig_tokens:
            if self.do_lower_case:
            if self.do_lower_case and token not in self.never_split:
                token = token.lower()
                token = self._run_strip_accents(token)
            split_tokens.extend(self._run_split_on_punc(token))
@@ -176,6 +246,8 @@ class BasicTokenizer(object):

    def _run_split_on_punc(self, text):
        """Splits punctuation on a piece of text."""
        if text in self.never_split:
            return [text]
        chars = list(text)
        i = 0
        start_new_word = True
@@ -193,7 +265,7 @@ class BasicTokenizer(object):
            i += 1

        return ["".join(x) for x in output]

    def _tokenize_chinese_chars(self, text):
        """Adds whitespace around any CJK character."""
        output = []
@@ -218,17 +290,17 @@ class BasicTokenizer(object):
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if ((cp >= 0x4E00 and cp <= 0x9FFF) or  #
            (cp >= 0x3400 and cp <= 0x4DBF) or  #
            (cp >= 0x20000 and cp <= 0x2A6DF) or  #
            (cp >= 0x2A700 and cp <= 0x2B73F) or  #
            (cp >= 0x2B740 and cp <= 0x2B81F) or  #
            (cp >= 0x2B820 and cp <= 0x2CEAF) or
            (cp >= 0xF900 and cp <= 0xFAFF) or  #
            (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
                (cp >= 0x3400 and cp <= 0x4DBF) or  #
                (cp >= 0x20000 and cp <= 0x2A6DF) or  #
                (cp >= 0x2A700 and cp <= 0x2B73F) or  #
                (cp >= 0x2B740 and cp <= 0x2B81F) or  #
                (cp >= 0x2B820 and cp <= 0x2CEAF) or
                (cp >= 0xF900 and cp <= 0xFAFF) or  #
                (cp >= 0x2F800 and cp <= 0x2FA1F)):  #
            return True

        return False

    def _clean_text(self, text):
        """Performs invalid character removal and whitespace cleanup on text."""
        output = []
@@ -263,7 +335,7 @@ class WordpieceTokenizer(object):

        Args:
          text: A single token or whitespace separated tokens. This should have
            already been passed through `BasicTokenizer.
            already been passed through `BasicTokenizer`.

        Returns:
          A list of wordpiece tokens.
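# Not part of the diff: a short usage sketch of the updated BertTokenizer, assuming the
# post-change import path pytorch_pretrained_bert.tokenization and that the vocabulary
# download succeeds. With a cased checkpoint, from_pretrained now also guards the
# do_lower_case flag, and max_len is capped by the positional-embedding size of the model.
from pytorch_pretrained_bert.tokenization import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-cased', do_lower_case=False)
tokens = tokenizer.tokenize("HuggingFace tokenizers keep [SEP] intact")
ids = tokenizer.convert_tokens_to_ids(tokens)
assert tokenizer.convert_ids_to_tokens(ids) == tokens
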
pytorch_pretrained_bert/tokenization_gpt2.py (new file, 301 lines)
@@ -0,0 +1,301 @@
# coding=utf-8
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for OpenAI GPT."""
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import sys
import json
import logging
import os
import regex as re
from io import open

try:
    from functools import lru_cache
except ImportError:
    # Just a dummy decorator to get the checks to run on python2
    # because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now.
    def lru_cache():
        return lambda func: func

from .file_utils import cached_path

logger = logging.getLogger(__name__)

PRETRAINED_VOCAB_ARCHIVE_MAP = {
    'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-vocab.json",
}
PRETRAINED_MERGES_ARCHIVE_MAP = {
    'gpt2': "https://s3.amazonaws.com/models.huggingface.co/bert/gpt2-merges.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
    'gpt2': 1024,
}
VOCAB_NAME = 'vocab.json'
MERGES_NAME = 'merges.txt'
SPECIAL_TOKENS_NAME = 'special_tokens.txt'

@lru_cache()
def bytes_to_unicode():
    """
    Returns list of utf-8 byte and a corresponding list of unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    And avoids mapping to whitespace/control characters the bpe code barfs on.
    """
    _chr = unichr if sys.version_info[0] == 2 else chr
    bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8+n)
            n += 1
    cs = [_chr(n) for n in cs]
    return dict(zip(bs, cs))
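# Not part of the diff: a tiny Python-3 check of the byte<->unicode table built above. Every
# possible byte is mapped to a printable unicode character and the mapping is invertible, which
# is what lets the GPT-2 byte-level BPE handle arbitrary UTF-8 text without an <unk> token.
_byte_encoder = bytes_to_unicode()
_byte_decoder = {v: k for k, v in _byte_encoder.items()}
assert len(_byte_encoder) == 256
_text = "déjà vu"
_mapped = ''.join(_byte_encoder[b] for b in _text.encode('utf-8'))
assert bytearray(_byte_decoder[c] for c in _mapped).decode('utf-8') == _text
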
def get_pairs(word):
    """Return set of symbol pairs in a word.

    Word is represented as tuple of symbols (symbols being variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs

class GPT2Tokenizer(object):
    """
    GPT-2 BPE tokenizer. Peculiarities:
        - Byte-level BPE
    """
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
        """
        Instantiate a PreTrainedBertModel from a pre-trained model file.
        Download and cache the pre-trained model file if needed.
        """
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
            vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
            merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path]
            special_tokens_file = None
        else:
            vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
            merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME)
            special_tokens_file = os.path.join(pretrained_model_name_or_path, SPECIAL_TOKENS_NAME)
            if not os.path.exists(special_tokens_file):
                special_tokens_file = None
            else:
                logger.info("loading special tokens file {}".format(special_tokens_file))
        # redirect to the cache, if necessary
        try:
            resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
            resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir)
        except EnvironmentError:
            logger.error(
                "Model name '{}' was not found in model name list ({}). "
                "We assumed '{}' was a path or url but couldn't find files {} and {} "
                "at this path or url.".format(
                    pretrained_model_name_or_path,
                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
                    pretrained_model_name_or_path,
                    vocab_file, merges_file))
            return None
        if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file:
            logger.info("loading vocabulary file {}".format(vocab_file))
            logger.info("loading merges file {}".format(merges_file))
        else:
            logger.info("loading vocabulary file {} from cache at {}".format(
                vocab_file, resolved_vocab_file))
            logger.info("loading merges file {} from cache at {}".format(
                merges_file, resolved_merges_file))
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
            # if we're using a pretrained model, ensure the tokenizer wont index sequences longer
            # than the number of positional embeddings
            max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
            kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
        # Instantiate tokenizer.
        if special_tokens_file and 'special_tokens' not in kwargs:
            special_tokens = open(special_tokens_file, encoding='utf-8').read().split('\n')[:-1]
        else:
            special_tokens = kwargs.pop('special_tokens', [])
        tokenizer = cls(resolved_vocab_file, resolved_merges_file, special_tokens=special_tokens, *inputs, **kwargs)
        return tokenizer

    def __init__(self, vocab_file, merges_file, errors='replace', special_tokens=None, max_len=None):
        self.max_len = max_len if max_len is not None else int(1e12)
        self.encoder = json.load(open(vocab_file))
        self.decoder = {v:k for k,v in self.encoder.items()}
        self.errors = errors # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v:k for k, v in self.byte_encoder.items()}
        bpe_data = open(merges_file, encoding='utf-8').read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_data]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

        self.special_tokens = {}
        self.special_tokens_decoder = {}
        self.set_special_tokens(special_tokens)

    def __len__(self):
        return len(self.encoder) + len(self.special_tokens)

    def set_special_tokens(self, special_tokens):
        """ Add a list of additional tokens to the encoder.
            The additional tokens are indexed starting from the last index of the
            current vocabulary in the order of the `special_tokens` list.
        """
        if not special_tokens:
            self.special_tokens = {}
            self.special_tokens_decoder = {}
            return
        self.special_tokens = dict((tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens))
        self.special_tokens_decoder = {v:k for k, v in self.special_tokens.items()}
        logger.info("Special tokens {}".format(self.special_tokens))

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    new_word.extend(word[i:])
                    break

                if word[i] == first and i < len(word)-1 and word[i+1] == second:
                    new_word.append(first+second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def tokenize(self, text):
        """ Tokenize a string. """
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(self.byte_encoder[ord(b)] for b in token)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def convert_tokens_to_ids(self, tokens):
        """ Converts a sequence of tokens into ids using the vocab. """
        ids = []
        if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)):
            if tokens in self.special_tokens:
                return self.special_tokens[tokens]
            else:
                return self.encoder.get(tokens, 0)
        for token in tokens:
            if token in self.special_tokens:
                ids.append(self.special_tokens[token])
            else:
                ids.append(self.encoder.get(token, 0))
        if len(ids) > self.max_len:
            logger.warning(
                "Token indices sequence length is longer than the specified maximum "
                " sequence length for this OpenAI GPT model ({} > {}). Running this"
                " sequence through the model will result in indexing errors".format(len(ids), self.max_len)
            )
        return ids

    def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
        """Converts a sequence of ids in BPE tokens using the vocab."""
        tokens = []
        for i in ids:
            if i in self.special_tokens_decoder:
                if not skip_special_tokens:
                    tokens.append(self.special_tokens_decoder[i])
            else:
                tokens.append(self.decoder[i])
        return tokens

    def encode(self, text):
        return self.convert_tokens_to_ids(self.tokenize(text))

    def decode(self, tokens):
        text = ''.join([self.decoder[token] for token in tokens])
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, vocab_path):
        """Save the tokenizer vocabulary and merge files to a directory."""
        if not os.path.isdir(vocab_path):
            logger.error("Vocabulary path ({}) should be a directory".format(vocab_path))
            return
        vocab_file = os.path.join(vocab_path, VOCAB_NAME)
        merge_file = os.path.join(vocab_path, MERGES_NAME)
        special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME)

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, ensure_ascii=False))

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write(u'#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
                                   " Please check that the tokenizer is not corrupted!".format(merge_file))
                    index = token_index
                writer.write(' '.join(bpe_tokens) + u'\n')
                index += 1

        index = len(self.encoder)
        with open(special_tokens_file, 'w', encoding='utf-8') as writer:
            for token, token_index in sorted(self.special_tokens.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning("Saving special tokens vocabulary to {}: BPE indices are not consecutive."
 | 
			
		||||
                                   " Please check that the tokenizer is not corrupted!".format(special_tokens_file))
 | 
			
		||||
                    index = token_index
 | 
			
		||||
                writer.write(token + u'\n')
 | 
			
		||||
                index += 1
 | 
			
		||||
 | 
			
		||||
        return vocab_file, merge_file, special_tokens_file
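# Illustrative usage sketch (added for orientation; not part of the original file).
# `tokenizer` stands for an already-constructed instance of the byte-level BPE
# tokenizer defined above; the input string is arbitrary.
ids = tokenizer.encode("Hello world")   # tokenize() followed by convert_tokens_to_ids()
text = tokenizer.decode(ids)            # ids -> BPE tokens -> bytes -> utf-8 text
assert isinstance(ids, list) and isinstance(text, str)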
pytorch_pretrained_bert/tokenization_openai.py (new file, 313 lines)
@ -0,0 +1,313 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
"""Tokenization classes for OpenAI GPT."""
 | 
			
		||||
from __future__ import (absolute_import, division, print_function,
 | 
			
		||||
                        unicode_literals)
 | 
			
		||||
 | 
			
		||||
import json
 | 
			
		||||
import logging
 | 
			
		||||
import os
 | 
			
		||||
import re
 | 
			
		||||
import sys
 | 
			
		||||
from io import open
 | 
			
		||||
 | 
			
		||||
from tqdm import tqdm
 | 
			
		||||
 | 
			
		||||
from .file_utils import cached_path
 | 
			
		||||
from .tokenization import BasicTokenizer
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
PRETRAINED_VOCAB_ARCHIVE_MAP = {
 | 
			
		||||
    'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-vocab.json",
 | 
			
		||||
}
 | 
			
		||||
PRETRAINED_MERGES_ARCHIVE_MAP = {
 | 
			
		||||
    'openai-gpt': "https://s3.amazonaws.com/models.huggingface.co/bert/openai-gpt-merges.txt",
 | 
			
		||||
}
 | 
			
		||||
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
 | 
			
		||||
    'openai-gpt': 512,
 | 
			
		||||
}
 | 
			
		||||
VOCAB_NAME = 'vocab.json'
 | 
			
		||||
MERGES_NAME = 'merges.txt'
 | 
			
		||||
SPECIAL_TOKENS_NAME = 'special_tokens.txt'
 | 
			
		||||
 | 
			
		||||
def get_pairs(word):
 | 
			
		||||
    """
 | 
			
		||||
    Return set of symbol pairs in a word.
 | 
			
		||||
    word is represented as tuple of symbols (symbols being variable-length strings)
 | 
			
		||||
    """
 | 
			
		||||
    pairs = set()
 | 
			
		||||
    prev_char = word[0]
 | 
			
		||||
    for char in word[1:]:
 | 
			
		||||
        pairs.add((prev_char, char))
 | 
			
		||||
        prev_char = char
 | 
			
		||||
    return pairs
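# Quick check of get_pairs, added for clarity (not in the original file):
# a word given as a tuple of symbols yields the set of its adjacent symbol pairs.
assert get_pairs(tuple("hello")) == {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}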
 | 
			
		||||
 | 
			
		||||
def text_standardize(text):
 | 
			
		||||
    """
 | 
			
		||||
    fixes some issues the spacy tokenizer had on books corpus
 | 
			
		||||
    also does some whitespace standardization
 | 
			
		||||
    """
 | 
			
		||||
    text = text.replace('—', '-')
 | 
			
		||||
    text = text.replace('–', '-')
 | 
			
		||||
    text = text.replace('―', '-')
 | 
			
		||||
    text = text.replace('…', '...')
 | 
			
		||||
    text = text.replace('´', "'")
 | 
			
		||||
    text = re.sub(r'''(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)''', r' \1 ', text)
 | 
			
		||||
    text = re.sub(r'\s*\n\s*', ' \n ', text)
 | 
			
		||||
    text = re.sub(r'[^\S\n]+', ' ', text)
 | 
			
		||||
    return text.strip()
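# Illustration of text_standardize, added for clarity (not in the original file):
# dashes and the listed punctuation get padded with spaces, '…' becomes '...',
# and runs of whitespace collapse to single spaces.
print(text_standardize("wait…  what — really?!"))   # -> "wait... what - really ? !"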
 | 
			
		||||
 | 
			
		||||
class OpenAIGPTTokenizer(object):
 | 
			
		||||
    """
 | 
			
		||||
    BPE tokenizer. Peculiarities:
 | 
			
		||||
        - lower case all inputs
 | 
			
		||||
        - uses SpaCy tokenizer and ftfy for pre-BPE tokenization if they are installed, falling back to BERT's BasicTokenizer otherwise.
 | 
			
		||||
        - argument special_tokens and function set_special_tokens:
 | 
			
		||||
            can be used to add additional symbols (ex: "__classify__") to a vocabulary.
 | 
			
		||||
    """
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
 | 
			
		||||
        """
 | 
			
		||||
        Instantiate an OpenAIGPTTokenizer from a pre-trained vocabulary file.
 | 
			
		||||
        Download and cache the pre-trained model file if needed.
 | 
			
		||||
        """
 | 
			
		||||
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
 | 
			
		||||
            vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
 | 
			
		||||
            merges_file = PRETRAINED_MERGES_ARCHIVE_MAP[pretrained_model_name_or_path]
 | 
			
		||||
            special_tokens_file = None
 | 
			
		||||
        else:
 | 
			
		||||
            vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
 | 
			
		||||
            merges_file = os.path.join(pretrained_model_name_or_path, MERGES_NAME)
 | 
			
		||||
            special_tokens_file = os.path.join(pretrained_model_name_or_path, SPECIAL_TOKENS_NAME)
 | 
			
		||||
            if not os.path.exists(special_tokens_file):
 | 
			
		||||
                special_tokens_file = None
 | 
			
		||||
            else:
 | 
			
		||||
                logger.info("loading special tokens file {}".format(special_tokens_file))
 | 
			
		||||
        # redirect to the cache, if necessary
 | 
			
		||||
        try:
 | 
			
		||||
            resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
 | 
			
		||||
            resolved_merges_file = cached_path(merges_file, cache_dir=cache_dir)
 | 
			
		||||
        except EnvironmentError:
 | 
			
		||||
            logger.error(
 | 
			
		||||
                "Model name '{}' was not found in model name list ({}). "
 | 
			
		||||
                "We assumed '{}' was a path or url but couldn't find files {} and {} "
 | 
			
		||||
                "at this path or url.".format(
 | 
			
		||||
                    pretrained_model_name_or_path,
 | 
			
		||||
                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
 | 
			
		||||
                    pretrained_model_name_or_path,
 | 
			
		||||
                    vocab_file, merges_file))
 | 
			
		||||
            return None
 | 
			
		||||
        if resolved_vocab_file == vocab_file and resolved_merges_file == merges_file:
 | 
			
		||||
            logger.info("loading vocabulary file {}".format(vocab_file))
 | 
			
		||||
            logger.info("loading merges file {}".format(merges_file))
 | 
			
		||||
        else:
 | 
			
		||||
            logger.info("loading vocabulary file {} from cache at {}".format(
 | 
			
		||||
                vocab_file, resolved_vocab_file))
 | 
			
		||||
            logger.info("loading merges file {} from cache at {}".format(
 | 
			
		||||
                merges_file, resolved_merges_file))
 | 
			
		||||
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
 | 
			
		||||
            # if we're using a pretrained model, ensure the tokenizer wont index sequences longer
 | 
			
		||||
            # than the number of positional embeddings
 | 
			
		||||
            max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
 | 
			
		||||
            kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
 | 
			
		||||
        # Instantiate tokenizer.
 | 
			
		||||
        if special_tokens_file and 'special_tokens' not in kwargs:
 | 
			
		||||
            special_tokens = open(special_tokens_file, encoding='utf-8').read().split('\n')[:-1]
 | 
			
		||||
        else:
 | 
			
		||||
            special_tokens = kwargs.pop('special_tokens', [])
 | 
			
		||||
        tokenizer = cls(resolved_vocab_file, resolved_merges_file, special_tokens=special_tokens, *inputs, **kwargs)
 | 
			
		||||
        return tokenizer
 | 
			
		||||
 | 
			
		||||
    def __init__(self, vocab_file, merges_file, special_tokens=None, max_len=None):
 | 
			
		||||
        try:
 | 
			
		||||
            import ftfy
 | 
			
		||||
            import spacy
 | 
			
		||||
            self.nlp = spacy.load('en', disable=['parser', 'tagger', 'ner', 'textcat'])
 | 
			
		||||
            self.fix_text = ftfy.fix_text
 | 
			
		||||
        except ImportError:
 | 
			
		||||
            logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.")
 | 
			
		||||
            self.nlp = BasicTokenizer(do_lower_case=True,
 | 
			
		||||
                                      never_split=special_tokens if special_tokens is not None else [])
 | 
			
		||||
            self.fix_text = None
 | 
			
		||||
 | 
			
		||||
        self.max_len = max_len if max_len is not None else int(1e12)
 | 
			
		||||
        self.encoder = json.load(open(vocab_file, encoding="utf-8"))
 | 
			
		||||
        self.decoder = {v:k for k,v in self.encoder.items()}
 | 
			
		||||
        merges = open(merges_file, encoding='utf-8').read().split('\n')[1:-1]
 | 
			
		||||
        merges = [tuple(merge.split()) for merge in merges]
 | 
			
		||||
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
 | 
			
		||||
        self.cache = {}
 | 
			
		||||
        self.special_tokens = {}
 | 
			
		||||
        self.special_tokens_decoder = {}
 | 
			
		||||
        self.set_special_tokens(special_tokens)
 | 
			
		||||
 | 
			
		||||
    def __len__(self):
 | 
			
		||||
        return len(self.encoder) + len(self.special_tokens)
 | 
			
		||||
 | 
			
		||||
    def set_special_tokens(self, special_tokens):
 | 
			
		||||
        """ Add a list of additional tokens to the encoder.
 | 
			
		||||
            The additional tokens are indexed starting from the last index of the
 | 
			
		||||
            current vocabulary in the order of the `special_tokens` list.
 | 
			
		||||
        """
 | 
			
		||||
        if not special_tokens:
 | 
			
		||||
            self.special_tokens = {}
 | 
			
		||||
            self.special_tokens_decoder = {}
 | 
			
		||||
            return
 | 
			
		||||
        self.special_tokens = dict((tok, len(self.encoder) + i) for i, tok in enumerate(special_tokens))
 | 
			
		||||
        self.special_tokens_decoder = {v:k for k, v in self.special_tokens.items()}
 | 
			
		||||
        if self.fix_text is None:
 | 
			
		||||
            # Using BERT's BasicTokenizer: we can update the tokenizer
 | 
			
		||||
            self.nlp.never_split = special_tokens
 | 
			
		||||
        logger.info("Special tokens {}".format(self.special_tokens))
 | 
			
		||||
 | 
			
		||||
    def bpe(self, token):
 | 
			
		||||
        word = tuple(token[:-1]) + (token[-1] + '</w>',)
 | 
			
		||||
        if token in self.cache:
 | 
			
		||||
            return self.cache[token]
 | 
			
		||||
        pairs = get_pairs(word)
 | 
			
		||||
 | 
			
		||||
        if not pairs:
 | 
			
		||||
            return token+'</w>'
 | 
			
		||||
 | 
			
		||||
        while True:
 | 
			
		||||
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
 | 
			
		||||
            if bigram not in self.bpe_ranks:
 | 
			
		||||
                break
 | 
			
		||||
            first, second = bigram
 | 
			
		||||
            new_word = []
 | 
			
		||||
            i = 0
 | 
			
		||||
            while i < len(word):
 | 
			
		||||
                try:
 | 
			
		||||
                    j = word.index(first, i)
 | 
			
		||||
                    new_word.extend(word[i:j])
 | 
			
		||||
                    i = j
 | 
			
		||||
                except ValueError:
 | 
			
		||||
                    new_word.extend(word[i:])
 | 
			
		||||
                    break
 | 
			
		||||
 | 
			
		||||
                if word[i] == first and i < len(word)-1 and word[i+1] == second:
 | 
			
		||||
                    new_word.append(first+second)
 | 
			
		||||
                    i += 2
 | 
			
		||||
                else:
 | 
			
		||||
                    new_word.append(word[i])
 | 
			
		||||
                    i += 1
 | 
			
		||||
            new_word = tuple(new_word)
 | 
			
		||||
            word = new_word
 | 
			
		||||
            if len(word) == 1:
 | 
			
		||||
                break
 | 
			
		||||
            else:
 | 
			
		||||
                pairs = get_pairs(word)
 | 
			
		||||
        word = ' '.join(word)
 | 
			
		||||
        if word == '\n  </w>':
 | 
			
		||||
            word = '\n</w>'
 | 
			
		||||
        self.cache[token] = word
 | 
			
		||||
        return word
 | 
			
		||||
 | 
			
		||||
    def tokenize(self, text):
 | 
			
		||||
        """ Tokenize a string. """
 | 
			
		||||
        split_tokens = []
 | 
			
		||||
        if self.fix_text is None:
 | 
			
		||||
            # Using BERT's BasicTokenizer
 | 
			
		||||
            text = self.nlp.tokenize(text)
 | 
			
		||||
            for token in text:
 | 
			
		||||
                split_tokens.extend([t for t in self.bpe(token).split(' ')])
 | 
			
		||||
        else:
 | 
			
		||||
            # Using SpaCy & ftfy (original tokenization process of OpenAI GPT)
 | 
			
		||||
            text = self.nlp(text_standardize(self.fix_text(text)))
 | 
			
		||||
            for token in text:
 | 
			
		||||
                split_tokens.extend([t for t in self.bpe(token.text.lower()).split(' ')])
 | 
			
		||||
        return split_tokens
 | 
			
		||||
 | 
			
		||||
    def convert_tokens_to_ids(self, tokens):
 | 
			
		||||
        """ Converts a sequence of tokens into ids using the vocab. """
 | 
			
		||||
        ids = []
 | 
			
		||||
        if isinstance(tokens, str) or (sys.version_info[0] == 2 and isinstance(tokens, unicode)):
 | 
			
		||||
            if tokens in self.special_tokens:
 | 
			
		||||
                return self.special_tokens[tokens]
 | 
			
		||||
            else:
 | 
			
		||||
                return self.encoder.get(tokens, 0)
 | 
			
		||||
        for token in tokens:
 | 
			
		||||
            if token in self.special_tokens:
 | 
			
		||||
                ids.append(self.special_tokens[token])
 | 
			
		||||
            else:
 | 
			
		||||
                ids.append(self.encoder.get(token, 0))
 | 
			
		||||
        if len(ids) > self.max_len:
 | 
			
		||||
            logger.warning(
 | 
			
		||||
                "Token indices sequence length is longer than the specified maximum "
 | 
			
		||||
                " sequence length for this OpenAI GPT model ({} > {}). Running this"
 | 
			
		||||
                " sequence through the model will result in indexing errors".format(len(ids), self.max_len)
 | 
			
		||||
            )
 | 
			
		||||
        return ids
 | 
			
		||||
 | 
			
		||||
    def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
 | 
			
		||||
        """Converts a sequence of ids in BPE tokens using the vocab."""
 | 
			
		||||
        tokens = []
 | 
			
		||||
        for i in ids:
 | 
			
		||||
            if i in self.special_tokens_decoder:
 | 
			
		||||
                if not skip_special_tokens:
 | 
			
		||||
                    tokens.append(self.special_tokens_decoder[i])
 | 
			
		||||
            else:
 | 
			
		||||
                tokens.append(self.decoder[i])
 | 
			
		||||
        return tokens
 | 
			
		||||
 | 
			
		||||
    def encode(self, text):
 | 
			
		||||
        return self.convert_tokens_to_ids(self.tokenize(text))
 | 
			
		||||
 | 
			
		||||
    def decode(self, ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
 | 
			
		||||
        """Converts a sequence of ids in a string."""
 | 
			
		||||
        tokens = self.convert_ids_to_tokens(ids, skip_special_tokens=skip_special_tokens)
 | 
			
		||||
        out_string = ''.join(tokens).replace('</w>', ' ').strip()
 | 
			
		||||
        if clean_up_tokenization_spaces:
 | 
			
		||||
            out_string = out_string.replace('<unk>', '')
 | 
			
		||||
            out_string = out_string.replace(' .', '.').replace(' ?', '?').replace(' !', '!').replace(' ,', ',').replace(' ,', ','
 | 
			
		||||
                    ).replace(" ' ", "'").replace(" n't", "n't").replace(" 'm", "'m").replace(" do not", " don't"
 | 
			
		||||
                    ).replace(" 's", "'s").replace(" 've", "'ve").replace(" 're", "'re")
 | 
			
		||||
        return out_string
 | 
			
		||||
 | 
			
		||||
    def save_vocabulary(self, vocab_path):
 | 
			
		||||
        """Save the tokenizer vocabulary and merge files to a directory."""
 | 
			
		||||
        if not os.path.isdir(vocab_path):
 | 
			
		||||
            logger.error("Vocabulary path ({}) should be a directory".format(vocab_path))
 | 
			
		||||
            return
 | 
			
		||||
        vocab_file = os.path.join(vocab_path, VOCAB_NAME)
 | 
			
		||||
        merge_file = os.path.join(vocab_path, MERGES_NAME)
 | 
			
		||||
        special_tokens_file = os.path.join(vocab_path, SPECIAL_TOKENS_NAME)
 | 
			
		||||
 | 
			
		||||
        with open(vocab_file, 'w', encoding='utf-8') as f:
 | 
			
		||||
            f.write(json.dumps(self.encoder, ensure_ascii=False))
 | 
			
		||||
 | 
			
		||||
        index = 0
 | 
			
		||||
        with open(merge_file, "w", encoding="utf-8") as writer:
 | 
			
		||||
            writer.write(u'#version: 0.2\n')
 | 
			
		||||
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
 | 
			
		||||
                if index != token_index:
 | 
			
		||||
                    logger.warning("Saving vocabulary to {}: BPE merge indices are not consecutive."
 | 
			
		||||
                                   " Please check that the tokenizer is not corrupted!".format(merge_file))
 | 
			
		||||
                    index = token_index
 | 
			
		||||
                writer.write(' '.join(bpe_tokens) + u'\n')
 | 
			
		||||
                index += 1
 | 
			
		||||
 | 
			
		||||
        index = len(self.encoder)
 | 
			
		||||
        with open(special_tokens_file, 'w', encoding='utf-8') as writer:
 | 
			
		||||
            for token, token_index in sorted(self.special_tokens.items(), key=lambda kv: kv[1]):
 | 
			
		||||
                if index != token_index:
 | 
			
		||||
                    logger.warning("Saving special tokens vocabulary to {}: BPE indices are not consecutive."
 | 
			
		||||
                                   " Please check that the tokenizer is not corrupted!".format(special_tokens_file))
 | 
			
		||||
                    index = token_index
 | 
			
		||||
                writer.write(token + u'\n')
 | 
			
		||||
                index += 1
 | 
			
		||||
 | 
			
		||||
        return vocab_file, merge_file, special_tokens_file
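# Hedged end-to-end sketch of OpenAIGPTTokenizer (added; not part of the original file).
# 'openai-gpt' is the shortcut name defined in PRETRAINED_VOCAB_ARCHIVE_MAP above;
# the '__classify__' token and the output directory are placeholders.
tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt', special_tokens=['__classify__'])
ids = tokenizer.encode("here is some text to tokenize")
text = tokenizer.decode(ids, skip_special_tokens=True)   # rejoins '</w>' word endings
tokenizer.save_vocabulary('/tmp/openai_vocab')            # must be an existing directory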
pytorch_pretrained_bert/tokenization_transfo_xl.py (new file, 586 lines)
@ -0,0 +1,586 @@
 | 
			
		||||
# coding=utf-8
 | 
			
		||||
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
 | 
			
		||||
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
 | 
			
		||||
#
 | 
			
		||||
# Licensed under the Apache License, Version 2.0 (the "License");
 | 
			
		||||
# you may not use this file except in compliance with the License.
 | 
			
		||||
# You may obtain a copy of the License at
 | 
			
		||||
#
 | 
			
		||||
#     http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
#
 | 
			
		||||
# Unless required by applicable law or agreed to in writing, software
 | 
			
		||||
# distributed under the License is distributed on an "AS IS" BASIS,
 | 
			
		||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 | 
			
		||||
# See the License for the specific language governing permissions and
 | 
			
		||||
# limitations under the License.
 | 
			
		||||
""" Tokenization classes for Transformer XL model.
 | 
			
		||||
    Adapted from https://github.com/kimiyoung/transformer-xl.
 | 
			
		||||
"""
 | 
			
		||||
from __future__ import (absolute_import, division, print_function,
 | 
			
		||||
                        unicode_literals)
 | 
			
		||||
 | 
			
		||||
import glob
 | 
			
		||||
import logging
 | 
			
		||||
import os
 | 
			
		||||
import sys
 | 
			
		||||
from collections import Counter, OrderedDict
 | 
			
		||||
from io import open
 | 
			
		||||
import unicodedata
 | 
			
		||||
 | 
			
		||||
import torch
 | 
			
		||||
import numpy as np
 | 
			
		||||
 | 
			
		||||
from .file_utils import cached_path
 | 
			
		||||
 | 
			
		||||
if sys.version_info[0] == 2:
 | 
			
		||||
    import cPickle as pickle
 | 
			
		||||
else:
 | 
			
		||||
    import pickle
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
PRETRAINED_VOCAB_ARCHIVE_MAP = {
 | 
			
		||||
    'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-vocab.bin",
 | 
			
		||||
}
 | 
			
		||||
VOCAB_NAME = 'vocab.bin'
 | 
			
		||||
 | 
			
		||||
PRETRAINED_CORPUS_ARCHIVE_MAP = {
 | 
			
		||||
    'transfo-xl-wt103': "https://s3.amazonaws.com/models.huggingface.co/bert/transfo-xl-wt103-corpus.bin",
 | 
			
		||||
}
 | 
			
		||||
CORPUS_NAME = 'corpus.bin'
 | 
			
		||||
 | 
			
		||||
class TransfoXLTokenizer(object):
 | 
			
		||||
    """
 | 
			
		||||
    Transformer-XL tokenizer adapted from Vocab class in https://github.com/kimiyoung/transformer-xl
 | 
			
		||||
    """
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
 | 
			
		||||
        """
 | 
			
		||||
        Instantiate a TransfoXLTokenizer.
 | 
			
		||||
        Download and cache the vocabulary file if needed.
 | 
			
		||||
        """
 | 
			
		||||
        if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
 | 
			
		||||
            vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
 | 
			
		||||
        else:
 | 
			
		||||
            if os.path.isdir(pretrained_model_name_or_path):
 | 
			
		||||
                vocab_file = os.path.join(pretrained_model_name_or_path, VOCAB_NAME)
 | 
			
		||||
            else:
 | 
			
		||||
                vocab_file = pretrained_model_name_or_path
 | 
			
		||||
        # redirect to the cache, if necessary
 | 
			
		||||
        try:
 | 
			
		||||
            resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir)
 | 
			
		||||
        except EnvironmentError:
 | 
			
		||||
            logger.error(
 | 
			
		||||
                "Model name '{}' was not found in model name list ({}). "
 | 
			
		||||
                "We assumed '{}' was a path or url but couldn't find files {} "
 | 
			
		||||
                "at this path or url.".format(
 | 
			
		||||
                    pretrained_model_name_or_path,
 | 
			
		||||
                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
 | 
			
		||||
                    pretrained_model_name_or_path,
 | 
			
		||||
                    vocab_file))
 | 
			
		||||
            return None
 | 
			
		||||
        if resolved_vocab_file == vocab_file:
 | 
			
		||||
            logger.info("loading vocabulary file {}".format(vocab_file))
 | 
			
		||||
        else:
 | 
			
		||||
            logger.info("loading vocabulary file {} from cache at {}".format(
 | 
			
		||||
                vocab_file, resolved_vocab_file))
 | 
			
		||||
 | 
			
		||||
        # Instantiate tokenizer.
 | 
			
		||||
        tokenizer = cls(*inputs, **kwargs)
 | 
			
		||||
        vocab_dict = torch.load(resolved_vocab_file)
 | 
			
		||||
        for key, value in vocab_dict.items():
 | 
			
		||||
            tokenizer.__dict__[key] = value
 | 
			
		||||
        return tokenizer
 | 
			
		||||
 | 
			
		||||
    def __init__(self, special=[], min_freq=0, max_size=None, lower_case=False,
 | 
			
		||||
                 delimiter=None, vocab_file=None, never_split=("<unk>", "<eos>", "<formula>")):
 | 
			
		||||
        self.counter = Counter()
 | 
			
		||||
        self.special = special
 | 
			
		||||
        self.min_freq = min_freq
 | 
			
		||||
        self.max_size = max_size
 | 
			
		||||
        self.lower_case = lower_case
 | 
			
		||||
        self.delimiter = delimiter
 | 
			
		||||
        self.vocab_file = vocab_file
 | 
			
		||||
        self.never_split = never_split
 | 
			
		||||
 | 
			
		||||
    def count_file(self, path, verbose=False, add_eos=False):
 | 
			
		||||
        if verbose: print('counting file {} ...'.format(path))
 | 
			
		||||
        assert os.path.exists(path)
 | 
			
		||||
 | 
			
		||||
        sents = []
 | 
			
		||||
        with open(path, 'r', encoding='utf-8') as f:
 | 
			
		||||
            for idx, line in enumerate(f):
 | 
			
		||||
                if verbose and idx > 0 and idx % 500000 == 0:
 | 
			
		||||
                    print('    line {}'.format(idx))
 | 
			
		||||
                symbols = self.tokenize(line, add_eos=add_eos)
 | 
			
		||||
                self.counter.update(symbols)
 | 
			
		||||
                sents.append(symbols)
 | 
			
		||||
 | 
			
		||||
        return sents
 | 
			
		||||
 | 
			
		||||
    def count_sents(self, sents, verbose=False):
 | 
			
		||||
        """
 | 
			
		||||
            sents : a list of sentences, each a list of tokenized symbols
 | 
			
		||||
        """
 | 
			
		||||
        if verbose: print('counting {} sents ...'.format(len(sents)))
 | 
			
		||||
        for idx, symbols in enumerate(sents):
 | 
			
		||||
            if verbose and idx > 0 and idx % 500000 == 0:
 | 
			
		||||
                print('    line {}'.format(idx))
 | 
			
		||||
            self.counter.update(symbols)
 | 
			
		||||
 | 
			
		||||
    def _build_from_file(self, vocab_file):
 | 
			
		||||
        self.idx2sym = []
 | 
			
		||||
        self.sym2idx = OrderedDict()
 | 
			
		||||
 | 
			
		||||
        with open(vocab_file, 'r', encoding='utf-8') as f:
 | 
			
		||||
            for line in f:
 | 
			
		||||
                symb = line.strip().split()[0]
 | 
			
		||||
                self.add_symbol(symb)
 | 
			
		||||
        if '<UNK>' in self.sym2idx:
 | 
			
		||||
            self.unk_idx = self.sym2idx['<UNK>']
 | 
			
		||||
        elif '<unk>' in self.sym2idx:
 | 
			
		||||
            self.unk_idx = self.sym2idx['<unk>']
 | 
			
		||||
        else:
 | 
			
		||||
            raise ValueError('No <unknown> token in vocabulary')
 | 
			
		||||
 | 
			
		||||
    def save_vocabulary(self, vocab_path):
 | 
			
		||||
        """Save the tokenizer vocabulary to a directory or file."""
 | 
			
		||||
        index = 0
 | 
			
		||||
        if os.path.isdir(vocab_path):
            vocab_file = os.path.join(vocab_path, VOCAB_NAME)
        else:
            vocab_file = vocab_path
        torch.save(self.__dict__, vocab_file)
 | 
			
		||||
        return vocab_file
 | 
			
		||||
 | 
			
		||||
    def build_vocab(self):
 | 
			
		||||
        if self.vocab_file:
 | 
			
		||||
            print('building vocab from {}'.format(self.vocab_file))
 | 
			
		||||
            self._build_from_file(self.vocab_file)
 | 
			
		||||
            print('final vocab size {}'.format(len(self)))
 | 
			
		||||
        else:
 | 
			
		||||
            print('building vocab with min_freq={}, max_size={}'.format(
 | 
			
		||||
                self.min_freq, self.max_size))
 | 
			
		||||
            self.idx2sym = []
 | 
			
		||||
            self.sym2idx = OrderedDict()
 | 
			
		||||
 | 
			
		||||
            for sym in self.special:
 | 
			
		||||
                self.add_special(sym)
 | 
			
		||||
 | 
			
		||||
            for sym, cnt in self.counter.most_common(self.max_size):
 | 
			
		||||
                if cnt < self.min_freq: break
 | 
			
		||||
                self.add_symbol(sym)
 | 
			
		||||
 | 
			
		||||
            print('final vocab size {} from {} unique tokens'.format(
 | 
			
		||||
                len(self), len(self.counter)))
 | 
			
		||||
 | 
			
		||||
    def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
 | 
			
		||||
            add_double_eos=False):
 | 
			
		||||
        if verbose: print('encoding file {} ...'.format(path))
 | 
			
		||||
        assert os.path.exists(path)
 | 
			
		||||
        encoded = []
 | 
			
		||||
        with open(path, 'r', encoding='utf-8') as f:
 | 
			
		||||
            for idx, line in enumerate(f):
 | 
			
		||||
                if verbose and idx > 0 and idx % 500000 == 0:
 | 
			
		||||
                    print('    line {}'.format(idx))
 | 
			
		||||
                symbols = self.tokenize(line, add_eos=add_eos,
 | 
			
		||||
                    add_double_eos=add_double_eos)
 | 
			
		||||
                encoded.append(self.convert_to_tensor(symbols))
 | 
			
		||||
 | 
			
		||||
        if ordered:
 | 
			
		||||
            encoded = torch.cat(encoded)
 | 
			
		||||
 | 
			
		||||
        return encoded
 | 
			
		||||
 | 
			
		||||
    def encode_sents(self, sents, ordered=False, verbose=False):
 | 
			
		||||
        if verbose: print('encoding {} sents ...'.format(len(sents)))
 | 
			
		||||
        encoded = []
 | 
			
		||||
        for idx, symbols in enumerate(sents):
 | 
			
		||||
            if verbose and idx > 0 and idx % 500000 == 0:
 | 
			
		||||
                print('    line {}'.format(idx))
 | 
			
		||||
            encoded.append(self.convert_to_tensor(symbols))
 | 
			
		||||
 | 
			
		||||
        if ordered:
 | 
			
		||||
            encoded = torch.cat(encoded)
 | 
			
		||||
 | 
			
		||||
        return encoded
 | 
			
		||||
 | 
			
		||||
    def add_special(self, sym):
 | 
			
		||||
        if sym not in self.sym2idx:
 | 
			
		||||
            self.idx2sym.append(sym)
 | 
			
		||||
            self.sym2idx[sym] = len(self.idx2sym) - 1
 | 
			
		||||
            setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
 | 
			
		||||
 | 
			
		||||
    def add_symbol(self, sym):
 | 
			
		||||
        if sym not in self.sym2idx:
 | 
			
		||||
            self.idx2sym.append(sym)
 | 
			
		||||
            self.sym2idx[sym] = len(self.idx2sym) - 1
 | 
			
		||||
 | 
			
		||||
    def get_sym(self, idx):
 | 
			
		||||
        assert 0 <= idx < len(self), 'Index {} out of vocabulary range'.format(idx)
 | 
			
		||||
        return self.idx2sym[idx]
 | 
			
		||||
 | 
			
		||||
    def get_idx(self, sym):
 | 
			
		||||
        if sym in self.sym2idx:
 | 
			
		||||
            return self.sym2idx[sym]
 | 
			
		||||
        else:
 | 
			
		||||
            # print('encounter unk {}'.format(sym))
 | 
			
		||||
            # assert '<eos>' not in sym
 | 
			
		||||
            if hasattr(self, 'unk_idx'):
 | 
			
		||||
                return self.sym2idx.get(sym, self.unk_idx)
 | 
			
		||||
            # Backward compatibility with pre-trained models
 | 
			
		||||
            elif '<unk>' in self.sym2idx:
 | 
			
		||||
                return self.sym2idx['<unk>']
 | 
			
		||||
            elif '<UNK>' in self.sym2idx:
 | 
			
		||||
                return self.sym2idx['<UNK>']
 | 
			
		||||
            else:
 | 
			
		||||
                raise ValueError('Token not in vocabulary and no <unk> token in vocabulary for replacement')
 | 
			
		||||
 | 
			
		||||
    def convert_ids_to_tokens(self, indices):
 | 
			
		||||
        """Converts a sequence of indices in symbols using the vocab."""
 | 
			
		||||
        return [self.get_sym(idx) for idx in indices]
 | 
			
		||||
 | 
			
		||||
    def convert_tokens_to_ids(self, symbols):
 | 
			
		||||
        """Converts a sequence of symbols into ids using the vocab."""
 | 
			
		||||
        return [self.get_idx(sym) for sym in symbols]
 | 
			
		||||
 | 
			
		||||
    def convert_to_tensor(self, symbols):
 | 
			
		||||
        return torch.LongTensor(self.convert_tokens_to_ids(symbols))
 | 
			
		||||
 | 
			
		||||
    def decode(self, indices, exclude=None):
 | 
			
		||||
        """Converts a sequence of indices in a string."""
 | 
			
		||||
        if exclude is None:
 | 
			
		||||
            return ' '.join([self.get_sym(idx) for idx in indices])
 | 
			
		||||
        else:
 | 
			
		||||
            return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])
 | 
			
		||||
 | 
			
		||||
    def __len__(self):
 | 
			
		||||
        return len(self.idx2sym)
 | 
			
		||||
 | 
			
		||||
    def tokenize(self, line, add_eos=False, add_double_eos=False):
 | 
			
		||||
        line = line.strip()
 | 
			
		||||
        # convert to lower case
 | 
			
		||||
        if self.lower_case:
 | 
			
		||||
            line = line.lower()
 | 
			
		||||
 | 
			
		||||
        # empty delimiter '' will evaluate False
 | 
			
		||||
        if self.delimiter == '':
 | 
			
		||||
            symbols = line
 | 
			
		||||
        else:
 | 
			
		||||
            symbols = line.split(self.delimiter)
 | 
			
		||||
 | 
			
		||||
        if add_double_eos: # lm1b
 | 
			
		||||
            return ['<S>'] + symbols + ['<S>']
 | 
			
		||||
        elif add_eos:
 | 
			
		||||
            return symbols + ['<eos>']
 | 
			
		||||
        else:
 | 
			
		||||
            return symbols
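# Small illustration of the default whitespace tokenization (added; not in the original file):
tok = TransfoXLTokenizer(lower_case=True)
assert tok.tokenize("Hello  world", add_eos=True) == ['hello', 'world', '<eos>']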
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class LMOrderedIterator(object):
 | 
			
		||||
    def __init__(self, data, bsz, bptt, device='cpu', ext_len=None):
 | 
			
		||||
        """
 | 
			
		||||
            data -- LongTensor -- the LongTensor is strictly ordered
 | 
			
		||||
        """
 | 
			
		||||
        self.bsz = bsz
 | 
			
		||||
        self.bptt = bptt
 | 
			
		||||
        self.ext_len = ext_len if ext_len is not None else 0
 | 
			
		||||
 | 
			
		||||
        self.device = device
 | 
			
		||||
 | 
			
		||||
        # Work out how cleanly we can divide the dataset into bsz parts.
 | 
			
		||||
        self.n_step = data.size(0) // bsz
 | 
			
		||||
 | 
			
		||||
        # Trim off any extra elements that wouldn't cleanly fit (remainders).
 | 
			
		||||
        data = data.narrow(0, 0, self.n_step * bsz)
 | 
			
		||||
 | 
			
		||||
        # Evenly divide the data across the bsz batches.
 | 
			
		||||
        self.data = data.view(bsz, -1).t().contiguous().to(device)
 | 
			
		||||
 | 
			
		||||
        # Number of mini-batches
 | 
			
		||||
        self.n_batch = (self.n_step + self.bptt - 1) // self.bptt
 | 
			
		||||
 | 
			
		||||
    def get_batch(self, i, bptt=None):
 | 
			
		||||
        if bptt is None: bptt = self.bptt
 | 
			
		||||
        seq_len = min(bptt, self.data.size(0) - 1 - i)
 | 
			
		||||
 | 
			
		||||
        end_idx = i + seq_len
 | 
			
		||||
        beg_idx = max(0, i - self.ext_len)
 | 
			
		||||
 | 
			
		||||
        data = self.data[beg_idx:end_idx]
 | 
			
		||||
        target = self.data[i+1:i+1+seq_len]
 | 
			
		||||
 | 
			
		||||
        data_out = data.transpose(0, 1).contiguous().to(self.device)
 | 
			
		||||
        target_out = target.transpose(0, 1).contiguous().to(self.device)
 | 
			
		||||
 | 
			
		||||
        return data_out, target_out, seq_len
 | 
			
		||||
 | 
			
		||||
    def get_fixlen_iter(self, start=0):
 | 
			
		||||
        for i in range(start, self.data.size(0) - 1, self.bptt):
 | 
			
		||||
            yield self.get_batch(i)
 | 
			
		||||
 | 
			
		||||
    def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3):
 | 
			
		||||
        max_len = self.bptt + max_deviation * std
 | 
			
		||||
        i = start
 | 
			
		||||
        while True:
 | 
			
		||||
            bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2.
 | 
			
		||||
            bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std))))
 | 
			
		||||
            data, target, seq_len = self.get_batch(i, bptt)
 | 
			
		||||
            i += seq_len
 | 
			
		||||
            yield data, target, seq_len
 | 
			
		||||
            if i >= self.data.size(0) - 2:
 | 
			
		||||
                break
 | 
			
		||||
 | 
			
		||||
    def __iter__(self):
 | 
			
		||||
        return self.get_fixlen_iter()
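# Toy sketch of how LMOrderedIterator slices an ordered token stream
# (added for clarity; the sizes below are illustrative assumptions).
data = torch.arange(100)                       # 100 ordered token ids
it = LMOrderedIterator(data, bsz=4, bptt=10)   # 4 streams of 25 steps each
for inp, tgt, seq_len in it:
    # inp, tgt: [bsz x seq_len]; tgt is inp shifted one position to the right
    assert tgt.size() == inp.size()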
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class LMShuffledIterator(object):
 | 
			
		||||
    def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False):
 | 
			
		||||
        """
 | 
			
		||||
            data -- list[LongTensor] -- there is no order among the LongTensors
 | 
			
		||||
        """
 | 
			
		||||
        self.data = data
 | 
			
		||||
 | 
			
		||||
        self.bsz = bsz
 | 
			
		||||
        self.bptt = bptt
 | 
			
		||||
        self.ext_len = ext_len if ext_len is not None else 0
 | 
			
		||||
 | 
			
		||||
        self.device = device
 | 
			
		||||
        self.shuffle = shuffle
 | 
			
		||||
 | 
			
		||||
    def get_sent_stream(self):
 | 
			
		||||
        # index iterator
 | 
			
		||||
        epoch_indices = np.random.permutation(len(self.data)) if self.shuffle \
 | 
			
		||||
            else np.array(range(len(self.data)))
 | 
			
		||||
 | 
			
		||||
        # sentence iterator
 | 
			
		||||
        for idx in epoch_indices:
 | 
			
		||||
            yield self.data[idx]
 | 
			
		||||
 | 
			
		||||
    def stream_iterator(self, sent_stream):
 | 
			
		||||
        # streams for each data in the batch
 | 
			
		||||
        streams = [None] * self.bsz
 | 
			
		||||
 | 
			
		||||
        data = torch.LongTensor(self.bptt, self.bsz)
 | 
			
		||||
        target = torch.LongTensor(self.bptt, self.bsz)
 | 
			
		||||
 | 
			
		||||
        n_retain = 0
 | 
			
		||||
 | 
			
		||||
        while True:
 | 
			
		||||
            # data   : [n_retain+bptt x bsz]
 | 
			
		||||
            # target : [bptt x bsz]
 | 
			
		||||
            data[n_retain:].fill_(-1)
 | 
			
		||||
            target.fill_(-1)
 | 
			
		||||
 | 
			
		||||
            valid_batch = True
 | 
			
		||||
 | 
			
		||||
            for i in range(self.bsz):
 | 
			
		||||
                n_filled = 0
 | 
			
		||||
                try:
 | 
			
		||||
                    while n_filled < self.bptt:
 | 
			
		||||
                        if streams[i] is None or len(streams[i]) <= 1:
 | 
			
		||||
                            streams[i] = next(sent_stream)
 | 
			
		||||
                        # number of new tokens to fill in
 | 
			
		||||
                        n_new = min(len(streams[i]) - 1, self.bptt - n_filled)
 | 
			
		||||
                        # first n_retain tokens are retained from last batch
 | 
			
		||||
                        data[n_retain+n_filled:n_retain+n_filled+n_new, i] = \
 | 
			
		||||
                            streams[i][:n_new]
 | 
			
		||||
                        target[n_filled:n_filled+n_new, i] = \
 | 
			
		||||
                            streams[i][1:n_new+1]
 | 
			
		||||
                        streams[i] = streams[i][n_new:]
 | 
			
		||||
                        n_filled += n_new
 | 
			
		||||
                except StopIteration:
 | 
			
		||||
                    valid_batch = False
 | 
			
		||||
                    break
 | 
			
		||||
 | 
			
		||||
            if not valid_batch:
 | 
			
		||||
                return
 | 
			
		||||
 | 
			
		||||
            data_out = data.transpose(0, 1).contiguous().to(self.device)
 | 
			
		||||
            target_out = target.transpose(0, 1).contiguous().to(self.device)
 | 
			
		||||
 | 
			
		||||
            yield data_out, target_out, self.bptt
 | 
			
		||||
 | 
			
		||||
            n_retain = min(data.size(0), self.ext_len)
 | 
			
		||||
            if n_retain > 0:
 | 
			
		||||
                data[:n_retain] = data[-n_retain:]
 | 
			
		||||
            data.resize_(n_retain + self.bptt, data.size(1))
 | 
			
		||||
 | 
			
		||||
    def __iter__(self):
 | 
			
		||||
        # sent_stream is an iterator
 | 
			
		||||
        sent_stream = self.get_sent_stream()
 | 
			
		||||
 | 
			
		||||
        for batch in self.stream_iterator(sent_stream):
 | 
			
		||||
            yield batch
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class LMMultiFileIterator(LMShuffledIterator):
 | 
			
		||||
    def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None,
 | 
			
		||||
        shuffle=False):
 | 
			
		||||
 | 
			
		||||
        self.paths = paths
 | 
			
		||||
        self.vocab = vocab
 | 
			
		||||
 | 
			
		||||
        self.bsz = bsz
 | 
			
		||||
        self.bptt = bptt
 | 
			
		||||
        self.ext_len = ext_len if ext_len is not None else 0
 | 
			
		||||
 | 
			
		||||
        self.device = device
 | 
			
		||||
        self.shuffle = shuffle
 | 
			
		||||
 | 
			
		||||
    def get_sent_stream(self, path):
 | 
			
		||||
        sents = self.vocab.encode_file(path, add_double_eos=True)
 | 
			
		||||
        if self.shuffle:
 | 
			
		||||
            np.random.shuffle(sents)
 | 
			
		||||
        sent_stream = iter(sents)
 | 
			
		||||
 | 
			
		||||
        return sent_stream
 | 
			
		||||
 | 
			
		||||
    def __iter__(self):
 | 
			
		||||
        if self.shuffle:
 | 
			
		||||
            np.random.shuffle(self.paths)
 | 
			
		||||
 | 
			
		||||
        for path in self.paths:
 | 
			
		||||
            # sent_stream is an iterator
 | 
			
		||||
            sent_stream = self.get_sent_stream(path)
 | 
			
		||||
            for batch in self.stream_iterator(sent_stream):
 | 
			
		||||
                yield batch
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class TransfoXLCorpus(object):
 | 
			
		||||
    @classmethod
 | 
			
		||||
    def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
 | 
			
		||||
        """
 | 
			
		||||
        Instantiate a pre-processed corpus.
 | 
			
		||||
        """
 | 
			
		||||
        vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
 | 
			
		||||
        if pretrained_model_name_or_path in PRETRAINED_CORPUS_ARCHIVE_MAP:
 | 
			
		||||
            corpus_file = PRETRAINED_CORPUS_ARCHIVE_MAP[pretrained_model_name_or_path]
 | 
			
		||||
        else:
 | 
			
		||||
            corpus_file = os.path.join(pretrained_model_name_or_path, CORPUS_NAME)
 | 
			
		||||
        # redirect to the cache, if necessary
 | 
			
		||||
        try:
 | 
			
		||||
            resolved_corpus_file = cached_path(corpus_file, cache_dir=cache_dir)
 | 
			
		||||
        except EnvironmentError:
 | 
			
		||||
            logger.error(
 | 
			
		||||
                "Corpus '{}' was not found in corpus list ({}). "
 | 
			
		||||
                "We assumed '{}' was a path or url but couldn't find files {} "
 | 
			
		||||
                "at this path or url.".format(
 | 
			
		||||
                    pretrained_model_name_or_path,
 | 
			
		||||
                    ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
 | 
			
		||||
                    pretrained_model_name_or_path,
 | 
			
		||||
                    corpus_file))
 | 
			
		||||
            return None
 | 
			
		||||
        if resolved_corpus_file == corpus_file:
 | 
			
		||||
            logger.info("loading corpus file {}".format(corpus_file))
 | 
			
		||||
        else:
 | 
			
		||||
            logger.info("loading corpus file {} from cache at {}".format(
 | 
			
		||||
                corpus_file, resolved_corpus_file))
 | 
			
		||||
 | 
			
		||||
        # Instantiate tokenizer.
 | 
			
		||||
        corpus = cls(*inputs, **kwargs)
 | 
			
		||||
        corpus_dict = torch.load(resolved_corpus_file)
 | 
			
		||||
        for key, value in corpus_dict.items():
 | 
			
		||||
            corpus.__dict__[key] = value
 | 
			
		||||
        corpus.vocab = vocab
 | 
			
		||||
        if corpus.train is not None:
 | 
			
		||||
            corpus.train = torch.tensor(corpus.train, dtype=torch.long)
 | 
			
		||||
        if corpus.valid is not None:
 | 
			
		||||
            corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
 | 
			
		||||
        if corpus.test is not None:
 | 
			
		||||
            corpus.test = torch.tensor(corpus.test, dtype=torch.long)
 | 
			
		||||
        return corpus
 | 
			
		||||
 | 
			
		||||
    def __init__(self, *args, **kwargs):
 | 
			
		||||
        self.vocab = TransfoXLTokenizer(*args, **kwargs)
 | 
			
		||||
        self.dataset = None
 | 
			
		||||
        self.train = None
 | 
			
		||||
        self.valid = None
 | 
			
		||||
        self.test = None
 | 
			
		||||
 | 
			
		||||
    def build_corpus(self, path, dataset):
 | 
			
		||||
        self.dataset = dataset
 | 
			
		||||
 | 
			
		||||
        if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']:
 | 
			
		||||
            self.vocab.count_file(os.path.join(path, 'train.txt'))
 | 
			
		||||
            self.vocab.count_file(os.path.join(path, 'valid.txt'))
 | 
			
		||||
            self.vocab.count_file(os.path.join(path, 'test.txt'))
 | 
			
		||||
        elif self.dataset == 'wt103':
 | 
			
		||||
            self.vocab.count_file(os.path.join(path, 'train.txt'))
 | 
			
		||||
        elif self.dataset == 'lm1b':
 | 
			
		||||
            train_path_pattern = os.path.join(
 | 
			
		||||
                path, '1-billion-word-language-modeling-benchmark-r13output',
 | 
			
		||||
                'training-monolingual.tokenized.shuffled', 'news.en-*')
 | 
			
		||||
            train_paths = glob.glob(train_path_pattern)
 | 
			
		||||
            # the vocab will load from file when build_vocab() is called
 | 
			
		||||
 | 
			
		||||
        self.vocab.build_vocab()
 | 
			
		||||
 | 
			
		||||
        if self.dataset in ['ptb', 'wt2', 'wt103']:
 | 
			
		||||
            self.train = self.vocab.encode_file(
 | 
			
		||||
                os.path.join(path, 'train.txt'), ordered=True)
 | 
			
		||||
            self.valid = self.vocab.encode_file(
 | 
			
		||||
                os.path.join(path, 'valid.txt'), ordered=True)
 | 
			
		||||
            self.test = self.vocab.encode_file(
 | 
			
		||||
                os.path.join(path, 'test.txt'), ordered=True)
 | 
			
		||||
        elif self.dataset in ['enwik8', 'text8']:
 | 
			
		||||
            self.train = self.vocab.encode_file(
 | 
			
		||||
                os.path.join(path, 'train.txt'), ordered=True, add_eos=False)
 | 
			
		||||
            self.valid = self.vocab.encode_file(
 | 
			
		||||
                os.path.join(path, 'valid.txt'), ordered=True, add_eos=False)
 | 
			
		||||
            self.test = self.vocab.encode_file(
 | 
			
		||||
                os.path.join(path, 'test.txt'), ordered=True, add_eos=False)
 | 
			
		||||
        elif self.dataset == 'lm1b':
 | 
			
		||||
            self.train = train_paths
 | 
			
		||||
            self.valid = self.vocab.encode_file(
 | 
			
		||||
                os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True)
 | 
			
		||||
            self.test = self.vocab.encode_file(
 | 
			
		||||
                os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True)
 | 
			
		||||
 | 
			
		||||
    def get_iterator(self, split, *args, **kwargs):
 | 
			
		||||
        if split == 'train':
 | 
			
		||||
            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
 | 
			
		||||
                data_iter = LMOrderedIterator(self.train, *args, **kwargs)
 | 
			
		||||
            elif self.dataset == 'lm1b':
 | 
			
		||||
                kwargs['shuffle'] = True
 | 
			
		||||
                data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
 | 
			
		||||
        elif split in ['valid', 'test']:
 | 
			
		||||
            data = self.valid if split == 'valid' else self.test
 | 
			
		||||
            if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']:
 | 
			
		||||
                data_iter = LMOrderedIterator(data, *args, **kwargs)
 | 
			
		||||
            elif self.dataset == 'lm1b':
 | 
			
		||||
                data_iter = LMShuffledIterator(data, *args, **kwargs)
 | 
			
		||||
 | 
			
		||||
        return data_iter
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def get_lm_corpus(datadir, dataset):
 | 
			
		||||
    fn = os.path.join(datadir, 'cache.pt')
 | 
			
		||||
    fn_pickle = os.path.join(datadir, 'cache.pkl')
 | 
			
		||||
    if os.path.exists(fn):
        print('Loading cached dataset...')
        corpus = torch.load(fn)
    elif os.path.exists(fn_pickle):
        print('Loading cached dataset from pickle...')
        with open(fn_pickle, "rb") as fp:
            corpus = pickle.load(fp)
 | 
			
		||||
    else:
 | 
			
		||||
        print('Producing dataset {}...'.format(dataset))
 | 
			
		||||
        kwargs = {}
 | 
			
		||||
        if dataset in ['wt103', 'wt2']:
 | 
			
		||||
            kwargs['special'] = ['<eos>']
 | 
			
		||||
            kwargs['lower_case'] = False
 | 
			
		||||
        elif dataset == 'ptb':
 | 
			
		||||
            kwargs['special'] = ['<eos>']
 | 
			
		||||
            kwargs['lower_case'] = True
 | 
			
		||||
        elif dataset == 'lm1b':
 | 
			
		||||
            kwargs['special'] = []
 | 
			
		||||
            kwargs['lower_case'] = False
 | 
			
		||||
            kwargs['vocab_file'] = os.path.join(datadir, '1b_word_vocab.txt')
 | 
			
		||||
        elif dataset in ['enwik8', 'text8']:
 | 
			
		||||
            pass
 | 
			
		||||
 | 
			
		||||
        corpus = TransfoXLCorpus(datadir, dataset, **kwargs)
 | 
			
		||||
        torch.save(corpus, fn)
 | 
			
		||||
 | 
			
		||||
    return corpus
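# Hedged usage sketch of get_lm_corpus (added; not part of the original file).
# The directory path is a placeholder and is expected to contain train.txt,
# valid.txt and test.txt in the 'wt103' layout.
corpus = get_lm_corpus('/path/to/wikitext-103', 'wt103')
train_iter = corpus.get_iterator('train', bsz=32, bptt=128, device='cpu')
for data, target, seq_len in train_iter:
    break   # data/target: [bsz x seq_len] LongTensors ready for a Transformer-XL step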
@ -5,4 +5,6 @@ tqdm
# Accessing files from S3 directly.
boto3
# Used for downloading models over HTTP
requests
# For OpenAI GPT
regex
setup.py (16 changed lines)
@ -33,12 +33,13 @@ To create the package for pypi.
 | 
			
		||||
7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory.
 | 
			
		||||
 | 
			
		||||
"""
 | 
			
		||||
from io import open
 | 
			
		||||
from setuptools import find_packages, setup
 | 
			
		||||
 | 
			
		||||
setup(
 | 
			
		||||
    name="pytorch_pretrained_bert",
 | 
			
		||||
    version="0.4.0",
 | 
			
		||||
    author="Thomas Wolf, Victor Sanh, Tim Rault, Google AI Language Team Authors",
 | 
			
		||||
    version="0.6.2",
 | 
			
		||||
    author="Thomas Wolf, Victor Sanh, Tim Rault, Google AI Language Team Authors, Open AI team Authors",
 | 
			
		||||
    author_email="thomas@huggingface.co",
 | 
			
		||||
    description="PyTorch version of Google AI BERT model with script to load Google pre-trained models",
 | 
			
		||||
    long_description=open("README.md", "r", encoding='utf-8').read(),
 | 
			
		||||
@ -52,9 +53,14 @@ setup(
 | 
			
		||||
                      'numpy',
 | 
			
		||||
                      'boto3',
 | 
			
		||||
                      'requests',
 | 
			
		||||
                      'tqdm'],
 | 
			
		||||
    scripts=["bin/pytorch_pretrained_bert"],
 | 
			
		||||
    python_requires='>=3.5.0',
 | 
			
		||||
                      'tqdm',
 | 
			
		||||
                      'regex'],
 | 
			
		||||
    entry_points={
 | 
			
		||||
      'console_scripts': [
 | 
			
		||||
        "pytorch_pretrained_bert=pytorch_pretrained_bert.__main__:main",
 | 
			
		||||
      ]
 | 
			
		||||
    },
 | 
			
		||||
    # python_requires='>=3.5.0',
 | 
			
		||||
    tests_require=['pytest'],
 | 
			
		||||
    classifiers=[
 | 
			
		||||
          'Intended Audience :: Science/Research',
tests/conftest.py (new file, 19 lines)
@ -0,0 +1,19 @@
 | 
			
		||||
# content of conftest.py
 | 
			
		||||
 | 
			
		||||
import pytest
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def pytest_addoption(parser):
 | 
			
		||||
    parser.addoption(
 | 
			
		||||
        "--runslow", action="store_true", default=False, help="run slow tests"
 | 
			
		||||
    )
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
def pytest_collection_modifyitems(config, items):
 | 
			
		||||
    if config.getoption("--runslow"):
 | 
			
		||||
        # --runslow given in cli: do not skip slow tests
 | 
			
		||||
        return
 | 
			
		||||
    skip_slow = pytest.mark.skip(reason="need --runslow option to run")
 | 
			
		||||
    for item in items:
 | 
			
		||||
        if "slow" in item.keywords:
 | 
			
		||||
            item.add_marker(skip_slow)
 | 
			
		||||
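With this conftest.py, any test marked @pytest.mark.slow is skipped by default and only runs when pytest is invoked with --runslow. For example:

# test_example.py -- illustrative test module, not part of the repository.
import pytest


@pytest.mark.slow
def test_download_pretrained_weights():
    # Skipped under a plain `pytest`; runs with `pytest --runslow`.
    assert True


def test_fast_path():
    # Always runs.
    assert 1 + 1 == 2

Running pytest reports the slow test as skipped with the reason "need --runslow option to run", while pytest --runslow executes both.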
							
								
								
									
tests/modeling_gpt2_test.py (new file, 229 lines)

@@ -0,0 +1,229 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import unittest
import json
import random
import shutil
import pytest

import torch

from pytorch_pretrained_bert import (GPT2Config, GPT2Model,
                                     GPT2LMHeadModel, GPT2DoubleHeadsModel)
from pytorch_pretrained_bert.modeling_gpt2 import PRETRAINED_MODEL_ARCHIVE_MAP

class GPT2ModelTest(unittest.TestCase):
    class GPT2ModelTester(object):

        def __init__(self,
                     parent,
                     batch_size=13,
                     seq_length=7,
                     is_training=True,
                     use_position_ids=True,
                     use_token_type_ids=True,
                     use_labels=True,
                     vocab_size=99,
                     n_positions=33,
                     n_embd=32,
                     n_layer=5,
                     n_head=4,
                     n_choices=3,
                     type_sequence_label_size=2,
                     initializer_range=0.02,
                     num_labels=3,
                     scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_position_ids = use_position_ids
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.n_choices = n_choices
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.scope = scope

        def prepare_config_and_inputs(self):
            input_ids = GPT2ModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.vocab_size)

            position_ids = None
            if self.use_position_ids:
                position_ids = GPT2ModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.n_positions)

            token_type_ids = None
            if self.use_token_type_ids:
                total_voc = self.vocab_size
                token_type_ids = GPT2ModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], total_voc)

            mc_labels = None
            lm_labels = None
            mc_token_ids = None
            if self.use_labels:
                mc_labels = GPT2ModelTest.ids_tensor([self.batch_size], self.type_sequence_label_size)
                lm_labels = GPT2ModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.num_labels)
                mc_token_ids = GPT2ModelTest.ids_tensor([self.batch_size, self.n_choices], self.seq_length)

            config = GPT2Config(
                vocab_size_or_config_json_file=self.vocab_size,
                n_positions=self.n_positions,
                n_embd=self.n_embd,
                n_layer=self.n_layer,
                n_head=self.n_head,
                initializer_range=self.initializer_range)

            return (config, input_ids, token_type_ids, position_ids,
                    mc_labels, lm_labels, mc_token_ids)

        def create_gpt2_model(self, config, input_ids, token_type_ids, position_ids,
                                mc_labels, lm_labels, mc_token_ids):
            model = GPT2Model(config)
            model.eval()
            hidden_states, presents = model(input_ids, position_ids, token_type_ids)
            outputs = {
                "hidden_states": hidden_states,
                "presents": presents,
            }
            return outputs

        def check_gpt2_model_output(self, result):
            self.parent.assertListEqual(
                list(result["hidden_states"].size()),
                [self.batch_size, self.n_choices, self.seq_length, self.n_embd])


        def create_gpt2_lm_head(self, config, input_ids, token_type_ids, position_ids,
                                       mc_labels, lm_labels, mc_token_ids):
            model = GPT2LMHeadModel(config)
            model.eval()
            loss = model(input_ids, position_ids, token_type_ids, lm_labels)
            lm_logits, presents = model(input_ids, position_ids, token_type_ids)
            outputs = {
                "loss": loss,
                "lm_logits": lm_logits,
                "presents": presents,
            }
            return outputs

        def check_gpt2_lm_head_output(self, result):
            total_voc = self.vocab_size
            self.parent.assertListEqual(
                list(result["lm_logits"].size()),
                [self.batch_size, self.n_choices, self.seq_length, total_voc])

        def check_gpt2_lm_head_loss_output(self, result):
            self.parent.assertListEqual(
                list(result["loss"].size()),
                [])

        def create_gpt2_double_heads(self, config, input_ids, token_type_ids, position_ids,
                                       mc_labels, lm_labels, mc_token_ids):
            model = GPT2DoubleHeadsModel(config)
            model.eval()
            loss = model(input_ids, mc_token_ids,
                         lm_labels=lm_labels, mc_labels=mc_labels,
                         token_type_ids=token_type_ids, position_ids=position_ids)
            lm_logits, mc_logits, presents = model(input_ids, mc_token_ids, position_ids=position_ids, token_type_ids=token_type_ids)
            outputs = {
                "loss": loss,
                "lm_logits": lm_logits,
                "mc_logits": mc_logits,
                "presents": presents,
            }
            return outputs

        def check_gpt2_double_heads_output(self, result):
            total_voc = self.vocab_size
            self.parent.assertListEqual(
                list(result["lm_logits"].size()),
                [self.batch_size, self.n_choices, self.seq_length, total_voc])
            self.parent.assertListEqual(
                list(result["mc_logits"].size()),
                [self.batch_size, self.n_choices])

        def check_gpt2_double_heads_loss_output(self, result):
            self.parent.assertListEqual(
                [list(l.size()) for l in result["loss"]],
                [[], []])

    def test_default(self):
        self.run_tester(GPT2ModelTest.GPT2ModelTester(self))

    def test_config_to_json_string(self):
        config = GPT2Config(vocab_size_or_config_json_file=99, n_embd=37)
        obj = json.loads(config.to_json_string())
        self.assertEqual(obj["vocab_size"], 99)
        self.assertEqual(obj["n_embd"], 37)

    def test_config_to_json_file(self):
        config_first = GPT2Config(vocab_size_or_config_json_file=99, n_embd=37)
        json_file_path = "/tmp/config.json"
        config_first.to_json_file(json_file_path)
        config_second = GPT2Config.from_json_file(json_file_path)
        os.remove(json_file_path)
        self.assertEqual(config_second.to_dict(), config_first.to_dict())

    @pytest.mark.slow
    def test_model_from_pretrained(self):
        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
        for model_name in list(PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            model = GPT2Model.from_pretrained(model_name, cache_dir=cache_dir)
            shutil.rmtree(cache_dir)
            self.assertIsNotNone(model)

    def run_tester(self, tester):
        config_and_inputs = tester.prepare_config_and_inputs()
        output_result = tester.create_gpt2_model(*config_and_inputs)
        tester.check_gpt2_model_output(output_result)

        output_result = tester.create_gpt2_lm_head(*config_and_inputs)
        tester.check_gpt2_lm_head_output(output_result)
        tester.check_gpt2_lm_head_loss_output(output_result)

        output_result = tester.create_gpt2_double_heads(*config_and_inputs)
        tester.check_gpt2_double_heads_output(output_result)
        tester.check_gpt2_double_heads_loss_output(output_result)

    @classmethod
    def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
        """Creates a random int32 tensor of the shape within the vocab size."""
        if rng is None:
            rng = random.Random()

        total_dims = 1
        for dim in shape:
            total_dims *= dim

        values = []
        for _ in range(total_dims):
            values.append(rng.randint(0, vocab_size - 1))

        return torch.tensor(data=values, dtype=torch.long).view(shape).contiguous()


if __name__ == "__main__":
    unittest.main()
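As the tester methods above show, GPT2Model returns a tuple of hidden states and "presents" (the cached key/value tensors for each layer). A minimal sketch of exercising the model with a tiny randomly initialized config, following the same calling convention as the test; the 2D input shape is an assumption for a plain single-sequence batch:

import torch

from pytorch_pretrained_bert import GPT2Config, GPT2Model

# Tiny randomly initialized model, mirroring the dimensions used in the test above.
config = GPT2Config(vocab_size_or_config_json_file=99, n_positions=33,
                    n_embd=32, n_layer=5, n_head=4)
model = GPT2Model(config)
model.eval()  # deterministic outputs (no dropout)

input_ids = torch.randint(0, 99, (2, 7), dtype=torch.long)  # [batch, seq_length]
with torch.no_grad():
    hidden_states, presents = model(input_ids)

print(hidden_states.shape)  # torch.Size([2, 7, 32])
print(len(presents))        # one cached key/value tensor per layer (5 here)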
							
								
								
									
tests/modeling_openai_test.py (new file, 241 lines)

@@ -0,0 +1,241 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import unittest
import json
import random
import shutil
import pytest

import torch

from pytorch_pretrained_bert import (OpenAIGPTConfig, OpenAIGPTModel,
                                     OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel)
from pytorch_pretrained_bert.modeling_openai import PRETRAINED_MODEL_ARCHIVE_MAP

class OpenAIGPTModelTest(unittest.TestCase):
    class OpenAIGPTModelTester(object):

        def __init__(self,
                     parent,
                     batch_size=13,
                     seq_length=7,
                     is_training=True,
                     use_position_ids=True,
                     use_token_type_ids=True,
                     use_labels=True,
                     vocab_size=99,
                     n_special=1,
                     n_positions=33,
                     n_embd=32,
                     n_layer=5,
                     n_head=4,
                     n_choices=3,
                     afn="gelu",
                     resid_pdrop=0.1,
                     attn_pdrop=0.1,
                     embd_pdrop=0.1,
                     type_sequence_label_size=2,
                     initializer_range=0.02,
                     num_labels=3,
                     scope=None):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.is_training = is_training
            self.use_position_ids = use_position_ids
            self.use_token_type_ids = use_token_type_ids
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.n_special = n_special
            self.n_positions = n_positions
            self.n_embd = n_embd
            self.n_layer = n_layer
            self.n_head = n_head
            self.afn = afn
            self.n_choices = n_choices
            self.resid_pdrop = resid_pdrop
            self.attn_pdrop = attn_pdrop
            self.embd_pdrop = embd_pdrop
            self.type_sequence_label_size = type_sequence_label_size
            self.initializer_range = initializer_range
            self.num_labels = num_labels
            self.scope = scope

        def prepare_config_and_inputs(self):
            input_ids = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.vocab_size)

            position_ids = None
            if self.use_position_ids:
                position_ids = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.n_positions)

            token_type_ids = None
            if self.use_token_type_ids:
                total_voc = self.vocab_size + self.n_special
                token_type_ids = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], total_voc)

            mc_labels = None
            lm_labels = None
            mc_token_ids = None
            if self.use_labels:
                mc_labels = OpenAIGPTModelTest.ids_tensor([self.batch_size], self.type_sequence_label_size)
                lm_labels = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices, self.seq_length], self.num_labels)
                mc_token_ids = OpenAIGPTModelTest.ids_tensor([self.batch_size, self.n_choices], self.seq_length)

            config = OpenAIGPTConfig(
                vocab_size_or_config_json_file=self.vocab_size,
                n_positions=self.n_positions,
                n_special=self.n_special,
                n_embd=self.n_embd,
                n_layer=self.n_layer,
                n_head=self.n_head,
                afn=self.afn,
                resid_pdrop=self.resid_pdrop,
                attn_pdrop=self.attn_pdrop,
                embd_pdrop=self.embd_pdrop,
                initializer_range=self.initializer_range)

            return (config, input_ids, token_type_ids, position_ids,
                    mc_labels, lm_labels, mc_token_ids)

        def create_openai_model(self, config, input_ids, token_type_ids, position_ids,
                                mc_labels, lm_labels, mc_token_ids):
            model = OpenAIGPTModel(config)
            model.eval()
            hidden_states = model(input_ids, position_ids, token_type_ids)
            outputs = {
                "hidden_states": hidden_states,
            }
            return outputs

        def check_openai_model_output(self, result):
            self.parent.assertListEqual(
                list(result["hidden_states"].size()),
                [self.batch_size, self.n_choices, self.seq_length, self.n_embd])


        def create_openai_lm_head(self, config, input_ids, token_type_ids, position_ids,
                                       mc_labels, lm_labels, mc_token_ids):
            model = OpenAIGPTLMHeadModel(config)
            model.eval()
            loss = model(input_ids, position_ids, token_type_ids, lm_labels)
            lm_logits = model(input_ids, position_ids, token_type_ids)
            outputs = {
                "loss": loss,
                "lm_logits": lm_logits,
            }
            return outputs

        def check_openai_lm_head_output(self, result):
            total_voc = self.n_special + self.vocab_size
            self.parent.assertListEqual(
                list(result["lm_logits"].size()),
                [self.batch_size, self.n_choices, self.seq_length, total_voc])

        def check_openai_lm_head_loss_output(self, result):
            self.parent.assertListEqual(
                list(result["loss"].size()),
                [])

        def create_openai_double_heads(self, config, input_ids, token_type_ids, position_ids,
                                       mc_labels, lm_labels, mc_token_ids):
            model = OpenAIGPTDoubleHeadsModel(config)
            model.eval()
            loss = model(input_ids, mc_token_ids,
                         lm_labels=lm_labels, mc_labels=mc_labels,
                         token_type_ids=token_type_ids, position_ids=position_ids)
            lm_logits, mc_logits = model(input_ids, mc_token_ids, position_ids=position_ids, token_type_ids=token_type_ids)
            outputs = {
                "loss": loss,
                "lm_logits": lm_logits,
                "mc_logits": mc_logits,
            }
            return outputs

        def check_openai_double_heads_output(self, result):
            total_voc = self.n_special + self.vocab_size
            self.parent.assertListEqual(
                list(result["lm_logits"].size()),
                [self.batch_size, self.n_choices, self.seq_length, total_voc])
            self.parent.assertListEqual(
                list(result["mc_logits"].size()),
                [self.batch_size, self.n_choices])

        def check_openai_double_heads_loss_output(self, result):
            self.parent.assertListEqual(
                [list(l.size()) for l in result["loss"]],
                [[], []])

    def test_default(self):
        self.run_tester(OpenAIGPTModelTest.OpenAIGPTModelTester(self))

    def test_config_to_json_string(self):
        config = OpenAIGPTConfig(vocab_size_or_config_json_file=99, n_embd=37)
        obj = json.loads(config.to_json_string())
        self.assertEqual(obj["vocab_size"], 99)
        self.assertEqual(obj["n_embd"], 37)

    def test_config_to_json_file(self):
        config_first = OpenAIGPTConfig(vocab_size_or_config_json_file=99, n_embd=37)
        json_file_path = "/tmp/config.json"
        config_first.to_json_file(json_file_path)
        config_second = OpenAIGPTConfig.from_json_file(json_file_path)
        os.remove(json_file_path)
        self.assertEqual(config_second.to_dict(), config_first.to_dict())

    @pytest.mark.slow
    def test_model_from_pretrained(self):
        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
        for model_name in list(PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name, cache_dir=cache_dir)
            shutil.rmtree(cache_dir)
            self.assertIsNotNone(model)

    def run_tester(self, tester):
        config_and_inputs = tester.prepare_config_and_inputs()
        output_result = tester.create_openai_model(*config_and_inputs)
        tester.check_openai_model_output(output_result)

        output_result = tester.create_openai_lm_head(*config_and_inputs)
        tester.check_openai_lm_head_output(output_result)
        tester.check_openai_lm_head_loss_output(output_result)

        output_result = tester.create_openai_double_heads(*config_and_inputs)
        tester.check_openai_double_heads_output(output_result)
        tester.check_openai_double_heads_loss_output(output_result)

    @classmethod
    def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
        """Creates a random int32 tensor of the shape within the vocab size."""
        if rng is None:
            rng = random.Random()

        total_dims = 1
        for dim in shape:
            total_dims *= dim

        values = []
        for _ in range(total_dims):
            values.append(rng.randint(0, vocab_size - 1))

        return torch.tensor(data=values, dtype=torch.long).view(shape).contiguous()


if __name__ == "__main__":
    unittest.main()
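The OpenAIGPTDoubleHeadsModel tester encodes each example as a set of n_choices candidate sequences: input_ids has shape [batch_size, n_choices, seq_length], mc_token_ids gives the position of the token whose hidden state feeds the multiple-choice head, and the model returns per-token LM logits plus one classification logit per choice. A small sketch with the same tiny dimensions as the test; the random inputs are illustrative only:

import torch

from pytorch_pretrained_bert import OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel

# Tiny randomly initialized model with the tester's default dimensions.
config = OpenAIGPTConfig(vocab_size_or_config_json_file=99, n_special=1,
                         n_positions=33, n_embd=32, n_layer=5, n_head=4)
model = OpenAIGPTDoubleHeadsModel(config)
model.eval()

batch_size, n_choices, seq_length = 2, 3, 7
input_ids = torch.randint(0, 99, (batch_size, n_choices, seq_length), dtype=torch.long)
# Index of the classification token within each candidate sequence.
mc_token_ids = torch.randint(0, seq_length, (batch_size, n_choices), dtype=torch.long)

with torch.no_grad():
    lm_logits, mc_logits = model(input_ids, mc_token_ids)

print(lm_logits.shape)  # [2, 3, 7, 100]  (vocab_size + n_special)
print(mc_logits.shape)  # [2, 3]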
@@ -16,9 +16,12 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import unittest
import json
import random
import shutil
import pytest

import torch

@@ -26,6 +29,7 @@ from pytorch_pretrained_bert import (BertConfig, BertModel, BertForMaskedLM,
                                     BertForNextSentencePrediction, BertForPreTraining,
                                     BertForQuestionAnswering, BertForSequenceClassification,
                                     BertForTokenClassification)
from pytorch_pretrained_bert.modeling import PRETRAINED_MODEL_ARCHIVE_MAP


class BertModelTest(unittest.TestCase):
@@ -114,6 +118,7 @@ class BertModelTest(unittest.TestCase):

        def create_bert_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertModel(config=config)
            model.eval()
            all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
            outputs = {
                "sequence_output": all_encoder_layers[-1],
@@ -134,6 +139,7 @@ class BertModelTest(unittest.TestCase):

        def create_bert_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForMaskedLM(config=config)
            model.eval()
            loss = model(input_ids, token_type_ids, input_mask, token_labels)
            prediction_scores = model(input_ids, token_type_ids, input_mask)
            outputs = {
@@ -149,6 +155,7 @@ class BertModelTest(unittest.TestCase):

        def create_bert_for_next_sequence_prediction(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForNextSentencePrediction(config=config)
            model.eval()
            loss = model(input_ids, token_type_ids, input_mask, sequence_labels)
            seq_relationship_score = model(input_ids, token_type_ids, input_mask)
            outputs = {
@@ -165,6 +172,7 @@ class BertModelTest(unittest.TestCase):

        def create_bert_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForPreTraining(config=config)
            model.eval()
            loss = model(input_ids, token_type_ids, input_mask, token_labels, sequence_labels)
            prediction_scores, seq_relationship_score = model(input_ids, token_type_ids, input_mask)
            outputs = {
@@ -185,6 +193,7 @@ class BertModelTest(unittest.TestCase):

        def create_bert_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForQuestionAnswering(config=config)
            model.eval()
            loss = model(input_ids, token_type_ids, input_mask, sequence_labels, sequence_labels)
            start_logits, end_logits = model(input_ids, token_type_ids, input_mask)
            outputs = {
@@ -205,6 +214,7 @@ class BertModelTest(unittest.TestCase):

        def create_bert_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForSequenceClassification(config=config, num_labels=self.num_labels)
            model.eval()
            loss = model(input_ids, token_type_ids, input_mask, sequence_labels)
            logits = model(input_ids, token_type_ids, input_mask)
            outputs = {
@@ -221,6 +231,7 @@ class BertModelTest(unittest.TestCase):

        def create_bert_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels):
            model = BertForTokenClassification(config=config, num_labels=self.num_labels)
            model.eval()
            loss = model(input_ids, token_type_ids, input_mask, token_labels)
            logits = model(input_ids, token_type_ids, input_mask)
            outputs = {
@@ -244,6 +255,22 @@ class BertModelTest(unittest.TestCase):
        self.assertEqual(obj["vocab_size"], 99)
        self.assertEqual(obj["hidden_size"], 37)

    def test_config_to_json_file(self):
        config_first = BertConfig(vocab_size_or_config_json_file=99, hidden_size=37)
        json_file_path = "/tmp/config.json"
        config_first.to_json_file(json_file_path)
        config_second = BertConfig.from_json_file(json_file_path)
        os.remove(json_file_path)
        self.assertEqual(config_second.to_dict(), config_first.to_dict())

    @pytest.mark.slow
    def test_model_from_pretrained(self):
        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
        for model_name in list(PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            model = BertModel.from_pretrained(model_name, cache_dir=cache_dir)
            shutil.rmtree(cache_dir)
            self.assertIsNotNone(model)

    def run_tester(self, tester):
        config_and_inputs = tester.prepare_config_and_inputs()
        output_result = tester.create_bert_model(*config_and_inputs)
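Alongside the new test_config_to_json_file test, the updated create_bert_* helpers each call model.eval() before running the forward pass, so dropout is disabled and the shape and value checks see deterministic activations. The switch is the standard PyTorch train/eval toggle, illustrated here with a toy module:

import torch

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Dropout(p=0.5))
x = torch.ones(1, 8)

model.eval()                    # dropout becomes a no-op
with torch.no_grad():
    out1, out2 = model(x), model(x)
print(torch.equal(out1, out2))  # True: repeated calls agree in eval mode

model.train()                   # dropout active again; outputs become stochastic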
							
								
								
									
tests/modeling_transfo_xl_test.py (new file, 237 lines)

@@ -0,0 +1,237 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import unittest
import json
import random
import shutil
import pytest

import torch

from pytorch_pretrained_bert import (TransfoXLConfig, TransfoXLModel, TransfoXLLMHeadModel)
from pytorch_pretrained_bert.modeling_transfo_xl import PRETRAINED_MODEL_ARCHIVE_MAP

class TransfoXLModelTest(unittest.TestCase):
    class TransfoXLModelTester(object):

        def __init__(self,
                     parent,
                     batch_size=13,
                     seq_length=7,
                     mem_len=30,
                     clamp_len=15,
                     is_training=True,
                     use_labels=True,
                     vocab_size=99,
                     cutoffs=[10, 50, 80],
                     d_model=32,
                     d_embed=32,
                     n_head=4,
                     d_head=8,
                     d_inner=128,
                     div_val=2,
                     n_layer=5,
                     scope=None,
                     seed=1):
            self.parent = parent
            self.batch_size = batch_size
            self.seq_length = seq_length
            self.mem_len = mem_len
            self.clamp_len = clamp_len
            self.is_training = is_training
            self.use_labels = use_labels
            self.vocab_size = vocab_size
            self.cutoffs = cutoffs
            self.d_model = d_model
            self.d_embed = d_embed
            self.n_head = n_head
            self.d_head = d_head
            self.d_inner = d_inner
            self.div_val = div_val
            self.n_layer = n_layer
            self.scope = scope
            self.seed = seed

        def prepare_config_and_inputs(self):
            input_ids_1 = TransfoXLModelTest.ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
            input_ids_2 = TransfoXLModelTest.ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            lm_labels = None
            if self.use_labels:
                lm_labels = TransfoXLModelTest.ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            config = TransfoXLConfig(
                vocab_size_or_config_json_file=self.vocab_size,
                mem_len=self.mem_len,
                clamp_len=self.clamp_len,
                cutoffs=self.cutoffs,
                d_model=self.d_model,
                d_embed=self.d_embed,
                n_head=self.n_head,
                d_head=self.d_head,
                d_inner=self.d_inner,
                div_val=self.div_val,
                n_layer=self.n_layer)

            return (config, input_ids_1, input_ids_2, lm_labels)

        def set_seed(self):
            random.seed(self.seed)
            torch.manual_seed(self.seed)

        def create_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
            model = TransfoXLModel(config)
            model.eval()

            hidden_states_1, mems_1 = model(input_ids_1)
            hidden_states_2, mems_2 = model(input_ids_2, mems_1)
            outputs = {
                "hidden_states_1": hidden_states_1,
                "mems_1": mems_1,
                "hidden_states_2": hidden_states_2,
                "mems_2": mems_2,
            }
            return outputs

        def check_transfo_xl_model_output(self, result):
            self.parent.assertListEqual(
                list(result["hidden_states_1"].size()),
                [self.batch_size, self.seq_length, self.d_model])
            self.parent.assertListEqual(
                list(result["hidden_states_2"].size()),
                [self.batch_size, self.seq_length, self.d_model])
            self.parent.assertListEqual(
                list(list(mem.size()) for mem in result["mems_1"]),
                [[self.mem_len, self.batch_size, self.d_model]] * self.n_layer)
            self.parent.assertListEqual(
                list(list(mem.size()) for mem in result["mems_2"]),
                [[self.mem_len, self.batch_size, self.d_model]] * self.n_layer)


        def create_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
            model = TransfoXLLMHeadModel(config)
            model.eval()

            loss_1, mems_1a = model(input_ids_1, target=lm_labels)
            lm_logits_1, mems_1b = model(input_ids_1)

            loss_2, mems_2a = model(input_ids_2, target=lm_labels, mems=mems_1a)
            lm_logits_2, mems_2b = model(input_ids_2, mems=mems_1b)

            outputs = {
                "loss_1": loss_1,
                "mems_1a": mems_1a,
                "lm_logits_1": lm_logits_1,
                "mems_1b": mems_1b,
                "loss_2": loss_2,
                "mems_2a": mems_2a,
                "lm_logits_2": lm_logits_2,
                "mems_2b": mems_2b,
            }
            return outputs

        def check_transfo_xl_lm_head_output(self, result):
            self.parent.assertListEqual(
                list(result["loss_1"].size()),
                [self.batch_size, self.seq_length])
            self.parent.assertListEqual(
                list(result["lm_logits_1"].size()),
                [self.batch_size, self.seq_length, self.vocab_size])
            self.parent.assertListEqual(
                list(list(mem.size()) for mem in result["mems_1a"]),
                [[self.mem_len, self.batch_size, self.d_model]] * self.n_layer)
            self.parent.assertListEqual(
                list(list(mem.size()) for mem in result["mems_1b"]),
                [[self.mem_len, self.batch_size, self.d_model]] * self.n_layer)
            self.parent.assertListEqual(
                list(mem[~torch.isnan(mem)].sum() for mem in result["mems_1a"]),
                list(mem[~torch.isnan(mem)].sum() for mem in result["mems_1b"]))

            self.parent.assertListEqual(
                list(result["loss_2"].size()),
                [self.batch_size, self.seq_length])
            self.parent.assertListEqual(
                list(result["lm_logits_2"].size()),
                [self.batch_size, self.seq_length, self.vocab_size])
            self.parent.assertListEqual(
                list(list(mem.size()) for mem in result["mems_2a"]),
                [[self.mem_len, self.batch_size, self.d_model]] * self.n_layer)
            self.parent.assertListEqual(
                list(list(mem.size()) for mem in result["mems_2b"]),
                [[self.mem_len, self.batch_size, self.d_model]] * self.n_layer)
            self.parent.assertListEqual(
                list(mem[~torch.isnan(mem)].sum() for mem in result["mems_2a"]),
                list(mem[~torch.isnan(mem)].sum() for mem in result["mems_2b"]))

    def test_default(self):
        self.run_tester(TransfoXLModelTest.TransfoXLModelTester(self))

    def test_config_to_json_string(self):
        config = TransfoXLConfig(vocab_size_or_config_json_file=96, d_embed=37)
        obj = json.loads(config.to_json_string())
        self.assertEqual(obj["n_token"], 96)
        self.assertEqual(obj["d_embed"], 37)

    def test_config_to_json_file(self):
        config_first = TransfoXLConfig(vocab_size_or_config_json_file=96, d_embed=37)
        json_file_path = "/tmp/config.json"
        config_first.to_json_file(json_file_path)
        config_second = TransfoXLConfig.from_json_file(json_file_path)
        os.remove(json_file_path)
        self.assertEqual(config_second.to_dict(), config_first.to_dict())

    @pytest.mark.slow
    def test_model_from_pretrained(self):
        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
        for model_name in list(PRETRAINED_MODEL_ARCHIVE_MAP.keys())[:1]:
            model = TransfoXLModel.from_pretrained(model_name, cache_dir=cache_dir)
            shutil.rmtree(cache_dir)
            self.assertIsNotNone(model)

    def run_tester(self, tester):
        config_and_inputs = tester.prepare_config_and_inputs()

        tester.set_seed()
        output_result = tester.create_transfo_xl_model(*config_and_inputs)
        tester.check_transfo_xl_model_output(output_result)

        tester.set_seed()
        output_result = tester.create_transfo_xl_lm_head(*config_and_inputs)
        tester.check_transfo_xl_lm_head_output(output_result)

    @classmethod
    def ids_tensor(cls, shape, vocab_size, rng=None, name=None):
        """Creates a random int32 tensor of the shape within the vocab size."""
        if rng is None:
            rng = random.Random()

        total_dims = 1
        for dim in shape:
            total_dims *= dim

        values = []
        for _ in range(total_dims):
            values.append(rng.randint(0, vocab_size - 1))

        return torch.tensor(data=values, dtype=torch.long).view(shape).contiguous()


if __name__ == "__main__":
    unittest.main()
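The Transformer-XL tests above feed the mems returned by one forward pass into the next call, which is how the model carries hidden-state memory across segments. A minimal sketch of that pattern with a tiny randomly initialized config, using the same dimensions as the tester defaults; the random inputs are illustrative only:

import torch

from pytorch_pretrained_bert import TransfoXLConfig, TransfoXLModel

config = TransfoXLConfig(vocab_size_or_config_json_file=99, mem_len=30, clamp_len=15,
                         cutoffs=[10, 50, 80], d_model=32, d_embed=32,
                         n_head=4, d_head=8, d_inner=128, div_val=2, n_layer=5)
model = TransfoXLModel(config)
model.eval()

segment_1 = torch.randint(0, 99, (2, 7), dtype=torch.long)
segment_2 = torch.randint(0, 99, (2, 7), dtype=torch.long)

with torch.no_grad():
    hidden_1, mems = model(segment_1)        # mems: one [mem_len, batch, d_model] tensor per layer
    hidden_2, mems = model(segment_2, mems)  # reuse the memory when processing the next segment

print(hidden_2.shape)            # torch.Size([2, 7, 32])
print(len(mems), mems[0].shape)  # 5 layers, torch.Size([30, 2, 32])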
@@ -21,6 +21,11 @@ import unittest
import torch

from pytorch_pretrained_bert import BertAdam
from pytorch_pretrained_bert import OpenAIAdam
from pytorch_pretrained_bert.optimization import ConstantLR, WarmupLinearSchedule, WarmupConstantSchedule, \
    WarmupCosineWithWarmupRestartsSchedule, WarmupCosineWithHardRestartsSchedule, WarmupCosineSchedule
import numpy as np


class OptimizationTest(unittest.TestCase):

@@ -46,5 +51,41 @@ class OptimizationTest(unittest.TestCase):
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


class ScheduleInitTest(unittest.TestCase):
    def test_bert_sched_init(self):
        m = torch.nn.Linear(50, 50)
        optim = BertAdam(m.parameters(), lr=0.001, warmup=.1, t_total=1000, schedule=None)
        self.assertTrue(isinstance(optim.param_groups[0]["schedule"], ConstantLR))
        optim = BertAdam(m.parameters(), lr=0.001, warmup=.1, t_total=1000, schedule="none")
        self.assertTrue(isinstance(optim.param_groups[0]["schedule"], ConstantLR))
        optim = BertAdam(m.parameters(), lr=0.001, warmup=.01, t_total=1000)
        self.assertTrue(isinstance(optim.param_groups[0]["schedule"], WarmupLinearSchedule))
        # shouldn't fail

    def test_openai_sched_init(self):
        m = torch.nn.Linear(50, 50)
        optim = OpenAIAdam(m.parameters(), lr=0.001, warmup=.1, t_total=1000, schedule=None)
        self.assertTrue(isinstance(optim.param_groups[0]["schedule"], ConstantLR))
        optim = OpenAIAdam(m.parameters(), lr=0.001, warmup=.1, t_total=1000, schedule="none")
        self.assertTrue(isinstance(optim.param_groups[0]["schedule"], ConstantLR))
        optim = OpenAIAdam(m.parameters(), lr=0.001, warmup=.01, t_total=1000)
        self.assertTrue(isinstance(optim.param_groups[0]["schedule"], WarmupLinearSchedule))
        # shouldn't fail


class WarmupCosineWithRestartsTest(unittest.TestCase):
    def test_it(self):
        m = WarmupCosineWithWarmupRestartsSchedule(warmup=0.05, t_total=1000., cycles=5)
        x = np.arange(0, 1000)
        y = [m.get_lr(xe) for xe in x]
        y = np.asarray(y)
        expected_zeros = y[[0, 200, 400, 600, 800]]
        print(expected_zeros)
        expected_ones = y[[50, 250, 450, 650, 850]]
        print(expected_ones)
        self.assertTrue(np.allclose(expected_ones, 1))
        self.assertTrue(np.allclose(expected_zeros, 0))


if __name__ == "__main__":
    unittest.main()
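The new tests cover how BertAdam and OpenAIAdam resolve their schedule argument: passing None or "none" selects a constant learning rate, while the default applies linear warmup followed by linear decay over t_total steps. A typical construction therefore looks like:

import torch

from pytorch_pretrained_bert import BertAdam

model = torch.nn.Linear(50, 50)

# Warm the learning rate up over the first 10% of 1000 steps, then decay linearly
# (the default "warmup_linear" schedule exercised by ScheduleInitTest above).
optimizer = BertAdam(model.parameters(), lr=1e-3, warmup=0.1, t_total=1000)

# schedule="none" (or None) keeps the learning rate constant instead.
constant_optimizer = BertAdam(model.parameters(), lr=1e-3, warmup=0.1, t_total=1000,
                              schedule="none")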
							
								
								
									
tests/tokenization_gpt2_test.py (new file, 77 lines)

@@ -0,0 +1,77 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals

import os
import unittest
import json
import shutil
import pytest

from pytorch_pretrained_bert.tokenization_gpt2 import GPT2Tokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP


class GPT2TokenizationTest(unittest.TestCase):

    def test_full_tokenizer(self):
        """ Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt """
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
                 "lo", "low", "er",
                 "low", "lowest", "newer", "wider"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r", ""]
        with open("/tmp/openai_tokenizer_vocab_test.json", "w") as fp:
            fp.write(json.dumps(vocab_tokens))
            vocab_file = fp.name
        with open("/tmp/openai_tokenizer_merges_test.txt", "w") as fp:
            fp.write("\n".join(merges))
            merges_file = fp.name

        tokenizer = GPT2Tokenizer(vocab_file, merges_file, special_tokens=["<unk>", "<pad>"])
        os.remove(vocab_file)
        os.remove(merges_file)

        text = "lower"
        bpe_tokens = ["low", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [13, 12, 16]
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

        vocab_file, merges_file, special_tokens_file = tokenizer.save_vocabulary(vocab_path="/tmp/")
        tokenizer_2 = GPT2Tokenizer.from_pretrained("/tmp/")
        os.remove(vocab_file)
        os.remove(merges_file)
        os.remove(special_tokens_file)

        self.assertListEqual(
            [tokenizer.encoder, tokenizer.decoder, tokenizer.bpe_ranks,
             tokenizer.special_tokens, tokenizer.special_tokens_decoder],
            [tokenizer_2.encoder, tokenizer_2.decoder, tokenizer_2.bpe_ranks,
             tokenizer_2.special_tokens, tokenizer_2.special_tokens_decoder])

    # @pytest.mark.slow
    def test_tokenizer_from_pretrained(self):
        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
        for model_name in list(PRETRAINED_VOCAB_ARCHIVE_MAP.keys())[:1]:
            tokenizer = GPT2Tokenizer.from_pretrained(model_name, cache_dir=cache_dir)
            shutil.rmtree(cache_dir)
            self.assertIsNotNone(tokenizer)

if __name__ == '__main__':
    unittest.main()
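Beyond the toy vocabulary used in the test, the same tokenizer can be loaded with the released GPT-2 vocabulary and merges via from_pretrained. A short sketch following the methods exercised above (the shortcut name "gpt2" downloads the vocabulary files on first use):

from pytorch_pretrained_bert.tokenization_gpt2 import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

tokens = tokenizer.tokenize("Hello world")  # byte-level BPE tokens
token_ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokens, token_ids)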
							
								
								
									
										79
									
								
								tests/tokenization_openai_test.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										79
									
								
								tests/tokenization_openai_test.py
									
									
									
									
									
										Normal file
									
								
							@ -0,0 +1,79 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals

import os
import unittest
import json
import shutil
import pytest

from pytorch_pretrained_bert.tokenization_openai import OpenAIGPTTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP


class OpenAIGPTTokenizationTest(unittest.TestCase):

    def test_full_tokenizer(self):
        """ Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt """
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
                 "w</w>", "r</w>", "t</w>",
                 "lo", "low", "er</w>",
                 "low</w>", "lowest</w>", "newer</w>", "wider</w>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]
        with open("/tmp/openai_tokenizer_vocab_test.json", "w") as fp:
            fp.write(json.dumps(vocab_tokens))
            vocab_file = fp.name
        with open("/tmp/openai_tokenizer_merges_test.txt", "w") as fp:
            fp.write("\n".join(merges))
            merges_file = fp.name

        tokenizer = OpenAIGPTTokenizer(vocab_file, merges_file, special_tokens=["<unk>", "<pad>"])
        os.remove(vocab_file)
        os.remove(merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

        vocab_file, merges_file, special_tokens_file = tokenizer.save_vocabulary(vocab_path="/tmp/")
        tokenizer_2 = OpenAIGPTTokenizer.from_pretrained("/tmp/")
        os.remove(vocab_file)
        os.remove(merges_file)
        os.remove(special_tokens_file)

        self.assertListEqual(
            [tokenizer.encoder, tokenizer.decoder, tokenizer.bpe_ranks,
             tokenizer.special_tokens, tokenizer.special_tokens_decoder],
            [tokenizer_2.encoder, tokenizer_2.decoder, tokenizer_2.bpe_ranks,
             tokenizer_2.special_tokens, tokenizer_2.special_tokens_decoder])

    @pytest.mark.slow
    def test_tokenizer_from_pretrained(self):
        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
        for model_name in list(PRETRAINED_VOCAB_ARCHIVE_MAP.keys())[:1]:
            tokenizer = OpenAIGPTTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
            shutil.rmtree(cache_dir)
            self.assertIsNotNone(tokenizer)


if __name__ == '__main__':
    unittest.main()
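Two details of this OpenAI GPT test differ from the GPT-2 one above: the merges end words with an "er</w>" marker, so "lower" becomes ["low", "er</w>"] rather than ["low", "er"], and the expected ids [14, 15, 20] place "<unk>" at 20, one past the 20-entry toy vocab ("low" is entry 14, "er</w>" entry 15). The latter suggests, inferred here from the assertion rather than stated in this diff, that special_tokens passed to the constructor are appended after the base vocabulary. A short sketch of that assumption:

# Hypothetical illustration (not library code): special tokens are assumed to be
# assigned ids directly after the base vocabulary.
base_vocab_size = 20                      # the toy vocab above has 20 entries
special_tokens = ["<unk>", "<pad>"]
special_ids = {tok: base_vocab_size + i for i, tok in enumerate(special_tokens)}
assert special_ids["<unk>"] == 20         # matches the expected id for "<unk>"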
tests/tokenization_test.py
@@ -12,15 +12,19 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
+from __future__ import absolute_import, division, print_function, unicode_literals

 import os
 import unittest
+from io import open
+import shutil
+import pytest

-from pytorch_pretrained_bert.tokenization import (BertTokenizer, BasicTokenizer, WordpieceTokenizer,
-                                                  _is_whitespace, _is_control, _is_punctuation)
+from pytorch_pretrained_bert.tokenization import (BasicTokenizer,
+                                                  BertTokenizer,
+                                                  WordpieceTokenizer,
+                                                  _is_control, _is_punctuation,
+                                                  _is_whitespace, PRETRAINED_VOCAB_ARCHIVE_MAP)


 class TokenizationTest(unittest.TestCase):
@@ -30,7 +34,7 @@ class TokenizationTest(unittest.TestCase):
             "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn",
             "##ing", ","
         ]
-        with open("/tmp/bert_tokenizer_test.txt", "w") as vocab_writer:
+        with open("/tmp/bert_tokenizer_test.txt", "w", encoding='utf-8') as vocab_writer:
             vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

             vocab_file = vocab_writer.name
@@ -44,12 +48,30 @@ class TokenizationTest(unittest.TestCase):
         self.assertListEqual(
             tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

+        vocab_file = tokenizer.save_vocabulary(vocab_path="/tmp/")
+        tokenizer.from_pretrained(vocab_file)
+        os.remove(vocab_file)
+
+        tokens = tokenizer.tokenize(u"UNwant\u00E9d,running")
+        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
+
+        self.assertListEqual(
+            tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])
+
+    @pytest.mark.slow
+    def test_tokenizer_from_pretrained(self):
+        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
+        for model_name in list(PRETRAINED_VOCAB_ARCHIVE_MAP.keys())[:1]:
+            tokenizer = BertTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
+            shutil.rmtree(cache_dir)
+            self.assertIsNotNone(tokenizer)
+
     def test_chinese(self):
         tokenizer = BasicTokenizer()
-    
+
         self.assertListEqual(
             tokenizer.tokenize(u"ah\u535A\u63A8zz"),
-            [u"ah", u"\u535A", u"\u63A8", u"zz"])  
+            [u"ah", u"\u535A", u"\u63A8", u"zz"])

     def test_basic_tokenizer_lower(self):
         tokenizer = BasicTokenizer(do_lower_case=True)
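The added round-trip assertion expects tokenize(u"UNwant\u00E9d,running") to give ["un", "##want", "##ed", ",", "runn", "##ing"]: basic tokenization lower-cases, strips the accent, and splits off the comma, and each remaining word is then split by greedy longest-match-first WordPiece lookup, with "##" marking non-initial pieces. Below is a minimal sketch of that lookup, written for illustration; it is not BertTokenizer and skips the basic-tokenization step just described.

# Greedy longest-match-first WordPiece sketch (illustration only).
def toy_wordpiece(word, vocab):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end]
            if start > 0:
                piece = "##" + piece          # non-initial pieces carry the "##" prefix
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:
            return ["[UNK]"]                  # no prefix of the remainder is in the vocab
        start = end
    return pieces

vocab = {"un", "##want", "##ed", "runn", "##ing", "wa"}
print(toy_wordpiece("unwanted", vocab))   # ['un', '##want', '##ed']
print(toy_wordpiece("running", vocab))    # ['runn', '##ing']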

tests/tokenization_transfo_xl_test.py  (new file, 80 lines)
@@ -0,0 +1,80 @@
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals

import os
import unittest
from io import open
import shutil
import pytest

from pytorch_pretrained_bert.tokenization_transfo_xl import TransfoXLTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP


class TransfoXLTokenizationTest(unittest.TestCase):

    def test_full_tokenizer(self):
        vocab_tokens = [
            "<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un", "running", ","
        ]
        with open("/tmp/transfo_xl_tokenizer_test.txt", "w", encoding='utf-8') as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
            vocab_file = vocab_writer.name

        tokenizer = TransfoXLTokenizer(vocab_file=vocab_file, lower_case=True)
        tokenizer.build_vocab()
        os.remove(vocab_file)

        tokens = tokenizer.tokenize(u"<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

        vocab_file = tokenizer.save_vocabulary(vocab_path="/tmp/")
        tokenizer.from_pretrained(vocab_file)
        os.remove(vocab_file)

        tokens = tokenizer.tokenize(u"<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])


    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(u" \tHeLLo ! how  \n Are yoU ?  "),
            ["hello", "!", "how", "are", "you", "?"])

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(u" \tHeLLo ! how  \n Are yoU ?  "),
            ["HeLLo", "!", "how", "Are", "yoU", "?"])

    @pytest.mark.slow
    def test_tokenizer_from_pretrained(self):
        cache_dir = "/tmp/pytorch_pretrained_bert_test/"
        for model_name in list(PRETRAINED_VOCAB_ARCHIVE_MAP.keys())[:1]:
            tokenizer = TransfoXLTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
            shutil.rmtree(cache_dir)
            self.assertIsNotNone(tokenizer)


if __name__ == '__main__':
    unittest.main()
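Unlike the BPE and WordPiece tests above, the Transformer-XL tokenizer here is word-level: the asserted ids [0, 4, 8, 7] are simply positions in the nine-entry toy vocab file, looked up after whitespace splitting and lower-casing. A rough sketch of that mapping, for illustration only; TransfoXLTokenizer builds its vocabulary with a counter via build_vocab() and handles unknown words more generally than this.

# Illustrative word-level lookup against the toy vocab from the test above.
vocab = ["<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un", "running", ","]
word2id = {word: idx for idx, word in enumerate(vocab)}
text = u"<unk> UNwanted , running"
ids = [word2id.get(tok.lower(), word2id["<unk>"]) for tok in text.split()]
assert ids == [0, 4, 8, 7]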