Mirror of https://github.com/huggingface/transformers.git (synced 2025-10-20 17:13:56 +08:00)

Compare commits (92 commits)
| SHA1 |
|---|
| 5cd4032368 |
| 70fa1a8d26 |
| c7849d9efc |
| 893122f666 |
| bf174f916b |
| 575aa6ef1a |
| 586dcf6b21 |
| 14928921e2 |
| 0bf1e1aca4 |
| d2704c4143 |
| f9a0008d2d |
| 672b66262a |
| df28de0581 |
| 330247ede2 |
| c74befc9e3 |
| fc1d841b2d |
| b69a62d579 |
| 02b176c4ce |
| be41eaf55f |
| fc546332d7 |
| 8fb7c908c8 |
| a507908cd3 |
| 3db4378bd7 |
| 10e1ec9a8c |
| 9d7b70bcd7 |
| 92915ebec2 |
| 22a0dd2ef7 |
| 5096a654b7 |
| 042f420364 |
| c382ed8a2f |
| dbd9641c8c |
| 5546fb61ab |
| 2959d09072 |
| 8ae7784256 |
| 0b8c1b6994 |
| dd21fb378f |
| 68a894a587 |
| df5e4232f5 |
| 24845aeb6d |
| 151a2aaa4e |
| 01db72abd4 |
| 3909d7f139 |
| 941d233153 |
| 62098b9348 |
| 7b9e995b70 |
| e0bc4c73e8 |
| 39e76d76fd |
| 1141371103 |
| af1e6b4d87 |
| bd6d1b4300 |
| 1cd7c6f154 |
| 96b5d7db9c |
| 679d68a11b |
| 1f84399171 |
| 25ec12eaf7 |
| a7360385f4 |
| b2e4b091f0 |
| 51227e26ab |
| 4e2f4a92dd |
| 1763770bd9 |
| 986526a0e4 |
| a64bcb564d |
| a4ee463d95 |
| da503ea02f |
| a2586795e5 |
| 7b0908769b |
| 9c336657a9 |
| b53dab601c |
| 286a18fa00 |
| 5d1fed0740 |
| 985c7e3ac9 |
| a8e279579b |
| 1e380c7dcb |
| 96be1b7f49 |
| 2b81f72be9 |
| e87ac9d18b |
| c89a592e87 |
| 7490a97cac |
| 9caf68a638 |
| 0077360d67 |
| 5c5676cdf9 |
| cf32b2ee42 |
| 170fcaa604 |
| 83d2d74509 |
| 7996ef74dd |
| 70e7d1d656 |
| 1d71ad8905 |
| d5610b53fa |
| e318cda9ee |
| ccd4180f8a |
| 5dfec704da |
| 47c2af0951 |
.github/workflows/self-scheduled.yml (vendored): 8 changes
```diff
@@ -187,19 +187,19 @@ jobs:
         working-directory: /transformers
         run: |
           pip install -r examples/pytorch/_tests_requirements.txt
-          python3 -m pytest -v --make-reports=examples_gpu examples/pytorch
+          python3 -m pytest -v --make-reports=single-gpu_examples_gpu examples/pytorch
 
       - name: Failure short reports
         if: ${{ failure() }}
         continue-on-error: true
-        run: cat /transformers/reports/examples_gpu/failures_short.txt
+        run: cat /transformers/reports/single-gpu_examples_gpu/failures_short.txt
 
       - name: Test suite reports artifacts
         if: ${{ always() }}
         uses: actions/upload-artifact@v2
         with:
-          name: run_examples_gpu
-          path: /transformers/reports/examples_gpu
+          name: single-gpu_run_examples_gpu
+          path: /transformers/reports/single-gpu_examples_gpu
 
   run_pipelines_torch_gpu:
     name: PyTorch pipelines
```
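Taken together, this hunk prefixes the examples test reports with `single-gpu_`: the pytest `--make-reports` directory, the failure-report path, and the uploaded artifact name all move from `examples_gpu` to `single-gpu_examples_gpu`. Below is a minimal sketch of how the affected steps read after the change; the first step's name and the exact indentation are assumptions, since the diff does not show them.

```yaml
# Sketch of the examples-test steps after the rename (surrounding job definition omitted).
      - name: Run examples tests on GPU   # step name assumed; not visible in the hunk
        working-directory: /transformers
        run: |
          pip install -r examples/pytorch/_tests_requirements.txt
          python3 -m pytest -v --make-reports=single-gpu_examples_gpu examples/pytorch

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/single-gpu_examples_gpu/failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v2
        with:
          name: single-gpu_run_examples_gpu
          path: /transformers/reports/single-gpu_examples_gpu
```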
README.md: 16 changes
```diff
@@ -300,7 +300,7 @@ Current number of checkpoints:
 1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach
 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
-1. **[GroupViT](https://huggingface.co/docs/transformers/main/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
+1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
 1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
 1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
 1. **[ImageGPT](https://huggingface.co/docs/transformers/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
@@ -324,15 +324,15 @@ Current number of checkpoints:
 1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
 1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
 1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
-1. **[MobileViT](https://huggingface.co/docs/transformers/main/model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
+1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
 1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
-1. **[MVP](https://huggingface.co/docs/transformers/main/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
-1. **[Nezha](https://huggingface.co/docs/transformers/main/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
-1. **[NLLB](https://huggingface.co/docs/transformers/main/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
+1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
+1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
+1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
-1. **[OWL-ViT](https://huggingface.co/docs/transformers/main/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
+1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
@@ -356,6 +356,7 @@ Current number of checkpoints:
 1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
 1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
 1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
+1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/main/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.
 1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
@@ -363,10 +364,11 @@ Current number of checkpoints:
 1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
-1. **[UL2](https://huggingface.co/docs/transformers/main/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
+1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
+1. **[VideoMAE](https://huggingface.co/docs/transformers/main/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
 1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
```
README_ko.md: 16 changes
```diff
@@ -256,7 +256,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
 1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach
 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
-1. **[GroupViT](https://huggingface.co/docs/transformers/main/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
+1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
 1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
 1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
 1. **[ImageGPT](https://huggingface.co/docs/transformers/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
@@ -280,15 +280,15 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
 1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
 1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
 1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
-1. **[MobileViT](https://huggingface.co/docs/transformers/main/model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
+1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
 1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
-1. **[MVP](https://huggingface.co/docs/transformers/main/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
-1. **[Nezha](https://huggingface.co/docs/transformers/main/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
-1. **[NLLB](https://huggingface.co/docs/transformers/main/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
+1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
+1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
+1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
-1. **[OWL-ViT](https://huggingface.co/docs/transformers/main/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
+1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
@@ -312,6 +312,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
 1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
 1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
 1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
+1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/main/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.
 1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
@@ -319,10 +320,11 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
 1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
-1. **[UL2](https://huggingface.co/docs/transformers/main/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
+1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
+1. **[VideoMAE](https://huggingface.co/docs/transformers/main/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
 1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
```
```diff
@@ -280,7 +280,7 @@ conda install -c huggingface transformers
 1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach
 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (来自 OpenAI) 伴随论文 [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) 由 Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever** 发布。
 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (来自 EleutherAI) 伴随论文 [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) 由 Ben Wang and Aran Komatsuzaki 发布。
-1. **[GroupViT](https://huggingface.co/docs/transformers/main/model_doc/groupvit)** (来自 UCSD, NVIDIA) 伴随论文 [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) 由 Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang 发布。
+1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (来自 UCSD, NVIDIA) 伴随论文 [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) 由 Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang 发布。
 1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (来自 Facebook) 伴随论文 [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) 由 Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed 发布。
 1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (来自 Berkeley) 伴随论文 [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) 由 Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer 发布。
 1. **[ImageGPT](https://huggingface.co/docs/transformers/model_doc/imagegpt)** (来自 OpenAI) 伴随论文 [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) 由 Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever 发布。
@@ -304,15 +304,15 @@ conda install -c huggingface transformers
 1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (来自 NVIDIA) 伴随论文 [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) 由 Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro 发布。
 1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (来自 Studio Ousia) 伴随论文 [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) 由 Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka 发布。
 1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (来自 CMU/Google Brain) 伴随论文 [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) 由 Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou 发布。
-1. **[MobileViT](https://huggingface.co/docs/transformers/main/model_doc/mobilevit)** (来自 Apple) 伴随论文 [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) 由 Sachin Mehta and Mohammad Rastegari 发布。
+1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (来自 Apple) 伴随论文 [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) 由 Sachin Mehta and Mohammad Rastegari 发布。
 1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (来自 Microsoft Research) 伴随论文 [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) 由 Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu 发布。
 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (来自 Google AI) 伴随论文 [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) 由 Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel 发布。
-1. **[MVP](https://huggingface.co/docs/transformers/main/model_doc/mvp)** (来自 中国人民大学 AI Box) 伴随论文 [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) 由 Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen 发布。
-1. **[Nezha](https://huggingface.co/docs/transformers/main/model_doc/nezha)** (来自华为诺亚方舟实验室) 伴随论文 [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) 由 Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu 发布。
-1. **[NLLB](https://huggingface.co/docs/transformers/main/model_doc/nllb)** (来自 Meta) 伴随论文 [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) 由 the NLLB team 发布。
+1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (来自 中国人民大学 AI Box) 伴随论文 [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) 由 Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen 发布。
+1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (来自华为诺亚方舟实验室) 伴随论文 [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) 由 Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu 发布。
+1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (来自 Meta) 伴随论文 [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) 由 the NLLB team 发布。
 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (来自 the University of Wisconsin - Madison) 伴随论文 [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) 由 Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh 发布。
 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (来自 Meta AI) 伴随论文 [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 由 Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al 发布。
-1. **[OWL-ViT](https://huggingface.co/docs/transformers/main/model_doc/owlvit)** (来自 Google AI) 伴随论文 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 由 Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 发布。
+1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (来自 Google AI) 伴随论文 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 由 Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 发布。
 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (来自 Google) 伴随论文 [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 由 Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu 发布。
 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (来自 Deepmind) 伴随论文 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 由 Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 发布。
 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (来自 VinAI Research) 伴随论文 [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) 由 Dat Quoc Nguyen and Anh Tuan Nguyen 发布。
@@ -336,6 +336,7 @@ conda install -c huggingface transformers
 1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (来自 Tel Aviv University) 伴随论文 [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) 由 Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy 发布。
 1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (来自 Berkeley) 伴随论文 [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) 由 Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer 发布。
 1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (来自 Microsoft) 伴随论文 [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) 由 Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo 发布。
+1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/main/model_doc/swinv2)** (来自 Microsoft) 伴随论文 [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) 由 Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo 发布。
 1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (来自 Google AI) 伴随论文 [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) 由 Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu 发布。
 1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (来自 Google AI) 伴随论文 [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) 由 Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu 发布。
 1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (来自 Google AI) 伴随论文 [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) 由 Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos 发布。
@@ -343,10 +344,11 @@ conda install -c huggingface transformers
 1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
 1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (来自 Google/CMU) 伴随论文 [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) 由 Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov 发布。
 1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (来自 Microsoft) 伴随论文 [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 由 Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei 发布。
-1. **[UL2](https://huggingface.co/docs/transformers/main/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
+1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
 1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (来自 Microsoft Research) 伴随论文 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 由 Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 发布。
 1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (来自 Microsoft Research) 伴随论文 [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) 由 Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu 发布。
 1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (来自 Tsinghua University and Nankai University) 伴随论文 [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) 由 Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu 发布。
+1. **[VideoMAE](https://huggingface.co/docs/transformers/main/model_doc/videomae)** (来自 Multimedia Computing Group, Nanjing University) 伴随论文 [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) 由 Zhan Tong, Yibing Song, Jue Wang, Limin Wang 发布。
 1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (来自 NAVER AI Lab/Kakao Enterprise/Kakao Brain) 伴随论文 [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) 由 Wonjae Kim, Bokyung Son, Ildoo Kim 发布。
 1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (来自 Google AI) 伴随论文 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 由 Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 发布。
 1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (来自 UCLA NLP) 伴随论文 [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) 由 Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang 发布。
```
```diff
@@ -292,7 +292,7 @@ conda install -c huggingface transformers
 1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach
 1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
 1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released with the paper [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
-1. **[GroupViT](https://huggingface.co/docs/transformers/main/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
+1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
 1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
 1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
 1. **[ImageGPT](https://huggingface.co/docs/transformers/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
@@ -316,15 +316,15 @@ conda install -c huggingface transformers
 1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
 1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
 1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
-1. **[MobileViT](https://huggingface.co/docs/transformers/main/model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
+1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
 1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
 1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
-1. **[MVP](https://huggingface.co/docs/transformers/main/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
-1. **[Nezha](https://huggingface.co/docs/transformers/main/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
-1. **[NLLB](https://huggingface.co/docs/transformers/main/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
+1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
+1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
+1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
 1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
 1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
-1. **[OWL-ViT](https://huggingface.co/docs/transformers/main/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
+1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
 1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
 1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
 1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
@@ -348,6 +348,7 @@ conda install -c huggingface transformers
 1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University) released with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
 1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
 1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
+1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/main/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.
```
|
||||
1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released with the paper [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
|
||||
@ -355,10 +356,11 @@ conda install -c huggingface transformers
|
||||
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
|
||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
|
||||
1. **[UL2](https://huggingface.co/docs/transformers/main/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
|
||||
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
|
||||
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
|
||||
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
|
||||
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
|
||||
1. **[VideoMAE](https://huggingface.co/docs/transformers/main/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
|
||||
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
|
||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
|
||||
|
@ -68,6 +68,8 @@
|
||||
title: Training on many GPUs
|
||||
- local: perf_train_cpu
|
||||
title: Training on CPU
|
||||
- local: perf_train_cpu_many
|
||||
title: Training on many CPUs
|
||||
- local: perf_train_tpu
|
||||
title: Training on TPUs
|
||||
- local: perf_train_special
|
||||
@ -162,284 +164,303 @@
|
||||
title: Feature Extractor
|
||||
title: Main Classes
|
||||
- sections:
|
||||
- local: model_doc/albert
|
||||
title: ALBERT
|
||||
- local: model_doc/auto
|
||||
title: Auto Classes
|
||||
- local: model_doc/bart
|
||||
title: BART
|
||||
- local: model_doc/barthez
|
||||
title: BARThez
|
||||
- local: model_doc/bartpho
|
||||
title: BARTpho
|
||||
- local: model_doc/beit
|
||||
title: BEiT
|
||||
- local: model_doc/bert
|
||||
title: BERT
|
||||
- local: model_doc/bert-generation
|
||||
title: BertGeneration
|
||||
- local: model_doc/bert-japanese
|
||||
title: BertJapanese
|
||||
- local: model_doc/bertweet
|
||||
title: Bertweet
|
||||
- local: model_doc/big_bird
|
||||
title: BigBird
|
||||
- local: model_doc/bigbird_pegasus
|
||||
title: BigBirdPegasus
|
||||
- local: model_doc/blenderbot
|
||||
title: Blenderbot
|
||||
- local: model_doc/blenderbot-small
|
||||
title: Blenderbot Small
|
||||
- local: model_doc/bloom
|
||||
title: BLOOM
|
||||
- local: model_doc/bort
|
||||
title: BORT
|
||||
- local: model_doc/byt5
|
||||
title: ByT5
|
||||
- local: model_doc/camembert
|
||||
title: CamemBERT
|
||||
- local: model_doc/canine
|
||||
title: CANINE
|
||||
- local: model_doc/clip
|
||||
title: CLIP
|
||||
- local: model_doc/codegen
|
||||
title: CodeGen
|
||||
- local: model_doc/convbert
|
||||
title: ConvBERT
|
||||
- local: model_doc/convnext
|
||||
title: ConvNeXT
|
||||
- local: model_doc/cpm
|
||||
title: CPM
|
||||
- local: model_doc/ctrl
|
||||
title: CTRL
|
||||
- local: model_doc/cvt
|
||||
title: CvT
|
||||
- local: model_doc/data2vec
|
||||
title: Data2Vec
|
||||
- local: model_doc/deberta
|
||||
title: DeBERTa
|
||||
- local: model_doc/deberta-v2
|
||||
title: DeBERTa-v2
|
||||
- local: model_doc/decision_transformer
|
||||
title: Decision Transformer
|
||||
- local: model_doc/deit
|
||||
title: DeiT
|
||||
- local: model_doc/detr
|
||||
title: DETR
|
||||
- local: model_doc/dialogpt
|
||||
title: DialoGPT
|
||||
- local: model_doc/distilbert
|
||||
title: DistilBERT
|
||||
- local: model_doc/dit
|
||||
title: DiT
|
||||
- local: model_doc/dpr
|
||||
title: DPR
|
||||
- local: model_doc/dpt
|
||||
title: DPT
|
||||
- local: model_doc/electra
|
||||
title: ELECTRA
|
||||
- local: model_doc/encoder-decoder
|
||||
title: Encoder Decoder Models
|
||||
- local: model_doc/flaubert
|
||||
title: FlauBERT
|
||||
- local: model_doc/flava
|
||||
title: FLAVA
|
||||
- local: model_doc/fnet
|
||||
title: FNet
|
||||
- local: model_doc/fsmt
|
||||
title: FSMT
|
||||
- local: model_doc/funnel
|
||||
title: Funnel Transformer
|
||||
- local: model_doc/glpn
|
||||
title: GLPN
|
||||
- local: model_doc/openai-gpt
|
||||
title: GPT
|
||||
- local: model_doc/gpt_neo
|
||||
title: GPT Neo
|
||||
- local: model_doc/gpt_neox
|
||||
title: GPT NeoX
|
||||
- local: model_doc/gptj
|
||||
title: GPT-J
|
||||
- local: model_doc/gpt2
|
||||
title: GPT2
|
||||
- local: model_doc/groupvit
|
||||
title: GroupViT
|
||||
- local: model_doc/herbert
|
||||
title: HerBERT
|
||||
- local: model_doc/hubert
|
||||
title: Hubert
|
||||
- local: model_doc/ibert
|
||||
title: I-BERT
|
||||
- local: model_doc/imagegpt
|
||||
title: ImageGPT
|
||||
- local: model_doc/layoutlm
|
||||
title: LayoutLM
|
||||
- local: model_doc/layoutlmv2
|
||||
title: LayoutLMV2
|
||||
- local: model_doc/layoutlmv3
|
||||
title: LayoutLMV3
|
||||
- local: model_doc/layoutxlm
|
||||
title: LayoutXLM
|
||||
- local: model_doc/led
|
||||
title: LED
|
||||
- local: model_doc/levit
|
||||
title: LeViT
|
||||
- local: model_doc/longformer
|
||||
title: Longformer
|
||||
- local: model_doc/longt5
|
||||
title: LongT5
|
||||
- local: model_doc/luke
|
||||
title: LUKE
|
||||
- local: model_doc/lxmert
|
||||
title: LXMERT
|
||||
- local: model_doc/m2m_100
|
||||
title: M2M100
|
||||
- local: model_doc/marian
|
||||
title: MarianMT
|
||||
- local: model_doc/maskformer
|
||||
title: MaskFormer
|
||||
- local: model_doc/mbart
|
||||
title: MBart and MBart-50
|
||||
- local: model_doc/mctct
|
||||
title: MCTCT
|
||||
- local: model_doc/megatron-bert
|
||||
title: MegatronBERT
|
||||
- local: model_doc/megatron_gpt2
|
||||
title: MegatronGPT2
|
||||
- local: model_doc/mluke
|
||||
title: mLUKE
|
||||
- local: model_doc/mobilebert
|
||||
title: MobileBERT
|
||||
- local: model_doc/mobilevit
|
||||
title: MobileViT
|
||||
- local: model_doc/mpnet
|
||||
title: MPNet
|
||||
- local: model_doc/mt5
|
||||
title: MT5
|
||||
- local: model_doc/mvp
|
||||
title: MVP
|
||||
- local: model_doc/nezha
|
||||
title: NEZHA
|
||||
- local: model_doc/nllb
|
||||
title: NLLB
|
||||
- local: model_doc/nystromformer
|
||||
title: Nyströmformer
|
||||
- local: model_doc/opt
|
||||
title: OPT
|
||||
- local: model_doc/owlvit
|
||||
title: OWL-ViT
|
||||
- local: model_doc/pegasus
|
||||
title: Pegasus
|
||||
- local: model_doc/perceiver
|
||||
title: Perceiver
|
||||
- local: model_doc/phobert
|
||||
title: PhoBERT
|
||||
- local: model_doc/plbart
|
||||
title: PLBart
|
||||
- local: model_doc/poolformer
|
||||
title: PoolFormer
|
||||
- local: model_doc/prophetnet
|
||||
title: ProphetNet
|
||||
- local: model_doc/qdqbert
|
||||
title: QDQBert
|
||||
- local: model_doc/rag
|
||||
title: RAG
|
||||
- local: model_doc/realm
|
||||
title: REALM
|
||||
- local: model_doc/reformer
|
||||
title: Reformer
|
||||
- local: model_doc/regnet
|
||||
title: RegNet
|
||||
- local: model_doc/rembert
|
||||
title: RemBERT
|
||||
- local: model_doc/resnet
|
||||
title: ResNet
|
||||
- local: model_doc/retribert
|
||||
title: RetriBERT
|
||||
- local: model_doc/roberta
|
||||
title: RoBERTa
|
||||
- local: model_doc/roformer
|
||||
title: RoFormer
|
||||
- local: model_doc/segformer
|
||||
title: SegFormer
|
||||
- local: model_doc/sew
|
||||
title: SEW
|
||||
- local: model_doc/sew-d
|
||||
title: SEW-D
|
||||
- local: model_doc/speech-encoder-decoder
|
||||
title: Speech Encoder Decoder Models
|
||||
- local: model_doc/speech_to_text
|
||||
title: Speech2Text
|
||||
- local: model_doc/speech_to_text_2
|
||||
title: Speech2Text2
|
||||
- local: model_doc/splinter
|
||||
title: Splinter
|
||||
- local: model_doc/squeezebert
|
||||
title: SqueezeBERT
|
||||
- local: model_doc/swin
|
||||
title: Swin Transformer
|
||||
- local: model_doc/t5
|
||||
title: T5
|
||||
- local: model_doc/t5v1.1
|
||||
title: T5v1.1
|
||||
- local: model_doc/tapas
|
||||
title: TAPAS
|
||||
- local: model_doc/tapex
|
||||
title: TAPEX
|
||||
- local: model_doc/trajectory_transformer
|
||||
title: Trajectory Transformer
|
||||
- local: model_doc/transfo-xl
|
||||
title: Transformer XL
|
||||
- local: model_doc/trocr
|
||||
title: TrOCR
|
||||
- local: model_doc/ul2
|
||||
title: UL2
|
||||
- local: model_doc/unispeech
|
||||
title: UniSpeech
|
||||
- local: model_doc/unispeech-sat
|
||||
title: UniSpeech-SAT
|
||||
- local: model_doc/van
|
||||
title: VAN
|
||||
- local: model_doc/vilt
|
||||
title: ViLT
|
||||
- local: model_doc/vision-encoder-decoder
|
||||
title: Vision Encoder Decoder Models
|
||||
- local: model_doc/vision-text-dual-encoder
|
||||
title: Vision Text Dual Encoder
|
||||
- local: model_doc/vit
|
||||
title: Vision Transformer (ViT)
|
||||
- local: model_doc/visual_bert
|
||||
title: VisualBERT
|
||||
- local: model_doc/vit_mae
|
||||
title: ViTMAE
|
||||
- local: model_doc/wav2vec2
|
||||
title: Wav2Vec2
|
||||
- local: model_doc/wav2vec2-conformer
|
||||
title: Wav2Vec2-Conformer
|
||||
- local: model_doc/wav2vec2_phoneme
|
||||
title: Wav2Vec2Phoneme
|
||||
- local: model_doc/wavlm
|
||||
title: WavLM
|
||||
- local: model_doc/xglm
|
||||
title: XGLM
|
||||
- local: model_doc/xlm
|
||||
title: XLM
|
||||
- local: model_doc/xlm-prophetnet
|
||||
title: XLM-ProphetNet
|
||||
- local: model_doc/xlm-roberta
|
||||
title: XLM-RoBERTa
|
||||
- local: model_doc/xlm-roberta-xl
|
||||
title: XLM-RoBERTa-XL
|
||||
- local: model_doc/xlnet
|
||||
title: XLNet
|
||||
- local: model_doc/xls_r
|
||||
title: XLS-R
|
||||
- local: model_doc/xlsr_wav2vec2
|
||||
title: XLSR-Wav2Vec2
|
||||
- local: model_doc/yolos
|
||||
title: YOLOS
|
||||
- local: model_doc/yoso
|
||||
title: YOSO
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/albert
|
||||
title: ALBERT
|
||||
- local: model_doc/bart
|
||||
title: BART
|
||||
- local: model_doc/barthez
|
||||
title: BARThez
|
||||
- local: model_doc/bartpho
|
||||
title: BARTpho
|
||||
- local: model_doc/bert
|
||||
title: BERT
|
||||
- local: model_doc/bert-generation
|
||||
title: BertGeneration
|
||||
- local: model_doc/bert-japanese
|
||||
title: BertJapanese
|
||||
- local: model_doc/bertweet
|
||||
title: Bertweet
|
||||
- local: model_doc/big_bird
|
||||
title: BigBird
|
||||
- local: model_doc/bigbird_pegasus
|
||||
title: BigBirdPegasus
|
||||
- local: model_doc/blenderbot
|
||||
title: Blenderbot
|
||||
- local: model_doc/blenderbot-small
|
||||
title: Blenderbot Small
|
||||
- local: model_doc/bloom
|
||||
title: BLOOM
|
||||
- local: model_doc/bort
|
||||
title: BORT
|
||||
- local: model_doc/byt5
|
||||
title: ByT5
|
||||
- local: model_doc/camembert
|
||||
title: CamemBERT
|
||||
- local: model_doc/canine
|
||||
title: CANINE
|
||||
- local: model_doc/codegen
|
||||
title: CodeGen
|
||||
- local: model_doc/convbert
|
||||
title: ConvBERT
|
||||
- local: model_doc/cpm
|
||||
title: CPM
|
||||
- local: model_doc/ctrl
|
||||
title: CTRL
|
||||
- local: model_doc/deberta
|
||||
title: DeBERTa
|
||||
- local: model_doc/deberta-v2
|
||||
title: DeBERTa-v2
|
||||
- local: model_doc/dialogpt
|
||||
title: DialoGPT
|
||||
- local: model_doc/distilbert
|
||||
title: DistilBERT
|
||||
- local: model_doc/dpr
|
||||
title: DPR
|
||||
- local: model_doc/electra
|
||||
title: ELECTRA
|
||||
- local: model_doc/encoder-decoder
|
||||
title: Encoder Decoder Models
|
||||
- local: model_doc/flaubert
|
||||
title: FlauBERT
|
||||
- local: model_doc/fnet
|
||||
title: FNet
|
||||
- local: model_doc/fsmt
|
||||
title: FSMT
|
||||
- local: model_doc/funnel
|
||||
title: Funnel Transformer
|
||||
- local: model_doc/openai-gpt
|
||||
title: GPT
|
||||
- local: model_doc/gpt_neo
|
||||
title: GPT Neo
|
||||
- local: model_doc/gpt_neox
|
||||
title: GPT NeoX
|
||||
- local: model_doc/gptj
|
||||
title: GPT-J
|
||||
- local: model_doc/gpt2
|
||||
title: GPT2
|
||||
- local: model_doc/herbert
|
||||
title: HerBERT
|
||||
- local: model_doc/ibert
|
||||
title: I-BERT
|
||||
- local: model_doc/layoutlm
|
||||
title: LayoutLM
|
||||
- local: model_doc/led
|
||||
title: LED
|
||||
- local: model_doc/longformer
|
||||
title: Longformer
|
||||
- local: model_doc/longt5
|
||||
title: LongT5
|
||||
- local: model_doc/luke
|
||||
title: LUKE
|
||||
- local: model_doc/m2m_100
|
||||
title: M2M100
|
||||
- local: model_doc/marian
|
||||
title: MarianMT
|
||||
- local: model_doc/mbart
|
||||
title: MBart and MBart-50
|
||||
- local: model_doc/megatron-bert
|
||||
title: MegatronBERT
|
||||
- local: model_doc/megatron_gpt2
|
||||
title: MegatronGPT2
|
||||
- local: model_doc/mluke
|
||||
title: mLUKE
|
||||
- local: model_doc/mobilebert
|
||||
title: MobileBERT
|
||||
- local: model_doc/mpnet
|
||||
title: MPNet
|
||||
- local: model_doc/mt5
|
||||
title: MT5
|
||||
- local: model_doc/mvp
|
||||
title: MVP
|
||||
- local: model_doc/nezha
|
||||
title: NEZHA
|
||||
- local: model_doc/nllb
|
||||
title: NLLB
|
||||
- local: model_doc/nystromformer
|
||||
title: Nyströmformer
|
||||
- local: model_doc/opt
|
||||
title: OPT
|
||||
- local: model_doc/pegasus
|
||||
title: Pegasus
|
||||
- local: model_doc/phobert
|
||||
title: PhoBERT
|
||||
- local: model_doc/plbart
|
||||
title: PLBart
|
||||
- local: model_doc/prophetnet
|
||||
title: ProphetNet
|
||||
- local: model_doc/qdqbert
|
||||
title: QDQBert
|
||||
- local: model_doc/rag
|
||||
title: RAG
|
||||
- local: model_doc/realm
|
||||
title: REALM
|
||||
- local: model_doc/reformer
|
||||
title: Reformer
|
||||
- local: model_doc/rembert
|
||||
title: RemBERT
|
||||
- local: model_doc/retribert
|
||||
title: RetriBERT
|
||||
- local: model_doc/roberta
|
||||
title: RoBERTa
|
||||
- local: model_doc/roformer
|
||||
title: RoFormer
|
||||
- local: model_doc/splinter
|
||||
title: Splinter
|
||||
- local: model_doc/squeezebert
|
||||
title: SqueezeBERT
|
||||
- local: model_doc/t5
|
||||
title: T5
|
||||
- local: model_doc/t5v1.1
|
||||
title: T5v1.1
|
||||
- local: model_doc/tapas
|
||||
title: TAPAS
|
||||
- local: model_doc/tapex
|
||||
title: TAPEX
|
||||
- local: model_doc/transfo-xl
|
||||
title: Transformer XL
|
||||
- local: model_doc/ul2
|
||||
title: UL2
|
||||
- local: model_doc/xglm
|
||||
title: XGLM
|
||||
- local: model_doc/xlm
|
||||
title: XLM
|
||||
- local: model_doc/xlm-prophetnet
|
||||
title: XLM-ProphetNet
|
||||
- local: model_doc/xlm-roberta
|
||||
title: XLM-RoBERTa
|
||||
- local: model_doc/xlm-roberta-xl
|
||||
title: XLM-RoBERTa-XL
|
||||
- local: model_doc/xlnet
|
||||
title: XLNet
|
||||
- local: model_doc/yoso
|
||||
title: YOSO
|
||||
title: Text models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/beit
|
||||
title: BEiT
|
||||
- local: model_doc/convnext
|
||||
title: ConvNeXT
|
||||
- local: model_doc/cvt
|
||||
title: CvT
|
||||
- local: model_doc/deit
|
||||
title: DeiT
|
||||
- local: model_doc/detr
|
||||
title: DETR
|
||||
- local: model_doc/dit
|
||||
title: DiT
|
||||
- local: model_doc/dpt
|
||||
title: DPT
|
||||
- local: model_doc/glpn
|
||||
title: GLPN
|
||||
- local: model_doc/imagegpt
|
||||
title: ImageGPT
|
||||
- local: model_doc/levit
|
||||
title: LeViT
|
||||
- local: model_doc/maskformer
|
||||
title: MaskFormer
|
||||
- local: model_doc/mobilevit
|
||||
title: MobileViT
|
||||
- local: model_doc/owlvit
|
||||
title: OWL-ViT
|
||||
- local: model_doc/poolformer
|
||||
title: PoolFormer
|
||||
- local: model_doc/regnet
|
||||
title: RegNet
|
||||
- local: model_doc/resnet
|
||||
title: ResNet
|
||||
- local: model_doc/segformer
|
||||
title: SegFormer
|
||||
- local: model_doc/swin
|
||||
title: Swin Transformer
|
||||
- local: model_doc/swinv2
|
||||
title: Swin Transformer V2
|
||||
- local: model_doc/van
|
||||
title: VAN
|
||||
- local: model_doc/videomae
|
||||
title: VideoMAE
|
||||
- local: model_doc/vit
|
||||
title: Vision Transformer (ViT)
|
||||
- local: model_doc/vit_mae
|
||||
title: ViTMAE
|
||||
- local: model_doc/yolos
|
||||
title: YOLOS
|
||||
title: Vision models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/hubert
|
||||
title: Hubert
|
||||
- local: model_doc/mctct
|
||||
title: MCTCT
|
||||
- local: model_doc/sew
|
||||
title: SEW
|
||||
- local: model_doc/sew-d
|
||||
title: SEW-D
|
||||
- local: model_doc/speech_to_text
|
||||
title: Speech2Text
|
||||
- local: model_doc/speech_to_text_2
|
||||
title: Speech2Text2
|
||||
- local: model_doc/unispeech
|
||||
title: UniSpeech
|
||||
- local: model_doc/unispeech-sat
|
||||
title: UniSpeech-SAT
|
||||
- local: model_doc/wav2vec2
|
||||
title: Wav2Vec2
|
||||
- local: model_doc/wav2vec2-conformer
|
||||
title: Wav2Vec2-Conformer
|
||||
- local: model_doc/wav2vec2_phoneme
|
||||
title: Wav2Vec2Phoneme
|
||||
- local: model_doc/wavlm
|
||||
title: WavLM
|
||||
- local: model_doc/xls_r
|
||||
title: XLS-R
|
||||
- local: model_doc/xlsr_wav2vec2
|
||||
title: XLSR-Wav2Vec2
|
||||
title: Audio models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/clip
|
||||
title: CLIP
|
||||
- local: model_doc/data2vec
|
||||
title: Data2Vec
|
||||
- local: model_doc/flava
|
||||
title: FLAVA
|
||||
- local: model_doc/groupvit
|
||||
title: GroupViT
|
||||
- local: model_doc/layoutlmv2
|
||||
title: LayoutLMV2
|
||||
- local: model_doc/layoutlmv3
|
||||
title: LayoutLMV3
|
||||
- local: model_doc/layoutxlm
|
||||
title: LayoutXLM
|
||||
- local: model_doc/lxmert
|
||||
title: LXMERT
|
||||
- local: model_doc/perceiver
|
||||
title: Perceiver
|
||||
- local: model_doc/speech-encoder-decoder
|
||||
title: Speech Encoder Decoder Models
|
||||
- local: model_doc/trocr
|
||||
title: TrOCR
|
||||
- local: model_doc/vilt
|
||||
title: ViLT
|
||||
- local: model_doc/vision-encoder-decoder
|
||||
title: Vision Encoder Decoder Models
|
||||
- local: model_doc/vision-text-dual-encoder
|
||||
title: Vision Text Dual Encoder
|
||||
- local: model_doc/visual_bert
|
||||
title: VisualBERT
|
||||
title: Multimodal models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/decision_transformer
|
||||
title: Decision Transformer
|
||||
- local: model_doc/trajectory_transformer
|
||||
title: Trajectory Transformer
|
||||
title: Reinforcement learning models
|
||||
title: Models
|
||||
- sections:
|
||||
- local: internal/modeling_utils
|
||||
|
@ -813,13 +813,9 @@ checkpoint and to get the required access rights to be able to upload the model
*brand_new_bert*. The `push_to_hub` method, present in all models in `transformers`, is a quick and efficient way to push your checkpoint to the hub. A little snippet is pasted below:

```python
brand_new_bert.push_to_hub(
    repo_path_or_name="brand_new_bert",
    # Uncomment the following line to push to an organization
    # organization="<ORGANIZATION>",
    commit_message="Add model",
    use_temp_dir=True,
)
brand_new_bert.push_to_hub("brand_new_bert")
# Uncomment the following line to push to an organization.
# brand_new_bert.push_to_hub("<organization>/brand_new_bert")
```
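
If a tokenizer (or feature extractor) was created for *brand_new_bert* as well, it is usually uploaded to the same repository; a minimal sketch, assuming a `tokenizer` object is available:

```python
# push the matching tokenizer to the same hub repository (sketch)
tokenizer.push_to_hub("brand_new_bert")
```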
It is worth spending some time to create fitting model cards for each checkpoint. The model cards should highlight the
|
||||
|
@ -154,6 +154,7 @@ The library currently contains JAX, PyTorch and TensorFlow implementations, pret
|
||||
1. **[Splinter](model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
|
||||
1. **[SqueezeBERT](model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
|
||||
1. **[Swin Transformer](model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
|
||||
1. **[Swin Transformer V2](model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.
|
||||
1. **[T5](model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[T5v1.1](model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[TAPAS](model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
|
||||
@ -165,6 +166,7 @@ The library currently contains JAX, PyTorch and TensorFlow implementations, pret
|
||||
1. **[UniSpeech](model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
|
||||
1. **[UniSpeechSat](model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
|
||||
1. **[VAN](model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
|
||||
1. **[VideoMAE](model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
|
||||
1. **[ViLT](model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
|
||||
1. **[Vision Transformer (ViT)](model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||
1. **[VisualBERT](model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
|
||||
@ -289,6 +291,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| Splinter | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
| SqueezeBERT | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
| Swin Transformer | ❌ | ❌ | ✅ | ✅ | ❌ |
|
||||
| Swin Transformer V2 | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| T5 | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| TAPAS | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| Trajectory Transformer | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
@ -297,6 +300,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| UniSpeech | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| UniSpeechSat | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| VAN | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| VideoMAE | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| ViLT | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| Vision Encoder decoder | ❌ | ❌ | ✅ | ✅ | ✅ |
|
||||
| VisionTextDualEncoder | ❌ | ❌ | ✅ | ❌ | ✅ |
|
||||
|
@ -12,7 +12,7 @@ specific language governing permissions and limitations under the License.

# Feature Extractor

A feature extractor is in charge of preparing input features for a multi-modal model. This includes feature extraction
A feature extractor is in charge of preparing input features for audio or vision models. This includes feature extraction
from sequences, *e.g.*, pre-processing audio files to Log-Mel Spectrogram features, feature extraction from images,
*e.g.* cropping image files, but also padding, normalization, and conversion to Numpy, PyTorch, and TensorFlow
tensors.
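
As a minimal sketch of this workflow (the `facebook/wav2vec2-base-960h` checkpoint and the 16 kHz random waveforms below are illustrative assumptions):

```python
import numpy as np
from transformers import AutoFeatureExtractor

# assumed checkpoint; any audio model with a feature extractor works the same way
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

# two mono waveforms of different lengths, sampled at 16 kHz
raw_audio = [np.random.randn(16000).astype(np.float32), np.random.randn(24000).astype(np.float32)]

# pad to the longest example and return PyTorch tensors
inputs = feature_extractor(raw_audio, sampling_rate=16000, padding=True, return_tensors="pt")
print(inputs["input_values"].shape)  # (2, 24000) after padding
```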
|
@ -118,6 +118,10 @@ Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its
|
||||
|
||||
[[autodoc]] AutoModelForImageClassification
|
||||
|
||||
## AutoModelForVideoClassification
|
||||
|
||||
[[autodoc]] AutoModelForVideoClassification
|
||||
|
||||
## AutoModelForVision2Seq
|
||||
|
||||
[[autodoc]] AutoModelForVision2Seq
|
||||
|
@ -15,7 +15,7 @@ specific language governing permissions and limitations under the License.
|
||||
## Overview
|
||||
|
||||
The BLOOM model has been proposed with its various versions through the [BigScience Workshop](https://bigscience.huggingface.co/). BigScience is inspired by other open science initiatives where researchers have pooled their time and resources to collectively achieve a higher impact.
|
||||
The architecture of BLOOM is essentially similar to GPT3 (auto-regressive model for next token prediction), but has been trained on different 46 languages including code.
|
||||
The architecture of BLOOM is essentially similar to GPT3 (auto-regressive model for next token prediction), but has been trained on 46 different languages and 13 programming languages.
|
||||
Several smaller versions of the models have been trained on the same dataset. BLOOM is available in the following versions:
|
||||
|
||||
- [bloom-350m](https://huggingface.co/bigscience/bloom-350m)
|
||||
@ -54,4 +54,4 @@ Several smaller versions of the models have been trained on the same dataset. BL
|
||||
## BloomForTokenClassification
|
||||
|
||||
[[autodoc]] BloomForTokenClassification
|
||||
- forward
|
||||
- forward
|
||||
|
@ -27,9 +27,9 @@ any other models (see the examples for more information).
An application of this architecture could be to leverage two pretrained [`BertModel`] as the encoder
and decoder for a summarization model as was shown in: [Text Summarization with Pretrained Encoders](https://arxiv.org/abs/1908.08345) by Yang Liu and Mirella Lapata.

## Randomly initializing [`EncoderDecoderModel`] from model configurations.
## Randomly initializing `EncoderDecoderModel` from model configurations.

[`EncoderDecoderModel`] can be randomly initialized from an encoder and a decoder config. In the following example, we show how to do this using the default [`BertModel`] configuration for both the encoder and the decoder.
[`EncoderDecoderModel`] can be randomly initialized from an encoder and a decoder config. In the following example, we show how to do this using the default [`BertModel`] configuration for the encoder and the default [`BertForCausalLM`] configuration for the decoder.

```python
>>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel
@ -41,7 +41,7 @@ and decoder for a summarization model as was shown in: [Text Summarization with
>>> model = EncoderDecoderModel(config=config)
```

## Initialising [`EncoderDecoderModel`] from a pretrained encoder and a pretrained decoder.
## Initialising `EncoderDecoderModel` from a pretrained encoder and a pretrained decoder.

[`EncoderDecoderModel`] can be initialized from a pretrained encoder checkpoint and a pretrained decoder checkpoint. Note that any pretrained auto-encoding model, *e.g.* BERT, can serve as the encoder and both pretrained auto-encoding models, *e.g.* BERT, pretrained causal language models, *e.g.* GPT2, as well as the pretrained decoder part of sequence-to-sequence models, *e.g.* decoder of BART, can be used as the decoder.
Depending on which architecture you choose as the decoder, the cross-attention layers might be randomly initialized.
@ -55,14 +55,32 @@ To do so, the `EncoderDecoderModel` class provides a [`EncoderDecoderModel.from_
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")
```

## Loading an existing [`EncoderDecoderModel`] checkpoint.
## Loading an existing `EncoderDecoderModel` checkpoint and performing inference.

To load fine-tuned checkpoints of the `EncoderDecoderModel` class, ['EncoderDecoderModel`] provides the `from_pretrained(...)` method just like any other model architecture in Transformers.
To load fine-tuned checkpoints of the `EncoderDecoderModel` class, [`EncoderDecoderModel`] provides the `from_pretrained(...)` method just like any other model architecture in Transformers.

To perform inference, one uses the [`generate`] method, which allows you to autoregressively generate text. This method supports various forms of decoding, such as greedy decoding, beam search and multinomial sampling.

```python
>>> from transformers import EncoderDecoderModel
>>> from transformers import AutoTokenizer, EncoderDecoderModel

>>> # load a fine-tuned seq2seq model and corresponding tokenizer
>>> model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
>>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")

>>> # let's perform inference on a long piece of text
>>> ARTICLE_TO_SUMMARIZE = (
...     "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
...     "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
...     "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> input_ids = tokenizer(ARTICLE_TO_SUMMARIZE, return_tensors="pt").input_ids

>>> # autoregressively generate summary (uses greedy decoding by default)
>>> generated_ids = model.generate(input_ids)
>>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> print(generated_text)
nearly 800 thousand customers were affected by the shutoffs. the aim is to reduce the risk of wildfires. nearly 800, 000 customers were expected to be affected by high winds amid dry conditions. pg & e said it scheduled the blackouts to last through at least midday tomorrow.
```
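
For example, beam search can be switched on simply by passing `num_beams` to [`generate`]; the following is a small sketch that reuses the `model`, `tokenizer` and `input_ids` from the example above (the resulting summary may differ slightly from the greedy one shown there):

```python
>>> # beam search with 4 beams instead of greedy decoding
>>> generated_ids = model.generate(input_ids, num_beams=4, early_stopping=True)
>>> summary = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
```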

## Loading a PyTorch checkpoint into `TFEncoderDecoderModel`.

@ -116,6 +134,7 @@ target sequence).
>>> # the forward function automatically creates the correct decoder_input_ids
>>> loss = model(input_ids=input_ids, labels=labels).loss
```

Detailed [colab](https://colab.research.google.com/drive/1WIk2bxglElfZewOHboPFNj8H44_VAyKE?usp=sharing#scrollTo=ZwQIEhKOrJpl) for training.

This model was contributed by [thomwolf](https://github.com/thomwolf). This model's TensorFlow and Flax versions
|
||||
|
@ -28,8 +28,9 @@ Tips:
|
||||
- images need to be resized and normalized with channels in regular RGB format. LayoutLMv2 on the other hand normalizes the images internally and expects the channels in BGR format.
|
||||
- text is tokenized using byte-pair encoding (BPE), as opposed to WordPiece.
|
||||
Due to these differences in data preprocessing, one can use [`LayoutLMv3Processor`] which internally combines a [`LayoutLMv3FeatureExtractor`] (for the image modality) and a [`LayoutLMv3Tokenizer`]/[`LayoutLMv3TokenizerFast`] (for the text modality) to prepare all data for the model.
|
||||
- Regarding usage of [`LayoutLMv3Processor`], we refer to the [usage guide](layoutlmv2#usage-LayoutLMv2Processor) of its predecessor.
|
||||
- Regarding usage of [`LayoutLMv3Processor`], we refer to the [usage guide](layoutlmv2#usage-layoutlmv2processor) of its predecessor.
|
||||
- Demo notebooks for LayoutLMv3 can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3).
|
||||
- Demo scripts can be found [here](https://github.com/huggingface/transformers/tree/main/examples/research_projects/layoutlmv3).
|
||||
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/layoutlmv3_architecture.png"
|
||||
alt="drawing" width="600"/>
|
||||
|
@ -152,3 +152,23 @@ This model was contributed by [ikuyamada](https://huggingface.co/ikuyamada) and
|
||||
|
||||
[[autodoc]] LukeForEntitySpanClassification
|
||||
- forward
|
||||
|
||||
## LukeForSequenceClassification
|
||||
|
||||
[[autodoc]] LukeForSequenceClassification
|
||||
- forward
|
||||
|
||||
## LukeForMultipleChoice
|
||||
|
||||
[[autodoc]] LukeForMultipleChoice
|
||||
- forward
|
||||
|
||||
## LukeForTokenClassification
|
||||
|
||||
[[autodoc]] LukeForTokenClassification
|
||||
- forward
|
||||
|
||||
## LukeForQuestionAnswering
|
||||
|
||||
[[autodoc]] LukeForQuestionAnswering
|
||||
- forward
|
||||
|
@ -55,9 +55,7 @@ tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en
src_text = "Life is like a box of chocolates."
tgt_text = "La vie est comme une boîte de chocolat."

model_inputs = tokenizer(src_text, return_tensors="pt")
with tokenizer.as_target_tokenizer():
    labels = tokenizer(tgt_text, return_tensors="pt").input_ids
model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")

loss = model(**model_inputs, labels=labels) # forward pass
```
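
At inference time, translation with M2M100 is typically done by forcing the id of the target language as the first generated token; a short sketch reusing the `model` and `tokenizer` above (French as the target is just an illustrative choice):

```python
# translate English to French by forcing French as the first generated token
encoded_en = tokenizer(src_text, return_tensors="pt")
generated_tokens = model.generate(**encoded_en, forced_bos_token_id=tokenizer.get_lang_id("fr"))
tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
```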
|
||||
|
@ -155,7 +155,7 @@ Example of translating english to many romance languages, using old-style 2 char
|
||||
## MarianTokenizer
|
||||
|
||||
[[autodoc]] MarianTokenizer
|
||||
- as_target_tokenizer
|
||||
- build_inputs_with_special_tokens
|
||||
|
||||
## MarianModel
|
||||
|
||||
|
@ -33,7 +33,7 @@ Tips:
|
||||
`get_num_masks` function inside the `MaskFormerLoss` class of `modeling_maskformer.py`. When training on multiple nodes, this should be
set to the average number of target masks across all nodes, as can be seen in the original implementation [here](https://github.com/facebookresearch/MaskFormer/blob/da3e60d85fdeedcb31476b5edd7d328826ce56cc/mask_former/modeling/criterion.py#L169).
- One can use [`MaskFormerFeatureExtractor`] to prepare images for the model and optional targets for the model.
- To get the final segmentation, depending on the task, you can call [`~MaskFormerFeatureExtractor.post_process_semantic_segmentation`] or [`~MaskFormerFeatureExtractor.post_process_panoptic_segmentation`]. Both tasks can be solved using [`MaskFormerForInstanceSegmentation`] output, the latter needs an additional `is_thing_map` to know which instances must be merged together..
- To get the final segmentation, depending on the task, you can call [`~MaskFormerFeatureExtractor.post_process_semantic_segmentation`] or [`~MaskFormerFeatureExtractor.post_process_panoptic_segmentation`]. Both tasks can be solved using [`MaskFormerForInstanceSegmentation`] output; panoptic segmentation accepts an optional `label_ids_to_fuse` argument to fuse instances of the target object/s (e.g. sky) together. A short usage sketch follows these tips.
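
A minimal sketch of that flow (the `facebook/maskformer-swin-base-ade` checkpoint and the sample image URL are assumptions; any MaskFormer checkpoint can be substituted):

```python
import requests
from PIL import Image
from transformers import MaskFormerFeatureExtractor, MaskFormerForInstanceSegmentation

checkpoint = "facebook/maskformer-swin-base-ade"  # assumed checkpoint
feature_extractor = MaskFormerFeatureExtractor.from_pretrained(checkpoint)
model = MaskFormerForInstanceSegmentation.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # stand-in image
image = Image.open(requests.get(url, stream=True).raw)

inputs = feature_extractor(images=image, return_tensors="pt")  # prepare the image for the model
outputs = model(**inputs)

# post-process the class and mask predictions into a semantic segmentation map
semantic_segmentation = feature_extractor.post_process_semantic_segmentation(outputs)
```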
|
||||
|
||||
The figure below illustrates the architecture of MaskFormer. Taken from the [original paper](https://arxiv.org/abs/2107.06278).
|
||||
|
||||
|
@ -34,8 +34,8 @@ model is multilingual it expects the sequences in a different format. A special
source and target text. The source text format is `X [eos, src_lang_code]` where `X` is the source text. The
target text format is `[tgt_lang_code] X [eos]`. `bos` is never used.

The regular [`~MBartTokenizer.__call__`] will encode source text format, and it should be wrapped
inside the context manager [`~MBartTokenizer.as_target_tokenizer`] to encode target text format.
The regular [`~MBartTokenizer.__call__`] will encode the source text format when it is passed as the first argument or with the `text`
keyword, and the target text format when it is passed with the `text_target` keyword argument.

- Supervised training

@ -46,13 +46,11 @@ inside the context manager [`~MBartTokenizer.as_target_tokenizer`] to encode tar
>>> example_english_phrase = "UN Chief Says There Is No Military Solution in Syria"
>>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria"

>>> inputs = tokenizer(example_english_phrase, return_tensors="pt")
>>> with tokenizer.as_target_tokenizer():
...     labels = tokenizer(expected_translation_romanian, return_tensors="pt")
>>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt")

>>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
>>> # forward pass
>>> model(**inputs, labels=batch["labels"])
>>> model(**inputs)
```
|
||||
|
||||
- Generation
|
||||
@ -108,11 +106,9 @@ tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50", src_
src_text = " UN Chief Says There Is No Military Solution in Syria"
tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"

model_inputs = tokenizer(src_text, return_tensors="pt")
with tokenizer.as_target_tokenizer():
    labels = tokenizer(tgt_text, return_tensors="pt").input_ids
model_inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")

model(**model_inputs, labels=labels) # forward pass
model(**model_inputs) # forward pass
```
|
||||
|
||||
- Generation
|
||||
@ -154,7 +150,6 @@ tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
|
||||
## MBartTokenizer
|
||||
|
||||
[[autodoc]] MBartTokenizer
|
||||
- as_target_tokenizer
|
||||
- build_inputs_with_special_tokens
|
||||
|
||||
## MBartTokenizerFast
|
||||
|
@ -48,7 +48,6 @@ This model was contributed by [cwkeam](https://huggingface.co/cwkeam). The origi
|
||||
- save_pretrained
|
||||
- batch_decode
|
||||
- decode
|
||||
- as_target_processor
|
||||
|
||||
|
||||
## MCTCTModel
|
||||
|
@ -91,7 +91,6 @@ UN-Chef sagt, es gibt keine militärische Lösung in Syrien
|
||||
## NllbTokenizer
|
||||
|
||||
[[autodoc]] NllbTokenizer
|
||||
- as_target_tokenizer
|
||||
- build_inputs_with_special_tokens
|
||||
|
||||
## NllbTokenizerFast
|
||||
|
@ -45,8 +45,9 @@ target text format is `[tgt_lang_code] X [eos]`. `bos` is never used.
|
||||
|
||||
However, for fine-tuning, in some cases no language token is provided in cases where a single language is used. Please refer to [the paper](https://arxiv.org/abs/2103.06333) to learn more about this.
|
||||
|
||||
In cases where the language code is needed, The regular [`~PLBartTokenizer.__call__`] will encode source text format, and it should be wrapped
|
||||
inside the context manager [`~PLBartTokenizer.as_target_tokenizer`] to encode target text format.
|
||||
In cases where the language code is needed, the regular [`~PLBartTokenizer.__call__`] will encode source text format
|
||||
when you pass texts as the first argument or with the keyword argument `text`, and will encode target text format if
|
||||
it's passed with the `text_target` keyword argument.
|
||||
|
||||
- Supervised training
|
||||
|
||||
@ -56,11 +57,7 @@ inside the context manager [`~PLBartTokenizer.as_target_tokenizer`] to encode ta
|
||||
>>> tokenizer = PLBartTokenizer.from_pretrained("uclanlp/plbart-base", src_lang="en_XX", tgt_lang="python")
|
||||
>>> example_python_phrase = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"
|
||||
>>> expected_translation_english = "Returns the maximum value of a b c."
|
||||
>>> inputs = tokenizer(example_python_phrase, return_tensors="pt")
|
||||
>>> with tokenizer.as_target_tokenizer():
|
||||
... labels = tokenizer(expected_translation_english, return_tensors="pt")
|
||||
>>> inputs["labels"] = labels["input_ids"]
|
||||
>>> # forward pass
|
||||
>>> inputs = tokenizer(example_python_phrase, text_target=expected_translation_english, return_tensors="pt")
|
||||
>>> model(**inputs)
|
||||
```
|
||||
|
||||
@ -88,7 +85,6 @@ inside the context manager [`~PLBartTokenizer.as_target_tokenizer`] to encode ta
|
||||
## PLBartTokenizer
|
||||
|
||||
[[autodoc]] PLBartTokenizer
|
||||
- as_target_tokenizer
|
||||
- build_inputs_with_special_tokens
|
||||
|
||||
## PLBartModel
|
||||
|
@ -12,7 +12,7 @@ specific language governing permissions and limitations under the License.
|
||||
|
||||
# Speech Encoder Decoder Models
|
||||
|
||||
The [`SpeechEncoderDecoderModel`] can be used to initialize a speech-sequence-to-text-sequence model
|
||||
The [`SpeechEncoderDecoderModel`] can be used to initialize a speech-to-text model
|
||||
with any pretrained speech autoencoding model as the encoder (*e.g.* [Wav2Vec2](wav2vec2), [Hubert](hubert)) and any pretrained autoregressive model as the decoder.
|
||||
|
||||
The effectiveness of initializing speech-sequence-to-text-sequence models with pretrained checkpoints for speech
|
||||
@ -20,9 +20,96 @@ recognition and speech translation has *e.g.* been shown in [Large-Scale Self- a
|
||||
Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli,
|
||||
Alexis Conneau.
|
||||
|
||||
An example of how to use a [`SpeechEncoderDecoderModel`] for inference can be seen in
|
||||
[Speech2Text2](speech_to_text_2).
|
||||
An example of how to use a [`SpeechEncoderDecoderModel`] for inference can be seen in [Speech2Text2](speech_to_text_2).
|
||||
|
||||
## Randomly initializing `SpeechEncoderDecoderModel` from model configurations.
|
||||
|
||||
[`SpeechEncoderDecoderModel`] can be randomly initialized from an encoder and a decoder config. In the following example, we show how to do this using the default [`Wav2Vec2Model`] configuration for the encoder
|
||||
and the default [`BertForCausalLM`] configuration for the decoder.
|
||||
|
||||
```python
|
||||
>>> from transformers import BertConfig, Wav2Vec2Config, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel
|
||||
|
||||
>>> config_encoder = Wav2Vec2Config()
|
||||
>>> config_decoder = BertConfig()
|
||||
|
||||
>>> config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
|
||||
>>> model = SpeechEncoderDecoderModel(config=config)
|
||||
```
|
||||
|
||||
## Initialising `SpeechEncoderDecoderModel` from a pretrained encoder and a pretrained decoder.
|
||||
|
||||
[`SpeechEncoderDecoderModel`] can be initialized from a pretrained encoder checkpoint and a pretrained decoder checkpoint. Note that any pretrained Transformer-based speech model, *e.g.* [Wav2Vec2](wav2vec2), [Hubert](hubert) can serve as the encoder and both pretrained auto-encoding models, *e.g.* BERT, pretrained causal language models, *e.g.* GPT2, as well as the pretrained decoder part of sequence-to-sequence models, *e.g.* decoder of BART, can be used as the decoder.
|
||||
Depending on which architecture you choose as the decoder, the cross-attention layers might be randomly initialized.
|
||||
Initializing [`SpeechEncoderDecoderModel`] from a pretrained encoder and decoder checkpoint requires the model to be fine-tuned on a downstream task, as has been shown in [the *Warm-starting-encoder-decoder blog post*](https://huggingface.co/blog/warm-starting-encoder-decoder).
|
||||
To do so, the `SpeechEncoderDecoderModel` class provides a [`SpeechEncoderDecoderModel.from_encoder_decoder_pretrained`] method.
|
||||
|
||||
```python
|
||||
>>> from transformers import SpeechEncoderDecoderModel
|
||||
|
||||
>>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(
|
||||
... "facebook/hubert-large-ll60k", "bert-base-uncased"
|
||||
... )
|
||||
```
|
||||
|
||||
## Loading an existing `SpeechEncoderDecoderModel` checkpoint and performing inference.

To load fine-tuned checkpoints of the `SpeechEncoderDecoderModel` class, [`SpeechEncoderDecoderModel`] provides the `from_pretrained(...)` method just like any other model architecture in Transformers.

To perform inference, one uses the [`generate`] method, which allows you to autoregressively generate text. This method supports various forms of decoding, such as greedy decoding, beam search and multinomial sampling.

```python
>>> from transformers import Wav2Vec2Processor, SpeechEncoderDecoderModel
>>> from datasets import load_dataset
>>> import torch

>>> # load a fine-tuned speech translation model and corresponding processor
>>> model = SpeechEncoderDecoderModel.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")
>>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15")

>>> # let's perform inference on a piece of English speech (which we'll translate to German)
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> input_values = processor(ds[0]["audio"]["array"], return_tensors="pt").input_values

>>> # autoregressively generate transcription (uses greedy decoding by default)
>>> generated_ids = model.generate(input_values)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> print(generated_text)
Mr. Quilter ist der Apostel der Mittelschicht und wir freuen uns, sein Evangelium willkommen heißen zu können.
```
|
||||
|
||||
## Training

Once the model is created, it can be fine-tuned similarly to BART, T5 or any other encoder-decoder model on a dataset of (speech, text) pairs.
As you can see, only 2 inputs are required for the model in order to compute a loss: `input_values` (which are the
speech inputs) and `labels` (which are the `input_ids` of the encoded target sequence).

```python
>>> from transformers import AutoTokenizer, AutoFeatureExtractor, SpeechEncoderDecoderModel
>>> from datasets import load_dataset

>>> encoder_id = "facebook/wav2vec2-base-960h"  # acoustic model encoder
>>> decoder_id = "bert-base-uncased"  # text decoder

>>> feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id)
>>> tokenizer = AutoTokenizer.from_pretrained(decoder_id)
>>> # Combine pre-trained encoder and pre-trained decoder to form a Seq2Seq model
>>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_id, decoder_id)

>>> model.config.decoder_start_token_id = tokenizer.cls_token_id
>>> model.config.pad_token_id = tokenizer.pad_token_id

>>> # load an audio input and pre-process (normalise mean/std to 0/1)
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> input_values = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt").input_values

>>> # load its corresponding transcription and tokenize to generate labels
>>> labels = tokenizer(ds[0]["text"], return_tensors="pt").input_ids

>>> # the forward function automatically creates the correct decoder_input_ids
>>> loss = model(input_values=input_values, labels=labels).loss
>>> loss.backward()
```
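
To turn this into a full optimization step, the gradients computed above can be applied with any PyTorch optimizer; a minimal sketch (the AdamW optimizer and the learning rate are arbitrary choices, not part of the original example):

```python
>>> from torch.optim import AdamW

>>> optimizer = AdamW(model.parameters(), lr=5e-5)
>>> optimizer.step()  # apply the gradients from loss.backward()
>>> optimizer.zero_grad()  # reset gradients before the next batch
```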
|
||||
|
||||
## SpeechEncoderDecoderConfig
|
||||
|
||||
|
@ -120,7 +120,6 @@ See the [model hub](https://huggingface.co/models?filter=speech_to_text) to look
|
||||
- save_pretrained
|
||||
- batch_decode
|
||||
- decode
|
||||
- as_target_processor
|
||||
|
||||
## Speech2TextModel
|
||||
|
||||
|
@ -114,7 +114,6 @@ See [model hub](https://huggingface.co/models?filter=speech2text2) to look for S
|
||||
- save_pretrained
|
||||
- batch_decode
|
||||
- decode
|
||||
- as_target_processor
|
||||
|
||||
## Speech2Text2ForCausalLM
|
||||
|
||||
|
47
docs/source/en/model_doc/swinv2.mdx
Normal file
@ -0,0 +1,47 @@
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Swin Transformer V2

## Overview

The Swin Transformer V2 model was proposed in [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.

The abstract from the paper is the following:

*Large-scale NLP models have been shown to significantly improve the performance on language tasks with no signs of saturation. They also demonstrate amazing few-shot capabilities like that of human beings. This paper aims to explore large-scale models in computer vision. We tackle three major issues in training and application of large vision models, including training instability, resolution gaps between pre-training and fine-tuning, and hunger on labelled data. Three main techniques are proposed: 1) a residual-post-norm method combined with cosine attention to improve training stability; 2) A log-spaced continuous position bias method to effectively transfer models pre-trained using low-resolution images to downstream tasks with high-resolution inputs; 3) A self-supervised pre-training method, SimMIM, to reduce the needs of vast labeled images. Through these techniques, this paper successfully trained a 3 billion-parameter Swin Transformer V2 model, which is the largest dense vision model to date, and makes it capable of training with images of up to 1,536×1,536 resolution. It set new performance records on 4 representative vision tasks, including ImageNet-V2 image classification, COCO object detection, ADE20K semantic segmentation, and Kinetics-400 video action classification. Also note our training is much more efficient than that in Google's billion-level visual models, which consumes 40 times less labelled data and 40 times less training time.*

Tips:
- One can use the [`AutoFeatureExtractor`] API to prepare images for the model.

This model was contributed by [nandwalritik](https://huggingface.co/nandwalritik).
The original code can be found [here](https://github.com/microsoft/Swin-Transformer).
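
As a quick illustration of the tip above, image classification with Swin Transformer V2 can be sketched as follows; the checkpoint name is an assumption, and any Swinv2 image-classification checkpoint on the Hub can be substituted:

```python
>>> import requests
>>> from PIL import Image

>>> from transformers import AutoFeatureExtractor, Swinv2ForImageClassification

>>> # hypothetical checkpoint; substitute any Swinv2 image-classification checkpoint
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
>>> model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> inputs = feature_extractor(images=image, return_tensors="pt")
>>> logits = model(**inputs).logits
>>> print(model.config.id2label[logits.argmax(-1).item()])
```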

## Swinv2Config

[[autodoc]] Swinv2Config

## Swinv2Model

[[autodoc]] Swinv2Model
- forward

## Swinv2ForMaskedImageModeling

[[autodoc]] Swinv2ForMaskedImageModeling
- forward

## Swinv2ForImageClassification

[[autodoc]] transformers.Swinv2ForImageClassification
- forward

@ -94,7 +94,6 @@ See the [model hub](https://huggingface.co/models?filter=trocr) to look for TrOC
- save_pretrained
- batch_decode
- decode
- as_target_processor

## TrOCRForCausalLM

60
docs/source/en/model_doc/videomae.mdx
Normal file
@ -0,0 +1,60 @@
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# VideoMAE

## Overview

The VideoMAE model was proposed in [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
VideoMAE extends masked autoencoders ([MAE](vit_mae)) to video, claiming state-of-the-art performance on several video classification benchmarks.

The abstract from the paper is the following:

*Pre-training video transformers on extra large-scale datasets is generally required to achieve premier performance on relatively small datasets. In this paper, we show that video masked autoencoders (VideoMAE) are data-efficient learners for self-supervised video pre-training (SSVP). We are inspired by the recent ImageMAE and propose customized video tube masking and reconstruction. These simple designs turn out to be effective for overcoming information leakage caused by the temporal correlation during video reconstruction. We obtain three important findings on SSVP: (1) An extremely high proportion of masking ratio (i.e., 90% to 95%) still yields favorable performance of VideoMAE. The temporally redundant video content enables higher masking ratio than that of images. (2) VideoMAE achieves impressive results on very small datasets (i.e., around 3k-4k videos) without using any extra data. This is partially ascribed to the challenging task of video reconstruction to enforce high-level structure learning. (3) VideoMAE shows that data quality is more important than data quantity for SSVP. Domain shift between pre-training and target datasets are important issues in SSVP. Notably, our VideoMAE with the vanilla ViT backbone can achieve 83.9% on Kinects-400, 75.3% on Something-Something V2, 90.8% on UCF101, and 61.1% on HMDB51 without using any extra data.*

Tips:

- One can use [`VideoMAEFeatureExtractor`] to prepare videos for the model. It will resize + normalize all frames of a video for you.
- [`VideoMAEForPreTraining`] includes the decoder on top for self-supervised pre-training.

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/videomae_architecture.jpeg"
alt="drawing" width="600"/>

<small> VideoMAE pre-training. Taken from the <a href="https://arxiv.org/abs/2203.12602">original paper</a>. </small>

This model was contributed by [nielsr](https://huggingface.co/nielsr).
The original code can be found [here](https://github.com/MCG-NJU/VideoMAE).
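
As a minimal sketch of the tips above, video classification could look as follows; the checkpoint name and the 16 random frames are assumptions, not part of the original example:

```python
>>> import numpy as np

>>> from transformers import VideoMAEFeatureExtractor, VideoMAEForVideoClassification

>>> # hypothetical checkpoint; substitute any VideoMAE video-classification checkpoint
>>> feature_extractor = VideoMAEFeatureExtractor.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")
>>> model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics")

>>> # a video is passed as a list of frames; 16 random frames stand in for a real clip
>>> video = list(np.random.randn(16, 3, 224, 224))

>>> inputs = feature_extractor(video, return_tensors="pt")
>>> logits = model(**inputs).logits
>>> print(model.config.id2label[logits.argmax(-1).item()])
```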

## VideoMAEConfig

[[autodoc]] VideoMAEConfig

## VideoMAEFeatureExtractor

[[autodoc]] VideoMAEFeatureExtractor
- __call__

## VideoMAEModel

[[autodoc]] VideoMAEModel
- forward

## VideoMAEForPreTraining

[[autodoc]] transformers.VideoMAEForPreTraining
- forward

## VideoMAEForVideoClassification

[[autodoc]] transformers.VideoMAEForVideoClassification
- forward

@ -12,16 +12,136 @@ specific language governing permissions and limitations under the License.

# Vision Encoder Decoder Models

The [`VisionEncoderDecoderModel`] can be used to initialize an image-to-text-sequence model with any
pretrained Transformer-based vision autoencoding model as the encoder (*e.g.* [ViT](vit), [BEiT](beit), [DeiT](deit), [Swin](swin))
## Overview

The [`VisionEncoderDecoderModel`] can be used to initialize an image-to-text model with any
pretrained Transformer-based vision model as the encoder (*e.g.* [ViT](vit), [BEiT](beit), [DeiT](deit), [Swin](swin))
and any pretrained language model as the decoder (*e.g.* [RoBERTa](roberta), [GPT2](gpt2), [BERT](bert), [DistilBERT](distilbert)).

The effectiveness of initializing image-to-text-sequence models with pretrained checkpoints has been shown in (for
example) [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang,
Zhoujun Li, Furu Wei.

An example of how to use a [`VisionEncoderDecoderModel`] for inference can be seen in [TrOCR](trocr).
After such a [`VisionEncoderDecoderModel`] has been trained/fine-tuned, it can be saved/loaded just like any other model (see the examples below
for more information).

An example application is image captioning, in which the encoder is used to encode the image, after which an autoregressive language model generates
the caption. Another example is optical character recognition. Refer to [TrOCR](trocr), which is an instance of [`VisionEncoderDecoderModel`].

## Randomly initializing `VisionEncoderDecoderModel` from model configurations.

[`VisionEncoderDecoderModel`] can be randomly initialized from an encoder and a decoder config. In the following example, we show how to do this using the default [`ViTModel`] configuration for the encoder
and the default [`BertForCausalLM`] configuration for the decoder.

```python
>>> from transformers import BertConfig, ViTConfig, VisionEncoderDecoderConfig, VisionEncoderDecoderModel

>>> config_encoder = ViTConfig()
>>> config_decoder = BertConfig()

>>> config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
>>> model = VisionEncoderDecoderModel(config=config)
```

## Initializing `VisionEncoderDecoderModel` from a pretrained encoder and a pretrained decoder.

[`VisionEncoderDecoderModel`] can be initialized from a pretrained encoder checkpoint and a pretrained decoder checkpoint. Note that any pretrained Transformer-based vision model, *e.g.* [Swin](swin), can serve as the encoder, while pretrained auto-encoding models (*e.g.* BERT), pretrained causal language models (*e.g.* GPT2), as well as the pretrained decoder part of sequence-to-sequence models (*e.g.* the decoder of BART), can be used as the decoder.
Depending on which architecture you choose as the decoder, the cross-attention layers might be randomly initialized.
Initializing [`VisionEncoderDecoderModel`] from a pretrained encoder and decoder checkpoint requires the model to be fine-tuned on a downstream task, as has been shown in [the *Warm-starting-encoder-decoder blog post*](https://huggingface.co/blog/warm-starting-encoder-decoder).
To do so, the `VisionEncoderDecoderModel` class provides a [`VisionEncoderDecoderModel.from_encoder_decoder_pretrained`] method.

```python
>>> from transformers import VisionEncoderDecoderModel

>>> model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
...     "microsoft/swin-base-patch4-window7-224-in22k", "bert-base-uncased"
... )
```

## Loading an existing `VisionEncoderDecoderModel` checkpoint and performing inference.

To load fine-tuned checkpoints of the `VisionEncoderDecoderModel` class, [`VisionEncoderDecoderModel`] provides the `from_pretrained(...)` method just like any other model architecture in Transformers.

To perform inference, one uses the [`generate`] method, which allows you to autoregressively generate text. This method supports various forms of decoding, such as greedy, beam search and multinomial sampling.

```python
>>> import requests
>>> from PIL import Image

>>> from transformers import GPT2TokenizerFast, ViTFeatureExtractor, VisionEncoderDecoderModel

>>> # load a fine-tuned image captioning model and corresponding tokenizer and feature extractor
>>> model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
>>> tokenizer = GPT2TokenizerFast.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
>>> feature_extractor = ViTFeatureExtractor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")

>>> # let's perform inference on an image
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> pixel_values = feature_extractor(image, return_tensors="pt").pixel_values

>>> # autoregressively generate caption (uses greedy decoding by default)
>>> generated_ids = model.generate(pixel_values)
>>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> print(generated_text)
a cat laying on a blanket next to a cat laying on a bed
```
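
For instance, beam search can be enabled by passing the usual generation arguments to [`generate`]; the values below are only illustrative:

```python
>>> # a sketch: beam search with 4 beams and a length cap
>>> generated_ids = model.generate(pixel_values, num_beams=4, max_length=20)
>>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
```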

## Loading a PyTorch checkpoint into `TFVisionEncoderDecoderModel`.

[`TFVisionEncoderDecoderModel.from_pretrained`] currently doesn't support initializing the model from a
PyTorch checkpoint. Passing `from_pt=True` to this method will throw an exception. If there are only PyTorch
checkpoints for a particular vision encoder-decoder model, a workaround is:

```python
>>> from transformers import VisionEncoderDecoderModel, TFVisionEncoderDecoderModel

>>> _model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")

>>> _model.encoder.save_pretrained("./encoder")
>>> _model.decoder.save_pretrained("./decoder")

>>> model = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
...     "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True
... )
>>> # This is only for copying some specific attributes of this particular model.
>>> model.config = _model.config
```

## Training

Once the model is created, it can be fine-tuned similarly to BART, T5 or any other encoder-decoder model on a dataset of (image, text) pairs.
As you can see, only 2 inputs are required for the model in order to compute a loss: `pixel_values` (which are the
images) and `labels` (which are the `input_ids` of the encoded target sequence).

```python
>>> from transformers import ViTFeatureExtractor, BertTokenizer, VisionEncoderDecoderModel
>>> from datasets import load_dataset

>>> feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
...     "google/vit-base-patch16-224-in21k", "bert-base-uncased"
... )

>>> model.config.decoder_start_token_id = tokenizer.cls_token_id
>>> model.config.pad_token_id = tokenizer.pad_token_id

>>> dataset = load_dataset("huggingface/cats-image")
>>> image = dataset["test"]["image"][0]
>>> pixel_values = feature_extractor(image, return_tensors="pt").pixel_values

>>> labels = tokenizer(
...     "an image of two cats chilling on a couch",
...     return_tensors="pt",
... ).input_ids

>>> # the forward function automatically creates the correct decoder_input_ids
>>> loss = model(pixel_values=pixel_values, labels=labels).loss
```

This model was contributed by [nielsr](https://github.com/nielsrogge). This model's TensorFlow and Flax versions
were contributed by [ydshieh](https://github.com/ydshieh).

## VisionEncoderDecoderConfig

@ -62,7 +62,6 @@ This model was contributed by [patrickvonplaten](https://huggingface.co/patrickv
- save_pretrained
- batch_decode
- decode
- as_target_processor

## Wav2Vec2ProcessorWithLM

@ -73,7 +72,6 @@ This model was contributed by [patrickvonplaten](https://huggingface.co/patrickv
- save_pretrained
- batch_decode
- decode
- as_target_processor

## Wav2Vec2 specific outputs

@ -179,10 +179,10 @@ This creates a repository under your username with the model name `my-awesome-mo
>>> model = AutoModel.from_pretrained("your_username/my-awesome-model")
```

If you belong to an organization and want to push your model under the organization name instead, add the `organization` parameter:
If you belong to an organization and want to push your model under the organization name instead, just add it to the `repo_id`:

```py
>>> pt_model.push_to_hub("my-awesome-model", organization="my-awesome-org")
>>> pt_model.push_to_hub("my-awesome-org/my-awesome-model")
```

The `push_to_hub` function can also be used to add other files to a model repository. For example, add a tokenizer to a model repository:

92
docs/source/en/perf_train_cpu_many.mdx
Normal file
@ -0,0 +1,92 @@
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Efficient Training on Multiple CPUs

When training on a single CPU is too slow, we can use multiple CPUs. This guide focuses on PyTorch-based DDP, which enables distributed CPU training efficiently.

## Intel® oneCCL Bindings for PyTorch

[Intel® oneCCL](https://github.com/oneapi-src/oneCCL) (collective communications library) is a library for efficient distributed deep learning training, implementing collectives such as allreduce, allgather and alltoall. For more information on oneCCL, please refer to the [oneCCL documentation](https://spec.oneapi.com/versions/latest/elements/oneCCL/source/index.html) and [oneCCL specification](https://spec.oneapi.com/versions/latest/elements/oneCCL/source/index.html).

The `oneccl_bindings_for_pytorch` module (`torch_ccl` before version 1.12) implements the PyTorch C10D ProcessGroup API and can be dynamically loaded as an external ProcessGroup. It currently only works on Linux.

For more detailed information, see [oneccl_bind_pt](https://github.com/intel/torch-ccl).
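
As a rough sketch of how the bindings are used directly (the launcher and environment variable names are assumptions that depend on your MPI setup), importing the module registers a `ccl` backend that `torch.distributed` can then initialize:

```python
import os

import torch.distributed as dist
import oneccl_bindings_for_pytorch  # noqa: F401  # importing the module registers the "ccl" backend

# rank/world size as exported by the MPI launcher (the PMI_* names assume Intel MPI)
os.environ.setdefault("RANK", os.environ.get("PMI_RANK", "0"))
os.environ.setdefault("WORLD_SIZE", os.environ.get("PMI_SIZE", "1"))
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")

dist.init_process_group(backend="ccl")
print(f"initialized rank {dist.get_rank()} of {dist.get_world_size()}")
```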

### Intel® oneCCL Bindings for PyTorch installation

Wheel files are available for the following Python versions:

| Extension Version | Python 3.6 | Python 3.7 | Python 3.8 | Python 3.9 | Python 3.10 |
| :---------------: | :--------: | :--------: | :--------: | :--------: | :---------: |
| 1.12.0            |            | √          | √          | √          | √           |
| 1.11.0            |            | √          | √          | √          | √           |
| 1.10.0            | √          | √          | √          | √          |             |

```
pip install oneccl_bind_pt=={pytorch_version} -f https://software.intel.com/ipex-whl-stable
```

where `{pytorch_version}` should be your PyTorch version, for instance 1.12.0.
See [oneccl_bind_pt installation](https://github.com/intel/torch-ccl) for more installation approaches.

### Usage in Trainer

To enable multi-CPU distributed training in the Trainer with the ccl backend, users should add **`--xpu_backend ccl`** to the command arguments.

Let's see an example with the [question-answering example](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering).

The following command enables training with 2 processes on one Xeon node, with one process running per socket. The variables OMP_NUM_THREADS/CCL_WORKER_COUNT can be tuned for optimal performance.

```shell script
export CCL_WORKER_COUNT=1
export MASTER_ADDR=127.0.0.1
mpirun -n 2 -genv OMP_NUM_THREADS=23 \
python3 run_qa.py \
    --model_name_or_path bert-large-uncased \
    --dataset_name squad \
    --do_train \
    --do_eval \
    --per_device_train_batch_size 12 \
    --learning_rate 3e-5 \
    --num_train_epochs 2 \
    --max_seq_length 384 \
    --doc_stride 128 \
    --output_dir /tmp/debug_squad/ \
    --no_cuda \
    --xpu_backend ccl
```

The following command enables training with a total of four processes on two Xeons (node0 and node1, taking node0 as the main process), ppn (processes per node) is set to 2, with one process running per socket. The variables OMP_NUM_THREADS/CCL_WORKER_COUNT can be tuned for optimal performance.

In node0, you need to create a configuration file which contains the IP addresses of each node (for example hostfile) and pass that configuration file path as an argument.

```shell script
cat hostfile
xxx.xxx.xxx.xxx #node0 ip
xxx.xxx.xxx.xxx #node1 ip
```

Now, run the following command in node0 and **4DDP** will be enabled in node0 and node1:

```shell script
export CCL_WORKER_COUNT=1
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
mpirun -f hostfile -n 4 -ppn 2 \
 -genv OMP_NUM_THREADS=23 \
python3 run_qa.py \
    --model_name_or_path bert-large-uncased \
    --dataset_name squad \
    --do_train \
    --do_eval \
    --per_device_train_batch_size 12 \
    --learning_rate 3e-5 \
    --num_train_epochs 2 \
    --max_seq_length 384 \
    --doc_stride 128 \
    --output_dir /tmp/debug_squad/ \
    --no_cuda \
    --xpu_backend ccl
```

@ -486,10 +486,8 @@ A processor combines a feature extractor and tokenizer. Load a processor with [`
>>> def prepare_dataset(example):
...     audio = example["audio"]

...     example["input_values"] = processor(audio["array"], sampling_rate=16000)
...     example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000))

...     with processor.as_target_processor():
...         example["labels"] = processor(example["text"]).input_ids
...     return example
```

@ -109,11 +109,10 @@ The preprocessing function needs to:
>>> def prepare_dataset(batch):
...     audio = batch["audio"]

...     batch["input_values"] = processor(audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0]
...     batch = processor(audio=audio["array"], sampling_rate=audio["sampling_rate"]).input_values[0]
...     batch["input_length"] = len(batch["input_values"])

...     with processor.as_target_processor():
...         batch["labels"] = processor(batch["transcription"]).input_ids
...     batch["labels"] = processor(text=batch["transcription"]).input_ids
...     return batch
```

@ -146,17 +145,9 @@ Unlike other data collators, this specific data collator needs to apply a differ
...         input_features = [{"input_values": feature["input_values"]} for feature in features]
...         label_features = [{"input_ids": feature["labels"]} for feature in features]

...         batch = self.processor.pad(
...             input_features,
...             padding=self.padding,
...             return_tensors="pt",
...         )
...         with self.processor.as_target_processor():
...             labels_batch = self.processor.pad(
...                 label_features,
...                 padding=self.padding,
...                 return_tensors="pt",
...             )
...         batch = self.processor.pad(input_features, padding=self.padding, return_tensors="pt")

...         labels_batch = self.processor.pad(labels=label_features, padding=self.padding, return_tensors="pt")

...         # replace padding with -100 to ignore loss correctly
...         labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

@ -67,7 +67,7 @@ Load the T5 tokenizer to process `text` and `summary`:
The preprocessing function needs to:

1. Prefix the input with a prompt so T5 knows this is a summarization task. Some models capable of multiple NLP tasks require prompting for specific tasks.
2. Use a context manager with the `as_target_tokenizer()` function to parallelize tokenization of inputs and labels.
2. Use the `text_target` keyword argument when tokenizing labels.
3. Truncate sequences to be no longer than the maximum length set by the `max_length` parameter.

```py
@ -78,8 +78,7 @@ The preprocessing function needs to:
...     inputs = [prefix + doc for doc in examples["text"]]
...     model_inputs = tokenizer(inputs, max_length=1024, truncation=True)

...     with tokenizer.as_target_tokenizer():
...         labels = tokenizer(examples["summary"], max_length=128, truncation=True)
...     labels = tokenizer(text_target=examples["summary"], max_length=128, truncation=True)

...     model_inputs["labels"] = labels["input_ids"]
...     return model_inputs

@ -78,12 +78,7 @@ The preprocessing function needs to:
>>> def preprocess_function(examples):
...     inputs = [prefix + example[source_lang] for example in examples["translation"]]
...     targets = [example[target_lang] for example in examples["translation"]]
...     model_inputs = tokenizer(inputs, max_length=128, truncation=True)

...     with tokenizer.as_target_tokenizer():
...         labels = tokenizer(targets, max_length=128, truncation=True)

...     model_inputs["labels"] = labels["input_ids"]
...     model_inputs = tokenizer(inputs, text_target=targets, max_length=128, truncation=True)
...     return model_inputs
```

@ -33,6 +33,8 @@
- local: tasks/image_classification
  title: Clasificación de imágenes
  title: Fine-tuning para tareas posteriores
- local: run_scripts
  title: Entrenamiento con scripts
- local: sagemaker
  title: Ejecutar el entrenamiento en Amazon SageMaker
- local: multilingual

@ -471,10 +471,8 @@ Un processor combina un extractor de características y un tokenizador. Cargue u
>>> def prepare_dataset(example):
...     audio = example["audio"]

...     example["input_values"] = processor(audio["array"], sampling_rate=16000)
...     example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000))

...     with processor.as_target_processor():
...         example["labels"] = processor(example["text"]).input_ids
...     return example
```

347
docs/source/es/run_scripts.mdx
Normal file
@ -0,0 +1,347 @@
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Entrenamiento con scripts

Junto con los [notebooks](./noteboks/README) de 🤗 Transformers, también hay scripts con ejemplos que muestran cómo entrenar un modelo para una tarea en [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow), o [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax).

También encontrarás scripts que hemos usado en nuestros [proyectos de investigación](https://github.com/huggingface/transformers/tree/main/examples/research_projects) y [ejemplos pasados](https://github.com/huggingface/transformers/tree/main/examples/legacy) que en su mayoría son aportados por la comunidad. Estos scripts no se mantienen activamente y requieren una versión específica de 🤗 Transformers que probablemente sea incompatible con la última versión de la biblioteca.

No se espera que los scripts de ejemplo funcionen de inmediato en todos los problemas, y es posible que debas adaptar el script al problema que estás tratando de resolver. Para ayudarte con esto, la mayoría de los scripts exponen completamente cómo se preprocesan los datos, lo que te permite editarlos según sea necesario para tu caso de uso.

Para cualquier característica que te gustaría implementar en un script de ejemplo, por favor discútelo en el [foro](https://discuss.huggingface.co/) o con un [issue](https://github.com/huggingface/transformers/issues) antes de enviar un Pull Request. Si bien agradecemos las correcciones de errores, es poco probable que fusionemos un Pull Request que agregue más funcionalidad a costa de la legibilidad.

Esta guía te mostrará cómo ejecutar un ejemplo de un script de entrenamiento para resumir texto en [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization) y [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization). Se espera que todos los ejemplos funcionen con ambos frameworks a menos que se especifique lo contrario.

## Configuración

Para ejecutar con éxito la última versión de los scripts de ejemplo debes **instalar 🤗 Transformers desde su fuente** en un nuevo entorno virtual:

```bash
git clone https://github.com/huggingface/transformers
cd transformers
pip install .
```

Para versiones anteriores de los scripts de ejemplo, haz clic en alguno de los siguientes links:

<details>
<summary>Ejemplos de versiones anteriores de 🤗 Transformers</summary>
<ul>
<li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.2.2/examples">v4.2.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.1.1/examples">v4.1.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v4.0.1/examples">v4.0.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.5.1/examples">v3.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li>
<li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li>
</ul>
</details>

Luego cambia tu clon actual de 🤗 Transformers a una versión específica, por ejemplo v3.5.1:

```bash
git checkout tags/v3.5.1
```

Una vez que hayas configurado la versión correcta de la biblioteca, ve a la carpeta de ejemplo de tu elección e instala los requisitos específicos del ejemplo:

```bash
pip install -r requirements.txt
```

## Ejecutar un script

<frameworkcontent>
<pt>
El script de ejemplo descarga y preprocesa un conjunto de datos de la biblioteca 🤗 [Datasets](https://huggingface.co/docs/datasets/). Luego, el script ajusta un conjunto de datos con [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) en una arquitectura que soporta la tarea de resumen. El siguiente ejemplo muestra cómo ajustar un [T5-small](https://huggingface.co/t5-small) en el conjunto de datos [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail). El modelo T5 requiere un argumento adicional `source_prefix` debido a cómo fue entrenado. Este aviso le permite a T5 saber que se trata de una tarea de resumir.

```bash
python examples/pytorch/summarization/run_summarization.py \
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
```
</pt>
<tf>
El script de ejemplo descarga y preprocesa un conjunto de datos de la biblioteca 🤗 [Datasets](https://huggingface.co/docs/datasets/). Luego, el script ajusta un conjunto de datos utilizando Keras en una arquitectura que soporta la tarea de resumir. El siguiente ejemplo muestra cómo ajustar un [T5-small](https://huggingface.co/t5-small) en el conjunto de datos [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail). El modelo T5 requiere un argumento adicional `source_prefix` debido a cómo fue entrenado. Este aviso le permite a T5 saber que se trata de una tarea de resumir.

```bash
python examples/tensorflow/summarization/run_summarization.py \
    --model_name_or_path t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 16 \
    --num_train_epochs 3 \
    --do_train \
    --do_eval
```
</tf>
</frameworkcontent>

## Entrenamiento distribuido y de precisión mixta

[Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) admite un entrenamiento distribuido y de precisión mixta, lo que significa que también puedes usarlo en un script. Para habilitar ambas características:

- Agrega el argumento `fp16` para habilitar la precisión mixta.
- Establece la cantidad de GPUs que usarás con el argumento `nproc_per_node`.

```bash
python -m torch.distributed.launch \
    --nproc_per_node 8 pytorch/summarization/run_summarization.py \
    --fp16 \
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
```

Los scripts de TensorFlow utilizan [`MirroredStrategy`](https://www.tensorflow.org/guide/distributed_training#mirroredstrategy) para el entrenamiento distribuido, y no es necesario agregar argumentos adicionales al script de entrenamiento. El script de TensorFlow utilizará múltiples GPUs de forma predeterminada si están disponibles.

## Ejecutar un script en una TPU

<frameworkcontent>
<pt>
Las Unidades de Procesamiento de Tensor (TPUs) están diseñadas específicamente para acelerar el rendimiento. PyTorch admite TPU con el compilador de aprendizaje profundo [XLA](https://www.tensorflow.org/xla) (consulta [aquí](https://github.com/pytorch/xla/blob/master/README.md) para obtener más detalles). Para usar una TPU, inicia el script `xla_spawn.py` y usa el argumento `num_cores` para establecer la cantidad de núcleos de TPU que deseas usar.

```bash
python xla_spawn.py --num_cores 8 \
    summarization/run_summarization.py \
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
```
</pt>
<tf>
Las Unidades de Procesamiento de Tensor (TPUs) están diseñadas específicamente para acelerar el rendimiento. TensorFlow utiliza [`TPUStrategy`](https://www.tensorflow.org/guide/distributed_training#tpustrategy) para entrenar en TPUs. Para usar una TPU, pasa el nombre del recurso de la TPU al argumento `tpu`.

```bash
python run_summarization.py \
    --tpu name_of_tpu_resource \
    --model_name_or_path t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 16 \
    --num_train_epochs 3 \
    --do_train \
    --do_eval
```
</tf>
</frameworkcontent>

## Ejecutar un script con 🤗 Accelerate

🤗 [Accelerate](https://huggingface.co/docs/accelerate/index.html) es una biblioteca exclusiva de PyTorch que ofrece un método unificado para entrenar un modelo en varios tipos de configuraciones (solo CPU, GPU múltiples, TPU) mientras mantiene una visibilidad completa en el ciclo de entrenamiento de PyTorch. Asegúrate de tener 🤗 Accelerate instalado si aún no lo tienes:

> Nota: Como Accelerate se está desarrollando rápidamente, debes instalar la versión git de Accelerate para ejecutar los scripts
```bash
pip install git+https://github.com/huggingface/accelerate
```

En lugar del script `run_summarization.py`, debes usar el script `run_summarization_no_trainer.py`. Los scripts compatibles con 🤗 Accelerate tendrán un archivo `task_no_trainer.py` en la carpeta. Comienza ejecutando el siguiente comando para crear y guardar un archivo de configuración:

```bash
accelerate config
```

Prueba tu configuración para asegurarte de que está configurada correctamente:

```bash
accelerate test
```

Todo listo para iniciar el entrenamiento:

```bash
accelerate launch run_summarization_no_trainer.py \
    --model_name_or_path t5-small \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir ~/tmp/tst-summarization
```

## Usar un conjunto de datos personalizado

El script de la tarea resumir admite conjuntos de datos personalizados siempre que sean un archivo CSV o JSON Line. Cuando uses tu propio conjunto de datos, necesitas especificar varios argumentos adicionales:

- `train_file` y `validation_file` especifican la ruta a tus archivos de entrenamiento y validación.
- `text_column` es el texto de entrada para resumir.
- `summary_column` es el texto de destino para la salida.

Un script para resumir que utiliza un conjunto de datos personalizado se verá así:

```bash
python examples/pytorch/summarization/run_summarization.py \
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --train_file path_to_csv_or_jsonlines_file \
    --validation_file path_to_csv_or_jsonlines_file \
    --text_column text_column_name \
    --summary_column summary_column_name \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --overwrite_output_dir \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --predict_with_generate
```

## Prueba un script

A veces, es una buena idea ejecutar tu secuencia de comandos en una cantidad menor de ejemplos para asegurarte de que todo funciona como se espera antes de comprometerte con un conjunto de datos completo, lo que puede demorar horas en completarse. Utiliza los siguientes argumentos para truncar el conjunto de datos a un número máximo de muestras:

- `max_train_samples`
- `max_eval_samples`
- `max_predict_samples`

```bash
python examples/pytorch/summarization/run_summarization.py \
    --model_name_or_path t5-small \
    --max_train_samples 50 \
    --max_eval_samples 50 \
    --max_predict_samples 50 \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
```

No todos los scripts de ejemplo admiten el argumento `max_predict_samples`. Si no estás seguro de que tu script lo admita, agrega `-h` para verificarlo:

```bash
examples/pytorch/summarization/run_summarization.py -h
```

## Reanudar el entrenamiento desde el punto de control

Otra opción útil para habilitar es reanudar el entrenamiento desde un punto de control anterior. Esto asegurará que puedas continuar donde lo dejaste sin comenzar de nuevo si tu entrenamiento se interrumpe. Hay dos métodos para reanudar el entrenamiento desde un punto de control.

El primer método utiliza el argumento `output_dir previous_output_dir` para reanudar el entrenamiento desde el último punto de control almacenado en `output_dir`. En este caso, debes eliminar `overwrite_output_dir`:

```bash
python examples/pytorch/summarization/run_summarization.py
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --output_dir previous_output_dir \
    --predict_with_generate
```

El segundo método utiliza el argumento `resume_from_checkpoint path_to_specific_checkpoint` para reanudar el entrenamiento desde una carpeta de punto de control específica.

```bash
python examples/pytorch/summarization/run_summarization.py
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --resume_from_checkpoint path_to_specific_checkpoint \
    --predict_with_generate
```

## Comparte tu modelo

Todos los scripts pueden cargar tu modelo final en el [Model Hub](https://huggingface.co/models). Asegúrate de haber iniciado sesión en Hugging Face antes de comenzar:

```bash
huggingface-cli login
```

Luego agrega el argumento `push_to_hub` al script. Este argumento creará un repositorio con tu nombre de usuario Hugging Face y el nombre de la carpeta especificado en `output_dir`.

Para darle a tu repositorio un nombre específico, usa el argumento `push_to_hub_model_id` para añadirlo. El repositorio se incluirá automáticamente en tu namespace.

El siguiente ejemplo muestra cómo cargar un modelo con un nombre de repositorio específico:

```bash
python examples/pytorch/summarization/run_summarization.py
    --model_name_or_path t5-small \
    --do_train \
    --do_eval \
    --dataset_name cnn_dailymail \
    --dataset_config "3.0.0" \
    --source_prefix "summarize: " \
    --push_to_hub \
    --push_to_hub_model_id finetuned-t5-cnn_dailymail \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --overwrite_output_dir \
    --predict_with_generate
```

@ -14,7 +14,7 @@
- local: preprocessing
  title: Preprocess
- local: training
  title: Mettere a punto un modello pre-addestrato
  title: Fine-tuning di un modello pre-addestrato
- local: accelerate
  title: Allenamento distribuito con 🤗 Accelerate
- local: model_sharing

@ -471,10 +471,8 @@ Un processor combina un estrattore di caratteristiche e un tokenizer. Carica un
>>> def prepare_dataset(example):
...     audio = example["audio"]

...     example["input_values"] = processor(audio["array"], sampling_rate=16000)
...     example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000))

...     with processor.as_target_processor():
...         example["labels"] = processor(example["text"]).input_ids
...     return example
```

@ -250,7 +250,7 @@ Come con la [`pipeline`], il tokenizer accetterà una lista di input. In più, i
</tf>
</frameworkcontent>

Leggi il tutorial sul [preproccesing](./preprocessing) per maggiori dettagli sulla tokenizzazione.
Leggi il tutorial sul [preprocessing](./preprocessing) per maggiori dettagli sulla tokenizzazione.

### AutoModel

@ -10,15 +10,15 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o
specific language governing permissions and limitations under the License.
-->

# Mettere a punto un modello pre-addestrato
# Fine-tuning di un modello pre-addestrato

[[open-in-colab]]

Ci sono benefici significativi nell'usare un modello pre-allenato. Riduce i costi computazionali, la tua impronta di carbonio, e ti consente di usare lo stato dell'arte dei modelli senza doverli addestrare da zero. 🤗 Transformers consente l'accesso a migliaia di modelli pre-addestrati per un'ampia gamma di compiti. Quando usi un modello pre-addestrato, lo alleni su un dataset specifico per il tuo compito. Questo è conosciuto come messa a punto (in inglese *fine-tuning*),una tecnica di addestramento incredibilmente potente. In questa esercitazione, potrai mettere a punto un modello pre-addestrato con un framework di deep learning a tua scelta:
Ci sono benefici significativi nell'usare un modello pre-addestrato. Si riducono i costi computazionali, l'impronta di carbonio e ti consente di usare modelli stato dell'arte senza doverli addestrare da zero. 🤗 Transformers consente l'accesso a migliaia di modelli pre-addestrati per un'ampia gamma di compiti. Quando usi un modello pre-addestrato, lo alleni su un dataset specifico per il tuo compito. Questo è conosciuto come fine-tuning, una tecnica di addestramento incredibilmente potente. In questa esercitazione, potrai fare il fine-tuning di un modello pre-addestrato, con un framework di deep learning a tua scelta:

* Messa a punto di un modello pre-addestrato con 🤗 Transformers [`Trainer`].
* Messa a punto di un modello pre-addestrato in TensorFlow con Keras.
* Messa a punto di un modello pre-addestrato con PyTorch.
* Fine-tuning di un modello pre-addestrato con 🤗 Transformers [`Trainer`].
* Fine-tuning di un modello pre-addestrato in TensorFlow con Keras.
* Fine-tuning di un modello pre-addestrato con PyTorch.

<a id='data-processing'></a>

@ -26,7 +26,7 @@ Ci sono benefici significativi nell'usare un modello pre-allenato. Riduce i cost

<Youtube id="_BZearw7f0w"/>

Prima di poter mettere a punto un modello pre-addestrato, scarica un dataset e preparalo per l'addestramento. La precedente esercitazione ti mostra come processare i dati per l'addestramento, e adesso hai l'opportunità di provare queste capacità sul test!
Prima di poter fare il fine-tuning di un modello pre-addestrato, scarica un dataset e preparalo per l'addestramento. La precedente esercitazione ti ha mostrato come processare i dati per l'addestramento e adesso hai l'opportunità di metterti alla prova!

Inizia caricando il dataset [Yelp Reviews](https://huggingface.co/datasets/yelp_review_full):

@ -39,7 +39,7 @@ Inizia caricando il dataset [Yelp Reviews](https://huggingface.co/datasets/yelp_
'text': 'My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\nThe cashier took my friends\'s order, then promptly ignored me. I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\'s meal. After watching two people who ordered after me be handed their food, I asked where mine was. The manager started yelling at the cashiers for \\"serving off their orders\\" when they didn\'t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\nThe manager was rude when giving me my order. She didn\'t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\nI\'ve eaten at various McDonalds restaurants for over 30 years. I\'ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!'}
|
||||
```
|
||||
|
||||
Come già sai, hai bisogno di un tokenizer per processare il testo e includere una strategia di padding e truncation per gestire sequenze di lunghezza variabile. Per processare il dataset in un unico passo, usa il metodo [`map`](https://huggingface.co/docs/datasets/process.html#map) di 🤗 Datasets che applica la funzione di preprocessamento all'intero dataset:
|
||||
Come già sai, hai bisogno di un tokenizer per processare il testo e includere una strategia di padding e truncation per gestire sequenze di lunghezza variabile. Per processare il dataset in un unico passo, usa il metodo [`map`](https://huggingface.co/docs/datasets/process.html#map) di 🤗 Datasets che applica la funzione di preprocessing all'intero dataset:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
@ -54,7 +54,7 @@ Come già sai, hai bisogno di un tokenizer per processare il testo e includere u
|
||||
>>> tokenized_datasets = dataset.map(tokenize_function, batched=True)
|
||||
```
|
||||
|
||||
If you like, you can create a smaller subset of the full dataset to fine-tune on to reduce the time it takes:
|
||||
Se vuoi, puoi creare un sottoinsieme più piccolo del dataset per il fine-tuning così da ridurre il tempo necessario:
|
||||
|
||||
```py
|
||||
>>> small_train_dataset = tokenized_datasets["train"].shuffle(seed=42).select(range(1000))
|
||||
@ -69,9 +69,9 @@ If you like, you can create a smaller subset of the full dataset to fine-tune on
|
||||
<pt>
|
||||
<Youtube id="nvBXf7s7vTI"/>
|
||||
|
||||
🤗 Transformers mette a disposizione la classe [`Trainer`] ottimizzata per addestrare modelli 🤗 Transformers, rendendo semplice iniziare l'addestramento senza scrivere manualmente il tuo ciclo di allenamento. L'API [`Trainer`] supporta un'ampia gamma di opzioni e funzionalità di addestramento come logging, gradient accumulation, e mixed precision.
|
||||
🤗 Transformers mette a disposizione la classe [`Trainer`] ottimizzata per addestrare modelli 🤗 Transformers, rendendo semplice iniziare l'addestramento senza scrivere manualmente il tuo ciclo di addestramento. L'API [`Trainer`] supporta un'ampia gamma di opzioni e funzionalità di addestramento come logging, gradient accumulation e mixed precision.
|
||||
|
||||
Inizia caricando il tuo modello e specificando il numero di etichette attese. Nel dataset Yelp Review [dataset card](https://huggingface.co/datasets/yelp_review_full#data-fields), sai che ci sono cinque etichette:
|
||||
Inizia caricando il tuo modello e specificando il numero di etichette (labels) attese. Nel dataset Yelp Review [dataset card](https://huggingface.co/datasets/yelp_review_full#data-fields), sai che ci sono cinque etichette:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForSequenceClassification
|
||||
@ -81,13 +81,13 @@ Inizia caricando il tuo modello e specificando il numero di etichette attese. Ne
|
||||
|
||||
<Tip>
|
||||
|
||||
Potresti vedere un'allerta dato che alcuni dei pesi pre-addestrati non sono stati utilizzati e altri pesi sono stati inizializzati casualmente. Non preoccuparti, è completamente normale! La testa pre-allenata del modello BERT viene scartata, e rimpiazzata da una testa per la classificazione inizializzata casualmente. Tu metterai a punto la nuova testa del modello sulla tua sequenza per il compito di classificazione, trasferendogli la conoscenza del modello pre-allenato.
|
||||
Potresti vedere un warning dato che alcuni dei pesi pre-addestrati non sono stati utilizzati e altri pesi sono stati inizializzati casualmente. Non preoccuparti, è completamente normale! L'head pre-addestrata del modello BERT viene scartata e rimpiazzata da una classification head inizializzata casualmente. Farai il fine-tuning di questa nuova head del modello sul tuo compito di classificazione, trasferendogli la conoscenza del modello pre-addestrato.
|
||||
|
||||
</Tip>
|
||||
|
||||
### Addestrare gli iperparametri
|
||||
### Iperparametri per il training
|
||||
|
||||
Successivamente, crea una classe [`TrainingArguments`] contenente tutti gli iperparametri che si possono calibrare nonché le variabili per attivare le differenti opzioni di addestramento. Per questa esercitazione puoi iniziare con gli [iperparametri](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments) di allenamento predefiniti, ma sentiti libero di sperimentare per trovare la configurazione ottimale per te.
|
||||
Successivamente, crea una classe [`TrainingArguments`] contenente tutti gli iperparametri che si possono regore nonché le variabili per attivare le differenti opzioni di addestramento. Per questa esercitazione puoi iniziare con gli [iperparametri](https://huggingface.co/docs/transformers/main_classes/trainer#transformers.TrainingArguments) di ddestramento predefiniti, ma sentiti libero di sperimentare per trovare la configurazione ottimale per te.
|
||||
|
||||
Specifica dove salvare i checkpoints del tuo addestramento:
|
||||
|
||||
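Per esempio, uno sketch minimo (il nome della directory `test_trainer` è puramente illustrativo):

```py
>>> from transformers import TrainingArguments

>>> training_args = TrainingArguments(output_dir="test_trainer")
```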
@ -168,7 +168,7 @@ Il [`DefaultDataCollator`] assembla tensori in lotti su cui il modello si addest
|
||||
|
||||
</Tip>
|
||||
|
||||
Successivamente, converti i datasets tokenizzati in TensorFlow datasets con il metodo [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset). Specifica il tuo input in `columns`, e le tue etichette in `label_cols`:
|
||||
Successivamente, converti i datasets tokenizzati in TensorFlow datasets con il metodo [`to_tf_dataset`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.to_tf_dataset). Specifica il tuo input in `columns` e le tue etichette in `label_cols`:
|
||||
|
||||
```py
|
||||
>>> tf_train_dataset = small_train_dataset.to_tf_dataset(
|
||||
@ -199,7 +199,7 @@ Carica un modello TensorFlow col numero atteso di etichette:
|
||||
>>> model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)
|
||||
```
|
||||
|
||||
Poi compila e metti a punto il tuo modello usando [`fit`](https://keras.io/api/models/model_training_apis/) come si farebbe con qualsiasi altro modello di Keras:
|
||||
Poi compila e fai il fine-tuning del tuo modello usando [`fit`](https://keras.io/api/models/model_training_apis/) come faresti con qualsiasi altro modello di Keras:
|
||||
|
||||
```py
|
||||
>>> model.compile(
|
||||
@ -221,7 +221,7 @@ Poi compila e metti a punto il tuo modello usando [`fit`](https://keras.io/api/m
|
||||
<pt>
|
||||
<Youtube id="Dh9CL8fyG80"/>
|
||||
|
||||
[`Trainer`] si occupa del ciclo di addestramento e ti consente di mettere a punto un modello con una sola riga di codice. Per chi preferisse scrivere un proprio ciclo di addestramento personale, puoi anche mettere a punto un modello 🤗 Transformers in PyTorch nativo.
|
||||
[`Trainer`] si occupa del ciclo di addestramento e ti consente di mettere a punto un modello con una sola riga di codice. Per chi preferisse scrivere un proprio ciclo di addestramento personale, puoi anche fare il fine-tuning di un modello 🤗 Transformers in PyTorch nativo.
|
||||
|
||||
A questo punto, potresti avere bisogno di riavviare il tuo notebook o eseguire il seguente codice per liberare un po' di memoria:
|
||||
|
||||
@ -278,9 +278,9 @@ Carica il tuo modello con il numero atteso di etichette:
|
||||
>>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=5)
|
||||
```
|
||||
|
||||
### Ottimizzatore e pianificatore del tasso di apprendimento
|
||||
### Ottimizzatore e learning rate scheduler
|
||||
|
||||
Crea un ottimizzatore e pianificatore del tasso di apprendimento per mettere a punto il modello. Usa l'ottimizzatore [`AdamW`](https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html) di PyTorch:
|
||||
Crea un ottimizzatore e il learning rate scheduler per fare il fine-tuning del modello. Usa l'ottimizzatore [`AdamW`](https://pytorch.org/docs/stable/generated/torch.optim.AdamW.html) di PyTorch:
|
||||
|
||||
```py
|
||||
>>> from torch.optim import AdamW
|
||||
@ -288,7 +288,7 @@ Crea un ottimizzatore e pianificatore del tasso di apprendimento per mettere a p
|
||||
>>> optimizer = AdamW(model.parameters(), lr=5e-5)
|
||||
```
|
||||
|
||||
Crea il pianificatore del tasso di apprendimento predefinito da [`Trainer`]:
|
||||
Crea il learning rate scheduler predefinito da [`Trainer`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import get_scheduler
|
||||
@ -300,7 +300,7 @@ Crea il pianificatore del tasso di apprendimento predefinito da [`Trainer`]:
|
||||
... )
|
||||
```
|
||||
|
||||
Infine, specifica come `device` da usare una GPU se ne hai una. Altrimenti, l'addestramento su una CPU può richiedere diverse ore invece di un paio di minuti.
|
||||
Infine specifica come `device` da usare una GPU se ne hai una. Altrimenti, l'addestramento su una CPU può richiedere diverse ore invece di un paio di minuti.
|
||||
|
||||
```py
|
||||
>>> import torch
|
||||
@ -342,7 +342,7 @@ Per tenere traccia dei tuoi progressi durante l'addestramento, usa la libreria [
|
||||
|
||||
### Metriche
|
||||
|
||||
Proprio come è necessario aggiungere una funzione di valutazione del [`Trainer`], è necessario fare lo stesso quando si scrive il proprio ciclo di allenamento. Ma invece di calcolare e riportare la metrica alla fine di ogni epoca, questa volta accumulerai tutti i batch con [`add_batch`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=add_batch#datasets.Metric.add_batch) e calcolerai la metrica alla fine.
|
||||
Proprio come è necessario aggiungere una funzione di valutazione del [`Trainer`], è necessario fare lo stesso quando si scrive il proprio ciclo di addestramento. Ma invece di calcolare e riportare la metrica alla fine di ogni epoca, questa volta accumulerai tutti i batch con [`add_batch`](https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=add_batch#datasets.Metric.add_batch) e calcolerai la metrica alla fine.
|
||||
|
||||
```py
|
||||
>>> metric = load_metric("accuracy")
|
||||
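>>> # Sketch illustrativo (assume `model`, `eval_dataloader`, `device` e `torch`
>>> # definiti come nel resto della guida): accumula i batch con `add_batch`
>>> # e calcola la metrica alla fine.
>>> model.eval()
>>> for batch in eval_dataloader:
...     batch = {k: v.to(device) for k, v in batch.items()}
...     with torch.no_grad():
...         outputs = model(**batch)
...     predictions = torch.argmax(outputs.logits, dim=-1)
...     metric.add_batch(predictions=predictions, references=batch["labels"])

>>> metric.compute()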
|
@ -43,6 +43,22 @@ To browse the examples corresponding to released versions of 🤗 Transformers,
|
||||
<details>
|
||||
<summary>Examples for older versions of 🤗 Transformers</summary>
|
||||
<ul>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.21.0/examples">v4.21.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.20.1/examples">v4.20.1</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.19.4/examples">v4.19.4</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.18.0/examples">v4.18.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.17.0/examples">v4.17.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.16.2/examples">v4.16.2</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.15.0/examples">v4.15.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.14.1/examples">v4.14.1</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.13.0/examples">v4.13.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.12.5/examples">v4.12.5</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.11.3/examples">v4.11.3</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.10.3/examples">v4.10.3</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.9.2/examples">v4.9.2</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.8.2/examples">v4.8.2</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.7.0/examples">v4.7.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.6.1/examples">v4.6.1</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li>
|
||||
|
@ -4,4 +4,5 @@ conllu
|
||||
nltk
|
||||
rouge-score
|
||||
seqeval
|
||||
tensorboard
|
||||
tensorboard
|
||||
evaluate >= 0.2.0
|
@ -31,10 +31,11 @@ from typing import Callable, Optional
|
||||
import datasets
|
||||
import nltk # Here to have a nice missing dependency error message early on
|
||||
import numpy as np
|
||||
from datasets import Dataset, load_dataset, load_metric
|
||||
from datasets import Dataset, load_dataset
|
||||
from PIL import Image
|
||||
from tqdm import tqdm
|
||||
|
||||
import evaluate
|
||||
import jax
|
||||
import jax.numpy as jnp
|
||||
import optax
|
||||
@ -551,11 +552,14 @@ def main():
|
||||
targets = captions
|
||||
|
||||
model_inputs = {}
|
||||
# Setup the tokenizer for targets
|
||||
with tokenizer.as_target_tokenizer():
|
||||
labels = tokenizer(
|
||||
targets, max_length=max_target_length, padding="max_length", truncation=True, return_tensors="np"
|
||||
)
|
||||
|
||||
labels = tokenizer(
|
||||
text_target=targets,
|
||||
max_length=max_target_length,
|
||||
padding="max_length",
|
||||
truncation=True,
|
||||
return_tensors="np",
|
||||
)
|
||||
model_inputs["labels"] = labels["input_ids"]
|
||||
decoder_input_ids = shift_tokens_right_fn(
|
||||
labels["input_ids"], model.config.pad_token_id, model.config.decoder_start_token_id
|
||||
@ -811,7 +815,7 @@ def main():
|
||||
yield batch
|
||||
|
||||
# Metric
|
||||
metric = load_metric("rouge")
|
||||
metric = evaluate.load("rouge")
|
||||
|
||||
def postprocess_text(preds, labels):
|
||||
preds = [pred.strip() for pred in preds]
|
||||
@ -875,15 +879,19 @@ def main():
|
||||
# to bias and LayerNorm scale parameters. decay_mask_fn returns a
|
||||
# mask boolean with the same structure as the parameters.
|
||||
# The mask is True for parameters that should be decayed.
|
||||
# Note that this mask is specifically adapted for FlaxBart.
|
||||
# For FlaxT5, one should correct the layer norm parameter naming
|
||||
# accordingly - see `run_t5_mlm_flax.py` e.g.
|
||||
def decay_mask_fn(params):
|
||||
flat_params = traverse_util.flatten_dict(params)
|
||||
layer_norm_params = [
|
||||
(name, "scale") for name in ["self_attn_layer_norm", "layernorm_embedding", "final_layer_norm"]
|
||||
]
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_params) for path in flat_params}
|
||||
# find out all LayerNorm parameters
|
||||
layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
|
||||
layer_norm_named_params = set(
|
||||
[
|
||||
layer[-2:]
|
||||
for layer_norm_name in layer_norm_candidates
|
||||
for layer in flat_params.keys()
|
||||
if layer_norm_name in "".join(layer).lower()
|
||||
]
|
||||
)
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
|
||||
return traverse_util.unflatten_dict(flat_mask)
|
||||
|
||||
# create adam optimizer
|
||||
|
@ -338,6 +338,98 @@ of 2.36 and 57.0 respectively after 3 epochs on a single TPUv3-8.
|
||||
This should take around 4.5 hours.
|
||||
Training statistics can be accessed directly on the 🤗 [hub](https://huggingface.co/patrickvonplaten/t5-base-norwegian/tensorboard)
|
||||
|
||||
## BART: Denoising language modeling
|
||||
|
||||
In the following, we demonstrate how to train a BART model
|
||||
using denoising language modeling objective as introduced in [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461).
|
||||
More specifically, we demonstrate how JAX/Flax can be leveraged
|
||||
to pre-train [**`bart-base`**](https://huggingface.co/facebook/bart-base)
|
||||
in Norwegian on a single TPUv3-8 pod.
|
||||
|
||||
The example script uses the 🤗 Datasets library. You can easily customize it to your needs if you need extra processing on your datasets.
|
||||
|
||||
To set up all relevant files for training, let's create a directory.
|
||||
|
||||
```bash
|
||||
mkdir ./norwegian-bart-base
|
||||
```
|
||||
|
||||
### Train tokenizer
|
||||
In the first step, we train a tokenizer to efficiently process the text input for the model. Similar to how it is shown in [How to train a new language model from scratch using Transformers and Tokenizers](https://huggingface.co/blog/how-to-train), we use a **`ByteLevelBPETokenizer`**.
|
||||
The tokenizer is trained on the complete Norwegian dataset of OSCAR
|
||||
and subsequently saved in the model directory we just created.
|
||||
This can take up to 10 minutes depending on your hardware ☕.
|
||||
|
||||
```python
|
||||
from datasets import load_dataset
|
||||
from tokenizers import trainers, Tokenizer, normalizers, ByteLevelBPETokenizer
|
||||
|
||||
# load dataset
|
||||
dataset = load_dataset("oscar", "unshuffled_deduplicated_no", split="train")
|
||||
|
||||
# Instantiate tokenizer
|
||||
tokenizer = ByteLevelBPETokenizer()
|
||||
|
||||
def batch_iterator(batch_size=1000):
|
||||
for i in range(0, len(dataset), batch_size):
|
||||
yield dataset[i: i + batch_size]["text"]
|
||||
|
||||
# Customized training
|
||||
tokenizer.train_from_iterator(batch_iterator(), vocab_size=50265, min_frequency=2, special_tokens=[
|
||||
"<s>",
|
||||
"<pad>",
|
||||
"</s>",
|
||||
"<unk>",
|
||||
"<mask>",
|
||||
])
|
||||
|
||||
# Save files to disk
|
||||
tokenizer.save("./norwegian-bart-base/tokenizer.json")
|
||||
```
|
||||
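As an optional sanity check (an addition, not one of the original steps), the saved `tokenizer.json` can be reloaded with the `tokenizers` library and used to encode a short sentence:

```python
from tokenizers import Tokenizer

# Reload the tokenizer trained above and inspect the tokens it produces.
tokenizer = Tokenizer.from_file("./norwegian-bart-base/tokenizer.json")
print(tokenizer.encode("Dette er en setning.").tokens)
```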
|
||||
### Create configuration
|
||||
|
||||
Next, we create the model's configuration file. This is as simple
|
||||
as loading and storing [**`facebook/bart-base`**](https://huggingface.co/facebook/bart-base)
|
||||
in the local model folder:
|
||||
|
||||
```python
|
||||
from transformers import BartConfig
|
||||
config = BartConfig.from_pretrained("facebook/bart-base", vocab_size=50265)
|
||||
config.save_pretrained("./norwegian-bart-base")
|
||||
```
|
||||
|
||||
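Depending on your version of 🤗 Transformers, `--tokenizer_name` may expect a tokenizer saved in the Transformers format rather than a raw `tokenizer.json`. A minimal sketch of how one could wrap and re-save it, assuming the special tokens used when training the tokenizer above:

```python
from transformers import PreTrainedTokenizerFast

# Wrap the raw tokenizers file so that AutoTokenizer can load the directory.
wrapped_tokenizer = PreTrainedTokenizerFast(
    tokenizer_file="./norwegian-bart-base/tokenizer.json",
    bos_token="<s>",
    eos_token="</s>",
    unk_token="<unk>",
    pad_token="<pad>",
    mask_token="<mask>",
)
wrapped_tokenizer.save_pretrained("./norwegian-bart-base")
```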
Great, we have set up our model repository. During training, we will automatically
|
||||
push the training logs and model weights to the repo.
|
||||
|
||||
### Train model
|
||||
|
||||
Next we can run the example script to pretrain the model:
|
||||
|
||||
```bash
|
||||
python run_bart_dlm_flax.py \
|
||||
--output_dir="./norwegian-bart-base" \
|
||||
--config_name="./norwegian-bart-base" \
|
||||
--tokenizer_name="./norwegian-bart-base" \
|
||||
--dataset_name="oscar" \
|
||||
--dataset_config_name="unshuffled_deduplicated_no" \
|
||||
--max_seq_length="1024" \
|
||||
--per_device_train_batch_size="32" \
|
||||
--per_device_eval_batch_size="32" \
|
||||
--learning_rate="1e-4" \
|
||||
--warmup_steps="2000" \
|
||||
--overwrite_output_dir \
|
||||
--logging_steps="500" \
|
||||
--save_steps="2000" \
|
||||
--eval_steps="2000" \
|
||||
--push_to_hub
|
||||
```
|
||||
|
||||
Training should converge at a loss and accuracy
|
||||
of 1.36 and 0.77 respectively after 3 epochs on a single TPUv3-8.
|
||||
This should take less than 6 hours.
|
||||
Training statistics can be accessed on [tensorboard.dev](https://tensorboard.dev/experiment/Maw62QlaSXWS0MOf2V2lbg/).
|
||||
|
||||
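Once training has finished, the checkpoint can be loaded back like any other Flax checkpoint. A minimal sketch, assuming the run above completed and saved its weights to `./norwegian-bart-base`:

```python
from transformers import AutoTokenizer, FlaxBartForConditionalGeneration

tokenizer = AutoTokenizer.from_pretrained("./norwegian-bart-base")
model = FlaxBartForConditionalGeneration.from_pretrained("./norwegian-bart-base")

# Fill in a masked span with the freshly pre-trained model.
inputs = tokenizer("Dette er en <mask> setning.", return_tensors="np")
outputs = model.generate(**inputs, max_length=20)
print(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True))
```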
## Runtime evaluation
|
||||
|
||||
We also ran masked language modeling using PyTorch/XLA on a TPUv3-8, and PyTorch on 8 V100 GPUs. We report the
|
||||
|
964 examples/flax/language-modeling/run_bart_dlm_flax.py (new file)
@ -0,0 +1,964 @@
|
||||
#!/usr/bin/env python
|
||||
# coding=utf-8
|
||||
# Copyright 2021 The HuggingFace Team All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
Pretraining the library models for denoising language modeling on a text file or a dataset.
|
||||
Here is the full list of checkpoints on the hub that can be pretrained by this script:
|
||||
https://huggingface.co/models?filter=bart
|
||||
"""
|
||||
# You can also adapt this script on your own denoising language modeling task. Pointers for this are left as comments.
|
||||
|
||||
import json
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from dataclasses import asdict, dataclass, field
|
||||
from enum import Enum
|
||||
from itertools import chain
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
import nltk
|
||||
import numpy as np
|
||||
from datasets import load_dataset
|
||||
from tqdm import tqdm
|
||||
|
||||
import flax
|
||||
import jax
|
||||
import jax.numpy as jnp
|
||||
import optax
|
||||
from flax import jax_utils, traverse_util
|
||||
from flax.jax_utils import pad_shard_unpad
|
||||
from flax.training import train_state
|
||||
from flax.training.common_utils import get_metrics, onehot, shard
|
||||
from huggingface_hub import Repository
|
||||
from transformers import (
|
||||
CONFIG_MAPPING,
|
||||
FLAX_MODEL_FOR_MASKED_LM_MAPPING,
|
||||
AutoTokenizer,
|
||||
BartConfig,
|
||||
BatchEncoding,
|
||||
FlaxBartForConditionalGeneration,
|
||||
HfArgumentParser,
|
||||
PreTrainedTokenizerBase,
|
||||
is_tensorboard_available,
|
||||
set_seed,
|
||||
)
|
||||
from transformers.models.bart.modeling_flax_bart import shift_tokens_right
|
||||
from transformers.utils import get_full_repo_name, send_example_telemetry
|
||||
|
||||
|
||||
MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys())
|
||||
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
|
||||
|
||||
|
||||
@dataclass
|
||||
class TrainingArguments:
|
||||
output_dir: str = field(
|
||||
metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
|
||||
)
|
||||
overwrite_output_dir: bool = field(
|
||||
default=False,
|
||||
metadata={
|
||||
"help": (
|
||||
"Overwrite the content of the output directory. "
|
||||
"Use this to continue training if output_dir points to a checkpoint directory."
|
||||
)
|
||||
},
|
||||
)
|
||||
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
|
||||
do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."})
|
||||
per_device_train_batch_size: int = field(
|
||||
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
|
||||
)
|
||||
per_device_eval_batch_size: int = field(
|
||||
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
|
||||
)
|
||||
learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."})
|
||||
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."})
|
||||
adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"})
|
||||
adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"})
|
||||
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."})
|
||||
adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."})
|
||||
num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
|
||||
warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
|
||||
logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
|
||||
save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."})
|
||||
eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
|
||||
seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."})
|
||||
push_to_hub: bool = field(
|
||||
default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."}
|
||||
)
|
||||
hub_model_id: str = field(
|
||||
default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."}
|
||||
)
|
||||
hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."})
|
||||
|
||||
def __post_init__(self):
|
||||
if self.output_dir is not None:
|
||||
self.output_dir = os.path.expanduser(self.output_dir)
|
||||
|
||||
def to_dict(self):
|
||||
"""
|
||||
Serializes this instance while replacing `Enum` members by their values (for JSON serialization support). It obfuscates
|
||||
the token values by removing their value.
|
||||
"""
|
||||
d = asdict(self)
|
||||
for k, v in d.items():
|
||||
if isinstance(v, Enum):
|
||||
d[k] = v.value
|
||||
if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum):
|
||||
d[k] = [x.value for x in v]
|
||||
if k.endswith("_token"):
|
||||
d[k] = f"<{k.upper()}>"
|
||||
return d
|
||||
|
||||
|
||||
@dataclass
|
||||
class ModelArguments:
|
||||
"""
|
||||
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
|
||||
"""
|
||||
|
||||
model_name_or_path: Optional[str] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"help": (
|
||||
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
|
||||
)
|
||||
},
|
||||
)
|
||||
model_type: Optional[str] = field(
|
||||
default=None,
|
||||
metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
|
||||
)
|
||||
config_name: Optional[str] = field(
|
||||
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
|
||||
)
|
||||
tokenizer_name: Optional[str] = field(
|
||||
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
|
||||
)
|
||||
cache_dir: Optional[str] = field(
|
||||
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
|
||||
)
|
||||
use_fast_tokenizer: bool = field(
|
||||
default=True,
|
||||
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
|
||||
)
|
||||
dtype: Optional[str] = field(
|
||||
default="float32",
|
||||
metadata={
|
||||
"help": (
|
||||
"Floating-point format in which the model weights should be initialized and trained. Choose one of"
|
||||
" `[float32, float16, bfloat16]`."
|
||||
)
|
||||
},
|
||||
)
|
||||
use_auth_token: bool = field(
|
||||
default=False,
|
||||
metadata={
|
||||
"help": (
|
||||
"Will use the token generated when running `transformers-cli login` (necessary to use this script "
|
||||
"with private models)."
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class DataTrainingArguments:
|
||||
"""
|
||||
Arguments pertaining to what data we are going to input our model for training and eval.
|
||||
"""
|
||||
|
||||
dataset_name: Optional[str] = field(
|
||||
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
|
||||
)
|
||||
dataset_config_name: Optional[str] = field(
|
||||
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
|
||||
)
|
||||
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
|
||||
validation_file: Optional[str] = field(
|
||||
default=None,
|
||||
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
|
||||
)
|
||||
train_ref_file: Optional[str] = field(
|
||||
default=None,
|
||||
metadata={"help": "An optional input train ref data file for whole word masking in Chinese."},
|
||||
)
|
||||
validation_ref_file: Optional[str] = field(
|
||||
default=None,
|
||||
metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."},
|
||||
)
|
||||
overwrite_cache: bool = field(
|
||||
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
|
||||
)
|
||||
validation_split_percentage: Optional[int] = field(
|
||||
default=5,
|
||||
metadata={
|
||||
"help": "The percentage of the train set used as validation set in case there's no validation split"
|
||||
},
|
||||
)
|
||||
max_seq_length: Optional[int] = field(
|
||||
default=None,
|
||||
metadata={
|
||||
"help": (
|
||||
"The maximum total input sequence length after tokenization and masking. Sequences longer than this"
|
||||
" will be truncated. Default to the max input length of the model."
|
||||
)
|
||||
},
|
||||
)
|
||||
preprocessing_num_workers: Optional[int] = field(
|
||||
default=None,
|
||||
metadata={"help": "The number of processes to use for the preprocessing."},
|
||||
)
|
||||
mlm_probability: float = field(
|
||||
default=0.3, metadata={"help": "Ratio of tokens to mask for span masked language modeling loss"}
|
||||
)
|
||||
permute_sentence_ratio: float = field(
|
||||
default=1.0, metadata={"help": "Ratio of sentences to be permuted in each document"}
|
||||
)
|
||||
poisson_lambda: float = field(
|
||||
default=3.0, metadata={"help": "Mean of Poisson distribution used to generate span-lengths to be masked"}
|
||||
)
|
||||
|
||||
def __post_init__(self):
|
||||
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
|
||||
raise ValueError("Need either a dataset name or a training/validation file.")
|
||||
else:
|
||||
if self.train_file is not None:
|
||||
extension = self.train_file.split(".")[-1]
|
||||
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
|
||||
if self.validation_file is not None:
|
||||
extension = self.validation_file.split(".")[-1]
|
||||
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
|
||||
|
||||
|
||||
@flax.struct.dataclass
|
||||
class FlaxDataCollatorForBartDenoisingLM:
|
||||
"""
|
||||
Data collator used for BART denoising language modeling. The code is largely copied from
|
||||
`<https://github.com/morganmcg1/rotobart/blob/main/data_collator.py#L223>`__.
|
||||
For more information on how BART denoising language modeling works, one can take a look
|
||||
at the `official paper <https://arxiv.org/pdf/1910.13461.pdf>`__
|
||||
or the `official code for preprocessing <https://github.com/facebookresearch/fairseq/blob/main/fairseq/data/denoising_dataset.py>`__ .
|
||||
Args:
|
||||
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
|
||||
The tokenizer used for encoding the data
|
||||
mask_ratio (:obj:`float`):
|
||||
The probability with which to (randomly) mask tokens in the input
|
||||
poisson_lambda (:obj:`float`):
|
||||
Mean parameter of Poisson distribution used to generate span-lengths to be masked
|
||||
permute_sentence_ratio (:obj:`float`):
|
||||
Ratio of sentences to be permuted in each document
|
||||
decoder_start_token_id (:obj:`int`):
|
||||
The decoder start token id of the model
|
||||
"""
|
||||
|
||||
tokenizer: PreTrainedTokenizerBase
|
||||
decoder_start_token_id: int
|
||||
mask_ratio: float = 0.3
|
||||
poisson_lambda: float = 3.0
|
||||
permute_sentence_ratio: float = 1.0
|
||||
|
||||
def __post_init__(self):
|
||||
if self.tokenizer.mask_token is None or self.tokenizer.eos_token is None:
|
||||
raise ValueError(
|
||||
"This tokenizer does not have a mask token or eos token token which is necessary for denoising"
|
||||
" language modeling. "
|
||||
)
|
||||
|
||||
def __call__(self, examples: List[Dict[str, List[int]]]) -> BatchEncoding:
|
||||
# convert list to dict and tensorize input
|
||||
batch = BatchEncoding(
|
||||
{k: np.array([examples[i][k] for i in range(len(examples))]) for k, v in examples[0].items()}
|
||||
)
|
||||
batch["labels"] = batch["input_ids"].copy()
|
||||
batch["decoder_input_ids"] = shift_tokens_right(
|
||||
batch["labels"], self.tokenizer.pad_token_id, self.decoder_start_token_id
|
||||
)
|
||||
# permuting sentences
|
||||
do_permute = False
|
||||
if self.permute_sentence_ratio > 0.0:
|
||||
batch["input_ids"] = self.permute_sentences(batch["input_ids"])
|
||||
do_permute = True
|
||||
|
||||
# masking span of tokens (text infilling in the paper)
|
||||
if self.mask_ratio:
|
||||
batch["input_ids"], batch["labels"] = self.span_mask_tokens(
|
||||
batch["input_ids"], batch["labels"], do_permute
|
||||
)
|
||||
|
||||
# ignore pad tokens
|
||||
batch["attention_mask"] = (batch["input_ids"] != self.tokenizer.pad_token_id).astype(int)
|
||||
batch["decoder_attention_mask"] = (batch["decoder_input_ids"] != self.tokenizer.pad_token_id).astype(int)
|
||||
return batch
|
||||
|
||||
def permute_sentences(self, input_ids):
|
||||
"""
|
||||
Shuffle sentences in each document.
|
||||
"""
|
||||
results = input_ids.copy()
|
||||
|
||||
# find end locations of sentences
|
||||
end_sentence_mask = input_ids == self.tokenizer.pad_token_id
|
||||
sentence_ends = np.argwhere(end_sentence_mask)
|
||||
sentence_ends[:, 1] += 1
|
||||
example_has_multiple_sentences, num_sentences = np.unique(sentence_ends[:, 0], return_counts=True)
|
||||
num_sentences_map = {sent_idx: count for sent_idx, count in zip(example_has_multiple_sentences, num_sentences)}
|
||||
|
||||
num_to_permute = np.ceil(num_sentences * self.permute_sentence_ratio).astype(int)
|
||||
num_to_permute_map = {
|
||||
sent_idx: count for sent_idx, count in zip(example_has_multiple_sentences, num_to_permute)
|
||||
}
|
||||
|
||||
sentence_ends = np.split(sentence_ends[:, 1], np.unique(sentence_ends[:, 0], return_index=True)[1][1:])
|
||||
sentence_ends_map = {sent_idx: count for sent_idx, count in zip(example_has_multiple_sentences, sentence_ends)}
|
||||
|
||||
for i in range(input_ids.shape[0]):
|
||||
if i not in example_has_multiple_sentences:
|
||||
continue
|
||||
substitutions = np.random.permutation(num_sentences_map[i])[: num_to_permute_map[i]]
|
||||
ordering = np.arange(0, num_sentences_map[i])
|
||||
ordering[substitutions] = substitutions[np.random.permutation(num_to_permute_map[i])]
|
||||
|
||||
# write shuffled sentences into results
|
||||
index = 0
|
||||
for j in ordering:
|
||||
sentence = input_ids[i, (sentence_ends_map[i][j - 1] if j > 0 else 0) : sentence_ends_map[i][j]]
|
||||
results[i, index : index + sentence.shape[0]] = sentence
|
||||
index += sentence.shape[0]
|
||||
return results
|
||||
|
||||
def span_mask_tokens(self, input_ids, labels, do_permute):
|
||||
"""
|
||||
Sampling text spans with span lengths drawn from a Poisson distribution and masking them.
|
||||
"""
|
||||
special_tokens_mask_labels = [
|
||||
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
|
||||
]
|
||||
special_tokens_mask_inputs = [
|
||||
self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in input_ids.tolist()
|
||||
]
|
||||
special_tokens_mask_labels = np.array(special_tokens_mask_labels, dtype=bool)
|
||||
special_tokens_mask_inputs = np.array(special_tokens_mask_inputs, dtype=bool)
|
||||
|
||||
# determine how many tokens we need to mask in total
|
||||
is_token_mask = ~(input_ids == self.tokenizer.pad_token_id) & ~special_tokens_mask_inputs
|
||||
num_tokens_to_mask = int(math.ceil(is_token_mask.astype(float).sum() * self.mask_ratio))
|
||||
if num_tokens_to_mask == 0:
|
||||
return input_ids, labels
|
||||
|
||||
# generate a sufficient number of span lengths
|
||||
span_lengths = np.random.poisson(lam=self.poisson_lambda, size=(num_tokens_to_mask,))
|
||||
while np.cumsum(span_lengths, 0)[-1] < num_tokens_to_mask:
|
||||
span_lengths = np.concatenate(
|
||||
[span_lengths, np.random.poisson(lam=self.poisson_lambda, size=(num_tokens_to_mask,))]
|
||||
)
|
||||
|
||||
# remove all spans of length 0
|
||||
# note that BART inserts additional mask tokens where length == 0,
|
||||
# which we do not implement for now as it adds additional complexity
|
||||
span_lengths = span_lengths[span_lengths > 0]
|
||||
|
||||
# trim to about num_tokens_to_mask tokens
|
||||
cutoff_idx = np.argmin(np.abs(np.cumsum(span_lengths, 0) - num_tokens_to_mask)) + 1
|
||||
span_lengths = span_lengths[:cutoff_idx]
|
||||
|
||||
# randomly choose starting positions for masking
|
||||
token_indices = np.argwhere(is_token_mask == 1)
|
||||
span_starts = np.random.permutation(token_indices.shape[0])[: span_lengths.shape[0]]
|
||||
# prepare mask
|
||||
masked_indices = np.array(token_indices[span_starts])
|
||||
mask = np.full_like(input_ids, fill_value=False)
|
||||
|
||||
# mask starting positions
|
||||
for mi in masked_indices:
|
||||
mask[tuple(mi)] = True
|
||||
span_lengths -= 1
|
||||
|
||||
# fill up spans
|
||||
max_index = input_ids.shape[1] - 1
|
||||
remaining = (span_lengths > 0) & (masked_indices[:, 1] < max_index)
|
||||
while np.any(remaining):
|
||||
masked_indices[remaining, 1] += 1
|
||||
for mi in masked_indices:
|
||||
mask[tuple(mi)] = True
|
||||
span_lengths -= 1
|
||||
remaining = (span_lengths > 0) & (masked_indices[:, 1] < max_index)
|
||||
|
||||
# place the mask tokens
|
||||
mask[np.where(special_tokens_mask_inputs)] = False
|
||||
input_ids[np.where(mask)] = self.tokenizer.mask_token_id
|
||||
if not do_permute:
|
||||
labels[np.where(mask == 0)] = -100
|
||||
else:
|
||||
labels[np.where(special_tokens_mask_labels)] = -100
|
||||
|
||||
# remove mask tokens that are not starts of spans
|
||||
to_remove = (mask == 1) & np.roll((mask == 1), 1, 1)
|
||||
new_input_ids = np.full_like(input_ids, fill_value=self.tokenizer.pad_token_id)
|
||||
for i, example in enumerate(input_ids):
|
||||
new_example = example[~to_remove[i]]
|
||||
new_input_ids[i, : new_example.shape[0]] = new_example
|
||||
|
||||
return new_input_ids, labels
|
||||
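# Illustrative usage (a sketch, not executed anywhere in this script): given the
# tokenizer and config built in `main`, the collator maps a list of examples with
# equal-length `input_ids` to noised encoder inputs plus clean decoder targets:
#
#   collator = FlaxDataCollatorForBartDenoisingLM(
#       tokenizer=tokenizer,
#       decoder_start_token_id=config.decoder_start_token_id,
#   )
#   batch = collator([{"input_ids": ids} for ids in tokenized_dataset["input_ids"]])
#   # -> input_ids, labels, decoder_input_ids, attention_mask, decoder_attention_mask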
|
||||
|
||||
def generate_batch_splits(samples_idx: np.ndarray, batch_size: int, drop_last=True) -> np.ndarray:
|
||||
"""Generate batches of data for a specified batch size from sample indices. If the dataset size is not divisible by
|
||||
the batch size and `drop_last` is `True`, the last incomplete batch is dropped. Else, it is returned."""
|
||||
num_samples = len(samples_idx)
|
||||
if drop_last:
|
||||
samples_to_remove = num_samples % batch_size
|
||||
if samples_to_remove != 0:
|
||||
samples_idx = samples_idx[:-samples_to_remove]
|
||||
sections_split = num_samples // batch_size
|
||||
samples_idx = samples_idx.reshape((sections_split, batch_size))
|
||||
else:
|
||||
sections_split = math.ceil(num_samples / batch_size)
|
||||
samples_idx = np.array_split(samples_idx, sections_split)
|
||||
return samples_idx
|
||||
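# For example (an illustrative note, not part of the original file):
#   generate_batch_splits(np.arange(10), batch_size=4)
#   -> array([[0, 1, 2, 3], [4, 5, 6, 7]])   (the two leftover samples are dropped)
#   generate_batch_splits(np.arange(10), batch_size=4, drop_last=False)
#   -> [array([0, 1, 2, 3]), array([4, 5, 6]), array([7, 8, 9])]   (np.array_split keeps them)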
|
||||
|
||||
def write_train_metric(summary_writer, train_metrics, train_time, step):
|
||||
summary_writer.scalar("train_time", train_time, step)
|
||||
|
||||
train_metrics = get_metrics(train_metrics)
|
||||
for key, vals in train_metrics.items():
|
||||
tag = f"train_{key}"
|
||||
for i, val in enumerate(vals):
|
||||
summary_writer.scalar(tag, val, step - len(vals) + i + 1)
|
||||
|
||||
|
||||
def write_eval_metric(summary_writer, eval_metrics, step):
|
||||
for metric_name, value in eval_metrics.items():
|
||||
summary_writer.scalar(f"eval_{metric_name}", value, step)
|
||||
|
||||
|
||||
def main():
|
||||
# See all possible arguments in src/transformers/training_args.py
|
||||
# or by passing the --help flag to this script.
|
||||
# We now keep distinct sets of args, for a cleaner separation of concerns.
|
||||
|
||||
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
|
||||
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
|
||||
# If we pass only one argument to the script and it's the path to a json file,
|
||||
# let's parse it to get our arguments.
|
||||
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
|
||||
else:
|
||||
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
|
||||
|
||||
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
|
||||
# information sent is the one passed as arguments along with your Python/PyTorch versions.
|
||||
send_example_telemetry("run_bart_dlm", model_args, data_args, framework="flax")
|
||||
|
||||
if (
|
||||
os.path.exists(training_args.output_dir)
|
||||
and os.listdir(training_args.output_dir)
|
||||
and training_args.do_train
|
||||
and not training_args.overwrite_output_dir
|
||||
):
|
||||
raise ValueError(
|
||||
f"Output directory ({training_args.output_dir}) already exists and is not empty."
|
||||
"Use --overwrite_output_dir to overcome."
|
||||
)
|
||||
|
||||
# Setup logging
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
||||
level=logging.INFO,
|
||||
datefmt="[%X]",
|
||||
)
|
||||
|
||||
# Log on each process the small summary:
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Set the verbosity to info of the Transformers logger (on main process only):
|
||||
logger.info(f"Training/evaluation parameters {training_args}")
|
||||
|
||||
# Set seed before initializing model.
|
||||
set_seed(training_args.seed)
|
||||
|
||||
# Handle the repository creation
|
||||
if training_args.push_to_hub:
|
||||
if training_args.hub_model_id is None:
|
||||
repo_name = get_full_repo_name(
|
||||
Path(training_args.output_dir).absolute().name, token=training_args.hub_token
|
||||
)
|
||||
else:
|
||||
repo_name = training_args.hub_model_id
|
||||
repo = Repository(training_args.output_dir, clone_from=repo_name)
|
||||
|
||||
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
|
||||
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
|
||||
# (the dataset will be downloaded automatically from the datasets Hub).
|
||||
#
|
||||
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
|
||||
# 'text' is found. You can easily tweak this behavior (see below).
|
||||
if data_args.dataset_name is not None:
|
||||
# Downloading and loading a dataset from the hub.
|
||||
datasets = load_dataset(
|
||||
data_args.dataset_name,
|
||||
data_args.dataset_config_name,
|
||||
cache_dir=model_args.cache_dir,
|
||||
use_auth_token=True if model_args.use_auth_token else None,
|
||||
)
|
||||
|
||||
if "validation" not in datasets.keys():
|
||||
datasets["validation"] = load_dataset(
|
||||
data_args.dataset_name,
|
||||
data_args.dataset_config_name,
|
||||
split=f"train[:{data_args.validation_split_percentage}%]",
|
||||
cache_dir=model_args.cache_dir,
|
||||
use_auth_token=True if model_args.use_auth_token else None,
|
||||
)
|
||||
datasets["train"] = load_dataset(
|
||||
data_args.dataset_name,
|
||||
data_args.dataset_config_name,
|
||||
split=f"train[{data_args.validation_split_percentage}%:]",
|
||||
cache_dir=model_args.cache_dir,
|
||||
use_auth_token=True if model_args.use_auth_token else None,
|
||||
)
|
||||
else:
|
||||
data_files = {}
|
||||
if data_args.train_file is not None:
|
||||
data_files["train"] = data_args.train_file
|
||||
if data_args.validation_file is not None:
|
||||
data_files["validation"] = data_args.validation_file
|
||||
extension = data_args.train_file.split(".")[-1]
|
||||
if extension == "txt":
|
||||
extension = "text"
|
||||
datasets = load_dataset(
|
||||
extension,
|
||||
data_files=data_files,
|
||||
cache_dir=model_args.cache_dir,
|
||||
use_auth_token=True if model_args.use_auth_token else None,
|
||||
)
|
||||
|
||||
if "validation" not in datasets.keys():
|
||||
datasets["validation"] = load_dataset(
|
||||
extension,
|
||||
data_files=data_files,
|
||||
split=f"train[:{data_args.validation_split_percentage}%]",
|
||||
cache_dir=model_args.cache_dir,
|
||||
use_auth_token=True if model_args.use_auth_token else None,
|
||||
)
|
||||
datasets["train"] = load_dataset(
|
||||
extension,
|
||||
data_files=data_files,
|
||||
split=f"train[{data_args.validation_split_percentage}%:]",
|
||||
cache_dir=model_args.cache_dir,
|
||||
use_auth_token=True if model_args.use_auth_token else None,
|
||||
)
|
||||
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
|
||||
# https://huggingface.co/docs/datasets/loading_datasets.html.
|
||||
|
||||
# Load pretrained model and tokenizer
|
||||
|
||||
if model_args.tokenizer_name:
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
model_args.tokenizer_name,
|
||||
cache_dir=model_args.cache_dir,
|
||||
use_fast=model_args.use_fast_tokenizer,
|
||||
use_auth_token=True if model_args.use_auth_token else None,
|
||||
)
|
||||
elif model_args.model_name_or_path:
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
model_args.model_name_or_path,
|
||||
cache_dir=model_args.cache_dir,
|
||||
use_fast=model_args.use_fast_tokenizer,
|
||||
use_auth_token=True if model_args.use_auth_token else None,
|
||||
)
|
||||
else:
|
||||
raise ValueError(
|
||||
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
|
||||
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
|
||||
)
|
||||
|
||||
if model_args.config_name:
|
||||
config = BartConfig.from_pretrained(
|
||||
model_args.config_name,
|
||||
cache_dir=model_args.cache_dir,
|
||||
vocab_size=len(tokenizer),
|
||||
use_auth_token=True if model_args.use_auth_token else None,
|
||||
)
|
||||
elif model_args.model_name_or_path:
|
||||
config = BartConfig.from_pretrained(
|
||||
model_args.model_name_or_path,
|
||||
cache_dir=model_args.cache_dir,
|
||||
use_auth_token=True if model_args.use_auth_token else None,
|
||||
)
|
||||
else:
|
||||
config = CONFIG_MAPPING[model_args.model_type]()
|
||||
logger.warning("You are instantiating a new config instance from scratch.")
|
||||
|
||||
# Preprocessing the datasets.
|
||||
# First we tokenize all the texts.
|
||||
if training_args.do_train:
|
||||
column_names = datasets["train"].column_names
|
||||
else:
|
||||
column_names = datasets["validation"].column_names
|
||||
text_column_name = "text" if "text" in column_names else column_names[0]
|
||||
|
||||
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
|
||||
|
||||
# Use Punkt Sentence Tokenizer to divide a document into a list of sentences
|
||||
nltk.download("punkt")
|
||||
sentence_tokenizer = nltk.data.load("tokenizers/punkt/english.pickle")
|
||||
|
||||
def sentence_split_function(example):
|
||||
sents = sentence_tokenizer.tokenize(example["text"])
|
||||
# use pad token as end of sentence indicator
|
||||
new_text = tokenizer.bos_token + f"{tokenizer.pad_token}".join(sents) + tokenizer.eos_token
|
||||
return {"text": new_text}
|
||||
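# For example (illustrative): "Hei på deg. Hvordan går det?" becomes
# "<s>Hei på deg.<pad>Hvordan går det?</s>", so the data collator can later find
# sentence boundaries via the pad token when permuting sentences.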
|
||||
split_datasets = datasets.map(
|
||||
sentence_split_function,
|
||||
batched=False,
|
||||
num_proc=data_args.preprocessing_num_workers,
|
||||
remove_columns=column_names,
|
||||
load_from_cache_file=not data_args.overwrite_cache,
|
||||
)
|
||||
|
||||
# Tokenize every text, then concatenate them together before splitting them in smaller parts.
|
||||
# Since we make sure that all sequences are of the same length, no attention_mask is needed.
|
||||
def tokenize_function(examples):
|
||||
return tokenizer(examples[text_column_name], add_special_tokens=False, return_attention_mask=False)
|
||||
|
||||
tokenized_datasets = split_datasets.map(
|
||||
tokenize_function,
|
||||
batched=True,
|
||||
num_proc=data_args.preprocessing_num_workers,
|
||||
remove_columns=text_column_name,
|
||||
load_from_cache_file=not data_args.overwrite_cache,
|
||||
)
|
||||
|
||||
# Main data processing function that will concatenate all texts from our dataset and generate chunks of
|
||||
# max_seq_length.
|
||||
def group_texts(examples):
|
||||
# Concatenate all texts.
|
||||
concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
|
||||
total_length = len(concatenated_examples[list(examples.keys())[0]])
|
||||
# We drop the small remainder, we could add padding if the model supported it instead of this drop, you can
|
||||
# customize this part to your needs.
|
||||
if total_length >= max_seq_length:
|
||||
total_length = (total_length // max_seq_length) * max_seq_length
|
||||
# Split by chunks of max_len.
|
||||
result = {
|
||||
k: [t[i : i + max_seq_length] for i in range(0, total_length, max_seq_length)]
|
||||
for k, t in concatenated_examples.items()
|
||||
}
|
||||
return result
|
||||
|
||||
# Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a
|
||||
# remainder for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value
|
||||
# might be slower to preprocess.
|
||||
#
|
||||
# To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
|
||||
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map
|
||||
tokenized_datasets = tokenized_datasets.map(
|
||||
group_texts,
|
||||
batched=True,
|
||||
num_proc=data_args.preprocessing_num_workers,
|
||||
load_from_cache_file=not data_args.overwrite_cache,
|
||||
)
|
||||
|
||||
# Enable tensorboard only on the master node
|
||||
has_tensorboard = is_tensorboard_available()
|
||||
if has_tensorboard and jax.process_index() == 0:
|
||||
try:
|
||||
from flax.metrics.tensorboard import SummaryWriter
|
||||
|
||||
summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
|
||||
except ImportError as ie:
|
||||
has_tensorboard = False
|
||||
logger.warning(
|
||||
f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
"Unable to display metrics through TensorBoard because the package is not installed: "
|
||||
"Please run pip install tensorboard to enable."
|
||||
)
|
||||
|
||||
# Initialize our training
|
||||
rng = jax.random.PRNGKey(training_args.seed)
|
||||
dropout_rngs = jax.random.split(rng, jax.local_device_count())
|
||||
|
||||
if model_args.model_name_or_path:
|
||||
model = FlaxBartForConditionalGeneration.from_pretrained(
|
||||
model_args.model_name_or_path,
|
||||
config=config,
|
||||
seed=training_args.seed,
|
||||
dtype=getattr(jnp, model_args.dtype),
|
||||
use_auth_token=True if model_args.use_auth_token else None,
|
||||
)
|
||||
else:
|
||||
config.vocab_size = len(tokenizer)
|
||||
model = FlaxBartForConditionalGeneration(
|
||||
config,
|
||||
seed=training_args.seed,
|
||||
dtype=getattr(jnp, model_args.dtype),
|
||||
)
|
||||
|
||||
# Data collator
|
||||
# This one will take care of randomly masking the tokens and permuting the sentences.
|
||||
data_collator = FlaxDataCollatorForBartDenoisingLM(
|
||||
tokenizer=tokenizer,
|
||||
decoder_start_token_id=model.config.decoder_start_token_id,
|
||||
mask_ratio=data_args.mlm_probability,
|
||||
poisson_lambda=data_args.poisson_lambda,
|
||||
permute_sentence_ratio=data_args.permute_sentence_ratio,
|
||||
)
|
||||
|
||||
# Store some constant
|
||||
num_epochs = int(training_args.num_train_epochs)
|
||||
train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
|
||||
per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
|
||||
eval_batch_size = per_device_eval_batch_size * jax.device_count()
|
||||
|
||||
num_train_steps = len(tokenized_datasets["train"]) // train_batch_size * num_epochs
|
||||
|
||||
# Create learning rate schedule
|
||||
warmup_fn = optax.linear_schedule(
|
||||
init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps
|
||||
)
|
||||
decay_fn = optax.linear_schedule(
|
||||
init_value=training_args.learning_rate,
|
||||
end_value=0,
|
||||
transition_steps=num_train_steps - training_args.warmup_steps,
|
||||
)
|
||||
linear_decay_lr_schedule_fn = optax.join_schedules(
|
||||
schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps]
|
||||
)
|
||||
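# Illustrative shape of the joined schedule (e.g. warmup_steps=2000 and
# num_train_steps=10000): the learning rate rises linearly from 0 to
# `learning_rate` over the first 2000 steps, then decays linearly back to 0
# over the remaining 8000 steps.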
|
||||
# We use Optax's "masking" functionality to not apply weight decay
|
||||
# to bias and LayerNorm scale parameters. decay_mask_fn returns a
|
||||
# mask boolean with the same structure as the parameters.
|
||||
# The mask is True for parameters that should be decayed.
|
||||
def decay_mask_fn(params):
|
||||
flat_params = traverse_util.flatten_dict(params)
|
||||
# find out all LayerNorm parameters
|
||||
layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
|
||||
layer_norm_named_params = set(
|
||||
[
|
||||
layer[-2:]
|
||||
for layer_norm_name in layer_norm_candidates
|
||||
for layer in flat_params.keys()
|
||||
if layer_norm_name in "".join(layer).lower()
|
||||
]
|
||||
)
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
|
||||
return traverse_util.unflatten_dict(flat_mask)
|
||||
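# For example (illustrative): a parameter at path
# ("model", "encoder", "layers", "0", "fc1", "kernel") is weight-decayed (True),
# while ("model", "encoder", "layernorm_embedding", "scale") and any path ending
# in "bias" are excluded from weight decay (False).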
|
||||
# create adam optimizer
|
||||
if training_args.adafactor:
|
||||
# We use the default parameters here to initialize adafactor,
|
||||
# For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74
|
||||
optimizer = optax.adafactor(
|
||||
learning_rate=linear_decay_lr_schedule_fn,
|
||||
)
|
||||
else:
|
||||
optimizer = optax.adamw(
|
||||
learning_rate=linear_decay_lr_schedule_fn,
|
||||
b1=training_args.adam_beta1,
|
||||
b2=training_args.adam_beta2,
|
||||
weight_decay=training_args.weight_decay,
|
||||
mask=decay_mask_fn,
|
||||
)
|
||||
|
||||
# Setup train state
|
||||
state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=optimizer)
|
||||
|
||||
# Define gradient update step fn
|
||||
def train_step(state, batch, dropout_rng):
|
||||
dropout_rng, new_dropout_rng = jax.random.split(dropout_rng)
|
||||
|
||||
def loss_fn(params):
|
||||
labels = batch.pop("labels")
|
||||
|
||||
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
|
||||
|
||||
# compute loss, ignore padded input tokens and special tokens
|
||||
label_mask = jnp.where(labels > 0, 1.0, 0.0)
|
||||
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask
|
||||
|
||||
# take average
|
||||
loss = loss.sum() / label_mask.sum()
|
||||
|
||||
return loss
|
||||
|
||||
grad_fn = jax.value_and_grad(loss_fn)
|
||||
loss, grad = grad_fn(state.params)
|
||||
grad = jax.lax.pmean(grad, "batch")
|
||||
new_state = state.apply_gradients(grads=grad)
|
||||
|
||||
metrics = jax.lax.pmean(
|
||||
{"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch"
|
||||
)
|
||||
|
||||
return new_state, metrics, new_dropout_rng
|
||||
|
||||
# Create parallel version of the train step
|
||||
p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
|
||||
|
||||
# Define eval fn
|
||||
def eval_step(params, batch):
|
||||
labels = batch.pop("labels")
|
||||
|
||||
logits = model(**batch, params=params, train=False)[0]
|
||||
|
||||
# compute loss, ignore padded input tokens and special tokens
|
||||
label_mask = jnp.where(labels > 0, 1.0, 0.0)
|
||||
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask
|
||||
|
||||
# compute accuracy
|
||||
accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels) * label_mask
|
||||
|
||||
# summarize metrics
|
||||
metrics = {"loss": loss.sum(), "accuracy": accuracy.sum(), "normalizer": label_mask.sum()}
|
||||
metrics = jax.lax.psum(metrics, axis_name="batch")
|
||||
|
||||
return metrics
|
||||
|
||||
p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,))
|
||||
|
||||
# Replicate the train state on each device
|
||||
state = jax_utils.replicate(state)
|
||||
|
||||
train_time = 0
|
||||
epochs = tqdm(range(num_epochs), desc="Epoch ... ", position=0)
|
||||
for epoch in epochs:
|
||||
# ======================== Training ================================
|
||||
train_start = time.time()
|
||||
train_metrics = []
|
||||
|
||||
# Create sampling rng
|
||||
rng, input_rng = jax.random.split(rng)
|
||||
|
||||
# Generate an epoch by shuffling sampling indices from the train dataset
|
||||
num_train_samples = len(tokenized_datasets["train"])
|
||||
# Avoid using jax.numpy here in case of TPU training
|
||||
train_samples_idx = np.random.permutation(np.arange(num_train_samples))
|
||||
train_batch_idx = generate_batch_splits(train_samples_idx, train_batch_size)
|
||||
|
||||
# Gather the indexes for creating the batch and do a training step
|
||||
for step, batch_idx in enumerate(tqdm(train_batch_idx, desc="Training...", position=1)):
|
||||
samples = [tokenized_datasets["train"][int(idx)] for idx in batch_idx]
|
||||
model_inputs = data_collator(samples)
|
||||
|
||||
# Model forward
|
||||
model_inputs = shard(model_inputs.data)
|
||||
state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs)
|
||||
train_metrics.append(train_metric)
|
||||
|
||||
cur_step = epoch * (num_train_samples // train_batch_size) + step
|
||||
|
||||
if cur_step % training_args.logging_steps == 0 and cur_step > 0:
|
||||
# Save metrics
|
||||
train_metric = jax_utils.unreplicate(train_metric)
|
||||
train_time += time.time() - train_start
|
||||
if has_tensorboard and jax.process_index() == 0:
|
||||
write_train_metric(summary_writer, train_metrics, train_time, cur_step)
|
||||
|
||||
epochs.write(
|
||||
f"Step... ({cur_step} | Loss: {train_metric['loss']}, Learning Rate:"
|
                    f" {train_metric['learning_rate']})"
                )

                train_metrics = []

            if cur_step % training_args.eval_steps == 0 and cur_step > 0:
                # ======================== Evaluating ==============================
                num_eval_samples = len(tokenized_datasets["validation"])
                # Avoid using jax.numpy here in case of TPU training
                eval_samples_idx = np.arange(num_eval_samples)
                eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False)

                eval_metrics = []
                for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
                    samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx]
                    model_inputs = data_collator(samples)

                    # Model forward
                    metrics = pad_shard_unpad(p_eval_step, static_return=True)(
                        state.params, model_inputs.data, min_device_batch=per_device_eval_batch_size
                    )
                    eval_metrics.append(metrics)

                # normalize eval metrics
                eval_metrics = get_metrics(eval_metrics)
                eval_metrics = jax.tree_map(jnp.sum, eval_metrics)
                eval_normalizer = eval_metrics.pop("normalizer")
                eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics)

                # Update progress bar
                epochs.desc = f"Step... ({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})"

                # Save metrics
                if has_tensorboard and jax.process_index() == 0:
                    write_eval_metric(summary_writer, eval_metrics, cur_step)

            if cur_step % training_args.save_steps == 0 and cur_step > 0:
                # save checkpoint after each epoch and push checkpoint to the hub
                if jax.process_index() == 0:
                    params = jax.device_get(jax.tree_map(lambda x: x[0], state.params))
                    model.save_pretrained(training_args.output_dir, params=params)
                    tokenizer.save_pretrained(training_args.output_dir)
                    if training_args.push_to_hub:
                        repo.push_to_hub(commit_message=f"Saving weights and logs of step {cur_step}", blocking=False)

    # Eval after training
    if training_args.do_eval:
        num_eval_samples = len(tokenized_datasets["validation"])
        # Avoid using jax.numpy here in case of TPU training
        eval_samples_idx = np.arange(num_eval_samples)
        eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False)

        eval_metrics = []
        for _, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
            samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx]
            model_inputs = data_collator(samples)

            # Model forward
            metrics = pad_shard_unpad(p_eval_step, static_return=True)(
                state.params, model_inputs.data, min_device_batch=per_device_eval_batch_size
            )
            eval_metrics.append(metrics)

        # normalize eval metrics
        eval_metrics = get_metrics(eval_metrics)
        eval_metrics = jax.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics)
        eval_normalizer = eval_metrics.pop("normalizer")
        eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics)

        try:
            perplexity = math.exp(eval_metrics["loss"])
        except OverflowError:
            perplexity = float("inf")
        eval_metrics["perplexity"] = perplexity

        if jax.process_index() == 0:
            eval_metrics = {f"eval_{metric_name}": value for metric_name, value in eval_metrics.items()}
            path = os.path.join(training_args.output_dir, "eval_results.json")
            with open(path, "w") as f:
                json.dump(eval_metrics, f, indent=4, sort_keys=True)


if __name__ == "__main__":
    main()
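The change that repeats across all of these example scripts is wrapping the pmapped evaluation step in `flax.jax_utils.pad_shard_unpad`, so that the final, possibly incomplete, evaluation batch no longer has to be dropped or routed to a single device. Below is a minimal, hedged sketch of that call pattern; only the `pad_shard_unpad(p_eval_step, static_return=True)(params, batch, min_device_batch=...)` shape is taken from the diffs above, while the toy `eval_step`, parameters, and batch are invented for illustration.

```py
# Sketch only: a toy pmapped eval step fed a batch whose size (10) is not a
# multiple of device_count * per_device_eval_batch_size. pad_shard_unpad pads
# the batch, shards it over local devices, runs the wrapped function, and with
# static_return=True hands the per-device metrics back unmodified.
import jax
import jax.numpy as jnp
import numpy as np
from flax.jax_utils import pad_shard_unpad, replicate

per_device_eval_batch_size = 4


def eval_step(params, batch):
    # invented metric: summed squared "logits"
    logits = batch["inputs"] * params["scale"]
    return {"loss": jnp.sum(logits**2)}


p_eval_step = jax.pmap(eval_step, axis_name="batch")
params = replicate({"scale": jnp.array(0.5)})  # argument 0 is static, i.e. already replicated

batch = {"inputs": np.arange(10, dtype=np.float32).reshape(10, 1)}  # ragged final batch
metrics = pad_shard_unpad(p_eval_step, static_return=True)(
    params, batch, min_device_batch=per_device_eval_batch_size
)
print(jax.device_get(metrics))
```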
@@ -43,7 +43,7 @@ import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.jax_utils import pad_shard_unpad, unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from huggingface_hub import Repository

@@ -264,20 +264,24 @@ class TrainState(train_state.TrainState):
        return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng))


def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False):
def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False, drop_last=True):
    """
    Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
    Shuffle batches if `shuffle` is `True`.
    Returns batches of size `batch_size` from `dataset`. If `drop_last` is set to `False`, the final batch may be incomplete,
    and range in size from 1 to `batch_size`. Shuffle batches if `shuffle` is `True`.
    """
    steps_per_epoch = len(dataset) // batch_size

    if shuffle:
        batch_idx = jax.random.permutation(rng, len(dataset))
        batch_idx = np.asarray(batch_idx)
    else:
        batch_idx = jnp.arange(len(dataset))
        batch_idx = np.arange(len(dataset))

    batch_idx = batch_idx[: steps_per_epoch * batch_size]  # Skip incomplete batch.
    batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
    if drop_last:
        steps_per_epoch = len(dataset) // batch_size
        batch_idx = batch_idx[: steps_per_epoch * batch_size]  # Skip incomplete batch.
        batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
    else:
        steps_per_epoch = math.ceil(len(dataset) / batch_size)
        batch_idx = np.array_split(batch_idx, steps_per_epoch)

    for idx in batch_idx:
        batch = dataset[idx]
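The companion change is `drop_last` support in the batching helpers (`data_loader` here, `generate_batch_splits` in the masked-LM scripts): with `drop_last=False` the leftover examples are kept as a final short batch instead of being truncated away. The standalone illustration below uses made-up sizes; note that `np.array_split` balances the batches (7/6/6 rather than 8/8/3), which is fine for evaluation because every batch stays at or below `batch_size`.

```py
# Illustration only, not from the diff: the two batching branches.
import math

import numpy as np

batch_size = 8
sample_idx = np.arange(19)  # 19 samples: 2 full batches of 8, plus 3 leftovers

# drop_last=True: truncate to a whole number of full batches
steps = len(sample_idx) // batch_size
full_batches = sample_idx[: steps * batch_size].reshape((steps, batch_size))

# drop_last=False: keep every sample, splitting as evenly as possible
steps = math.ceil(len(sample_idx) / batch_size)
all_batches = np.array_split(sample_idx, steps)

print([len(b) for b in full_batches])  # [8, 8] -> 3 samples silently dropped
print([len(b) for b in all_batches])   # [7, 6, 6] -> all 19 samples evaluated
```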
@ -621,7 +625,8 @@ def main():
|
||||
# Store some constant
|
||||
num_epochs = int(training_args.num_train_epochs)
|
||||
train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
|
||||
eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
|
||||
per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
|
||||
eval_batch_size = per_device_eval_batch_size * jax.device_count()
|
||||
steps_per_epoch = len(train_dataset) // train_batch_size
|
||||
total_train_steps = steps_per_epoch * num_epochs
|
||||
|
||||
@ -638,15 +643,19 @@ def main():
|
||||
# to bias and LayerNorm scale parameters. decay_mask_fn returns a
|
||||
# mask boolean with the same structure as the parameters.
|
||||
# The mask is True for parameters that should be decayed.
|
||||
# Note that this mask is specifically adapted for FlaxGPT2.
|
||||
# For other models, one should correct the layer norm parameter naming
|
||||
# accordingly.
|
||||
def decay_mask_fn(params):
|
||||
flat_params = traverse_util.flatten_dict(params)
|
||||
flat_mask = {
|
||||
path: (path[-1] != "bias" and path[-2:] not in [("ln_1", "scale"), ("ln_2", "scale"), ("ln_f", "scale")])
|
||||
for path in flat_params
|
||||
}
|
||||
# find out all LayerNorm parameters
|
||||
layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
|
||||
layer_norm_named_params = set(
|
||||
[
|
||||
layer[-2:]
|
||||
for layer_norm_name in layer_norm_candidates
|
||||
for layer in flat_params.keys()
|
||||
if layer_norm_name in "".join(layer).lower()
|
||||
]
|
||||
)
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
|
||||
return traverse_util.unflatten_dict(flat_mask)
|
||||
|
||||
# create adam optimizer
|
||||
@ -760,13 +769,14 @@ def main():
|
||||
if cur_step % training_args.eval_steps == 0 and cur_step > 0:
|
||||
# ======================== Evaluating ==============================
|
||||
eval_metrics = []
|
||||
eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size)
|
||||
eval_steps = len(eval_dataset) // eval_batch_size
|
||||
eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size, drop_last=False)
|
||||
eval_steps = math.ceil(len(eval_dataset) / eval_batch_size)
|
||||
for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False):
|
||||
# Model forward
|
||||
batch = next(eval_loader)
|
||||
batch = shard(batch)
|
||||
metrics = p_eval_step(state.params, batch)
|
||||
metrics = pad_shard_unpad(p_eval_step, static_return=True)(
|
||||
state.params, batch, min_device_batch=per_device_eval_batch_size
|
||||
)
|
||||
eval_metrics.append(metrics)
|
||||
|
||||
# normalize eval metrics
|
||||
@ -802,12 +812,14 @@ def main():
|
||||
# Eval after training
|
||||
if training_args.do_eval:
|
||||
eval_metrics = []
|
||||
eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size)
|
||||
eval_steps = len(eval_dataset) // eval_batch_size
|
||||
eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size, drop_last=False)
|
||||
eval_steps = math.ceil(len(eval_dataset) / eval_batch_size)
|
||||
for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False):
|
||||
# Model forward
|
||||
batch = shard(next(eval_loader))
|
||||
metrics = p_eval_step(state.params, batch)
|
||||
batch = next(eval_loader)
|
||||
metrics = pad_shard_unpad(p_eval_step, static_return=True)(
|
||||
state.params, batch, min_device_batch=per_device_eval_batch_size
|
||||
)
|
||||
eval_metrics.append(metrics)
|
||||
|
||||
# normalize eval metrics
|
||||
|
@ -43,6 +43,7 @@ import jax
|
||||
import jax.numpy as jnp
|
||||
import optax
|
||||
from flax import jax_utils, traverse_util
|
||||
from flax.jax_utils import pad_shard_unpad
|
||||
from flax.training import train_state
|
||||
from flax.training.common_utils import get_metrics, onehot, shard
|
||||
from huggingface_hub import Repository
|
||||
@ -326,15 +327,20 @@ class FlaxDataCollatorForLanguageModeling:
|
||||
return inputs, labels
|
||||
|
||||
|
||||
def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> np.ndarray:
|
||||
def generate_batch_splits(samples_idx: np.ndarray, batch_size: int, drop_last=True) -> np.ndarray:
|
||||
"""Generate batches of data for a specified batch size from sample indices. If the dataset size is not divisible by
|
||||
the batch size and `drop_last` is `True`, the last incomplete batch is dropped. Else, it is returned."""
|
||||
num_samples = len(samples_idx)
|
||||
samples_to_remove = num_samples % batch_size
|
||||
|
||||
if samples_to_remove != 0:
|
||||
samples_idx = samples_idx[:-samples_to_remove]
|
||||
sections_split = num_samples // batch_size
|
||||
batch_idx = np.split(samples_idx, sections_split)
|
||||
return batch_idx
|
||||
if drop_last:
|
||||
samples_to_remove = num_samples % batch_size
|
||||
if samples_to_remove != 0:
|
||||
samples_idx = samples_idx[:-samples_to_remove]
|
||||
sections_split = num_samples // batch_size
|
||||
samples_idx = samples_idx.reshape((sections_split, batch_size))
|
||||
else:
|
||||
sections_split = math.ceil(num_samples / batch_size)
|
||||
samples_idx = np.array_split(samples_idx, sections_split)
|
||||
return samples_idx
|
||||
|
||||
|
||||
def write_train_metric(summary_writer, train_metrics, train_time, step):
|
||||
@ -637,7 +643,8 @@ def main():
|
||||
# Store some constant
|
||||
num_epochs = int(training_args.num_train_epochs)
|
||||
train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
|
||||
eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
|
||||
per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
|
||||
eval_batch_size = per_device_eval_batch_size * jax.device_count()
|
||||
|
||||
num_train_steps = len(tokenized_datasets["train"]) // train_batch_size * num_epochs
|
||||
|
||||
@ -658,12 +665,19 @@ def main():
|
||||
# to bias and LayerNorm scale parameters. decay_mask_fn returns a
|
||||
# mask boolean with the same structure as the parameters.
|
||||
# The mask is True for parameters that should be decayed.
|
||||
# Note that this mask is specifically adapted for FlaxBERT-like models.
|
||||
# For other models, one should correct the layer norm parameter naming
|
||||
# accordingly.
|
||||
def decay_mask_fn(params):
|
||||
flat_params = traverse_util.flatten_dict(params)
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params}
|
||||
# find out all LayerNorm parameters
|
||||
layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
|
||||
layer_norm_named_params = set(
|
||||
[
|
||||
layer[-2:]
|
||||
for layer_norm_name in layer_norm_candidates
|
||||
for layer in flat_params.keys()
|
||||
if layer_norm_name in "".join(layer).lower()
|
||||
]
|
||||
)
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
|
||||
return traverse_util.unflatten_dict(flat_mask)
|
||||
|
||||
# create adam optimizer
|
||||
@ -789,7 +803,7 @@ def main():
|
||||
num_eval_samples = len(tokenized_datasets["validation"])
|
||||
# Avoid using jax.numpy here in case of TPU training
|
||||
eval_samples_idx = np.arange(num_eval_samples)
|
||||
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)
|
||||
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False)
|
||||
|
||||
eval_metrics = []
|
||||
for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
|
||||
@ -797,8 +811,9 @@ def main():
|
||||
model_inputs = data_collator(samples, pad_to_multiple_of=16)
|
||||
|
||||
# Model forward
|
||||
model_inputs = shard(model_inputs.data)
|
||||
metrics = p_eval_step(state.params, model_inputs)
|
||||
metrics = pad_shard_unpad(p_eval_step, static_return=True)(
|
||||
state.params, model_inputs.data, min_device_batch=per_device_eval_batch_size
|
||||
)
|
||||
eval_metrics.append(metrics)
|
||||
|
||||
# normalize eval metrics
|
||||
@ -828,7 +843,7 @@ def main():
|
||||
num_eval_samples = len(tokenized_datasets["validation"])
|
||||
# Avoid using jax.numpy here in case of TPU training
|
||||
eval_samples_idx = np.arange(num_eval_samples)
|
||||
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)
|
||||
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False)
|
||||
|
||||
eval_metrics = []
|
||||
for _, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
|
||||
@ -836,8 +851,9 @@ def main():
|
||||
model_inputs = data_collator(samples, pad_to_multiple_of=16)
|
||||
|
||||
# Model forward
|
||||
model_inputs = shard(model_inputs.data)
|
||||
metrics = p_eval_step(state.params, model_inputs)
|
||||
metrics = pad_shard_unpad(p_eval_step, static_return=True)(
|
||||
state.params, model_inputs.data, min_device_batch=per_device_eval_batch_size
|
||||
)
|
||||
eval_metrics.append(metrics)
|
||||
|
||||
# normalize eval metrics
|
||||
|
@ -21,6 +21,7 @@ https://huggingface.co/models?filter=t5
|
||||
"""
|
||||
import json
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
@ -41,6 +42,7 @@ import jax
|
||||
import jax.numpy as jnp
|
||||
import optax
|
||||
from flax import jax_utils, traverse_util
|
||||
from flax.jax_utils import pad_shard_unpad
|
||||
from flax.training import train_state
|
||||
from flax.training.common_utils import get_metrics, onehot, shard
|
||||
from huggingface_hub import Repository
|
||||
@ -325,7 +327,7 @@ class FlaxDataCollatorForT5MLM:
|
||||
pad_token_id: int
|
||||
decoder_start_token_id: int
|
||||
|
||||
def __call__(self, examples: List[Dict[str, np.ndarray]]) -> Dict[str, np.ndarray]:
|
||||
def __call__(self, examples: List[Dict[str, np.ndarray]]) -> BatchEncoding:
|
||||
|
||||
# convert list to dict and tensorize input
|
||||
batch = BatchEncoding(
|
||||
@ -459,15 +461,20 @@ class FlaxDataCollatorForT5MLM:
|
||||
return is_noise[:orig_length]
|
||||
|
||||
|
||||
def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> np.ndarray:
|
||||
def generate_batch_splits(samples_idx: np.ndarray, batch_size: int, drop_last=True) -> np.ndarray:
|
||||
"""Generate batches of data for a specified batch size from sample indices. If the dataset size is not divisible by
|
||||
the batch size and `drop_last` is `True`, the last incomplete batch is dropped. Else, it is returned."""
|
||||
num_samples = len(samples_idx)
|
||||
samples_to_remove = num_samples % batch_size
|
||||
|
||||
if samples_to_remove != 0:
|
||||
samples_idx = samples_idx[:-samples_to_remove]
|
||||
sections_split = num_samples // batch_size
|
||||
batch_idx = np.split(samples_idx, sections_split)
|
||||
return batch_idx
|
||||
if drop_last:
|
||||
samples_to_remove = num_samples % batch_size
|
||||
if samples_to_remove != 0:
|
||||
samples_idx = samples_idx[:-samples_to_remove]
|
||||
sections_split = num_samples // batch_size
|
||||
samples_idx = samples_idx.reshape((sections_split, batch_size))
|
||||
else:
|
||||
sections_split = math.ceil(num_samples / batch_size)
|
||||
samples_idx = np.array_split(samples_idx, sections_split)
|
||||
return samples_idx
|
||||
|
||||
|
||||
def write_train_metric(summary_writer, train_metrics, train_time, step):
|
||||
@ -756,7 +763,8 @@ def main():
|
||||
# Store some constant
|
||||
num_epochs = int(training_args.num_train_epochs)
|
||||
train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
|
||||
eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
|
||||
per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
|
||||
eval_batch_size = per_device_eval_batch_size * jax.device_count()
|
||||
|
||||
num_train_steps = len(tokenized_datasets["train"]) // train_batch_size * num_epochs
|
||||
|
||||
@ -782,10 +790,17 @@ def main():
|
||||
# The mask is True for parameters that should be decayed.
|
||||
def decay_mask_fn(params):
|
||||
flat_params = traverse_util.flatten_dict(params)
|
||||
flat_mask = {
|
||||
path: (path[-1] != "bias" and path[-2:] not in [("layer_norm", "scale"), ("final_layer_norm", "scale")])
|
||||
for path in flat_params
|
||||
}
|
||||
# find out all LayerNorm parameters
|
||||
layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
|
||||
layer_norm_named_params = set(
|
||||
[
|
||||
layer[-2:]
|
||||
for layer_norm_name in layer_norm_candidates
|
||||
for layer in flat_params.keys()
|
||||
if layer_norm_name in "".join(layer).lower()
|
||||
]
|
||||
)
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
|
||||
return traverse_util.unflatten_dict(flat_mask)
|
||||
|
||||
# create adam optimizer
|
||||
@ -910,7 +925,7 @@ def main():
|
||||
num_eval_samples = len(tokenized_datasets["validation"])
|
||||
# Avoid using jax.numpy here in case of TPU training
|
||||
eval_samples_idx = np.arange(num_eval_samples)
|
||||
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)
|
||||
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False)
|
||||
|
||||
eval_metrics = []
|
||||
for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
|
||||
@ -918,8 +933,9 @@ def main():
|
||||
model_inputs = data_collator(samples)
|
||||
|
||||
# Model forward
|
||||
model_inputs = shard(model_inputs.data)
|
||||
metrics = p_eval_step(state.params, model_inputs)
|
||||
metrics = pad_shard_unpad(p_eval_step, static_return=True)(
|
||||
state.params, model_inputs.data, min_device_batch=per_device_eval_batch_size
|
||||
)
|
||||
eval_metrics.append(metrics)
|
||||
|
||||
# get eval metrics
|
||||
@ -947,7 +963,7 @@ def main():
|
||||
num_eval_samples = len(tokenized_datasets["validation"])
|
||||
# Avoid using jax.numpy here in case of TPU training
|
||||
eval_samples_idx = np.arange(num_eval_samples)
|
||||
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)
|
||||
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False)
|
||||
|
||||
eval_metrics = []
|
||||
for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
|
||||
@ -955,8 +971,9 @@ def main():
|
||||
model_inputs = data_collator(samples)
|
||||
|
||||
# Model forward
|
||||
model_inputs = shard(model_inputs.data)
|
||||
metrics = p_eval_step(state.params, model_inputs)
|
||||
metrics = pad_shard_unpad(p_eval_step, static_return=True)(
|
||||
state.params, model_inputs.data, min_device_batch=per_device_eval_batch_size
|
||||
)
|
||||
eval_metrics.append(metrics)
|
||||
|
||||
# get eval metrics
|
||||
|
@ -20,27 +20,28 @@ Fine-tuning the library models for question answering.
|
||||
|
||||
import json
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
import time
|
||||
from dataclasses import asdict, dataclass, field
|
||||
from enum import Enum
|
||||
from itertools import chain
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Dict, Optional, Tuple
|
||||
|
||||
import datasets
|
||||
import numpy as np
|
||||
from datasets import load_dataset, load_metric
|
||||
from datasets import load_dataset
|
||||
from tqdm import tqdm
|
||||
|
||||
import evaluate
|
||||
import jax
|
||||
import jax.numpy as jnp
|
||||
import optax
|
||||
import transformers
|
||||
from flax import struct, traverse_util
|
||||
from flax.jax_utils import replicate, unreplicate
|
||||
from flax.jax_utils import pad_shard_unpad, replicate, unreplicate
|
||||
from flax.training import train_state
|
||||
from flax.training.common_utils import get_metrics, onehot, shard
|
||||
from huggingface_hub import Repository
|
||||
@ -60,7 +61,7 @@ from utils_qa import postprocess_qa_predictions
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.21.0.dev0")
|
||||
check_min_version("4.22.0.dev0")
|
||||
|
||||
Array = Any
|
||||
Dataset = datasets.arrow_dataset.Dataset
|
||||
@ -327,12 +328,19 @@ def create_train_state(
|
||||
# to bias and LayerNorm scale parameters. decay_mask_fn returns a
|
||||
# mask boolean with the same structure as the parameters.
|
||||
# The mask is True for parameters that should be decayed.
|
||||
# Note that this mask is specifically adapted for FlaxBERT-like models.
|
||||
# For other models, one should correct the layer norm parameter naming
|
||||
# accordingly.
|
||||
def decay_mask_fn(params):
|
||||
flat_params = traverse_util.flatten_dict(params)
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params}
|
||||
# find out all LayerNorm parameters
|
||||
layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
|
||||
layer_norm_named_params = set(
|
||||
[
|
||||
layer[-2:]
|
||||
for layer_norm_name in layer_norm_candidates
|
||||
for layer in flat_params.keys()
|
||||
if layer_norm_name in "".join(layer).lower()
|
||||
]
|
||||
)
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
|
||||
return traverse_util.unflatten_dict(flat_mask)
|
||||
|
||||
tx = optax.adamw(
|
||||
@ -399,11 +407,15 @@ def train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int):
|
||||
|
||||
# region eval data iterator
|
||||
def eval_data_collator(dataset: Dataset, batch_size: int):
|
||||
"""Returns batches of size `batch_size` from `eval dataset`, sharded over all local devices."""
|
||||
for i in range(len(dataset) // batch_size):
|
||||
batch = dataset[i * batch_size : (i + 1) * batch_size]
|
||||
"""Returns batches of size `batch_size` from `eval dataset`. Sharding handled by `pad_shard_unpad` in the eval loop."""
|
||||
batch_idx = np.arange(len(dataset))
|
||||
|
||||
steps_per_epoch = math.ceil(len(dataset) / batch_size)
|
||||
batch_idx = np.array_split(batch_idx, steps_per_epoch)
|
||||
|
||||
for idx in batch_idx:
|
||||
batch = dataset[idx]
|
||||
batch = {k: np.array(v) for k, v in batch.items()}
|
||||
batch = shard(batch)
|
||||
|
||||
yield batch
|
||||
|
||||
@ -765,7 +777,7 @@ def main():
|
||||
references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
|
||||
return EvalPrediction(predictions=formatted_predictions, label_ids=references)
|
||||
|
||||
metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad")
|
||||
metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
|
||||
|
||||
def compute_metrics(p: EvalPrediction):
|
||||
return metric.compute(predictions=p.predictions, references=p.label_ids)
|
||||
@ -849,8 +861,9 @@ def main():
|
||||
rng = jax.random.PRNGKey(training_args.seed)
|
||||
dropout_rngs = jax.random.split(rng, jax.local_device_count())
|
||||
|
||||
train_batch_size = training_args.per_device_train_batch_size * jax.local_device_count()
|
||||
eval_batch_size = training_args.per_device_eval_batch_size * jax.local_device_count()
|
||||
train_batch_size = int(training_args.per_device_train_batch_size) * jax.local_device_count()
|
||||
per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
|
||||
eval_batch_size = per_device_eval_batch_size * jax.local_device_count()
|
||||
# endregion
|
||||
|
||||
# region Load model
|
||||
@ -968,32 +981,17 @@ def main():
|
||||
# evaluate
|
||||
for batch in tqdm(
|
||||
eval_data_collator(eval_dataset, eval_batch_size),
|
||||
total=len(eval_dataset) // eval_batch_size,
|
||||
total=math.ceil(len(eval_dataset) / eval_batch_size),
|
||||
desc="Evaluating ...",
|
||||
position=2,
|
||||
):
|
||||
_ = batch.pop("example_id")
|
||||
_ = batch.pop("offset_mapping")
|
||||
predictions = p_eval_step(state, batch)
|
||||
start_logits = np.array([pred for pred in chain(*predictions[0])])
|
||||
end_logits = np.array([pred for pred in chain(*predictions[1])])
|
||||
all_start_logits.append(start_logits)
|
||||
all_end_logits.append(end_logits)
|
||||
|
||||
# evaluate also on leftover examples (not divisible by batch_size)
|
||||
num_leftover_samples = len(eval_dataset) % eval_batch_size
|
||||
|
||||
# make sure leftover batch is evaluated on one device
|
||||
if num_leftover_samples > 0 and jax.process_index() == 0:
|
||||
# take leftover samples
|
||||
batch = eval_dataset[-num_leftover_samples:]
|
||||
batch = {k: np.array(v) for k, v in batch.items()}
|
||||
_ = batch.pop("example_id")
|
||||
_ = batch.pop("offset_mapping")
|
||||
|
||||
predictions = eval_step(unreplicate(state), batch)
|
||||
start_logits = np.array([pred for pred in predictions[0]])
|
||||
end_logits = np.array([pred for pred in predictions[1]])
|
||||
predictions = pad_shard_unpad(p_eval_step)(
|
||||
state, batch, min_device_batch=per_device_eval_batch_size
|
||||
)
|
||||
start_logits = np.array(predictions[0])
|
||||
end_logits = np.array(predictions[1])
|
||||
all_start_logits.append(start_logits)
|
||||
all_end_logits.append(end_logits)
|
||||
|
||||
@ -1032,30 +1030,15 @@ def main():
|
||||
all_start_logits = []
|
||||
all_end_logits = []
|
||||
|
||||
eva_loader = eval_data_collator(eval_dataset, eval_batch_size)
|
||||
for batch in tqdm(eva_loader, total=len(eval_dataset) // eval_batch_size, desc="Evaluating ...", position=2):
|
||||
eval_loader = eval_data_collator(eval_dataset, eval_batch_size)
|
||||
for batch in tqdm(
|
||||
eval_loader, total=math.ceil(len(eval_dataset) / eval_batch_size), desc="Evaluating ...", position=2
|
||||
):
|
||||
_ = batch.pop("example_id")
|
||||
_ = batch.pop("offset_mapping")
|
||||
predictions = p_eval_step(state, batch)
|
||||
start_logits = np.array([pred for pred in chain(*predictions[0])])
|
||||
end_logits = np.array([pred for pred in chain(*predictions[1])])
|
||||
all_start_logits.append(start_logits)
|
||||
all_end_logits.append(end_logits)
|
||||
|
||||
# evaluate also on leftover examples (not divisible by batch_size)
|
||||
num_leftover_samples = len(eval_dataset) % eval_batch_size
|
||||
|
||||
# make sure leftover batch is evaluated on one device
|
||||
if num_leftover_samples > 0 and jax.process_index() == 0:
|
||||
# take leftover samples
|
||||
batch = eval_dataset[-num_leftover_samples:]
|
||||
batch = {k: np.array(v) for k, v in batch.items()}
|
||||
_ = batch.pop("example_id")
|
||||
_ = batch.pop("offset_mapping")
|
||||
|
||||
predictions = eval_step(unreplicate(state), batch)
|
||||
start_logits = np.array([pred for pred in predictions[0]])
|
||||
end_logits = np.array([pred for pred in predictions[1]])
|
||||
predictions = pad_shard_unpad(p_eval_step)(state, batch, min_device_batch=per_device_eval_batch_size)
|
||||
start_logits = np.array(predictions[0])
|
||||
end_logits = np.array(predictions[1])
|
||||
all_start_logits.append(start_logits)
|
||||
all_end_logits.append(end_logits)
|
||||
|
||||
|
@@ -3,3 +3,4 @@ jax>=0.2.8
jaxlib>=0.1.59
flax>=0.3.5
optax>=0.0.8
evaluate>=0.2.0
@ -20,6 +20,7 @@ Fine-tuning the library models for summarization.
|
||||
|
||||
import json
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
@ -32,16 +33,17 @@ from typing import Callable, Optional
|
||||
import datasets
|
||||
import nltk # Here to have a nice missing dependency error message early on
|
||||
import numpy as np
|
||||
from datasets import Dataset, load_dataset, load_metric
|
||||
from datasets import Dataset, load_dataset
|
||||
from tqdm import tqdm
|
||||
|
||||
import evaluate
|
||||
import jax
|
||||
import jax.numpy as jnp
|
||||
import optax
|
||||
import transformers
|
||||
from filelock import FileLock
|
||||
from flax import jax_utils, traverse_util
|
||||
from flax.jax_utils import unreplicate
|
||||
from flax.jax_utils import pad_shard_unpad, unreplicate
|
||||
from flax.training import train_state
|
||||
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
|
||||
from huggingface_hub import Repository
|
||||
@ -335,26 +337,28 @@ class TrainState(train_state.TrainState):
|
||||
return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng))
|
||||
|
||||
|
||||
def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False):
|
||||
def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False, drop_last=True):
|
||||
"""
|
||||
Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
|
||||
Shuffle batches if `shuffle` is `True`.
|
||||
Returns batches of size `batch_size` from `dataset`. If `drop_last` is set to `False`, the final batch may be incomplete,
|
||||
and range in size from 1 to `batch_size`. Shuffle batches if `shuffle` is `True`.
|
||||
"""
|
||||
steps_per_epoch = len(dataset) // batch_size
|
||||
|
||||
if shuffle:
|
||||
batch_idx = jax.random.permutation(rng, len(dataset))
|
||||
batch_idx = np.asarray(batch_idx)
|
||||
else:
|
||||
batch_idx = jnp.arange(len(dataset))
|
||||
batch_idx = np.arange(len(dataset))
|
||||
|
||||
batch_idx = batch_idx[: steps_per_epoch * batch_size] # Skip incomplete batch.
|
||||
batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
|
||||
if drop_last:
|
||||
steps_per_epoch = len(dataset) // batch_size
|
||||
batch_idx = batch_idx[: steps_per_epoch * batch_size] # Skip incomplete batch.
|
||||
batch_idx = batch_idx.reshape((steps_per_epoch, batch_size))
|
||||
else:
|
||||
steps_per_epoch = math.ceil(len(dataset) / batch_size)
|
||||
batch_idx = np.array_split(batch_idx, steps_per_epoch)
|
||||
|
||||
for idx in batch_idx:
|
||||
batch = dataset[idx]
|
||||
batch = {k: jnp.array(v) for k, v in batch.items()}
|
||||
|
||||
batch = shard(batch)
|
||||
batch = {k: np.array(v) for k, v in batch.items()}
|
||||
|
||||
yield batch
|
||||
|
||||
@ -586,10 +590,13 @@ def main():
|
||||
)
|
||||
|
||||
# Setup the tokenizer for targets
|
||||
with tokenizer.as_target_tokenizer():
|
||||
labels = tokenizer(
|
||||
targets, max_length=max_target_length, padding="max_length", truncation=True, return_tensors="np"
|
||||
)
|
||||
labels = tokenizer(
|
||||
text_target=targets,
|
||||
max_length=max_target_length,
|
||||
padding="max_length",
|
||||
truncation=True,
|
||||
return_tensors="np",
|
||||
)
|
||||
|
||||
model_inputs["labels"] = labels["input_ids"]
|
||||
decoder_input_ids = shift_tokens_right_fn(
|
||||
@ -653,7 +660,7 @@ def main():
|
||||
)
|
||||
|
||||
# Metric
|
||||
metric = load_metric("rouge")
|
||||
metric = evaluate.load("rouge")
|
||||
|
||||
def postprocess_text(preds, labels):
|
||||
preds = [pred.strip() for pred in preds]
|
||||
@ -673,12 +680,9 @@ def main():
|
||||
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
|
||||
|
||||
result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
|
||||
# Extract a few results from ROUGE
|
||||
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
|
||||
|
||||
result = {k: round(v * 100, 4) for k, v in result.items()}
|
||||
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
|
||||
result["gen_len"] = np.mean(prediction_lens)
|
||||
result = {k: round(v, 4) for k, v in result.items()}
|
||||
return result
|
||||
|
||||
# Enable tensorboard only on the master node
|
||||
@ -706,7 +710,8 @@ def main():
|
||||
# Store some constant
|
||||
num_epochs = int(training_args.num_train_epochs)
|
||||
train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
|
||||
eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
|
||||
per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
|
||||
eval_batch_size = per_device_eval_batch_size * jax.device_count()
|
||||
steps_per_epoch = len(train_dataset) // train_batch_size
|
||||
total_train_steps = steps_per_epoch * num_epochs
|
||||
|
||||
@ -723,15 +728,19 @@ def main():
|
||||
# to bias and LayerNorm scale parameters. decay_mask_fn returns a
|
||||
# mask boolean with the same structure as the parameters.
|
||||
# The mask is True for parameters that should be decayed.
|
||||
# Note that this mask is specifically adapted for FlaxBart.
|
||||
# For FlaxT5, one should correct the layer norm parameter naming
|
||||
# accordingly - see `run_t5_mlm_flax.py` e.g.
|
||||
def decay_mask_fn(params):
|
||||
flat_params = traverse_util.flatten_dict(params)
|
||||
layer_norm_params = [
|
||||
(name, "scale") for name in ["self_attn_layer_norm", "layernorm_embedding", "final_layer_norm"]
|
||||
]
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_params) for path in flat_params}
|
||||
# find out all LayerNorm parameters
|
||||
layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
|
||||
layer_norm_named_params = set(
|
||||
[
|
||||
layer[-2:]
|
||||
for layer_norm_name in layer_norm_candidates
|
||||
for layer in flat_params.keys()
|
||||
if layer_norm_name in "".join(layer).lower()
|
||||
]
|
||||
)
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
|
||||
return traverse_util.unflatten_dict(flat_mask)
|
||||
|
||||
# create adam optimizer
|
||||
@ -846,6 +855,7 @@ def main():
|
||||
# train
|
||||
for _ in tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False):
|
||||
batch = next(train_loader)
|
||||
batch = shard(batch)
|
||||
state, train_metric = p_train_step(state, batch)
|
||||
train_metrics.append(train_metric)
|
||||
|
||||
@ -863,21 +873,23 @@ def main():
|
||||
eval_preds = []
|
||||
eval_labels = []
|
||||
|
||||
eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size)
|
||||
eval_steps = len(eval_dataset) // eval_batch_size
|
||||
eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size, drop_last=False)
|
||||
eval_steps = math.ceil(len(eval_dataset) / eval_batch_size)
|
||||
for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False):
|
||||
# Model forward
|
||||
batch = next(eval_loader)
|
||||
labels = batch["labels"]
|
||||
|
||||
metrics = p_eval_step(state.params, batch)
|
||||
metrics = pad_shard_unpad(p_eval_step, static_return=True)(
|
||||
state.params, batch, min_device_batch=per_device_eval_batch_size
|
||||
)
|
||||
eval_metrics.append(metrics)
|
||||
|
||||
# generation
|
||||
if data_args.predict_with_generate:
|
||||
generated_ids = p_generate_step(state.params, batch)
|
||||
generated_ids = pad_shard_unpad(p_generate_step)(state.params, batch)
|
||||
eval_preds.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs["max_length"])))
|
||||
eval_labels.extend(jax.device_get(labels.reshape(-1, labels.shape[-1])))
|
||||
eval_labels.extend(labels)
|
||||
|
||||
# normalize eval metrics
|
||||
eval_metrics = get_metrics(eval_metrics)
|
||||
@ -916,21 +928,23 @@ def main():
|
||||
pred_generations = []
|
||||
pred_labels = []
|
||||
|
||||
pred_loader = data_loader(input_rng, predict_dataset, eval_batch_size)
|
||||
pred_steps = len(predict_dataset) // eval_batch_size
|
||||
pred_loader = data_loader(input_rng, predict_dataset, eval_batch_size, drop_last=False)
|
||||
pred_steps = math.ceil(len(predict_dataset) / eval_batch_size)
|
||||
for _ in tqdm(range(pred_steps), desc="Predicting...", position=2, leave=False):
|
||||
# Model forward
|
||||
batch = next(pred_loader)
|
||||
labels = batch["labels"]
|
||||
|
||||
metrics = p_eval_step(state.params, batch)
|
||||
metrics = pad_shard_unpad(p_eval_step, static_return=True)(
|
||||
state.params, batch, min_device_batch=per_device_eval_batch_size
|
||||
)
|
||||
pred_metrics.append(metrics)
|
||||
|
||||
# generation
|
||||
if data_args.predict_with_generate:
|
||||
generated_ids = p_generate_step(state.params, batch)
|
||||
generated_ids = pad_shard_unpad(p_generate_step)(state.params, batch)
|
||||
pred_generations.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs["max_length"])))
|
||||
pred_labels.extend(jax.device_get(labels.reshape(-1, labels.shape[-1])))
|
||||
pred_labels.extend(labels)
|
||||
|
||||
# normalize prediction metrics
|
||||
pred_metrics = get_metrics(pred_metrics)
|
||||
|
@ -16,26 +16,27 @@
|
||||
""" Finetuning a 🤗 Flax Transformers model for sequence classification on GLUE."""
|
||||
import json
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from itertools import chain
|
||||
from pathlib import Path
|
||||
from typing import Any, Callable, Dict, Optional, Tuple
|
||||
|
||||
import datasets
|
||||
import numpy as np
|
||||
from datasets import load_dataset, load_metric
|
||||
from datasets import load_dataset
|
||||
from tqdm import tqdm
|
||||
|
||||
import evaluate
|
||||
import jax
|
||||
import jax.numpy as jnp
|
||||
import optax
|
||||
import transformers
|
||||
from flax import struct, traverse_util
|
||||
from flax.jax_utils import replicate, unreplicate
|
||||
from flax.jax_utils import pad_shard_unpad, replicate, unreplicate
|
||||
from flax.training import train_state
|
||||
from flax.training.common_utils import get_metrics, onehot, shard
|
||||
from huggingface_hub import Repository
|
||||
@ -53,7 +54,7 @@ from transformers.utils import check_min_version, get_full_repo_name, send_examp
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.21.0.dev0")
|
||||
check_min_version("4.22.0.dev0")
|
||||
|
||||
Array = Any
|
||||
Dataset = datasets.arrow_dataset.Dataset
|
||||
@ -226,7 +227,17 @@ def create_train_state(
|
||||
# The mask is True for parameters that should be decayed.
|
||||
def decay_mask_fn(params):
|
||||
flat_params = traverse_util.flatten_dict(params)
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params}
|
||||
# find out all LayerNorm parameters
|
||||
layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
|
||||
layer_norm_named_params = set(
|
||||
[
|
||||
layer[-2:]
|
||||
for layer_norm_name in layer_norm_candidates
|
||||
for layer in flat_params.keys()
|
||||
if layer_norm_name in "".join(layer).lower()
|
||||
]
|
||||
)
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
|
||||
return traverse_util.unflatten_dict(flat_mask)
|
||||
|
||||
tx = optax.adamw(
|
||||
@ -290,11 +301,15 @@ def glue_train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int):
|
||||
|
||||
|
||||
def glue_eval_data_collator(dataset: Dataset, batch_size: int):
|
||||
"""Returns batches of size `batch_size` from `eval dataset`, sharded over all local devices."""
|
||||
for i in range(len(dataset) // batch_size):
|
||||
batch = dataset[i * batch_size : (i + 1) * batch_size]
|
||||
"""Returns batches of size `batch_size` from `eval dataset`. Sharding handled by `pad_shard_unpad` in the eval loop."""
|
||||
batch_idx = np.arange(len(dataset))
|
||||
|
||||
steps_per_epoch = math.ceil(len(dataset) / batch_size)
|
||||
batch_idx = np.array_split(batch_idx, steps_per_epoch)
|
||||
|
||||
for idx in batch_idx:
|
||||
batch = dataset[idx]
|
||||
batch = {k: np.array(v) for k, v in batch.items()}
|
||||
batch = shard(batch)
|
||||
|
||||
yield batch
|
||||
|
||||
@ -511,8 +526,9 @@ def main():
|
||||
rng = jax.random.PRNGKey(training_args.seed)
|
||||
dropout_rngs = jax.random.split(rng, jax.local_device_count())
|
||||
|
||||
train_batch_size = training_args.per_device_train_batch_size * jax.local_device_count()
|
||||
eval_batch_size = training_args.per_device_eval_batch_size * jax.local_device_count()
|
||||
train_batch_size = int(training_args.per_device_train_batch_size) * jax.local_device_count()
|
||||
per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
|
||||
eval_batch_size = per_device_eval_batch_size * jax.device_count()
|
||||
|
||||
learning_rate_fn = create_learning_rate_fn(
|
||||
len(train_dataset),
|
||||
@ -555,9 +571,9 @@ def main():
|
||||
p_eval_step = jax.pmap(eval_step, axis_name="batch")
|
||||
|
||||
if data_args.task_name is not None:
|
||||
metric = load_metric("glue", data_args.task_name)
|
||||
metric = evaluate.load("glue", data_args.task_name)
|
||||
else:
|
||||
metric = load_metric("accuracy")
|
||||
metric = evaluate.load("accuracy")
|
||||
|
||||
logger.info(f"===== Starting training ({num_epochs} epochs) =====")
|
||||
train_time = 0
|
||||
@ -611,26 +627,15 @@ def main():
|
||||
eval_loader = glue_eval_data_collator(eval_dataset, eval_batch_size)
|
||||
for batch in tqdm(
|
||||
eval_loader,
|
||||
total=len(eval_dataset) // eval_batch_size,
|
||||
total=math.ceil(len(eval_dataset) / eval_batch_size),
|
||||
desc="Evaluating ...",
|
||||
position=2,
|
||||
):
|
||||
labels = batch.pop("labels")
|
||||
predictions = p_eval_step(state, batch)
|
||||
metric.add_batch(predictions=chain(*predictions), references=chain(*labels))
|
||||
|
||||
# evaluate also on leftover examples (not divisible by batch_size)
|
||||
num_leftover_samples = len(eval_dataset) % eval_batch_size
|
||||
|
||||
# make sure leftover batch is evaluated on one device
|
||||
if num_leftover_samples > 0 and jax.process_index() == 0:
|
||||
# take leftover samples
|
||||
batch = eval_dataset[-num_leftover_samples:]
|
||||
batch = {k: np.array(v) for k, v in batch.items()}
|
||||
|
||||
labels = batch.pop("labels")
|
||||
predictions = eval_step(unreplicate(state), batch)
|
||||
metric.add_batch(predictions=predictions, references=labels)
|
||||
predictions = pad_shard_unpad(p_eval_step)(
|
||||
state, batch, min_device_batch=per_device_eval_batch_size
|
||||
)
|
||||
metric.add_batch(predictions=np.array(predictions), references=labels)
|
||||
|
||||
eval_metric = metric.compute()
|
||||
|
||||
|
@ -16,6 +16,7 @@
|
||||
""" Fine-tuning a 🤗 Flax Transformers model on token classification tasks (NER, POS, CHUNKS)"""
|
||||
import json
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
import sys
|
||||
@ -28,15 +29,16 @@ from typing import Any, Callable, Dict, Optional, Tuple
|
||||
|
||||
import datasets
|
||||
import numpy as np
|
||||
from datasets import ClassLabel, load_dataset, load_metric
|
||||
from datasets import ClassLabel, load_dataset
|
||||
from tqdm import tqdm
|
||||
|
||||
import evaluate
|
||||
import jax
|
||||
import jax.numpy as jnp
|
||||
import optax
|
||||
import transformers
|
||||
from flax import struct, traverse_util
|
||||
from flax.jax_utils import replicate, unreplicate
|
||||
from flax.jax_utils import pad_shard_unpad, replicate, unreplicate
|
||||
from flax.training import train_state
|
||||
from flax.training.common_utils import get_metrics, onehot, shard
|
||||
from huggingface_hub import Repository
|
||||
@ -53,7 +55,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.21.0.dev0")
|
||||
check_min_version("4.22.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")
|
||||
|
||||
@ -284,12 +286,19 @@ def create_train_state(
|
||||
# to bias and LayerNorm scale parameters. decay_mask_fn returns a
|
||||
# mask boolean with the same structure as the parameters.
|
||||
# The mask is True for parameters that should be decayed.
|
||||
# Note that this mask is specifically adapted for FlaxBERT-like models.
|
||||
# For other models, one should correct the layer norm parameter naming
|
||||
# accordingly.
|
||||
def decay_mask_fn(params):
|
||||
flat_params = traverse_util.flatten_dict(params)
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params}
|
||||
# find out all LayerNorm parameters
|
||||
layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
|
||||
layer_norm_named_params = set(
|
||||
[
|
||||
layer[-2:]
|
||||
for layer_norm_name in layer_norm_candidates
|
||||
for layer in flat_params.keys()
|
||||
if layer_norm_name in "".join(layer).lower()
|
||||
]
|
||||
)
|
||||
flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
|
||||
return traverse_util.unflatten_dict(flat_mask)
|
||||
|
||||
tx = optax.adamw(
|
||||
@ -344,11 +353,15 @@ def train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int):
|
||||
|
||||
|
||||
def eval_data_collator(dataset: Dataset, batch_size: int):
|
||||
"""Returns batches of size `batch_size` from `eval dataset`, sharded over all local devices."""
|
||||
for i in range(len(dataset) // batch_size):
|
||||
batch = dataset[i * batch_size : (i + 1) * batch_size]
|
||||
"""Returns batches of size `batch_size` from `eval dataset`. Sharding handled by `pad_shard_unpad` in the eval loop."""
|
||||
batch_idx = np.arange(len(dataset))
|
||||
|
||||
steps_per_epoch = math.ceil(len(dataset) / batch_size)
|
||||
batch_idx = np.array_split(batch_idx, steps_per_epoch)
|
||||
|
||||
for idx in batch_idx:
|
||||
batch = dataset[idx]
|
||||
batch = {k: np.array(v) for k, v in batch.items()}
|
||||
batch = shard(batch)
|
||||
|
||||
yield batch
|
||||
|
||||
@ -593,6 +606,7 @@ def main():
|
||||
dropout_rngs = jax.random.split(rng, jax.local_device_count())
|
||||
|
||||
train_batch_size = training_args.per_device_train_batch_size * jax.local_device_count()
|
||||
per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
|
||||
eval_batch_size = training_args.per_device_eval_batch_size * jax.local_device_count()
|
||||
|
||||
learning_rate_fn = create_learning_rate_fn(
|
||||
@ -633,7 +647,7 @@ def main():
|
||||
|
||||
p_eval_step = jax.pmap(eval_step, axis_name="batch")
|
||||
|
||||
metric = load_metric("seqeval")
|
||||
metric = evaluate.load("seqeval")
|
||||
|
||||
def get_labels(y_pred, y_true):
|
||||
# Transform predictions and references tensos to numpy arrays
|
||||
@ -721,34 +735,16 @@ def main():
|
||||
# evaluate
|
||||
for batch in tqdm(
|
||||
eval_data_collator(eval_dataset, eval_batch_size),
|
||||
total=len(eval_dataset) // eval_batch_size,
|
||||
total=math.ceil(len(eval_dataset) / eval_batch_size),
|
||||
desc="Evaluating ...",
|
||||
position=2,
|
||||
):
|
||||
labels = batch.pop("labels")
|
||||
predictions = p_eval_step(state, batch)
|
||||
predictions = np.array([pred for pred in chain(*predictions)])
|
||||
labels = np.array([label for label in chain(*labels)])
|
||||
labels[np.array(chain(*batch["attention_mask"])) == 0] = -100
|
||||
preds, refs = get_labels(predictions, labels)
|
||||
metric.add_batch(
|
||||
predictions=preds,
|
||||
references=refs,
|
||||
predictions = pad_shard_unpad(p_eval_step)(
|
||||
state, batch, min_device_batch=per_device_eval_batch_size
|
||||
)
|
||||
|
||||
# evaluate also on leftover examples (not divisible by batch_size)
|
||||
num_leftover_samples = len(eval_dataset) % eval_batch_size
|
||||
|
||||
# make sure leftover batch is evaluated on one device
|
||||
if num_leftover_samples > 0 and jax.process_index() == 0:
|
||||
# take leftover samples
|
||||
batch = eval_dataset[-num_leftover_samples:]
|
||||
batch = {k: np.array(v) for k, v in batch.items()}
|
||||
|
||||
labels = batch.pop("labels")
|
||||
predictions = eval_step(unreplicate(state), batch)
|
||||
labels = np.array(labels)
|
||||
labels[np.array(batch["attention_mask"]) == 0] = -100
|
||||
predictions = np.array(predictions)
|
||||
labels[np.array(chain(*batch["attention_mask"])) == 0] = -100
|
||||
preds, refs = get_labels(predictions, labels)
|
||||
metric.add_batch(
|
||||
predictions=preds,
|
||||
@ -784,28 +780,12 @@ def main():
|
||||
eval_loader = eval_data_collator(eval_dataset, eval_batch_size)
|
||||
for batch in tqdm(eval_loader, total=len(eval_dataset) // eval_batch_size, desc="Evaluating ...", position=2):
|
||||
labels = batch.pop("labels")
|
||||
predictions = p_eval_step(state, batch)
|
||||
predictions = np.array([pred for pred in chain(*predictions)])
|
||||
labels = np.array([label for label in chain(*labels)])
|
||||
predictions = pad_shard_unpad(p_eval_step)(state, batch, min_device_batch=per_device_eval_batch_size)
|
||||
predictions = np.array(predictions)
|
||||
labels[np.array(chain(*batch["attention_mask"])) == 0] = -100
|
||||
preds, refs = get_labels(predictions, labels)
|
||||
metric.add_batch(predictions=preds, references=refs)
|
||||
|
||||
# evaluate also on leftover examples (not divisible by batch_size)
|
||||
num_leftover_samples = len(eval_dataset) % eval_batch_size
|
||||
|
||||
# make sure leftover batch is evaluated on one device
|
||||
if num_leftover_samples > 0 and jax.process_index() == 0:
|
||||
# take leftover samples
|
||||
batch = eval_dataset[-num_leftover_samples:]
|
||||
batch = {k: np.array(v) for k, v in batch.items()}
|
||||
|
||||
labels = np.array(batch.pop("labels"))
|
||||
predictions = eval_step(unreplicate(state), batch)
|
||||
labels[np.array(batch["attention_mask"]) == 0] = -100
|
||||
preds, refs = get_labels(predictions, labels)
|
||||
metric.add_batch(predictions=preds, references=refs)
|
||||
|
||||
eval_metrics = compute_metrics()
|
||||
|
||||
if jax.process_index() == 0:
|
||||
|
@@ -40,7 +40,7 @@ import jax.numpy as jnp
import optax
import transformers
from flax import jax_utils
from flax.jax_utils import unreplicate
from flax.jax_utils import pad_shard_unpad, unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from huggingface_hub import Repository

@@ -368,7 +368,8 @@ def main():
    # Store some constant
    num_epochs = int(training_args.num_train_epochs)
    train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
    eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
    per_device_eval_batch_size = int(training_args.per_device_eval_batch_size)
    eval_batch_size = per_device_eval_batch_size * jax.device_count()
    steps_per_epoch = len(train_dataset) // train_batch_size
    total_train_steps = steps_per_epoch * num_epochs

@@ -398,7 +399,7 @@ def main():
        shuffle=False,
        num_workers=data_args.preprocessing_num_workers,
        persistent_workers=True,
        drop_last=True,
        drop_last=False,
        collate_fn=collate_fn,
    )

@@ -532,8 +533,9 @@ def main():
        eval_step_progress_bar = tqdm(total=eval_steps, desc="Evaluating...", position=2, leave=False)
        for batch in eval_loader:
            # Model forward
            batch = shard(batch)
            metrics = p_eval_step(state.params, batch)
            metrics = pad_shard_unpad(p_eval_step, static_return=True)(
                state.params, batch, min_device_batch=per_device_eval_batch_size
            )
            eval_metrics.append(metrics)

            eval_step_progress_bar.update(1)
@@ -23,3 +23,4 @@ torchvision
jiwer
librosa
torch < 1.12
evaluate >= 0.2.0
@@ -26,6 +26,7 @@ import datasets
import numpy as np
from datasets import DatasetDict, load_dataset

import evaluate
import transformers
from transformers import (
    AutoConfig,

@@ -44,7 +45,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")

@@ -315,7 +316,7 @@ def main():
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = datasets.load_metric("accuracy")
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
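The other pattern that recurs throughout these example diffs (and explains the new `evaluate>=0.2.0` requirements entries) is the migration from the deprecated `datasets.load_metric` to the standalone `evaluate` library. A tiny sketch of the replacement call, with toy predictions, is shown here; the `compute` interface itself is unchanged.

```py
# Sketch with invented values: evaluate.load replaces datasets.load_metric.
import evaluate

metric = evaluate.load("accuracy")
result = metric.compute(predictions=[0, 1, 1, 0], references=[0, 1, 0, 0])
print(result)  # {'accuracy': 0.75}
```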
@@ -43,7 +43,10 @@ cd ..
Having downloaded the COCO dataset manually, you should be able to load it with the `ydshieh/coco_dataset_script` dataset loading script:

```py
COCO_DIR = "data"
import os
import datasets

COCO_DIR = os.path.join(os.getcwd(), "data")
ds = datasets.load_dataset("ydshieh/coco_dataset_script", "2017", data_dir=COCO_DIR)
```

@@ -84,7 +87,7 @@ Finally, we can run the example script to train the model:
python examples/pytorch/contrastive-image-text/run_clip.py \
    --output_dir ./clip-roberta-finetuned \
    --model_name_or_path ./clip-roberta \
    --data_dir ./data \
    --data_dir $PWD/data \
    --dataset_name ydshieh/coco_dataset_script \
    --dataset_config_name=2017 \
    --image_column image_path \
@@ -54,7 +54,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt")
@ -19,7 +19,6 @@ import sys
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional
|
||||
|
||||
import datasets
|
||||
import numpy as np
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
@ -34,6 +33,7 @@ from torchvision.transforms import (
|
||||
ToTensor,
|
||||
)
|
||||
|
||||
import evaluate
|
||||
import transformers
|
||||
from transformers import (
|
||||
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
|
||||
@ -54,7 +54,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.21.0.dev0")
|
||||
check_min_version("4.22.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
|
||||
|
||||
@ -252,7 +252,7 @@ def main():
|
||||
id2label[str(i)] = label
|
||||
|
||||
# Load the accuracy metric from the datasets package
|
||||
metric = datasets.load_metric("accuracy")
|
||||
metric = evaluate.load("accuracy")
|
||||
|
||||
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
|
||||
# predictions and label_ids field) and has to return a dictionary string to float.
|
||||
|
@ -22,7 +22,7 @@ from pathlib import Path
|
||||
|
||||
import datasets
|
||||
import torch
|
||||
from datasets import load_dataset, load_metric
|
||||
from datasets import load_dataset
|
||||
from torch.utils.data import DataLoader
|
||||
from torchvision.transforms import (
|
||||
CenterCrop,
|
||||
@ -35,6 +35,7 @@ from torchvision.transforms import (
|
||||
)
|
||||
from tqdm.auto import tqdm
|
||||
|
||||
import evaluate
|
||||
import transformers
|
||||
from accelerate import Accelerator
|
||||
from accelerate.logging import get_logger
|
||||
@ -47,10 +48,13 @@ from transformers import (
|
||||
SchedulerType,
|
||||
get_scheduler,
|
||||
)
|
||||
from transformers.utils import get_full_repo_name, send_example_telemetry
|
||||
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
|
||||
from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.22.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
|
||||
@ -415,7 +419,7 @@ def main():
|
||||
accelerator.init_trackers("image_classification_no_trainer", experiment_config)
|
||||
|
||||
# Get the metric function
|
||||
metric = load_metric("accuracy")
|
||||
metric = evaluate.load("accuracy")
|
||||
|
||||
# Train!
|
||||
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
|
||||
@ -504,19 +508,11 @@ def main():
|
||||
break
|
||||
|
||||
model.eval()
|
||||
samples_seen = 0
|
||||
for step, batch in enumerate(eval_dataloader):
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch)
|
||||
predictions = outputs.logits.argmax(dim=-1)
|
||||
predictions, references = accelerator.gather((predictions, batch["labels"]))
|
||||
# If we are in a multiprocess environment, the last batch has duplicates
|
||||
if accelerator.num_processes > 1:
|
||||
if step == len(eval_dataloader) - 1:
|
||||
predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
|
||||
references = references[: len(eval_dataloader.dataset) - samples_seen]
|
||||
else:
|
||||
samples_seen += references.shape[0]
|
||||
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
|
||||
metric.add_batch(
|
||||
predictions=predictions,
|
||||
references=references,
|
||||
|
@@ -43,7 +43,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

@@ -48,7 +48,7 @@ Any model supported by the AutoModelForMaskedImageModeling API can be used.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@ -30,8 +30,9 @@ from itertools import chain
|
||||
from typing import Optional
|
||||
|
||||
import datasets
|
||||
from datasets import load_dataset, load_metric
|
||||
from datasets import load_dataset
|
||||
|
||||
import evaluate
|
||||
import transformers
|
||||
from transformers import (
|
||||
CONFIG_MAPPING,
|
||||
@ -53,7 +54,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.21.0.dev0")
|
||||
check_min_version("4.22.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
@ -492,7 +493,7 @@ def main():
|
||||
logits = logits[0]
|
||||
return logits.argmax(dim=-1)
|
||||
|
||||
metric = load_metric("accuracy")
|
||||
metric = evaluate.load("accuracy")
|
||||
|
||||
def compute_metrics(eval_preds):
|
||||
preds, labels = eval_preds
|
||||
|
@ -52,10 +52,13 @@ from transformers import (
|
||||
default_data_collator,
|
||||
get_scheduler,
|
||||
)
|
||||
from transformers.utils import get_full_repo_name, send_example_telemetry
|
||||
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
|
||||
from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.22.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
@ -594,10 +597,9 @@ def main():
|
||||
outputs = model(**batch)
|
||||
|
||||
loss = outputs.loss
|
||||
losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))
|
||||
losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))
|
||||
|
||||
losses = torch.cat(losses)
|
||||
losses = losses[: len(eval_dataset)]
|
||||
try:
|
||||
eval_loss = torch.mean(losses)
|
||||
perplexity = math.exp(eval_loss)
|
||||
|
@ -30,8 +30,9 @@ from itertools import chain
|
||||
from typing import Optional
|
||||
|
||||
import datasets
|
||||
from datasets import load_dataset, load_metric
|
||||
from datasets import load_dataset
|
||||
|
||||
import evaluate
|
||||
import transformers
|
||||
from transformers import (
|
||||
CONFIG_MAPPING,
|
||||
@ -52,7 +53,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.21.0.dev0")
|
||||
check_min_version("4.22.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
@ -515,7 +516,7 @@ def main():
|
||||
logits = logits[0]
|
||||
return logits.argmax(dim=-1)
|
||||
|
||||
metric = load_metric("accuracy")
|
||||
metric = evaluate.load("accuracy")
|
||||
|
||||
def compute_metrics(eval_preds):
|
||||
preds, labels = eval_preds
|
||||
|
@ -52,10 +52,13 @@ from transformers import (
|
||||
SchedulerType,
|
||||
get_scheduler,
|
||||
)
|
||||
from transformers.utils import get_full_repo_name, send_example_telemetry
|
||||
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
|
||||
from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.22.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
|
||||
@ -639,10 +642,9 @@ def main():
|
||||
outputs = model(**batch)
|
||||
|
||||
loss = outputs.loss
|
||||
losses.append(accelerator.gather(loss.repeat(args.per_device_eval_batch_size)))
|
||||
losses.append(accelerator.gather_for_metrics(loss.repeat(args.per_device_eval_batch_size)))
|
||||
|
||||
losses = torch.cat(losses)
|
||||
losses = losses[: len(eval_dataset)]
|
||||
try:
|
||||
eval_loss = torch.mean(losses)
|
||||
perplexity = math.exp(eval_loss)
|
||||
|
@ -47,7 +47,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.21.0.dev0")
|
||||
check_min_version("4.22.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
|
@ -47,7 +47,7 @@ from transformers.utils import PaddingStrategy, check_min_version, send_example_
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.21.0.dev0")
|
||||
check_min_version("4.22.0.dev0")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -31,10 +31,11 @@ from typing import Optional, Union

import datasets
import torch
from datasets import load_dataset, load_metric
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm.auto import tqdm

import evaluate
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
@ -51,9 +52,12 @@ from transformers import (
default_data_collator,
get_scheduler,
)
from transformers.utils import PaddingStrategy, get_full_repo_name, send_example_telemetry
from transformers.utils import PaddingStrategy, check_min_version, get_full_repo_name, send_example_telemetry

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.22.0.dev0")

logger = get_logger(__name__)
# You should update this to your particular problem to have better documentation of `model_type`
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
@ -514,7 +518,7 @@ def main():
accelerator.init_trackers("swag_no_trainer", experiment_config)

# Metrics
metric = load_metric("accuracy")
metric = evaluate.load("accuracy")

# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
@ -588,19 +592,11 @@ def main():
break

model.eval()
samples_seen = 0
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
outputs = model(**batch)
predictions = outputs.logits.argmax(dim=-1)
predictions, references = accelerator.gather((predictions, batch["labels"]))
# If we are in a multiprocess environment, the last batch has duplicates
if accelerator.num_processes > 1:
if step == len(eval_dataloader) - 1:
predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
references = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=predictions,
references=references,

@ -25,8 +25,9 @@ from dataclasses import dataclass, field
from typing import Optional

import datasets
from datasets import load_dataset, load_metric
from datasets import load_dataset

import evaluate
import transformers
from trainer_qa import QuestionAnsweringTrainer
from transformers import (
@ -48,7 +49,7 @@ from utils_qa import postprocess_qa_predictions

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@ -593,7 +594,7 @@ def main():
references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)

metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad")
metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")

def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)

@ -25,8 +25,9 @@ from dataclasses import dataclass, field
from typing import Optional

import datasets
from datasets import load_dataset, load_metric
from datasets import load_dataset

import evaluate
import transformers
from trainer_qa import QuestionAnsweringTrainer
from transformers import (
@ -47,7 +48,7 @@ from utils_qa import postprocess_qa_predictions_with_beam_search

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@ -625,7 +626,7 @@ def main():
references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)

metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad")
metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")

def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
@ -29,10 +29,11 @@ from pathlib import Path
import datasets
import numpy as np
import torch
from datasets import load_dataset, load_metric
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm.auto import tqdm

import evaluate
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
@ -55,7 +56,7 @@ from utils_qa import postprocess_qa_predictions_with_beam_search

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@ -680,7 +681,7 @@ def main():
references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)

metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
metric = evaluate.load("squad_v2" if args.version_2_with_negative else "squad")

def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
"""
@ -697,7 +698,7 @@ def main():
step = 0
# create a numpy array and fill it with -100.
logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float32)
# Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather
# Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather_for_metrics
for i, output_logit in enumerate(start_or_end_logits):  # populate columns
# We have to fill it such that we have to take the whole tensor and replace it on the newly created array
# And after every iteration we have to change the step
@ -875,11 +876,11 @@ def main():
end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100)
cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100)

all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy())
all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy())
all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy())
all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy())
all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy())
all_start_top_log_probs.append(accelerator.gather_for_metrics(start_top_log_probs).cpu().numpy())
all_start_top_index.append(accelerator.gather_for_metrics(start_top_index).cpu().numpy())
all_end_top_log_probs.append(accelerator.gather_for_metrics(end_top_log_probs).cpu().numpy())
all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy())
all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy())

max_len = max([x.shape[1] for x in all_end_top_log_probs])  # Get the max_length of the tensor

@ -935,11 +936,11 @@ def main():
end_top_index = accelerator.pad_across_processes(end_top_index, dim=1, pad_index=-100)
cls_logits = accelerator.pad_across_processes(cls_logits, dim=1, pad_index=-100)

all_start_top_log_probs.append(accelerator.gather(start_top_log_probs).cpu().numpy())
all_start_top_index.append(accelerator.gather(start_top_index).cpu().numpy())
all_end_top_log_probs.append(accelerator.gather(end_top_log_probs).cpu().numpy())
all_end_top_index.append(accelerator.gather(end_top_index).cpu().numpy())
all_cls_logits.append(accelerator.gather(cls_logits).cpu().numpy())
all_start_top_log_probs.append(accelerator.gather_for_metrics(start_top_log_probs).cpu().numpy())
all_start_top_index.append(accelerator.gather_for_metrics(start_top_index).cpu().numpy())
all_end_top_log_probs.append(accelerator.gather_for_metrics(end_top_log_probs).cpu().numpy())
all_end_top_index.append(accelerator.gather_for_metrics(end_top_index).cpu().numpy())
all_cls_logits.append(accelerator.gather_for_metrics(cls_logits).cpu().numpy())

max_len = max([x.shape[1] for x in all_end_top_log_probs])  # Get the max_length of the tensor

@ -29,10 +29,11 @@ from pathlib import Path
import datasets
import numpy as np
import torch
from datasets import load_dataset, load_metric
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm.auto import tqdm

import evaluate
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
@ -56,7 +57,7 @@ from utils_qa import postprocess_qa_predictions

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@ -696,7 +697,7 @@ def main():
references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)

metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
metric = evaluate.load("squad_v2" if args.version_2_with_negative else "squad")

# Create and fill numpy array of size len_of_validation_data * max_length_of_output_tensor
def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
@ -714,7 +715,7 @@ def main():
step = 0
# create a numpy array and fill it with -100.
logits_concat = np.full((len(dataset), max_len), -100, dtype=np.float64)
# Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather
# Now since we have create an array now we will populate it with the outputs gathered using accelerator.gather_for_metrics
for i, output_logit in enumerate(start_or_end_logits):  # populate columns
# We have to fill it such that we have to take the whole tensor and replace it on the newly created array
# And after every iteration we have to change the step
@ -900,8 +901,8 @@ def main():
start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

all_start_logits.append(accelerator.gather(start_logits).cpu().numpy())
all_end_logits.append(accelerator.gather(end_logits).cpu().numpy())
all_start_logits.append(accelerator.gather_for_metrics(start_logits).cpu().numpy())
all_end_logits.append(accelerator.gather_for_metrics(end_logits).cpu().numpy())

max_len = max([x.shape[1] for x in all_start_logits])  # Get the max_length of the tensor

@ -939,8 +940,8 @@ def main():
start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

all_start_logits.append(accelerator.gather(start_logits).cpu().numpy())
all_end_logits.append(accelerator.gather(end_logits).cpu().numpy())
all_start_logits.append(accelerator.gather_for_metrics(start_logits).cpu().numpy())
all_end_logits.append(accelerator.gather_for_metrics(end_logits).cpu().numpy())

max_len = max([x.shape[1] for x in all_start_logits])  # Get the max_length of the tensor
# concatenate the numpy array
@ -25,8 +25,9 @@ from dataclasses import dataclass, field
from typing import List, Optional, Tuple

import datasets
from datasets import load_dataset, load_metric
from datasets import load_dataset

import evaluate
import transformers
from trainer_seq2seq_qa import QuestionAnsweringSeq2SeqTrainer
from transformers import (
@ -44,7 +45,7 @@ from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@ -453,9 +454,8 @@ def main():
inputs, targets = preprocess_squad_batch(examples, question_column, context_column, answer_column)

model_inputs = tokenizer(inputs, max_length=max_seq_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_answer_length, padding=padding, truncation=True)
# Tokenize targets with text_target=...
labels = tokenizer(text_target=targets, max_length=max_answer_length, padding=padding, truncation=True)

# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
@ -479,9 +479,8 @@ def main():
return_overflowing_tokens=True,
return_offsets_mapping=True,
)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_answer_length, padding=padding, truncation=True)
# Tokenize targets with the `text_target` keyword argument
labels = tokenizer(text_target=targets, max_length=max_answer_length, padding=padding, truncation=True)

# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
@ -583,7 +582,7 @@ def main():
pad_to_multiple_of=8 if training_args.fp16 else None,
)

metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad")
metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")

def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)

@ -21,7 +21,6 @@ import sys
from dataclasses import dataclass, field
from typing import Optional

import datasets
import numpy as np
import torch
from datasets import load_dataset
@ -30,6 +29,7 @@ from torch import nn
from torchvision import transforms
from torchvision.transforms import functional

import evaluate
import transformers
from huggingface_hub import hf_hub_download
from transformers import (
@ -51,7 +51,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt")

@ -337,7 +337,7 @@ def main():
label2id = {v: str(k) for k, v in id2label.items()}

# Load the mean IoU metric from the datasets package
metric = datasets.load_metric("mean_iou")
metric = evaluate.load("mean_iou")

# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.

@ -24,13 +24,14 @@ from pathlib import Path
import datasets
import numpy as np
import torch
from datasets import load_dataset, load_metric
from datasets import load_dataset
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.transforms import functional
from tqdm.auto import tqdm

import evaluate
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
@ -44,10 +45,13 @@ from transformers import (
default_data_collator,
get_scheduler,
)
from transformers.utils import get_full_repo_name, send_example_telemetry
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.22.0.dev0")

logger = get_logger(__name__)

require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt")
@ -500,7 +504,7 @@ def main():
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

# Instantiate metric
metric = load_metric("mean_iou")
metric = evaluate.load("mean_iou")

# We need to initialize the trackers we use, and also store our configuration.
# We initialize the trackers only on main process because `accelerator.log`
@ -601,7 +605,6 @@ def main():

logger.info("***** Running evaluation *****")
model.eval()
samples_seen = 0
for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)):
with torch.no_grad():
outputs = model(**batch)
@ -611,15 +614,7 @@ def main():
)
predictions = upsampled_logits.argmax(dim=1)

predictions, references = accelerator.gather((predictions, batch["labels"]))

# If we are in a multiprocess environment, the last batch has duplicates
if accelerator.num_processes > 1:
if step == len(eval_dataloader) - 1:
predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
references = references[: len(eval_dataloader.dataset) - samples_seen]
else:
samples_seen += references.shape[0]
predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))

metric.add_batch(
predictions=predictions,
@ -28,8 +28,9 @@ from typing import Dict, List, Optional, Union
import datasets
import numpy as np
import torch
from datasets import DatasetDict, load_dataset, load_metric
from datasets import DatasetDict, load_dataset

import evaluate
import transformers
from transformers import (
AutoConfig,
@ -49,7 +50,7 @@ from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")

@ -305,13 +306,12 @@ class DataCollatorCTCWithPadding:
return_tensors="pt",
)

with self.processor.as_target_processor():
labels_batch = self.processor.pad(
label_features,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of_labels,
return_tensors="pt",
)
labels_batch = self.processor.pad(
labels=label_features,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of_labels,
return_tensors="pt",
)

# replace padding with -100 to ignore loss correctly
labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
@ -644,7 +644,7 @@ def main():
# instantiate a data collator and the trainer

# Define evaluation metrics during training, *i.e.* word error rate, character error rate
eval_metrics = {metric: load_metric(metric) for metric in data_args.eval_metrics}
eval_metrics = {metric: evaluate.load(metric) for metric in data_args.eval_metrics}

# for large datasets it is advised to run the preprocessing on a
# single machine first with ``args.preprocessing_only`` since there will mostly likely

@ -27,8 +27,9 @@ from typing import Any, Dict, List, Optional, Union

import datasets
import torch
from datasets import DatasetDict, load_dataset, load_metric
from datasets import DatasetDict, load_dataset

import evaluate
import transformers
from transformers import (
AutoConfig,
@ -47,7 +48,7 @@ from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")

@ -425,7 +426,7 @@ def main():
return

# 8. Load Metric
metric = load_metric("wer")
metric = evaluate.load("wer")

def compute_metrics(pred):
pred_ids = pred.predictions

@ -27,8 +27,9 @@ from typing import Optional
import datasets
import nltk  # Here to have a nice missing dependency error message early on
import numpy as np
from datasets import load_dataset, load_metric
from datasets import load_dataset

import evaluate
import transformers
from filelock import FileLock
from transformers import (
@ -51,7 +52,7 @@ from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")

@ -522,9 +523,8 @@ def main():
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)

# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
# Tokenize targets with the `text_target` keyword argument
labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)

# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
@ -599,7 +599,7 @@ def main():
)

# Metric
metric = load_metric("rouge")
metric = evaluate.load("rouge")

def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
@ -625,12 +625,9 @@ def main():
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)

result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
# Extract a few results from ROUGE
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}

result = {k: round(v * 100, 4) for k, v in result.items()}
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 4) for k, v in result.items()}
return result

# Initialize our Trainer
@ -30,10 +30,11 @@ import datasets
import nltk
import numpy as np
import torch
from datasets import load_dataset, load_metric
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm.auto import tqdm

import evaluate
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
@ -50,10 +51,13 @@ from transformers import (
SchedulerType,
get_scheduler,
)
from transformers.utils import get_full_repo_name, is_offline_mode, send_example_telemetry
from transformers.utils import check_min_version, get_full_repo_name, is_offline_mode, send_example_telemetry
from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.22.0.dev0")

logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")

@ -470,9 +474,8 @@ def main():
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)

# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
# Tokenize targets with the `text_target` keyword argument
labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)

# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
@ -584,7 +587,7 @@ def main():
accelerator.init_trackers("summarization_no_trainer", experiment_config)

# Metric
metric = load_metric("rouge")
metric = evaluate.load("rouge")

# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
@ -707,10 +710,7 @@ def main():
references=decoded_labels,
)
result = metric.compute(use_stemmer=True)
# Extract a few results from ROUGE
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}

result = {k: round(v, 4) for k, v in result.items()}
result = {k: round(v * 100, 4) for k, v in result.items()}

logger.info(result)

@ -25,8 +25,9 @@ from typing import Optional

import datasets
import numpy as np
from datasets import load_dataset, load_metric
from datasets import load_dataset

import evaluate
import transformers
from transformers import (
AutoConfig,
@ -47,7 +48,7 @@ from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

@ -480,9 +481,9 @@ def main():

# Get the metric function
if data_args.task_name is not None:
metric = load_metric("glue", data_args.task_name)
metric = evaluate.load("glue", data_args.task_name)
else:
metric = load_metric("accuracy")
metric = evaluate.load("accuracy")

# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.

@ -23,10 +23,11 @@ from pathlib import Path

import datasets
import torch
from datasets import load_dataset, load_metric
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm.auto import tqdm

import evaluate
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
@ -42,10 +43,13 @@ from transformers import (
default_data_collator,
get_scheduler,
)
from transformers.utils import get_full_repo_name, send_example_telemetry
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.22.0.dev0")

logger = get_logger(__name__)

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
@ -466,9 +470,9 @@ def main():

# Get the metric function
if args.task_name is not None:
metric = load_metric("glue", args.task_name)
metric = evaluate.load("glue", args.task_name)
else:
metric = load_metric("accuracy")
metric = evaluate.load("accuracy")

# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
@ -26,8 +26,9 @@ from typing import Optional

import datasets
import numpy as np
from datasets import load_dataset, load_metric
from datasets import load_dataset

import evaluate
import transformers
from transformers import (
AutoConfig,
@ -47,7 +48,7 @@ from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

@ -349,7 +350,7 @@ def main():
)

# Get the metric function
metric = load_metric("xnli")
metric = evaluate.load("xnli")

# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.

@ -27,8 +27,9 @@ from typing import Optional

import datasets
import numpy as np
from datasets import ClassLabel, load_dataset, load_metric
from datasets import ClassLabel, load_dataset

import evaluate
import transformers
from transformers import (
AutoConfig,
@ -48,7 +49,7 @@ from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")

@ -504,7 +505,7 @@ def main():
data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)

# Metrics
metric = load_metric("seqeval")
metric = evaluate.load("seqeval")

def compute_metrics(p):
predictions, labels = p

@ -28,10 +28,11 @@ from pathlib import Path

import datasets
import torch
from datasets import ClassLabel, load_dataset, load_metric
from datasets import ClassLabel, load_dataset
from torch.utils.data import DataLoader
from tqdm.auto import tqdm

import evaluate
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
@ -49,10 +50,13 @@ from transformers import (
default_data_collator,
get_scheduler,
)
from transformers.utils import get_full_repo_name, send_example_telemetry
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.22.0.dev0")

logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")

@ -580,7 +584,7 @@ def main():
accelerator.init_trackers("ner_no_trainer", experiment_config)

# Metrics
metric = load_metric("seqeval")
metric = evaluate.load("seqeval")

def get_labels(predictions, references):
# Transform predictions and references tensos to numpy arrays
@ -26,8 +26,9 @@ from typing import Optional

import datasets
import numpy as np
from datasets import load_dataset, load_metric
from datasets import load_dataset

import evaluate
import transformers
from transformers import (
AutoConfig,
@ -51,7 +52,7 @@ from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.21.0.dev0")
check_min_version("4.22.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt")

@ -118,12 +119,12 @@ class DataTrainingArguments:
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the metrics (sacreblue) on a jsonlines file."
"help": "An optional input evaluation data file to evaluate the metrics (sacrebleu) on a jsonlines file."
},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input test data file to evaluate the metrics (sacreblue) on a jsonlines file."},
metadata={"help": "An optional input test data file to evaluate the metrics (sacrebleu) on a jsonlines file."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
@ -443,9 +444,8 @@ def main():
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)

# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
# Tokenize targets with the `text_target` keyword argument
labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)

# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
@ -523,7 +523,7 @@ def main():
)

# Metric
metric = load_metric("sacrebleu")
metric = evaluate.load("sacrebleu")

def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]

@ -29,10 +29,11 @@ from pathlib import Path
import datasets
import numpy as np
import torch
from datasets import load_dataset, load_metric
from datasets import load_dataset
from torch.utils.data import DataLoader
from tqdm.auto import tqdm

import evaluate
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
@ -51,10 +52,13 @@ from transformers import (
default_data_collator,
get_scheduler,
)
from transformers.utils import get_full_repo_name, send_example_telemetry
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
from transformers.utils.versions import require_version

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.22.0.dev0")

logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt")

@ -452,9 +456,8 @@ def main():
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)

# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True)
# Tokenize targets with the `text_target` keyword argument
labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)

# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
@ -563,7 +566,7 @@ def main():
experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
accelerator.init_trackers("translation_no_trainer", experiment_config)

metric = load_metric("sacrebleu")
metric = evaluate.load("sacrebleu")

def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
@ -695,7 +698,7 @@ def main():
if args.with_tracking:
accelerator.log(
{
"blue": eval_metric["score"],
"bleu": eval_metric["score"],
"train_loss": total_loss.item() / len(train_dataloader),
"epoch": epoch,
"step": completed_steps,
@ -39,7 +39,7 @@ The source of the dataset is the GitHub dump available on Google's [BigQuery](ht
### Preprocessing
The raw dataset contains many duplicates. We deduplicated and filtered the dataset using the heuristics proposed in OpenAI's Codex [paper](https://arxiv.org/abs/2107.03374) and some new ones (a minimal sketch of the hash-based step is given after the list):

- exact deduplication using each file's hash
- exact deduplication using each file's hash after having removed whitespace
- near deduplication using MinHash and Jaccard similarity. MinHash with a Jaccard threshold (default=0.85) is first used to create duplicate clusters. These clusters are then reduced to unique files based on the exact Jaccard similarity. See `deduplicate_dataset` in `minhash_deduplication.py` for a detailed description.
- filtering files with max line length > 1000
- filtering files with mean line length > 100
@ -51,7 +51,7 @@ The raw dataset contains many duplicates. We deduplicated and filtered the datas
- filtering files that use the assignment operator `=` less than 5 times
- filtering files with ratio between number of characters and number of tokens after tokenization < 1.5 (the average ratio is 3.6)
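The two exact-deduplication heuristics boil down to hashing each file's content, optionally after stripping whitespace, and keeping one file per hash. The following is a minimal illustrative sketch of that idea only; it is not the repository's `minhash_deduplication.py`, and the toy `corpus` list is hypothetical:

```python
import hashlib


def content_hash(code: str, strip_whitespace: bool = False) -> str:
    """Hash a file's content, optionally ignoring all whitespace."""
    if strip_whitespace:
        code = "".join(code.split())
    return hashlib.sha256(code.encode("utf-8")).hexdigest()


def exact_deduplicate(files, strip_whitespace: bool = False):
    """Keep the first file seen for each distinct content hash."""
    seen, kept = set(), []
    for example in files:  # each example is a dict with a "content" field
        h = content_hash(example["content"], strip_whitespace)
        if h not in seen:
            seen.add(h)
            kept.append(example)
    return kept


# Toy usage: the second file differs only in indentation, so it is dropped.
corpus = [{"content": "def f(x):\n    return x"}, {"content": "def f(x):\n  return x"}]
print(len(exact_deduplicate(corpus, strip_whitespace=True)))  # -> 1
```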
The script to process the full dataset can be found in `scripts/preprocessing.py`. Executing the script on 16 vCPUs takes roughly 3h and removes 70% of the original dataset. The cleaned [train](https://huggingface.co/datasets/loubnabnl/codeparrot-clean-train-v2) and [validation](https://huggingface.co/datasets/loubnabnl/codeparrot-clean-valid-v2) splits are also available on the Hub if you want to skip this step or use the data for another project.
The script to process the full dataset can be found in `scripts/preprocessing.py`. Executing the script on 16 vCPUs takes roughly 3h and removes 70% of the original dataset. The cleaned [train](https://huggingface.co/datasets/codeparrot/codeparrot-clean-train-v2) and [validation](https://huggingface.co/datasets/codeparrot/codeparrot-clean-valid-v2) splits are also available on the Hub if you want to skip this step or use the data for another project.

To execute the preprocessing run the following command:
```bash
@ -62,12 +62,12 @@ python scripts/preprocessing.py \
During preprocessing the dataset is downloaded and stored locally as well as caches of the computations. Make sure you have more than 500GB free disk space to execute it.

### Pretokenization
The tokenization of the data might be slow during the training especially for small models. We provide code to pretokenize the data beforehand in `scripts/pretokenizing.py`, but this step is optional. The dataset is downloaded and stored locally and the tokenized data is pushed to the hub. The tokenized clean [train](https://huggingface.co/datasets/loubnabnl/tokenized-codeparrot-train) and [validation](https://huggingface.co/datasets/loubnabnl/tokenized-codeparrot-valid) datasets are available if you want to use them directly.
The tokenization of the data might be slow during the training especially for small models. We provide code to pretokenize the data beforehand in `scripts/pretokenizing.py`, but this step is optional. The dataset is downloaded and stored locally and the tokenized data is pushed to the hub. The tokenized clean [train](https://huggingface.co/datasets/codeparrot/tokenized-codeparrot-train) and [validation](https://huggingface.co/datasets/codeparrot/tokenized-codeparrot-valid) datasets are available if you want to use them directly.

To execute the pretokenization, for the clean train data for instance, run the following command:
```bash
python scripts/pretokenizing.py \
--dataset_name lvwerra/codeparrot-clean-train \
--dataset_name codeparrot/codeparrot-clean-train \
--tokenized_data_repo tokenized-codeparrot-train
```

@ -76,7 +76,7 @@ Before training a new model for code we create a new tokenizer that is efficient
```bash
python scripts/bpe_training.py \
--base_tokenizer gpt2 \
--dataset_name lvwerra/codeparrot-clean-train
--dataset_name codeparrot/codeparrot-clean-train
```

_Note:_ We originally trained the tokenizer on the unprocessed train split of the dataset `transformersbook/codeparrot-train`.
@ -87,14 +87,14 @@ The models are randomly initialized and trained from scratch. To initialize a ne
```bash
python scripts/initialize_model.py \
--config_name gpt2-large \
--tokenizer_name lvwerra/codeparrot \
--tokenizer_name codeparrot/codeparrot \
--model_name codeparrot \
--push_to_hub True
```
This will initialize a new model with the architecture and configuration of `gpt2-large` and use the tokenizer to appropriately size the input embeddings. Finally, the initialized model is pushed to the hub.

We can either pass the name of a text dataset or a pretokenized dataset which speeds up training a bit.
Now that the tokenizer and model are also ready we can start training the model. The main training script is built with `accelerate` to scale across a wide range of platforms and infrastructure scales. We train two models with [110M](https://huggingface.co/lvwerra/codeparrot-small/) and [1.5B](https://huggingface.co/lvwerra/codeparrot/) parameters for 25-30B tokens on a 16xA100 (40GB) machine which takes 1 day and 1 week, respectively.
Now that the tokenizer and model are also ready we can start training the model. The main training script is built with `accelerate` to scale across a wide range of platforms and infrastructure scales. We train two models with [110M](https://huggingface.co/codeparrot/codeparrot-small/) and [1.5B](https://huggingface.co/codeparrot/codeparrot/) parameters for 25-30B tokens on a 16xA100 (40GB) machine which takes 1 day and 1 week, respectively.

First you need to configure `accelerate` and login to Weights & Biases:

@ -113,7 +113,7 @@ If you want to train the small model you need to make some modifications:

```bash
accelerate launch scripts/codeparrot_training.py \
--model_ckpt lvwerra/codeparrot-small \
--model_ckpt codeparrot/codeparrot-small \
--train_batch_size 12 \
--valid_batch_size 12 \
--learning_rate 5e-4 \
@ -135,15 +135,15 @@ Instead of streaming the dataset from the hub you can also stream it from disk.
```bash
git lfs install
mkdir data
git -C "./data" clone https://huggingface.co/datasets/lvwerra/codeparrot-clean-train
git -C "./data" clone https://huggingface.co/datasets/lvwerra/codeparrot-clean-valid
git -C "./data" clone https://huggingface.co/datasets/codeparrot/codeparrot-clean-train
git -C "./data" clone https://huggingface.co/datasets/codeparrot/codeparrot-clean-valid
```

And then pass the paths to the datasets when we run the training script:

```bash
accelerate launch scripts/codeparrot_training.py \
--model_ckpt lvwerra/codeparrot-small \
--model_ckpt codeparrot/codeparrot-small \
--dataset_name_train ./data/codeparrot-clean-train \
--dataset_name_valid ./data/codeparrot-clean-valid \
--train_batch_size 12 \
@ -160,13 +160,13 @@ accelerate launch scripts/codeparrot_training.py \
For evaluating the language modeling loss on the validation set or any other dataset you can use the following command:
```bash
python scripts/validation_loss.py \
--model_ckpt lvwerra/codeparrot \
--dataset_name lvwerra/codeparrot-clean-valid
--model_ckpt codeparrot/codeparrot \
--dataset_name codeparrot/codeparrot-clean-valid
```
In addition we evaluate the model on OpenAI's _HumanEval_ benchmark. You can run the evaluation with the following command:

```bash
accelerate launch scripts/human_eval.py --model_ckpt lvwerra/codeparrot \
accelerate launch scripts/human_eval.py --model_ckpt codeparrot/codeparrot \
--do_sample True \
--temperature 0.2 \
--top_p 0.95 \
@ -194,9 +194,117 @@ The results as well as reference values are shown in the following table:
The numbers were obtained by sampling with `T = [0.2, 0.6, 0.8]` and picking the best value for each metric. Both CodeParrot 🦜 models are still underfitted and longer training would likely improve the performance.

## Demo
Give the model a shot yourself! There are two demos to interact with CodeParrot 🦜:
- [Code generation](https://huggingface.co/spaces/lvwerra/codeparrot-generation)
- [Code highlighting](https://huggingface.co/spaces/lvwerra/codeparrot-highlighting)
Give the model a shot yourself! There are three demos to interact with CodeParrot 🦜:
- [Code generation](https://huggingface.co/spaces/codeparrot/codeparrot-generation)
- [Code highlighting](https://huggingface.co/spaces/codeparrot/codeparrot-highlighting)
- [Comparison to other code models](https://huggingface.co/spaces/codeparrot/loubnabnl/code-generation-models)
## Training with Megatron
|
||||
[Megatron](https://github.com/NVIDIA/Megatron-LM) is a framework developed by NVIDIA for training large transformer models. While the CodeParrot code is easy to follow and modify to your needs the Megatron framework lets you train models faster. Below we explain how to use it.
|
||||
|
||||
### Setup
|
||||
You can pull an NVIDIA PyTorch Container that comes with all the required installations from [NGC](https://catalog.ngc.nvidia.com/orgs/nvidia/containers/pytorch). See [documentation](https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/index.html) for more details:
|
||||
|
||||
With the following Docker command you can run the container (`xx.xx` denotes your Docker version), and clone [Megatron repository](https://github.com/NVIDIA/Megatron-LM) into it:
|
||||
```bash
|
||||
docker run --gpus all -it --rm nvcr.io/nvidia/pytorch:xx.xx-py3
|
||||
git clone https://github.com/NVIDIA/Megatron-LM
|
||||
```
|
||||
|
||||
You also need to add the vocabulary file and merges table of the tokenizer that you trained on code into the container. You can also find these files in [vocab.json](https://huggingface.co/codeparrot/codeparrot/raw/main/vocab.json) and [merges.txt](https://huggingface.co/codeparrot/codeparrot/raw/main/merges.txt).
|
||||
```bash
|
||||
sudo docker cp vocab.json CONTAINER_ID:/workspace/Megatron-LM
|
||||
sudo docker cp merges.txt CONTAINER_ID:/workspace/Megatron-LM
|
||||
```
|
||||
|
||||
### Data preprocessing
|
||||
The training data requires preprocessing. First, you need to convert it into a loose json format, with one json containing a text sample per line. In python this can be done this way:
|
||||
```python
|
||||
from datasets import load_dataset
|
||||
|
||||
train_data = load_dataset('codeparrot/codeparrot-clean-train', split='train')
|
||||
train_data.to_json("codeparrot_data.json", lines=True)
|
||||
```
|
||||
|
||||
The data is then tokenized, shuffled and processed into a binary format for training using the following command:
|
||||
```bash
|
||||
pip install nltk
|
||||
cd Megatron-LM
|
||||
python tools/preprocess_data.py \
|
||||
--input codeparrot_data.json \
|
||||
--output-prefix codeparrot \
|
||||
--vocab vocab.json \
|
||||
--dataset-impl mmap \
|
||||
--tokenizer-type GPT2BPETokenizer \
|
||||
--merge-file merges.txt \
|
||||
--json-keys content \
|
||||
--workers 32 \
|
||||
--chunk-size 25 \
|
||||
--append-eod
|
||||
```
|
||||
This outputs two files, `codeparrot_content_document.idx` and `codeparrot_content_document.bin`, which are used in the training.

### Training

You can configure the model architecture and training parameters as shown below, or put them in a bash script that you run. The following command runs the pretraining of the 110M-parameter CodeParrot model on 8 GPUs, with the same settings as before. Note that the data is partitioned by default into a 969:30:1 ratio for training/validation/test sets.

```bash
GPUS_PER_NODE=8
MASTER_ADDR=localhost
MASTER_PORT=6001
NNODES=1
NODE_RANK=0
WORLD_SIZE=$(($GPUS_PER_NODE*$NNODES))
DISTRIBUTED_ARGS="--nproc_per_node $GPUS_PER_NODE --nnodes $NNODES --node_rank $NODE_RANK --master_addr $MASTER_ADDR --master_port $MASTER_PORT"
CHECKPOINT_PATH=/workspace/Megatron-LM/experiments/codeparrot-small
VOCAB_FILE=vocab.json
MERGE_FILE=merges.txt
DATA_PATH=codeparrot_content_document
GPT_ARGS="--num-layers 12
--hidden-size 768
--num-attention-heads 12
--seq-length 1024
--max-position-embeddings 1024
--micro-batch-size 12
--global-batch-size 192
--lr 0.0005
--train-iters 150000
--lr-decay-iters 150000
--lr-decay-style cosine
--lr-warmup-iters 2000
--weight-decay .1
--adam-beta2 .999
--fp16
--log-interval 10
--save-interval 2000
--eval-interval 200
--eval-iters 10
"
TENSORBOARD_ARGS="--tensorboard-dir experiments/tensorboard"
python3 -m torch.distributed.launch $DISTRIBUTED_ARGS \
        pretrain_gpt.py \
        --tensor-model-parallel-size 1 \
        --pipeline-model-parallel-size 1 \
        $GPT_ARGS \
        --vocab-file $VOCAB_FILE \
        --merge-file $MERGE_FILE \
        --save $CHECKPOINT_PATH \
        --load $CHECKPOINT_PATH \
        --data-path $DATA_PATH \
        $TENSORBOARD_ARGS
```

The training takes almost 12 hours in this setting.

### Convert model to `transformers`

After training we want to use the model in `transformers`, e.g. to evaluate it on HumanEval. You can convert it to a `transformers` checkpoint following [this](https://huggingface.co/nvidia/megatron-gpt2-345m) tutorial. For instance, after training has finished you can copy the weights of the last iteration 150k and convert the `model_optim_rng.pt` file to a `pytorch_model.bin` file that is supported by `transformers`.

```bash
mkdir -p nvidia/megatron-codeparrot-small
sudo docker cp CONTAINER_ID:/workspace/Megatron-LM/experiments/codeparrot-small/iter_0150000/mp_rank_00/model_optim_rng.pt nvidia/megatron-codeparrot-small
git clone https://github.com/huggingface/transformers.git
git clone https://github.com/NVIDIA/Megatron-LM.git
export PYTHONPATH=Megatron-LM
python transformers/src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py nvidia/megatron-codeparrot-small/model_optim_rng.pt
```

Be careful: if you plan to load the tokenizer from the converted checkpoint directory, you will need to replace the generated vocabulary file and merges table with the original ones after the conversion.

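As a quick sanity check after conversion, a minimal sketch along these lines loads the converted weights together with the original tokenizer (assuming the conversion wrote `config.json` and `pytorch_model.bin` into the output folder above; using `codeparrot/codeparrot` as the tokenizer source is an assumption):

```python
# Minimal sketch, not part of the original scripts: load the converted
# checkpoint and the original tokenizer, then generate a short completion.
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("nvidia/megatron-codeparrot-small")  # local output dir of the conversion
tokenizer = AutoTokenizer.from_pretrained("codeparrot/codeparrot")  # original tokenizer (assumed)

inputs = tokenizer("def hello_world():", return_tensors="pt")
outputs = model.generate(**inputs, do_sample=True, temperature=0.2, max_new_tokens=32)
print(tokenizer.decode(outputs[0]))
```
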
## Further Resources

A detailed description of the project can be found in the chapter "Training Transformers from Scratch" in the upcoming O'Reilly book [Natural Language Processing with Transformers](https://learning.oreilly.com/library/view/natural-language-processing/9781098103231/).

@@ -9,16 +9,16 @@ class TrainingArguments:
    """

    model_ckpt: Optional[str] = field(
        default="lvwerra/codeparrot", metadata={"help": "Model name or path of model to be trained."}
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="lvwerra/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="lvwerra/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})

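For context on how these dataclasses are consumed, here is a minimal sketch of the `HfArgumentParser` pattern the scripts rely on, using a hypothetical trimmed-down dataclass rather than the full `TrainingArguments` above:

```python
# Hypothetical, trimmed-down illustration: dataclass fields become CLI flags
# via transformers' HfArgumentParser.
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class MiniTrainingArguments:
    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})


if __name__ == "__main__":
    parser = HfArgumentParser(MiniTrainingArguments)
    # e.g. `python this_script.py --train_batch_size 8`
    (args,) = parser.parse_args_into_dataclasses()
    print(args)
```
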
@@ -60,10 +60,10 @@ class EvaluationArguments:
    """

    model_ckpt: Optional[str] = field(
        default="lvwerra/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="lvwerra/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
@@ -80,7 +80,7 @@ class HumanEvalArguments:
    """

    model_ckpt: Optional[str] = field(
        default="lvwerra/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
@@ -154,7 +154,7 @@ class PreprocessingArguments:
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="lvwerra/codeparrot",
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
@@ -193,10 +193,10 @@ class PretokenizationArguments:
    """

    tokenizer_dir: Optional[str] = field(
        default="lvwerra/codeparrot", metadata={"help": "Name or path to the tokenizer."}
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="lvwerra/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
@@ -214,7 +214,7 @@ class InitializationArguments:
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="lvwerra/codeparrot", metadata={"help": "Tokenizer attached to model."}
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})

@@ -3,6 +3,7 @@ import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
@@ -15,9 +16,12 @@ from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser


PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(example["content"].strip().encode("utf-8")).hexdigest()}
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):

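A standalone sketch of what the whitespace-normalized hash buys (the example snippets are made up): two files that differ only in formatting now hash to the same value, so exact deduplication also catches reformatted copies:

```python
# Standalone illustration: the whitespace-stripped MD5 hash is identical for
# two snippets that differ only in formatting.
import hashlib
import re

PATTERN = re.compile(r"\s+")


def get_hash(content: str) -> str:
    return hashlib.md5(re.sub(PATTERN, "", content).encode("utf-8")).hexdigest()


a = "def add(a, b):\n    return a + b\n"
b = "def add(a,b):  return a+b"
assert get_hash(a) == get_hash(b)  # formatting differences are ignored
print(get_hash(a))
```
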
@@ -40,7 +40,7 @@ kiwisolver==1.2.0
lockfile==0.12.2
MarkupSafe==1.1.1
matplotlib==3.3.1
mistune==0.8.4
mistune==2.0.3
msgpack==0.6.2
nbclient==0.5.0
nbconvert==6.0.1

@@ -304,13 +304,12 @@ class DataCollatorCTCWithPadding:
            return_tensors="pt",
        )

        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                pad_to_multiple_of=self.pad_to_multiple_of_labels,
                return_tensors="pt",
            )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

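The same API migration recurs in several of these speech examples: label features are padded via the `labels=` keyword of `processor.pad` instead of wrapping the call in the deprecated `as_target_processor()` context manager. A condensed sketch of the new collator pattern, with class and field names assumed to mirror the surrounding diff:

```python
# Condensed sketch of the pattern shown above; names mirror the diff and are
# assumptions, not a drop-in replacement for the example scripts.
from dataclasses import dataclass
from typing import Dict, List, Union

import torch
from transformers import Wav2Vec2Processor


@dataclass
class MinimalCTCCollator:
    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        input_features = [{"input_values": f["input_values"]} for f in features]
        label_features = [{"input_ids": f["labels"]} for f in features]

        batch = self.processor.pad(input_features, padding=self.padding, return_tensors="pt")
        # New style: pass label features through the `labels` keyword instead of
        # using the `with self.processor.as_target_processor():` context manager.
        labels_batch = self.processor.pad(labels=label_features, padding=self.padding, return_tensors="pt")

        # Replace padding with -100 so padded positions are ignored by the CTC loss.
        batch["labels"] = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        return batch
```
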
@@ -301,13 +301,12 @@ class DataCollatorCTCWithPadding:
            return_tensors="pt",
        )

        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                pad_to_multiple_of=self.pad_to_multiple_of_labels,
                return_tensors="pt",
            )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

@@ -437,13 +437,12 @@ def main():
            table=tables, query=questions, max_length=data_args.max_source_length, padding=padding, truncation=True
        )

        with tokenizer.as_target_tokenizer():
            labels = tokenizer(
                answer=[", ".join(answer) for answer in answers],
                max_length=max_target_length,
                padding=padding,
                truncation=True,
            )
        labels = tokenizer(
            answer=[", ".join(answer) for answer in answers],
            max_length=max_target_length,
            padding=padding,
            truncation=True,
        )

        # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
        # padding in the loss.

@@ -413,13 +413,12 @@ def main():
            table=tables, query=questions, max_length=data_args.max_source_length, padding=padding, truncation=True
        )

        with tokenizer.as_target_tokenizer():
            labels = tokenizer(
                answer=[", ".join(answer) for answer in answers],
                max_length=max_target_length,
                padding=padding,
                truncation=True,
            )
        labels = tokenizer(
            answer=[", ".join(answer) for answer in answers],
            max_length=max_target_length,
            padding=padding,
            truncation=True,
        )

        # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
        # padding in the loss.

@@ -23,8 +23,6 @@ import re
# Original: https://github.com/google-research/tapas/master/wikisql_utils.py
from typing import Any, List, Text

import six


EMPTY_ANSWER = "none"
EMPTY_ANSWER_AGG = "none"
@@ -49,7 +47,7 @@ def convert_to_float(value):
        return value
    if isinstance(value, int):
        return float(value)
    if not isinstance(value, six.string_types):
    if not isinstance(value, str):
        raise ValueError("Argument value is not a string. Can't parse it as float")
    sanitized = value

@@ -40,7 +40,7 @@ kiwisolver==1.2.0
lockfile==0.12.2
MarkupSafe==1.1.1
matplotlib==3.3.1
mistune==0.8.4
mistune==2.0.3
msgpack==0.6.2
nbclient==0.5.0
nbconvert==6.0.1

@@ -30,7 +30,7 @@ from transformers import (
if is_apex_available():
    from apex import amp

if version.parse(torch.__version__) >= version.parse("1.6"):
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

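For readers wondering why the extra `base_version` indirection matters: pre-release PyTorch builds carry versions such as `1.6.0a0+...`, which `packaging` orders before `1.6`, so the old check failed on them. A small standalone sketch (the version string is a made-up example):

```python
# Standalone illustration of the version-check fix; the version string below
# is a made-up example of a PyTorch nightly build.
from packaging import version

nightly = "1.6.0a0+9f5eeab"

# Old check: the pre-release segment makes this compare as older than 1.6.
print(version.parse(nightly) >= version.parse("1.6"))  # False

# New check: base_version strips pre-release/local parts, leaving "1.6.0".
print(version.parse(version.parse(nightly).base_version) >= version.parse("1.6"))  # True
```
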
@@ -266,14 +266,13 @@ class DataCollatorCTCWithPadding:
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        with self.processor.as_target_processor():
            labels_batch = self.processor.pad(
                label_features,
                padding=self.padding,
                max_length=self.max_length_labels,
                pad_to_multiple_of=self.pad_to_multiple_of_labels,
                return_tensors="pt",
            )
        labels_batch = self.processor.pad(
            labels=label_features,
            padding=self.padding,
            max_length=self.max_length_labels,
            pad_to_multiple_of=self.pad_to_multiple_of_labels,
            return_tensors="pt",
        )

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

@@ -419,9 +418,10 @@ def main():
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."

        batch["input_values"] = processor(batch["speech"], sampling_rate=batch["sampling_rate"][0]).input_values
        with processor.as_target_processor():
            batch["labels"] = processor(batch[data_args.target_text_column]).input_ids
        processed_batch = processor(
            audio=batch["speech"], text=batch[data_args.target_text_column], sampling_rate=batch["sampling_rate"][0]
        )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(

Some files were not shown because too many files have changed in this diff.