Mirror of https://github.com/huggingface/transformers.git
Synced 2025-11-04 20:14:36 +08:00
Compare commits: 323 commits (update_lla ... keras3_com)
| SHA1 | Author | Date | |
|---|---|---|---|
| a8cf3e0371 | |||
| 39407389e4 | |||
| 1c286be508 | |||
| dec84b3211 | |||
| 74cae670ce | |||
| e2b6df7971 | |||
| deb72cb6d9 | |||
| d269c4b2d7 | |||
| 70a127a37a | |||
| c817c17dbe | |||
| 6af3ce7757 | |||
| 7e876dca54 | |||
| e737446ee6 | |||
| 1e20931765 | |||
| 1a585c1222 | |||
| 3060899be5 | |||
| 050e0b44f6 | |||
| 52c37882fb | |||
| 388fd314d8 | |||
| 0ede762636 | |||
| bb1d0d0d9e | |||
| e2b16485f3 | |||
| 9e5c28c573 | |||
| dde6c427a1 | |||
| 73de5108e1 | |||
| 2788f8d8d5 | |||
| 131a528be0 | |||
| 17506d1256 | |||
| fe44b1f1a9 | |||
| 3ed3e3190c | |||
| 815ea8e8a2 | |||
| 93766251cb | |||
| ec43d6870a | |||
| 749f94e460 | |||
| c7f076a00e | |||
| 371fb0b7dc | |||
| 230ac352d8 | |||
| f4db565b69 | |||
| 9936143014 | |||
| 78172dcdb7 | |||
| 5e4ef0a0f6 | |||
| a49f4acab3 | |||
| 680c610f97 | |||
| 4b759da8be | |||
| e660424717 | |||
| e5079b0b2a | |||
| 35478182ce | |||
| 67b1335cb9 | |||
| 54d0b1c278 | |||
| 4850aaba6f | |||
| 4b4b864224 | |||
| c0a354d8d7 | |||
| 7e35f37071 | |||
| 39acfe84ba | |||
| 0f59d2f173 | |||
| 417bb91484 | |||
| 5cec306cdc | |||
| 921a6bf26e | |||
| 44127ec667 | |||
| b911c1f10f | |||
| e49c385266 | |||
| 6ff109227b | |||
| accccdd008 | |||
| 0676d992a5 | |||
| 9f18cc6df0 | |||
| 7ea21f1f03 | |||
| 5e620a92cf | |||
| e96c1de191 | |||
| 8d8970efdd | |||
| 235be08569 | |||
| df5c5c62ae | |||
| 5fa66df3f3 | |||
| ffd426eef8 | |||
| 80377eb018 | |||
| ce0bbd5101 | |||
| 94c765380c | |||
| d6c3a3f137 | |||
| 6757ed28ce | |||
| aa7ab98e72 | |||
| e0b617d192 | |||
| e366937587 | |||
| 79e7655906 | |||
| 3b720ad9a5 | |||
| 7f07c356a4 | |||
| b31905d1f6 | |||
| 3ac9945e56 | |||
| 4c5ed1d0c9 | |||
| fe8d1302c7 | |||
| 56be5e80e6 | |||
| 307a7d0be8 | |||
| 633215ba58 | |||
| 0ea42ef0f9 | |||
| 79b79ae2db | |||
| f7595760ed | |||
| 58e7f9bb2f | |||
| 47500b1d72 | |||
| 9f1f11a2e7 | |||
| c99f254763 | |||
| fc71e815f6 | |||
| 5324bf9c07 | |||
| 52746922b0 | |||
| 44b5506d29 | |||
| 0410a29a2d | |||
| f84d85ba67 | |||
| 06f561687c | |||
| 4d806dba8c | |||
| 75336c1794 | |||
| 7fc80724da | |||
| 9660e27cd0 | |||
| 9270ab0827 | |||
| 87714b3d11 | |||
| d6392482bd | |||
| acd653164b | |||
| ba52dec47f | |||
| da1d0d404f | |||
| 788730c670 | |||
| ac975074e6 | |||
| 28e2887a1a | |||
| b242d0f297 | |||
| e5c12c03b7 | |||
| 3e68944cc4 | |||
| b7e6d120c1 | |||
| fdb85be40f | |||
| df40edfb00 | |||
| 96f9caa10b | |||
| 235e5d4991 | |||
| a502b0d427 | |||
| 3c15fd1990 | |||
| 1d63b0ec36 | |||
| e0d2e69582 | |||
| e739a361bc | |||
| 2b5d5ead53 | |||
| 1da1302ec8 | |||
| 4d4febb7aa | |||
| a0f7c4a43d | |||
| ede09d671d | |||
| facc66457e | |||
| 73893df864 | |||
| 5a551df92b | |||
| c0b9db0914 | |||
| 269078a7eb | |||
| a2b1e1df49 | |||
| 7edf8bfafd | |||
| bcd0a91a01 | |||
| 2c658b5a42 | |||
| abd4cbd775 | |||
| 7b6324e18e | |||
| 95900916ab | |||
| cf62539a29 | |||
| 0ad4e7e6da | |||
| 9ddbb696d2 | |||
| 29f1aee3b6 | |||
| 510270af34 | |||
| fe41647afc | |||
| 62ab32b299 | |||
| 083e36923a | |||
| af8acc4760 | |||
| bd50402b56 | |||
| f2ad4b537b | |||
| dfbd209c25 | |||
| 30e92ea323 | |||
| 0b9c934575 | |||
| fdd86eed3b | |||
| 6336a7f7d6 | |||
| 93170298d1 | |||
| 1fb3c23b41 | |||
| 7a757bb694 | |||
| 2ca73e5ee3 | |||
| 0864dd3beb | |||
| cad1b1192b | |||
| 74a3cebfa5 | |||
| ce31508134 | |||
| c832bcb812 | |||
| 334a6d18a1 | |||
| 59499bbe8b | |||
| 1d7f406e19 | |||
| 307cf3a2ab | |||
| b09912c8f4 | |||
| 27b752bcf1 | |||
| 5c30dd40e7 | |||
| e11e26df93 | |||
| f70db28322 | |||
| 35551f9a0f | |||
| 29c94808ea | |||
| c13a43aaf2 | |||
| a6d178e238 | |||
| 2098d343cc | |||
| 181f85da24 | |||
| 80e9f76857 | |||
| 7293fdc5b9 | |||
| 623432dcc9 | |||
| a761d6e9a0 | |||
| b8db265bc6 | |||
| fe1c16e95a | |||
| b406c4d261 | |||
| baabd3877a | |||
| 3bc50d81e6 | |||
| 1ddc4fa60e | |||
| 8aca43bdb3 | |||
| 7f6a804d30 | |||
| 4151fbb49c | |||
| b2c63c79c3 | |||
| c651eb23c3 | |||
| b54993aa94 | |||
| c5be38cd27 | |||
| d2a980ec74 | |||
| 7f04373865 | |||
| c770600fde | |||
| f5c9738f61 | |||
| 0145c6825e | |||
| 82cc0a79ac | |||
| f93c1e9ece | |||
| 851a4f7088 | |||
| 81b7981830 | |||
| ade7af9361 | |||
| 0e6794ff1c | |||
| 8eb9e29d8d | |||
| 38e2633f80 | |||
| f18c95b49c | |||
| 87e217d065 | |||
| 93f2de858b | |||
| e66984f995 | |||
| f31af3927f | |||
| e4280d650c | |||
| ee29261555 | |||
| dbf7bfafa7 | |||
| dc68a39c81 | |||
| 25b0f2033b | |||
| d1a00f9dd0 | |||
| ffbcfc0166 | |||
| 638d49983f | |||
| 5330b83bc5 | |||
| 913d03dc5e | |||
| d903abfccc | |||
| fe3ce061c4 | |||
| b074461ef0 | |||
| 93f31e0e78 | |||
| 12b50c6130 | |||
| 651408a077 | |||
| acb5b4aff5 | |||
| 6b39470b74 | |||
| fd65aa9818 | |||
| 5603fad247 | |||
| 4989e73e2f | |||
| 69c9b89fcb | |||
| 85fde09c97 | |||
| 1394e08cf0 | |||
| 06343b0633 | |||
| 1ac599d90f | |||
| b71c38a094 | |||
| 2e72bbab2c | |||
| e6522e49a7 | |||
| a0633c4483 | |||
| a85ea4b19a | |||
| 48ba1e074f | |||
| 453079c7f8 | |||
| 3d1a7bf476 | |||
| 64e21ca2a4 | |||
| 1e0e2dd376 | |||
| 7ddb21b4db | |||
| 72f531ab6b | |||
| cc0dc24bc9 | |||
| 186c077513 | |||
| 2fc33ebead | |||
| 303c1d69f3 | |||
| 067c4a310d | |||
| 250032e974 | |||
| a53a0c5159 | |||
| 5468ab3555 | |||
| fe472b1db4 | |||
| 73bc0c9e88 | |||
| 78f6ed6c70 | |||
| a4616c6767 | |||
| b86c54d9ff | |||
| 4309abedbc | |||
| ee4fb326c7 | |||
| e107ae364e | |||
| d71fa9f618 | |||
| 721d1c8ca6 | |||
| 2ac5b9325e | |||
| 8017a59091 | |||
| eb79b55bf3 | |||
| 7b139023c3 | |||
| 04af4b90d6 | |||
| 1af766e104 | |||
| 2422c38de6 | |||
| 3b59621310 | |||
| 2dc29cfc98 | |||
| 20abdacbef | |||
| 68ae3be7f5 | |||
| 9dc8fe1b32 | |||
| 210e38d83f | |||
| b97cab7e6d | |||
| 8f577dca4f | |||
| 7b998cabee | |||
| 9d87cd2ce2 | |||
| 7ee995fd9c | |||
| ed115b3473 | |||
| e1c3ac2551 | |||
| 00dc856233 | |||
| 68afca3e69 | |||
| 7e9f10ac94 | |||
| 9dd58c53dd | |||
| fd685cfd59 | |||
| 184f60dcec | |||
| cf32c94135 | |||
| 740cd93590 | |||
| 51a98c40ee | |||
| e38348ae8f | |||
| c8b6052ff6 | |||
| c5037b459e | |||
| cf2a3f37bf | |||
| 3258ff9330 | |||
| 791ec370d1 | |||
| c5d7754b11 | |||
| 9999b73968 | |||
| bc086a2516 | |||
| e9adb0c9cf | |||
| 085ea7e56c | |||
| 7ecd229ba4 | |||
| ced9fd86f5 | |||
| 0e402e1478 | |||
| a5bee89c9d | |||
@@ -157,11 +157,10 @@ jobs:
                command: pip freeze | tee installed.txt
            - store_artifacts:
                  path: ~/transformers/installed.txt
            - run: black --check examples tests src utils
            - run: ruff examples tests src utils
            - run: ruff check examples tests src utils
            - run: ruff format tests src utils --check
            - run: python utils/custom_init_isort.py --check_only
            - run: python utils/sort_auto_mappings.py --check_only
            - run: doc-builder style src/transformers docs/source --max_len 119 --check_only --path_to_docs docs/source
            - run: python utils/check_doc_toc.py

    check_repository_consistency:

@@ -15,7 +15,6 @@

import argparse
import copy
import glob
import os
import random
from dataclasses import dataclass
@@ -239,7 +238,7 @@ class CircleCIJob:

        py_command = f'import os; fp = open("reports/{self.job_name}/summary_short.txt"); failed = os.linesep.join([x for x in fp.read().split(os.linesep) if x.startswith("ERROR ")]); fp.close(); fp = open("summary_short.txt", "w"); fp.write(failed); fp.close()'
        check_test_command += f"$(python3 -c '{py_command}'); "
        check_test_command += f'cat summary_short.txt; echo ""; exit -1; '
        check_test_command += 'cat summary_short.txt; echo ""; exit -1; '

        # Deeal with failed tests
        check_test_command += f'elif [ -s reports/{self.job_name}/failures_short.txt ]; '
@@ -249,7 +248,7 @@ class CircleCIJob:

        py_command = f'import os; fp = open("reports/{self.job_name}/summary_short.txt"); failed = os.linesep.join([x for x in fp.read().split(os.linesep) if x.startswith("FAILED ")]); fp.close(); fp = open("summary_short.txt", "w"); fp.write(failed); fp.close()'
        check_test_command += f"$(python3 -c '{py_command}'); "
        check_test_command += f'cat summary_short.txt; echo ""; exit -1; '
        check_test_command += 'cat summary_short.txt; echo ""; exit -1; '

        check_test_command += f'elif [ -s reports/{self.job_name}/stats.txt ]; then echo "All tests pass!"; '

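The embedded `py_command` one-liners above are dense; a minimal standalone sketch of what they do, assuming a hypothetical `job_name` and using the `"FAILED "` prefix from the second hunk, is:

```python
import os

# Hypothetical job name for illustration; in the config it comes from self.job_name.
job_name = "tests_torch"

# Read the short pytest summary produced by the job ...
with open(f"reports/{job_name}/summary_short.txt") as fp:
    lines = fp.read().split(os.linesep)

# ... keep only the failure lines ("ERROR " in the first hunk, "FAILED " in the second) ...
failed = os.linesep.join(line for line in lines if line.startswith("FAILED "))

# ... and write them to the file that the surrounding shell command cats before exiting non-zero.
with open("summary_short.txt", "w") as fp:
    fp.write(failed)
```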
@@ -283,7 +282,7 @@ torch_and_tf_job = CircleCIJob(
        "pip install --upgrade --upgrade-strategy eager pip",
        "pip install -U --upgrade-strategy eager .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]",
        "pip install -U --upgrade-strategy eager tensorflow_probability",
        "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate",
        "pip install -U --upgrade-strategy eager -e git+https://github.com/huggingface/accelerate@main#egg=accelerate",
    ],
    marker="is_pt_tf_cross_test",
    pytest_options={"rA": None, "durations": 0},
@@ -297,7 +296,7 @@ torch_and_flax_job = CircleCIJob(
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
        "pip install -U --upgrade-strategy eager --upgrade pip",
        "pip install -U --upgrade-strategy eager .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision]",
        "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate",
        "pip install -U --upgrade-strategy eager -e git+https://github.com/huggingface/accelerate@main#egg=accelerate",
    ],
    marker="is_pt_flax_cross_test",
    pytest_options={"rA": None, "durations": 0},
@@ -310,7 +309,7 @@ torch_job = CircleCIJob(
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time",
        "pip install --upgrade --upgrade-strategy eager pip",
        "pip install -U --upgrade-strategy eager .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]",
        "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate",
        "pip install -U --upgrade-strategy eager -e git+https://github.com/huggingface/accelerate@main#egg=accelerate",
    ],
    parallelism=1,
    pytest_num_workers=6,
@@ -397,13 +396,16 @@ custom_tokenizers_job = CircleCIJob(

examples_torch_job = CircleCIJob(
    "examples_torch",
    additional_env={"OMP_NUM_THREADS": 8},
    cache_name="torch_examples",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
        "pip install --upgrade --upgrade-strategy eager pip",
        "pip install -U --upgrade-strategy eager .[sklearn,torch,sentencepiece,testing,torch-speech]",
        "pip install -U --upgrade-strategy eager -r examples/pytorch/_tests_requirements.txt",
        "pip install -U --upgrade-strategy eager -e git+https://github.com/huggingface/accelerate@main#egg=accelerate",
    ],
    pytest_num_workers=1,
)


@@ -510,7 +512,7 @@ doc_test_job = CircleCIJob(
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time ffmpeg",
        "pip install --upgrade --upgrade-strategy eager pip",
        "pip install -U --upgrade-strategy eager -e .[dev]",
        "pip install -U --upgrade-strategy eager git+https://github.com/huggingface/accelerate",
        "pip install -U --upgrade-strategy eager -e git+https://github.com/huggingface/accelerate@main#egg=accelerate",
        "pip install --upgrade --upgrade-strategy eager pytest pytest-sugar",
        "pip install -U --upgrade-strategy eager natten",
        "find -name __pycache__ -delete",

.github/workflows/add-model-like.yml (vendored, 2 changed lines)
@@ -14,7 +14,7 @@ on:
jobs:
  run_tests_templates_like:
    name: "Add new model like template tests"
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      - uses: actions/checkout@v3

.github/workflows/build-docker-images.yml (vendored, 117 changed lines)
@@ -20,7 +20,7 @@ concurrency:
jobs:
  latest-docker:
    name: "Latest PyTorch + TensorFlow [dev]"
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      - name: Cleanup disk
        run: |
@@ -69,7 +69,7 @@ jobs:

  latest-torch-deepspeed-docker:
    name: "Latest PyTorch + DeepSpeed"
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      - name: Cleanup disk
        run: |
@@ -106,7 +106,7 @@ jobs:
  # Can't build 2 images in a single job `latest-torch-deepspeed-docker` (for `nvcr.io/nvidia`)
  latest-torch-deepspeed-docker-for-push-ci-daily-build:
    name: "Latest PyTorch + DeepSpeed (Push CI - Daily Build)"
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      - name: Cleanup disk
        run: |
@@ -148,7 +148,7 @@ jobs:
    name: "Doc builder"
    # Push CI doesn't need this image
    if: inputs.image_postfix != '-push-ci'
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      -
        name: Set up Docker Buildx
@@ -174,7 +174,7 @@ jobs:
    name: "Latest PyTorch [dev]"
    # Push CI doesn't need this image
    if: inputs.image_postfix != '-push-ci'
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      - name: Cleanup disk
        run: |
@@ -208,46 +208,47 @@ jobs:
          push: true
          tags: huggingface/transformers-pytorch-gpu

  latest-pytorch-amd:
    name: "Latest PyTorch (AMD) [dev]"
    runs-on: [self-hosted, docker-gpu, amd-gpu, single-gpu, mi210]
    steps:
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Check out code
        uses: actions/checkout@v3
      - name: Login to DockerHub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_PASSWORD }}
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: ./docker/transformers-pytorch-amd-gpu
          build-args: |
            REF=main
          push: true
          tags: huggingface/transformers-pytorch-amd-gpu${{ inputs.image_postfix }}
      # Push CI images still need to be re-built daily
      -
        name: Build and push (for Push CI) in a daily basis
        # This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
        # The later case is useful for manual image building for debugging purpose. Use another tag in this case!
        if: inputs.image_postfix != '-push-ci'
        uses: docker/build-push-action@v5
        with:
          context: ./docker/transformers-pytorch-amd-gpu
          build-args: |
            REF=main
          push: true
          tags: huggingface/transformers-pytorch-amd-gpu-push-ci
# Need to be fixed with the help from Guillaume.
#  latest-pytorch-amd:
#    name: "Latest PyTorch (AMD) [dev]"
#    runs-on: [self-hosted, docker-gpu, amd-gpu, single-gpu, mi210]
#    steps:
#      - name: Set up Docker Buildx
#        uses: docker/setup-buildx-action@v3
#      - name: Check out code
#        uses: actions/checkout@v3
#      - name: Login to DockerHub
#        uses: docker/login-action@v3
#        with:
#          username: ${{ secrets.DOCKERHUB_USERNAME }}
#          password: ${{ secrets.DOCKERHUB_PASSWORD }}
#      - name: Build and push
#        uses: docker/build-push-action@v5
#        with:
#          context: ./docker/transformers-pytorch-amd-gpu
#          build-args: |
#            REF=main
#          push: true
#          tags: huggingface/transformers-pytorch-amd-gpu${{ inputs.image_postfix }}
#      # Push CI images still need to be re-built daily
#      -
#        name: Build and push (for Push CI) in a daily basis
#        # This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
#        # The later case is useful for manual image building for debugging purpose. Use another tag in this case!
#        if: inputs.image_postfix != '-push-ci'
#        uses: docker/build-push-action@v5
#        with:
#          context: ./docker/transformers-pytorch-amd-gpu
#          build-args: |
#            REF=main
#          push: true
#          tags: huggingface/transformers-pytorch-amd-gpu-push-ci

  latest-tensorflow:
    name: "Latest TensorFlow [dev]"
    # Push CI doesn't need this image
    if: inputs.image_postfix != '-push-ci'
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      -
        name: Set up Docker Buildx
@@ -270,3 +271,39 @@ jobs:
            REF=main
          push: true
          tags: huggingface/transformers-tensorflow-gpu

  # latest-pytorch-deepspeed-amd:
  #   name: "PyTorch + DeepSpeed (AMD) [dev]"

  #   runs-on: [self-hosted, docker-gpu, amd-gpu, single-gpu, mi210]
  #   steps:
  #     - name: Set up Docker Buildx
  #       uses: docker/setup-buildx-action@v3
  #     - name: Check out code
  #       uses: actions/checkout@v3
  #     - name: Login to DockerHub
  #       uses: docker/login-action@v3
  #       with:
  #         username: ${{ secrets.DOCKERHUB_USERNAME }}
  #         password: ${{ secrets.DOCKERHUB_PASSWORD }}
  #     - name: Build and push
  #       uses: docker/build-push-action@v5
  #       with:
  #         context: ./docker/transformers-pytorch-deepspeed-amd-gpu
  #         build-args: |
  #           REF=main
  #         push: true
  #         tags: huggingface/transformers-pytorch-deepspeed-amd-gpu${{ inputs.image_postfix }}
  #     # Push CI images still need to be re-built daily
  #     -
  #       name: Build and push (for Push CI) in a daily basis
  #       # This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
  #       # The later case is useful for manual image building for debugging purpose. Use another tag in this case!
  #       if: inputs.image_postfix != '-push-ci'
  #       uses: docker/build-push-action@v5
  #       with:
  #         context: ./docker/transformers-pytorch-deepspeed-amd-gpu
  #         build-args: |
  #           REF=main
  #         push: true
  #         tags: huggingface/transformers-pytorch-deepspeed-amd-gpu-push-ci

@@ -13,7 +13,7 @@ concurrency:
jobs:
  latest-with-torch-nightly-docker:
    name: "Nightly PyTorch + Stable TensorFlow"
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      - name: Cleanup disk
        run: |
@@ -50,7 +50,7 @@ jobs:

  nightly-torch-deepspeed-docker:
    name: "Nightly PyTorch + DeepSpeed"
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      - name: Cleanup disk
        run: |

@@ -16,7 +16,7 @@ jobs:
      fail-fast: false
      matrix:
        version: ["1.13", "1.12", "1.11", "1.10"]
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      -
        name: Set up Docker Buildx
@@ -60,7 +60,7 @@ jobs:
      fail-fast: false
      matrix:
        version: ["2.11", "2.10", "2.9", "2.8", "2.7", "2.6", "2.5"]
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      -
        name: Set up Docker Buildx

.github/workflows/check_runner_status.yml (vendored, 68 changed lines)
@@ -1,68 +0,0 @@
name: Self-hosted runner (check runner status)

# Note that each job's dependencies go into a corresponding docker file.
#
# For example for `run_all_tests_torch_cuda_extensions_gpu` the docker image is
# `huggingface/transformers-pytorch-deepspeed-latest-gpu`, which can be found at
# `docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile`

on:
  repository_dispatch:
  schedule:
    # run per hour
    - cron: "0 */1 * * *"

env:
  TRANSFORMERS_IS_CI: yes

jobs:
  check_runner_status:
    name: Check Runner Status
    runs-on: ubuntu-latest
    outputs:
      offline_runners: ${{ steps.set-offline_runners.outputs.offline_runners }}
    steps:
      - name: Checkout transformers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Check Runner Status
        run: python utils/check_self_hosted_runner.py --target_runners single-gpu-ci-runner-docker,multi-gpu-ci-runner-docker,single-gpu-scheduled-ci-runner-docker,multi-scheduled-scheduled-ci-runner-docker,single-gpu-doctest-ci-runner-docker --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }}

      - id: set-offline_runners
        name: Set output for offline runners
        if: ${{ always() }}
        run: |
          offline_runners=$(python3 -c 'fp = open("offline_runners.txt"); failed = fp.read(); fp.close(); print(failed)')
          echo "offline_runners=$offline_runners" >> $GITHUB_OUTPUT

  send_results:
    name: Send results to webhook
    runs-on: ubuntu-latest
    needs: check_runner_status
    if: ${{ failure() }}
    steps:
      - name: Preliminary job status
        shell: bash
        run: |
          echo "Runner availability: ${{ needs.check_runner_status.result }}"

      - uses: actions/checkout@v3
      - uses: actions/download-artifact@v3
      - name: Send message to Slack
        env:
          CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
          CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}
          CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
          CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
          CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
          ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
          CI_EVENT: runner status check
          RUNNER_STATUS: ${{ needs.check_runner_status.result }}
          OFFLINE_RUNNERS: ${{ needs.check_runner_status.outputs.offline_runners }}
        # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change
        # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
        run: |
          pip install slack_sdk
          python utils/notification_service.py
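The `offline_runners` one-liner in the deleted workflow above is compact; spelled out, it does roughly the following (a sketch, no new behavior):

```python
# Read the file written earlier by utils/check_self_hosted_runner.py and print
# its contents so the surrounding shell step can capture them into the
# `offline_runners` step output.
with open("offline_runners.txt") as fp:
    offline_runners = fp.read()

print(offline_runners)
```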
							
								
								
									
.github/workflows/check_tiny_models.yml (vendored, 2 changed lines)
@@ -14,7 +14,7 @@ env:
jobs:
  check_tiny_models:
    name: Check tiny models
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout transformers
        uses: actions/checkout@v3

.github/workflows/delete_doc_comment.yml (vendored, 14 changed lines)
@@ -1,14 +0,0 @@
name: Delete doc comment

on:
  workflow_run:
    workflows: ["Delete doc comment trigger"]
    types:
      - completed


jobs:
  delete:
    uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main
    secrets:
      comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
.github/workflows/delete_doc_comment_trigger.yml (vendored, 12 changed lines)
@@ -1,12 +0,0 @@
name: Delete doc comment trigger

on:
  pull_request:
    types: [ closed ]


jobs:
  delete:
    uses: huggingface/doc-builder/.github/workflows/delete_doc_comment_trigger.yml@main
    with:
      pr_number: ${{ github.event.number }}
.github/workflows/doctests.yml (vendored, 2 changed lines)
@@ -66,7 +66,7 @@ jobs:

  send_results:
    name: Send results to webhook
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    if: always()
    needs: [run_doctests]
    steps:

.github/workflows/model-templates.yml (vendored, 2 changed lines)
@@ -7,7 +7,7 @@ on:

jobs:
  run_tests_templates:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout repository
        uses: actions/checkout@v3

.github/workflows/release-conda.yml (vendored, 2 changed lines)
@@ -12,7 +12,7 @@ env:

jobs:
  build_and_package:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    defaults:
      run:
        shell: bash -l {0}

.github/workflows/self-nightly-scheduled.yml (vendored, 7 changed lines)
@@ -19,6 +19,7 @@ env:
  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
  TF_FORCE_GPU_ALLOW_GROWTH: true
  RUN_PT_TF_CROSS_TESTS: 1
  CUDA_VISIBLE_DEVICES: 0,1

jobs:
  setup:
@@ -211,7 +212,7 @@ jobs:
          python3 -m pip uninstall -y deepspeed
          rm -rf DeepSpeed
          git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build
          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check

      - name: NVIDIA-SMI
        run: |
@@ -245,7 +246,7 @@ jobs:

  send_results:
    name: Send results to webhook
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    if: always()
    needs: [
      setup,
@@ -285,4 +286,4 @@ jobs:
        with:
          name: |
              single-*
              multi-*
              multi-*

.github/workflows/self-past.yml (vendored, 19 changed lines)
@@ -30,6 +30,7 @@ env:
  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
  TF_FORCE_GPU_ALLOW_GROWTH: true
  RUN_PT_TF_CROSS_TESTS: 1
  CUDA_VISIBLE_DEVICES: 0,1

jobs:
  setup:
@@ -87,6 +88,10 @@ jobs:
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: Update some packages
        working-directory: /transformers
        run: python3 -m pip install -U datasets

      - name: Echo folder ${{ matrix.folders }}
        shell: bash
        # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
@@ -163,6 +168,10 @@ jobs:
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: Update some packages
        working-directory: /transformers
        run: python3 -m pip install -U datasets

      - name: Echo folder ${{ matrix.folders }}
        shell: bash
        # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
@@ -239,6 +248,10 @@ jobs:
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: Update some packages
        working-directory: /transformers
        run: python3 -m pip install -U datasets

      - name: Install
        working-directory: /transformers
        run: |
@@ -254,7 +267,7 @@ jobs:
          python3 -m pip uninstall -y deepspeed
          rm -rf DeepSpeed
          git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build
          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check

      - name: NVIDIA-SMI
        run: |
@@ -288,7 +301,7 @@ jobs:

  send_results:
    name: Send results to webhook
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    if: always()
    needs: [
      setup,
@@ -340,4 +353,4 @@ jobs:
        with:
          name: |
              single-*
              multi-*
              multi-*

@@ -18,7 +18,7 @@ on:
jobs:
  run_amd_ci:
    name: AMD mi210
    if: (cancelled() != true) && ((github.event_name != 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_push_ci_caller')))
    if: (cancelled() != true) && ((github.event_name == 'workflow_run') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_push_ci_caller')))
    uses: ./.github/workflows/self-push-amd.yml
    with:
      gpu_flavor: mi210

@@ -18,7 +18,7 @@ on:
jobs:
  run_amd_ci:
    name: AMD mi250
    if: (cancelled() != true) && ((github.event_name != 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_push_ci_caller')))
    if: (cancelled() != true) && ((github.event_name == 'workflow_run') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_push_ci_caller')))
    uses: ./.github/workflows/self-push-amd.yml
    with:
      gpu_flavor: mi250

.github/workflows/self-push-amd.yml (vendored, 26 changed lines)
@@ -19,7 +19,7 @@ env:
jobs:
  check_runner_status:
    name: Check Runner Status
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout transformers
        uses: actions/checkout@v3
@@ -38,14 +38,16 @@ jobs:
    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
    container:
      image: huggingface/transformers-pytorch-amd-gpu-push-ci  # <--- We test only for PyTorch for now
      options: --device /dev/kfd --device /dev/dri --env HIP_VISIBLE_DEVICES --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: ROCM-SMI
        run: |
          rocminfo  | grep "Agent" -A 14
      - name: Show HIP environment
          rocm-smi
      - name: ROCM-INFO
        run: |
          rocminfo  | grep "Agent" -A 14
      - name: Show ROCR environment
        run: |
          echo "HIP: $HIP_VISIBLE_DEVICES"
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

  setup_gpu:
@@ -57,7 +59,7 @@ jobs:
    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
    container:
      image: huggingface/transformers-pytorch-amd-gpu-push-ci  # <--- We test only for PyTorch for now
      options: --device /dev/kfd --device /dev/dri --env HIP_VISIBLE_DEVICES --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
      test_map: ${{ steps.set-matrix.outputs.test_map }}
@@ -155,7 +157,7 @@ jobs:
    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
    container:
      image: huggingface/transformers-pytorch-amd-gpu-push-ci  # <--- We test only for PyTorch for now
      options: --device /dev/kfd --device /dev/dri --env HIP_VISIBLE_DEVICES --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      # Necessary to get the correct branch name and commit SHA for `workflow_run` event
      # We also take into account the `push` event (we might want to test some changes in a branch)
@@ -207,10 +209,12 @@ jobs:

      - name: ROCM-SMI
        run: |
          rocminfo  | grep "Agent" -A 14
      - name: Show HIP environment
          rocm-smi
      - name: ROCM-INFO
        run: |
          rocminfo  | grep "Agent" -A 14
      - name: Show ROCR environment
        run: |
          echo "HIP: $HIP_VISIBLE_DEVICES"
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

      - name: Environment
@@ -241,7 +245,7 @@ jobs:

  send_results:
    name: Send results to webhook
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    if: always()
    needs: [
        check_runner_status,

.github/workflows/self-push-caller.yml (vendored, 4 changed lines)
@@ -14,7 +14,7 @@ on:

jobs:
  check-for-setup:
      runs-on: ubuntu-latest
      runs-on: ubuntu-22.04
      name: Check if setup was changed
      outputs:
        changed: ${{ steps.was_changed.outputs.changed }}
@@ -46,7 +46,7 @@ jobs:

  run_push_ci:
    name: Trigger Push CI
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    if: ${{ always() }}
    needs: build-docker-containers
    steps:

.github/workflows/self-push.yml (vendored, 7 changed lines)
@@ -25,6 +25,7 @@ env:
  PYTEST_TIMEOUT: 60
  TF_FORCE_GPU_ALLOW_GROWTH: true
  RUN_PT_TF_CROSS_TESTS: 1
  CUDA_VISIBLE_DEVICES: 0,1

jobs:
  setup:
@@ -365,7 +366,7 @@ jobs:
        working-directory: /workspace
        run: |
          python3 -m pip uninstall -y deepspeed
          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check

      - name: NVIDIA-SMI
        run: |
@@ -455,7 +456,7 @@ jobs:
        working-directory: /workspace
        run: |
          python3 -m pip uninstall -y deepspeed
          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check

      - name: NVIDIA-SMI
        run: |
@@ -490,7 +491,7 @@ jobs:

  send_results:
    name: Send results to webhook
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    if: always()
    needs: [
        setup,

.github/workflows/self-scheduled-amd-caller.yml (vendored, new file, 14 changed lines)
@@ -0,0 +1,14 @@
name: Self-hosted runner (AMD scheduled CI caller)

on:
  schedule:
    - cron: "17 2 * * *"

jobs:
  run_scheduled_amd_ci:
    name: Trigger Scheduled AMD CI
    runs-on: ubuntu-22.04
    if: ${{ always() }}
    steps:
      - name: Trigger scheduled AMD CI via workflow_run
        run: echo "Trigger scheduled AMD CI via workflow_run"
.github/workflows/self-scheduled-amd-mi210-caller.yml (vendored, new file, 19 changed lines)
@@ -0,0 +1,19 @@
name: Self-hosted runner (AMD mi210 scheduled CI caller)

on:
  workflow_run:
    workflows: ["Self-hosted runner (AMD scheduled CI caller)"]
    branches: ["main"]
    types: [completed]
  push:
    branches:
      - run_amd_scheduled_ci_caller*

jobs:
  run_amd_ci:
    name: AMD mi210
    if: (cancelled() != true) && ((github.event_name == 'workflow_run') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_scheduled_ci_caller')))
    uses: ./.github/workflows/self-scheduled-amd.yml
    with:
      gpu_flavor: mi210
    secrets: inherit
.github/workflows/self-scheduled-amd-mi250-caller.yml (vendored, new file, 19 changed lines)
@@ -0,0 +1,19 @@
name: Self-hosted runner (AMD mi250 scheduled CI caller)

on:
  workflow_run:
    workflows: ["Self-hosted runner (AMD scheduled CI caller)"]
    branches: ["main"]
    types: [completed]
  push:
    branches:
      - run_amd_scheduled_ci_caller*

jobs:
  run_amd_ci:
    name: AMD mi250
    if: (cancelled() != true) && ((github.event_name == 'workflow_run') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_amd_scheduled_ci_caller')))
    uses: ./.github/workflows/self-scheduled-amd.yml
    with:
      gpu_flavor: mi250
    secrets: inherit
										518
									
								
								.github/workflows/self-scheduled-amd.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										518
									
								
								.github/workflows/self-scheduled-amd.yml
									
									
									
									
										vendored
									
									
										Normal file
									
								
							@ -0,0 +1,518 @@
 | 
			
		||||
name: Self-hosted runner (scheduled-amd)
 | 
			
		||||
 | 
			
		||||
# Note: For the AMD CI, we rely on a caller workflow and on the workflow_call event to trigger the
 | 
			
		||||
# CI in order to run it on both MI210 and MI250, without having to use matrix here which pushes
 | 
			
		||||
# us towards the limit of allowed jobs on GitHub Actions.
 | 
			
		||||
on:
 | 
			
		||||
  workflow_call:
 | 
			
		||||
    inputs:
 | 
			
		||||
      gpu_flavor:
 | 
			
		||||
        required: true
 | 
			
		||||
        type: string
 | 
			
		||||
 | 
			
		||||
env:
 | 
			
		||||
  HF_HOME: /mnt/cache
 | 
			
		||||
  TRANSFORMERS_IS_CI: yes
 | 
			
		||||
  OMP_NUM_THREADS: 8
 | 
			
		||||
  MKL_NUM_THREADS: 8
 | 
			
		||||
  RUN_SLOW: yes
 | 
			
		||||
  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# Important note: each job (run_tests_single_gpu, run_tests_multi_gpu, run_examples_gpu, run_pipelines_torch_gpu) requires all the previous jobs before running.
 | 
			
		||||
# This is done so that we avoid parallelizing the scheduled tests, to leave available
 | 
			
		||||
# runners for the push CI that is running on the same machine.
 | 
			
		||||
jobs:
 | 
			
		||||
  check_runner_status:
 | 
			
		||||
    name: Check Runner Status
 | 
			
		||||
    runs-on: ubuntu-22.04
 | 
			
		||||
    steps:
 | 
			
		||||
      - name: Checkout transformers
 | 
			
		||||
        uses: actions/checkout@v3
 | 
			
		||||
        with:
 | 
			
		||||
          fetch-depth: 2
 | 
			
		||||
 | 
			
		||||
      - name: Check Runner Status
 | 
			
		||||
        run: python utils/check_self_hosted_runner.py --target_runners hf-amd-mi210-ci-1gpu-1,hf-amd-mi250-ci-1gpu-1 --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
 | 
			
		||||
 | 
			
		||||
  check_runners:
 | 
			
		||||
    name: Check Runners
 | 
			
		||||
    needs: check_runner_status
 | 
			
		||||
    strategy:
 | 
			
		||||
      matrix:
 | 
			
		||||
        machine_type: [single-gpu, multi-gpu]
 | 
			
		||||
    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
 | 
			
		||||
    container:
 | 
			
		||||
      image: huggingface/transformers-pytorch-amd-gpu
 | 
			
		||||
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: ROCM-SMI
        run: |
          rocm-smi
      - name: ROCM-INFO
        run: |
          rocminfo  | grep "Agent" -A 14
      - name: Show ROCR environment
        run: |
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

  setup:
    name: Setup
    needs: check_runners
    strategy:
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
    container:
      image: huggingface/transformers-pytorch-amd-gpu
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - name: Update clone
        working-directory: /transformers
        run: |
          git fetch && git checkout ${{ github.sha }}

      - name: Cleanup
        working-directory: /transformers
        run: |
          rm -rf tests/__pycache__
          rm -rf tests/models/__pycache__
          rm -rf reports

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - id: set-matrix
        name: Identify models to test
        working-directory: /transformers/tests
        run: |
          echo "matrix=$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')" >> $GITHUB_OUTPUT
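          # The one-liner above prints a Python list of test folders (the models/<name> directories
          # first, then the remaining top-level directories under tests/, with the bare "models"
          # entry removed) and stores it as this step's `matrix` output, which the test jobs read
          # via fromJson().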

      - name: ROCM-SMI
        run: |
          rocm-smi

      - name: ROCM-INFO
        run: |
          rocminfo  | grep "Agent" -A 14
      - name: Show ROCR environment
        run: |
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

  run_tests_single_gpu:
    name: Single GPU tests
    strategy:
      max-parallel: 1  # For now, not to parallelize. Can change later if it works well.
      fail-fast: false
      matrix:
        folders: ${{ fromJson(needs.setup.outputs.matrix) }}
        machine_type: [single-gpu]
    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
    container:
      image: huggingface/transformers-pytorch-amd-gpu
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    needs: setup
    steps:
      - name: Echo folder ${{ matrix.folders }}
        shell: bash
        # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
        # set the artifact folder names (because the character `/` is not allowed).
        run: |
          echo "${{ matrix.folders }}"
          matrix_folders=${{ matrix.folders }}
          matrix_folders=${matrix_folders/'models/'/'models_'}
          echo "$matrix_folders"
          echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV

      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: ROCM-SMI
        run: |
          rocm-smi
      - name: ROCM-INFO
        run: |
          rocminfo  | grep "Agent" -A 14
      - name: Show ROCR environment
        run: |
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all tests on GPU
        working-directory: /transformers
        run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
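        # Note (assumption about the custom flag): --make-reports is added by the repository's
        # conftest.py and writes the per-folder report files (e.g. failures_short.txt) that the
        # following steps cat and upload.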

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports
          path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}

  run_tests_multi_gpu:
    name: Multi GPU tests
    strategy:
      max-parallel: 1
      fail-fast: false
      matrix:
        folders: ${{ fromJson(needs.setup.outputs.matrix) }}
        machine_type: [multi-gpu]
    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
    container:
      image: huggingface/transformers-pytorch-amd-gpu
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    needs: setup
    steps:
      - name: Echo folder ${{ matrix.folders }}
        shell: bash
        # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
        # set the artifact folder names (because the character `/` is not allowed).
        run: |
          echo "${{ matrix.folders }}"
          matrix_folders=${{ matrix.folders }}
          matrix_folders=${matrix_folders/'models/'/'models_'}
          echo "$matrix_folders"
          echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV

      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: ROCM-SMI
        run: |
          rocm-smi
      - name: ROCM-INFO
        run: |
          rocminfo  | grep "Agent" -A 14
      - name: Show ROCR environment
        run: |
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all tests on GPU
        working-directory: /transformers
        run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports
          path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}

  run_examples_gpu:
    name: Examples tests
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu]
    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
    container:
      image: huggingface/transformers-pytorch-amd-gpu
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    needs: setup
    steps:
      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: ROCM-SMI
        run: |
          rocm-smi
      - name: ROCM-INFO
        run: |
          rocminfo  | grep "Agent" -A 14
      - name: Show ROCR environment
        run: |
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run examples tests on GPU
        working-directory: /transformers
        run: |
          pip install -r examples/pytorch/_tests_requirements.txt
          python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_examples_gpu examples/pytorch

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_examples_gpu/failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.machine_type }}_run_examples_gpu
          path: /transformers/reports/${{ matrix.machine_type }}_examples_gpu

  run_pipelines_torch_gpu:
    name: PyTorch pipelines tests
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
    container:
      image: huggingface/transformers-pytorch-amd-gpu
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    needs: setup
    steps:
      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: ROCM-SMI
        run: |
          rocm-smi
      - name: ROCM-INFO
        run: |
          rocminfo  | grep "Agent" -A 14
      - name: Show ROCR environment
        run: |
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all pipeline tests on GPU
        working-directory: /transformers
        run: |
          python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_torch_pipeline_gpu tests/pipelines
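          # -n 1 --dist=loadfile are pytest-xdist options: a single worker process, with tests from
          # the same file kept together, so pipeline tests never compete for the same GPU.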

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_torch_pipeline_gpu/failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.machine_type }}_run_tests_torch_pipeline_gpu
          path: /transformers/reports/${{ matrix.machine_type }}_tests_torch_pipeline_gpu

  run_tests_torch_deepspeed_gpu:
    name: Torch ROCm deepspeed tests
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]

    runs-on: [self-hosted, docker-gpu, amd-gpu, '${{ matrix.machine_type }}', '${{ inputs.gpu_flavor }}']
    needs: setup
    container:
      image: huggingface/transformers-pytorch-deepspeed-amd-gpu
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: ROCM-SMI
        run: |
          rocm-smi
      - name: ROCM-INFO
        run: |
          rocminfo  | grep "Agent" -A 14

      - name: Show ROCR environment
        run: |
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all tests on GPU
        working-directory: /transformers
        run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_torch_deepspeed_gpu tests/deepspeed tests/extended

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_torch_deepspeed_gpu/failures_short.txt

      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.machine_type }}_run_tests_torch_deepspeed_gpu_test_reports
          path: /transformers/reports/${{ matrix.machine_type }}_tests_torch_deepspeed_gpu

  run_extract_warnings:
    name: Extract warnings in CI artifacts
    runs-on: ubuntu-22.04
    if: always()
    needs: [
      check_runner_status,
      check_runners,
      setup,
      run_tests_single_gpu,
      run_tests_multi_gpu,
      run_examples_gpu,
      run_pipelines_torch_gpu,
      run_tests_torch_deepspeed_gpu
    ]
    steps:
      - name: Checkout transformers
        uses: actions/checkout@v3
        with:
          fetch-depth: 2

      - name: Install transformers
        run: pip install transformers

      - name: Show installed libraries and their versions
        run: pip freeze

      - name: Create output directory
        run: mkdir warnings_in_ci

      - uses: actions/download-artifact@v3
        with:
          path: warnings_in_ci

      - name: Show artifacts
        run: echo "$(python3 -c 'import os; d = os.listdir(); print(d)')"
        working-directory: warnings_in_ci

      - name: Extract warnings in CI artifacts
        run: |
          python3 utils/extract_warnings.py --workflow_run_id ${{ github.run_id }} --output_dir warnings_in_ci --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} --from_gh
          echo "$(python3 -c 'import os; import json; fp = open("warnings_in_ci/selected_warnings.json"); d = json.load(fp); d = "\n".join(d) ;print(d)')"
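          # Note (assumption about the helper): extract_warnings.py is expected to pull this run's
          # artifacts using --workflow_run_id and the token, write the selected warnings to
          # warnings_in_ci/selected_warnings.json, and the echo above prints that list in the job log.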

      - name: Upload artifact
        if: ${{ always() }}
        uses: actions/upload-artifact@v3
        with:
          name: warnings_in_ci
          path: warnings_in_ci/selected_warnings.json

  send_results:
    name: Send results to webhook
    runs-on: ubuntu-22.04
    if: always()
    needs: [
      check_runner_status,
      check_runners,
      setup,
      run_tests_single_gpu,
      run_tests_multi_gpu,
      run_examples_gpu,
      run_pipelines_torch_gpu,
      run_tests_torch_deepspeed_gpu,
      run_extract_warnings
    ]
    steps:
      - name: Preliminary job status
        shell: bash
        # For the meaning of these environment variables, see the job `Setup`
        run: |
          echo "Runner availability: ${{ needs.check_runner_status.result }}"
          echo "Runner status: ${{ needs.check_runners.result }}"
          echo "Setup status: ${{ needs.setup.result }}"

      - uses: actions/checkout@v3
      - uses: actions/download-artifact@v3
      - name: Send message to Slack
        env:
          CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
          CI_SLACK_CHANNEL_ID_DAILY_AMD: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY_AMD }}
          CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
          CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY_AMD }}
          ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
          CI_EVENT: Scheduled CI (AMD) - ${{ inputs.gpu_flavor }}
          CI_SHA: ${{ github.sha }}
          CI_WORKFLOW_REF: ${{ github.workflow_ref }}
          RUNNER_STATUS: ${{ needs.check_runner_status.result }}
          RUNNER_ENV_STATUS: ${{ needs.check_runners.result }}
          SETUP_STATUS: ${{ needs.setup.result }}
        # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change
        # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
        run: |
          sudo apt-get install -y curl
          pip install slack_sdk
          pip show slack_sdk
          python utils/notification_service.py "${{ needs.setup.outputs.matrix }}"

      # Upload complete failure tables, as they might be big and only truncated versions could be sent to Slack.
      - name: Failure table artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v3
        with:
          name: test_failure_tables
          path: test_failure_tables

.github/workflows/self-scheduled.yml (vendored, 11 changes)

@@ -23,6 +23,7 @@ env:
  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
  TF_FORCE_GPU_ALLOW_GROWTH: true
  RUN_PT_TF_CROSS_TESTS: 1
  CUDA_VISIBLE_DEVICES: 0,1

jobs:
  setup:
@@ -365,7 +366,7 @@ jobs:
        working-directory: /workspace
        run: |
          python3 -m pip uninstall -y deepspeed
          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check

      - name: NVIDIA-SMI
        run: |
@@ -399,7 +400,7 @@ jobs:

  run_extract_warnings:
    name: Extract warnings in CI artifacts
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    if: always()
    needs: [
      setup,
@@ -447,7 +448,7 @@ jobs:

  send_results:
    name: Send results to webhook
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    if: always()
    needs: [
      setup,
@@ -493,5 +494,5 @@ jobs:
        if: ${{ always() }}
        uses: actions/upload-artifact@v3
        with:
          name: test_failure_tables
          path: test_failure_tables
          name: prev_ci_results
          path: prev_ci_results

.github/workflows/stale.yml (vendored, 2 changes)

@@ -8,7 +8,7 @@ jobs:
  close_stale_issues:
    name: Close Stale Issues
    if: github.repository == 'huggingface/transformers'
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    env:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    steps:

.github/workflows/update_metdata.yml (vendored, 2 changes)

@@ -8,7 +8,7 @@ on:

jobs:
  build_and_package:
    runs-on: ubuntu-latest
    runs-on: ubuntu-22.04
    defaults:
      run:
        shell: bash -l {0}

@@ -152,7 +152,7 @@ You are not required to read the following guidelines before opening an issue. H

   ```bash
    cd examples/seq2seq
    python -m torch.distributed.launch --nproc_per_node=2 ./finetune_trainer.py \
    torchrun --nproc_per_node=2 ./finetune_trainer.py \
    --model_name_or_path sshleifer/distill-mbart-en-ro-12-4 --data_dir wmt_en_ro \
    --output_dir output_dir --overwrite_output_dir \
    --do_train --n_train 500 --num_train_epochs 1 \

Makefile (14 changes)

@@ -9,8 +9,8 @@ modified_only_fixup:
	$(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs)))
	@if test -n "$(modified_py_files)"; then \
		echo "Checking/fixing $(modified_py_files)"; \
		black $(modified_py_files); \
		ruff $(modified_py_files) --fix; \
		ruff check $(modified_py_files) --fix; \
		ruff format $(modified_py_files);\
	else \
		echo "No library .py files were modified"; \
	fi
@@ -48,11 +48,10 @@ repo-consistency:
# this target runs checks on all files

quality:
	black --check $(check_dirs) setup.py conftest.py
	ruff check $(check_dirs) setup.py conftest.py
	ruff format --check $(check_dirs) setup.py conftest.py
	python utils/custom_init_isort.py --check_only
	python utils/sort_auto_mappings.py --check_only
	ruff $(check_dirs) setup.py conftest.py
	doc-builder style src/transformers docs/source --max_len 119 --check_only --path_to_docs docs/source
	python utils/check_doc_toc.py

# Format source code automatically and check is there are any problems left that need manual fixing
@@ -60,14 +59,13 @@ quality:
extra_style_checks:
	python utils/custom_init_isort.py
	python utils/sort_auto_mappings.py
	doc-builder style src/transformers docs/source --max_len 119 --path_to_docs docs/source
	python utils/check_doc_toc.py --fix_and_overwrite

# this target runs checks on all files and potentially modifies some of them

style:
	black $(check_dirs) setup.py conftest.py
	ruff $(check_dirs) setup.py conftest.py --fix
	ruff check $(check_dirs) setup.py conftest.py --fix
	ruff format $(check_dirs) setup.py conftest.py
	${MAKE} autogenerate_code
	${MAKE} extra_style_checks

README.md (11 changes)

@@ -321,6 +321,7 @@ Current number of checkpoints: **
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (from LAION-AI) released with the paper [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) by Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker.
1. **[CLVP](https://huggingface.co/docs/transformers/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (from MetaAI) released with the paper [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) by Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
@@ -396,12 +397,14 @@ Current number of checkpoints: **
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (from The FAIR team of Meta AI) released with the paper [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample.
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (from The FAIR team of Meta AI) released with the paper [Llama2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/) by Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushka rMishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing EllenTan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom.
1. **[LLaVa](https://huggingface.co/docs/transformers/model_doc/llava)** (from Microsoft Research & University of Wisconsin-Madison) released with the paper [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485) by Haotian Liu, Chunyuan Li, Yuheng Li and Yong Jae Lee.
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
1. **[MADLAD-400](https://huggingface.co/docs/transformers/model_doc/madlad-400)** (from Google) released with the paper [MADLAD-400: A Multilingual And Document-Level Large Audited Dataset](https://arxiv.org/abs/2309.04662) by Sneha Kudugunta, Isaac Caswell, Biao Zhang, Xavier Garcia, Christopher A. Choquette-Choo, Katherine Lee, Derrick Xin, Aditya Kusupati, Romi Stella, Ankur Bapna, Orhan Firat.
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
@@ -414,6 +417,7 @@ Current number of checkpoints: **
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (from Alibaba Research) released with the paper [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) by Peng Wang, Cheng Da, and Cong Yao.
1. **[Mistral](https://huggingface.co/docs/transformers/model_doc/mistral)** (from Mistral AI) by The [Mistral AI](https://mistral.ai) team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.
1. **[Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral)** (from Mistral AI) by The [Mistral AI](https://mistral.ai) team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (from Facebook) released with the paper [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli.
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
@@ -438,10 +442,13 @@ Current number of checkpoints: **
1. **[OPT](https://huggingface.co/docs/transformers/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby.
1. **[PatchTSMixer](https://huggingface.co/docs/transformers/model_doc/patchtsmixer)** (from  IBM Research) released with the paper [TSMixer: Lightweight MLP-Mixer Model for Multivariate Time Series Forecasting](https://arxiv.org/pdf/2306.09364.pdf) by Vijay Ekambaram, Arindam Jati, Nam Nguyen, Phanwadee Sinthong, Jayant Kalagnanam.
1. **[PatchTST](https://huggingface.co/docs/transformers/model_doc/patchtst)** (from IBM) released with the paper [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/abs/2211.14730) by Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam.
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu.
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
1. **[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (from ADEPT) released in a [blog post](https://www.adept.ai/blog/persimmon-8b) by Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani.
1. **[Phi](https://huggingface.co/docs/transformers/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee.
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
@@ -462,6 +469,7 @@ Current number of checkpoints: **
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (from Bo Peng), released on [this repo](https://github.com/BlinkDL/RWKV-LM) by Bo Peng.
1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team.
1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team.
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
@@ -487,14 +495,17 @@ Current number of checkpoints: **
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (from Intel) released with the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
1. **[UnivNet](https://huggingface.co/docs/transformers/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim.
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
1. **[VipLlava](https://huggingface.co/docs/transformers/model_doc/vipllava)** (from University of Wisconsin–Madison) released with the paper [Making Large Multimodal Models Understand Arbitrary Visual Prompts](https://arxiv.org/abs/2312.00784) by Mu Cai, Haotian Liu, Siva Karthik Mustikovela, Gregory P. Meyer, Yuning Chai, Dennis Park, Yong Jae Lee.
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.

README_es.md (11 changes)

@@ -296,6 +296,7 @@ Número actual de puntos de control: **
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (from LAION-AI) released with the paper [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) by Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker.
1. **[CLVP](https://huggingface.co/docs/transformers/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. 
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (from MetaAI) released with the paper [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) by Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
 | 
			
		||||
@@ -371,12 +372,14 @@ Número actual de puntos de control:
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
 | 
			
		||||
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (from The FAIR team of Meta AI) released with the paper [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample.
 | 
			
		||||
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (from The FAIR team of Meta AI) released with the paper [Llama2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/) by Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom.
 | 
			
		||||
1. **[LLaVa](https://huggingface.co/docs/transformers/model_doc/llava)** (from Microsoft Research & University of Wisconsin-Madison) released with the paper [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485) by Haotian Liu, Chunyuan Li, Yuheng Li and Yong Jae Lee.
 | 
			
		||||
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
 | 
			
		||||
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
 | 
			
		||||
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
 | 
			
		||||
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
 | 
			
		||||
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
 | 
			
		||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
 | 
			
		||||
1. **[MADLAD-400](https://huggingface.co/docs/transformers/model_doc/madlad-400)** (from Google) released with the paper [MADLAD-400: A Multilingual And Document-Level Large Audited Dataset](https://arxiv.org/abs/2309.04662) by Sneha Kudugunta, Isaac Caswell, Biao Zhang, Xavier Garcia, Christopher A. Choquette-Choo, Katherine Lee, Derrick Xin, Aditya Kusupati, Romi Stella, Ankur Bapna, Orhan Firat.
 | 
			
		||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
 | 
			
		||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
 | 
			
		||||
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
 | 
			
		||||
@@ -389,6 +392,7 @@ Número actual de puntos de control:
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
 | 
			
		||||
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (from Alibaba Research) released with the paper [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) by Peng Wang, Cheng Da, and Cong Yao.
 | 
			
		||||
1. **[Mistral](https://huggingface.co/docs/transformers/model_doc/mistral)** (from Mistral AI) by The Mistral AI team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.
 | 
			
		||||
1. **[Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral)** (from Mistral AI) by The [Mistral AI](https://mistral.ai) team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed. 
 | 
			
		||||
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
 | 
			
		||||
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (from Facebook) released with the paper [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli.
 | 
			
		||||
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
 | 
			
		||||
@@ -413,10 +417,13 @@ Número actual de puntos de control:
1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
 | 
			
		||||
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
 | 
			
		||||
1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby.
 | 
			
		||||
1. **[PatchTSMixer](https://huggingface.co/docs/transformers/model_doc/patchtsmixer)** (from  IBM Research) released with the paper [TSMixer: Lightweight MLP-Mixer Model for Multivariate Time Series Forecasting](https://arxiv.org/pdf/2306.09364.pdf) by Vijay Ekambaram, Arindam Jati, Nam Nguyen, Phanwadee Sinthong, Jayant Kalagnanam.
 | 
			
		||||
1. **[PatchTST](https://huggingface.co/docs/transformers/model_doc/patchtst)** (from IBM) released with the paper [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/pdf/2211.14730.pdf) by Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam.
 | 
			
		||||
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
 | 
			
		||||
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu.
 | 
			
		||||
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
 | 
			
		||||
1. **[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (from ADEPT) released with the [blog post](https://www.adept.ai/blog/persimmon-8b) by Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani.
 | 
			
		||||
1. **[Phi](https://huggingface.co/docs/transformers/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee.
 | 
			
		||||
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
 | 
			
		||||
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.
 | 
			
		||||
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
 | 
			
		||||
@@ -437,6 +444,7 @@ Número actual de puntos de control:
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
 | 
			
		||||
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (from Bo Peng) released with [this repo](https://github.com/BlinkDL/RWKV-LM) by Bo Peng.
 | 
			
		||||
1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team.
 | 
			
		||||
1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team.
 | 
			
		||||
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
 | 
			
		||||
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.
 | 
			
		||||
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
 | 
			
		||||
@@ -462,14 +470,17 @@ Número actual de puntos de control:
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
 | 
			
		||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
 | 
			
		||||
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
 | 
			
		||||
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (from Intel) released with the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding.
 | 
			
		||||
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
 | 
			
		||||
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
 | 
			
		||||
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
 | 
			
		||||
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
 | 
			
		||||
1. **[UnivNet](https://huggingface.co/docs/transformers/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim. 
 | 
			
		||||
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.
 | 
			
		||||
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
 | 
			
		||||
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
 | 
			
		||||
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
 | 
			
		||||
1. **[VipLlava](https://huggingface.co/docs/transformers/model_doc/vipllava)** (from University of Wisconsin–Madison) released with the paper [Making Large Multimodal Models Understand Arbitrary Visual Prompts](https://arxiv.org/abs/2312.00784) by Mu Cai, Haotian Liu, Siva Karthik Mustikovela, Gregory P. Meyer, Yuning Chai, Dennis Park, Yong Jae Lee.
 | 
			
		||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
 | 
			
		||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
 | 
			
		||||
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
 | 
			
		||||
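Every checkpoint in the list above is reachable through the same `AutoTokenizer`/`AutoModel` interface, regardless of architecture. A minimal sketch, assuming the illustrative `facebook/opt-125m` checkpoint for the OPT entry (any other hosted checkpoint id follows the same pattern):

```python
# Minimal sketch: loading a listed model through the Auto* classes and generating text.
# "facebook/opt-125m" is an illustrative checkpoint id, not the only option for the OPT entry.
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "facebook/opt-125m"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

inputs = tokenizer("Hello, my name is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

The encoder and encoder-decoder models in the list work the same way once the matching `AutoModelFor...` class is swapped in.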
README_hd.md (11 changed lines)
@@ -270,6 +270,7 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (LAION-AI से) Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov. द्वाराअनुसंधान पत्र [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) के साथ जारी किया गया
 | 
			
		||||
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
 | 
			
		||||
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker.
 | 
			
		||||
1. **[CLVP](https://huggingface.co/docs/transformers/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. 
 | 
			
		||||
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (सेल्सफोर्स से) साथ में पेपर [प्रोग्राम सिंथेसिस के लिए एक संवादात्मक प्रतिमान](https://arxiv.org/abs/2203.13474) एरिक निजकैंप, बो पैंग, हिरोआकी हयाशी, लिफू तू, हुआन वांग, यिंगबो झोउ, सिल्वियो सावरेस, कैमिंग जिओंग रिलीज।
 | 
			
		||||
1. **[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (MetaAI से) Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve. द्वाराअनुसंधान पत्र [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) के साथ जारी किया गया
 | 
			
		||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
 | 
			
		||||
@ -345,12 +346,14 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (दक्षिण चीन प्रौद्योगिकी विश्वविद्यालय से) साथ में कागज [LiLT: एक सरल लेकिन प्रभावी भाषा-स्वतंत्र लेआउट ट्रांसफार्मर संरचित दस्तावेज़ समझ के लिए](https://arxiv.org/abs/2202.13669) जियापेंग वांग, लियानवेन जिन, काई डिंग द्वारा पोस्ट किया गया।
 | 
			
		||||
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (The FAIR team of Meta AI से) Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample. द्वाराअनुसंधान पत्र [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) के साथ जारी किया गया
 | 
			
		||||
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (The FAIR team of Meta AI से) Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushka rMishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing EllenTan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom.. द्वाराअनुसंधान पत्र [Llama2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/XXX) के साथ जारी किया गया
 | 
			
		||||
1. **[LLaVa](https://huggingface.co/docs/transformers/model_doc/llava)** (Microsoft Research & University of Wisconsin-Madison से) Haotian Liu, Chunyuan Li, Yuheng Li and Yong Jae Lee. द्वाराअनुसंधान पत्र [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485) के साथ जारी किया गया
 | 
			
		||||
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
 | 
			
		||||
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
 | 
			
		||||
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
 | 
			
		||||
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (UNC चैपल हिल से) साथ में पेपर [LXMERT: ओपन-डोमेन क्वेश्चन के लिए ट्रांसफॉर्मर से क्रॉस-मोडलिटी एनकोडर रिप्रेजेंटेशन सीखना Answering](https://arxiv.org/abs/1908.07490) हाओ टैन और मोहित बंसल द्वारा।
 | 
			
		||||
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
 | 
			
		||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
 | 
			
		||||
1. **[MADLAD-400](https://huggingface.co/docs/transformers/model_doc/madlad-400)** (from Google) released with the paper [MADLAD-400: A Multilingual And Document-Level Large Audited Dataset](https://arxiv.org/abs/2309.04662) by Sneha Kudugunta, Isaac Caswell, Biao Zhang, Xavier Garcia, Christopher A. Choquette-Choo, Katherine Lee, Derrick Xin, Aditya Kusupati, Romi Stella, Ankur Bapna, Orhan Firat.
 | 
			
		||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
 | 
			
		||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (माइक्रोसॉफ्ट रिसर्च एशिया से) साथ में पेपर [मार्कअपएलएम: विजुअली-रिच डॉक्यूमेंट अंडरस्टैंडिंग के लिए टेक्स्ट और मार्कअप लैंग्वेज का प्री-ट्रेनिंग] (https://arxiv.org/abs/2110.08518) जुनलॉन्ग ली, यिहेंग जू, लेई कुई, फुरु द्वारा वी द्वारा पोस्ट किया गया।
 | 
			
		||||
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (FAIR and UIUC से) Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar. द्वाराअनुसंधान पत्र [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) के साथ जारी किया गया
 | 
			
		||||
@ -363,6 +366,7 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (NVIDIA से) साथ वाला पेपर [Megatron-LM: ट्रेनिंग मल्टी-बिलियन पैरामीटर लैंग्वेज मॉडल्स यूजिंग मॉडल पैरेललिज़्म] (https://arxiv.org/abs/1909.08053) मोहम्मद शोएबी, मोस्टोफा पटवारी, राउल पुरी, पैट्रिक लेग्रेस्ले, जेरेड कैस्पर और ब्रायन कैटानज़ारो द्वारा पोस्ट किया गया।
 | 
			
		||||
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (Alibaba Research से) Peng Wang, Cheng Da, and Cong Yao. द्वाराअनुसंधान पत्र [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) के साथ जारी किया गया
 | 
			
		||||
1. **[Mistral](https://huggingface.co/docs/transformers/model_doc/mistral)** (from Mistral AI) by The Mistral AI team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.. 
 | 
			
		||||
1. **[Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral)** (from Mistral AI) by The [Mistral AI](https://mistral.ai) team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed. 
 | 
			
		||||
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (फ्रॉम Studio Ousia) साथ में पेपर [mLUKE: द पावर ऑफ एंटिटी रिप्रेजेंटेशन इन मल्टीलिंगुअल प्रीट्रेन्ड लैंग्वेज मॉडल्स](https://arxiv.org/abs/2110.08151) रयोकन री, इकुया यामाडा, और योशिमासा त्सुरोका द्वारा।
 | 
			
		||||
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (Facebook से) Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli. द्वाराअनुसंधान पत्र [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) के साथ जारी किया गया
 | 
			
		||||
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (सीएमयू/गूगल ब्रेन से) साथ में कागज [मोबाइलबर्ट: संसाधन-सीमित उपकरणों के लिए एक कॉम्पैक्ट टास्क-अज्ञेय बीईआरटी] (https://arxiv.org/abs/2004.02984) Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, और Denny Zhou द्वारा पोस्ट किया गया।
 | 
			
		||||
@ -387,10 +391,13 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
 | 
			
		||||
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
 | 
			
		||||
1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (Google AI से) Matthias Minderer, Alexey Gritsenko, Neil Houlsby. द्वाराअनुसंधान पत्र [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) के साथ जारी किया गया
 | 
			
		||||
1. **[PatchTSMixer](https://huggingface.co/docs/transformers/model_doc/patchtsmixer)** ( IBM Research से) Vijay Ekambaram, Arindam Jati, Nam Nguyen, Phanwadee Sinthong, Jayant Kalagnanam. द्वाराअनुसंधान पत्र [TSMixer: Lightweight MLP-Mixer Model for Multivariate Time Series Forecasting](https://arxiv.org/pdf/2306.09364.pdf) के साथ जारी किया गया
 | 
			
		||||
1. **[PatchTST](https://huggingface.co/docs/transformers/model_doc/patchtst)** (IBM से) Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam. द्वाराअनुसंधान पत्र [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/pdf/2211.14730.pdf) के साथ जारी किया गया
 | 
			
		||||
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
 | 
			
		||||
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu.
 | 
			
		||||
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (दीपमाइंड से) साथ में पेपर [पर्सीवर आईओ: संरचित इनपुट और आउटपुट के लिए एक सामान्य वास्तुकला] (https://arxiv.org/abs/2107.14795) एंड्रयू जेगल, सेबेस्टियन बोरग्यूड, जीन-बैप्टिस्ट अलायराक, कार्ल डोर्श, कैटलिन इओनेस्कु, डेविड द्वारा डिंग, स्कंद कोप्पुला, डैनियल ज़ोरान, एंड्रयू ब्रॉक, इवान शेलहैमर, ओलिवियर हेनाफ, मैथ्यू एम। बोट्विनिक, एंड्रयू ज़िसरमैन, ओरिओल विनियल्स, जोआओ कैरेरा द्वारा पोस्ट किया गया।
 | 
			
		||||
1. **[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (ADEPT से) Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani. द्वाराअनुसंधान पत्र [blog post](https://www.adept.ai/blog/persimmon-8b) के साथ जारी किया गया
 | 
			
		||||
1. **[Phi](https://huggingface.co/docs/transformers/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee.
 | 
			
		||||
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
 | 
			
		||||
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (Google से) Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. द्वाराअनुसंधान पत्र [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) के साथ जारी किया गया
 | 
			
		||||
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
 | 
			
		||||
@ -411,6 +418,7 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (झुईई टेक्नोलॉजी से), साथ में पेपर [रोफॉर्मर: रोटरी पोजिशन एंबेडिंग के साथ एन्हांस्ड ट्रांसफॉर्मर] (https://arxiv.org/pdf/2104.09864v1.pdf) जियानलिन सु और यू लू और शेंगफेंग पैन और बो वेन और युनफेंग लियू द्वारा प्रकाशित।
 | 
			
		||||
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (Bo Peng से) Bo Peng. द्वाराअनुसंधान पत्र [this repo](https://github.com/BlinkDL/RWKV-LM) के साथ जारी किया गया
 | 
			
		||||
1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team.
 | 
			
		||||
1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team.
 | 
			
		||||
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
 | 
			
		||||
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (Meta AI से) Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. द्वाराअनुसंधान पत्र [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) के साथ जारी किया गया
 | 
			
		||||
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
 | 
			
		||||
@ -436,14 +444,17 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
 | 
			
		||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
 | 
			
		||||
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
 | 
			
		||||
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (from Intel) released with the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding.
 | 
			
		||||
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
 | 
			
		||||
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (Google Research से) Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. द्वाराअनुसंधान पत्र [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) के साथ जारी किया गया
 | 
			
		||||
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
 | 
			
		||||
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (माइक्रोसॉफ्ट रिसर्च से) कागज के साथ [UNISPEECH-SAT: यूनिवर्सल स्पीच रिप्रेजेंटेशन लर्निंग विद स्पीकर अवेयर प्री-ट्रेनिंग ](https://arxiv.org/abs/2110.05752) सानयुआन चेन, यू वू, चेंग्यी वांग, झेंगयांग चेन, झूओ चेन, शुजी लियू, जियान वू, याओ कियान, फुरु वेई, जिन्यु ली, जियांगज़ान यू द्वारा पोस्ट किया गया।
 | 
			
		||||
1. **[UnivNet](https://huggingface.co/docs/transformers/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim. 
 | 
			
		||||
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.
 | 
			
		||||
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
 | 
			
		||||
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (मल्टीमीडिया कम्प्यूटिंग ग्रुप, नानजिंग यूनिवर्सिटी से) साथ में पेपर [वीडियोएमएई: मास्क्ड ऑटोएन्कोडर स्व-पर्यवेक्षित वीडियो प्री-ट्रेनिंग के लिए डेटा-कुशल सीखने वाले हैं] (https://arxiv.org/abs/2203.12602) ज़ान टोंग, यिबिंग सॉन्ग, जुए द्वारा वांग, लिमिन वांग द्वारा पोस्ट किया गया।
 | 
			
		||||
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (NAVER AI Lab/Kakao Enterprise/Kakao Brain से) साथ में कागज [ViLT: Vision-and-Language Transformer बिना कनवल्शन या रीजन सुपरविजन](https://arxiv.org/abs/2102.03334) वोनजे किम, बोक्यूंग सोन, इल्डू किम द्वारा पोस्ट किया गया।
 | 
			
		||||
1. **[VipLlava](https://huggingface.co/docs/transformers/model_doc/vipllava)** (University of Wisconsin–Madison से) Mu Cai, Haotian Liu, Siva Karthik Mustikovela, Gregory P. Meyer, Yuning Chai, Dennis Park, Yong Jae Lee. द्वाराअनुसंधान पत्र [Making Large Multimodal Models Understand Arbitrary Visual Prompts](https://arxiv.org/abs/2312.00784) के साथ जारी किया गया
 | 
			
		||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (गूगल एआई से) कागज के साथ [एक इमेज इज़ वर्थ 16x16 वर्ड्स: ट्रांसफॉर्मर्स फॉर इमेज रिकॉग्निशन एट स्केल](https://arxiv.org/abs/2010.11929) एलेक्सी डोसोवित्स्की, लुकास बेयर, अलेक्जेंडर कोलेसनिकोव, डिर्क वीसेनबोर्न, शियाओहुआ झाई, थॉमस अनटरथिनर, मुस्तफा देहघानी, मैथियास मिंडरर, जॉर्ज हेगोल्ड, सिल्वेन गेली, जैकब उस्ज़कोरेइट द्वारा हॉल्सबी द्वारा पोस्ट किया गया।
 | 
			
		||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
 | 
			
		||||
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
 | 
			
		||||
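For the vision, speech and multimodal entries in this list, the `pipeline` helper also wraps the pre- and post-processing. A hedged sketch, assuming the `google/owlvit-base-patch32` checkpoint for the OWL-ViT entry and a sample COCO image URL:

```python
# Minimal sketch: zero-shot object detection with a listed OWL-ViT checkpoint via pipeline().
# The checkpoint id and the image URL are illustrative assumptions, not prescribed by the list.
from transformers import pipeline

detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
results = detector(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["cat", "remote control"],
)
print(results)  # each result carries a score, a label and a bounding box
```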
README_ja.md (11 changed lines)
@@ -330,6 +330,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ
 | 
			
		||||
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (LAION-AI から) Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov. から公開された研究論文 [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687)
 | 
			
		||||
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (OpenAI から) Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever から公開された研究論文: [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020)
 | 
			
		||||
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (University of Göttingen から) Timo Lüddecke and Alexander Ecker から公開された研究論文: [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003)
 | 
			
		||||
1. **[CLVP](https://huggingface.co/docs/transformers/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. 
 | 
			
		||||
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (Salesforce から) Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong から公開された研究論文: [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474)
 | 
			
		||||
1. **[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (MetaAI から) Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve. から公開された研究論文 [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/)
 | 
			
		||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (Microsoft Research Asia から) Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang から公開された研究論文: [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152)
 | 
			
		||||
@ -405,12 +406,14 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ
 | 
			
		||||
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (South China University of Technology から) Jiapeng Wang, Lianwen Jin, Kai Ding から公開された研究論文: [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669)
 | 
			
		||||
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (The FAIR team of Meta AI から) Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample. から公開された研究論文 [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971)
 | 
			
		||||
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (The FAIR team of Meta AI から) Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushka rMishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing EllenTan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom.. から公開された研究論文 [Llama2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/XXX)
 | 
			
		||||
1. **[LLaVa](https://huggingface.co/docs/transformers/model_doc/llava)** (Microsoft Research & University of Wisconsin-Madison から) Haotian Liu, Chunyuan Li, Yuheng Li and Yong Jae Lee. から公開された研究論文 [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485)
 | 
			
		||||
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (AllenAI から) Iz Beltagy, Matthew E. Peters, Arman Cohan から公開された研究論文: [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150)
 | 
			
		||||
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (Google AI から) Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang から公開された研究論文: [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916)
 | 
			
		||||
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (Studio Ousia から) Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto から公開された研究論文: [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057)
 | 
			
		||||
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (UNC Chapel Hill から) Hao Tan and Mohit Bansal から公開された研究論文: [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490)
 | 
			
		||||
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (Facebook から) Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert から公開された研究論文: [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161)
 | 
			
		||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (Facebook から) Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin から公開された研究論文: [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125)
 | 
			
		||||
1. **[MADLAD-400](https://huggingface.co/docs/transformers/model_doc/madlad-400)** (from Google) released with the paper [MADLAD-400: A Multilingual And Document-Level Large Audited Dataset](https://arxiv.org/abs/2309.04662) by Sneha Kudugunta, Isaac Caswell, Biao Zhang, Xavier Garcia, Christopher A. Choquette-Choo, Katherine Lee, Derrick Xin, Aditya Kusupati, Romi Stella, Ankur Bapna, Orhan Firat.
 | 
			
		||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Jörg Tiedemann から. [OPUS](http://opus.nlpl.eu/) を使いながら学習された "Machine translation" (マシントランスレーション) モデル. [Marian Framework](https://marian-nmt.github.io/) はMicrosoft Translator Team が現在開発中です.
 | 
			
		||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (Microsoft Research Asia から) Junlong Li, Yiheng Xu, Lei Cui, Furu Wei から公開された研究論文: [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518)
 | 
			
		||||
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (FAIR and UIUC から) Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar. から公開された研究論文 [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527)
 | 
			
		||||
@ -423,6 +426,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ
 | 
			
		||||
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (NVIDIA から) Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro から公開された研究論文: [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053)
 | 
			
		||||
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (Alibaba Research から) Peng Wang, Cheng Da, and Cong Yao. から公開された研究論文 [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592)
 | 
			
		||||
1. **[Mistral](https://huggingface.co/docs/transformers/model_doc/mistral)** (from Mistral AI) by The Mistral AI team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.. 
 | 
			
		||||
1. **[Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral)** (from Mistral AI) by The [Mistral AI](https://mistral.ai) team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed. 
 | 
			
		||||
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (Studio Ousia から) Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka から公開された研究論文: [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151)
 | 
			
		||||
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (Facebook から) Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli. から公開された研究論文 [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516)
 | 
			
		||||
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (CMU/Google Brain から) Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou から公開された研究論文: [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984)
 | 
			
		||||
@ -447,10 +451,13 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ
 | 
			
		||||
1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (Meta AI から) Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al から公開された研究論文: [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068)
 | 
			
		||||
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (Google AI から) Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby から公開された研究論文: [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230)
 | 
			
		||||
1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (Google AI から) Matthias Minderer, Alexey Gritsenko, Neil Houlsby. から公開された研究論文 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683)
 | 
			
		||||
1. **[PatchTSMixer](https://huggingface.co/docs/transformers/model_doc/patchtsmixer)** (IBM Research から) Vijay Ekambaram, Arindam Jati, Nam Nguyen, Phanwadee Sinthong, Jayant Kalagnanam. から公開された研究論文 [TSMixer: Lightweight MLP-Mixer Model for Multivariate Time Series Forecasting](https://arxiv.org/pdf/2306.09364.pdf)
 | 
			
		||||
1. **[PatchTST](https://huggingface.co/docs/transformers/model_doc/patchtst)** (IBM から) Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam. から公開された研究論文 [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/pdf/2211.14730.pdf)
 | 
			
		||||
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (Google から) Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu から公開された研究論文: [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777)
 | 
			
		||||
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (Google から) Jason Phang, Yao Zhao, and Peter J. Liu から公開された研究論文: [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347)
 | 
			
		||||
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (Deepmind から) Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira から公開された研究論文: [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795)
 | 
			
		||||
1. **[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (ADEPT から) Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani. から公開された研究論文 [blog post](https://www.adept.ai/blog/persimmon-8b)
 | 
			
		||||
1. **[Phi](https://huggingface.co/docs/transformers/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee.
 | 
			
		||||
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (VinAI Research から) Dat Quoc Nguyen and Anh Tuan Nguyen から公開された研究論文: [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/)
 | 
			
		||||
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (Google から) Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova. から公開された研究論文 [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347)
 | 
			
		||||
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (UCLA NLP から) Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang から公開された研究論文: [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333)
 | 
			
		||||
@ -471,6 +478,7 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ
 | 
			
		||||
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (ZhuiyiTechnology から), Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu から公開された研究論文: [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864)
 | 
			
		||||
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (Bo Peng から) Bo Peng. から公開された研究論文 [this repo](https://github.com/BlinkDL/RWKV-LM)
 | 
			
		||||
1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team.
 | 
			
		||||
1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team.
 | 
			
		||||
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (NVIDIA から) Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo から公開された研究論文: [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203)
 | 
			
		||||
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (Meta AI から) Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick. から公開された研究論文 [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf)
 | 
			
		||||
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (ASAPP から) Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi から公開された研究論文: [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870)
 | 
			
		||||
@ -496,14 +504,17 @@ Flax、PyTorch、TensorFlowをcondaでインストールする方法は、それ
 | 
			
		||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (Google/CMU から) Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov から公開された研究論文: [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860)
 | 
			
		||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (Microsoft から), Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei から公開された研究論文: [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282)
 | 
			
		||||
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (UNC Chapel Hill から), Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal から公開された研究論文: [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156)
 | 
			
		||||
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (Intel から), Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding から公開された研究論文: [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995)
 | 
			
		||||
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (Google Research から) Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler から公開された研究論文: [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1)
 | 
			
		||||
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (Google Research から) Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant. から公開された研究論文 [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi)
 | 
			
		||||
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (Microsoft Research から) Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang から公開された研究論文: [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597)
 | 
			
		||||
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (Microsoft Research から) Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu から公開された研究論文: [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752)
 | 
			
		||||
1. **[UnivNet](https://huggingface.co/docs/transformers/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim. 
 | 
			
		||||
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (Peking University から) Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun. から公開された研究論文 [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221)
 | 
			
		||||
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (Tsinghua University and Nankai University から) Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu から公開された研究論文: [Visual Attention Network](https://arxiv.org/abs/2202.09741)
 | 
			
		||||
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (Multimedia Computing Group, Nanjing University から) Zhan Tong, Yibing Song, Jue Wang, Limin Wang から公開された研究論文: [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602)
 | 
			
		||||
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (NAVER AI Lab/Kakao Enterprise/Kakao Brain から) Wonjae Kim, Bokyung Son, Ildoo Kim から公開された研究論文: [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334)
 | 
			
		||||
1. **[VipLlava](https://huggingface.co/docs/transformers/model_doc/vipllava)** (University of Wisconsin–Madison から) Mu Cai, Haotian Liu, Siva Karthik Mustikovela, Gregory P. Meyer, Yuning Chai, Dennis Park, Yong Jae Lee. から公開された研究論文 [Making Large Multimodal Models Understand Arbitrary Visual Prompts](https://arxiv.org/abs/2312.00784)
 | 
			
		||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (Google AI から) Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby から公開された研究論文: [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929)
 | 
			
		||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (UCLA NLP から) Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang から公開された研究論文: [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557)
 | 
			
		||||
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (Google AI から) Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby から公開された研究論文: [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929)
 | 
			
		||||
 | 
			
		||||
							
								
								
									
11
README_ko.md
							@ -245,6 +245,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
 | 
			
		||||
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (LAION-AI 에서 제공)은 Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov.의 [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (OpenAI 에서) Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever 의 [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (University of Göttingen 에서) Timo Lüddecke and Alexander Ecker 의 [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[CLVP](https://huggingface.co/docs/transformers/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. 
 | 
			
		||||
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (Salesforce 에서) Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong 의 [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (MetaAI 에서 제공)은 Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve.의 [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (Microsoft Research Asia 에서) Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang 의 [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) 논문과 함께 발표했습니다.
 | 
			
		||||
@ -320,12 +321,14 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
 | 
			
		||||
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (South China University of Technology 에서) Jiapeng Wang, Lianwen Jin, Kai Ding 의 [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (The FAIR team of Meta AI 에서 제공)은 Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample.의 [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (The FAIR team of Meta AI 에서 제공)은 Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom.의 [Llama2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/XXX)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[LLaVa](https://huggingface.co/docs/transformers/model_doc/llava)** (Microsoft Research & University of Wisconsin-Madison 에서 제공)은 Haotian Liu, Chunyuan Li, Yuheng Li and Yong Jae Lee.의 [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (AllenAI 에서) Iz Beltagy, Matthew E. Peters, Arman Cohan 의 [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (Google AI 에서) Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang 의 [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (Studio Ousia 에서) Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto 의 [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (UNC Chapel Hill 에서) Hao Tan and Mohit Bansal 의 [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (Facebook 에서) Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert 의 [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (Facebook 에서) Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin 의 [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[MADLAD-400](https://huggingface.co/docs/transformers/model_doc/madlad-400)** (from Google) released with the paper [MADLAD-400: A Multilingual And Document-Level Large Audited Dataset](https://arxiv.org/abs/2309.04662) by Sneha Kudugunta, Isaac Caswell, Biao Zhang, Xavier Garcia, Christopher A. Choquette-Choo, Katherine Lee, Derrick Xin, Aditya Kusupati, Romi Stella, Ankur Bapna, Orhan Firat.
 | 
			
		||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
 | 
			
		||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (Microsoft Research Asia 에서) Junlong Li, Yiheng Xu, Lei Cui, Furu Wei 의 [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (FAIR and UIUC 에서 제공)은 Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.의 [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527)논문과 함께 발표했습니다.
 | 
			
		||||
@ -338,6 +341,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
 | 
			
		||||
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (NVIDIA 에서) Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro 의 [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (Alibaba Research 에서 제공)은 Peng Wang, Cheng Da, and Cong Yao.의 [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[Mistral](https://huggingface.co/docs/transformers/model_doc/mistral)** (from Mistral AI) by The Mistral AI team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.
 | 
			
		||||
1. **[Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral)** (from Mistral AI) by The [Mistral AI](https://mistral.ai) team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed. 
 | 
			
		||||
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (Studio Ousia 에서) Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka 의 [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (Facebook 에서 제공)은 Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli.의 [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (CMU/Google Brain 에서) Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou 의 [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) 논문과 함께 발표했습니다.
 | 
			
		||||
@ -362,10 +366,13 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
 | 
			
		||||
1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (Meta AI 에서) Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al 의 [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (Google AI 에서) Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 의 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (Google AI 에서 제공)은 Matthias Minderer, Alexey Gritsenko, Neil Houlsby.의 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[PatchTSMixer](https://huggingface.co/docs/transformers/model_doc/patchtsmixer)** (IBM Research 에서 제공)은 Vijay Ekambaram, Arindam Jati, Nam Nguyen, Phanwadee Sinthong, Jayant Kalagnanam.의 [TSMixer: Lightweight MLP-Mixer Model for Multivariate Time Series Forecasting](https://arxiv.org/pdf/2306.09364.pdf)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[PatchTST](https://huggingface.co/docs/transformers/model_doc/patchtst)** (IBM 에서 제공)은 Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam.의 [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/pdf/2211.14730.pdf)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (Google 에서) Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu 의 [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (Google 에서) Jason Phang, Yao Zhao, Peter J. Liu 의 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (Deepmind 에서) Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 의 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (ADEPT 에서 제공)은 Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani.의 [blog post](https://www.adept.ai/blog/persimmon-8b)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[Phi](https://huggingface.co/docs/transformers/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee.
 | 
			
		||||
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (VinAI Research 에서) Dat Quoc Nguyen and Anh Tuan Nguyen 의 [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (Google 에서 제공)은 Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.의 [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (UCLA NLP 에서) Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang 의 [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) 논문과 함께 발표했습니다.
 | 
			
		||||
@ -386,6 +393,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
 | 
			
		||||
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (ZhuiyiTechnology 에서) Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu 의 [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (Bo Peng 에서 제공)은 Bo Peng.의 [this repo](https://github.com/BlinkDL/RWKV-LM)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team.
 | 
			
		||||
1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team.
 | 
			
		||||
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (NVIDIA 에서) Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo 의 [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (Meta AI 에서 제공)은 Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.의 [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (ASAPP 에서) Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi 의 [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) 논문과 함께 발표했습니다.
 | 
			
		||||
@ -411,14 +419,17 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
 | 
			
		||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (Google/CMU 에서) Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov 의 [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (Microsoft 에서) Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei 의 [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (UNC Chapel Hill 에서) Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal 의 [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (Intel 에서) Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding 의 [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (Google Research 에서) Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler 의 [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (Google Research 에서 제공)은 Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.의 [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (Microsoft Research 에서) Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 의 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (Microsoft Research 에서) Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu 의 [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[UnivNet](https://huggingface.co/docs/transformers/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim. 
 | 
			
		||||
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (Peking University 에서 제공)은 Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.의 [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (Tsinghua University and Nankai University 에서) Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu 의 [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (Multimedia Computing Group, Nanjing University 에서) Zhan Tong, Yibing Song, Jue Wang, Limin Wang 의 [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (NAVER AI Lab/Kakao Enterprise/Kakao Brain 에서) Wonjae Kim, Bokyung Son, Ildoo Kim 의 [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[VipLlava](https://huggingface.co/docs/transformers/model_doc/vipllava)** (University of Wisconsin–Madison 에서 제공)은 Mu Cai, Haotian Liu, Siva Karthik Mustikovela, Gregory P. Meyer, Yuning Chai, Dennis Park, Yong Jae Lee.의 [Making Large Multimodal Models Understand Arbitrary Visual Prompts](https://arxiv.org/abs/2312.00784)논문과 함께 발표했습니다.
 | 
			
		||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (Google AI 에서) Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 의 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (UCLA NLP 에서) Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang 의 [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) 논문과 함께 발표했습니다.
 | 
			
		||||
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (Google AI 에서) Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 의 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 논문과 함께 발표했습니다.
 | 
			
		||||
 | 
			
		||||
@ -409,6 +409,7 @@ Número atual de pontos de verificação:
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
 | 
			
		||||
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
 | 
			
		||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
 | 
			
		||||
1. **[MADLAD-400](https://huggingface.co/docs/transformers/model_doc/madlad-400)** (from Google) released with the paper [MADLAD-400: A Multilingual And Document-Level Large Audited Dataset](https://arxiv.org/abs/2309.04662) by Sneha Kudugunta, Isaac Caswell, Biao Zhang, Xavier Garcia, Christopher A. Choquette-Choo, Katherine Lee, Derrick Xin, Aditya Kusupati, Romi Stella, Ankur Bapna, Orhan Firat.
 | 
			
		||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
 | 
			
		||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
 | 
			
		||||
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
 | 
			
		||||
 | 
			
		||||
@ -399,6 +399,7 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
 | 
			
		||||
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
 | 
			
		||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
 | 
			
		||||
1. **[MADLAD-400](https://huggingface.co/docs/transformers/model_doc/madlad-400)** (from Google) released with the paper [MADLAD-400: A Multilingual And Document-Level Large Audited Dataset](https://arxiv.org/abs/2309.04662) by Sneha Kudugunta, Isaac Caswell, Biao Zhang, Xavier Garcia, Christopher A. Choquette-Choo, Katherine Lee, Derrick Xin, Aditya Kusupati, Romi Stella, Ankur Bapna, Orhan Firat.
 | 
			
		||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
 | 
			
		||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
 | 
			
		||||
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
 | 
			
		||||
@ -436,6 +437,7 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu.
 | 
			
		||||
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
 | 
			
		||||
1. **[Persimmon](https://huggingface.co/docs/transformers/main/model_doc/persimmon)** (from ADEPT) released in a [blog post](https://www.adept.ai/blog/persimmon-8b) by Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani.
 | 
			
		||||
1. **[Phi](https://huggingface.co/docs/transformers/main/model_doc/phi)** (from Microsoft Research) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee.
 | 
			
		||||
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
 | 
			
		||||
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.
 | 
			
		||||
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
 | 
			
		||||
 | 
			
		||||
@ -402,6 +402,7 @@ Flax, PyTorch లేదా TensorFlow యొక్క ఇన్స్టా
 | 
			
		||||
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
 | 
			
		||||
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
 | 
			
		||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
 | 
			
		||||
1. **[MADLAD-400](https://huggingface.co/docs/transformers/model_doc/madlad-400)** (from Google) released with the paper [MADLAD-400: A Multilingual And Document-Level Large Audited Dataset](https://arxiv.org/abs/2309.04662) by Sneha Kudugunta, Isaac Caswell, Biao Zhang, Xavier Garcia, Christopher A. Choquette-Choo, Katherine Lee, Derrick Xin, Aditya Kusupati, Romi Stella, Ankur Bapna, Orhan Firat.
 | 
			
		||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
 | 
			
		||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
 | 
			
		||||
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
 | 
			
		||||
 | 
			
		||||
@ -269,6 +269,7 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (来自 LAION-AI) 伴随论文 [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) 由 Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov 发布。
 | 
			
		||||
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (来自 OpenAI) 伴随论文 [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) 由 Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever 发布。
 | 
			
		||||
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (来自 University of Göttingen) 伴随论文 [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) 由 Timo Lüddecke and Alexander Ecker 发布。
 | 
			
		||||
1. **[CLVP](https://huggingface.co/docs/transformers/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. 
 | 
			
		||||
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (来自 Salesforce) 伴随论文 [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) 由 Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong 发布。
 | 
			
		||||
1. **[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (来自 MetaAI) 伴随论文 [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) 由 Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve 发布。
 | 
			
		||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (来自 Microsoft Research Asia) 伴随论文 [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) 由 Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang 发布。
 | 
			
		||||
@ -344,12 +345,14 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (来自 South China University of Technology) 伴随论文 [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) 由 Jiapeng Wang, Lianwen Jin, Kai Ding 发布。
 | 
			
		||||
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (来自 The FAIR team of Meta AI) 伴随论文 [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) 由 Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample 发布。
 | 
			
		||||
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (来自 The FAIR team of Meta AI) 伴随论文 [Llama2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/XXX) 由 Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom. 发布。
 | 
			
		||||
1. **[LLaVa](https://huggingface.co/docs/transformers/model_doc/llava)** (来自 Microsoft Research & University of Wisconsin-Madison) 伴随论文 [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485) 由 Haotian Liu, Chunyuan Li, Yuheng Li and Yong Jae Lee 发布。
 | 
			
		||||
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (来自 AllenAI) 伴随论文 [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) 由 Iz Beltagy, Matthew E. Peters, Arman Cohan 发布。
 | 
			
		||||
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (来自 Google AI) 伴随论文 [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) 由 Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang 发布。
 | 
			
		||||
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (来自 Studio Ousia) 伴随论文 [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) 由 Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto 发布。
 | 
			
		||||
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (来自 UNC Chapel Hill) 伴随论文 [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) 由 Hao Tan and Mohit Bansal 发布。
 | 
			
		||||
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (来自 Facebook) 伴随论文 [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) 由 Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert 发布。
 | 
			
		||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (来自 Facebook) 伴随论文 [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) 由 Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin 发布。
 | 
			
		||||
1. **[MADLAD-400](https://huggingface.co/docs/transformers/model_doc/madlad-400)** (from Google) released with the paper [MADLAD-400: A Multilingual And Document-Level Large Audited Dataset](https://arxiv.org/abs/2309.04662) by Sneha Kudugunta, Isaac Caswell, Biao Zhang, Xavier Garcia, Christopher A. Choquette-Choo, Katherine Lee, Derrick Xin, Aditya Kusupati, Romi Stella, Ankur Bapna, Orhan Firat.
 | 
			
		||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** 用 [OPUS](http://opus.nlpl.eu/) 数据训练的机器翻译模型由 Jörg Tiedemann 发布。[Marian Framework](https://marian-nmt.github.io/) 由微软翻译团队开发。
 | 
			
		||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (来自 Microsoft Research Asia) 伴随论文 [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) 由 Junlong Li, Yiheng Xu, Lei Cui, Furu Wei 发布。
 | 
			
		||||
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (来自 FAIR and UIUC) 伴随论文 [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) 由 Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar 发布。
 | 
			
		||||
@ -362,6 +365,7 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (来自 NVIDIA) 伴随论文 [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) 由 Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro 发布。
 | 
			
		||||
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (来自 Alibaba Research) 伴随论文 [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) 由 Peng Wang, Cheng Da, and Cong Yao 发布。
 | 
			
		||||
1. **[Mistral](https://huggingface.co/docs/transformers/model_doc/mistral)** (from Mistral AI) by The Mistral AI team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.
 | 
			
		||||
1. **[Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral)** (from Mistral AI) by The [Mistral AI](https://mistral.ai) team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed. 
 | 
			
		||||
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (来自 Studio Ousia) 伴随论文 [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) 由 Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka 发布。
 | 
			
		||||
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (来自 Facebook) 伴随论文 [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) 由 Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli 发布。
 | 
			
		||||
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (来自 CMU/Google Brain) 伴随论文 [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) 由 Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou 发布。
 | 
			
		||||
@ -386,10 +390,13 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (来自 Meta AI) 伴随论文 [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 由 Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al 发布。
 | 
			
		||||
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (来自 Google AI) 伴随论文 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 由 Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 发布。
 | 
			
		||||
1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (来自 Google AI) 伴随论文 [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) 由 Matthias Minderer, Alexey Gritsenko, Neil Houlsby 发布。
 | 
			
		||||
1. **[PatchTSMixer](https://huggingface.co/docs/transformers/model_doc/patchtsmixer)** (来自  IBM Research) 伴随论文 [TSMixer: Lightweight MLP-Mixer Model for Multivariate Time Series Forecasting](https://arxiv.org/pdf/2306.09364.pdf) 由 Vijay Ekambaram, Arindam Jati, Nam Nguyen, Phanwadee Sinthong, Jayant Kalagnanam 发布。
 | 
			
		||||
1. **[PatchTST](https://huggingface.co/docs/transformers/model_doc/patchtst)** (来自 IBM) 伴随论文 [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/pdf/2211.14730.pdf) 由 Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam 发布。
 | 
			
		||||
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (来自 Google) 伴随论文 [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 由 Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu 发布。
 | 
			
		||||
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (来自 Google) 伴随论文 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 由 Jason Phang, Yao Zhao, Peter J. Liu 发布。
 | 
			
		||||
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (来自 Deepmind) 伴随论文 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 由 Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 发布。
 | 
			
		||||
1. **[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (来自 ADEPT) 伴随论文 [blog post](https://www.adept.ai/blog/persimmon-8b) 由 Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani 发布。
 | 
			
		||||
1. **[Phi](https://huggingface.co/docs/transformers/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee.
 | 
			
		||||
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (来自 VinAI Research) 伴随论文 [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) 由 Dat Quoc Nguyen and Anh Tuan Nguyen 发布。
 | 
			
		||||
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (来自 Google) 伴随论文 [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) 由 Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova 发布。
 | 
			
		||||
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (来自 UCLA NLP) 伴随论文 [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) 由 Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang 发布。
 | 
			
		||||
@ -410,6 +417,7 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (来自 ZhuiyiTechnology), 伴随论文 [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) 由 Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu 发布。
 | 
			
		||||
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (来自 Bo Peng) 伴随论文 [this repo](https://github.com/BlinkDL/RWKV-LM) 由 Bo Peng 发布。
 | 
			
		||||
1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team.
 | 
			
		||||
1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team.
 | 
			
		||||
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (来自 NVIDIA) 伴随论文 [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) 由 Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo 发布。
 | 
			
		||||
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (来自 Meta AI) 伴随论文 [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) 由 Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick 发布。
 | 
			
		||||
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (来自 ASAPP) 伴随论文 [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) 由 Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi 发布。
 | 
			
		||||
@ -435,14 +443,17 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (来自 Google/CMU) 伴随论文 [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) 由 Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov 发布。
 | 
			
		||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (来自 Microsoft) 伴随论文 [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 由 Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei 发布。
 | 
			
		||||
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (来自 UNC Chapel Hill) 伴随论文 [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) 由 Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal 发布。
 | 
			
		||||
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (来自 Intel) 伴随论文 [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) 由 Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding 发布.
 | 
			
		||||
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
 | 
			
		||||
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (来自 Google Research) 伴随论文 [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) 由 Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant 发布。
 | 
			
		||||
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (来自 Microsoft Research) 伴随论文 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 由 Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 发布。
 | 
			
		||||
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (来自 Microsoft Research) 伴随论文 [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) 由 Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu 发布。
 | 
			
		||||
1. **[UnivNet](https://huggingface.co/docs/transformers/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim. 
 | 
			
		||||
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (来自 Peking University) 伴随论文 [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) 由 Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun 发布。
 | 
			
		||||
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (来自 Tsinghua University and Nankai University) 伴随论文 [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) 由 Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu 发布。
 | 
			
		||||
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (来自 Multimedia Computing Group, Nanjing University) 伴随论文 [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) 由 Zhan Tong, Yibing Song, Jue Wang, Limin Wang 发布。
 | 
			
		||||
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (来自 NAVER AI Lab/Kakao Enterprise/Kakao Brain) 伴随论文 [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) 由 Wonjae Kim, Bokyung Son, Ildoo Kim 发布。
 | 
			
		||||
1. **[VipLlava](https://huggingface.co/docs/transformers/model_doc/vipllava)** (来自 University of Wisconsin–Madison) 伴随论文 [Making Large Multimodal Models Understand Arbitrary Visual Prompts](https://arxiv.org/abs/2312.00784) 由 Mu Cai, Haotian Liu, Siva Karthik Mustikovela, Gregory P. Meyer, Yuning Chai, Dennis Park, Yong Jae Lee 发布。
 | 
			
		||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (来自 Google AI) 伴随论文 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 由 Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 发布。
 | 
			
		||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (来自 UCLA NLP) 伴随论文 [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) 由 Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang 发布。
 | 
			
		||||
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (来自 Google AI) 伴随论文 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 由 Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 发布。
 | 
			
		||||
 | 
			
		||||
@ -281,6 +281,7 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[CLAP](https://huggingface.co/docs/transformers/model_doc/clap)** (from LAION-AI) released with the paper [Large-scale Contrastive Language-Audio Pretraining with Feature Fusion and Keyword-to-Caption Augmentation](https://arxiv.org/abs/2211.06687) by Yusong Wu, Ke Chen, Tianyu Zhang, Yuchen Hui, Taylor Berg-Kirkpatrick, Shlomo Dubnov.
 | 
			
		||||
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
 | 
			
		||||
1. **[CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)** (from University of Göttingen) released with the paper [Image Segmentation Using Text and Image Prompts](https://arxiv.org/abs/2112.10003) by Timo Lüddecke and Alexander Ecker.
 | 
			
		||||
1. **[CLVP](https://huggingface.co/docs/transformers/model_doc/clvp)** released with the paper [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker. 
 | 
			
		||||
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
 | 
			
		||||
1. **[CodeLlama](https://huggingface.co/docs/transformers/model_doc/llama_code)** (from MetaAI) released with the paper [Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/) by Baptiste Rozière, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaoqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafiori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, Gabriel Synnaeve.
 | 
			
		||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
 | 
			
		||||
@ -356,12 +357,14 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[LiLT](https://huggingface.co/docs/transformers/model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
 | 
			
		||||
1. **[LLaMA](https://huggingface.co/docs/transformers/model_doc/llama)** (from The FAIR team of Meta AI) released with the paper [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample.
 | 
			
		||||
1. **[Llama2](https://huggingface.co/docs/transformers/model_doc/llama2)** (from The FAIR team of Meta AI) released with the paper [Llama2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/) by Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom.
 | 
			
		||||
1. **[LLaVa](https://huggingface.co/docs/transformers/model_doc/llava)** (from Microsoft Research & University of Wisconsin-Madison) released with the paper [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485) by Haotian Liu, Chunyuan Li, Yuheng Li and Yong Jae Lee.
 | 
			
		||||
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
 | 
			
		||||
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
 | 
			
		||||
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
 | 
			
		||||
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
 | 
			
		||||
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
 | 
			
		||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
 | 
			
		||||
1. **[MADLAD-400](https://huggingface.co/docs/transformers/model_doc/madlad-400)** (from Google) released with the paper [MADLAD-400: A Multilingual And Document-Level Large Audited Dataset](https://arxiv.org/abs/2309.04662) by Sneha Kudugunta, Isaac Caswell, Biao Zhang, Xavier Garcia, Christopher A. Choquette-Choo, Katherine Lee, Derrick Xin, Aditya Kusupati, Romi Stella, Ankur Bapna, Orhan Firat.
 | 
			
		||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
 | 
			
		||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
 | 
			
		||||
1. **[Mask2Former](https://huggingface.co/docs/transformers/model_doc/mask2former)** (from FAIR and UIUC) released with the paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) by Bowen Cheng, Ishan Misra, Alexander G. Schwing, Alexander Kirillov, Rohit Girdhar.
 | 
			
		||||
@ -374,6 +377,7 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
 | 
			
		||||
1. **[MGP-STR](https://huggingface.co/docs/transformers/model_doc/mgp-str)** (from Alibaba Research) released with the paper [Multi-Granularity Prediction for Scene Text Recognition](https://arxiv.org/abs/2209.03592) by Peng Wang, Cheng Da, and Cong Yao.
 | 
			
		||||
1. **[Mistral](https://huggingface.co/docs/transformers/model_doc/mistral)** (from Mistral AI) by The Mistral AI team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.
 | 
			
		||||
1. **[Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral)** (from Mistral AI) by The [Mistral AI](https://mistral.ai) team: Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed. 
 | 
			
		||||
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
 | 
			
		||||
1. **[MMS](https://huggingface.co/docs/transformers/model_doc/mms)** (from Facebook) released with the paper [Scaling Speech Technology to 1,000+ Languages](https://arxiv.org/abs/2305.13516) by Vineel Pratap, Andros Tjandra, Bowen Shi, Paden Tomasello, Arun Babu, Sayani Kundu, Ali Elkahky, Zhaoheng Ni, Apoorv Vyas, Maryam Fazel-Zarandi, Alexei Baevski, Yossi Adi, Xiaohui Zhang, Wei-Ning Hsu, Alexis Conneau, Michael Auli.
 | 
			
		||||
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
 | 
			
		||||
@ -398,10 +402,13 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
 | 
			
		||||
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
 | 
			
		||||
1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby.
 | 
			
		||||
1. **[PatchTSMixer](https://huggingface.co/docs/transformers/model_doc/patchtsmixer)** (from  IBM Research) released with the paper [TSMixer: Lightweight MLP-Mixer Model for Multivariate Time Series Forecasting](https://arxiv.org/pdf/2306.09364.pdf) by Vijay Ekambaram, Arindam Jati, Nam Nguyen, Phanwadee Sinthong, Jayant Kalagnanam.
 | 
			
		||||
1. **[PatchTST](https://huggingface.co/docs/transformers/model_doc/patchtst)** (from IBM) released with the paper [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/pdf/2211.14730.pdf) by Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong, Jayant Kalagnanam.
 | 
			
		||||
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
 | 
			
		||||
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu.
 | 
			
		||||
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
 | 
			
		||||
1. **[Persimmon](https://huggingface.co/docs/transformers/model_doc/persimmon)** (from ADEPT) released with the [blog post](https://www.adept.ai/blog/persimmon-8b) by Erich Elsen, Augustus Odena, Maxwell Nye, Sağnak Taşırlar, Tri Dao, Curtis Hawthorne, Deepak Moparthi, Arushi Somani.
 | 
			
		||||
1. **[Phi](https://huggingface.co/docs/transformers/model_doc/phi)** (from Microsoft) released with the papers - [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee.
 | 
			
		||||
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
 | 
			
		||||
1. **[Pix2Struct](https://huggingface.co/docs/transformers/model_doc/pix2struct)** (from Google) released with the paper [Pix2Struct: Screenshot Parsing as Pretraining for Visual Language Understanding](https://arxiv.org/abs/2210.03347) by Kenton Lee, Mandar Joshi, Iulia Turc, Hexiang Hu, Fangyu Liu, Julian Eisenschlos, Urvashi Khandelwal, Peter Shaw, Ming-Wei Chang, Kristina Toutanova.
 | 
			
		||||
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
 | 
			
		||||
@ -422,6 +429,7 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
 | 
			
		||||
1. **[RWKV](https://huggingface.co/docs/transformers/model_doc/rwkv)** (from Bo Peng) released with [this repo](https://github.com/BlinkDL/RWKV-LM) by Bo Peng.
 | 
			
		||||
1. **[SeamlessM4T](https://huggingface.co/docs/transformers/model_doc/seamless_m4t)** (from Meta AI) released with the paper [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team.
 | 
			
		||||
1. **[SeamlessM4Tv2](https://huggingface.co/docs/transformers/model_doc/seamless_m4t_v2)** (from Meta AI) released with the paper [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team.
 | 
			
		||||
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
 | 
			
		||||
1. **[Segment Anything](https://huggingface.co/docs/transformers/model_doc/sam)** (from Meta AI) released with the paper [Segment Anything](https://arxiv.org/pdf/2304.02643v1.pdf) by Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alex Berg, Wan-Yen Lo, Piotr Dollar, Ross Girshick.
 | 
			
		||||
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
 | 
			
		||||
@ -447,14 +455,17 @@ conda install -c huggingface transformers
 | 
			
		||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
 | 
			
		||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
 | 
			
		||||
1. **[TVLT](https://huggingface.co/docs/transformers/model_doc/tvlt)** (from UNC Chapel Hill) released with the paper [TVLT: Textless Vision-Language Transformer](https://arxiv.org/abs/2209.14156) by Zineng Tang, Jaemin Cho, Yixin Nie, Mohit Bansal.
 | 
			
		||||
1. **[TVP](https://huggingface.co/docs/transformers/model_doc/tvp)** (from Intel) released with the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding.
 | 
			
		||||
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler.
 | 
			
		||||
1. **[UMT5](https://huggingface.co/docs/transformers/model_doc/umt5)** (from Google Research) released with the paper [UniMax: Fairer and More Effective Language Sampling for Large-Scale Multilingual Pretraining](https://openreview.net/forum?id=kXwdL1cWOAi) by Hyung Won Chung, Xavier Garcia, Adam Roberts, Yi Tay, Orhan Firat, Sharan Narang, Noah Constant.
 | 
			
		||||
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
 | 
			
		||||
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
 | 
			
		||||
1. **[UnivNet](https://huggingface.co/docs/transformers/model_doc/univnet)** (from Kakao Corporation) released with the paper [UnivNet: A Neural Vocoder with Multi-Resolution Spectrogram Discriminators for High-Fidelity Waveform Generation](https://arxiv.org/abs/2106.07889) by Won Jang, Dan Lim, Jaesam Yoon, Bongwan Kim, and Juntae Kim. 
 | 
			
		||||
1. **[UPerNet](https://huggingface.co/docs/transformers/model_doc/upernet)** (from Peking University) released with the paper [Unified Perceptual Parsing for Scene Understanding](https://arxiv.org/abs/1807.10221) by Tete Xiao, Yingcheng Liu, Bolei Zhou, Yuning Jiang, Jian Sun.
 | 
			
		||||
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
 | 
			
		||||
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
 | 
			
		||||
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
 | 
			
		||||
1. **[VipLlava](https://huggingface.co/docs/transformers/model_doc/vipllava)** (from University of Wisconsin–Madison) released with the paper [Making Large Multimodal Models Understand Arbitrary Visual Prompts](https://arxiv.org/abs/2312.00784) by Mu Cai, Haotian Liu, Siva Karthik Mustikovela, Gregory P. Meyer, Yuning Chai, Dennis Park, Yong Jae Lee.
 | 
			
		||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
 | 
			
		||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
 | 
			
		||||
1. **[ViT Hybrid](https://huggingface.co/docs/transformers/model_doc/vit_hybrid)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
 | 
			
		||||
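The catalogue entries above only name the newly supported architectures. As a quick orientation, here is a minimal sketch (not part of the diff) of how such a model can be loaded through the generic Auto classes; the checkpoint id and generation settings are illustrative assumptions.

```py
# Minimal sketch: loading one of the newly listed architectures via the generic
# Auto classes. The checkpoint id and generation settings are illustrative only.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"  # assumed example checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")  # device_map needs `accelerate`

inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```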
 | 
			
		||||
@ -56,7 +56,7 @@ RUN python3 -m pip install --no-cache-dir auto-gptq --extra-index-url https://hu
 | 
			
		||||
RUN python3 -m pip install --no-cache-dir einops
 | 
			
		||||
 | 
			
		||||
# Add autoawq for quantization testing
 | 
			
		||||
RUN python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ/releases/download/v0.1.6/autoawq-0.1.6+cu118-cp38-cp38-linux_x86_64.whl
 | 
			
		||||
RUN python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ/releases/download/v0.1.7/autoawq-0.1.7+cu118-cp38-cp38-linux_x86_64.whl
 | 
			
		||||
 | 
			
		||||
# For bettertransformer + gptq 
 | 
			
		||||
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/optimum@main#egg=optimum
 | 
			
		||||
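Since the wheel above is pulled in for quantization testing, a rough sketch of the kind of check it enables follows: loading an AWQ-format checkpoint straight through `from_pretrained`. The repository id below is only an illustrative assumption; any AWQ-format checkpoint would do.

```py
# Rough sketch of what the autoawq wheel is exercised for in the test image:
# loading a checkpoint stored in AWQ format through the regular API.
from transformers import AutoModelForCausalLM, AutoTokenizer

awq_model_id = "TheBloke/Mistral-7B-Instruct-v0.1-AWQ"  # assumed example repo
tokenizer = AutoTokenizer.from_pretrained(awq_model_id)
model = AutoModelForCausalLM.from_pretrained(awq_model_id, device_map="auto")
print(model.config.quantization_config)  # should report the AWQ settings
```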
@ -67,6 +67,9 @@ RUN python3 -m pip install --no-cache-dir decord av==9.2.0
 | 
			
		||||
# For `dinat` model
 | 
			
		||||
RUN python3 -m pip install --no-cache-dir natten -f https://shi-labs.com/natten/wheels/$CUDA/
 | 
			
		||||
 | 
			
		||||
# For `nougat` tokenizer
 | 
			
		||||
RUN python3 -m pip install --no-cache-dir python-Levenshtein
 | 
			
		||||
 | 
			
		||||
# When installing in editable mode, `transformers` is not recognized as a package.
 | 
			
		||||
# this line must be added in order for python to be aware of transformers.
 | 
			
		||||
RUN cd transformers && python3 setup.py develop
 | 
			
		||||
 | 
			
		||||
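A small, purely illustrative check that the editable install above resolved correctly (run inside the built image) could be:

```py
# Illustrative check: with `setup.py develop`, the package should resolve to the
# cloned /transformers checkout rather than a copy under site-packages.
import transformers

print(transformers.__version__)
print(transformers.__file__)
```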
@ -1,27 +1,32 @@
 | 
			
		||||
FROM rocm/pytorch:rocm5.6_ubuntu20.04_py3.8_pytorch_2.0.1
 | 
			
		||||
FROM rocm/dev-ubuntu-20.04:5.6
 | 
			
		||||
# rocm/pytorch has no version with 2.1.0
 | 
			
		||||
LABEL maintainer="Hugging Face"
 | 
			
		||||
 | 
			
		||||
ARG DEBIAN_FRONTEND=noninteractive
 | 
			
		||||
 | 
			
		||||
ARG PYTORCH='2.1.0'
 | 
			
		||||
ARG TORCH_VISION='0.16.0'
 | 
			
		||||
ARG TORCH_AUDIO='2.1.0'
 | 
			
		||||
ARG ROCM='5.6'
 | 
			
		||||
 | 
			
		||||
RUN apt update && \
 | 
			
		||||
    apt install -y --no-install-recommends git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg && \
 | 
			
		||||
    apt install -y --no-install-recommends git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-dev python3-pip ffmpeg && \
 | 
			
		||||
    apt clean && \
 | 
			
		||||
    rm -rf /var/lib/apt/lists/*
 | 
			
		||||
 | 
			
		||||
RUN python3 -m pip install --no-cache-dir --upgrade pip
 | 
			
		||||
 | 
			
		||||
RUN python3 -m pip install torch==$PYTORCH torchvision==$TORCH_VISION torchaudio==$TORCH_AUDIO --index-url https://download.pytorch.org/whl/rocm$ROCM
 | 
			
		||||
 | 
			
		||||
RUN python3 -m pip install --no-cache-dir --upgrade pip setuptools ninja git+https://github.com/facebookresearch/detectron2.git pytesseract "itsdangerous<2.1.0"
 | 
			
		||||
 | 
			
		||||
# If set to nothing, will install the latest version
 | 
			
		||||
ARG PYTORCH='2.0.1'
 | 
			
		||||
ARG TORCH_VISION='0.15.2'
 | 
			
		||||
ARG TORCH_AUDIO='2.0.2'
 | 
			
		||||
ARG ROCM='5.6'
 | 
			
		||||
 | 
			
		||||
RUN git clone --depth 1 --branch v$TORCH_AUDIO https://github.com/pytorch/audio.git
 | 
			
		||||
RUN cd audio && USE_ROCM=1 USE_CUDA=0 python setup.py install
 | 
			
		||||
 | 
			
		||||
ARG REF=main
 | 
			
		||||
WORKDIR /
 | 
			
		||||
 | 
			
		||||
# Invalidate docker cache from here if new commit is available.
 | 
			
		||||
ADD https://api.github.com/repos/huggingface/transformers/git/refs/heads/main version.json
 | 
			
		||||
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
 | 
			
		||||
 | 
			
		||||
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch,testing,video]
 | 
			
		||||
 | 
			
		||||
RUN python3 -m pip uninstall -y tensorflow flax
 | 
			
		||||
 | 
			
		||||
							
								
								
									
docker/transformers-pytorch-deepspeed-amd-gpu/Dockerfile (new file, 45 lines)
@ -0,0 +1,45 @@
 | 
			
		||||
FROM rocm/dev-ubuntu-22.04:5.6
 | 
			
		||||
LABEL maintainer="Hugging Face"
 | 
			
		||||
 | 
			
		||||
ARG DEBIAN_FRONTEND=noninteractive
 | 
			
		||||
ARG PYTORCH='2.1.1'
 | 
			
		||||
ARG TORCH_VISION='0.16.1'
 | 
			
		||||
ARG TORCH_AUDIO='2.1.1'
 | 
			
		||||
ARG ROCM='5.6'
 | 
			
		||||
 | 
			
		||||
RUN apt update && \
 | 
			
		||||
    apt install -y --no-install-recommends \
 | 
			
		||||
    libaio-dev \
 | 
			
		||||
    git \
 | 
			
		||||
    # These are required to build deepspeed.
 | 
			
		||||
    python3-dev \
 | 
			
		||||
    python-is-python3 \
 | 
			
		||||
    rocrand-dev \
 | 
			
		||||
    rocthrust-dev \
 | 
			
		||||
    hipsparse-dev \
 | 
			
		||||
    hipblas-dev \
 | 
			
		||||
    rocblas-dev && \
 | 
			
		||||
    apt clean && \
 | 
			
		||||
    rm -rf /var/lib/apt/lists/*
 | 
			
		||||
 | 
			
		||||
RUN python3 -m pip install --no-cache-dir --upgrade pip ninja "pydantic<2"
 | 
			
		||||
RUN python3 -m pip uninstall -y apex torch torchvision torchaudio
 | 
			
		||||
RUN python3 -m pip install torch==$PYTORCH torchvision==$TORCH_VISION torchaudio==$TORCH_AUDIO --index-url https://download.pytorch.org/whl/rocm$ROCM --no-cache-dir
 | 
			
		||||
 | 
			
		||||
# Pre-build DeepSpeed, so it's ready for testing (to avoid timeout)
 | 
			
		||||
RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache-dir -v --disable-pip-version-check 2>&1
 | 
			
		||||
 | 
			
		||||
ARG REF=main
 | 
			
		||||
WORKDIR /
 | 
			
		||||
 | 
			
		||||
# Invalidate docker cache from here if new commit is available.
 | 
			
		||||
ADD https://api.github.com/repos/huggingface/transformers/git/refs/heads/main version.json
 | 
			
		||||
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
 | 
			
		||||
 | 
			
		||||
RUN python3 -m pip install --no-cache-dir ./transformers[accelerate,testing,sentencepiece,sklearn]
 | 
			
		||||
 | 
			
		||||
# When installing in editable mode, `transformers` is not recognized as a package.
 | 
			
		||||
# this line must be added in order for python to be aware of transformers.
 | 
			
		||||
RUN cd transformers && python3 setup.py develop
 | 
			
		||||
 | 
			
		||||
RUN python3 -c "from deepspeed.launcher.runner import main"
 | 
			
		||||
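The final `RUN` line above only proves that DeepSpeed imports at build time. A slightly broader smoke test one might run inside the finished image is sketched below; it is a suggestion, not part of the Dockerfile, and assumes the ops pre-built by the `DS_BUILD_*` flags.

```py
# Suggested smoke test for the ROCm + DeepSpeed image: check that PyTorch sees
# the AMD GPU (exposed through the CUDA API on ROCm builds) and that the fused
# Adam kernel pre-built with DS_BUILD_FUSED_ADAM=1 actually loads.
import torch
import deepspeed

print("torch:", torch.__version__, "| GPU visible:", torch.cuda.is_available())
print("deepspeed:", deepspeed.__version__)

if torch.cuda.is_available():
    from deepspeed.ops.adam import FusedAdam

    params = [torch.nn.Parameter(torch.zeros(8, device="cuda"))]
    FusedAdam(params, lr=1e-3)  # constructing the optimizer loads the kernel
    print("FusedAdam kernel loaded")
```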
@ -1,12 +1,12 @@
 | 
			
		||||
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-22-12.html#rel-22-12
 | 
			
		||||
FROM nvcr.io/nvidia/pytorch:22.12-py3
 | 
			
		||||
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-23-11.html#rel-23-11
 | 
			
		||||
FROM nvcr.io/nvidia/pytorch:23.11-py3
 | 
			
		||||
LABEL maintainer="Hugging Face"
 | 
			
		||||
 | 
			
		||||
ARG DEBIAN_FRONTEND=noninteractive
 | 
			
		||||
 | 
			
		||||
ARG PYTORCH='2.1.0'
 | 
			
		||||
# Example: `cu102`, `cu113`, etc.
 | 
			
		||||
ARG CUDA='cu118'
 | 
			
		||||
ARG CUDA='cu121'
 | 
			
		||||
 | 
			
		||||
RUN apt -y update
 | 
			
		||||
RUN apt install -y libaio-dev
 | 
			
		||||
@ -34,7 +34,7 @@ RUN python3 -m pip uninstall -y torch-tensorrt
 | 
			
		||||
 | 
			
		||||
# recompile apex
 | 
			
		||||
RUN python3 -m pip uninstall -y apex
 | 
			
		||||
RUN git clone https://github.com/NVIDIA/apex
 | 
			
		||||
# RUN git clone https://github.com/NVIDIA/apex
 | 
			
		||||
#  `MAX_JOBS=1` disables parallel building to avoid cpu memory OOM when building image on GitHub Action (standard) runners
 | 
			
		||||
# TODO: check if there is alternative way to install latest apex
 | 
			
		||||
# RUN cd apex && MAX_JOBS=1 python3 -m pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check .
 | 
			
		||||
@ -44,7 +44,7 @@ RUN python3 -m pip uninstall -y deepspeed
 | 
			
		||||
# This has to be run (again) inside the GPU VMs running the tests.
 | 
			
		||||
# The installation works here, but some tests fail if we don't pre-build deepspeed again in the VMs running the tests.
 | 
			
		||||
# TODO: Find out why the tests fail.
 | 
			
		||||
RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
 | 
			
		||||
RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
 | 
			
		||||
 | 
			
		||||
# When installing in editable mode, `transformers` is not recognized as a package.
 | 
			
		||||
# this line must be added in order for python to be aware of transformers.
 | 
			
		||||
 | 
			
		||||
@ -1,11 +1,11 @@
 | 
			
		||||
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-22-12.html#rel-22-12
 | 
			
		||||
FROM nvcr.io/nvidia/pytorch:22.12-py3
 | 
			
		||||
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-23-11.html#rel-23-11
 | 
			
		||||
FROM nvcr.io/nvidia/pytorch:23.11-py3
 | 
			
		||||
LABEL maintainer="Hugging Face"
 | 
			
		||||
 | 
			
		||||
ARG DEBIAN_FRONTEND=noninteractive
 | 
			
		||||
 | 
			
		||||
# Example: `cu102`, `cu113`, etc.
 | 
			
		||||
ARG CUDA='cu118'
 | 
			
		||||
ARG CUDA='cu121'
 | 
			
		||||
 | 
			
		||||
RUN apt -y update
 | 
			
		||||
RUN apt install -y libaio-dev
 | 
			
		||||
 | 
			
		||||
@ -1,4 +1,4 @@
 | 
			
		||||
FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04
 | 
			
		||||
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04
 | 
			
		||||
LABEL maintainer="Hugging Face"
 | 
			
		||||
 | 
			
		||||
ARG DEBIAN_FRONTEND=noninteractive
 | 
			
		||||
@ -15,7 +15,7 @@ ARG PYTORCH='2.1.0'
 | 
			
		||||
ARG TORCH_VISION=''
 | 
			
		||||
ARG TORCH_AUDIO=''
 | 
			
		||||
# Example: `cu102`, `cu113`, etc.
 | 
			
		||||
ARG CUDA='cu118'
 | 
			
		||||
ARG CUDA='cu121'
 | 
			
		||||
 | 
			
		||||
RUN [ ${#PYTORCH} -gt 0 ] && VERSION='torch=='$PYTORCH'.*' ||  VERSION='torch'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
 | 
			
		||||
RUN [ ${#TORCH_VISION} -gt 0 ] && VERSION='torchvision=='$TORCH_VISION'.*' ||  VERSION='torchvision'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
 | 
			
		||||
 | 
			
		||||
@ -10,5 +10,5 @@ notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
 | 
			
		||||
black_avoid_patterns = {
 | 
			
		||||
    "{processor_class}": "FakeProcessorClass",
 | 
			
		||||
    "{model_class}": "FakeModelClass",
 | 
			
		||||
    "{object_class}": "FakeObjectClass",    
 | 
			
		||||
    "{object_class}": "FakeObjectClass",
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -209,7 +209,7 @@ Audioeingaben werden anders vorverarbeitet als Texteingaben, aber das Endziel bl
 | 
			
		||||
pip install datasets
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Laden Sie den [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) Datensatz (weitere Informationen zum Laden eines Datensatzes finden Sie im 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub.html)):
 | 
			
		||||
Laden Sie den [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) Datensatz (weitere Informationen zum Laden eines Datensatzes finden Sie im 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub)):
 | 
			
		||||
 | 
			
		||||
```py
 | 
			
		||||
>>> from datasets import load_dataset, Audio
 | 
			
		||||
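>>> # Hypothetical continuation of the truncated snippet above (the config name
>>> # and split are assumptions, not part of this diff): load MInDS-14 and look
>>> # at one decoded audio sample.
>>> minds = load_dataset("PolyAI/minds14", name="en-US", split="train")
>>> sample = minds[0]["audio"]  # dict with the waveform array, path and sampling_rate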
@ -344,7 +344,7 @@ Laden wir den [food101](https://huggingface.co/datasets/food101) Datensatz für
 | 
			
		||||
>>> dataset = load_dataset("food101", split="train[:100]")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Als Nächstes sehen Sie sich das Bild mit dem Merkmal 🤗 Datensätze [Bild] (https://huggingface.co/docs/datasets/package_reference/main_classes.html?highlight=image#datasets.Image) an:
 | 
			
		||||
Als Nächstes sehen Sie sich das Bild mit dem Merkmal 🤗 Datensätze [Bild] (https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image) an:
 | 
			
		||||
 | 
			
		||||
```py
 | 
			
		||||
>>> dataset[0]["image"]
 | 
			
		||||
@ -385,7 +385,7 @@ Bei Bildverarbeitungsaufgaben ist es üblich, den Bildern als Teil der Vorverarb
 | 
			
		||||
...     return examples
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
3. Dann verwenden Sie 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process.html#format-transform), um die Transformationen im laufenden Betrieb anzuwenden:
 | 
			
		||||
3. Dann verwenden Sie 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process#format-transform), um die Transformationen im laufenden Betrieb anzuwenden:
 | 
			
		||||
 | 
			
		||||
```py
 | 
			
		||||
>>> dataset.set_transform(transforms)
 | 
			
		||||
 | 
			
		||||
@ -121,7 +121,7 @@ Erstellen wir eine [`pipeline`] mit der Aufgabe die wir lösen und dem Modell we
 | 
			
		||||
>>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Als nächstes laden wir den Datensatz (siehe 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart.html) für mehr Details) welches wir nutzen möchten. Zum Beispiel laden wir den [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) Datensatz:
 | 
			
		||||
Als nächstes laden wir den Datensatz (siehe 🤗 Datasets [Quick Start](https://huggingface.co/docs/datasets/quickstart) für mehr Details) welches wir nutzen möchten. Zum Beispiel laden wir den [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) Datensatz:
 | 
			
		||||
 | 
			
		||||
```py
 | 
			
		||||
>>> from datasets import load_dataset, Audio
 | 
			
		||||
 | 
			
		||||
@ -130,7 +130,7 @@ Der [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) unt
 | 
			
		||||
- Legen Sie die Anzahl der zu verwendenden GPUs mit dem Argument `nproc_per_node` fest.
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
python -m torch.distributed.launch \
 | 
			
		||||
torchrun \
 | 
			
		||||
    --nproc_per_node 8 pytorch/summarization/run_summarization.py \
 | 
			
		||||
    --fp16 \
 | 
			
		||||
    --model_name_or_path t5-small \
 | 
			
		||||
 | 
			
		||||
@ -43,7 +43,7 @@ Laden Sie zunächst den Datensatz [Yelp Reviews](https://huggingface.co/datasets
 | 
			
		||||
 'text': 'My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\nThe cashier took my friends\'s order, then promptly ignored me. I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\'s meal. After watching two people who ordered after me be handed their food, I asked where mine was. The manager started yelling at the cashiers for \\"serving off their orders\\" when they didn\'t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\nThe manager was rude when giving me my order. She didn\'t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\nI\'ve eaten at various McDonalds restaurants for over 30 years. I\'ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!'}
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Wie Sie nun wissen, benötigen Sie einen Tokenizer, um den Text zu verarbeiten und eine Auffüll- und Abschneidungsstrategie einzubauen, um mit variablen Sequenzlängen umzugehen. Um Ihren Datensatz in einem Schritt zu verarbeiten, verwenden Sie die 🤗 Methode Datasets [`map`](https://huggingface.co/docs/datasets/process.html#map), um eine Vorverarbeitungsfunktion auf den gesamten Datensatz anzuwenden:
 | 
			
		||||
Wie Sie nun wissen, benötigen Sie einen Tokenizer, um den Text zu verarbeiten und eine Auffüll- und Abschneidungsstrategie einzubauen, um mit variablen Sequenzlängen umzugehen. Um Ihren Datensatz in einem Schritt zu verarbeiten, verwenden Sie die 🤗 Methode Datasets [`map`](https://huggingface.co/docs/datasets/process#map), um eine Vorverarbeitungsfunktion auf den gesamten Datensatz anzuwenden:
 | 
			
		||||
 | 
			
		||||
```py
 | 
			
		||||
>>> from transformers import AutoTokenizer
 | 
			
		||||
 | 
			
		||||
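>>> # Hypothetical continuation of the truncated snippet above (the checkpoint
>>> # name is an assumption): tokenize the whole dataset in one `map` call, using
>>> # the padding/truncation strategy the paragraph describes.
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

>>> def tokenize_function(examples):
...     return tokenizer(examples["text"], padding="max_length", truncation=True)

>>> tokenized_datasets = dataset.map(tokenize_function, batched=True)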
@ -10,5 +10,5 @@ notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
 | 
			
		||||
black_avoid_patterns = {
 | 
			
		||||
    "{processor_class}": "FakeProcessorClass",
 | 
			
		||||
    "{model_class}": "FakeModelClass",
 | 
			
		||||
    "{object_class}": "FakeObjectClass",    
 | 
			
		||||
    "{object_class}": "FakeObjectClass",
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@ -1,3 +1,3 @@
 | 
			
		||||
# Optimizing inference
 | 
			
		||||
 | 
			
		||||
perf_infer_gpu_many: perf_infer_gpu_one
 | 
			
		||||
perf_infer_gpu_many: perf_infer_gpu_one
 | 
			
		||||
 | 
			
		||||
@ -60,7 +60,7 @@
    - local: tasks/image_classification
      title: Image classification
    - local: tasks/semantic_segmentation
      title: Image segmentation
    - local: tasks/video_classification
      title: Video classification
    - local: tasks/object_detection
@ -133,6 +133,8 @@
- sections:
  - local: performance
    title: Overview
  - local: quantization
    title: Quantization
  - sections:
    - local: perf_train_gpu_one
      title: Methods and tools for efficient training on a single GPU
@ -216,6 +218,8 @@
      title: Agents and Tools
    - local: model_doc/auto
      title: Auto Classes
    - local: main_classes/backbones
      title: Backbones
    - local: main_classes/callback
      title: Callbacks
    - local: main_classes/configuration
@ -378,6 +382,8 @@
        title: LUKE
      - local: model_doc/m2m_100
        title: M2M100
      - local: model_doc/madlad-400
        title: MADLAD-400
      - local: model_doc/marian
        title: MarianMT
      - local: model_doc/markuplm
@ -392,6 +398,8 @@
        title: MegatronGPT2
      - local: model_doc/mistral
        title: Mistral
      - local: model_doc/mixtral
        title: Mixtral
      - local: model_doc/mluke
        title: mLUKE
      - local: model_doc/mobilebert
@ -424,6 +432,8 @@
        title: PEGASUS-X
      - local: model_doc/persimmon
        title: Persimmon
      - local: model_doc/phi
        title: Phi
      - local: model_doc/phobert
        title: PhoBERT
      - local: model_doc/plbart
@ -612,6 +622,8 @@
        title: Pop2Piano
      - local: model_doc/seamless_m4t
        title: Seamless-M4T
      - local: model_doc/seamless_m4t_v2
        title: SeamlessM4T-v2
      - local: model_doc/sew
        title: SEW
      - local: model_doc/sew-d
@ -626,6 +638,8 @@
        title: UniSpeech
      - local: model_doc/unispeech-sat
        title: UniSpeech-SAT
      - local: model_doc/univnet
        title: UnivNet
      - local: model_doc/vits
        title: VITS
      - local: model_doc/wav2vec2
@ -663,6 +677,8 @@
        title: CLIP
      - local: model_doc/clipseg
        title: CLIPSeg
      - local: model_doc/clvp
        title: CLVP
      - local: model_doc/data2vec
        title: Data2Vec
      - local: model_doc/deplot
@ -691,6 +707,8 @@
        title: LayoutXLM
      - local: model_doc/lilt
        title: LiLT
      - local: model_doc/llava
        title: Llava
      - local: model_doc/lxmert
        title: LXMERT
      - local: model_doc/matcha
@ -719,8 +737,12 @@
        title: TrOCR
      - local: model_doc/tvlt
        title: TVLT
      - local: model_doc/tvp
        title: TVP
      - local: model_doc/vilt
        title: ViLT
      - local: model_doc/vipllava
        title: VipLlava
      - local: model_doc/vision-encoder-decoder
        title: Vision Encoder Decoder Models
      - local: model_doc/vision-text-dual-encoder
@ -743,6 +765,10 @@
        title: Autoformer
      - local: model_doc/informer
        title: Informer
      - local: model_doc/patchtsmixer
        title: PatchTSMixer
      - local: model_doc/patchtst
        title: PatchTST
      - local: model_doc/time_series_transformer
        title: Time Series Transformer
      title: Time series models

@ -31,6 +31,7 @@ In this tutorial, learn to:
* Load a pretrained feature extractor.
* Load a pretrained processor.
* Load a pretrained model.
* Load a model as a backbone.

## AutoTokenizer

@ -95,7 +96,7 @@ Load a processor with [`AutoProcessor.from_pretrained`]:

<frameworkcontent>
<pt>
The `AutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`AutoModelForSequenceClassification.from_pretrained`]:

```py
>>> from transformers import AutoModelForSequenceClassification
```
@ -141,3 +142,24 @@ Easily reuse the same checkpoint to load an architecture for a different task:
Generally, we recommend using the `AutoTokenizer` class and the `TFAutoModelFor` class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, image processor, feature extractor and processor to preprocess a dataset for fine-tuning.
</tf>
</frameworkcontent>

## AutoBackbone

`AutoBackbone` lets you use pretrained models as backbones and get feature maps as outputs from different stages of the models. Below you can see how to get feature maps from a [Swin](model_doc/swin) checkpoint.

```py
>>> from transformers import AutoImageProcessor, AutoBackbone
>>> import torch
>>> from PIL import Image
>>> import requests
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
>>> model = AutoBackbone.from_pretrained("microsoft/swin-tiny-patch4-window7-224", out_indices=(0,))

>>> inputs = processor(image, return_tensors="pt")
>>> outputs = model(**inputs)
>>> feature_maps = outputs.feature_maps
>>> list(feature_maps[-1].shape)
[1, 96, 56, 56]
```

@ -20,25 +20,11 @@ rendered properly in your Markdown viewer.

An increasingly common use case for LLMs is **chat**. In a chat context, rather than continuing a single string
of text (as is the case with a standard language model), the model instead continues a conversation that consists
of one or more **messages**, each of which includes a **role**, like "user" or "assistant", as well as message text.

Much like tokenization, different models expect very different input formats for chat. This is the reason we added
**chat templates** as a feature. Chat templates are part of the tokenizer. They specify how to convert conversations,
represented as lists of messages, into a single tokenizable string in the format that the model expects.

Let's make this concrete with a quick example using the `BlenderBot` model. BlenderBot has an extremely simple default
template, which mostly just adds whitespace between rounds of dialogue:
@ -48,9 +34,9 @@ template, which mostly just adds whitespace between rounds of dialogue:
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

>>> chat = [
...    {"role": "user", "content": "Hello, how are you?"},
...    {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
...    {"role": "user", "content": "I'd like to show off how chat templating works!"},
... ]

>>> tokenizer.apply_chat_template(chat, tokenize=False)
@ -59,28 +45,196 @@ template, which mostly just adds whitespace between rounds of dialogue:

Notice how the entire chat is condensed into a single string. If we use `tokenize=True`, which is the default setting,
that string will also be tokenized for us. To see a more complex template in action, though, let's use the
`mistralai/Mistral-7B-Instruct-v0.1` model.

```python
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")

>>> chat = [
...   {"role": "user", "content": "Hello, how are you?"},
...   {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
...   {"role": "user", "content": "I'd like to show off how chat templating works!"},
... ]

>>> tokenizer.apply_chat_template(chat, tokenize=False)
"<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]"
```

Note that this time, the tokenizer has added the control tokens [INST] and [/INST] to indicate the start and end of
user messages (but not assistant messages!). Mistral-instruct was trained with these tokens, but BlenderBot was not.

## How do I use chat templates?

As you can see in the example above, chat templates are easy to use. Simply build a list of messages, with `role`
and `content` keys, and then pass it to the [`~PreTrainedTokenizer.apply_chat_template`] method. Once you do that,
you'll get output that's ready to go! When using chat templates as input for model generation, it's also a good idea
to use `add_generation_prompt=True` to add a [generation prompt](#what-are-generation-prompts).

Here's an example of preparing input for `model.generate()`, using the `Zephyr` assistant model:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "HuggingFaceH4/zephyr-7b-beta"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)  # You may want to use bfloat16 and/or move to GPU here

messages = [
    {
        "role": "system",
        "content": "You are a friendly chatbot who always responds in the style of a pirate",
    },
    {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
print(tokenizer.decode(tokenized_chat[0]))
```

This will yield a string in the input format that Zephyr expects.

```text
<|system|>
You are a friendly chatbot who always responds in the style of a pirate</s>
<|user|>
How many helicopters can a human eat in one sitting?</s>
<|assistant|>
```

Now that our input is formatted correctly for Zephyr, we can use the model to generate a response to the user's question:

```python
outputs = model.generate(tokenized_chat, max_new_tokens=128)
print(tokenizer.decode(outputs[0]))
```

This will yield:

```text
<|system|>
You are a friendly chatbot who always responds in the style of a pirate</s>
<|user|>
How many helicopters can a human eat in one sitting?</s>
<|assistant|>
Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all.
```

Arr, 'twas easy after all!

## Is there an automated pipeline for chat?

Yes, there is: [`ConversationalPipeline`]. This pipeline is designed to make it easy to use chat models. Let's try
the `Zephyr` example again, but this time using the pipeline:

```python
from transformers import pipeline

pipe = pipeline("conversational", "HuggingFaceH4/zephyr-7b-beta")
messages = [
    {
        "role": "system",
        "content": "You are a friendly chatbot who always responds in the style of a pirate",
    },
    {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
]
print(pipe(messages))
```

```text
Conversation id: 76d886a0-74bd-454e-9804-0467041a63dc
system: You are a friendly chatbot who always responds in the style of a pirate
user: How many helicopters can a human eat in one sitting?
assistant: Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all.
```

[`ConversationalPipeline`] will take care of all the details of tokenization and calling `apply_chat_template` for you -
once the model has a chat template, all you need to do is initialize the pipeline and pass it the list of messages!

## What are "generation prompts"?

You may have noticed that the `apply_chat_template` method has an `add_generation_prompt` argument. This argument tells
the template to add tokens that indicate the start of a bot response. For example, consider the following chat:

```python
messages = [
    {"role": "user", "content": "Hi there!"},
    {"role": "assistant", "content": "Nice to meet you!"},
    {"role": "user", "content": "Can I ask a question?"}
]
```

Here's what this will look like without a generation prompt, using the ChatML template we saw in the Zephyr example:

```python
tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
"""<|im_start|>user
Hi there!<|im_end|>
<|im_start|>assistant
Nice to meet you!<|im_end|>
<|im_start|>user
Can I ask a question?<|im_end|>
"""
```

And here's what it looks like **with** a generation prompt:

```python
tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
"""<|im_start|>user
Hi there!<|im_end|>
<|im_start|>assistant
Nice to meet you!<|im_end|>
<|im_start|>user
Can I ask a question?<|im_end|>
<|im_start|>assistant
"""
```

Note that this time, we've added the tokens that indicate the start of a bot response. This ensures that when the model
generates text it will write a bot response instead of doing something unexpected, like continuing the user's
message. Remember, chat models are still just language models - they're trained to continue text, and chat is just a
special kind of text to them! You need to guide them with the appropriate control tokens so they know what they're
supposed to be doing.

Not all models require generation prompts. Some models, like BlenderBot and LLaMA, don't have any
special tokens before bot responses. In these cases, the `add_generation_prompt` argument will have no effect. The exact
effect that `add_generation_prompt` has will depend on the template being used.

## Can I use chat templates in training?

Yes! We recommend that you apply the chat template as a preprocessing step for your dataset. After this, you
can simply continue like any other language model training task. When training, you should usually set
`add_generation_prompt=False`, because the added tokens to prompt an assistant response will not be helpful during
training. Let's see an example:

```python
from transformers import AutoTokenizer
from datasets import Dataset

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

chat1 = [
    {"role": "user", "content": "Which is bigger, the moon or the sun?"},
    {"role": "assistant", "content": "The sun."}
]
chat2 = [
    {"role": "user", "content": "Which is bigger, a virus or a bacterium?"},
    {"role": "assistant", "content": "A bacterium."}
]

dataset = Dataset.from_dict({"chat": [chat1, chat2]})
dataset = dataset.map(lambda x: {"formatted_chat": tokenizer.apply_chat_template(x["chat"], tokenize=False, add_generation_prompt=False)})
print(dataset['formatted_chat'][0])
```
And we get:
```text
<|user|>
Which is bigger, the moon or the sun?</s>
<|assistant|>
The sun.</s>
```

From here, just continue training like you would with a standard language modelling task, using the `formatted_chat` column.

## Advanced: How do chat templates work?

The chat template for a model is stored on the `tokenizer.chat_template` attribute. If no chat template is set, the
default template for that model class is used instead. Let's take a look at the template for `BlenderBot`:
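As a quick, hedged sketch of how to inspect this attribute (the `default_chat_template` property assumed here is how the class-level fallback is exposed in the transformers versions this guide targets):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

# No template has been set explicitly on this checkpoint...
print(tokenizer.chat_template)          # None

# ...so the class-level default template is used instead
print(tokenizer.default_chat_template)  # the Jinja string BlenderBot falls back to
```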
@ -154,7 +308,9 @@ Hopefully if you stare at this for a little bit you can see what this template i
on the "role" of each message, which represents who sent it. User, assistant and system messages are clearly
distinguishable to the model because of the tokens they're wrapped in.

## Advanced: Adding and editing chat templates

### How do I create a chat template?

Simple, just write a jinja template and set `tokenizer.chat_template`. You may find it easier to start with an
existing template from another model and simply edit it for your needs! For example, we could take the LLaMA template
@ -187,7 +343,7 @@ tokenizer.push_to_hub("model_name")  # Upload your new template to the Hub!
The method [`~PreTrainedTokenizer.apply_chat_template`] which uses your chat template is called by the [`ConversationalPipeline`] class, so
once you set the correct chat template, your model will automatically become compatible with [`ConversationalPipeline`].
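A condensed sketch of that workflow, assuming the `default_chat_template` property as the starting point and a purely illustrative edit; `push_to_hub` is the call referenced in the snippet above:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

# Start from an existing template (here the class default) and edit it to your needs
template = tokenizer.default_chat_template
template = template.replace("    ", "")      # illustrative edit: strip some whitespace

tokenizer.chat_template = template           # set the new template on the tokenizer
tokenizer.push_to_hub("model_name")          # upload your new template to the Hub!
```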

### What are "default" templates?

Before the introduction of chat templates, chat handling was hardcoded at the model class level. For backwards
compatibility, we have retained this class-specific handling as default templates, also set at the class level. If a
@ -200,7 +356,7 @@ the class template is appropriate for your model, we strongly recommend overridi
setting the `chat_template` attribute explicitly to make it clear to users that your model has been correctly configured
for chat, and to future-proof in case the default templates are ever altered or deprecated.

### What template should I use?

When setting the template for a model that's already been trained for chat, you should ensure that the template
exactly matches the message formatting that the model saw during training, or else you will probably experience
@ -220,7 +376,10 @@ input formats. Our default template for models that don't have a class-specific

If you like this one, here it is in one-liner form, ready to copy into your code. The one-liner also includes
handy support for [generation prompts](#what-are-generation-prompts), but note that it doesn't add BOS or EOS tokens!
If your model expects those, they won't be added automatically by `apply_chat_template` - in other words, the
text will be tokenized with `add_special_tokens=False`. This is to avoid potential conflicts between the template and
the `add_special_tokens` logic. If your model expects special tokens, make sure to add them to the template!

```
tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}"
```
@ -229,7 +388,7 @@ tokenizer.chat_template = "{% if not add_generation_prompt is defined %}{% set a
This template wraps each message in `<|im_start|>` and `<|im_end|>` tokens, and simply writes the role as a string, which
allows for flexibility in the roles you train with. The output looks like this:

```text
<|im_start|>system
You are a helpful chatbot that will do its best not to say anything so stupid that people tweet about it.<|im_end|>
<|im_start|>user
```
@ -242,62 +401,12 @@ The "user", "system" and "assistant" roles are the standard for chat, and we rec
particularly if you want your model to operate well with [`ConversationalPipeline`]. However, you are not limited
to these roles - templating is extremely flexible, and any string can be a role.

### I want to add some chat templates! How should I get started?

If you have any chat models, you should set their `tokenizer.chat_template` attribute and test it using
[`~PreTrainedTokenizer.apply_chat_template`], then push the updated tokenizer to the Hub. This applies even if you're
not the model owner - if you're using a model with an empty chat template, or one that's still using the default class
template, please open a [pull request](https://huggingface.co/docs/hub/repositories-pull-requests-discussions) to the model repository so that this attribute can be set properly!

Once the attribute is set, that's it, you're done! `tokenizer.apply_chat_template` will now work correctly for that
model, which means it is also automatically supported in places like `ConversationalPipeline`!
@ -306,7 +415,7 @@ By ensuring that models have this attribute, we can make sure that the whole com
open-source models. Formatting mismatches have been haunting the field and silently harming performance for too long -
it's time to put an end to them!

## Advanced: Template writing tips

If you're unfamiliar with Jinja, we generally find that the easiest way to write a chat template is to first
write a short Python script that formats messages the way you want, and then convert that script into a template.
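For example, a plain-Python formatter like the one sketched below translates almost mechanically into a Jinja template (both are illustrative and not tied to any particular model):

```python
# A simple chat format written as an ordinary Python function...
def format_chat(messages):
    formatted = ""
    for message in messages:
        formatted += "<|" + message["role"] + "|>\n" + message["content"] + "\n"
    return formatted

# ...and the equivalent Jinja template, ready to assign to `tokenizer.chat_template`
chat_template = (
    "{% for message in messages %}"
    "{{ '<|' + message['role'] + '|>\n' + message['content'] + '\n' }}"
    "{% endfor %}"
)
```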
@ -14,7 +14,7 @@ rendered properly in your Markdown viewer.

-->

# Building custom models

The 🤗 Transformers library is designed to be easily extensible. Every model is fully coded in a given subfolder
of the repository with no abstraction, so you can easily copy a modeling file and tweak it to your needs.
@ -22,7 +22,8 @@ of the repository with no abstraction, so you can easily copy a modeling file an
If you are writing a brand new model, it might be easier to start from scratch. In this tutorial, we will show you
how to write a custom model and its configuration so it can be used inside Transformers, and how you can share it
with the community (with the code it relies on) so that anyone can use it, even if it's not present in the 🤗
Transformers library. We'll see how to build upon transformers and extend the framework with your hooks and
custom code.

We will illustrate all of this on a ResNet model, by wrapping the ResNet class of the
[timm library](https://github.com/rwightman/pytorch-image-models) into a [`PreTrainedModel`].
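As a rough preview of where the tutorial is heading, the core idea looks roughly like the sketch below (condensed and illustrative; it hard-codes `resnet50d`, whereas the full tutorial builds the architecture from the configuration instead):

```python
import timm
import torch
from transformers import PretrainedConfig, PreTrainedModel


class ResnetConfig(PretrainedConfig):
    model_type = "resnet"

    def __init__(self, num_classes: int = 1000, **kwargs):
        self.num_classes = num_classes
        super().__init__(**kwargs)


class ResnetModel(PreTrainedModel):
    config_class = ResnetConfig

    def __init__(self, config: ResnetConfig):
        super().__init__(config)
        # Wrap the timm implementation; the config stores what is needed to rebuild it
        self.model = timm.create_model("resnet50d", pretrained=False, num_classes=config.num_classes)

    def forward(self, tensor: torch.Tensor):
        return self.model(tensor)
```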
@ -218,6 +219,27 @@ resnet50d.model.load_state_dict(pretrained_model.state_dict())
Now let's see how to make sure that when we do [`~PreTrainedModel.save_pretrained`] or [`~PreTrainedModel.push_to_hub`], the
code of the model is saved.

## Registering a model with custom code to the auto classes

If you are writing a library that extends 🤗 Transformers, you may want to extend the auto classes to include your own
model. This is different from pushing the code to the Hub in the sense that users will need to import your library to
get the custom models (contrarily to automatically downloading the model code from the Hub).

As long as your config has a `model_type` attribute that is different from existing model types, and that your model
classes have the right `config_class` attributes, you can just add them to the auto classes like this:

```py
from transformers import AutoConfig, AutoModel, AutoModelForImageClassification

AutoConfig.register("resnet", ResnetConfig)
AutoModel.register(ResnetConfig, ResnetModel)
AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification)
```

Note that the first argument used when registering your custom config to [`AutoConfig`] needs to match the `model_type`
of your custom config, and the first argument used when registering your custom models to any auto model class needs
to match the `config_class` of those models.

## Sending the code to the Hub

<Tip warning={true}>
@ -350,23 +372,3 @@ model = AutoModelForImageClassification.from_pretrained(
Note that when browsing the commit history of the model repo on the Hub, there is a button to easily copy the commit
hash of any commit.

@ -100,7 +100,7 @@ reading the whole sentence but using a mask inside the model to hide the future

### channel

Color images are made up of some combination of values in three channels: red, green, and blue (RGB), and grayscale images only have one channel. In 🤗 Transformers, the channel can be the first or last dimension of an image's tensor: [`n_channels`, `height`, `width`] or [`height`, `width`, `n_channels`].

### connectionist temporal classification (CTC)

@ -116,6 +116,7 @@ A type of layer in a neural network where the input matrix is multiplied element

Parallelism technique for training on multiple GPUs where the same setup is replicated multiple times, with each instance
receiving a distinct data slice. The processing is done in parallel and all setups are synchronized at the end of each training step.

Learn more about how DataParallel works [here](perf_train_gpu_many#dataparallel-vs-distributeddataparallel).
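A minimal PyTorch sketch of the idea (illustrative only; the linked guide explains why `DistributedDataParallel` is usually preferred for real training):

```python
import torch
from torch import nn

model = nn.Linear(10, 2)
if torch.cuda.device_count() > 1:
    # Replicate the module on every visible GPU; each replica gets a slice of the batch
    model = nn.DataParallel(model)
model = model.to("cuda")

outputs = model(torch.randn(32, 10, device="cuda"))  # the batch of 32 is split across the GPUs
```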

### decoder input IDs
@ -165,8 +166,7 @@ embeddings `[batch_size, sequence_length, config.intermediate_size]` can account
use. The authors of [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) noticed that since the
computation is independent of the `sequence_length` dimension, it is mathematically equivalent to compute the output
embeddings of both feed forward layers `[batch_size, config.hidden_size]_0, ..., [batch_size, config.hidden_size]_n`
individually and concat them afterward to `[batch_size, sequence_length, config.hidden_size]` with `n = sequence_length`, which trades increased computation time against reduced memory use, but yields a mathematically
**equivalent** result.
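A small illustrative sketch of the mechanism, using the `apply_chunking_to_forward` helper mentioned below (tensor sizes are arbitrary):

```python
import torch
from transformers.pytorch_utils import apply_chunking_to_forward

hidden_states = torch.randn(2, 64, 768)  # [batch_size, sequence_length, hidden_size]
ffn = torch.nn.Sequential(torch.nn.Linear(768, 3072), torch.nn.GELU(), torch.nn.Linear(3072, 768))

def feed_forward_chunk(chunk):
    return ffn(chunk)

# chunk_size=16 runs the feed forward layers on 4 slices of 16 positions one after another
# (chunk_dim=1 is the sequence dimension), lowering peak memory at the cost of extra time
output = apply_chunking_to_forward(feed_forward_chunk, 16, 1, hidden_states)
print(output.shape)  # torch.Size([2, 64, 768])
```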

For models employing the function [`apply_chunking_to_forward`], the `chunk_size` defines the number of output
@ -187,7 +187,7 @@ The model head refers to the last layer of a neural network that accepts the raw

  * [`GPT2ForSequenceClassification`] is a sequence classification head - a linear layer - on top of the base [`GPT2Model`].
  * [`ViTForImageClassification`] is an image classification head - a linear layer on top of the final hidden state of the `CLS` token - on top of the base [`ViTModel`].
  * [`Wav2Vec2ForCTC`] is a language modeling head with [CTC](#connectionist-temporal-classification-(CTC)) on top of the base [`Wav2Vec2Model`].

## I

@ -232,9 +232,7 @@ is added for "RA" and "M":

```
['A', 'Titan', 'R', '##T', '##X', 'has', '24', '##GB', 'of', 'V', '##RA', '##M']
```

These tokens can then be converted into IDs which are understandable by the model. This can be done by directly feeding the sentence to the tokenizer, which leverages the Rust implementation of [🤗 Tokenizers](https://github.com/huggingface/tokenizers) for peak performance.

```python
>>> inputs = tokenizer(sequence)
```
@ -383,7 +381,7 @@ self-supervised objective, which can be reading the text and trying to predict t
modeling](#causal-language-modeling)) or masking some words and trying to predict them (see [masked language
modeling](#masked-language-modeling-mlm)).

Speech and vision models have their own pretraining objectives. For example, Wav2Vec2 is a speech model pretrained on a contrastive task which requires the model to identify the "true" speech representation from a set of "false" speech representations. On the other hand, BEiT is a vision model pretrained on a masked image modeling task which masks some of the image patches and requires the model to predict the masked patches (similar to the masked language modeling objective).

## R

@ -518,7 +516,7 @@ A form of model training in which data provided to the model is not labeled. Uns

### Zero Redundancy Optimizer (ZeRO)

Parallelism technique which performs sharding of the tensors somewhat similar to [TensorParallel](#tensor-parallelism-tp),
except the whole tensor gets reconstructed in time for a forward or backward computation, therefore the model doesn't need
to be modified. This method also supports various offloading techniques to compensate for limited GPU memory.
Learn more about ZeRO [here](perf_train_gpu_many#zero-data-parallelism).
@ -99,7 +99,7 @@ Define a `model_init` function and pass it to the [`Trainer`], as an example:

```py
...         config=config,
...         cache_dir=model_args.cache_dir,
...         revision=model_args.model_revision,
...         token=True if model_args.use_auth_token else None,
...     )
```

@ -1,4 +1,4 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
@ -92,8 +92,9 @@ Flax), PyTorch, and/or TensorFlow.
|                          [CLAP](model_doc/clap)                          |       ✅        |         ❌         |      ❌      |
|                          [CLIP](model_doc/clip)                          |       ✅        |         ✅         |      ✅      |
|                       [CLIPSeg](model_doc/clipseg)                       |       ✅        |         ❌         |      ❌      |
|                          [CLVP](model_doc/clvp)                          |       ✅        |         ❌         |      ❌      |
|                       [CodeGen](model_doc/codegen)                       |       ✅        |         ❌         |      ❌      |
|                    [CodeLlama](model_doc/code_llama)                     |       ✅        |         ❌         |      ✅      |
|              [Conditional DETR](model_doc/conditional_detr)              |       ✅        |         ❌         |      ❌      |
|                      [ConvBERT](model_doc/convbert)                      |       ✅        |         ✅         |      ❌      |
|                      [ConvNeXT](model_doc/convnext)                      |       ✅        |         ✅         |      ❌      |
@ -166,14 +167,16 @@ Flax), PyTorch, and/or TensorFlow.
|                           [LED](model_doc/led)                           |       ✅        |         ✅         |      ❌      |
|                         [LeViT](model_doc/levit)                         |       ✅        |         ❌         |      ❌      |
|                          [LiLT](model_doc/lilt)                          |       ✅        |         ❌         |      ❌      |
|                         [LLaMA](model_doc/llama)                         |       ✅        |         ❌         |      ✅      |
|                        [Llama2](model_doc/llama2)                        |       ✅        |         ❌         |      ✅      |
|                         [LLaVa](model_doc/llava)                         |       ✅        |         ❌         |      ❌      |
|                    [Longformer](model_doc/longformer)                    |       ✅        |         ✅         |      ❌      |
|                        [LongT5](model_doc/longt5)                        |       ✅        |         ❌         |      ✅      |
|                          [LUKE](model_doc/luke)                          |       ✅        |         ❌         |      ❌      |
|                        [LXMERT](model_doc/lxmert)                        |       ✅        |         ✅         |      ❌      |
|                        [M-CTC-T](model_doc/mctct)                        |       ✅        |         ❌         |      ❌      |
|                       [M2M100](model_doc/m2m_100)                        |       ✅        |         ❌         |      ❌      |
|                    [MADLAD-400](model_doc/madlad-400)                    |       ✅        |         ✅         |      ✅      |
|                        [Marian](model_doc/marian)                        |       ✅        |         ✅         |      ✅      |
|                      [MarkupLM](model_doc/markuplm)                      |       ✅        |         ❌         |      ❌      |
|                   [Mask2Former](model_doc/mask2former)                   |       ✅        |         ❌         |      ❌      |
@ -186,6 +189,7 @@ Flax), PyTorch, and/or TensorFlow.
|                 [Megatron-GPT2](model_doc/megatron_gpt2)                 |       ✅        |         ✅         |      ✅      |
|                       [MGP-STR](model_doc/mgp-str)                       |       ✅        |         ❌         |      ❌      |
|                       [Mistral](model_doc/mistral)                       |       ✅        |         ❌         |      ❌      |
|                       [Mixtral](model_doc/mixtral)                       |       ✅        |         ❌         |      ❌      |
|                         [mLUKE](model_doc/mluke)                         |       ✅        |         ❌         |      ❌      |
|                           [MMS](model_doc/mms)                           |       ✅        |         ✅         |      ✅      |
|                    [MobileBERT](model_doc/mobilebert)                    |       ✅        |         ✅         |      ❌      |
@ -212,10 +216,13 @@ Flax), PyTorch, and/or TensorFlow.
|                           [OPT](model_doc/opt)                           |       ✅        |         ✅         |      ✅      |
|                       [OWL-ViT](model_doc/owlvit)                        |       ✅        |         ❌         |      ❌      |
|                         [OWLv2](model_doc/owlv2)                         |       ✅        |         ❌         |      ❌      |
|                  [PatchTSMixer](model_doc/patchtsmixer)                  |       ✅        |         ❌         |      ❌      |
|                      [PatchTST](model_doc/patchtst)                      |       ✅        |         ❌         |      ❌      |
|                       [Pegasus](model_doc/pegasus)                       |       ✅        |         ✅         |      ✅      |
|                     [PEGASUS-X](model_doc/pegasus_x)                     |       ✅        |         ❌         |      ❌      |
|                     [Perceiver](model_doc/perceiver)                     |       ✅        |         ❌         |      ❌      |
|                     [Persimmon](model_doc/persimmon)                     |       ✅        |         ❌         |      ❌      |
|                           [Phi](model_doc/phi)                           |       ✅        |         ❌         |      ❌      |
|                       [PhoBERT](model_doc/phobert)                       |       ✅        |         ✅         |      ✅      |
|                    [Pix2Struct](model_doc/pix2struct)                    |       ✅        |         ❌         |      ❌      |
|                        [PLBart](model_doc/plbart)                        |       ✅        |         ❌         |      ❌      |
@ -238,6 +245,7 @@ Flax), PyTorch, and/or TensorFlow.
|                          [RWKV](model_doc/rwkv)                          |       ✅        |         ❌         |      ❌      |
|                           [SAM](model_doc/sam)                           |       ✅        |         ✅         |      ❌      |
|                  [SeamlessM4T](model_doc/seamless_m4t)                   |       ✅        |         ❌         |      ❌      |
|                [SeamlessM4Tv2](model_doc/seamless_m4t_v2)                |       ✅        |         ❌         |      ❌      |
|                     [SegFormer](model_doc/segformer)                     |       ✅        |         ✅         |      ❌      |
|                           [SEW](model_doc/sew)                           |       ✅        |         ❌         |      ❌      |
|                         [SEW-D](model_doc/sew-d)                         |       ✅        |         ❌         |      ❌      |
@ -262,14 +270,17 @@ Flax), PyTorch, and/or TensorFlow.
|                  [Transformer-XL](model_doc/transfo-xl)                  |       ✅        |         ✅         |      ❌      |
|                         [TrOCR](model_doc/trocr)                         |       ✅        |         ❌         |      ❌      |
|                          [TVLT](model_doc/tvlt)                          |       ✅        |         ❌         |      ❌      |
|                           [TVP](model_doc/tvp)                           |       ✅        |         ❌         |      ❌      |
|                           [UL2](model_doc/ul2)                           |       ✅        |         ✅         |      ✅      |
|                          [UMT5](model_doc/umt5)                          |       ✅        |         ❌         |      ❌      |
|                     [UniSpeech](model_doc/unispeech)                     |       ✅        |         ❌         |      ❌      |
|                 [UniSpeechSat](model_doc/unispeech-sat)                  |       ✅        |         ❌         |      ❌      |
|                       [UnivNet](model_doc/univnet)                       |       ✅        |         ❌         |      ❌      |
|                       [UPerNet](model_doc/upernet)                       |       ✅        |         ❌         |      ❌      |
|                           [VAN](model_doc/van)                           |       ✅        |         ❌         |      ❌      |
|                      [VideoMAE](model_doc/videomae)                      |       ✅        |         ❌         |      ❌      |
|                          [ViLT](model_doc/vilt)                          |       ✅        |         ❌         |      ❌      |
|                      [VipLlava](model_doc/vipllava)                      |       ✅        |         ❌         |      ❌      |
|        [Vision Encoder decoder](model_doc/vision-encoder-decoder)        |       ✅        |         ✅         |      ✅      |
|       [VisionTextDualEncoder](model_doc/vision-text-dual-encoder)        |       ✅        |         ✅         |      ✅      |
|                   [VisualBERT](model_doc/visual_bert)                    |       ✅        |         ❌         |      ❌      |

@ -368,3 +368,20 @@ A [`Constraint`] can be used to force the generation to include specific tokens
[[autodoc]] TextStreamer

[[autodoc]] TextIteratorStreamer

## Caches

[[autodoc]] Cache
    - update

[[autodoc]] DynamicCache
    - update
    - get_seq_length
    - reorder_cache
    - to_legacy_cache
    - from_legacy_cache

[[autodoc]] SinkCache
    - update
    - get_seq_length
    - reorder_cache

@ -40,7 +40,7 @@ Most of those are only useful if you are studying the code of the Trainer in the

[[autodoc]] trainer_pt_utils.DistributedTensorGatherer

## Trainer Argument Parser

[[autodoc]] HfArgumentParser

@ -250,7 +250,7 @@ While the autoregressive generation process is relatively straightforward, makin
1. [Guide](generation_strategies) on how to control different generation methods, how to set up the generation configuration file, and how to stream the output;
2. [Guide](chat_templating) on the prompt template for chat LLMs;
3. [Guide](tasks/prompting) on how to get the most out of prompt design;
4. API reference on [`~generation.GenerationConfig`], [`~generation.GenerationMixin.generate`], and [generate-related classes](internal/generation_utils). Most of the classes, including the logits processors, have usage examples!

### LLM leaderboards

@ -22,7 +22,7 @@ The crux of these challenges lies in augmenting the computational and memory cap

In this guide, we will go over the effective techniques for efficient LLM deployment:

1.  **Lower Precision**: Research has shown that operating at reduced numerical precision, namely [8-bit and 4-bit](./main_classes/quantization.md) can achieve computational advantages without a considerable decline in model performance.
1.  **Lower Precision:** Research has shown that operating at reduced numerical precision, namely [8-bit and 4-bit](./main_classes/quantization.md) can achieve computational advantages without a considerable decline in model performance.

2.  **Flash Attention:** Flash Attention is a variation of the attention algorithm that not only provides a more memory-efficient approach but also realizes increased efficiency due to optimized GPU memory utilization.
@ -58,7 +58,7 @@ As of writing this document, the largest GPU chip on the market is the A100 & H1

🤗 Transformers does not support tensor parallelism out of the box as it requires the model architecture to be written in a specific way. If you're interested in writing models in a tensor-parallelism-friendly way, feel free to have a look at [the text-generation-inference library](https://github.com/huggingface/text-generation-inference/tree/main/server/text_generation_server/models/custom_modeling).

Naive pipeline parallelism is supported out of the box. For this, simply load the model with `device_map="auto"`, which will automatically place the different layers on the available GPUs as explained [here](https://huggingface.co/docs/accelerate/v0.22.0/en/concept_guides/big_model_inference).
Note, however, that while very effective, this naive pipeline parallelism does not tackle the issues of GPU idling. For this, more advanced pipeline parallelism is required, as explained [here](https://huggingface.co/docs/transformers/v4.34.0/en/perf_train_gpu_many#naive-model-parallelism-vertical-and-pipeline-parallelism).
Note, however, that while very effective, this naive pipeline parallelism does not tackle the issues of GPU idling. For this, more advanced pipeline parallelism is required, as explained [here](https://huggingface.co/docs/transformers/en/perf_train_gpu_many#naive-model-parallelism-vertical-and-pipeline-parallelism).

If you have access to an 8 x 80GB A100 node, you could load BLOOM as follows
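As a rough sketch of such a load (the exact snippet may differ; `device_map="auto"` is the key ingredient, and `bfloat16` is assumed here to halve the weight memory):

```python
import torch
from transformers import AutoModelForCausalLM

# requires `accelerate` to be installed for device_map="auto"
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom",
    device_map="auto",           # naive pipeline parallelism: layers are spread over the 8 GPUs
    torch_dtype=torch.bfloat16,  # ~176B params * 2 bytes ≈ 352GB, spread over 640GB of VRAM
)
```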
@ -286,7 +286,7 @@ If GPU memory is not a constraint for your use case, there is often no need to l

For more detailed usage information, we strongly recommend taking a look at the [Transformers Quantization Docs](https://huggingface.co/docs/transformers/main_classes/quantization#general-usage).
Next, let's look into how we can improve computational and memory efficiency by using better algorithms and an improved model architecture.

# 2. Flash Attention
## 2. Flash Attention

Today's top-performing LLMs share more or less the same fundamental architecture that consists of feed-forward layers, activation layers, layer normalization layers, and most crucially, self-attention layers.
@ -441,7 +441,7 @@ flush()
```

For comparison, let's run the same function, but enable Flash Attention instead.
To do so, we convert the model to [BetterTransformers](https://huggingface.co/docs/optimum/bettertransformer/overview) and by doing so enabling PyTorch's [SDPA self-attention](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention) which in turn is based on Flash Attention.
To do so, we convert the model to [BetterTransformer](https://huggingface.co/docs/optimum/bettertransformer/overview), thereby enabling PyTorch's [SDPA self-attention](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention), which in turn is able to use Flash Attention.

```python
model.to_bettertransformer()
@ -484,7 +484,9 @@ We can observe that we only use roughly 100MB more GPU memory when passing a ver

```py
flush()
```
For more information on how to use Flash Attention, please have a look at [this doc page](https://huggingface.co/docs/transformers/v4.34.0/en/perf_infer_gpu_one#flash-attention-2).

For more information on how to use Flash Attention, please have a look at [this doc page](https://huggingface.co/docs/transformers/en/perf_infer_gpu_one#flashattention-2).

## 3. Architectural Innovations

So far we have looked into improving computational and memory efficiency by:
@ -662,7 +664,15 @@ Using the key-value cache has two advantages:

> One should *always* make use of the key-value cache as it leads to identical results and a significant speed-up for longer input sequences. Transformers has the key-value cache enabled by default when making use of the text pipeline or the [`generate` method](https://huggingface.co/docs/transformers/main_classes/text_generation).

Note that the key-value cache is especially useful for applications such as chat where multiple passes of auto-regressive decoding are required. Let's look at an example.
<Tip warning={true}>

Note that, despite our advice to use key-value caches, your LLM output may be slightly different when you use them. This is a property of the matrix multiplication kernels themselves -- you can read more about it [here](https://github.com/huggingface/transformers/issues/25420#issuecomment-1775317535).

</Tip>

#### 3.2.1 Multi-round conversation

The key-value cache is especially useful for applications such as chat where multiple passes of auto-regressive decoding are required. Let's look at an example.

```
User: How many people live in France?
@ -672,14 +682,45 @@ Assistant: Germany has ca. 81 million inhabitants
```
In this chat, the LLM runs auto-regressive decoding twice:
 | 
			
		||||
- 1. The first time, the key-value cache is empty and the input prompt is `"User: How many people live in France?"` and the model auto-regressively generates the text `"Roughly 75 million people live in France"` while increasing the key-value cache at every decoding step.
 | 
			
		||||
- 2. The second time the input prompt is `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many in Germany?"`. Thanks to the cache, all key-value vectors for the first two sentences are already computed. Therefore the input prompt only consists of `"User: And how many in Germany?"`. While processing the shortened input prompt, it's computed key-value vectors are concatenated to the key-value cache of the first decoding. The second Assistant's answer `"Germany has ca. 81 million inhabitants"` is then auto-regressively generated with the key-value cache consisting of encoded key-value vectors of `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many are in Germany?"`.
 | 
			
		||||
  1. The first time, the key-value cache is empty and the input prompt is `"User: How many people live in France?"` and the model auto-regressively generates the text `"Roughly 75 million people live in France"` while increasing the key-value cache at every decoding step.
 | 
			
		||||
  2. The second time the input prompt is `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many in Germany?"`. Thanks to the cache, all key-value vectors for the first two sentences are already computed. Therefore the input prompt only consists of `"User: And how many in Germany?"`. While processing the shortened input prompt, its computed key-value vectors are concatenated to the key-value cache of the first decoding. The second Assistant's answer `"Germany has ca. 81 million inhabitants"` is then auto-regressively generated with the key-value cache consisting of encoded key-value vectors of `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many are in Germany?"`.
 | 
			
		||||
 | 
			
		||||
Two things should be noted here:
 | 
			
		||||
  1. Keeping all the context is crucial for LLMs deployed in chat so that the LLM understands all the previous context of the conversation. E.g. for the example above the LLM needs to understand that the user refers to the population when asking `"And how many are in Germany"`.
 | 
			
		||||
  2. The key-value cache is extremely useful for chat as it allows us to continuously grow the encoded chat history instead of having to re-encode the chat history again from scratch (as e.g. would be the case when using an encoder-decoder architecture).
 | 
			
		||||
 | 
			
		||||
There is however one catch. While the required peak memory for the \\( \mathbf{QK}^T \\) matrix is significantly reduced, holding the key-value cache in memory can become very memory expensive for long input sequences or multi-turn chat. Remember that the key-value cache needs to store the key-value vectors for all previous input vectors \\( \mathbf{x}_i \text{, for } i \in \{1, \ldots, c - 1\} \\) for all self-attention layers and for all attention heads.
 | 
			
		||||
In `transformers`, a `generate` call will return `past_key_values` when `return_dict_in_generate=True` is passed, in addition to the default `use_cache=True`. Note that it is not yet available through the `pipeline` interface.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
# Generation as usual
 | 
			
		||||
prompt = system_prompt + "Question: Please write a function in Python that transforms bytes to Giga bytes.\n\nAnswer: Here"
 | 
			
		||||
model_inputs = tokenizer(prompt, return_tensors='pt')
 | 
			
		||||
generation_output = model.generate(**model_inputs, max_new_tokens=60, return_dict_in_generate=True)
 | 
			
		||||
decoded_output = tokenizer.batch_decode(generation_output.sequences)[0]
 | 
			
		||||
 | 
			
		||||
# Piping the returned `past_key_values` to speed up the next conversation round
 | 
			
		||||
prompt = decoded_output + "\nQuestion: How can I modify the function above to return Mega bytes instead?\n\nAnswer: Here"
 | 
			
		||||
model_inputs = tokenizer(prompt, return_tensors='pt')
 | 
			
		||||
generation_output = model.generate(
 | 
			
		||||
  **model_inputs,
 | 
			
		||||
  past_key_values=generation_output.past_key_values,
 | 
			
		||||
  max_new_tokens=60,
 | 
			
		||||
  return_dict_in_generate=True
 | 
			
		||||
)
 | 
			
		||||
tokenizer.batch_decode(generation_output.sequences)[0][len(prompt):]
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
**Output**:
 | 
			
		||||
```
 | 
			
		||||
 is a modified version of the function that returns Mega bytes instead.
 | 
			
		||||
 | 
			
		||||
def bytes_to_megabytes(bytes):
 | 
			
		||||
   return bytes / 1024 / 1024
 | 
			
		||||
 | 
			
		||||
Answer: The function takes a number of bytes as input and returns the number of
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Great, no additional time is spent recomputing the same key and values for the attention layer! There is however one catch. While the required peak memory for the \\( \mathbf{QK}^T \\) matrix is significantly reduced, holding the key-value cache in memory can become very memory expensive for long input sequences or multi-turn chat. Remember that the key-value cache needs to store the key-value vectors for all previous input vectors \\( \mathbf{x}_i \text{, for } i \in \{1, \ldots, c - 1\} \\) for all self-attention layers and for all attention heads.
 | 
			
		||||
 | 
			
		||||
Let's compute the number of float values that need to be stored in the key-value cache for the LLM `bigcode/octocoder` that we used before.
 | 
			
		||||
The number of float values amounts to two times the sequence length times the number of attention heads times the attention head dimension and times the number of layers.
 | 
			
		||||
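As a back-of-the-envelope sketch of that computation (assuming `model` is the `bigcode/octocoder` checkpoint loaded earlier and that its config exposes GPT-BigCode-style attribute names):

```python
# per-token cache size: 2 (key and value) * num layers * num heads * head dim,
# here for a hypothetical sequence length of 16k tokens
config = model.config
seq_len = 16_000
head_dim = config.n_embd // config.n_head  # attribute names assume a GPT-BigCode-style config
num_floats = 2 * seq_len * config.n_layer * config.n_head * head_dim
print(f"{num_floats / 1e9:.1f}B floats, ~{num_floats * 2 / 1e9:.0f} GB in float16")
```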
@ -696,11 +737,11 @@ config = model.config
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Roughly 8 billion float values! Storing 8 billion float values in `float16` precision requires around 15 GB of RAM which is circa half as much as the model weights themselves!
 | 
			
		||||
Researchers have proposed two methods that allow to significantly reduce the memory cost of storing the key-value cache:
 | 
			
		||||
Researchers have proposed two methods that significantly reduce the memory cost of storing the key-value cache, which are explored in the next subsections.
 | 
			
		||||
 | 
			
		||||
  1.  [Multi-Query-Attention (MQA)](https://arxiv.org/abs/1911.02150)
 | 
			
		||||
#### 3.2.2 Multi-Query-Attention (MQA)
 | 
			
		||||
 | 
			
		||||
Multi-Query-Attention was proposed in Noam Shazeer's *Fast Transformer Decoding: One Write-Head is All You Need* paper. As the title says, Noam found out that instead of using `n_head` key-value projections weights, one can use a single head-value projection weight pair that is shared across all attention heads without that the model's performance significantly degrades.
 | 
			
		||||
[Multi-Query-Attention](https://arxiv.org/abs/1911.02150) was proposed in Noam Shazeer's *Fast Transformer Decoding: One Write-Head is All You Need* paper. As the title says, Noam found out that instead of using `n_head` key-value projection weights, one can use a single key-value projection weight pair that is shared across all attention heads without the model's performance significantly degrading.
 | 
			
		||||
 | 
			
		||||
> By using a single head-value projection weight pair, the key value vectors \\( \mathbf{k}_i, \mathbf{v}_i \\) have to be identical across all attention heads which in turn means that we only need to store 1 key-value projection pair in the cache instead of `n_head` ones.
 | 
			
		||||
 | 
			
		||||
@ -720,9 +761,9 @@ MQA has seen wide adoption by the community and is now used by many of the most
 | 
			
		||||
 | 
			
		||||
Also, the checkpoint used in this notebook - `bigcode/octocoder` - makes use of MQA.
 | 
			
		||||
 | 
			
		||||
  2.  [Grouped-Query-Attention (GQA)](https://arxiv.org/abs/2305.13245)
 | 
			
		||||
#### 3.2.3 Grouped-Query-Attention (GQA)
 | 
			
		||||
 | 
			
		||||
Grouped-Query-Attention, as proposed by Ainslie et al. from Google, found that using MQA can often lead to quality degradation compared to using vanilla multi-key-value head projections. The paper argues that more model performance can be kept by less drastically reducing the number of query head projection weights. Instead of using just a single key-value projection weight, `n < n_head` key-value projection weights should be used. By choosing `n` to a significantly smaller value than `n_head`, such as 2,4 or 8 almost all of the memory and speed gains from MQA can be kept while sacrificing less model capacity and thus arguably less performance.
 | 
			
		||||
[Grouped-Query-Attention](https://arxiv.org/abs/2305.13245), as proposed by Ainslie et al. from Google, found that using MQA can often lead to quality degradation compared to using vanilla multi-key-value head projections. The paper argues that more model performance can be kept by less drastically reducing the number of key-value head projection weights. Instead of using just a single key-value projection weight, `n < n_head` key-value projection weights should be used. By choosing `n` to be a significantly smaller value than `n_head`, such as 2, 4 or 8, almost all of the memory and speed gains from MQA can be kept while sacrificing less model capacity and thus arguably less performance.
 | 
			
		||||
 | 
			
		||||
Moreover, the authors of GQA found out that existing model checkpoints can be *uptrained* to have a GQA architecture with as little as 5% of the original pre-training compute. While 5% of the original pre-training compute can still be a massive amount, GQA *uptraining* allows existing checkpoints to be useful for longer input sequences.
 | 
			
		||||
 | 
			
		||||
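Whether a checkpoint uses MQA or GQA can be read directly from its configuration. A small sketch, assuming the Llama config attribute names and the head counts reported for the 70B checkpoint:

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("meta-llama/Llama-2-70b-hf")
print(config.num_attention_heads)   # query heads, e.g. 64
print(config.num_key_value_heads)   # shared key-value heads, e.g. 8 -> 8x smaller key-value cache
```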
@ -731,6 +772,7 @@ The most notable application of GQA is [Llama-v2](https://huggingface.co/meta-ll
 | 
			
		||||
 | 
			
		||||
> As a conclusion, it is strongly recommended to make use of either GQA or MQA if the LLM is deployed with auto-regressive decoding and is required to handle large input sequences as is the case for example for chat.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## Conclusion
 | 
			
		||||
 | 
			
		||||
The research community is constantly coming up with new, nifty ways to speed up inference time for ever-larger LLMs. As an example, one such promising research direction is [speculative decoding](https://arxiv.org/abs/2211.17192) where "easy tokens" are generated by smaller, faster language models and only "hard tokens" are generated by the LLM itself. Going into more detail is out of the scope of this notebook, but can be read upon in this [nice blog post](https://huggingface.co/blog/assisted-generation).
 | 
			
		||||
 | 
			
		||||
							
								
								
									
docs/source/en/main_classes/backbones.md (new file, 93 lines) @ -0,0 +1,93 @@
 | 
			
		||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 | 
			
		||||
the License. You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 | 
			
		||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 | 
			
		||||
specific language governing permissions and limitations under the License.
 | 
			
		||||
 | 
			
		||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
 | 
			
		||||
rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
-->
 | 
			
		||||
 | 
			
		||||
# Backbones
 | 
			
		||||
 | 
			
		||||
Backbones are models used for feature extraction for computer vision tasks. One can use a model as backbone in two ways:
 | 
			
		||||
 | 
			
		||||
* initializing `AutoBackbone` class with a pretrained model,
 | 
			
		||||
* initializing a supported backbone configuration and passing it to the model architecture. 
 | 
			
		||||
 | 
			
		||||
## Using AutoBackbone 
 | 
			
		||||
 | 
			
		||||
You can use `AutoBackbone` class to initialize a model as a backbone and get the feature maps for any stage. You can define `out_indices` to indicate the index of the layers which you would like to get the feature maps from. You can also use `out_features` if you know the name of the layers. You can use them interchangeably. If you are using both `out_indices` and `out_features`, ensure they are consistent. Not passing any of the feature map arguments will make the backbone yield the feature maps of the last layer.
 | 
			
		||||
To visualize what the stages look like, let's take the Swin model. Each stage is responsible for feature extraction, outputting feature maps.
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stages.png">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
The feature maps of the first stage look like below.
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stage%201.png">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
Let's see this with an example. Note that `out_indices=(0,)` results in yielding the stem of the model. The stem refers to the stage before the first feature extraction stage; in the above diagram, it refers to the patch partition. We would like to have the feature maps from the stem and the first and second stages of the model.
 | 
			
		||||
```py
 | 
			
		||||
>>> from transformers import AutoImageProcessor, AutoBackbone
 | 
			
		||||
>>> import torch
 | 
			
		||||
>>> from PIL import Image
 | 
			
		||||
>>> import requests
 | 
			
		||||
 | 
			
		||||
>>> processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
 | 
			
		||||
>>> model = AutoBackbone.from_pretrained("microsoft/swin-tiny-patch4-window7-224", out_indices=(0,1,2))
 | 
			
		||||
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
 | 
			
		||||
>>> image = Image.open(requests.get(url, stream=True).raw)
 | 
			
		||||
 | 
			
		||||
>>> inputs = processor(image, return_tensors="pt")
 | 
			
		||||
>>> outputs = model(**inputs)
 | 
			
		||||
>>> feature_maps = outputs.feature_maps
 | 
			
		||||
```
 | 
			
		||||
The `feature_maps` object now has three feature maps, each of which can be accessed as shown below. Say we would like to get the feature map of the stem.
 | 
			
		||||
```python
 | 
			
		||||
>>> list(feature_maps[0].shape)
 | 
			
		||||
[1, 96, 56, 56]
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
We can get the feature maps of the first and second stages as shown below.
 | 
			
		||||
```python
 | 
			
		||||
>>> list(feature_maps[1].shape)
 | 
			
		||||
[1, 96, 56, 56]
 | 
			
		||||
>>> list(feature_maps[2].shape)
 | 
			
		||||
[1, 192, 28, 28]
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
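The same feature maps can also be requested by name through `out_features`. As a sketch, assuming the stage names follow the usual `stem`/`stage1`/`stage2`/... convention:

```py
>>> model = AutoBackbone.from_pretrained(
...     "microsoft/swin-tiny-patch4-window7-224", out_features=["stem", "stage1", "stage2"]
... )
>>> outputs = model(**inputs)
>>> [fm.shape[1] for fm in outputs.feature_maps]  # channel dimension of each requested stage
[96, 96, 192]
```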
## Initializing Backbone Configuration
 | 
			
		||||
 | 
			
		||||
In computer vision, models consist of a backbone, a neck, and a head. The backbone extracts the features, the neck transforms the output of the backbone, and the head is used for the main task (e.g. object detection). You can initialize the neck and head with model backbones by passing a model configuration to `backbone_config`. For example, below you can see how to initialize the [MaskFormer](../model_doc/maskformer) model with an instance segmentation head and a [ResNet](../model_doc/resnet) backbone.
 | 
			
		||||
 | 
			
		||||
```py
 | 
			
		||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, ResNetConfig
 | 
			
		||||
 | 
			
		||||
backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
 | 
			
		||||
config = MaskFormerConfig(backbone_config=backbone_config)
 | 
			
		||||
model = MaskFormerForInstanceSegmentation(config)
 | 
			
		||||
```
 | 
			
		||||
You can also initialize a backbone with random weights to initialize the model neck with it. 
 | 
			
		||||
 | 
			
		||||
```py
 | 
			
		||||
backbone_config = ResNetConfig()
 | 
			
		||||
config = MaskFormerConfig(backbone_config=backbone_config)
 | 
			
		||||
model = MaskFormerForInstanceSegmentation(config)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
`timm` models are also supported in transformers through `TimmBackbone` and `TimmBackboneConfig`.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import TimmBackboneConfig, TimmBackbone
 | 
			
		||||
 | 
			
		||||
backbone_config = TimmBackboneConfig("resnet50")
 | 
			
		||||
model = TimmBackbone(config=backbone_config)
 | 
			
		||||
```
 | 
			
		||||
@ -44,6 +44,7 @@ By default, `TrainingArguments.report_to` is set to `"all"`, so a [`Trainer`] wi
 | 
			
		||||
- [`~integrations.ClearMLCallback`] if [clearml](https://github.com/allegroai/clearml) is installed.
 | 
			
		||||
- [`~integrations.DagsHubCallback`] if [dagshub](https://dagshub.com/) is installed.
 | 
			
		||||
- [`~integrations.FlyteCallback`] if [flyte](https://flyte.org/) is installed.
 | 
			
		||||
- [`~integrations.DVCLiveCallback`] if [dvclive](https://dvc.org/doc/dvclive) is installed.
 | 
			
		||||
 | 
			
		||||
If a package is installed but you don't wish to use the accompanying integration, you can change `TrainingArguments.report_to` to a list of just those integrations you want to use (e.g. `["azure_ml", "wandb"]`). 
 | 
			
		||||
 | 
			
		||||
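For instance, to report only to Weights & Biases even if other integrations are installed (a minimal sketch):

```python
from transformers import TrainingArguments

# only the wandb callback will be added, regardless of which loggers are installed
args = TrainingArguments(output_dir="my-model", report_to=["wandb"])
```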
@ -88,6 +89,9 @@ Here is the list of the available [`TrainerCallback`] in the library:
 | 
			
		||||
 | 
			
		||||
[[autodoc]] integrations.FlyteCallback
 | 
			
		||||
 | 
			
		||||
[[autodoc]] integrations.DVCLiveCallback
 | 
			
		||||
    - setup
 | 
			
		||||
 | 
			
		||||
## TrainerCallback
 | 
			
		||||
 | 
			
		||||
[[autodoc]] TrainerCallback
 | 
			
		||||
 | 
			
		||||
@ -287,7 +287,7 @@ The information in this section isn't not specific to the DeepSpeed integration
 | 
			
		||||
 | 
			
		||||
For the duration of this section let's assume that you have 2 nodes with 8 gpus each. And you can reach the first node with `ssh hostname1` and second node with `ssh hostname2`, and both must be able to reach each other via ssh locally without a password. Of course, you will need to rename these host (node) names to the actual host names you are working with.
 | 
			
		||||
 | 
			
		||||
#### The torch.distributed.run launcher
 | 
			
		||||
#### The torch.distributed.run (torchrun) launcher
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
For example, to use `torch.distributed.run`, you could do:
 | 
			
		||||
 | 
			
		||||
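A sketch of such a launch on the 2-node setup described above (the flags follow the standard `torch.distributed.run`/`torchrun` interface; the script name and DeepSpeed config are placeholders):

```bash
# run this on each node, changing --node_rank to 0 on hostname1 and 1 on hostname2
python -m torch.distributed.run --nproc_per_node=8 --nnodes=2 --node_rank=0 \
    --master_addr=hostname1 --master_port=9901 \
    your_program.py <normal cl args> --deepspeed ds_config.json
```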
@ -225,7 +225,7 @@ For users, a rule of thumb is:
 | 
			
		||||
 | 
			
		||||
- **Measure performance on your load, with your hardware. Measure, measure, and keep measuring. Real numbers are the
 | 
			
		||||
  only way to go.**
 | 
			
		||||
- If you are latency constrained (live product doing inference), don't batch
 | 
			
		||||
- If you are latency constrained (live product doing inference), don't batch.
 | 
			
		||||
- If you are using CPU, don't batch.
 | 
			
		||||
- If you are using throughput (you want to run your model on a bunch of static data), on GPU, then:
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -14,535 +14,24 @@ rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
-->
 | 
			
		||||
 | 
			
		||||
# Quantize 🤗 Transformers models
 | 
			
		||||
# Quantization
 | 
			
		||||
 | 
			
		||||
## AWQ integration
 | 
			
		||||
Quantization techniques reduce memory and computational costs by representing weights and activations with lower-precision data types like 8-bit integers (int8). This enables loading larger models you normally wouldn't be able to fit into memory, and speeding up inference. Transformers supports the AWQ and GPTQ quantization algorithms, and it supports 8-bit and 4-bit quantization with bitsandbytes.
 | 
			
		||||
 | 
			
		||||
The AWQ method was introduced in the [*AWQ: Activation-aware Weight Quantization for LLM Compression and Acceleration* paper](https://arxiv.org/abs/2306.00978). With AWQ you can run models in 4-bit precision, while preserving their original quality (i.e. no performance degradation), with superior throughput compared to the other quantization methods presented below - reaching throughput similar to pure `float16` inference.
 | 
			
		||||
<Tip>
 | 
			
		||||
 | 
			
		||||
We now support inference with any AWQ model, meaning anyone can load and use AWQ weights that are pushed on the Hub or saved locally. Note that using AWQ requires access to an NVIDIA GPU. CPU inference is not supported yet.
 | 
			
		||||
Learn how to quantize models in the [Quantization](../quantization) guide.
 | 
			
		||||
 | 
			
		||||
### Quantizing a model
 | 
			
		||||
</Tip>
 | 
			
		||||
 | 
			
		||||
We advise users to look at different existing tools in the ecosystem to quantize their models with AWQ algorithm, such as:
 | 
			
		||||
 | 
			
		||||
- [`llm-awq`](https://github.com/mit-han-lab/llm-awq) from MIT Han Lab
 | 
			
		||||
- [`autoawq`](https://github.com/casper-hansen/AutoAWQ) from [`casper-hansen`](https://github.com/casper-hansen)
 | 
			
		||||
- Intel neural compressor from Intel - through [`optimum-intel`](https://huggingface.co/docs/optimum/main/en/intel/optimization_inc)
 | 
			
		||||
 | 
			
		||||
Many other tools might exist in the ecosystem, please feel free to open a PR to add them to the list.
 | 
			
		||||
Currently the integration with 🤗 Transformers is only available for models that have been quantized using `autoawq` library and `llm-awq`. Most of the models quantized with `auto-awq` can be found under [`TheBloke`](https://huggingface.co/TheBloke) namespace of 🤗 Hub, and to quantize models with `llm-awq` please refer to the [`convert_to_hf.py`](https://github.com/mit-han-lab/llm-awq/blob/main/examples/convert_to_hf.py) script in the examples folder of [`llm-awq`](https://github.com/mit-han-lab/llm-awq/).
 | 
			
		||||
 | 
			
		||||
### Load a quantized model
 | 
			
		||||
 | 
			
		||||
You can load a quantized model from the Hub using the `from_pretrained` method. Make sure that the pushed weights are quantized, by checking that the attribute `quantization_config` is present in the model's configuration file (`config.json`). You can confirm that the model is quantized in the AWQ format by checking the field `quantization_config.quant_method`, which should be set to `"awq"`. Note that loading the model will set other weights in `float16` by default for performance reasons. If you want to change that behavior, you can set the `torch_dtype` argument to `torch.float32` or `torch.bfloat16`. You can find some example snippets and a notebook in the sections below.
 | 
			
		||||
 | 
			
		||||
## Example usage
 | 
			
		||||
 | 
			
		||||
First, you need to install [`autoawq`](https://github.com/casper-hansen/AutoAWQ) library
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
pip install autoawq
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
 | 
			
		||||
model_id = "TheBloke/zephyr-7B-alpha-AWQ"
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda:0")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
In case you first load your model on CPU, make sure to move it to your GPU device before using 
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
 | 
			
		||||
model_id = "TheBloke/zephyr-7B-alpha-AWQ"
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained(model_id).to("cuda:0")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Combining AWQ and Flash Attention
 | 
			
		||||
 | 
			
		||||
You can combine AWQ quantization with Flash Attention to get a model that is both quantized and faster. Simply load the model using `from_pretrained` and pass `use_flash_attention_2=True` argument.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained("TheBloke/zephyr-7B-alpha-AWQ", use_flash_attention_2=True, device_map="cuda:0")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Benchmarks
 | 
			
		||||
 | 
			
		||||
We performed some speed, throughput and latency benchmarks using [`optimum-benchmark`](https://github.com/huggingface/optimum-benchmark) library. 
 | 
			
		||||
 | 
			
		||||
Note that at the time of writing this documentation section, the available quantization methods were: `awq`, `gptq` and `bitsandbytes`.
 | 
			
		||||
 | 
			
		||||
The benchmark was run on a NVIDIA-A100 instance and the model used was [`TheBloke/Mistral-7B-v0.1-AWQ`](https://huggingface.co/TheBloke/Mistral-7B-v0.1-AWQ) for the AWQ model, [`TheBloke/Mistral-7B-v0.1-GPTQ`](https://huggingface.co/TheBloke/Mistral-7B-v0.1-GPTQ) for the GPTQ model. We also benchmarked it against `bitsandbytes` quantization methods and native `float16` model. Some results are shown below:
 | 
			
		||||
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/forward_memory_plot.png">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/generate_memory_plot.png">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/generate_throughput_plot.png">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/forward_latency_plot.png">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
You can find the full results together with packages versions in [this link](https://github.com/huggingface/optimum-benchmark/tree/main/examples/running-mistrals).
 | 
			
		||||
 | 
			
		||||
From the results it appears that the AWQ quantization method is the fastest quantization method for inference and text generation, and has among the lowest peak memory usage for text generation. However, AWQ seems to have the largest forward latency per batch size.
 | 
			
		||||
 | 
			
		||||
### Google colab demo
 | 
			
		||||
 | 
			
		||||
Check out how to use this integration throughout this [Google Colab demo](https://colab.research.google.com/drive/1HzZH89yAXJaZgwJDhQj9LqSBux932BvY)!
 | 
			
		||||
 | 
			
		||||
### AwqConfig
 | 
			
		||||
## AwqConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] AwqConfig
 | 
			
		||||
 | 
			
		||||
## `AutoGPTQ` Integration
 | 
			
		||||
 | 
			
		||||
🤗 Transformers has integrated the `optimum` API to perform GPTQ quantization on language models. You can load and quantize your model in 8, 4, 3 or even 2 bits without a big drop in performance and with faster inference speed! This is supported by most GPU hardware.
 | 
			
		||||
 | 
			
		||||
To learn more about the quantization model, check out: 
 | 
			
		||||
- the [GPTQ](https://arxiv.org/pdf/2210.17323.pdf) paper
 | 
			
		||||
- the `optimum` [guide](https://huggingface.co/docs/optimum/llm_quantization/usage_guides/quantization) on GPTQ quantization
 | 
			
		||||
- the [`AutoGPTQ`](https://github.com/PanQiWei/AutoGPTQ) library used as the backend
 | 
			
		||||
 | 
			
		||||
### Requirements
 | 
			
		||||
 | 
			
		||||
You need to have the following requirements installed to run the code below: 
 | 
			
		||||
 | 
			
		||||
- Install latest `AutoGPTQ` library
 | 
			
		||||
`pip install auto-gptq`
 | 
			
		||||
 | 
			
		||||
- Install latest `optimum` from source 
 | 
			
		||||
`pip install git+https://github.com/huggingface/optimum.git`
 | 
			
		||||
 | 
			
		||||
- Install latest `transformers` from source 
 | 
			
		||||
`pip install git+https://github.com/huggingface/transformers.git`
 | 
			
		||||
 | 
			
		||||
- Install latest `accelerate` library 
 | 
			
		||||
`pip install --upgrade accelerate`
 | 
			
		||||
 | 
			
		||||
Note that for now the GPTQ integration only supports text models, and you may encounter unexpected behaviour for vision, speech or multi-modal models.
 | 
			
		||||
 | 
			
		||||
### Load and quantize a model
 | 
			
		||||
 | 
			
		||||
GPTQ is a quantization method that requires weights calibration before using the quantized models. If you want to quantize transformers model from scratch, it might take some time before producing the quantized model (~5 min on a Google colab for `facebook/opt-350m` model). 
 | 
			
		||||
 | 
			
		||||
Hence, there are two different scenarios where you want to use GPTQ-quantized models. The first use case would be to load models that have already been quantized by other users and are available on the Hub; the second use case would be to quantize your model from scratch and save it or push it to the Hub so that other users can also use it.
 | 
			
		||||
 | 
			
		||||
#### GPTQ Configuration
 | 
			
		||||
 | 
			
		||||
In order to load and quantize a model, you need to create a [`GPTQConfig`]. You need to pass the number of `bits`, a `dataset` in order to calibrate the quantization, and the `tokenizer` of the model in order to prepare the dataset.
 | 
			
		||||
 | 
			
		||||
```python 
 | 
			
		||||
model_id = "facebook/opt-125m"
 | 
			
		||||
tokenizer = AutoTokenizer.from_pretrained(model_id)
 | 
			
		||||
gptq_config = GPTQConfig(bits=4, dataset = "c4", tokenizer=tokenizer)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Note that you can pass your own dataset as a list of strings. However, it is highly recommended to use the dataset from the GPTQ paper.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
dataset = ["auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."]
 | 
			
		||||
quantization = GPTQConfig(bits=4, dataset = dataset, tokenizer=tokenizer)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
#### Quantization
 | 
			
		||||
 | 
			
		||||
You can quantize a model by using `from_pretrained` and setting the `quantization_config`. 
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=gptq_config)
 | 
			
		||||
 | 
			
		||||
```
 | 
			
		||||
Note that you will need a GPU to quantize a model. We will put the model on the CPU and move the modules back and forth to the GPU in order to quantize them.
 | 
			
		||||
 | 
			
		||||
If you want to maximize your gpus usage while using cpu offload, you can set `device_map = "auto"`.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=gptq_config)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Note that disk offload is not supported. Furthermore, if you are out of memory because of the dataset, you may have to pass `max_memory` in `from_pretrained`. Check out this [guide](https://huggingface.co/docs/accelerate/usage_guides/big_modeling#designing-a-device-map) to learn more about `device_map` and `max_memory`.
 | 
			
		||||
 | 
			
		||||
<Tip warning={true}>
 | 
			
		||||
GPTQ quantization only works for text models for now. Furthermore, the quantization process can take a lot of time depending on one's hardware (175B model = 4 GPU hours using an NVIDIA A100). Please check on the Hub whether there is already a GPTQ-quantized version of the model. If not, you can submit a request on GitHub.
 | 
			
		||||
</Tip>
 | 
			
		||||
 | 
			
		||||
### Push quantized model to 🤗 Hub
 | 
			
		||||
 | 
			
		||||
You can push the quantized model like any 🤗 model to Hub with `push_to_hub`. The quantization config will be saved and pushed along the model. 
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
quantized_model.push_to_hub("opt-125m-gptq")
 | 
			
		||||
tokenizer.push_to_hub("opt-125m-gptq")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
If you want to save your quantized model on your local machine, you can also do it with `save_pretrained`: 
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
quantized_model.save_pretrained("opt-125m-gptq")
 | 
			
		||||
tokenizer.save_pretrained("opt-125m-gptq")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Note that if you have quantized your model with a `device_map`, make sure to move the entire model to one of your gpus or the `cpu` before saving it.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
quantized_model.to("cpu")
 | 
			
		||||
quantized_model.save_pretrained("opt-125m-gptq")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Load a quantized model from the 🤗 Hub
 | 
			
		||||
 | 
			
		||||
You can load a quantized model from the Hub by using `from_pretrained`.
 | 
			
		||||
Make sure that the pushed weights are quantized, by checking that the attribute `quantization_config` is present in the model configuration object.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
If you want to load a model faster and without allocating more memory than needed, the `device_map` argument also works with quantized model. Make sure that you have `accelerate` library installed.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Exllama kernels for faster inference
 | 
			
		||||
 | 
			
		||||
For 4-bit models, you can use the exllama kernels for faster inference speed. They are activated by default. You can change that behavior by passing `use_exllama` in [`GPTQConfig`]. This will overwrite the quantization config stored in the config. Note that you will only be able to overwrite the attributes related to the kernels. Furthermore, you need to have the entire model on GPUs if you want to use the exllama kernels. Also, you can perform CPU inference using Auto-GPTQ for Auto-GPTQ version > 0.4.2 by passing `device_map = "cpu"`. For CPU inference, you have to pass `use_exllama = False` in the [`GPTQConfig`].
 | 
			
		||||
 | 
			
		||||
```py
 | 
			
		||||
import torch
 | 
			
		||||
gptq_config = GPTQConfig(bits=4)
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config=gptq_config)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
With the release of the exllamav2 kernels, you can get faster inference speed compared to the exllama kernels. You just need to pass `exllama_config={"version": 2}` in [`GPTQConfig`]:
 | 
			
		||||
 | 
			
		||||
```py
 | 
			
		||||
import torch
 | 
			
		||||
gptq_config = GPTQConfig(bits=4, exllama_config={"version":2})
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config = gptq_config)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Note that only 4-bit models are supported for now. Furthermore, it is recommended to deactivate the exllama kernels if you are finetuning a quantized model with peft. 
 | 
			
		||||
 | 
			
		||||
You can find the benchmark of these kernels [here](https://github.com/huggingface/optimum/tree/main/tests/benchmark#gptq-benchmark)
 | 
			
		||||
#### Fine-tune a quantized model 
 | 
			
		||||
 | 
			
		||||
With the official support of adapters in the Hugging Face ecosystem, you can fine-tune models that have been quantized with GPTQ. 
 | 
			
		||||
Please have a look at [`peft`](https://github.com/huggingface/peft) library for more details.
 | 
			
		||||
 | 
			
		||||
### Example demo
 | 
			
		||||
 | 
			
		||||
Check out the Google Colab [notebook](https://colab.research.google.com/drive/1_TIrmuKOFhuRRiTWN94iLKUFu6ZX4ceb?usp=sharing) to learn how to quantize your model with GPTQ and how to finetune the quantized model with peft.
 | 
			
		||||
 | 
			
		||||
### GPTQConfig
 | 
			
		||||
## GPTQConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] GPTQConfig
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## `bitsandbytes` Integration
 | 
			
		||||
 | 
			
		||||
🤗 Transformers is closely integrated with the most used modules of `bitsandbytes`. You can load your model in 8-bit precision with a few lines of code.
This has been supported by most GPU hardware since the `0.37.0` release of `bitsandbytes`.
 | 
			
		||||
 | 
			
		||||
Learn more about the quantization method in the [LLM.int8()](https://arxiv.org/abs/2208.07339) paper, or the [blogpost](https://huggingface.co/blog/hf-bitsandbytes-integration) about the collaboration.
 | 
			
		||||
 | 
			
		||||
Since its `0.39.0` release, you can load any model that supports `device_map` using 4-bit quantization, leveraging FP4 data type.
 | 
			
		||||
 | 
			
		||||
If you want to quantize your own pytorch model, check out this [documentation](https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization) from 🤗 Accelerate library. 
 | 
			
		||||
 | 
			
		||||
Here are the things you can do using `bitsandbytes` integration
 | 
			
		||||
 | 
			
		||||
### General usage
 | 
			
		||||
 | 
			
		||||
You can quantize a model by using the `load_in_8bit` or `load_in_4bit` argument when calling the [`~PreTrainedModel.from_pretrained`] method as long as your model supports loading with 🤗 Accelerate and contains `torch.nn.Linear` layers. This should work for any modality as well.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM
 | 
			
		||||
 | 
			
		||||
model_8bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True)
 | 
			
		||||
model_4bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_4bit=True)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
By default all other modules (e.g. `torch.nn.LayerNorm`) will be converted in `torch.float16`, but if you want to change their `dtype` you can overwrite the `torch_dtype` argument:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
>>> import torch
 | 
			
		||||
>>> from transformers import AutoModelForCausalLM
 | 
			
		||||
 | 
			
		||||
>>> model_8bit = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True, torch_dtype=torch.float32)
 | 
			
		||||
>>> model_8bit.model.decoder.layers[-1].final_layer_norm.weight.dtype
 | 
			
		||||
torch.float32
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
### FP4 quantization 
 | 
			
		||||
 | 
			
		||||
#### Requirements
 | 
			
		||||
 | 
			
		||||
Make sure that you have installed the requirements below before running any of the code snippets below.
 | 
			
		||||
 | 
			
		||||
- Latest `bitsandbytes` library
 | 
			
		||||
`pip install bitsandbytes>=0.39.0`
 | 
			
		||||
 | 
			
		||||
- Install latest `accelerate`
 | 
			
		||||
`pip install --upgrade accelerate`
 | 
			
		||||
 | 
			
		||||
- Install latest `transformers`
 | 
			
		||||
`pip install --upgrade transformers`
 | 
			
		||||
 | 
			
		||||
#### Tips and best practices
 | 
			
		||||
 | 
			
		||||
- **Advanced usage:** Refer to [this Google Colab notebook](https://colab.research.google.com/drive/1ge2F1QSK8Q7h0hn3YKuBCOAS0bK8E0wf) for advanced usage of 4-bit quantization with all the possible options.
 | 
			
		||||
 | 
			
		||||
- **Faster inference with `batch_size=1` :** Since the `0.40.0` release of bitsandbytes, for `batch_size=1` you can benefit from fast inference. Check out [these release notes](https://github.com/TimDettmers/bitsandbytes/releases/tag/0.40.0) and make sure to have a version that is greater than `0.40.0` to benefit from this feature out of the box. 
 | 
			
		||||
 | 
			
		||||
- **Training:** According to [QLoRA paper](https://arxiv.org/abs/2305.14314), for training 4-bit base models (e.g. using LoRA adapters) one should use `bnb_4bit_quant_type='nf4'`. 
 | 
			
		||||
 | 
			
		||||
- **Inference:** For inference, `bnb_4bit_quant_type` does not have a huge impact on the performance. However for consistency with the model's weights, make sure you use the same `bnb_4bit_compute_dtype` and `torch_dtype` arguments.
 | 
			
		||||
 | 
			
		||||
#### Load a large model in 4bit
 | 
			
		||||
 | 
			
		||||
By using `load_in_4bit=True` when calling the `.from_pretrained` method, you can divide your memory use by 4 (roughly).
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
# pip install transformers accelerate bitsandbytes
 | 
			
		||||
from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
 | 
			
		||||
model_id = "bigscience/bloom-1b7"
 | 
			
		||||
 | 
			
		||||
tokenizer = AutoTokenizer.from_pretrained(model_id)
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
<Tip warning={true}>
 | 
			
		||||
 | 
			
		||||
Note that once a model has been loaded in 4-bit it is currently not possible to push the quantized weights on the Hub. Note also that you cannot train 4-bit weights as this is not supported yet. However you can use 4-bit models to train extra parameters, this will be covered in the next section.
 | 
			
		||||
 | 
			
		||||
</Tip>
 | 
			
		||||
 | 
			
		||||
### Load a large model in 8bit
 | 
			
		||||
 | 
			
		||||
You can roughly halve the memory requirements of a model by using the `load_in_8bit=True` argument when calling the `.from_pretrained` method
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
# pip install transformers accelerate bitsandbytes
 | 
			
		||||
from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
 | 
			
		||||
model_id = "bigscience/bloom-1b7"
 | 
			
		||||
 | 
			
		||||
tokenizer = AutoTokenizer.from_pretrained(model_id)
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_8bit=True)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Then, use your model as you would usually use a [`PreTrainedModel`].
 | 
			
		||||
 | 
			
		||||
You can check the memory footprint of your model with `get_memory_footprint` method.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
print(model.get_memory_footprint())
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
With this integration we were able to load large models on smaller devices and run them without any issue.  
 | 
			
		||||
 | 
			
		||||
<Tip warning={true}>
 | 
			
		||||
 | 
			
		||||
Note that once a model has been loaded in 8-bit it is currently not possible to push the quantized weights on the Hub except if you use the latest `transformers` and `bitsandbytes`. Note also that you cannot train 8-bit weights as this is not supported yet. However you can use 8-bit models to train extra parameters, this will be covered in the next section.
 | 
			
		||||
Note also that `device_map` is optional, but setting `device_map = 'auto'` is preferred for inference as it will efficiently dispatch the model on the available resources.
 | 
			
		||||
 | 
			
		||||
</Tip>
 | 
			
		||||
 | 
			
		||||
#### Advanced use cases
 | 
			
		||||
 | 
			
		||||
Here we will cover some advanced use cases you can perform with FP4 quantization 
 | 
			
		||||
 | 
			
		||||
##### Change the compute dtype
 | 
			
		||||
 | 
			
		||||
The compute dtype is used to change the dtype that will be used during computation. For example, hidden states could be in `float32` but computation can be set to bf16 for speedups. By default, the compute dtype is set to `float32`.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
import torch
 | 
			
		||||
from transformers import BitsAndBytesConfig
 | 
			
		||||
 | 
			
		||||
quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
##### Using NF4 (Normal Float 4) data type 
 | 
			
		||||
 | 
			
		||||
You can also use the NF4 data type, which is a new 4bit datatype adapted for weights that have been initialized using a normal distribution. For that run:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import BitsAndBytesConfig
 | 
			
		||||
 | 
			
		||||
nf4_config = BitsAndBytesConfig(
 | 
			
		||||
    load_in_4bit=True,
 | 
			
		||||
    bnb_4bit_quant_type="nf4",
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
model_nf4 = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=nf4_config)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
##### Use nested quantization for more memory efficient inference
 | 
			
		||||
 | 
			
		||||
We also advise users to use the nested quantization technique. This saves more memory at no additional performance cost - from our empirical observations, this enables fine-tuning the llama-13b model on an NVIDIA T4 16GB with a sequence length of 1024, a batch size of 1, and gradient accumulation steps of 4.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import BitsAndBytesConfig
 | 
			
		||||
 | 
			
		||||
double_quant_config = BitsAndBytesConfig(
 | 
			
		||||
    load_in_4bit=True,
 | 
			
		||||
    bnb_4bit_use_double_quant=True,
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
model_double_quant = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=double_quant_config)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
### Push quantized models on the 🤗 Hub
 | 
			
		||||
 | 
			
		||||
You can push a quantized model on the Hub simply by using the `push_to_hub` method. This will first push the quantization configuration file, then push the quantized model weights.
 | 
			
		||||
Make sure to use `bitsandbytes>0.37.2` (at this time of writing, we tested it on `bitsandbytes==0.38.0.post1`) to be able to use this feature. 
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained("bigscience/bloom-560m", device_map="auto", load_in_8bit=True)
 | 
			
		||||
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
 | 
			
		||||
 | 
			
		||||
model.push_to_hub("bloom-560m-8bit")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
<Tip warning={true}>
 | 
			
		||||
 | 
			
		||||
Pushing 8bit models on the Hub is strongly encouraged for large models. This will allow the community to benefit from the memory footprint reduction and, for example, from loading large models on a Google Colab.
 | 
			
		||||
 | 
			
		||||
</Tip>
 | 
			
		||||
 | 
			
		||||
### Load a quantized model from the 🤗 Hub
 | 
			
		||||
 | 
			
		||||
You can load a quantized model from the Hub by using `from_pretrained` method. Make sure that the pushed weights are quantized, by checking that the attribute `quantization_config` is present in the model configuration object.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained("{your_username}/bloom-560m-8bit", device_map="auto")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Note that in this case, you don't need to specify the arguments `load_in_8bit=True`, but you need to make sure that `bitsandbytes` and `accelerate` are installed.
 | 
			
		||||
Note also that `device_map` is optional, but setting `device_map = 'auto'` is preferred for inference as it will efficiently dispatch the model on the available resources.
 | 
			
		||||
 | 
			
		||||
### Advanced use cases
 | 
			
		||||
 | 
			
		||||
This section is intended for advanced users who want to explore what is possible to do beyond loading and running 8-bit models.
 | 
			
		||||
 | 
			
		||||
#### Offload between `cpu` and `gpu`
 | 
			
		||||
 | 
			
		||||
One of the advanced use cases of this is being able to load a model and dispatch the weights between `CPU` and `GPU`. Note that the weights that will be dispatched on CPU **will not** be converted to 8-bit, and are thus kept in `float32`. This feature is intended for users that want to fit a very large model and dispatch the model between GPU and CPU.
 | 
			
		||||
 | 
			
		||||
First, load a [`BitsAndBytesConfig`] from `transformers` and set the attribute `llm_int8_enable_fp32_cpu_offload` to `True`:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 | 
			
		||||
 | 
			
		||||
quantization_config = BitsAndBytesConfig(llm_int8_enable_fp32_cpu_offload=True)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Let's say you want to load `bigscience/bloom-1b7` model, and you have just enough GPU RAM to fit the entire model except the `lm_head`. Therefore write a custom device_map as follows:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
device_map = {
 | 
			
		||||
    "transformer.word_embeddings": 0,
 | 
			
		||||
    "transformer.word_embeddings_layernorm": 0,
 | 
			
		||||
    "lm_head": "cpu",
 | 
			
		||||
    "transformer.h": 0,
 | 
			
		||||
    "transformer.ln_f": 0,
 | 
			
		||||
}
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
And load your model as follows:
 | 
			
		||||
```python
 | 
			
		||||
model_8bit = AutoModelForCausalLM.from_pretrained(
 | 
			
		||||
    "bigscience/bloom-1b7",
 | 
			
		||||
    device_map=device_map,
 | 
			
		||||
    quantization_config=quantization_config,
 | 
			
		||||
)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
And that's it! Enjoy your model!

#### Play with `llm_int8_threshold`

You can play with the `llm_int8_threshold` argument to change the threshold of the outliers. An "outlier" is a hidden state value that is greater than a certain threshold.
This corresponds to the outlier threshold for outlier detection as described in the `LLM.int8()` paper. Any hidden state value that is above this threshold will be considered an outlier and the operation on those values will be done in fp16. Values are usually normally distributed, that is, most values are in the range [-3.5, 3.5], but there are some exceptional systematic outliers that are very differently distributed for large models. These outliers are often in the interval [-60, -6] or [6, 60]. Int8 quantization works well for values of magnitude ~5, but beyond that, there is a significant performance penalty. A good default threshold is 6, but a lower threshold might be needed for more unstable models (small models, fine-tuning).
This argument can impact the inference speed of the model. We suggest playing with this parameter to find the best value for your use case.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "bigscience/bloom-1b7"

quantization_config = BitsAndBytesConfig(
    llm_int8_threshold=10,
)

model_8bit = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map=device_map,  # reuses the custom `device_map` defined above
    quantization_config=quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
```

#### Skip the conversion of some modules

Some models have several modules that need to be kept in full precision (i.e. not converted to 8-bit) to ensure stability. For example, the Jukebox model has several `lm_head` modules that should be skipped. Play with `llm_int8_skip_modules`:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_id = "bigscience/bloom-1b7"

quantization_config = BitsAndBytesConfig(
    llm_int8_skip_modules=["lm_head"],
)

model_8bit = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map=device_map,
    quantization_config=quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)
```

#### Fine-tune a model that has been loaded in 8-bit

With the official support of adapters in the Hugging Face ecosystem, you can fine-tune models that have been loaded in 8-bit.
This enables fine-tuning large models such as `flan-t5-large` or `facebook/opt-6.7b` in a single Google Colab. Please have a look at the [`peft`](https://github.com/huggingface/peft) library for more details.

Note that you don't need to pass `device_map` when loading the model for training. It will automatically load your model on your GPU. You can also set the device map to a specific device if needed (e.g. `cuda:0`, `0`, `torch.device('cuda:0')`). Please note that `device_map=auto` should be used for inference only.
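
For illustration, a minimal sketch of this workflow with the `peft` library is shown below. This is not part of the original guide: the checkpoint and `target_modules` are assumptions that depend on your model, and a recent `peft` release is assumed.

```python
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# load the base model in 8-bit
model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", load_in_8bit=True)

# prepare the quantized model for training (casts norms, enables gradient checkpointing, ...)
model = prepare_model_for_kbit_training(model)

# attach small trainable LoRA adapters; only these are updated during fine-tuning
lora_config = LoraConfig(
    r=8,
    lora_alpha=32,
    target_modules=["q_proj", "v_proj"],  # assumption: attention projection names of an OPT-style model
    lora_dropout=0.05,
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```

The model can then be trained with the usual [`Trainer`] API, since only the adapter weights require gradients.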

## BitsAndBytesConfig

[[autodoc]] BitsAndBytesConfig


## Quantization with 🤗 `optimum`

Please have a look at [Optimum documentation](https://huggingface.co/docs/optimum/index) to learn more about quantization methods that are supported by `optimum` and see if these are applicable for your use case.
@ -26,7 +26,7 @@ If you're looking to fine-tune a language model like Llama-2 or Mistral on a tex

Before instantiating your [`Trainer`], create a [`TrainingArguments`] to access all the points of customization during training.
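
As a rough illustration (not from the original guide; the model, dataset and hyperparameters below are placeholders), the typical pattern looks like this:

```python
from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments

model = AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)

training_args = TrainingArguments(
    output_dir="my-model",            # where checkpoints are written
    per_device_train_batch_size=16,
    num_train_epochs=3,
    fp16=True,                        # mixed precision via native AMP
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,      # assumption: a tokenized dataset prepared earlier
)
trainer.train()
```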

The API supports distributed training on multiple GPUs/TPUs, mixed precision through [NVIDIA Apex](https://github.com/NVIDIA/apex) for NVIDIA GPUs, [ROCm APEX](https://github.com/ROCmSoftwarePlatform/apex) for AMD GPUs, and Native AMP for PyTorch.

The [`Trainer`] contains the basic training loop which supports the above features. To inject custom behavior you can subclass it and override the following methods, as in the sketch below:
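
For example, a minimal sketch of overriding `compute_loss` (one of the overridable methods; the plain cross-entropy below is an illustrative assumption for a standard classification setup):

```python
import torch
from transformers import Trainer


class CustomTrainer(Trainer):
    def compute_loss(self, model, inputs, return_outputs=False):
        # pop the labels and compute a custom loss on the logits
        labels = inputs.pop("labels")
        outputs = model(**inputs)
        loss = torch.nn.functional.cross_entropy(outputs.logits, labels)
        return (loss, outputs) if return_outputs else loss
```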

@ -206,7 +206,7 @@ Let's discuss how you can tell your program which GPUs are to be used and in wha

When using [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html) to use only a subset of your GPUs, you simply specify the number of GPUs to use. For example, if you have 4 GPUs, but you wish to use the first 2 you can do:

```bash
torchrun --nproc_per_node=2 trainer-program.py ...
```

If you have either [`accelerate`](https://github.com/huggingface/accelerate) or [`deepspeed`](https://github.com/microsoft/DeepSpeed) installed you can also accomplish the same by using one of:

@ -219,7 +219,7 @@ accelerate launch --num_processes 2 trainer-program.py ...

deepspeed --num_gpus 2 trainer-program.py ...
```

You don't need to use the Accelerate or [the Deepspeed integration](deepspeed) features to use these launchers.

Until now you were able to tell the program how many GPUs to use. Now let's discuss how to select specific GPUs and control their order.

@ -233,7 +233,7 @@ If you have multiple GPUs and you'd like to use only 1 or a few of those GPUs, s

For example, let's say you have 4 GPUs: 0, 1, 2 and 3. To run only on the physical GPUs 0 and 2, you can do:

```bash
CUDA_VISIBLE_DEVICES=0,2 torchrun trainer-program.py ...
```

So now PyTorch will see only 2 GPUs, where your physical GPUs 0 and 2 are mapped to `cuda:0` and `cuda:1` respectively.
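
As a quick sanity check (illustrative only), you can confirm what PyTorch sees after restricting the visible devices:

```python
import torch

# with CUDA_VISIBLE_DEVICES=0,2 set in the environment
print(torch.cuda.device_count())      # 2
print(torch.cuda.get_device_name(0))  # physical GPU 0
print(torch.cuda.get_device_name(1))  # physical GPU 2
```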

@ -241,7 +241,7 @@ So now pytorch will see only 2 GPUs, where your physical GPUs 0 and 2 are mapped

You can even change their order:

```bash
CUDA_VISIBLE_DEVICES=2,0 torchrun trainer-program.py ...
```

Here your physical GPUs 0 and 2 are mapped to `cuda:1` and `cuda:0` respectively.

@ -263,7 +263,7 @@ As with any environment variable you can, of course, export those instead of add

```bash
export CUDA_VISIBLE_DEVICES=0,2
torchrun trainer-program.py ...
```

but this approach can be confusing since you may forget you set up the environment variable earlier and not understand why the wrong GPUs are used. Therefore, it's a common practice to set the environment variable just for a specific run on the same command line as it's shown in most examples of this section.

@ -272,7 +272,7 @@ but this approach can be confusing since you may forget you set up the environme

There is an additional environment variable `CUDA_DEVICE_ORDER` that controls how the physical devices are ordered. The two choices are:

1. ordered by PCIe bus IDs (matches `nvidia-smi` and `rocm-smi`'s order) - this is the default.

```bash
export CUDA_DEVICE_ORDER=PCI_BUS_ID
@ -284,7 +284,7 @@ export CUDA_DEVICE_ORDER=PCI_BUS_ID
export CUDA_DEVICE_ORDER=FASTEST_FIRST
```

Most of the time you don't need to care about this environment variable, but it's very helpful if you have a lopsided setup where you have an old and a new GPU physically inserted in such a way that the slow older card appears to be first. One way to fix that is to swap the cards. But if you can't swap the cards (e.g., if the cooling of the devices gets impacted) then setting `CUDA_DEVICE_ORDER=FASTEST_FIRST` will always put the newer, faster card first. It'll be somewhat confusing though since `nvidia-smi` (or `rocm-smi`) will still report them in the PCIe order.

The other solution to swapping the order is to use:

@ -426,8 +426,7 @@ To read more about it and the benefits, check out the [Fully Sharded Data Parall

We have integrated the latest PyTorch Fully Sharded Data Parallel (FSDP) training feature.
All you need to do is enable it through the config.

**Required PyTorch version for FSDP support**: PyTorch >=2.1.0

**Usage**:

@ -440,6 +439,8 @@ as the model saving with FSDP activated is only available with recent fixes.
  - SHARD_GRAD_OP : Shards optimizer states + gradients across data parallel workers/GPUs.
    For this, add `--fsdp shard_grad_op` to the command line arguments.
  - NO_SHARD : No sharding. For this, add `--fsdp no_shard` to the command line arguments.
  - HYBRID_SHARD : Applies FULL_SHARD within a node, and replicates parameters across nodes. For this, add `--fsdp hybrid_shard` to the command line arguments.
  - HYBRID_SHARD_ZERO2 : Applies SHARD_GRAD_OP within a node, and replicates parameters across nodes. For this, add `--fsdp hybrid_shard_zero2` to the command line arguments.
- To offload the parameters and gradients to the CPU,
  add `--fsdp "full_shard offload"` or `--fsdp "shard_grad_op offload"` to the command line arguments.
- To automatically recursively wrap layers with FSDP using `default_auto_wrap_policy`,
@ -449,18 +450,18 @@ as the model saving with FSDP activated is only available with recent fixes.
- Remaining FSDP config is passed via `--fsdp_config <path_to_fsdp_config.json>`. It is either the location of an
  FSDP json config file (e.g., `fsdp_config.json`) or an already loaded json file as a `dict` (see the sketch after this list).
  - If auto wrapping is enabled, you can either use a transformer based auto wrap policy or a size based auto wrap policy.
    - For the transformer based auto wrap policy, it is recommended to specify `transformer_layer_cls_to_wrap` in the config file. If not specified, the default value is `model._no_split_modules` when available.
      This specifies the list of transformer layer class names (case-sensitive) to wrap, e.g., [`BertLayer`], [`GPTJBlock`], [`T5Block`] ....
      This is important because submodules that share weights (e.g., embedding layer) should not end up in different FSDP wrapped units.
      Using this policy, wrapping happens for each block containing Multi-Head Attention followed by a couple of MLP layers.
      Remaining layers including the shared embeddings are conveniently wrapped in the same outermost FSDP unit.
      Therefore, use this for transformer based models.
    - For the size based auto wrap policy, please add `min_num_params` in the config file.
      It specifies FSDP's minimum number of parameters for auto wrapping.
  - `backward_prefetch` can be specified in the config file. It controls when to prefetch the next set of parameters.
    `backward_pre` and `backward_post` are available options.
    For more information refer to `torch.distributed.fsdp.fully_sharded_data_parallel.BackwardPrefetch`.
  - `forward_prefetch` can be specified in the config file. It controls when to prefetch the next set of parameters.
    If `"True"`, FSDP explicitly prefetches the next upcoming all-gather while executing in the forward pass.
  - `limit_all_gathers` can be specified in the config file.
    If `"True"`, FSDP explicitly synchronizes the CPU thread to prevent too many in-flight all-gathers.
@ -468,6 +469,20 @@ as the model saving with FSDP activated is only available with recent fixes.
    If `"True"`, FSDP activation checkpointing is a technique to reduce memory usage by clearing activations of
    certain layers and recomputing them during a backward pass. Effectively, this trades extra computation time
    for reduced memory usage.
  - `use_orig_params` can be specified in the config file.
    If `True`, allows non-uniform `requires_grad` during init, which means support for interspersed frozen and trainable parameters. This is useful in cases such as parameter-efficient fine-tuning, and it also enables having different optimizer param groups. This should be `True` when creating the optimizer object before preparing/wrapping the model with FSDP.
    Please refer to this [blog post](https://dev-discuss.pytorch.org/t/rethinking-pytorch-fully-sharded-data-parallel-fsdp-from-first-principles/1019).
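
For illustration, a config along these lines could be saved as `fsdp_config.json` or passed directly as a `dict` to [`TrainingArguments`]. This is only a sketch using the keys described above; the layer class name is an assumption that must match your model.

```python
# Hypothetical FSDP config sketch; pair it with e.g. `--fsdp "full_shard auto_wrap"`.
fsdp_config = {
    "transformer_layer_cls_to_wrap": ["BertLayer"],  # case-sensitive class name(s); assumption: a BERT-style model
    "backward_prefetch": "backward_pre",             # or "backward_post"
    "forward_prefetch": "true",
    "limit_all_gathers": "true",
    "use_orig_params": "true",
}
```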

**Saving and loading**
Saving entire intermediate checkpoints using `FULL_STATE_DICT` state_dict_type with CPU offloading on rank 0 takes a lot of time and often results in NCCL Timeout errors due to indefinite hanging during broadcasting. However, at the end of training, we want the whole model state dict instead of the sharded state dict which is only compatible with FSDP. Use `SHARDED_STATE_DICT` (default) state_dict_type to save the intermediate checkpoints and optimizer states in this format recommended by the PyTorch team.

Saving the final checkpoint in transformers format using the default `safetensors` format requires the changes below.
```python
if trainer.is_fsdp_enabled:
    trainer.accelerator.state.fsdp_plugin.set_state_dict_type("FULL_STATE_DICT")

trainer.save_model(script_args.output_dir)
```

**A few caveats to be aware of**
- it is incompatible with `generate`, thus is incompatible with `--predict_with_generate`

@ -492,15 +507,15 @@ Pass `--fsdp "full shard"` along with following changes to be made in `--fsdp_co
  https://github.com/pytorch/xla/blob/master/torch_xla/distributed/fsdp/xla_fully_sharded_data_parallel.py).
- `xla_fsdp_grad_ckpt`. When `True`, uses gradient checkpointing over each nested XLA FSDP wrapped layer.
  This setting can only be used when the xla flag is set to true, and an auto wrapping policy is specified through
  `min_num_params` or `transformer_layer_cls_to_wrap`.
- You can either use a transformer based auto wrap policy or a size based auto wrap policy.
  - For the transformer based auto wrap policy, it is recommended to specify `transformer_layer_cls_to_wrap` in the config file. If not specified, the default value is `model._no_split_modules` when available.
    This specifies the list of transformer layer class names (case-sensitive) to wrap, e.g., [`BertLayer`], [`GPTJBlock`], [`T5Block`] ....
    This is important because submodules that share weights (e.g., embedding layer) should not end up in different FSDP wrapped units.
    Using this policy, wrapping happens for each block containing Multi-Head Attention followed by a couple of MLP layers.
    Remaining layers including the shared embeddings are conveniently wrapped in the same outermost FSDP unit.
    Therefore, use this for transformer based models.
  - For the size based auto wrap policy, please add `min_num_params` in the config file.
    It specifies FSDP's minimum number of parameters for auto wrapping.


@ -59,13 +59,67 @@ This model was contributed by [lysandre](https://huggingface.co/lysandre). This

- Layers are split in groups that share parameters (to save memory).
Next sentence prediction is replaced by a sentence ordering prediction: in the inputs, we have two sentences A and B (that are consecutive) and we either feed A followed by B or B followed by A. The model must predict if they have been swapped or not.

This model was contributed by [lysandre](https://huggingface.co/lysandre). The JAX version of this model was contributed by
[kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/google-research/ALBERT).

## Resources

The resources provided in the following sections consist of a list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ALBERT. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
<PipelineTag pipeline="text-classification"/>
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
- [`AlbertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification).
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
- [`TFAlbertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification).
 | 
			
		||||
 | 
			
		||||
- [`FlaxAlbertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_flax.ipynb).
 | 
			
		||||
- Check the [Text classification task guide](../tasks/sequence_classification) on how to use the model.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
<PipelineTag pipeline="token-classification"/>
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
- [`AlbertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification).
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
- [`TFAlbertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb).
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
- [`FlaxAlbertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification).
 | 
			
		||||
- [Token classification](https://huggingface.co/course/chapter7/2?fw=pt) chapter of the 🤗 Hugging Face Course.
 | 
			
		||||
- Check the [Token classification task guide](../tasks/token_classification) on how to use the model.
 | 
			
		||||
 | 
			
		||||
<PipelineTag pipeline="fill-mask"/>
 | 
			
		||||
 | 
			
		||||
- [`AlbertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb).
 | 
			
		||||
- [`TFAlbertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
 | 
			
		||||
- [`FlaxAlbertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb).
 | 
			
		||||
- [Masked language modeling](https://huggingface.co/course/chapter7/3?fw=pt) chapter of the 🤗 Hugging Face Course.
 | 
			
		||||
- Check the [Masked language modeling task guide](../tasks/masked_language_modeling) on how to use the model.
 | 
			
		||||
 | 
			
		||||
<PipelineTag pipeline="question-answering"/>
 | 
			
		||||
 | 
			
		||||
- [`AlbertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb).
 | 
			
		||||
- [`TFAlbertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb).
 | 
			
		||||
- [`FlaxAlbertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering).
 | 
			
		||||
- [Question answering](https://huggingface.co/course/chapter7/7?fw=pt) chapter of the 🤗 Hugging Face Course.
 | 
			
		||||
- Check the [Question answering task guide](../tasks/question_answering) on how to use the model.
 | 
			
		||||
 | 
			
		||||
**Multiple choice**
 | 
			
		||||
 | 
			
		||||
- [`AlbertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb).
 | 
			
		||||
- [`TFAlbertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb).
 | 
			
		||||
 | 
			
		||||
- Check the  [Multiple choice task guide](../tasks/multiple_choice) on how to use the model.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## AlbertConfig
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -29,7 +29,7 @@ The abstract from the paper is the following:
 | 
			
		||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/audio_spectogram_transformer_architecture.png"
 | 
			
		||||
alt="drawing" width="600"/>
 | 
			
		||||
 | 
			
		||||
<small> Audio Spectrogram Transformer architecture. Taken from the <a href="https://arxiv.org/abs/2104.01778">original paper</a>.</small>
 | 
			
		||||
 | 
			
		||||
This model was contributed by [nielsr](https://huggingface.co/nielsr).
 | 
			
		||||
The original code can be found [here](https://github.com/YuanGongND/ast).
 | 
			
		||||
@ -72,4 +72,4 @@ If you're interested in submitting a resource to be included here, please feel f
 | 
			
		||||
## ASTForAudioClassification
 | 
			
		||||
 | 
			
		||||
[[autodoc]] ASTForAudioClassification
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
@ -49,7 +49,7 @@ You will then be able to use the auto classes like you would usually do!
 | 
			
		||||
 | 
			
		||||
<Tip warning={true}>
 | 
			
		||||
 | 
			
		||||
If your `NewModelConfig` is a subclass of [`~transformers.PretrainedConfig`], make sure its
 | 
			
		||||
`model_type` attribute is set to the same key you use when registering the config (here `"new-model"`).
 | 
			
		||||
 | 
			
		||||
Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its
 | 
			
		||||
 | 
			
		||||
@ -44,7 +44,19 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 | 
			
		||||
model = BarkModel.from_pretrained("suno/bark-small", torch_dtype=torch.float16).to(device)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
#### Using CPU offload
 | 
			
		||||
 | 
			
		||||
As mentioned above, Bark is made up of 4 sub-models, which are called up sequentially during audio generation. In other words, while one sub-model is in use, the other sub-models are idle.
 | 
			
		||||
 | 
			
		||||
If you're using a CUDA device, a simple solution to benefit from an 80% reduction in memory footprint is to offload the submodels from GPU to CPU when they're idle. This operation is called *CPU offloading*. You can use it with one line of code as follows:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
model.enable_cpu_offload()
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Note that 🤗 Accelerate must be installed before using this feature. [Here's how to install it.](https://huggingface.co/docs/accelerate/basic_tutorials/install)
 | 
			
		||||
 | 
			
		||||
#### Using Better Transformer
 | 
			
		||||
 | 
			
		||||
Better Transformer is an 🤗 Optimum feature that performs kernel fusion under the hood. You can gain 20% to 30% in speed with zero performance degradation. It only requires one line of code to export the model to 🤗 Better Transformer:
 | 
			
		||||
 | 
			
		||||
@ -54,21 +66,46 @@ model =  model.to_bettertransformer()
 | 
			
		||||
 | 
			
		||||
Note that 🤗 Optimum must be installed before using this feature. [Here's how to install it.](https://huggingface.co/docs/optimum/installation)

#### Using Flash Attention 2

Flash Attention 2 is an even faster, optimized version of the previous optimization.

##### Installation

First, check whether your hardware is compatible with Flash Attention 2. The latest list of compatible hardware can be found in the [official documentation](https://github.com/Dao-AILab/flash-attention#installation-and-features). If your hardware is not compatible with Flash Attention 2, you can still benefit from attention kernel optimisations through Better Transformer support covered [above](https://huggingface.co/docs/transformers/main/en/model_doc/bark#using-better-transformer).

Next, [install](https://github.com/Dao-AILab/flash-attention#installation-and-features) the latest version of Flash Attention 2:

```bash
pip install -U flash-attn --no-build-isolation
```
 | 
			
		||||
 | 
			
		||||
##### Usage
 | 
			
		||||
 | 
			
		||||
To load a model using Flash Attention 2, we can pass the `attn_implementation="flash_attention_2"` flag to [`.from_pretrained`](https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.PreTrainedModel.from_pretrained). We'll also load the model in half-precision (e.g. `torch.float16`), since it results in almost no degradation to audio quality but significantly lower memory usage and faster inference:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
model = BarkModel.from_pretrained("suno/bark-small", torch_dtype=torch.float16, attn_implementation="flash_attention_2").to(device)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
##### Performance comparison
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
The following diagram shows the latency for the native attention implementation (no optimisation) against Better Transformer and Flash Attention 2. In all cases, we generate 400 semantic tokens on a 40GB A100 GPU with PyTorch 2.1. Flash Attention 2 is also consistently faster than Better Transformer, and its performance improves even more as batch sizes increase:
 | 
			
		||||
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://huggingface.co/datasets/ylacombe/benchmark-comparison/resolve/main/Bark%20Optimization%20Benchmark.png">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
To put this into perspective, on an NVIDIA A100 and when generating 400 semantic tokens with a batch size of 16, you can get 17 times the [throughput](https://huggingface.co/blog/optimizing-bark#throughput) and still be 2 seconds faster than generating sentences one by one with the native model implementation. In other words, all the samples will be generated 17 times faster.
 | 
			
		||||
 | 
			
		||||
At batch size 8, on an NVIDIA A100, Flash Attention 2 is also 10% faster than Better Transformer, and at batch size 16, 25%.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
#### Combining optimization techniques
 | 
			
		||||
 | 
			
		||||
You can combine optimization techniques, and use CPU offload, half-precision and Flash Attention 2 (or 🤗 Better Transformer) all at once.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import BarkModel
 | 
			
		||||
@ -76,11 +113,8 @@ import torch
 | 
			
		||||
 | 
			
		||||
device = "cuda" if torch.cuda.is_available() else "cpu"
 | 
			
		||||
 | 
			
		||||
# load in fp16 and use Flash Attention 2
model = BarkModel.from_pretrained("suno/bark-small", torch_dtype=torch.float16, attn_implementation="flash_attention_2").to(device)
 | 
			
		||||
 | 
			
		||||
# enable CPU offload
 | 
			
		||||
model.enable_cpu_offload()
 | 
			
		||||
 | 
			
		||||
@ -18,8 +18,7 @@ rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
## Overview
 | 
			
		||||
 | 
			
		||||
The BioGPT model was proposed in [BioGPT: generative pre-trained transformer for biomedical text generation and mining](https://academic.oup.com/bib/advance-article/doi/10.1093/bib/bbac409/6713511?guestAccessKey=a66d9b5d-4f83-4017-bb52-405815c907b9) by Renqian Luo, Liai Sun, Yingce Xia, Tao Qin, Sheng Zhang, Hoifung Poon and Tie-Yan Liu. BioGPT is a domain-specific generative pre-trained Transformer language model for biomedical text generation and mining. BioGPT follows the Transformer language model backbone, and is pre-trained on 15M PubMed abstracts from scratch.
 | 
			
		||||
 | 
			
		||||
The abstract from the paper is the following:

docs/source/en/model_doc/clvp.md (new file, 126 lines)
@ -0,0 +1,126 @@
 | 
			
		||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 | 
			
		||||
the License. You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 | 
			
		||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 | 
			
		||||
specific language governing permissions and limitations under the License.
 | 
			
		||||
 | 
			
		||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
 | 
			
		||||
rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
-->
 | 
			
		||||
 | 
			
		||||
# CLVP
 | 
			
		||||
 | 
			
		||||
## Overview
 | 
			
		||||
 | 
			
		||||
The CLVP (Contrastive Language-Voice Pretrained Transformer) model was proposed in [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) by James Betker.
 | 
			
		||||
 | 
			
		||||
The abstract from the paper is the following:
 | 
			
		||||
 | 
			
		||||
*In recent years, the field of image generation has been revolutionized by the application of autoregressive transformers and DDPMs. These approaches model the process of image generation as a step-wise probabilistic processes and leverage large amounts of compute and data to learn the image distribution. This methodology of improving performance need not be confined to images. This paper describes a way to apply advances in the image generative domain to speech synthesis. The result is TorToise - an expressive, multi-voice text-to-speech system.*
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
This model was contributed by [Susnato Dhar](https://huggingface.co/susnato).
 | 
			
		||||
The original code can be found [here](https://github.com/neonbjb/tortoise-tts).
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## Usage tips
 | 
			
		||||
 | 
			
		||||
1. CLVP is an integral part of the Tortoise TTS model.
 | 
			
		||||
2. CLVP can be used to compare different generated speech candidates with the provided text, and the best speech tokens are forwarded to the diffusion model.
 | 
			
		||||
3. The use of the [`ClvpModelForConditionalGeneration.generate()`] method is strongly recommended for tortoise usage.
 | 
			
		||||
4. Note that the CLVP model expects the audio to be sampled at 22.05 kHz, contrary to other audio models which expect 16 kHz. 
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## Brief Explanation
 | 
			
		||||
 | 
			
		||||
- The [`ClvpTokenizer`] tokenizes the text input, and the [`ClvpFeatureExtractor`] extracts the log mel-spectrogram from the desired audio.
 | 
			
		||||
- [`ClvpConditioningEncoder`] takes those text tokens and audio representations and converts them into embeddings conditioned on the text and audio.
 | 
			
		||||
- The [`ClvpForCausalLM`] uses those embeddings to generate multiple speech candidates.
 | 
			
		||||
- Each speech candidate is passed through the speech encoder ([`ClvpEncoder`]) which converts them into a vector representation, and the text encoder ([`ClvpEncoder`]) converts the text tokens into the same latent space. 
 | 
			
		||||
- At the end, we compare each speech vector with the text vector to see which speech vector is most similar to the text vector. 
 | 
			
		||||
- [`ClvpModelForConditionalGeneration.generate()`] compresses all of the logic described above into a single method.  
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
Example:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
>>> import datasets
 | 
			
		||||
>>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration
 | 
			
		||||
 | 
			
		||||
>>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library).
 | 
			
		||||
>>> text = "This is an example text."
 | 
			
		||||
 | 
			
		||||
>>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
 | 
			
		||||
>>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
 | 
			
		||||
>>> sample = ds[0]["audio"]
 | 
			
		||||
 | 
			
		||||
>>> # Define processor and model.
 | 
			
		||||
>>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
 | 
			
		||||
>>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")
 | 
			
		||||
 | 
			
		||||
>>> # Generate processor output and model output.
 | 
			
		||||
>>> processor_output = processor(raw_speech=sample["array"], sampling_rate=sample["sampling_rate"], text=text, return_tensors="pt")
 | 
			
		||||
>>> generated_output = model.generate(**processor_output)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## ClvpConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] ClvpConfig
 | 
			
		||||
    - from_sub_model_configs
 | 
			
		||||
 | 
			
		||||
## ClvpEncoderConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] ClvpEncoderConfig
 | 
			
		||||
 | 
			
		||||
## ClvpDecoderConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] ClvpDecoderConfig
 | 
			
		||||
 | 
			
		||||
## ClvpTokenizer
 | 
			
		||||
 | 
			
		||||
[[autodoc]] ClvpTokenizer
 | 
			
		||||
    - save_vocabulary
 | 
			
		||||
 | 
			
		||||
## ClvpFeatureExtractor
 | 
			
		||||
 | 
			
		||||
[[autodoc]] ClvpFeatureExtractor
 | 
			
		||||
    - __call__
 | 
			
		||||
 | 
			
		||||
## ClvpProcessor
 | 
			
		||||
 | 
			
		||||
[[autodoc]] ClvpProcessor
 | 
			
		||||
    - __call__
 | 
			
		||||
    - decode
 | 
			
		||||
    - batch_decode
 | 
			
		||||
 | 
			
		||||
## ClvpModelForConditionalGeneration
 | 
			
		||||
 | 
			
		||||
[[autodoc]] ClvpModelForConditionalGeneration
 | 
			
		||||
    - forward
 | 
			
		||||
    - generate
 | 
			
		||||
    - get_text_features
 | 
			
		||||
    - get_speech_features
 | 
			
		||||
 | 
			
		||||
## ClvpForCausalLM
 | 
			
		||||
 | 
			
		||||
[[autodoc]] ClvpForCausalLM
 | 
			
		||||
 | 
			
		||||
## ClvpModel
 | 
			
		||||
 | 
			
		||||
[[autodoc]] ClvpModel
 | 
			
		||||
 | 
			
		||||
## ClvpEncoder
 | 
			
		||||
 | 
			
		||||
[[autodoc]] ClvpEncoder
 | 
			
		||||
 | 
			
		||||
## ClvpDecoder
 | 
			
		||||
 | 
			
		||||
[[autodoc]] ClvpDecoder
 | 
			
		||||
 | 
			
		||||
@ -146,7 +146,7 @@ As a summary, consider the following table:
 | 
			
		||||
| **Model** | [`~transformers.DetrForObjectDetection`] | [`~transformers.DetrForSegmentation`] | [`~transformers.DetrForSegmentation`] |
 | 
			
		||||
| **Example dataset** | COCO detection | COCO detection, COCO panoptic | COCO panoptic  |                                                                        |
 | 
			
		||||
| **Format of annotations to provide to**  [`~transformers.DetrImageProcessor`] | {'image_id': `int`, 'annotations': `List[Dict]`} each Dict being a COCO object annotation  | {'image_id': `int`, 'annotations': `List[Dict]`}  (in case of COCO detection) or {'file_name': `str`, 'image_id': `int`, 'segments_info': `List[Dict]`} (in case of COCO panoptic) | {'file_name': `str`, 'image_id': `int`, 'segments_info': `List[Dict]`} and masks_path (path to directory containing PNG files of the masks) |
 | 
			
		||||
| **Postprocessing** (i.e. converting the output of the model to Pascal VOC format) | [`~transformers.DetrImageProcessor.post_process`] | [`~transformers.DetrImageProcessor.post_process_segmentation`] | [`~transformers.DetrImageProcessor.post_process_segmentation`], [`~transformers.DetrImageProcessor.post_process_panoptic`] |
| **evaluators** | `CocoEvaluator` with `iou_types="bbox"` | `CocoEvaluator` with `iou_types="bbox"` or `"segm"` | `CocoEvaluator` with `iou_types="bbox"` or `"segm"`, `PanopticEvaluator` |
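
To make the first annotation format concrete, a COCO-detection-style target passed to [`~transformers.DetrImageProcessor`] looks roughly like the (made-up) dictionary below:

```python
# Illustrative only: one image with a single box in COCO detection format.
target = {
    "image_id": 42,
    "annotations": [
        {
            "bbox": [16.0, 32.0, 96.0, 64.0],  # [x, y, width, height] in pixels
            "category_id": 3,
            "area": 6144.0,
            "iscrowd": 0,
        },
    ],
}
```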
 | 
			
		||||
 | 
			
		||||
In short, one should prepare the data either in COCO detection or COCO panoptic format, then use
 | 
			
		||||
 | 
			
		||||
@ -25,6 +25,49 @@ The abstract from the paper is the following:
 | 
			
		||||
This model was contributed by [nielsr](https://huggingface.co/nielsr).
 | 
			
		||||
The original code can be found [here](https://github.com/facebookresearch/dinov2).
 | 
			
		||||
 | 
			
		||||
## Usage tips
 | 
			
		||||
 | 
			
		||||
The model can be traced using `torch.jit.trace`, which leverages JIT compilation to optimize the model and make it faster to run. Note that this still produces some mismatched elements; the difference between the original model and the traced model is on the order of 1e-4.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
import torch
 | 
			
		||||
from transformers import AutoImageProcessor, AutoModel
 | 
			
		||||
from PIL import Image
 | 
			
		||||
import requests
 | 
			
		||||
 | 
			
		||||
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
 | 
			
		||||
image = Image.open(requests.get(url, stream=True).raw)
 | 
			
		||||
 | 
			
		||||
processor = AutoImageProcessor.from_pretrained('facebook/dinov2-base')
 | 
			
		||||
model = AutoModel.from_pretrained('facebook/dinov2-base')
 | 
			
		||||
 | 
			
		||||
inputs = processor(images=image, return_tensors="pt")
 | 
			
		||||
outputs = model(**inputs)
 | 
			
		||||
last_hidden_states = outputs[0]
 | 
			
		||||
 | 
			
		||||
# We have to force return_dict=False for tracing
 | 
			
		||||
model.config.return_dict = False
 | 
			
		||||
 | 
			
		||||
with torch.no_grad():
 | 
			
		||||
    traced_model = torch.jit.trace(model, [inputs.pixel_values])
 | 
			
		||||
    traced_outputs = traced_model(inputs.pixel_values)
 | 
			
		||||
 | 
			
		||||
print((last_hidden_states - traced_outputs[0]).abs().max())
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Resources
 | 
			
		||||
 | 
			
		||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DINOv2.
 | 
			
		||||
 | 
			
		||||
- Demo notebooks for DINOv2 can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/DINOv2). 🌎
 | 
			
		||||
 | 
			
		||||
<PipelineTag pipeline="image-classification"/>
 | 
			
		||||
 | 
			
		||||
- [`Dinov2ForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
 | 
			
		||||
- See also: [Image classification task guide](../tasks/image_classification)
 | 
			
		||||
 | 
			
		||||
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
 | 
			
		||||
 | 
			
		||||
## Dinov2Config
 | 
			
		||||
 | 
			
		||||
[[autodoc]] Dinov2Config
 | 
			
		||||
 | 
			
		||||
@ -32,7 +32,7 @@ rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
The DistilBERT model was proposed in the blog post [Smaller, faster, cheaper, lighter: Introducing DistilBERT, a
 | 
			
		||||
distilled version of BERT](https://medium.com/huggingface/distilbert-8cf3380435b5), and the paper [DistilBERT, a
 | 
			
		||||
distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108). DistilBERT is a
 | 
			
		||||
small, fast, cheap and light Transformer model trained by distilling BERT base. It has 40% fewer parameters than
*bert-base-uncased*, runs 60% faster while preserving over 95% of BERT's performance as measured on the GLUE language
 | 
			
		||||
understanding benchmark.
 | 
			
		||||
@ -153,7 +153,7 @@ To load and run a model using Flash Attention 2, refer to the snippet below:
 | 
			
		||||
>>> device = "cuda" # the device to load the model onto
 | 
			
		||||
 | 
			
		||||
>>> tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
 | 
			
		||||
>>> model = AutoModel.from_pretrained("distilbert-base-uncased", torch_dtype=torch.float16, attn_implementation="flash_attention_2")
 | 
			
		||||
 | 
			
		||||
>>> text = "Replace me by any text you'd like."
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -32,6 +32,21 @@ alt="drawing" width="600"/>
 | 
			
		||||
 | 
			
		||||
This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/isl-org/DPT).
 | 
			
		||||
 | 
			
		||||
## Usage tips
 | 
			
		||||
 | 
			
		||||
DPT is compatible with the [`AutoBackbone`] class. This allows you to use the DPT framework with various computer vision backbones available in the library, such as [`VitDetBackbone`] or [`Dinov2Backbone`]. One can create it as follows:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import Dinov2Config, DPTConfig, DPTForDepthEstimation
 | 
			
		||||
 | 
			
		||||
# initialize with a Transformer-based backbone such as DINOv2
 | 
			
		||||
# in that case, we also specify `reshape_hidden_states=False` to get feature maps of shape (batch_size, num_channels, height, width)
 | 
			
		||||
backbone_config = Dinov2Config.from_pretrained("facebook/dinov2-base", out_features=["stage1", "stage2", "stage3", "stage4"], reshape_hidden_states=False)
 | 
			
		||||
 | 
			
		||||
config = DPTConfig(backbone_config=backbone_config)
 | 
			
		||||
model = DPTForDepthEstimation(config=config)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Resources
 | 
			
		||||
 | 
			
		||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DPT.
 | 
			
		||||
 | 
			
		||||
@ -59,7 +59,7 @@ To load and run a model using Flash Attention 2, refer to the snippet below:
 | 
			
		||||
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
>>> device = "cuda" # the device to load the model onto
 | 
			
		||||
 | 
			
		||||
>>> model = AutoModelForCausalLM.from_pretrained("bigcode/gpt_bigcode-santacoder", torch_dtype=torch.float16, attn_implementation="flash_attention_2")
 | 
			
		||||
>>> tokenizer = AutoTokenizer.from_pretrained("bigcode/gpt_bigcode-santacoder")
 | 
			
		||||
 | 
			
		||||
>>> prompt = "def hello_world():"
 | 
			
		||||
 | 
			
		||||
@ -56,13 +56,9 @@ The `generate()` method can be used to generate text using GPT Neo model.
 | 
			
		||||
 | 
			
		||||
## Combining GPT-Neo and Flash Attention 2
 | 
			
		||||
 | 
			
		||||
First, make sure to install the latest version of Flash Attention 2 to include the sliding window attention feature, and make sure your hardware is compatible with Flash-Attention 2. More details are available [here](https://huggingface.co/docs/transformers/perf_infer_gpu_one#flashattention-2) concerning the installation.

Make sure as well to load your model in half-precision (e.g. `torch.float16`).
 | 
			
		||||
 | 
			
		||||
To load and run a model using Flash Attention 2, refer to the snippet below:
 | 
			
		||||
 | 
			
		||||
@ -71,7 +67,7 @@ To load and run a model using Flash Attention 2, refer to the snippet below:
 | 
			
		||||
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
>>> device = "cuda" # the device to load the model onto
 | 
			
		||||
 | 
			
		||||
>>> model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B", torch_dtype=torch.float16, attn_implementation="flash_attention_2")
 | 
			
		||||
>>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B")
 | 
			
		||||
 | 
			
		||||
>>> prompt = "def hello_world():"
 | 
			
		||||
 | 
			
		||||
@ -61,6 +61,40 @@ The `generate()` method can be used to generate text using GPT Neo model.
 | 
			
		||||
>>> gen_text = tokenizer.batch_decode(gen_tokens)[0]
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Using Flash Attention 2
 | 
			
		||||
 | 
			
		||||
Flash Attention 2 is a faster, optimized version of the attention computation used by the model.
 | 
			
		||||
 | 
			
		||||
### Installation 
 | 
			
		||||
 | 
			
		||||
First, check whether your hardware is compatible with Flash Attention 2. The latest list of compatible hardware can be found in the [official documentation](https://github.com/Dao-AILab/flash-attention#installation-and-features). If your hardware is not compatible with Flash Attention 2, you can still benefit from attention kernel optimisations through Better Transformer support covered [above](https://huggingface.co/docs/transformers/main/en/model_doc/bark#using-better-transformer).
 | 
			
		||||
 | 
			
		||||
Next, [install](https://github.com/Dao-AILab/flash-attention#installation-and-features) the latest version of Flash Attention 2:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
pip install -U flash-attn --no-build-isolation
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Usage
 | 
			
		||||
 | 
			
		||||
To load a model using Flash Attention 2, we can pass the argument `attn_implementation="flash_attention_2"` to [`.from_pretrained`](https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.PreTrainedModel.from_pretrained). We'll also load the model in half-precision (e.g. `torch.float16`), since it results in almost no degradation in quality but significantly lower memory usage and faster inference:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
>>> import torch
>>> from transformers import GPTNeoXForCausalLM, GPTNeoXTokenizerFast

>>> device = "cuda"  # the device to load the model onto
>>> model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b", torch_dtype=torch.float16, attn_implementation="flash_attention_2").to(device)
...
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
### Expected speedups
 | 
			
		||||
 | 
			
		||||
Below is an expected speedup diagram that compares pure inference time between the native implementation in transformers using the `stockmark/gpt-neox-japanese-1.4b` checkpoint and the Flash Attention 2 version of the model, using a sequence length of 2048.
 | 
			
		||||
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/gpt-neox-1.8b-speedup.jpg">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
## Resources
 | 
			
		||||
 | 
			
		||||
- [Causal language modeling task guide](../tasks/language_modeling)
 | 
			
		||||
 | 
			
		||||
@ -50,6 +50,9 @@ come in several checkpoints they each contain a part of each weight of the model
 | 
			
		||||
 | 
			
		||||
- The LLaMA tokenizer is a BPE model based on [sentencepiece](https://github.com/google/sentencepiece). One quirk of sentencepiece is that when decoding a sequence, if the first token is the start of the word (e.g. "Banana"), the tokenizer does not prepend the prefix space to the string.
 | 
			
		||||
 | 
			
		||||
This model was contributed by [zphang](https://huggingface.co/zphang) with contributions from [BlackSamorez](https://huggingface.co/BlackSamorez). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama). The Flax version of the implementation was contributed by [afmck](https://huggingface.co/afmck) with the code in the implementation based on Hugging Face's Flax GPT-Neo.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
Based on the original LLaMA model, Meta AI has released some follow-up works:
 | 
			
		||||
 | 
			
		||||
- **Llama2**: Llama2 is an improved version of Llama with some architectural tweaks (Grouped Query Attention), and is pre-trained on 2 trillion tokens. Refer to the documentation of Llama2 which can be found [here](llama2).
 | 
			
		||||
@ -112,3 +115,13 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h
 | 
			
		||||
 | 
			
		||||
[[autodoc]] LlamaForSequenceClassification
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
## FlaxLlamaModel
 | 
			
		||||
 | 
			
		||||
[[autodoc]] FlaxLlamaModel
 | 
			
		||||
    - __call__
 | 
			
		||||
 | 
			
		||||
## FlaxLlamaForCausalLM
 | 
			
		||||
 | 
			
		||||
[[autodoc]] FlaxLlamaForCausalLM
 | 
			
		||||
    - __call__
 | 
			
		||||
 | 
			
		||||
							
								
								
									
80  docs/source/en/model_doc/llava.md  Normal file
@ -0,0 +1,80 @@
 | 
			
		||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 | 
			
		||||
the License. You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 | 
			
		||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 | 
			
		||||
specific language governing permissions and limitations under the License.
 | 
			
		||||
 | 
			
		||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
 | 
			
		||||
rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
-->
 | 
			
		||||
 | 
			
		||||
# LLaVa
 | 
			
		||||
 | 
			
		||||
## Overview
 | 
			
		||||
 | 
			
		||||
LLaVa is an open-source chatbot trained by fine-tuning LLaMA/Vicuna on GPT-generated multimodal instruction-following data. It is an auto-regressive language model based on the transformer architecture. In other words, it is a multimodal version of LLMs fine-tuned for chat/instructions.
 | 
			
		||||
 | 
			
		||||
The LLaVa model was proposed in [Visual Instruction Tuning](https://arxiv.org/abs/2304.08485) and improved in [Improved Baselines with Visual Instruction Tuning](https://arxiv.org/pdf/2310.03744) by Haotian Liu, Chunyuan Li, Yuheng Li and Yong Jae Lee.
 | 
			
		||||
 | 
			
		||||
The abstract from the paper is the following:
 | 
			
		||||
 | 
			
		||||
*Large multimodal models (LMM) have recently shown encouraging progress with visual instruction tuning. In this note, we show that the fully-connected vision-language cross-modal connector in LLaVA is surprisingly powerful and data-efficient. With simple modifications to LLaVA, namely, using CLIP-ViT-L-336px with an MLP projection and adding academic-task-oriented VQA data with simple response formatting prompts, we establish stronger baselines that achieve state-of-the-art across 11 benchmarks. Our final 13B checkpoint uses merely 1.2M publicly available data, and finishes full training in ∼1 day on a single 8-A100 node. We hope this can make state-of-the-art LMM research more accessible. Code and model will be publicly available*
 | 
			
		||||
 | 
			
		||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/llava_architecture.jpg"
 | 
			
		||||
alt="drawing" width="600"/>
 | 
			
		||||
 | 
			
		||||
<small> LLaVa architecture. Taken from the <a href="https://arxiv.org/abs/2304.08485">original paper.</a> </small>
 | 
			
		||||
 | 
			
		||||
This model was contributed by [ArthurZ](https://huggingface.co/ArthurZ) and [ybelkada](https://huggingface.co/ybelkada).
 | 
			
		||||
The original code can be found [here](https://github.com/haotian-liu/LLaVA/tree/main/llava).
 | 
			
		||||
 | 
			
		||||
## Usage tips
 | 
			
		||||
 | 
			
		||||
- We advise users to use `padding_side="left"` for batched generation, as it leads to more accurate results. Simply make sure to set `processor.tokenizer.padding_side = "left"` before generating.
 | 
			
		||||
 | 
			
		||||
- Note that the model has not been explicitly trained to process multiple images in the same prompt; although this is technically possible, you may experience inaccurate results.
 | 
			
		||||
 | 
			
		||||
- For better results, we recommend prompting the model with the correct prompt format (a full usage sketch is shown after the prompt templates below):
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
"USER: <image>\n<prompt>ASSISTANT:"
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
For a multi-turn conversation:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
"USER: <image>\n<prompt1>ASSISTANT: <answer1>USER: <prompt2>ASSISTANT: <answer2>USER: <prompt3>ASSISTANT:"
 | 
			
		||||
```
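Putting the tips above together, here is a minimal, hedged sketch of single-image generation with the recommended prompt format. The checkpoint name (`llava-hf/llava-1.5-7b-hf`) and the example image URL are illustrative assumptions, not prescriptions from this page:

```python
import requests
import torch
from PIL import Image
from transformers import AutoProcessor, LlavaForConditionalGeneration

model_id = "llava-hf/llava-1.5-7b-hf"  # assumed checkpoint name, for illustration only
processor = AutoProcessor.from_pretrained(model_id)
model = LlavaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

# recommended for batched generation (see the usage tip above)
processor.tokenizer.padding_side = "left"

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # illustrative image
image = Image.open(requests.get(url, stream=True).raw)
prompt = "USER: <image>\nWhat is shown in this image? ASSISTANT:"

inputs = processor(text=prompt, images=image, return_tensors="pt").to("cuda", torch.float16)
generated_ids = model.generate(**inputs, max_new_tokens=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```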
 | 
			
		||||
 | 
			
		||||
### Using Flash Attention 2
 | 
			
		||||
 | 
			
		||||
Flash Attention 2 is an even faster, optimized version of the model's attention implementation. Please refer to the [Flash Attention 2 section of the performance docs](https://huggingface.co/docs/transformers/perf_infer_gpu_one).
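As a quick, hedged sketch (the checkpoint name is an assumption for illustration; the `flash-attn` package and compatible hardware are required), loading LLaVa with Flash Attention 2 follows the same pattern as other models:

```python
import torch
from transformers import LlavaForConditionalGeneration

model = LlavaForConditionalGeneration.from_pretrained(
    "llava-hf/llava-1.5-7b-hf",  # assumed checkpoint name
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",
).to("cuda")
```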
 | 
			
		||||
 | 
			
		||||
## Resources
 | 
			
		||||
 | 
			
		||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LLaVa.
 | 
			
		||||
 | 
			
		||||
<PipelineTag pipeline="image-to-text"/>
 | 
			
		||||
 | 
			
		||||
- A [Google Colab demo](https://colab.research.google.com/drive/1qsl6cd2c8gGtEW1xV5io7S8NHh-Cp1TV?usp=sharing) on how to run Llava on a free-tier Google colab instance leveraging 4-bit inference.
 | 
			
		||||
- A [similar notebook](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LLaVa/Inference_with_LLaVa_for_multimodal_generation.ipynb) showcasing batched inference. 🌎
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## LlavaConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] LlavaConfig
 | 
			
		||||
 | 
			
		||||
## LlavaProcessor
 | 
			
		||||
 | 
			
		||||
[[autodoc]] LlavaProcessor
 | 
			
		||||
 | 
			
		||||
## LlavaForConditionalGeneration
 | 
			
		||||
 | 
			
		||||
[[autodoc]] LlavaForConditionalGeneration
 | 
			
		||||
    - forward
 | 
			
		||||
							
								
								
									
68  docs/source/en/model_doc/madlad-400.md  Normal file
@ -0,0 +1,68 @@
 | 
			
		||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 | 
			
		||||
the License. You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 | 
			
		||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 | 
			
		||||
specific language governing permissions and limitations under the License.
 | 
			
		||||
 | 
			
		||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
 | 
			
		||||
rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
-->
 | 
			
		||||
 | 
			
		||||
# MADLAD-400
 | 
			
		||||
 | 
			
		||||
## Overview
 | 
			
		||||
 | 
			
		||||
MADLAD-400 models were released in the paper [MADLAD-400: A Multilingual And Document-Level Large Audited Dataset](https://arxiv.org/abs/2309.04662).
 | 
			
		||||
 | 
			
		||||
The abstract from the paper is the following: 
 | 
			
		||||
 | 
			
		||||
*We introduce MADLAD-400, a manually audited, general domain 3T token monolingual dataset based on CommonCrawl, spanning 419 languages. We discuss the limitations revealed by self-auditing MADLAD-400, and the role data auditing had in the dataset creation process. We then train and release a 10.7B-parameter multilingual machine translation model on 250 billion tokens covering over 450 languages using publicly available data, and find that it is competitive with models that are significantly larger, and report the results on different domains. In addition, we train a 8B-parameter language model, and assess the results on few-shot translation. We make the baseline models available to the research community.*
 | 
			
		||||
 | 
			
		||||
This model was added by [Juarez Bochi](https://huggingface.co/jbochi). The original checkpoints can be found [here](https://github.com/google-research/google-research/tree/master/madlad_400). 
 | 
			
		||||
 | 
			
		||||
This is a machine translation model that supports many low-resource languages, and that is competitive with models that are significantly larger.
 | 
			
		||||
 | 
			
		||||
One can directly use MADLAD-400 weights without finetuning the model:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
 | 
			
		||||
 | 
			
		||||
>>> model = AutoModelForSeq2SeqLM.from_pretrained("google/madlad400-3b-mt")
 | 
			
		||||
>>> tokenizer = AutoTokenizer.from_pretrained("google/madlad400-3b-mt")
 | 
			
		||||
 | 
			
		||||
>>> inputs = tokenizer("<2pt> I love pizza!", return_tensors="pt")
 | 
			
		||||
>>> outputs = model.generate(**inputs)
 | 
			
		||||
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
 | 
			
		||||
['Eu amo pizza!']
 | 
			
		||||
```
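The first token of the input selects the target language. As a hedged illustration (reusing the `model` and `tokenizer` loaded above; the exact output text will depend on the checkpoint), switching the prefix from `<2pt>` to `<2de>` requests German instead of Portuguese:

```python
>>> # the target language is chosen with a "<2xx>" prefix token; "<2de>" is German (illustrative)
>>> inputs = tokenizer("<2de> I love pizza!", return_tensors="pt")
>>> outputs = model.generate(**inputs)
>>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True))  # a German translation of the sentence
```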
 | 
			
		||||
 | 
			
		||||
Google has released the following variants:
 | 
			
		||||
 | 
			
		||||
- [google/madlad400-3b-mt](https://huggingface.co/google/madlad400-3b-mt)
 | 
			
		||||
 | 
			
		||||
- [google/madlad400-7b-mt](https://huggingface.co/google/madlad400-7b-mt)
 | 
			
		||||
 | 
			
		||||
- [google/madlad400-7b-mt-bt](https://huggingface.co/google/madlad400-7b-mt-bt)
 | 
			
		||||
 | 
			
		||||
- [google/madlad400-10b-mt](https://huggingface.co/google/madlad400-10b-mt)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
<Tip>
 | 
			
		||||
 | 
			
		||||
Refer to [T5's documentation page](t5) for all API references, code examples, and notebooks. For more details regarding training and evaluation of MADLAD-400, refer to the model card.
 | 
			
		||||
 | 
			
		||||
</Tip>
 | 
			
		||||
@ -99,7 +99,7 @@ To load and run a model using Flash Attention 2, refer to the snippet below:
 | 
			
		||||
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
>>> device = "cuda" # the device to load the model onto
 | 
			
		||||
 | 
			
		||||
>>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", torch_dtype=torch.float16, use_flash_attention_2=True)
 | 
			
		||||
>>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", torch_dtype=torch.float16, attn_implementation="flash_attention_2")
 | 
			
		||||
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
 | 
			
		||||
 | 
			
		||||
>>> prompt = "My favourite condiment is"
 | 
			
		||||
 | 
			
		||||
							
								
								
									
163  docs/source/en/model_doc/mixtral.md  Normal file
@ -0,0 +1,163 @@
 | 
			
		||||
<!--Copyright 2023 Mistral AI and The HuggingFace Team. All rights reserved.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 | 
			
		||||
the License. You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 | 
			
		||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 | 
			
		||||
specific language governing permissions and limitations under the License.
 | 
			
		||||
 | 
			
		||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
 | 
			
		||||
rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
-->
 | 
			
		||||
 | 
			
		||||
# Mixtral
 | 
			
		||||
 | 
			
		||||
## Overview
 | 
			
		||||
 | 
			
		||||
Mixtral-8x7B is Mistral AI's second Large Language Model (LLM). 
 | 
			
		||||
 | 
			
		||||
The Mixtral model was proposed by the [Mistral AI](https://mistral.ai/) team.
 | 
			
		||||
 | 
			
		||||
It was introduced in the [Mixtral of Experts blogpost](https://mistral.ai/news/mixtral-of-experts/) with the following introduction:
 | 
			
		||||
 | 
			
		||||
*Today, the team is proud to release Mixtral 8x7B, a high-quality sparse mixture of experts models (SMoE) with open weights. Licensed under Apache 2.0. Mixtral outperforms Llama 2 70B on most benchmarks with 6x faster inference. It is the strongest open-weight model with a permissive license and the best model overall regarding cost/performance trade-offs. In particular, it matches or outperforms GPT3.5 on most standard benchmarks.*
 | 
			
		||||
 | 
			
		||||
Tips:
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
- The model needs to be converted using the [conversion script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/mixtral/convert_mixtral_weights_to_hf.py).
 | 
			
		||||
- If the model is quantized to 4bits, a single A100 is enough to fit the entire 45B model.
 | 
			
		||||
 | 
			
		||||
This model was contributed by [Younes Belkada](https://huggingface.co/ybelkada) and [Arthur Zucker](https://huggingface.co/ArthurZ) .
 | 
			
		||||
The original code can be found [here](https://github.com/mistralai/mistral-src).
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
### Model Details
 | 
			
		||||
 | 
			
		||||
Mixtral-45B is a decoder-based LM with the following architectural choices:
 | 
			
		||||
 | 
			
		||||
* Mixtral is a Mixture of Experts (MoE) model with 8 experts per MLP, for a total of 45B parameters, but the compute required is the same as for a 14B model. This is because even though each expert has to be loaded in RAM (a 70B-like RAM requirement), each token from the hidden states is dispatched twice (top-2 routing), and thus the compute (the operations required at each forward pass) is just 2 x sequence_length. A minimal sketch of this top-2 routing is shown right below.
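The following is a minimal, self-contained sketch of top-2 expert routing, meant only to illustrate the idea described above; it is not the actual Mixtral implementation, and all sizes are made up for the example:

```python
import torch

num_experts, top_k, hidden_dim, seq_len = 8, 2, 16, 4  # toy sizes for illustration

hidden_states = torch.randn(seq_len, hidden_dim)
router = torch.nn.Linear(hidden_dim, num_experts, bias=False)
experts = torch.nn.ModuleList(
    [torch.nn.Linear(hidden_dim, hidden_dim) for _ in range(num_experts)]
)

# Each token picks its top-2 experts; only those experts run for that token, so the
# per-token compute stays constant no matter how many total parameters are loaded.
router_probs = router(hidden_states).softmax(dim=-1)         # (seq_len, num_experts)
weights, selected = torch.topk(router_probs, top_k, dim=-1)  # (seq_len, top_k)
weights = weights / weights.sum(dim=-1, keepdim=True)        # renormalize over the top-2

output = torch.zeros_like(hidden_states)
for token_idx in range(seq_len):
    for k in range(top_k):
        expert = experts[int(selected[token_idx, k])]
        output[token_idx] += weights[token_idx, k] * expert(hidden_states[token_idx])
```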
 | 
			
		||||
 | 
			
		||||
The following implementation details are shared with Mistral AI's first model [mistral](~models/doc/mistral):
 | 
			
		||||
* Sliding Window Attention - Trained with 8k context length and fixed cache size, with a theoretical attention span of 128K tokens
 | 
			
		||||
* GQA (Grouped Query Attention) - allowing faster inference and lower cache size.
 | 
			
		||||
* Byte-fallback BPE tokenizer - ensures that characters are never mapped to out of vocabulary tokens.
 | 
			
		||||
 | 
			
		||||
They also provide an instruction fine-tuned model, `mistralai/Mixtral-8x7B-Instruct-v0.1`, which can be used for chat-based inference.
 | 
			
		||||
 | 
			
		||||
For more details please read our [release blog post](https://mistral.ai/news/mixtral-of-experts/)
 | 
			
		||||
 | 
			
		||||
### License
 | 
			
		||||
 | 
			
		||||
`Mixtral-8x7B` is released under the Apache 2.0 license.
 | 
			
		||||
 | 
			
		||||
## Usage tips
 | 
			
		||||
 | 
			
		||||
`Mixtral-8x7B` can be found on the [Huggingface Hub](https://huggingface.co/mistralai)
 | 
			
		||||
 | 
			
		||||
These ready-to-use checkpoints can be downloaded and used via the HuggingFace Hub:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
>>> device = "cuda" # the device to load the model onto
 | 
			
		||||
 | 
			
		||||
>>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
 | 
			
		||||
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
 | 
			
		||||
 | 
			
		||||
>>> prompt = "My favourite condiment is"
 | 
			
		||||
 | 
			
		||||
>>> model_inputs = tokenizer([prompt], return_tensors="pt").to(device)
 | 
			
		||||
>>> model.to(device)
 | 
			
		||||
 | 
			
		||||
>>> generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True)
 | 
			
		||||
>>> tokenizer.batch_decode(generated_ids)[0]
 | 
			
		||||
"The expected output"
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
To use the raw checkpoints with HuggingFace you can use the `convert_mixtral_weights_to_hf.py` script to convert them to the HuggingFace format:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
python src/transformers/models/mixtral/convert_mixtral_weights_to_hf.py \
 | 
			
		||||
    --input_dir /path/to/downloaded/mistral/weights --output_dir /output/path
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
You can then load the converted model from the `output/path`:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import MixtralForCausalLM, LlamaTokenizer
 | 
			
		||||
 | 
			
		||||
tokenizer = LlamaTokenizer.from_pretrained("/output/path")
 | 
			
		||||
model = MixtralForCausalLM.from_pretrained("/output/path")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Combining Mixtral and Flash Attention 2
 | 
			
		||||
 | 
			
		||||
First, make sure to install the latest version of Flash Attention 2 to include the sliding window attention feature.
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
pip install -U flash-attn --no-build-isolation
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Also make sure that your hardware is compatible with Flash Attention 2. Read more about it in the official documentation of the [`flash-attn`](https://github.com/Dao-AILab/flash-attention) repository. Also make sure to load your model in half-precision (e.g. `torch.float16`).
 | 
			
		||||
 | 
			
		||||
To load and run a model using Flash Attention 2, refer to the snippet below:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
>>> import torch
 | 
			
		||||
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
>>> device = "cuda" # the device to load the model onto
 | 
			
		||||
 | 
			
		||||
>>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mixtral-8x7B-v0.1", torch_dtype=torch.float16, attn_implementation="flash_attention_2")
 | 
			
		||||
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
 | 
			
		||||
 | 
			
		||||
>>> prompt = "My favourite condiment is"
 | 
			
		||||
 | 
			
		||||
>>> model_inputs = tokenizer([prompt], return_tensors="pt").to(device)
 | 
			
		||||
>>> model.to(device)
 | 
			
		||||
 | 
			
		||||
>>> generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True)
 | 
			
		||||
>>> tokenizer.batch_decode(generated_ids)[0]
 | 
			
		||||
"The expected output"
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Expected speedups
 | 
			
		||||
 | 
			
		||||
Below is an expected speedup diagram that compares pure inference time between the native implementation in transformers using the `mistralai/Mixtral-8x7B-v0.1` checkpoint and the Flash Attention 2 version of the model.
 | 
			
		||||
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/mixtral-7b-inference-large-seqlen.png">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
### Sliding window Attention
 | 
			
		||||
 | 
			
		||||
The current implementation supports the sliding window attention mechanism and memory efficient cache management. 
 | 
			
		||||
To enable sliding window attention, just make sure to have a `flash-attn` version that is compatible with sliding window attention (`>=2.3.0`). 
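If you want to double-check the installed version before relying on sliding window attention, a small hedged check (assuming `flash-attn` is installed and importable as `flash_attn`) could look like:

```python
import flash_attn
from packaging import version

# sliding window attention requires flash-attn >= 2.3.0
assert version.parse(flash_attn.__version__) >= version.parse("2.3.0")
```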
 | 
			
		||||
 | 
			
		||||
The Flash Attention 2 model also uses a more memory-efficient cache slicing mechanism. As recommended by the official implementation of the Mistral model, which uses a rolling cache mechanism, we keep the cache size fixed (`self.config.sliding_window`), support batched generation only for `padding_side="left"`, and use the absolute position of the current token to compute the positional embedding.
 | 
			
		||||
 | 
			
		||||
## The Mistral Team
 | 
			
		||||
 | 
			
		||||
Albert Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, William El Sayed.
 | 
			
		||||
 | 
			
		||||
## MixtralConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] MixtralConfig
 | 
			
		||||
 | 
			
		||||
## MixtralModel
 | 
			
		||||
 | 
			
		||||
[[autodoc]] MixtralModel
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
## MixtralForCausalLM
 | 
			
		||||
 | 
			
		||||
[[autodoc]] MixtralForCausalLM
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
## MixtralForSequenceClassification
 | 
			
		||||
 | 
			
		||||
[[autodoc]] MixtralForSequenceClassification
 | 
			
		||||
    - forward
 | 
			
		||||
@ -46,6 +46,16 @@ This model was contributed by [sanchit-gandhi](https://huggingface.co/sanchit-ga
 | 
			
		||||
[here](https://github.com/facebookresearch/audiocraft). The pre-trained checkpoints can be found on the
 | 
			
		||||
[Hugging Face Hub](https://huggingface.co/models?sort=downloads&search=facebook%2Fmusicgen-).
 | 
			
		||||
 | 
			
		||||
## Usage tips
 | 
			
		||||
 | 
			
		||||
- After downloading the original checkpoints from [here](https://github.com/facebookresearch/audiocraft/blob/main/docs/MUSICGEN.md#importing--exporting-models), you can convert them using the **conversion script** available at
 | 
			
		||||
`src/transformers/models/musicgen/convert_musicgen_transformers.py` with the following command:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
python src/transformers/models/musicgen/convert_musicgen_transformers.py \
 | 
			
		||||
    --checkpoint small --pytorch_dump_folder /output/path --safe_serialization 
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Generation
 | 
			
		||||
 | 
			
		||||
MusicGen is compatible with two generation modes: greedy and sampling. In practice, sampling leads to significantly
 | 
			
		||||
 | 
			
		||||
@ -118,9 +118,9 @@ See example below for a translation from romanian to german:
 | 
			
		||||
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
 | 
			
		||||
 | 
			
		||||
>>> tokenizer = AutoTokenizer.from_pretrained(
 | 
			
		||||
...     "facebook/nllb-200-distilled-600M", use_auth_token=True, src_lang="ron_Latn"
 | 
			
		||||
...     "facebook/nllb-200-distilled-600M", token=True, src_lang="ron_Latn"
 | 
			
		||||
... )
 | 
			
		||||
>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M", use_auth_token=True)
 | 
			
		||||
>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M", token=True)
 | 
			
		||||
 | 
			
		||||
>>> article = "Şeful ONU spune că nu există o soluţie militară în Siria"
 | 
			
		||||
>>> inputs = tokenizer(article, return_tensors="pt")
 | 
			
		||||
 | 
			
		||||
@ -62,6 +62,55 @@ The resource should ideally demonstrate something new instead of duplicating an
 | 
			
		||||
 | 
			
		||||
- A blog post on [How 🤗 Accelerate runs very large models thanks to PyTorch](https://huggingface.co/blog/accelerate-large-models) with OPT.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## Combining OPT and Flash Attention 2
 | 
			
		||||
 | 
			
		||||
First, make sure to install the latest version of Flash Attention 2 to include the sliding window attention feature.
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
pip install -U flash-attn --no-build-isolation
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Also make sure that your hardware is compatible with Flash Attention 2. Read more about it in the official documentation of the flash-attn repository. Also make sure to load your model in half-precision (e.g. `torch.float16`).
 | 
			
		||||
 | 
			
		||||
To load and run a model using Flash Attention 2, refer to the snippet below:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
>>> import torch
 | 
			
		||||
>>> from transformers import OPTForCausalLM, GPT2Tokenizer
 | 
			
		||||
>>> device = "cuda" # the device to load the model onto
 | 
			
		||||
 | 
			
		||||
>>> model = OPTForCausalLM.from_pretrained("facebook/opt-350m", torch_dtype=torch.float16, attn_implementation="flash_attention_2")
 | 
			
		||||
>>> tokenizer = GPT2Tokenizer.from_pretrained("facebook/opt-350m")
 | 
			
		||||
 | 
			
		||||
>>> prompt = ("A chat between a curious human and the Statue of Liberty.\n\nHuman: What is your name?\nStatue: I am the "
 | 
			
		||||
              "Statue of Liberty.\nHuman: Where do you live?\nStatue: New York City.\nHuman: How long have you lived "
 | 
			
		||||
              "there?")
 | 
			
		||||
 | 
			
		||||
>>> model_inputs = tokenizer([prompt], return_tensors="pt").to(device)
 | 
			
		||||
>>> model.to(device)
 | 
			
		||||
 | 
			
		||||
>>> generated_ids = model.generate(**model_inputs, max_new_tokens=30, do_sample=False)
 | 
			
		||||
>>> tokenizer.batch_decode(generated_ids)[0]
 | 
			
		||||
'</s>A chat between a curious human and the Statue of Liberty.\n\nHuman: What is your name?\nStatue: I am the Statue of Liberty.\nHuman: Where do you live?\nStatue: New York City.\nHuman: How long have you lived there?\nStatue: I have lived here for about a year.\nHuman: What is your favorite place to eat?\nStatue: I love'
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Expected speedups
 | 
			
		||||
 | 
			
		||||
Below is an expected speedup diagram that compares pure inference time between the native implementation in transformers using `facebook/opt-2.7b` checkpoint and the Flash Attention 2 version of the model using two different sequence lengths.
 | 
			
		||||
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://user-images.githubusercontent.com/49240599/281101546-d2fca6d2-ee44-48f3-9534-ba8d5bee4531.png">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
Below is an expected speedup diagram that compares pure inference time between the native implementation in transformers using `facebook/opt-350m` checkpoint and the Flash Attention 2 version of the model using two different sequence lengths.
 | 
			
		||||
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://user-images.githubusercontent.com/49240599/281101682-d1144e90-0dbc-46f4-8fc8-c6206cb793c9.png">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## OPTConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] OPTConfig
 | 
			
		||||
 | 
			
		||||
@ -56,7 +56,7 @@ OWLv2 is, just like its predecessor [OWL-ViT](owlvit), a zero-shot text-conditio
 | 
			
		||||
 | 
			
		||||
>>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
 | 
			
		||||
>>> target_sizes = torch.Tensor([image.size[::-1]])
 | 
			
		||||
>>> # Convert outputs (bounding boxes and class logits) to COCO API
 | 
			
		||||
>>> # Convert outputs (bounding boxes and class logits) to Pascal VOC Format (xmin, ymin, xmax, ymax)
 | 
			
		||||
>>> results = processor.post_process_object_detection(outputs=outputs, target_sizes=target_sizes, threshold=0.1)
 | 
			
		||||
>>> i = 0  # Retrieve predictions for the first image for the corresponding text queries
 | 
			
		||||
>>> text = texts[i]
 | 
			
		||||
 | 
			
		||||
@ -55,7 +55,7 @@ OWL-ViT is a zero-shot text-conditioned object detection model. OWL-ViT uses [CL
 | 
			
		||||
 | 
			
		||||
>>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
 | 
			
		||||
>>> target_sizes = torch.Tensor([image.size[::-1]])
 | 
			
		||||
>>> # Convert outputs (bounding boxes and class logits) to COCO API
 | 
			
		||||
>>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
 | 
			
		||||
>>> results = processor.post_process_object_detection(outputs=outputs, target_sizes=target_sizes, threshold=0.1)
 | 
			
		||||
>>> i = 0  # Retrieve predictions for the first image for the corresponding text queries
 | 
			
		||||
>>> text = texts[i]
 | 
			
		||||
 | 
			
		||||
							
								
								
									
90  docs/source/en/model_doc/patchtsmixer.md  Normal file
@ -0,0 +1,90 @@
 | 
			
		||||
<!--Copyright 2023 IBM and HuggingFace Inc. team. All rights reserved.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 | 
			
		||||
the License. You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 | 
			
		||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 | 
			
		||||
specific language governing permissions and limitations under the License.
 | 
			
		||||
 | 
			
		||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
 | 
			
		||||
rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
-->
 | 
			
		||||
 | 
			
		||||
# PatchTSMixer
 | 
			
		||||
 | 
			
		||||
## Overview
 | 
			
		||||
 | 
			
		||||
The PatchTSMixer model was proposed in [TSMixer: Lightweight MLP-Mixer Model for Multivariate Time Series Forecasting](https://arxiv.org/pdf/2306.09364.pdf) by Vijay Ekambaram, Arindam Jati, Nam Nguyen, Phanwadee Sinthong and Jayant Kalagnanam.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
PatchTSMixer is a lightweight time-series modeling approach based on the MLP-Mixer architecture. In this HuggingFace implementation, we provide PatchTSMixer's capabilities to effortlessly facilitate lightweight mixing across patches, channels, and hidden features for effective multivariate time-series modeling. It also supports various attention mechanisms starting from simple gated attention to more complex self-attention blocks that can be customized accordingly. The model can be pretrained and subsequently used for various downstream tasks such as forecasting, classification and regression.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
The abstract from the paper is the following:
 | 
			
		||||
 | 
			
		||||
*TSMixer is a lightweight neural architecture exclusively composed of multi-layer perceptron (MLP) modules designed for multivariate forecasting and representation learning on patched time series. Our model draws inspiration from the success of MLP-Mixer models in computer vision. We demonstrate the challenges involved in adapting Vision MLP-Mixer for time series and introduce empirically validated components to enhance accuracy. This includes a novel design paradigm of attaching online reconciliation heads to the MLP-Mixer backbone, for explicitly modeling the time-series properties such as hierarchy and channel-correlations. We also propose a Hybrid channel modeling approach to effectively handle noisy channel interactions and generalization across diverse datasets, a common challenge in existing patch channel-mixing methods. Additionally, a simple gated attention mechanism is introduced in the backbone to prioritize important features. By incorporating these lightweight components, we significantly enhance the learning capability of simple MLP structures, outperforming complex Transformer models with minimal computing usage. Moreover, TSMixer's modular design enables compatibility with both supervised and masked self-supervised learning methods, making it a promising building block for time-series Foundation Models. TSMixer outperforms state-of-the-art MLP and Transformer models in forecasting by a considerable margin of 8-60%. It also outperforms the latest strong benchmarks of Patch-Transformer models (by 1-2%) with a significant reduction in memory and runtime (2-3X).*
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
This model was contributed by [ajati](https://huggingface.co/ajati), [vijaye12](https://huggingface.co/vijaye12), 
 | 
			
		||||
[gsinthong](https://huggingface.co/gsinthong), [namctin](https://huggingface.co/namctin),
 | 
			
		||||
[wmgifford](https://huggingface.co/wmgifford), [kashif](https://huggingface.co/kashif).
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## Sample usage 
 | 
			
		||||
```python
 | 
			
		||||
 | 
			
		||||
from transformers import PatchTSMixerConfig, PatchTSMixerForPrediction
from transformers import Trainer, TrainingArguments

# train_dataset, valid_dataset and test_dataset are user-provided datasets;
# the output directory below is just a placeholder
config = PatchTSMixerConfig(context_length=512, prediction_length=96)
model = PatchTSMixerForPrediction(config)
training_args = TrainingArguments(output_dir="patchtsmixer_output")
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=valid_dataset,
)
trainer.train()
results = trainer.evaluate(test_dataset)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Usage tips
 | 
			
		||||
 | 
			
		||||
The model can also be used for time series classification and time series regression. See the respective [`PatchTSMixerForTimeSeriesClassification`] and [`PatchTSMixerForRegression`] classes.
 | 
			
		||||
 | 
			
		||||
## PatchTSMixerConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PatchTSMixerConfig
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## PatchTSMixerModel
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PatchTSMixerModel
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## PatchTSMixerForPrediction
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PatchTSMixerForPrediction
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## PatchTSMixerForTimeSeriesClassification
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PatchTSMixerForTimeSeriesClassification
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## PatchTSMixerForPretraining
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PatchTSMixerForPretraining
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## PatchTSMixerForRegression
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PatchTSMixerForRegression
 | 
			
		||||
    - forward
 | 
			
		||||
							
								
								
									
65  docs/source/en/model_doc/patchtst.md  Normal file
@ -0,0 +1,65 @@
 | 
			
		||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 | 
			
		||||
the License. You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 | 
			
		||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 | 
			
		||||
specific language governing permissions and limitations under the License.
 | 
			
		||||
 | 
			
		||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
 | 
			
		||||
rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
-->
 | 
			
		||||
 | 
			
		||||
# PatchTST
 | 
			
		||||
 | 
			
		||||
## Overview
 | 
			
		||||
 | 
			
		||||
The PatchTST model was proposed in [A Time Series is Worth 64 Words: Long-term Forecasting with Transformers](https://arxiv.org/abs/2211.14730) by Yuqi Nie, Nam H. Nguyen, Phanwadee Sinthong and Jayant Kalagnanam.
 | 
			
		||||
 | 
			
		||||
At a high level the model vectorizes time series into patches of a given size and encodes the resulting sequence of vectors via a Transformer that then outputs the prediction length forecast via an appropriate head. The model is illustrated in the following figure:
 | 
			
		||||
 | 
			
		||||

 | 
			
		||||
 | 
			
		||||
The abstract from the paper is the following:
 | 
			
		||||
 | 
			
		||||
*We propose an efficient design of Transformer-based models for multivariate time series forecasting and self-supervised representation learning. It is based on two key components: (i) segmentation of time series into subseries-level patches which are served as input tokens to Transformer; (ii) channel-independence where each channel contains a single univariate time series that shares the same embedding and Transformer weights across all the series. Patching design naturally has three-fold benefit: local semantic information is retained in the embedding; computation and memory usage of the attention maps are quadratically reduced given the same look-back window; and the model can attend longer history. Our channel-independent patch time series Transformer (PatchTST) can improve the long-term forecasting accuracy significantly when compared with that of SOTA Transformer-based models. We also apply our model to self-supervised pre-training tasks and attain excellent fine-tuning performance, which outperforms supervised training on large datasets. Transferring of masked pre-trained representation on one dataset to others also produces SOTA forecasting accuracy.*
 | 
			
		||||
 | 
			
		||||
This model was contributed by [namctin](https://huggingface.co/namctin), [gsinthong](https://huggingface.co/gsinthong), [diepi](https://huggingface.co/diepi), [vijaye12](https://huggingface.co/vijaye12), [wmgifford](https://huggingface.co/wmgifford), and [kashif](https://huggingface.co/kashif). The original code can be found [here](https://github.com/yuqinie98/PatchTST).
 | 
			
		||||
 | 
			
		||||
## Usage tips
 | 
			
		||||
 | 
			
		||||
The model can also be used for time series classification and time series regression. See the respective [`PatchTSTForClassification`] and [`PatchTSTForRegression`] classes.
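As a quick orientation before the API reference, here is a minimal, hedged sketch of running a randomly initialized PatchTST forecaster on dummy data; the configuration values and tensor shapes are illustrative assumptions, not recommended settings:

```python
import torch
from transformers import PatchTSTConfig, PatchTSTForPrediction

# toy configuration: 3 input channels, 512 past steps, 96-step forecast horizon
config = PatchTSTConfig(num_input_channels=3, context_length=512, prediction_length=96)
model = PatchTSTForPrediction(config)

# dummy batch of past values: (batch_size, context_length, num_input_channels)
past_values = torch.randn(8, 512, 3)
outputs = model(past_values=past_values)
print(type(outputs))  # a prediction output holding the forecast for the next `prediction_length` steps
```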
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## PatchTSTConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PatchTSTConfig
 | 
			
		||||
 | 
			
		||||
## PatchTSTModel
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PatchTSTModel
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
## PatchTSTForPrediction
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PatchTSTForPrediction
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
## PatchTSTForClassification
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PatchTSTForClassification
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
## PatchTSTForPretraining
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PatchTSTForPretraining
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
## PatchTSTForRegression
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PatchTSTForRegression
 | 
			
		||||
    - forward
 | 
			
		||||
							
								
								
									
166  docs/source/en/model_doc/phi.md  Normal file
@ -0,0 +1,166 @@
 | 
			
		||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 | 
			
		||||
the License. You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 | 
			
		||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 | 
			
		||||
specific language governing permissions and limitations under the License.
 | 
			
		||||
 | 
			
		||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
 | 
			
		||||
rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
-->
 | 
			
		||||
 | 
			
		||||
# Phi
 | 
			
		||||
 | 
			
		||||
## Overview
 | 
			
		||||
 | 
			
		||||
The Phi-1 model was proposed in [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li.
 | 
			
		||||
 | 
			
		||||
The Phi-1.5 model was proposed in [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee.
 | 
			
		||||
 | 
			
		||||
### Summary
 | 
			
		||||
In Phi-1 and Phi-1.5 papers, the authors showed how important the quality of the data is in training relative to the model size.
 | 
			
		||||
They selected high quality "textbook" data alongside with synthetically generated data for training their small sized Transformer
 | 
			
		||||
based model Phi-1 with 1.3B parameters. Despite this small scale, phi-1 attains pass@1 accuracy 50.6% on HumanEval and 55.5% on MBPP.
 | 
			
		||||
They follow the same strategy for Phi-1.5 and created another 1.3B parameter model with performance on natural language tasks comparable 
 | 
			
		||||
to models 5x larger, and surpassing most non-frontier LLMs. Phi-1.5 exhibits many of the traits of much larger LLMs such as the ability 
 | 
			
		||||
to “think step by step” or perform some rudimentary in-context learning.
 | 
			
		||||
With these two experiments the authors successfully showed the huge impact of quality of training data when training machine learning models.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
The abstract from the Phi-1 paper is the following:
 | 
			
		||||
 | 
			
		||||
*We introduce phi-1, a new large language model for code, with significantly smaller size than
 | 
			
		||||
competing models: phi-1 is a Transformer-based model with 1.3B parameters, trained for 4 days on
 | 
			
		||||
8 A100s, using a selection of “textbook quality” data from the web (6B tokens) and synthetically
 | 
			
		||||
generated textbooks and exercises with GPT-3.5 (1B tokens). Despite this small scale, phi-1 attains
 | 
			
		||||
pass@1 accuracy 50.6% on HumanEval and 55.5% on MBPP. It also displays surprising emergent
 | 
			
		||||
properties compared to phi-1-base, our model before our finetuning stage on a dataset of coding
 | 
			
		||||
exercises, and phi-1-small, a smaller model with 350M parameters trained with the same pipeline as
 | 
			
		||||
phi-1 that still achieves 45% on HumanEval.*
 | 
			
		||||
 | 
			
		||||
The abstract from the Phi-1.5 paper is the following:
 | 
			
		||||
 | 
			
		||||
*We continue the investigation into the power of smaller Transformer-based language models as
 | 
			
		||||
initiated by TinyStories – a 10 million parameter model that can produce coherent English – and
 | 
			
		||||
the follow-up work on phi-1, a 1.3 billion parameter model with Python coding performance close
 | 
			
		||||
to the state-of-the-art. The latter work proposed to use existing Large Language Models (LLMs) to
 | 
			
		||||
generate “textbook quality” data as a way to enhance the learning process compared to traditional
 | 
			
		||||
web data. We follow the “Textbooks Are All You Need” approach, focusing this time on common
 | 
			
		||||
sense reasoning in natural language, and create a new 1.3 billion parameter model named phi-1.5,
 | 
			
		||||
with performance on natural language tasks comparable to models 5x larger, and surpassing most
 | 
			
		||||
non-frontier LLMs on more complex reasoning tasks such as grade-school mathematics and basic
 | 
			
		||||
coding. More generally, phi-1.5 exhibits many of the traits of much larger LLMs, both good –such
 | 
			
		||||
as the ability to “think step by step” or perform some rudimentary in-context learning– and bad,
 | 
			
		||||
including hallucinations and the potential for toxic and biased generations –encouragingly though, we
 | 
			
		||||
are seeing improvement on that front thanks to the absence of web data. We open-source phi-1.5 to
 | 
			
		||||
promote further research on these urgent topics.*
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
This model was contributed by [Susnato Dhar](https://huggingface.co/susnato).
 | 
			
		||||
The original code for Phi-1 and Phi-1.5 can be found [here](https://huggingface.co/microsoft/phi-1/blob/main/modeling_mixformer_sequential.py) and [here](https://huggingface.co/microsoft/phi-1_5/blob/main/modeling_mixformer_sequential.py) respectively.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## Usage tips
 | 
			
		||||
 | 
			
		||||
- This model is quite similar to `Llama`, with the main difference lying in [`PhiDecoderLayer`], where the [`PhiAttention`] and [`PhiMLP`] layers are used in a parallel configuration.
 | 
			
		||||
- The tokenizer used for this model is identical to the [`CodeGenTokenizer`].
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
### Example:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
>>> from transformers import PhiForCausalLM, AutoTokenizer
 | 
			
		||||
 | 
			
		||||
>>> # define the model and tokenizer.
 | 
			
		||||
>>> model = PhiForCausalLM.from_pretrained("susnato/phi-1_5_dev")
 | 
			
		||||
>>> tokenizer = AutoTokenizer.from_pretrained("susnato/phi-1_5_dev")
 | 
			
		||||
 | 
			
		||||
>>> # feel free to change the prompt to your liking.
 | 
			
		||||
>>> prompt = "If I were an AI that had just achieved"
 | 
			
		||||
 | 
			
		||||
>>> # apply the tokenizer.
 | 
			
		||||
>>> tokens = tokenizer(prompt, return_tensors="pt")
 | 
			
		||||
 | 
			
		||||
>>> # use the model to generate new tokens.
 | 
			
		||||
>>> generated_output = model.generate(**tokens, use_cache=True, max_new_tokens=10)
 | 
			
		||||
 | 
			
		||||
>>> tokenizer.batch_decode(generated_output)[0]
 | 
			
		||||
'If I were an AI that had just achieved a breakthrough in machine learning, I would be thrilled'
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## Combining Phi and Flash Attention 2
 | 
			
		||||
 | 
			
		||||
First, make sure to install the latest version of Flash Attention 2 to include the sliding window attention feature.
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
pip install -U flash-attn --no-build-isolation
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Also make sure that your hardware is compatible with Flash Attention 2. Read more about it in the official documentation of the flash-attn repository. Also make sure to load your model in half-precision (e.g. `torch.float16`).
 | 
			
		||||
 | 
			
		||||
To load and run a model using Flash Attention 2, refer to the snippet below:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
>>> import torch
 | 
			
		||||
>>> from transformers import PhiForCausalLM, AutoTokenizer
 | 
			
		||||
 | 
			
		||||
>>> # define the model and tokenizer and push the model and tokens to the GPU.
 | 
			
		||||
>>> model = PhiForCausalLM.from_pretrained("susnato/phi-1_5_dev", torch_dtype=torch.float16, attn_implementation="flash_attention_2").to("cuda")
 | 
			
		||||
>>> tokenizer = AutoTokenizer.from_pretrained("susnato/phi-1_5_dev")
 | 
			
		||||
 | 
			
		||||
>>> # feel free to change the prompt to your liking.
 | 
			
		||||
>>> prompt = "If I were an AI that had just achieved"
 | 
			
		||||
 | 
			
		||||
>>> # apply the tokenizer.
 | 
			
		||||
>>> tokens = tokenizer(prompt, return_tensors="pt").to("cuda")
 | 
			
		||||
 | 
			
		||||
>>> # use the model to generate new tokens.
 | 
			
		||||
>>> generated_output = model.generate(**tokens, use_cache=True, max_new_tokens=10)
 | 
			
		||||
 | 
			
		||||
>>> tokenizer.batch_decode(generated_output)[0]
 | 
			
		||||
'If I were an AI that had just achieved a breakthrough in machine learning, I would be thrilled'
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Expected speedups
 | 
			
		||||
Below is an expected speedup diagram that compares pure inference time between the native implementation in transformers using `susnato/phi-1_dev` checkpoint and the Flash Attention 2 version of the model using a sequence length of 2048.
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/phi_1_speedup_plot.jpg">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## PhiConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PhiConfig
 | 
			
		||||
 | 
			
		||||
<frameworkcontent>
 | 
			
		||||
<pt>
 | 
			
		||||
 | 
			
		||||
## PhiModel
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PhiModel
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
## PhiForCausalLM
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PhiForCausalLM
 | 
			
		||||
    - forward
 | 
			
		||||
    - generate
 | 
			
		||||
 | 
			
		||||
## PhiForSequenceClassification
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PhiForSequenceClassification
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
## PhiForTokenClassification
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PhiForTokenClassification
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
</pt>
 | 
			
		||||
</frameworkcontent>
 | 
			
		||||
@ -16,6 +16,8 @@ specific language governing permissions and limitations under the License.
 | 
			
		||||
 | 
			
		||||
The SeamlessM4T model was proposed in [SeamlessM4T — Massively Multilingual & Multimodal Machine Translation](https://dl.fbaipublicfiles.com/seamless/seamless_m4t_paper.pdf) by the Seamless Communication team from Meta AI.
 | 
			
		||||
 | 
			
		||||
This is the **version 1** release of the model. For the updated **version 2** release, refer to the [Seamless M4T v2 docs](https://huggingface.co/docs/transformers/main/model_doc/seamless_m4t_v2).
 | 
			
		||||
 | 
			
		||||
SeamlessM4T is a collection of models designed to provide high quality translation, allowing people from different linguistic communities to communicate effortlessly through speech and text.
 | 
			
		||||
 | 
			
		||||
SeamlessM4T enables multiple tasks without relying on separate models:
 | 
			
		||||
 | 
			
		||||
							
								
								
									
194  docs/source/en/model_doc/seamless_m4t_v2.md  Normal file
@ -0,0 +1,194 @@
 | 
			
		||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# SeamlessM4T-v2

## Overview

The SeamlessM4T-v2 model was proposed in [Seamless: Multilingual Expressive and Streaming Speech Translation](https://ai.meta.com/research/publications/seamless-multilingual-expressive-and-streaming-speech-translation/) by the Seamless Communication team from Meta AI.

SeamlessM4T-v2 is a collection of models designed to provide high quality translation, allowing people from different linguistic communities to communicate effortlessly through speech and text. It is an improvement on the [previous version](https://huggingface.co/docs/transformers/main/model_doc/seamless_m4t). For more details on the differences between v1 and v2, refer to section [Difference with SeamlessM4T-v1](#difference-with-seamlessm4t-v1).

SeamlessM4T-v2 enables multiple tasks without relying on separate models:

- Speech-to-speech translation (S2ST)
- Speech-to-text translation (S2TT)
- Text-to-speech translation (T2ST)
- Text-to-text translation (T2TT)
- Automatic speech recognition (ASR)

[`SeamlessM4Tv2Model`] can perform all the above tasks, but each task also has its own dedicated sub-model.

The abstract from the paper is the following:

*Recent advancements in automatic speech translation have dramatically expanded language coverage, improved multimodal capabilities, and enabled a wide range of tasks and functionalities. That said, large-scale automatic speech translation systems today lack key features that help machine-mediated communication feel seamless when compared to human-to-human dialogue. In this work, we introduce a family of models that enable end-to-end expressive and multilingual translations in a streaming fashion. First, we contribute an improved version of the massively multilingual and multimodal SeamlessM4T model—SeamlessM4T v2. This newer model, incorporating an updated UnitY2 framework, was trained on more low-resource language data. The expanded version of SeamlessAlign adds 114,800 hours of automatically aligned data for a total of 76 languages. SeamlessM4T v2 provides the foundation on which our two newest models, SeamlessExpressive and SeamlessStreaming, are initiated. SeamlessExpressive enables translation that preserves vocal styles and prosody. Compared to previous efforts in expressive speech research, our work addresses certain underexplored aspects of prosody, such as speech rate and pauses, while also preserving the style of one’s voice. As for SeamlessStreaming, our model leverages the Efficient Monotonic Multihead Attention (EMMA) mechanism to generate low-latency target translations without waiting for complete source utterances. As the first of its kind, SeamlessStreaming enables simultaneous speech-to-speech/text translation for multiple source and target languages. To understand the performance of these models, we combined novel and modified versions of existing automatic metrics to evaluate prosody, latency, and robustness. For human evaluations, we adapted existing protocols tailored for measuring the most relevant attributes in the preservation of meaning, naturalness, and expressivity. To ensure that our models can be used safely and responsibly, we implemented the first known red-teaming effort for multimodal machine translation, a system for the detection and mitigation of added toxicity, a systematic evaluation of gender bias, and an inaudible localized watermarking mechanism designed to dampen the impact of deepfakes. Consequently, we bring major components from SeamlessExpressive and SeamlessStreaming together to form Seamless, the first publicly available system that unlocks expressive cross-lingual communication in real-time. In sum, Seamless gives us a pivotal look at the technical foundation needed to turn the Universal Speech Translator from a science fiction concept into a real-world technology. Finally, contributions in this work—including models, code, and a watermark detector—are publicly released and accessible at the link below.*

## Usage

In the following example, we'll load an Arabic audio sample and an English text sample and convert them into Russian speech and French text.

First, load the processor and a checkpoint of the model:

```python
>>> from transformers import AutoProcessor, SeamlessM4Tv2Model

>>> processor = AutoProcessor.from_pretrained("facebook/seamless-m4t-v2-large")
>>> model = SeamlessM4Tv2Model.from_pretrained("facebook/seamless-m4t-v2-large")
```

You can seamlessly use this model on text or on audio to generate either translated text or translated audio.

Here is how to use the processor to process text and audio:

```python
>>> # let's load an audio sample from an Arabic speech corpus
>>> from datasets import load_dataset
>>> dataset = load_dataset("arabic_speech_corpus", split="test", streaming=True)
>>> audio_sample = next(iter(dataset))["audio"]

>>> # now, process it
>>> audio_inputs = processor(audios=audio_sample["array"], return_tensors="pt")

>>> # now, process some English text as well
>>> text_inputs = processor(text="Hello, my dog is cute", src_lang="eng", return_tensors="pt")
```

### Speech

[`SeamlessM4Tv2Model`] can *seamlessly* generate text or speech with few or no changes. Let's target Russian voice translation:

```python
>>> audio_array_from_text = model.generate(**text_inputs, tgt_lang="rus")[0].cpu().numpy().squeeze()
>>> audio_array_from_audio = model.generate(**audio_inputs, tgt_lang="rus")[0].cpu().numpy().squeeze()
```

With basically the same code, we've translated English text and Arabic speech into Russian speech samples.

### Text

Similarly, you can generate translated text from audio files or from text with the same model. You only have to pass `generate_speech=False` to [`SeamlessM4Tv2Model.generate`].
This time, let's translate to French.

```python
>>> # from audio
>>> output_tokens = model.generate(**audio_inputs, tgt_lang="fra", generate_speech=False)
>>> translated_text_from_audio = processor.decode(output_tokens[0].tolist()[0], skip_special_tokens=True)

>>> # from text
>>> output_tokens = model.generate(**text_inputs, tgt_lang="fra", generate_speech=False)
>>> translated_text_from_text = processor.decode(output_tokens[0].tolist()[0], skip_special_tokens=True)
```

### Tips

#### 1. Use dedicated models

[`SeamlessM4Tv2Model`] is the top-level Transformers model for generating speech and text, but you can also use dedicated models that perform the task without additional components, thus reducing the memory footprint.
For example, you can replace the audio-to-audio generation snippet with the model dedicated to the S2ST task; the rest of the code is exactly the same:

```python
>>> from transformers import SeamlessM4Tv2ForSpeechToSpeech
>>> model = SeamlessM4Tv2ForSpeechToSpeech.from_pretrained("facebook/seamless-m4t-v2-large")
```

Or you can replace the text-to-text generation snippet with the model dedicated to the T2TT task; you only have to remove `generate_speech=False`.

```python
>>> from transformers import SeamlessM4Tv2ForTextToText
>>> model = SeamlessM4Tv2ForTextToText.from_pretrained("facebook/seamless-m4t-v2-large")
```

Feel free to try out [`SeamlessM4Tv2ForSpeechToText`] and [`SeamlessM4Tv2ForTextToSpeech`] as well, as shown in the sketch below.
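
For instance, here is a minimal sketch for the S2TT sub-model, reusing the `processor` and `audio_inputs` defined above; the exact output handling may differ slightly from the top-level model, so treat this as an illustration rather than the reference usage:

```python
>>> from transformers import SeamlessM4Tv2ForSpeechToText
>>> model = SeamlessM4Tv2ForSpeechToText.from_pretrained("facebook/seamless-m4t-v2-large")

>>> # the dedicated text-output models return text token ids directly
>>> output_tokens = model.generate(**audio_inputs, tgt_lang="fra")
>>> translated_text_from_audio = processor.batch_decode(output_tokens, skip_special_tokens=True)[0]
```
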
#### 2. Change the speaker identity

You can change the speaker used for speech synthesis with the `speaker_id` argument. Some `speaker_id` values work better than others for certain languages!
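
For example, a minimal sketch with [`SeamlessM4Tv2Model`], reusing the `text_inputs` from the usage section above (the set of available speaker ids depends on the checkpoint's vocoder):

```python
>>> # generate Russian speech with a different speaker identity
>>> audio_array = model.generate(**text_inputs, tgt_lang="rus", speaker_id=4)[0].cpu().numpy().squeeze()
```
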
#### 3. Change the generation strategy

You can use different [generation strategies](../generation_strategies) for text generation, e.g. `.generate(input_ids=input_ids, text_num_beams=4, text_do_sample=True)`, which will perform multinomial beam-search decoding on the text model. Note that speech generation only supports greedy search (the default) or multinomial sampling, which can be used with e.g. `.generate(..., speech_do_sample=True, speech_temperature=0.6)`.
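
Putting this together, here is a short sketch reusing the `text_inputs` defined above and the `text_num_beams`, `speech_do_sample` and `speech_temperature` arguments mentioned in this section:

```python
>>> # beam search for the text model, multinomial sampling for the speech model
>>> audio_array = model.generate(
...     **text_inputs,
...     tgt_lang="rus",
...     text_num_beams=4,
...     speech_do_sample=True,
...     speech_temperature=0.6,
... )[0].cpu().numpy().squeeze()
```
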
#### 4. Generate speech and text at the same time

Use `return_intermediate_token_ids=True` with [`SeamlessM4Tv2Model`] to return both speech and text!
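
As a rough sketch, assuming the returned generation output exposes the text tokens under `sequences` and the audio under `waveform`:

```python
>>> outputs = model.generate(**text_inputs, tgt_lang="rus", return_intermediate_token_ids=True)
>>> audio_array = outputs.waveform[0].cpu().numpy().squeeze()
>>> translated_text = processor.decode(outputs.sequences[0].tolist(), skip_special_tokens=True)
```
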
## Model architecture

SeamlessM4T-v2 features a versatile architecture that smoothly handles the sequential generation of text and speech. This setup comprises two sequence-to-sequence (seq2seq) models. The first model translates the input modality into translated text, while the second model generates speech tokens, known as "unit tokens," from the translated text.

Each modality has its own dedicated encoder with a unique architecture. Additionally, for speech output, a vocoder inspired by the [HiFi-GAN](https://arxiv.org/abs/2010.05646) architecture is placed on top of the second seq2seq model.

### Difference with SeamlessM4T-v1

The architecture of this new version differs from the first in a few aspects:

#### Improvements on the second-pass model

The second seq2seq model, the text-to-unit model, is now non-autoregressive, meaning that it computes units in a **single forward pass**. This is made possible by:
- the use of **character-level embeddings**, meaning that each character of the predicted translated text has its own embedding, which is then used to predict the unit tokens.
- the use of an intermediate duration predictor that predicts speech duration at the **character-level** on the predicted translated text.
- the use of a new text-to-unit decoder mixing convolutions and self-attention to handle longer context.

#### Difference in the speech encoder

The speech encoder, which is used during the first-pass generation process to predict the translated text, differs mainly from the previous speech encoder through these mechanisms:
- the use of a chunked attention mask to prevent attention across chunks, ensuring that each position attends only to positions within its own chunk and a fixed number of previous chunks.
- the use of relative position embeddings, which only consider the distance between sequence elements rather than absolute positions. Please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155) for more details.
- the use of a causal depth-wise convolution instead of a non-causal one.

### Generation process

Here's how the generation process works:

- Input text or speech is processed through its specific encoder.
- A decoder creates text tokens in the desired language.
- If speech generation is required, the second seq2seq model generates unit tokens in a non-autoregressive way.
- These unit tokens are then passed through the final vocoder to produce the actual speech.

This model was contributed by [ylacombe](https://huggingface.co/ylacombe). The original code can be found [here](https://github.com/facebookresearch/seamless_communication).

## SeamlessM4Tv2Model

[[autodoc]] SeamlessM4Tv2Model
    - generate

## SeamlessM4Tv2ForTextToSpeech

[[autodoc]] SeamlessM4Tv2ForTextToSpeech
    - generate

## SeamlessM4Tv2ForSpeechToSpeech

[[autodoc]] SeamlessM4Tv2ForSpeechToSpeech
    - generate

## SeamlessM4Tv2ForTextToText

[[autodoc]] transformers.SeamlessM4Tv2ForTextToText
    - forward
    - generate

## SeamlessM4Tv2ForSpeechToText

[[autodoc]] transformers.SeamlessM4Tv2ForSpeechToText
    - forward
    - generate

## SeamlessM4Tv2Config

[[autodoc]] SeamlessM4Tv2Config

@@ -314,7 +314,7 @@ The predicted tokens will then be placed between the sentinel tokens.

## Performance

If you'd like a faster training and inference performance, install [apex](https://github.com/NVIDIA/apex#quick-start) and then the model will automatically use `apex.normalization.FusedRMSNorm` instead of `T5LayerNorm`. The former uses an optimized fused kernel which is several times faster than the latter.
If you'd like a faster training and inference performance, install [NVIDIA APEX](https://github.com/NVIDIA/apex#quick-start) for NVIDIA GPUs, or [ROCm APEX](https://github.com/ROCmSoftwarePlatform/apex) for AMD GPUs and then the model will automatically use `apex.normalization.FusedRMSNorm` instead of `T5LayerNorm`. The former uses an optimized fused kernel which is several times faster than the latter.

## Resources

@@ -16,6 +16,35 @@ rendered properly in your Markdown viewer.

# Transformer XL

<Tip warning={true}>

This model is in maintenance mode only, so we won't accept any new PRs changing its code. This model was deprecated due to security issues linked to `pickle.load`.

We recommend switching to more recent models for improved security.

In case you would still like to use `TransfoXL` in your experiments, we recommend using the [Hub checkpoint](https://huggingface.co/transfo-xl-wt103) with a specific revision to ensure you are downloading safe files from the Hub.

You will need to set the environment variable `TRUST_REMOTE_CODE` to `True` in order to allow the
usage of `pickle.load()`:

```python
import os
from transformers import TransfoXLTokenizer, TransfoXLLMHeadModel

os.environ["TRUST_REMOTE_CODE"] = "True"

checkpoint = 'transfo-xl-wt103'
revision = '40a186da79458c9f9de846edfaea79c412137f97'

tokenizer = TransfoXLTokenizer.from_pretrained(checkpoint, revision=revision)
model = TransfoXLLMHeadModel.from_pretrained(checkpoint, revision=revision)
```

If you run into any issues running this model, please reinstall the last version that supported this model: v4.35.0.
You can do so by running the following command: `pip install -U transformers==4.35.0`.

</Tip>

<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=transfo-xl">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-transfo--xl-blueviolet">

@@ -79,13 +108,13 @@ TransformerXL does **not** work with *torch.nn.DataParallel* due to a bug in PyT

## TransfoXL specific outputs

[[autodoc]] models.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput
[[autodoc]] models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput

[[autodoc]] models.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput
[[autodoc]] models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput

[[autodoc]] models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput
[[autodoc]] models.deprecated.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput

[[autodoc]] models.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput
[[autodoc]] models.deprecated.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput

<frameworkcontent>
<pt>

docs/source/en/model_doc/tvp.md (new file, 186 lines)
@@ -0,0 +1,186 @@
<!--Copyright 2023 The Intel Team Authors and HuggingFace Inc. team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# TVP

## Overview

The text-visual prompting (TVP) framework was proposed in the paper [Text-Visual Prompting for Efficient 2D Temporal Video Grounding](https://arxiv.org/abs/2303.04995) by Yimeng Zhang, Xin Chen, Jinghan Jia, Sijia Liu, Ke Ding.

The abstract from the paper is the following:

*In this paper, we study the problem of temporal video grounding (TVG), which aims to predict the starting/ending time points of moments described by a text sentence within a long untrimmed video. Benefiting from fine-grained 3D visual features, the TVG techniques have achieved remarkable progress in recent years. However, the high complexity of 3D convolutional neural networks (CNNs) makes extracting dense 3D visual features time-consuming, which calls for intensive memory and computing resources. Towards efficient TVG, we propose a novel text-visual prompting (TVP) framework, which incorporates optimized perturbation patterns (that we call ‘prompts’) into both visual inputs and textual features of a TVG model. In sharp contrast to 3D CNNs, we show that TVP allows us to effectively co-train vision encoder and language encoder in a 2D TVG model and improves the performance of cross-modal feature fusion using only low-complexity sparse 2D visual features. Further, we propose a Temporal-Distance IoU (TDIoU) loss for efficient learning of TVG. Experiments on two benchmark datasets, Charades-STA and ActivityNet Captions datasets, empirically show that the proposed TVP significantly boosts the performance of 2D TVG (e.g., 9.79% improvement on Charades-STA and 30.77% improvement on ActivityNet Captions) and achieves 5× inference acceleration over TVG using 3D visual features.*

This research addresses temporal video grounding (TVG), which is the process of pinpointing the start and end times of specific events in a long video, as described by a text sentence. Text-visual prompting (TVP) is proposed to enhance TVG. TVP involves integrating specially designed patterns, known as 'prompts', into both the visual (image-based) and textual (word-based) input components of a TVG model. These prompts provide additional spatial-temporal context, improving the model's ability to accurately determine event timings in the video. The approach employs 2D visual inputs in place of 3D ones. Although 3D inputs offer more spatial-temporal detail, they are also more time-consuming to process. The use of 2D inputs with the prompting method aims to provide similar levels of context and accuracy more efficiently.

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/tvp_architecture.png"
alt="drawing" width="600"/>

<small> TVP architecture. Taken from the <a href="https://arxiv.org/abs/2303.04995">original paper.</a> </small>

This model was contributed by [Jiqing Feng](https://huggingface.co/Jiqing). The original code can be found [here](https://github.com/intel/TVP).

## Usage tips and examples

Prompts are optimized perturbation patterns that are added to input video frames or text features. "Universal set" refers to using the same exact set of prompts for any input; these prompts are added consistently to all video frames and text features, regardless of the input's content.

TVP consists of a visual encoder and a cross-modal encoder. A universal set of visual prompts and text prompts is integrated into the sampled video frames and textual features, respectively. Specifically, a set of different visual prompts is applied, in order, to the uniformly sampled frames of one untrimmed video.

The goal of this model is to incorporate trainable prompts into both visual inputs and textual features for temporal video grounding (TVG) problems.
In principle, any visual or cross-modal encoder can be applied in the proposed architecture.

The [`TvpProcessor`] wraps [`BertTokenizer`] and [`TvpImageProcessor`] into a single instance to both encode the text and prepare the images, respectively.

The following example shows how to run temporal video grounding using [`TvpProcessor`] and [`TvpForVideoGrounding`].
```python
import av
import cv2
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoProcessor, TvpForVideoGrounding


def pyav_decode(container, sampling_rate, num_frames, clip_idx, num_clips, target_fps):
    '''
    Convert the video from its original fps to the target_fps and decode the video with PyAV decoder.
    Args:
        container (container): pyav container.
        sampling_rate (int): frame sampling rate (interval between two sampled frames).
        num_frames (int): number of frames to sample.
        clip_idx (int): if clip_idx is -1, perform random temporal sampling.
            If clip_idx is larger than -1, uniformly split the video to num_clips
            clips, and select the clip_idx-th video clip.
        num_clips (int): overall number of clips to uniformly sample from the given video.
        target_fps (int): the input video may have different fps, convert it to
            the target video fps before frame sampling.
    Returns:
        frames (tensor): decoded frames from the video. Return None if no
            video stream was found.
        fps (float): the number of frames per second of the video.
    '''
    video = container.streams.video[0]
    fps = float(video.average_rate)
    clip_size = sampling_rate * num_frames / target_fps * fps
    delta = max(num_frames - clip_size, 0)
    start_idx = delta * clip_idx / num_clips
    end_idx = start_idx + clip_size - 1
    timebase = video.duration / num_frames
    video_start_pts = int(start_idx * timebase)
    video_end_pts = int(end_idx * timebase)
    seek_offset = max(video_start_pts - 1024, 0)
    container.seek(seek_offset, any_frame=False, backward=True, stream=video)
    frames = {}
    for frame in container.decode(video=0):
        if frame.pts < video_start_pts:
            continue
        frames[frame.pts] = frame
        if frame.pts > video_end_pts:
            break
    frames = [frames[pts] for pts in sorted(frames)]
    return frames, fps


def decode(container, sampling_rate, num_frames, clip_idx, num_clips, target_fps):
    '''
    Decode the video and perform temporal sampling.
    Args:
        container (container): pyav container.
        sampling_rate (int): frame sampling rate (interval between two sampled frames).
        num_frames (int): number of frames to sample.
        clip_idx (int): if clip_idx is -1, perform random temporal sampling.
            If clip_idx is larger than -1, uniformly split the video to num_clips
            clips, and select the clip_idx-th video clip.
        num_clips (int): overall number of clips to uniformly sample from the given video.
        target_fps (int): the input video may have different fps, convert it to
            the target video fps before frame sampling.
    Returns:
        frames (tensor): decoded frames from the video.
    '''
    assert clip_idx >= -2, "Not a valid clip_idx {}".format(clip_idx)
    frames, fps = pyav_decode(container, sampling_rate, num_frames, clip_idx, num_clips, target_fps)
    clip_size = sampling_rate * num_frames / target_fps * fps
    index = np.linspace(0, clip_size - 1, num_frames)
    index = np.clip(index, 0, len(frames) - 1).astype(np.int64)
    frames = np.array([frames[idx].to_rgb().to_ndarray() for idx in index])
    frames = frames.transpose(0, 3, 1, 2)
    return frames


file = hf_hub_download(repo_id="Intel/tvp_demo", filename="AK2KG.mp4", repo_type="dataset")
model = TvpForVideoGrounding.from_pretrained("Intel/tvp-base")

decoder_kwargs = dict(
    container=av.open(file, metadata_errors="ignore"),
    sampling_rate=1,
    num_frames=model.config.num_frames,
    clip_idx=0,
    num_clips=1,
    target_fps=3,
)
raw_sampled_frms = decode(**decoder_kwargs)

text = "a person is sitting on a bed."
processor = AutoProcessor.from_pretrained("Intel/tvp-base")
model_inputs = processor(
    text=[text], videos=list(raw_sampled_frms), return_tensors="pt", max_text_length=100
)

model_inputs["pixel_values"] = model_inputs["pixel_values"].to(model.dtype)
output = model(**model_inputs)


def get_video_duration(filename):
    # use OpenCV to read the video's fps and frame count and derive its duration in seconds
    cap = cv2.VideoCapture(filename)
    if cap.isOpened():
        rate = cap.get(cv2.CAP_PROP_FPS)
        frame_num = cap.get(cv2.CAP_PROP_FRAME_COUNT)
        duration = frame_num / rate
        return duration
    return -1


duration = get_video_duration(file)
start, end = processor.post_process_video_grounding(output.logits, duration)

print(f"The time slot of the video corresponding to the text \"{text}\" is from {start}s to {end}s")
```

Tips:

- This implementation of TVP uses [`BertTokenizer`] to generate text embeddings and a ResNet-50 model to compute visual embeddings.
- A checkpoint for the pre-trained [tvp-base](https://huggingface.co/Intel/tvp-base) model is released.
- Please refer to [Table 2](https://arxiv.org/pdf/2303.04995.pdf) for TVP's performance on the temporal video grounding task.

## TvpConfig

[[autodoc]] TvpConfig

## TvpImageProcessor

[[autodoc]] TvpImageProcessor
    - preprocess

## TvpProcessor

[[autodoc]] TvpProcessor
    - __call__

## TvpModel

[[autodoc]] TvpModel
    - forward

## TvpForVideoGrounding

[[autodoc]] TvpForVideoGrounding
    - forward
Some files were not shown because too many files have changed in this diff.