mirror of https://github.com/huggingface/transformers.git
synced 2025-11-04 12:04:37 +08:00

Compare commits

73 Commits

add_pipeli ... v4.45-rele
| SHA1 |
|---|
| 53fad641cf |
| 2fd49d2b28 |
| 5df4ca826d |
| 277ed58f06 |
| b1c237fc4e |
| ae5f4916de |
| 6ea04aaad8 |
| be968434fd |
| 333ec0a523 |
| 3576fec8a3 |
| f0686f567a |
| 27f03e0a7b |
| e71a01a104 |
| 0317895840 |
| 4ea1c43a10 |
| 289edd9e8c |
| c64be318fc |
| 2ef31dec16 |
| 19d58d31f1 |
| 94f18cf23c |
| ade9e0fe41 |
| 196d35ccfc |
| 61e98cb957 |
| 68049b17a6 |
| 574a9e12bb |
| 7e638ef2b8 |
| 06e27e3dc0 |
| c6379858f3 |
| 5e2916bc14 |
| 52daf4ec76 |
| 5f0c181f4e |
| fa0bb0fe76 |
| 238b13478d |
| d5bdac3db7 |
| a7734238ff |
| 6f7d750b73 |
| 13749e8edb |
| 317e069ee7 |
| 75b7485cc7 |
| 01aec8c92d |
| 11c27dd331 |
| e15687fffe |
| 1456120929 |
| be9cf070ee |
| 214db9e660 |
| 6d02968d51 |
| b7c381f011 |
| 9eb93854b9 |
| 78b2929c05 |
| e71bf70e33 |
| e472e077c2 |
| 49a0bef4c1 |
| 7b2b536a81 |
| e9356a4206 |
| 75c878da1e |
| 077b552f07 |
| 77c5d59e0e |
| dc8b6eaeee |
| c0c6815dc9 |
| 31caf0b95f |
| 2fdb5e74cc |
| 653eb40425 |
| f9b4409726 |
| 266d0a6375 |
| ec1424c6a3 |
| 8bd1f2f338 |
| 31650a53a1 |
| 6dc364616d |
| bdf4649f67 |
| 0c718f16d1 |
| 4d8908df27 |
| b87755aa6d |
| f111d5b783 |
@@ -47,13 +47,13 @@ jobs:
            - run:
                name: "Retrieve Artifact Paths"
                env:
                    CIRCLE_TOKEN: ${{ secrets.CI_ARTIFACT_TOKEN }}
                # [reference] https://circleci.com/docs/api/v2/index.html#operation/getJobArtifacts
                # `CIRCLE_TOKEN` is defined as an environment variables set within a context, see `https://circleci.com/docs/contexts/`
                command: |
                    project_slug="gh/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}"
                    job_number=${CIRCLE_BUILD_NUM}
                    url="https://circleci.com/api/v2/project/${project_slug}/${job_number}/artifacts"
                    curl -o  test_preparation/artifacts.json ${url}
                    curl -o test_preparation/artifacts.json ${url} --header "Circle-Token: $CIRCLE_TOKEN"
            - run:
                name: "Prepare pipeline parameters"
                command: |
@@ -82,22 +82,49 @@ jobs:
        parallelism: 1
        steps:
            - checkout
            - run: uv pip install -e .
            - run: uv pip install -U -e .
            - run: echo 'export "GIT_COMMIT_MESSAGE=$(git show -s --format=%s)"' >> "$BASH_ENV" && source "$BASH_ENV"
            - run: mkdir -p test_preparation
            - run: python utils/tests_fetcher.py --fetch_all | tee tests_fetched_summary.txt
            - run: python utils/tests_fetcher.py --filter_tests
            - run: export "GIT_COMMIT_MESSAGE=$(git show -s --format=%s)" && echo $GIT_COMMIT_MESSAGE && python .circleci/create_circleci_config.py --fetcher_folder test_preparation
            - run: |
                  mkdir test_preparation
                  echo -n "tests" > test_preparation/test_list.txt
                  echo -n "all" > test_preparation/examples_test_list.txt
                  echo -n "tests/repo_utils" > test_preparation/test_repo_utils.txt
            - run: |
                  echo -n "tests" > test_list.txt
                  python utils/tests_fetcher.py --filter_tests
                  mv test_list.txt test_preparation/filtered_test_list.txt
            - run: python .circleci/create_circleci_config.py --fetcher_folder test_preparation
            - run: cp test_preparation/generated_config.yml test_preparation/generated_config.txt
                if [ ! -s test_preparation/generated_config.yml ]; then
                    echo "No tests to run, exiting early!"
                    circleci-agent step halt
                fi

            - store_artifacts:
                  path: test_preparation/generated_config.txt
                path: test_preparation

            - run:
                name: "Retrieve Artifact Paths"
                env:
                    CIRCLE_TOKEN: ${{ secrets.CI_ARTIFACT_TOKEN }}
                command: |
                    project_slug="gh/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}"
                    job_number=${CIRCLE_BUILD_NUM}
                    url="https://circleci.com/api/v2/project/${project_slug}/${job_number}/artifacts"
                    curl -o  test_preparation/artifacts.json ${url}
            - run:
                name: "Prepare pipeline parameters"
                command: |
                    python utils/process_test_artifacts.py

            # To avoid too long generated_config.yaml on the continuation orb, we pass the links to the artifacts as parameters.
            # Otherwise the list of tests was just too big. Explicit is good but for that it was a limitation.
            # We used:

            # https://circleci.com/docs/api/v2/index.html#operation/getJobArtifacts : to get the job artifacts
            # We could not pass a nested dict, which is why we create the test_file_... parameters for every single job

            - store_artifacts:
                path: test_preparation/transformed_artifacts.json
            - store_artifacts:
                path: test_preparation/artifacts.json
            - continuation/continue:
                  configuration_path: test_preparation/generated_config.yml
                parameters:  test_preparation/transformed_artifacts.json
                configuration_path: test_preparation/generated_config.yml

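The comments above explain that per-job artifact URLs are passed to the continuation orb as flat pipeline parameters instead of embedding long test lists in the generated config. A minimal, hypothetical sketch of such a transformation is shown below; it is not the repository's actual `utils/process_test_artifacts.py`, and the file layout and parameter naming are assumptions:

```python
import json

# Hypothetical sketch: flatten the CircleCI artifacts listing into one
# "test_file_<name>" parameter per artifact, so the continuation pipeline
# receives short URLs rather than long inline test lists.
with open("test_preparation/artifacts.json") as f:
    artifacts = json.load(f)

parameters = {}
for item in artifacts.get("items", []):
    # e.g. "test_preparation/test_list.txt" -> parameter "test_file_test_list"
    name = item["path"].rsplit("/", 1)[-1].removesuffix(".txt")
    parameters[f"test_file_{name}"] = item["url"]

with open("test_preparation/transformed_artifacts.json", "w") as f:
    json.dump(parameters, f, indent=2)
```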
    check_code_quality:
        working_directory: ~/transformers
@@ -110,7 +137,7 @@ jobs:
        parallelism: 1
        steps:
            - checkout
            - run: uv pip install -e .
            - run: uv pip install -e ".[quality]"
            - run:
                name: Show installed libraries and their versions
                command: pip freeze | tee installed.txt
@@ -135,13 +162,14 @@ jobs:
        parallelism: 1
        steps:
            - checkout
            - run: uv pip install -e .
            - run: uv pip install -e ".[quality]"
            - run:
                name: Show installed libraries and their versions
                command: pip freeze | tee installed.txt
            - store_artifacts:
                  path: ~/transformers/installed.txt
            - run: python utils/check_copies.py
            - run: python utils/check_modular_conversion.py
            - run: python utils/check_table.py
            - run: python utils/check_dummies.py
            - run: python utils/check_repo.py
@@ -163,7 +191,10 @@ workflows:
            - check_circleci_user
            - check_code_quality
            - check_repository_consistency
            - fetch_tests
            - fetch_tests:
                # [reference] https://circleci.com/docs/contexts/
                context:
                    - TRANSFORMERS_CONTEXT

    nightly:
        when: <<pipeline.parameters.nightly>>

@@ -312,6 +312,15 @@ repo_utils_job = CircleCIJob(
)


non_model_job = CircleCIJob(
    "non_model",
    docker_image=[{"image": "huggingface/transformers-torch-light"}],
    marker="not generate",
    parallelism=6,
    pytest_num_workers=8,
)


# We also include a `dummy.py` file in the files to be doc-tested to prevent edge case failure. Otherwise, the pytest
# hangs forever during test collection while showing `collecting 0 items / 21 errors`. (To see this, we have to remove
# the bash output redirection.)
@@ -336,7 +345,7 @@ doc_test_job = CircleCIJob(
    pytest_num_workers=1,
)

REGULAR_TESTS = [torch_and_tf_job, torch_and_flax_job, torch_job, tf_job, flax_job, hub_job, onnx_job, tokenization_job, processor_job, generate_job] # fmt: skip
REGULAR_TESTS = [torch_and_tf_job, torch_and_flax_job, torch_job, tf_job, flax_job, hub_job, onnx_job, tokenization_job, processor_job, generate_job, non_model_job] # fmt: skip
EXAMPLES_TESTS = [examples_torch_job, examples_tensorflow_job]
PIPELINE_TESTS = [pipelines_torch_job, pipelines_tf_job]
REPO_UTIL_TESTS = [repo_utils_job]

1  .github/workflows/build_documentation.yml (vendored)
@@ -1,6 +1,7 @@
name: Build documentation

on:
  workflow_dispatch:
  push:
    branches:
      - main

36  .github/workflows/model_jobs.yml (vendored)
@@ -41,7 +41,8 @@ jobs:
      fail-fast: false
      matrix:
        folders: ${{ fromJson(inputs.folder_slices)[inputs.slice_id] }}
    runs-on: ['${{ inputs.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
    runs-on:
      group: '${{ inputs.machine_type }}'
    container:
      image: ${{ inputs.docker }}
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -97,25 +98,42 @@ jobs:
        working-directory: /transformers
        run: pip freeze

      - name: Set `machine_type` for report and artifact names
        working-directory: /transformers
        shell: bash
        run: |
          echo "${{ inputs.machine_type }}"

          if [ "${{ inputs.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then
            machine_type=single-gpu
          elif [ "${{ inputs.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then
            machine_type=multi-gpu
          else
            machine_type=${{ inputs.machine_type }}
          fi

          echo "$machine_type"
          echo "machine_type=$machine_type" >> $GITHUB_ENV

      - name: Run all tests on GPU
        working-directory: /transformers
        run: python3 -m pytest -rsfE -v --make-reports=${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}
        run: python3 -m pytest -rsfE -v --make-reports=${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/failures_short.txt
        run: cat /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/failures_short.txt

      - name: Run test
        shell: bash
        run: |
          mkdir -p /transformers/reports/${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports
          echo "hello" > /transformers/reports/${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/hello.txt
          echo "${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports"
          mkdir -p /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports
          echo "hello" > /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/hello.txt
          echo "${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports"

      - name: "Test suite reports artifacts: ${{ inputs.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports"
      - name: "Test suite reports artifacts: ${{ env.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ inputs.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports
          path: /transformers/reports/${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports
          name: ${{ env.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports
          path: /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports

3  .github/workflows/self-scheduled-caller.yml (vendored)
@@ -2,6 +2,9 @@ name: Self-hosted runner (scheduled)

on:
  repository_dispatch:
  schedule:
    - cron: "17 2 * * *"
  push:
    branches:
      - run_scheduled_ci*

167  .github/workflows/self-scheduled.yml (vendored)
@@ -50,8 +50,9 @@ jobs:
    name: Setup
    strategy:
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
        machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache]
    runs-on:
      group: '${{ matrix.machine_type }}'
    container:
      image: huggingface/transformers-all-latest-gpu
      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -102,7 +103,7 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
        machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache]
        slice_id: ${{ fromJSON(needs.setup.outputs.slice_ids) }}
    uses: ./.github/workflows/model_jobs.yml
    with:
@@ -119,8 +120,9 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
        machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache]
    runs-on:
      group: '${{ matrix.machine_type }}'
    container:
      image: huggingface/transformers-pytorch-gpu
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -146,22 +148,39 @@ jobs:
        working-directory: /transformers
        run: pip freeze

      - name: Set `machine_type` for report and artifact names
        working-directory: /transformers
        shell: bash
        run: |
          echo "${{ matrix.machine_type }}"

          if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then
            machine_type=single-gpu
          elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then
            machine_type=multi-gpu
          else
            machine_type=${{ matrix.machine_type }}
          fi

          echo "$machine_type"
          echo "machine_type=$machine_type" >> $GITHUB_ENV

      - name: Run all pipeline tests on GPU
        working-directory: /transformers
        run: |
          python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports tests/pipelines
          python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ env.machine_type }}_run_pipelines_torch_gpu_test_reports tests/pipelines

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports/failures_short.txt
        run: cat /transformers/reports/${{ env.machine_type }}_run_pipelines_torch_gpu_test_reports/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports"
      - name: "Test suite reports artifacts: ${{ env.machine_type }}_run_pipelines_torch_gpu_test_reports"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports
          path: /transformers/reports/${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports
          name: ${{ env.machine_type }}_run_pipelines_torch_gpu_test_reports
          path: /transformers/reports/${{ env.machine_type }}_run_pipelines_torch_gpu_test_reports

  run_pipelines_tf_gpu:
    if: ${{ inputs.job == 'run_pipelines_tf_gpu' }}
@@ -169,8 +188,9 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
        machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache]
    runs-on:
      group: '${{ matrix.machine_type }}'
    container:
      image: huggingface/transformers-tensorflow-gpu
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -197,22 +217,39 @@ jobs:
        working-directory: /transformers
        run: pip freeze

      - name: Set `machine_type` for report and artifact names
        working-directory: /transformers
        shell: bash
        run: |
          echo "${{ matrix.machine_type }}"

          if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then
            machine_type=single-gpu
          elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then
            machine_type=multi-gpu
          else
            machine_type=${{ matrix.machine_type }}
          fi

          echo "$machine_type"
          echo "machine_type=$machine_type" >> $GITHUB_ENV

      - name: Run all pipeline tests on GPU
        working-directory: /transformers
        run: |
          python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_run_pipelines_tf_gpu_test_reports tests/pipelines
          python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ env.machine_type }}_run_pipelines_tf_gpu_test_reports tests/pipelines

      - name: Failure short reports
        if: ${{ always() }}
        run: |
          cat /transformers/reports/${{ matrix.machine_type }}_run_pipelines_tf_gpu_test_reports/failures_short.txt
          cat /transformers/reports/${{ env.machine_type }}_run_pipelines_tf_gpu_test_reports/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_pipelines_tf_gpu_test_reports"
      - name: "Test suite reports artifacts: ${{ env.machine_type }}_run_pipelines_tf_gpu_test_reports"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_pipelines_tf_gpu_test_reports
          path: /transformers/reports/${{ matrix.machine_type }}_run_pipelines_tf_gpu_test_reports
          name: ${{ env.machine_type }}_run_pipelines_tf_gpu_test_reports
          path: /transformers/reports/${{ env.machine_type }}_run_pipelines_tf_gpu_test_reports

  run_examples_gpu:
    if: ${{ inputs.job == 'run_examples_gpu' }}
@@ -220,8 +257,9 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
        machine_type: [aws-g4dn-2xlarge-cache]
    runs-on:
      group: '${{ matrix.machine_type }}'
    container:
      image: huggingface/transformers-all-latest-gpu
      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -247,23 +285,40 @@ jobs:
        working-directory: /transformers
        run: pip freeze

      - name: Set `machine_type` for report and artifact names
        working-directory: /transformers
        shell: bash
        run: |
          echo "${{ matrix.machine_type }}"

          if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then
            machine_type=single-gpu
          elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then
            machine_type=multi-gpu
          else
            machine_type=${{ matrix.machine_type }}
          fi

          echo "$machine_type"
          echo "machine_type=$machine_type" >> $GITHUB_ENV

      - name: Run examples tests on GPU
        working-directory: /transformers
        run: |
          pip install -r examples/pytorch/_tests_requirements.txt
          python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_run_examples_gpu_test_reports examples/pytorch
          python3 -m pytest -v --make-reports=${{ env.machine_type }}_run_examples_gpu_test_reports examples/pytorch

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_run_examples_gpu_test_reports/failures_short.txt
        run: cat /transformers/reports/${{ env.machine_type }}_run_examples_gpu_test_reports/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_examples_gpu_test_reports"
      - name: "Test suite reports artifacts: ${{ env.machine_type }}_run_examples_gpu_test_reports"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_examples_gpu_test_reports
          path: /transformers/reports/${{ matrix.machine_type }}_run_examples_gpu_test_reports
          name: ${{ env.machine_type }}_run_examples_gpu_test_reports
          path: /transformers/reports/${{ env.machine_type }}_run_examples_gpu_test_reports

  run_torch_cuda_extensions_gpu:
    if: ${{ inputs.job == 'run_torch_cuda_extensions_gpu' }}
@@ -271,8 +326,9 @@ jobs:
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
        machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache]
    runs-on:
      group: '${{ matrix.machine_type }}'
    container:
      image: ${{ inputs.docker }}
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -326,22 +382,39 @@ jobs:
        working-directory: ${{ inputs.working-directory-prefix }}/transformers
        run: pip freeze

      - name: Set `machine_type` for report and artifact names
        working-directory: /transformers
        shell: bash
        run: |
          echo "${{ matrix.machine_type }}"

          if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then
            machine_type=single-gpu
          elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then
            machine_type=multi-gpu
          else
            machine_type=${{ matrix.machine_type }}
          fi

          echo "$machine_type"
          echo "machine_type=$machine_type" >> $GITHUB_ENV

      - name: Run all tests on GPU
        working-directory: ${{ inputs.working-directory-prefix }}/transformers
        run: |
          python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports tests/deepspeed tests/extended
          python3 -m pytest -v --make-reports=${{ env.machine_type }}_run_torch_cuda_extensions_gpu_test_reports tests/deepspeed tests/extended

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat ${{ inputs.working-directory-prefix }}/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports/failures_short.txt
        run: cat ${{ inputs.working-directory-prefix }}/transformers/reports/${{ env.machine_type }}_run_torch_cuda_extensions_gpu_test_reports/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports"
      - name: "Test suite reports artifacts: ${{ env.machine_type }}_run_torch_cuda_extensions_gpu_test_reports"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
          path: ${{ inputs.working-directory-prefix }}/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
          name: ${{ env.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
          path: ${{ inputs.working-directory-prefix }}/transformers/reports/${{ env.machine_type }}_run_torch_cuda_extensions_gpu_test_reports

  run_quantization_torch_gpu:
    if: ${{ inputs.job == 'run_quantization_torch_gpu' }}
@@ -352,8 +425,9 @@ jobs:
      fail-fast: false
      matrix:
        folders: ${{ fromJson(needs.setup.outputs.quantization_matrix) }}
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
        machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache]
    runs-on:
      group: '${{ matrix.machine_type }}'
    container:
      image: huggingface/transformers-quantization-latest-gpu
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -388,22 +462,39 @@ jobs:
        working-directory: /transformers
        run: pip freeze

      - name: Set `machine_type` for report and artifact names
        working-directory: /transformers
        shell: bash
        run: |
          echo "${{ matrix.machine_type }}"

          if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then
            machine_type=single-gpu
          elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then
            machine_type=multi-gpu
          else
            machine_type=${{ matrix.machine_type }}
          fi

          echo "$machine_type"
          echo "machine_type=$machine_type" >> $GITHUB_ENV

      - name: Run quantization tests on GPU
        working-directory: /transformers
        run: |
          python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_run_quantization_torch_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}
          python3 -m pytest -v --make-reports=${{ env.machine_type }}_run_quantization_torch_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_run_quantization_torch_gpu_${{ matrix.folders }}_test_reports/failures_short.txt
        run: cat /transformers/reports/${{ env.machine_type }}_run_quantization_torch_gpu_${{ matrix.folders }}_test_reports/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_quantization_torch_gpu_${{ env.matrix_folders }}_test_reports"
      - name: "Test suite reports artifacts: ${{ env.machine_type }}_run_quantization_torch_gpu_${{ env.matrix_folders }}_test_reports"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_quantization_torch_gpu_${{ env.matrix_folders }}_test_reports
          path: /transformers/reports/${{ matrix.machine_type }}_run_quantization_torch_gpu_${{ matrix.folders }}_test_reports
          name: ${{ env.machine_type }}_run_quantization_torch_gpu_${{ env.matrix_folders }}_test_reports
          path: /transformers/reports/${{ env.machine_type }}_run_quantization_torch_gpu_${{ matrix.folders }}_test_reports

  run_extract_warnings:
    # Let's only do this for the job `run_models_gpu` to simplify the (already complex) logic.

2  Makefile
@@ -36,6 +36,7 @@ autogenerate_code: deps_table_update

repo-consistency:
	python utils/check_copies.py
	python utils/check_modular_conversion.py
	python utils/check_table.py
	python utils/check_dummies.py
	python utils/check_repo.py
@@ -80,6 +81,7 @@ fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency

fix-copies:
	python utils/check_copies.py --fix_and_overwrite
	python utils/check_modular_conversion.py  --fix_and_overwrite
	python utils/check_table.py --fix_and_overwrite
	python utils/check_dummies.py --fix_and_overwrite
	python utils/check_doctest_list.py --fix_and_overwrite

@@ -5,6 +5,8 @@
    title: Quick tour
  - local: installation
    title: Installation
  - local: add_new_model
    title: Adding a new model to `transformers`
  title: Get started
- sections:
  - local: pipeline_tutorial
@@ -149,6 +151,8 @@
    title: Interoperability with GGUF files
  - local: tiktoken
    title: Interoperability with TikToken files
  - local: modular_transformers
    title: Modularity in `transformers`
  title: Developer guides
- sections:
  - local: quantization/overview
@@ -173,6 +177,8 @@
    title: Optimum
  - local: quantization/torchao
    title: TorchAO
  - local: quantization/compressed_tensors
    title: compressed-tensors
  - local: quantization/contribute
    title: Contribute new quantization method
  title: Quantization Methods
@@ -424,6 +430,8 @@
        title: GPTSw3
      - local: model_doc/granite
        title: Granite
      - local: model_doc/granitemoe
        title: GraniteMoe
      - local: model_doc/herbert
        title: HerBERT
      - local: model_doc/ibert
@@ -852,8 +860,12 @@
        title: MatCha
      - local: model_doc/mgp-str
        title: MGP-STR
      - local: model_doc/mllama
        title: mllama
      - local: model_doc/nougat
        title: Nougat
      - local: model_doc/omdet-turbo
        title: OmDet-Turbo
      - local: model_doc/oneformer
        title: OneFormer
      - local: model_doc/owlvit

@@ -159,6 +159,7 @@ Flax), PyTorch, and/or TensorFlow.
| [GPTBigCode](model_doc/gpt_bigcode) | ✅ | ❌ | ❌ |
| [GPTSAN-japanese](model_doc/gptsan-japanese) | ✅ | ❌ | ❌ |
| [Granite](model_doc/granite) | ✅ | ❌ | ❌ |
| [GraniteMoeMoe](model_doc/granitemoe) | ✅ | ❌ | ❌ |
| [Graphormer](model_doc/graphormer) | ✅ | ❌ | ❌ |
| [Grounding DINO](model_doc/grounding-dino) | ✅ | ❌ | ❌ |
| [GroupViT](model_doc/groupvit) | ✅ | ✅ | ❌ |
@@ -213,6 +214,7 @@ Flax), PyTorch, and/or TensorFlow.
| [Mimi](model_doc/mimi) | ✅ | ❌ | ❌ |
| [Mistral](model_doc/mistral) | ✅ | ✅ | ✅ |
| [Mixtral](model_doc/mixtral) | ✅ | ❌ | ❌ |
| [Mllama](model_doc/mllama) | ✅ | ❌ | ❌ |
| [mLUKE](model_doc/mluke) | ✅ | ❌ | ❌ |
| [MMS](model_doc/mms) | ✅ | ✅ | ✅ |
| [MobileBERT](model_doc/mobilebert) | ✅ | ✅ | ❌ |
@@ -236,6 +238,7 @@ Flax), PyTorch, and/or TensorFlow.
| [Nyströmformer](model_doc/nystromformer) | ✅ | ❌ | ❌ |
| [OLMo](model_doc/olmo) | ✅ | ❌ | ❌ |
| [OLMoE](model_doc/olmoe) | ✅ | ❌ | ❌ |
| [OmDet-Turbo](model_doc/omdet-turbo) | ✅ | ❌ | ❌ |
| [OneFormer](model_doc/oneformer) | ✅ | ❌ | ❌ |
| [OpenAI GPT](model_doc/openai-gpt) | ✅ | ✅ | ❌ |
| [OpenAI GPT-2](model_doc/gpt2) | ✅ | ✅ | ✅ |
@@ -254,7 +257,7 @@ Flax), PyTorch, and/or TensorFlow.
| [Phi3](model_doc/phi3) | ✅ | ❌ | ❌ |
| [PhoBERT](model_doc/phobert) | ✅ | ✅ | ✅ |
| [Pix2Struct](model_doc/pix2struct) | ✅ | ❌ | ❌ |
| [Pixtral](model_doc/pixtral) | ❌ | ❌ | ❌ |
| [Pixtral](model_doc/pixtral) | ✅ | ❌ | ❌ |
| [PLBart](model_doc/plbart) | ✅ | ❌ | ❌ |
| [PoolFormer](model_doc/poolformer) | ✅ | ❌ | ❌ |
| [Pop2Piano](model_doc/pop2piano) | ✅ | ❌ | ❌ |

@@ -181,7 +181,7 @@ for every matrix multiplication. Dequantization and re-quantization is performed

Therefore, inference time is often **not** reduced when using quantized weights, but rather increases.
Enough theory, let's give it a try! To quantize the weights with Transformers, you need to make sure that
the [`bitsandbytes`](https://github.com/TimDettmers/bitsandbytes) library is installed.
the [`bitsandbytes`](https://github.com/bitsandbytes-foundation/bitsandbytes) library is installed.

```bash
!pip install bitsandbytes
```

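For reference, a hedged sketch of the kind of 8-bit load this installation enables; the checkpoint id here is an arbitrary example and is not taken from the truncated hunk:

```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Illustrative 8-bit load via bitsandbytes; "bigscience/bloom-1b7" is just an example checkpoint.
quant_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForCausalLM.from_pretrained(
    "bigscience/bloom-1b7",
    quantization_config=quant_config,
    device_map="auto",
)
```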
@@ -61,7 +61,10 @@ Learn how to quantize models in the [Quantization](../quantization) guide.

[[autodoc]] FbgemmFp8Config

## CompressedTensorsConfig

[[autodoc]] CompressedTensorsConfig

## TorchAoConfig

[[autodoc]] TorchAoConfig

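A hedged usage sketch for one of these configuration classes (not taken from the diffed page; the checkpoint id and option values are assumptions, and `torchao` must be installed):

```python
from transformers import AutoModelForCausalLM, TorchAoConfig

# Illustrative only: quantize weights to int4 with torchao at load time.
# The model id and group_size are example values for this sketch.
quant_config = TorchAoConfig("int4_weight_only", group_size=128)
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.1-8B-Instruct",
    quantization_config=quant_config,
    device_map="auto",
)
```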
@@ -46,7 +46,7 @@ url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
candidate_labels = ["an image of a cat", "an image of a dog"]

inputs = processor(text=candidate_labels, images=image, return_tensors="pt")
inputs = processor(images=image, text=candidate_labels, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

@@ -32,6 +32,51 @@ This model was contributed by [kamalkraj](https://huggingface.co/kamalkraj). The
- BioGPT was trained with a causal language modeling (CLM) objective and is therefore powerful at predicting the next token in a sequence. Leveraging this feature allows BioGPT to generate syntactically coherent text, as can be observed in the run_generation.py example script.
- The model can take `past_key_values` (for PyTorch) as input, which are the previously computed key/value attention pairs. Using this value prevents the model from re-computing already computed values during text generation (see the sketch below). For PyTorch, see the past_key_values argument of the BioGptForCausalLM.forward() method for more information on its usage.

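As a hedged, illustrative sketch of the cached-generation pattern described in the note above (not part of the diffed page; the prompt and greedy decoding step are arbitrary choices):

```python
import torch
from transformers import BioGptForCausalLM, BioGptTokenizer

tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")

inputs = tokenizer("COVID-19 is", return_tensors="pt")
with torch.no_grad():
    # First pass: return the computed key/value pairs alongside the logits.
    out = model(**inputs, use_cache=True)
    past_key_values = out.past_key_values
    next_token = out.logits[:, -1].argmax(dim=-1, keepdim=True)

    # Next pass: feed only the new token plus the cache instead of the full prefix.
    attention_mask = torch.cat([inputs["attention_mask"], torch.ones_like(next_token)], dim=-1)
    out = model(input_ids=next_token, attention_mask=attention_mask,
                past_key_values=past_key_values, use_cache=True)
```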
### Using Scaled Dot Product Attention (SDPA)

PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function
encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the
[official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)
or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)
page for more information.

SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set
`attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.

```python
import torch
from transformers import BioGptForCausalLM

model = BioGptForCausalLM.from_pretrained("microsoft/biogpt", attn_implementation="sdpa", torch_dtype=torch.float16)
```

On a local benchmark (NVIDIA GeForce RTX 2060-8GB, PyTorch 2.3.1, OS Ubuntu 20.04) with `float16` and the `microsoft/biogpt` model with a CausalLM head,
we saw the following speedups during training.

For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).

| num_training_steps | batch_size | seq_len | is cuda | Time per batch (eager - s) | Time per batch (sdpa - s) | Speedup (%) | Eager peak mem (MB) | sdpa peak mem (MB) | Mem saving (%) |
|---|---|---|---|---|---|---|---|---|---|
| 100 | 1 | 128 | False | 0.038 | 0.031 | 21.301 | 1601.862 | 1601.497 | 0.023 |
| 100 | 1 | 256 | False | 0.039 | 0.034 | 15.084 | 1624.944 | 1625.296 | -0.022 |
| 100 | 2 | 128 | False | 0.039 | 0.033 | 16.820 | 1624.567 | 1625.296 | -0.045 |
| 100 | 2 | 256 | False | 0.065 | 0.059 | 10.255 | 1672.164 | 1672.164 | 0.000 |
| 100 | 4 | 128 | False | 0.062 | 0.058 | 6.998 | 1671.435 | 1672.164 | -0.044 |
| 100 | 4 | 256 | False | 0.113 | 0.100 | 13.316 | 2350.179 | 1848.435 | 27.144 |
| 100 | 8 | 128 | False | 0.107 | 0.098 | 9.883 | 2098.521 | 1848.435 | 13.530 |
| 100 | 8 | 256 | False | 0.222 | 0.196 | 13.413 | 3989.980 | 2986.492 | 33.601 |

On a local benchmark (NVIDIA GeForce RTX 2060-8GB, PyTorch 2.3.1, OS Ubuntu 20.04) with `float16` and the `microsoft/biogpt` model with a simple AutoModel head,
we saw the following speedups during inference.

| num_batches | batch_size | seq_len | is cuda | is half | use mask | Per token latency eager (ms) | Per token latency SDPA (ms) | Speedup (%) | Mem eager (MB) | Mem BT (MB) | Mem saved (%) |
|---|---|---|---|---|---|---|---|---|---|---|---|
| 50 | 1 | 64 | True | True | True | 0.115 | 0.098 | 17.392 | 716.998 | 716.998 | 0.000 |
| 50 | 1 | 128 | True | True | True | 0.115 | 0.093 | 24.640 | 730.916 | 730.916 | 0.000 |
| 50 | 2 | 64 | True | True | True | 0.114 | 0.096 | 19.204 | 730.900 | 730.900 | 0.000 |
| 50 | 2 | 128 | True | True | True | 0.117 | 0.095 | 23.529 | 759.262 | 759.262 | 0.000 |
| 50 | 4 | 64 | True | True | True | 0.113 | 0.096 | 18.325 | 759.229 | 759.229 | 0.000 |
| 50 | 4 | 128 | True | True | True | 0.186 | 0.178 | 4.289 | 816.478 | 816.478 | 0.000 |


## Resources

- [Causal language modeling task guide](../tasks/language_modeling)

@@ -128,7 +128,17 @@ processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokeniza

### Quantization using Bitsandbytes

The model can be loaded in 8 or 4 bits, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes, `pip install bitsandbytes` and make sure to have access to a CUDA compatible GPU device. Simply change the snippet above with:
The model can be loaded in 8 or 4 bits, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes, `pip install bitsandbytes`, and to have access to a GPU/accelerator that is supported by the library.

<Tip>

bitsandbytes is being refactored to support multiple backends beyond CUDA. Currently, ROCm (AMD GPU) and Intel CPU implementations are mature, with Intel XPU in progress and Apple Silicon support expected by Q4/Q1. For installation instructions and the latest backend updates, visit [this link](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend).

We value your feedback to help identify bugs before the full release! Check out [these docs](https://huggingface.co/docs/bitsandbytes/main/en/non_cuda_backends) for more details and feedback links.

</Tip>

Simply change the snippet above with:

```python
from transformers import ChameleonForConditionalGeneration, BitsAndBytesConfig
```

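The hunk is truncated right after the import line. As a hedged sketch of what such a 4-bit load typically looks like (the checkpoint id and quantization options are assumptions, not the content of the truncated hunk):

```python
import torch
from transformers import ChameleonForConditionalGeneration, BitsAndBytesConfig

# Illustrative 4-bit load; "facebook/chameleon-7b" and the exact options are assumptions.
quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
model = ChameleonForConditionalGeneration.from_pretrained(
    "facebook/chameleon-7b",
    quantization_config=quantization_config,
    device_map="auto",
)
```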
@@ -18,16 +18,16 @@ rendered properly in your Markdown viewer.

## Overview

The Fuyu model was created by [ADEPT](https://www.adept.ai/blog/fuyu-8b), and authored by Rohan Bavishi, Erich Elsen, Curtis Hawthorne, Maxwell Nye, Augustus Odena, Arushi Somani, Sağnak Taşırlar.

The authors introduced Fuyu-8B, a decoder-only multimodal model based on the classic transformers architecture, with query and key normalization. A linear encoder is added to create multimodal embeddings from image inputs.

By treating image tokens like text tokens and using a special image-newline character, the model knows when an image line ends. Image positional embeddings are removed. This avoids the need for different training phases for various image resolutions. With 8 billion parameters and licensed under CC-BY-NC, Fuyu-8B is notable for its ability to handle both text and images, its impressive context size of 16K, and its overall performance.

<Tip warning={true}>

The `Fuyu` models were trained using `bfloat16`, but the original inference uses `float16`. The checkpoints uploaded on the hub use `torch_dtype = 'float16'`, which will be
used by the `AutoModel` API to cast the checkpoints from `torch.float32` to `torch.float16`.

The `dtype` of the online weights is mostly irrelevant unless you are using `torch_dtype="auto"` when initializing a model with `model = AutoModelForCausalLM.from_pretrained("path", torch_dtype="auto")`. The reason is that the model will first be downloaded (using the `dtype` of the checkpoints online) and then cast to the default `dtype` of `torch` (which is `torch.float32`). Users should specify the `torch_dtype` they want, and if they don't, it will be `torch.float32`.
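As a small, hedged illustration of the `torch_dtype` behaviour described in this tip (the `adept/fuyu-8b` checkpoint id is assumed for the example and is not part of the captured hunk):

```python
from transformers import FuyuForCausalLM

# Default load: weights end up in torch.float32 regardless of the checkpoint dtype.
model_default = FuyuForCausalLM.from_pretrained("adept/fuyu-8b")

# torch_dtype="auto" keeps the dtype stored in the online checkpoint (float16 here).
model_auto = FuyuForCausalLM.from_pretrained("adept/fuyu-8b", torch_dtype="auto")
print(model_default.dtype, model_auto.dtype)
```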
@@ -56,7 +56,7 @@ tar -xvf 8b_base_model_release.tar
```
Then, the model can be loaded via:

```py
from transformers import FuyuConfig, FuyuForCausalLM
model_config = FuyuConfig()
model = FuyuForCausalLM(model_config).from_pretrained('/output/path')
@@ -81,7 +81,7 @@ text_prompt = "Generate a coco-style caption.\\n"

bus_image_url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/bus.png"
bus_image_pil = Image.open(io.BytesIO(requests.get(bus_image_url).content))
inputs_to_model = processor(text=text_prompt, images=bus_image_pil)
inputs_to_model = processor(images=bus_image_pil, text=text_prompt)


```
@ -90,7 +90,7 @@ This model was contributed by [Molbap](https://huggingface.co/Molbap).
 | 
			
		||||
The original code can be found [here](https://github.com/persimmon-ai-labs/adept-inference).
 | 
			
		||||
 | 
			
		||||
- Fuyu uses a `sentencepiece` based tokenizer, with a `Unigram` model. It supports bytefallback, which is only available in `tokenizers==0.14.0` for the fast tokenizer.
 | 
			
		||||
The `LlamaTokenizer` is used as it is a standard wrapper around sentencepiece.
 | 
			
		||||
 | 
			
		||||
- The authors suggest using the following prompt for image captioning: `f"Generate a coco-style caption.\\n"`
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
							
								
								
									
docs/source/en/model_doc/granitemoe.md (new file, 74 lines)
@ -0,0 +1,74 @@
 | 
			
		||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 | 
			
		||||
the License. You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 | 
			
		||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 | 
			
		||||
specific language governing permissions and limitations under the License.
 | 
			
		||||
 | 
			
		||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
 | 
			
		||||
rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
-->
 | 
			
		||||
 | 
			
		||||
# GraniteMoe
 | 
			
		||||
 | 
			
		||||
## Overview
 | 
			
		||||
 | 
			
		||||
The GraniteMoe model was proposed in [Power Scheduler: A Batch Size and Token Number Agnostic Learning Rate Scheduler](https://arxiv.org/abs/2408.13359) by Yikang Shen, Matthew Stallone, Mayank Mishra, Gaoyuan Zhang, Shawn Tan, Aditya Prasad, Adriana Meza Soria, David D. Cox and Rameswar Panda.
 | 
			
		||||
 | 
			
		||||
PowerMoE-3B is a 3B sparse Mixture-of-Experts (sMoE) language model trained with the Power learning rate scheduler. It sparsely activates 800M parameters for each token. It is trained on a mix of open-source and proprietary datasets. PowerMoE-3B has shown promising results compared to other dense models with 2x active parameters across various benchmarks, including natural language multiple-choice tasks, code generation, and math reasoning.
 | 
			
		||||
 | 
			
		||||
The abstract from the paper is the following:
 | 
			
		||||
 | 
			
		||||
*Finding the optimal learning rate for language model pretraining is a challenging task.
 | 
			
		||||
This is not only because there is a complicated correlation between learning rate, batch size, number of training tokens, model size, and other hyperparameters but also because it is prohibitively expensive to perform a hyperparameter search for large language models with Billions or Trillions of parameters. Recent studies propose using small proxy models and small corpus to perform hyperparameter searches and transposing the optimal parameters to large models and large corpus. While the zero-shot transferability is theoretically and empirically proven for model size related hyperparameters, like depth and width, the zero-shot transfer from small corpus to large corpus is underexplored.
 | 
			
		||||
In this paper, we study the correlation between optimal learning rate, batch size, and number of training tokens for the recently proposed WSD scheduler. After thousands of small experiments, we found a power-law relationship between variables and demonstrated its transferability across model sizes. Based on the observation, we propose a new learning rate scheduler, Power scheduler, that is agnostic about the number of training tokens and batch size. The experiment shows that combining the Power scheduler with Maximum Update Parameterization (\mup) can consistently achieve impressive performance with one set of hyperparameters regardless of the number of training tokens, batch size, model size, and even model architecture. Our 3B dense and MoE models trained with the Power scheduler achieve comparable performance as state-of-the-art small language models.
 | 
			
		||||
We [open source](https://huggingface.co/collections/ibm/power-lm-66be64ae647ddf11b9808000) these pretrained models.*
 | 
			
		||||
 | 
			
		||||
Tips:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
import torch
 | 
			
		||||
from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
 | 
			
		||||
model_path = "ibm/PowerMoE-3b"
 | 
			
		||||
tokenizer = AutoTokenizer.from_pretrained(model_path)
 | 
			
		||||
 | 
			
		||||
# drop device_map if running on CPU
 | 
			
		||||
model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto")
 | 
			
		||||
model.eval()
 | 
			
		||||
 | 
			
		||||
# change input text as desired
 | 
			
		||||
prompt = "Write a code to find the maximum value in a list of numbers."
 | 
			
		||||
 | 
			
		||||
# tokenize the text
 | 
			
		||||
input_tokens = tokenizer(prompt, return_tensors="pt")
 | 
			
		||||
# generate output tokens
 | 
			
		||||
output = model.generate(**input_tokens, max_new_tokens=100)
 | 
			
		||||
# decode output tokens into text
 | 
			
		||||
output = tokenizer.batch_decode(output)
 | 
			
		||||
# loop over the batch to print, in this example the batch size is 1
 | 
			
		||||
for i in output:
 | 
			
		||||
    print(i)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
This model was contributed by [mayank-mishra](https://huggingface.co/mayank-mishra).
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## GraniteMoeConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] GraniteMoeConfig
 | 
			
		||||
 | 
			
		||||
## GraniteMoeModel
 | 
			
		||||
 | 
			
		||||
[[autodoc]] GraniteMoeModel
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
## GraniteMoeForCausalLM
 | 
			
		||||
 | 
			
		||||
[[autodoc]] GraniteMoeForCausalLM
 | 
			
		||||
    - forward
 | 
			
		||||
@ -133,7 +133,7 @@ import requests
 | 
			
		||||
 | 
			
		||||
processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
 | 
			
		||||
 | 
			
		||||
model = LlavaNextForConditionalGeneration.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf", torch_dtype=torch.float16, low_cpu_mem_usage=True)
 | 
			
		||||
model.to("cuda:0")
 | 
			
		||||
 | 
			
		||||
# prepare image and text prompt, using the appropriate prompt template
 | 
			
		||||
@ -150,7 +150,7 @@ conversation = [
 | 
			
		||||
    },
 | 
			
		||||
]
 | 
			
		||||
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
 | 
			
		||||
inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
 | 
			
		||||
inputs = processor(image, prompt, return_tensors="pt").to("cuda:0")
 | 
			
		||||
 | 
			
		||||
# autoregressively complete prompt
 | 
			
		||||
output = model.generate(**inputs, max_new_tokens=100)
 | 
			
		||||
@ -222,7 +222,7 @@ prompts = [prompt_1, prompt_2]
 | 
			
		||||
 | 
			
		||||
# We can simply feed images in the order they have to be used in the text prompt
 | 
			
		||||
# Each "<image>" token uses one image leaving the next for the subsequent "<image>" tokens
 | 
			
		||||
inputs = processor(text=prompts, images=[image_stop, image_cats, image_snowman], padding=True, return_tensors="pt").to(model.device)
 | 
			
		||||
inputs = processor(images=[image_stop, image_cats, image_snowman], text=prompts, padding=True, return_tensors="pt").to(model.device)
 | 
			
		||||
 | 
			
		||||
# Generate
 | 
			
		||||
generate_ids = model.generate(**inputs, max_new_tokens=30)
 | 
			
		||||
@ -233,7 +233,17 @@ processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokeniza
 | 
			
		||||
 | 
			
		||||
### Quantization using Bitsandbytes
 | 
			
		||||
 | 
			
		||||
The model can be loaded in 8 or 4 bits, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes, `pip install bitsandbytes` and make sure to have access to a CUDA compatible GPU device. Simply change the snippet above with:
 | 
			
		||||
The model can be loaded in 8 or 4 bits, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes, `pip install bitsandbytes`, and to have access to a GPU/accelerator that is supported by the library.
 | 
			
		||||
 | 
			
		||||
<Tip>
 | 
			
		||||
 | 
			
		||||
bitsandbytes is being refactored to support multiple backends beyond CUDA. Currently, ROCm (AMD GPU) and Intel CPU implementations are mature, with Intel XPU in progress and Apple Silicon support expected by Q4/Q1. For installation instructions and the latest backend updates, visit [this link](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend).
 | 
			
		||||
 | 
			
		||||
We value your feedback to help identify bugs before the full release! Check out [these docs](https://huggingface.co/docs/bitsandbytes/main/en/non_cuda_backends) for more details and feedback links.
 | 
			
		||||
 | 
			
		||||
</Tip>
 | 
			
		||||
 | 
			
		||||
Simply change the snippet above to:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import LlavaNextForConditionalGeneration, BitsAndBytesConfig
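# Sketch of a 4-bit setup; the quantization settings below are assumptions to adapt as needed.
import torch

quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
model = LlavaNextForConditionalGeneration.from_pretrained(
    "llava-hf/llava-v1.6-mistral-7b-hf",
    quantization_config=quantization_config,
    device_map="auto",
)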
 | 
			
		||||
@ -256,8 +266,8 @@ First make sure to install flash-attn. Refer to the [original repository of Flas
 | 
			
		||||
from transformers import LlavaNextForConditionalGeneration
 | 
			
		||||
 | 
			
		||||
model = LlavaNextForConditionalGeneration.from_pretrained(
 | 
			
		||||
    model_id,
 | 
			
		||||
    torch_dtype=torch.float16,
 | 
			
		||||
    low_cpu_mem_usage=True,
 | 
			
		||||
    use_flash_attention_2=True
 | 
			
		||||
).to(0)
 | 
			
		||||
 | 
			
		||||
@ -205,7 +205,17 @@ processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokeniza
 | 
			
		||||
 | 
			
		||||
The model can be loaded in lower bits, significantly reducing memory burden while maintaining the performance of the original model. This allows for efficient deployment in resource-constrained environments.
 | 
			
		||||
 | 
			
		||||
First make sure to install bitsandbytes by running `pip install bitsandbytes` and to have access to a CUDA compatible GPU device. Load the quantized model by simply adding [`BitsAndBytesConfig`](../main_classes/quantization#transformers.BitsAndBytesConfig) as shown below:
 | 
			
		||||
First, make sure to install bitsandbytes by running `pip install bitsandbytes` and to have access to a GPU/accelerator that is supported by the library.
 | 
			
		||||
 | 
			
		||||
<Tip>
 | 
			
		||||
 | 
			
		||||
bitsandbytes is being refactored to support multiple backends beyond CUDA. Currently, ROCm (AMD GPU) and Intel CPU implementations are mature, with Intel XPU in progress and Apple Silicon support expected by Q4/Q1. For installation instructions and the latest backend updates, visit [this link](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend).
 | 
			
		||||
 | 
			
		||||
We value your feedback to help identify bugs before the full release! Check out [these docs](https://huggingface.co/docs/bitsandbytes/main/en/non_cuda_backends) for more details and feedback links.
 | 
			
		||||
 | 
			
		||||
</Tip>
 | 
			
		||||
 | 
			
		||||
Then simply load the quantized model by adding [`BitsAndBytesConfig`](../main_classes/quantization#transformers.BitsAndBytesConfig) as shown below:
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
 | 
			
		||||
@ -264,9 +264,19 @@ processor.batch_decode(out, skip_special_tokens=True, clean_up_tokenization_spac
 | 
			
		||||
 | 
			
		||||
## Model optimization
 | 
			
		||||
 | 
			
		||||
### Quantization using Bitsandbytes
 | 
			
		||||
### Quantization using bitsandbytes
 | 
			
		||||
 | 
			
		||||
The model can be loaded in 8 or 4 bits, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes, `pip install bitsandbytes` and make sure to have access to a CUDA compatible GPU device. Simply change the snippet above with:
 | 
			
		||||
The model can be loaded in 8 or 4 bits, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes, `pip install bitsandbytes`, and to have access to a GPU/accelerator that is supported by the library.
 | 
			
		||||
 | 
			
		||||
<Tip>
 | 
			
		||||
 | 
			
		||||
bitsandbytes is being refactored to support multiple backends beyond CUDA. Currently, ROCm (AMD GPU) and Intel CPU implementations are mature, with Intel XPU in progress and Apple Silicon support expected by Q4/Q1. For installation instructions and the latest backend updates, visit [this link](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend).
 | 
			
		||||
 | 
			
		||||
We value your feedback to help identify bugs before the full release! Check out [these docs](https://huggingface.co/docs/bitsandbytes/main/en/non_cuda_backends) for more details and feedback links.
 | 
			
		||||
 | 
			
		||||
</Tip>
 | 
			
		||||
 | 
			
		||||
Simply change the snippet above to:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import LlavaOnevisionForConditionalGeneration, BitsAndBytesConfig
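# Sketch of an 8-bit setup; the checkpoint id below is an assumption.
model = LlavaOnevisionForConditionalGeneration.from_pretrained(
    "llava-hf/llava-onevision-qwen2-7b-ov-hf",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    device_map="auto",
)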
 | 
			
		||||
 | 
			
		||||
@ -163,3 +163,21 @@ Below is an expected speedup diagram that compares pure inference time between t
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://huggingface.co/datasets/visheratin/documentation-images/resolve/main/nllb-speedup.webp">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
## Using Scaled Dot Product Attention (SDPA)
 | 
			
		||||
PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function
 | 
			
		||||
encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the
 | 
			
		||||
[official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)
 | 
			
		||||
or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)
 | 
			
		||||
page for more information.
 | 
			
		||||
 | 
			
		||||
SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set
 | 
			
		||||
`attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
import torch
from transformers import M2M100ForConditionalGeneration

model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M", torch_dtype=torch.float16, attn_implementation="sdpa")
 | 
			
		||||
...
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).
 | 
			
		||||
@ -141,7 +141,7 @@ The Flash Attention-2 model uses also a more memory efficient cache slicing mech
 | 
			
		||||
 | 
			
		||||
As the Mixtral model has 45 billion parameters, that would require about 90GB of GPU RAM in half precision (float16), since each parameter is stored in 2 bytes. However, one can shrink down the size of the model using [quantization](../quantization.md). If the model is quantized to 4 bits (or half a byte per parameter), a single A100 with 40GB of RAM is enough to fit the entire model, as in that case only about 27 GB of RAM is required.
 | 
			
		||||
 | 
			
		||||
Quantizing a model is as simple as passing a `quantization_config` to the model. Below, we'll leverage the BitsAndyBytes quantization (but refer to [this page](../quantization.md) for other quantization methods):
 | 
			
		||||
Quantizing a model is as simple as passing a `quantization_config` to the model. Below, we'll leverage the bitsandbytes quantization library (but refer to [this page](../quantization.md) for alternative quantization methods):
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
>>> import torch
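>>> # Sketch of how the quantized load could look; the checkpoint id below is an assumption.
>>> from transformers import AutoModelForCausalLM, BitsAndBytesConfig

>>> quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
>>> model = AutoModelForCausalLM.from_pretrained(
...     "mistralai/Mixtral-8x7B-v0.1", quantization_config=quantization_config, device_map="auto"
... )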
 | 
			
		||||
 | 
			
		||||
							
								
								
									
docs/source/en/model_doc/mllama.md (new file, 124 lines)
@ -0,0 +1,124 @@
 | 
			
		||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 | 
			
		||||
the License. You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 | 
			
		||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 | 
			
		||||
specific language governing permissions and limitations under the License.
 | 
			
		||||
 | 
			
		||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
 | 
			
		||||
rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
-->
 | 
			
		||||
 | 
			
		||||
# Mllama
 | 
			
		||||
 | 
			
		||||
## Overview
 | 
			
		||||
 | 
			
		||||
The Llama 3.2-Vision collection of multimodal large language models (LLMs) is a collection of pretrained and instruction-tuned image reasoning generative models in 11B and 90B sizes (text \+ images in / text out). The Llama 3.2-Vision instruction-tuned models are optimized for visual recognition, image reasoning, captioning, and answering general questions about an image.
 | 
			
		||||
 | 
			
		||||
**Model Architecture:** Llama 3.2-Vision is built on top of the Llama 3.1 text-only model, which is an auto-regressive language model that uses an optimized transformer architecture. The tuned versions use supervised fine-tuning (SFT) and reinforcement learning with human feedback (RLHF) to align with human preferences for helpfulness and safety. To support image recognition tasks, the Llama 3.2-Vision model uses a separately trained vision adapter that integrates with the pre-trained Llama 3.1 language model. The adapter consists of a series of cross-attention layers that feed image encoder representations into the core LLM.
 | 
			
		||||
 | 
			
		||||
## Usage Tips
 | 
			
		||||
 | 
			
		||||
- For image+text and text inputs use `MllamaForConditionalGeneration`.
 | 
			
		||||
- For text-only inputs use `MllamaForCausalLM` for generation to avoid loading the vision tower; a minimal text-only sketch is shown after this list.
 | 
			
		||||
- Each sample can contain multiple images, and the number of images can vary between samples. The processor will pad the inputs to the maximum number of images across samples and to a maximum number of tiles within each image.
 | 
			
		||||
- The text passed to the processor should have the `"<|image|>"` tokens where the images should be inserted.
 | 
			
		||||
- The processor has its own `apply_chat_template` method to convert chat messages to text that can then be passed as text to the processor.
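For the text-only case, a minimal sketch could look as follows (whether a given Vision checkpoint loads cleanly into `MllamaForCausalLM` is an assumption to verify for your checkpoint):

```python
from transformers import AutoTokenizer, MllamaForCausalLM

model_id = "meta-llama/Llama-3.2-11B-Vision"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# loads the language model only; vision weights in the checkpoint are not used
model = MllamaForCausalLM.from_pretrained(model_id, device_map="auto")

inputs = tokenizer("If I had to write a haiku about the sea, it would be: ", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=25)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```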
 | 
			
		||||
 | 
			
		||||
## Usage Example
 | 
			
		||||
 | 
			
		||||
#### Instruct model
 | 
			
		||||
```python
 | 
			
		||||
import requests
 | 
			
		||||
import torch
 | 
			
		||||
from PIL import Image
 | 
			
		||||
from transformers import MllamaForConditionalGeneration, AutoProcessor
 | 
			
		||||
 | 
			
		||||
model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"
 | 
			
		||||
model = MllamaForConditionalGeneration.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16)
 | 
			
		||||
processor = AutoProcessor.from_pretrained(model_id)
 | 
			
		||||
 | 
			
		||||
messages = [
 | 
			
		||||
    [
 | 
			
		||||
        {
 | 
			
		||||
            "role": "user", 
 | 
			
		||||
            "content": [
 | 
			
		||||
                {"type": "image"},
 | 
			
		||||
                {"type": "text", "text": "What does the image show?"}
 | 
			
		||||
            ]
 | 
			
		||||
        }
 | 
			
		||||
    ],
 | 
			
		||||
]
 | 
			
		||||
text = processor.apply_chat_template(messages, add_generation_prompt=True)
 | 
			
		||||
 | 
			
		||||
url = "https://llava-vl.github.io/static/images/view.jpg"
 | 
			
		||||
image = Image.open(requests.get(url, stream=True).raw)
 | 
			
		||||
 | 
			
		||||
inputs = processor(text=text, images=image, return_tensors="pt").to(model.device)
 | 
			
		||||
output = model.generate(**inputs, max_new_tokens=25)
 | 
			
		||||
print(processor.decode(output[0]))
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
#### Base model
 | 
			
		||||
```python
 | 
			
		||||
import requests
 | 
			
		||||
import torch
 | 
			
		||||
from PIL import Image
 | 
			
		||||
from transformers import MllamaForConditionalGeneration, AutoProcessor
 | 
			
		||||
 | 
			
		||||
model_id = "meta-llama/Llama-3.2-11B-Vision"
 | 
			
		||||
model = MllamaForConditionalGeneration.from_pretrained(model_id, device_map="auto", torch_dtype=torch.bfloat16)
 | 
			
		||||
processor = AutoProcessor.from_pretrained(model_id)
 | 
			
		||||
 | 
			
		||||
prompt = "<|image|>If I had to write a haiku for this one"
 | 
			
		||||
url = "https://llava-vl.github.io/static/images/view.jpg"
 | 
			
		||||
raw_image = Image.open(requests.get(url, stream=True).raw)
 | 
			
		||||
 | 
			
		||||
inputs = processor(text=prompt, images=raw_image, return_tensors="pt").to(model.device)
 | 
			
		||||
output = model.generate(**inputs, do_sample=False, max_new_tokens=25)
 | 
			
		||||
print(processor.decode(output[0], skip_special_tokens=True))
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## MllamaConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] MllamaConfig
 | 
			
		||||
 | 
			
		||||
## MllamaProcessor
 | 
			
		||||
 | 
			
		||||
[[autodoc]] MllamaProcessor
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## MllamaImageProcessor
 | 
			
		||||
 | 
			
		||||
[[autodoc]] MllamaImageProcessor
 | 
			
		||||
 | 
			
		||||
## MllamaForConditionalGeneration
 | 
			
		||||
 | 
			
		||||
[[autodoc]] MllamaForConditionalGeneration
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
## MllamaForCausalLM
 | 
			
		||||
 | 
			
		||||
[[autodoc]] MllamaForCausalLM
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
## MllamaTextModel
 | 
			
		||||
 | 
			
		||||
[[autodoc]] MllamaTextModel
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
## MllamaVisionModel
 | 
			
		||||
 | 
			
		||||
[[autodoc]] MllamaVisionModel
 | 
			
		||||
    - forward
 | 
			
		||||
@ -188,3 +188,21 @@ Below is an expected speedup diagram that compares pure inference time between t
 | 
			
		||||
<div style="text-align: center">
 | 
			
		||||
<img src="https://huggingface.co/datasets/visheratin/documentation-images/resolve/main/nllb-speedup.webp">
 | 
			
		||||
</div>
 | 
			
		||||
 | 
			
		||||
## Using Scaled Dot Product Attention (SDPA)
 | 
			
		||||
PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function
 | 
			
		||||
encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the
 | 
			
		||||
[official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)
 | 
			
		||||
or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)
 | 
			
		||||
page for more information.
 | 
			
		||||
 | 
			
		||||
SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set
 | 
			
		||||
`attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
import torch
from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M", torch_dtype=torch.float16, attn_implementation="sdpa")
 | 
			
		||||
...
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).
 | 
			
		||||
							
								
								
									
docs/source/en/model_doc/omdet-turbo.md (new file, 164 lines)
@ -0,0 +1,164 @@
 | 
			
		||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 | 
			
		||||
the License. You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 | 
			
		||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 | 
			
		||||
specific language governing permissions and limitations under the License.
 | 
			
		||||
 | 
			
		||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
 | 
			
		||||
rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
-->
 | 
			
		||||
 | 
			
		||||
# OmDet-Turbo
 | 
			
		||||
 | 
			
		||||
## Overview
 | 
			
		||||
 | 
			
		||||
The OmDet-Turbo model was proposed in [Real-time Transformer-based Open-Vocabulary Detection with Efficient Fusion Head](https://arxiv.org/abs/2403.06892) by Tiancheng Zhao, Peng Liu, Xuan He, Lu Zhang, Kyusong Lee. OmDet-Turbo incorporates components from RT-DETR and introduces a swift multimodal fusion module to achieve real-time open-vocabulary object detection capabilities while maintaining high accuracy. The base model achieves performance of up to 100.2 FPS and 53.4 AP on COCO zero-shot.
 | 
			
		||||
 | 
			
		||||
The abstract from the paper is the following:
 | 
			
		||||
 | 
			
		||||
*End-to-end transformer-based detectors (DETRs) have shown exceptional performance in both closed-set and open-vocabulary object detection (OVD) tasks through the integration of language modalities. However, their demanding computational requirements have hindered their practical application in real-time object detection (OD) scenarios. In this paper, we scrutinize the limitations of two leading models in the OVDEval benchmark, OmDet and Grounding-DINO, and introduce OmDet-Turbo. This novel transformer-based real-time OVD model features an innovative Efficient Fusion Head (EFH) module designed to alleviate the bottlenecks observed in OmDet and Grounding-DINO. Notably, OmDet-Turbo-Base achieves a 100.2 frames per second (FPS) with TensorRT and language cache techniques applied. Notably, in zero-shot scenarios on COCO and LVIS datasets, OmDet-Turbo achieves performance levels nearly on par with current state-of-the-art supervised models. Furthermore, it establishes new state-of-the-art benchmarks on ODinW and OVDEval, boasting an AP of 30.1 and an NMS-AP of 26.86, respectively. The practicality of OmDet-Turbo in industrial applications is underscored by its exceptional performance on benchmark datasets and superior inference speed, positioning it as a compelling choice for real-time object detection tasks.*
 | 
			
		||||
 | 
			
		||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/omdet_turbo_architecture.jpeg" alt="drawing" width="600"/>
 | 
			
		||||
 | 
			
		||||
<small> OmDet-Turbo architecture overview. Taken from the <a href="https://arxiv.org/abs/2403.06892">original paper</a>. </small>
 | 
			
		||||
 | 
			
		||||
This model was contributed by [yonigozlan](https://huggingface.co/yonigozlan).
 | 
			
		||||
The original code can be found [here](https://github.com/om-ai-lab/OmDet).
 | 
			
		||||
 | 
			
		||||
## Usage tips
 | 
			
		||||
 | 
			
		||||
One unique property of OmDet-Turbo compared to other zero-shot object detection models, such as [Grounding DINO](grounding-dino), is the decoupled classes and prompt embedding structure that allows caching of text embeddings. This means that the model needs both classes and task as inputs, where classes is a list of objects we want to detect and task is the grounded text used to guide open-vocabulary detection. This approach limits the scope of the open-vocabulary detection and makes the decoding process faster.
 | 
			
		||||
 | 
			
		||||
[`OmDetTurboProcessor`] is used to prepare the classes, task and image triplet. The task input is optional, and when not provided, it will default to `"Detect [class1], [class2], [class3], ..."`. To process the results from the model, one can use `post_process_grounded_object_detection` from [`OmDetTurboProcessor`]. Notably, this function takes in the input classes: unlike in other zero-shot object detection models, the decoupling of classes and task embeddings means that no decoding of the predicted class embeddings is needed in the post-processing step, and the predicted classes can be matched directly to the classes that were passed as input.
 | 
			
		||||
 | 
			
		||||
## Usage example
 | 
			
		||||
 | 
			
		||||
### Single image inference
 | 
			
		||||
 | 
			
		||||
Here's how to load the model and prepare the inputs to perform zero-shot object detection on a single image:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
import requests
 | 
			
		||||
from PIL import Image
 | 
			
		||||
 | 
			
		||||
from transformers import AutoProcessor, OmDetTurboForObjectDetection
 | 
			
		||||
 | 
			
		||||
processor = AutoProcessor.from_pretrained("omlab/omdet-turbo-tiny")
 | 
			
		||||
model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-tiny")
 | 
			
		||||
 | 
			
		||||
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
 | 
			
		||||
image = Image.open(requests.get(url, stream=True).raw)
 | 
			
		||||
classes = ["cat", "remote"]
 | 
			
		||||
inputs = processor(image, text=classes, return_tensors="pt")
 | 
			
		||||
 | 
			
		||||
outputs = model(**inputs)
 | 
			
		||||
 | 
			
		||||
# convert outputs (bounding boxes and class logits)
 | 
			
		||||
results = processor.post_process_grounded_object_detection(
 | 
			
		||||
    outputs,
 | 
			
		||||
    classes=classes,
 | 
			
		||||
    target_sizes=[image.size[::-1]],
 | 
			
		||||
    score_threshold=0.3,
 | 
			
		||||
    nms_threshold=0.3,
 | 
			
		||||
)[0]
 | 
			
		||||
for score, class_name, box in zip(
 | 
			
		||||
    results["scores"], results["classes"], results["boxes"]
 | 
			
		||||
):
 | 
			
		||||
    box = [round(i, 1) for i in box.tolist()]
 | 
			
		||||
    print(
 | 
			
		||||
        f"Detected {class_name} with confidence "
 | 
			
		||||
        f"{round(score.item(), 2)} at location {box}"
 | 
			
		||||
    )
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
### Multi image inference
 | 
			
		||||
 | 
			
		||||
OmDet-Turbo can perform batched multi-image inference, with support for different text prompts and classes in the same batch:
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
>>> import torch
 | 
			
		||||
>>> import requests
 | 
			
		||||
>>> from io import BytesIO
 | 
			
		||||
>>> from PIL import Image
 | 
			
		||||
>>> from transformers import AutoProcessor, OmDetTurboForObjectDetection
 | 
			
		||||
 | 
			
		||||
>>> processor = AutoProcessor.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
 | 
			
		||||
>>> model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
 | 
			
		||||
 | 
			
		||||
>>> url1 = "http://images.cocodataset.org/val2017/000000039769.jpg"
 | 
			
		||||
>>> image1 = Image.open(BytesIO(requests.get(url1).content)).convert("RGB")
 | 
			
		||||
>>> classes1 = ["cat", "remote"]
 | 
			
		||||
>>> task1 = "Detect {}.".format(", ".join(classes1))
 | 
			
		||||
 | 
			
		||||
>>> url2 = "http://images.cocodataset.org/train2017/000000257813.jpg"
 | 
			
		||||
>>> image2 = Image.open(BytesIO(requests.get(url2).content)).convert("RGB")
 | 
			
		||||
>>> classes2 = ["boat"]
 | 
			
		||||
>>> task2 = "Detect everything that looks like a boat."
 | 
			
		||||
 | 
			
		||||
>>> url3 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
 | 
			
		||||
>>> image3 = Image.open(BytesIO(requests.get(url3).content)).convert("RGB")
 | 
			
		||||
>>> classes3 = ["statue", "trees"]
 | 
			
		||||
>>> task3 = "Focus on the foreground, detect statue and trees."
 | 
			
		||||
 | 
			
		||||
>>> inputs = processor(
 | 
			
		||||
...     images=[image1, image2, image3],
 | 
			
		||||
...     text=[classes1, classes2, classes3],
 | 
			
		||||
...     task=[task1, task2, task3],
 | 
			
		||||
...     return_tensors="pt",
 | 
			
		||||
... )
 | 
			
		||||
 | 
			
		||||
>>> with torch.no_grad():
 | 
			
		||||
...     outputs = model(**inputs)
 | 
			
		||||
 | 
			
		||||
>>> # convert outputs (bounding boxes and class logits)
 | 
			
		||||
>>> results = processor.post_process_grounded_object_detection(
 | 
			
		||||
...     outputs,
 | 
			
		||||
...     classes=[classes1, classes2, classes3],
 | 
			
		||||
...     target_sizes=[image1.size[::-1], image2.size[::-1], image3.size[::-1]],
 | 
			
		||||
...     score_threshold=0.2,
 | 
			
		||||
...     nms_threshold=0.3,
 | 
			
		||||
... )
 | 
			
		||||
 | 
			
		||||
>>> for i, result in enumerate(results):
 | 
			
		||||
...     for score, class_name, box in zip(
 | 
			
		||||
...         result["scores"], result["classes"], result["boxes"]
 | 
			
		||||
...     ):
 | 
			
		||||
...         box = [round(i, 1) for i in box.tolist()]
 | 
			
		||||
...         print(
 | 
			
		||||
...             f"Detected {class_name} with confidence "
 | 
			
		||||
...             f"{round(score.item(), 2)} at location {box} in image {i}"
 | 
			
		||||
...         )
 | 
			
		||||
Detected remote with confidence 0.77 at location [39.9, 70.4, 176.7, 118.0] in image 0
 | 
			
		||||
Detected cat with confidence 0.72 at location [11.6, 54.2, 314.8, 474.0] in image 0
 | 
			
		||||
Detected remote with confidence 0.56 at location [333.4, 75.8, 370.7, 187.0] in image 0
 | 
			
		||||
Detected cat with confidence 0.55 at location [345.2, 24.0, 639.8, 371.7] in image 0
 | 
			
		||||
Detected boat with confidence 0.32 at location [146.9, 219.8, 209.6, 250.7] in image 1
 | 
			
		||||
Detected boat with confidence 0.3 at location [319.1, 223.2, 403.2, 238.4] in image 1
 | 
			
		||||
Detected boat with confidence 0.27 at location [37.7, 220.3, 84.0, 235.9] in image 1
 | 
			
		||||
Detected boat with confidence 0.22 at location [407.9, 207.0, 441.7, 220.2] in image 1
 | 
			
		||||
Detected statue with confidence 0.73 at location [544.7, 210.2, 651.9, 502.8] in image 2
 | 
			
		||||
Detected trees with confidence 0.25 at location [3.9, 584.3, 391.4, 785.6] in image 2
 | 
			
		||||
Detected trees with confidence 0.25 at location [1.4, 621.2, 118.2, 787.8] in image 2
 | 
			
		||||
Detected statue with confidence 0.2 at location [428.1, 205.5, 767.3, 759.5] in image 2
 | 
			
		||||
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## OmDetTurboConfig
 | 
			
		||||
 | 
			
		||||
[[autodoc]] OmDetTurboConfig
 | 
			
		||||
 | 
			
		||||
## OmDetTurboProcessor
 | 
			
		||||
 | 
			
		||||
[[autodoc]] OmDetTurboProcessor
 | 
			
		||||
    - post_process_grounded_object_detection
 | 
			
		||||
 | 
			
		||||
## OmDetTurboForObjectDetection
 | 
			
		||||
 | 
			
		||||
[[autodoc]] OmDetTurboForObjectDetection
 | 
			
		||||
    - forward
 | 
			
		||||
@ -41,7 +41,7 @@ processor = AutoProcessor.from_pretrained(model_id)
 | 
			
		||||
prompt = "What is on the flower?"
 | 
			
		||||
image_file = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg?download=true"
 | 
			
		||||
raw_image = Image.open(requests.get(image_file, stream=True).raw)
 | 
			
		||||
inputs = processor(prompt, raw_image, return_tensors="pt")
 | 
			
		||||
inputs = processor(raw_image, prompt, return_tensors="pt")
 | 
			
		||||
output = model.generate(**inputs, max_new_tokens=20)
 | 
			
		||||
 | 
			
		||||
print(processor.decode(output[0], skip_special_tokens=True)[len(prompt):])
 | 
			
		||||
@ -53,7 +53,7 @@ print(processor.decode(output[0], skip_special_tokens=True)[len(prompt):])
 | 
			
		||||
```python
 | 
			
		||||
prompt = "What is on the flower?"
 | 
			
		||||
answer = "a bee"
 | 
			
		||||
inputs = processor(text=prompt, images=raw_image, suffix=answer, return_tensors="pt")
 | 
			
		||||
inputs = processor(images=raw_image, text=prompt, suffix=answer, return_tensors="pt")
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Resources
 | 
			
		||||
 | 
			
		||||
@ -18,20 +18,22 @@ rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
## Overview
 | 
			
		||||
 | 
			
		||||
The Pixtral model was released by the Mistral AI team on [Vllm](https://github.com/vllm-project/vllm/pull/8377), where a version of the code can be found!
 | 
			
		||||
 | 
			
		||||
The Pixtral model was released by the Mistral AI team on [vLLM](https://github.com/vllm-project/vllm/pull/8377), where a version of the code can be found!
 | 
			
		||||
 | 
			
		||||
Tips:
 | 
			
		||||
 | 
			
		||||
- Pixtral is a multimodal model, the main contribution is the 2d ROPE on the images, and support for arbitrary image size (the images are not padded together nor are they resized)
 | 
			
		||||
- This model follows the `Llava` familiy, meaning image embeddings are placed instead of the `[IMG]` token placeholders.
 | 
			
		||||
- Pixtral is a multimodal model, taking images and text as input, and producing text as output.
 | 
			
		||||
- This model follows the [Llava](llava) family, meaning image embeddings are placed instead of the `[IMG]` token placeholders. The model uses [`PixtralVisionModel`] for its vision encoder, and [`MistralForCausalLM`] for its language decoder.
 | 
			
		||||
- The main contribution is the 2d ROPE (rotary position embeddings) on the images, and support for arbitrary image sizes (the images are not padded together nor are they resized).
 | 
			
		||||
- The format for one or multiple prompts is the following:
 | 
			
		||||
```
 | 
			
		||||
"<s>[INST][IMG]\nWhat are the things I should be cautious about when I visit this place?[/INST]"
 | 
			
		||||
```
 | 
			
		||||
Then, the processor will replace each `[IMG]` token with a number of `[IMG]` tokens that depends on the height and the width of the image. Each *row* of the image is separated by a `[IMG_BREAK]` token, and each image is separated by a `[IMG_END]` token.
 | 
			
		||||
 | 
			
		||||
This model was contributed by [amyeroberts](https://huggingface.co/amyeroberts) and [ArthurZ](https://huggingface.co/ArthurZ)
 | 
			
		||||
This model was contributed by [amyeroberts](https://huggingface.co/amyeroberts) and [ArthurZ](https://huggingface.co/ArthurZ). The original code can be found [here](https://github.com/vllm-project/vllm/pull/8377).
 | 
			
		||||
 | 
			
		||||
## Usage
 | 
			
		||||
 | 
			
		||||
Here is an example of how to run it:
 | 
			
		||||
 | 
			
		||||
@ -39,7 +41,7 @@ Here is an example of how to run it:
 | 
			
		||||
from transformers import LlavaForConditionalGeneration, AutoProcessor
 | 
			
		||||
from PIL import Image
 | 
			
		||||
 | 
			
		||||
model_id = "hf-internal-testing/pixtral-12b"
 | 
			
		||||
model_id = "mistral-community/pixtral-12b"
 | 
			
		||||
model = LlavaForConditionalGeneration.from_pretrained(model_id).to("cuda")
 | 
			
		||||
processor = AutoProcessor.from_pretrained(model_id)
 | 
			
		||||
 | 
			
		||||
@ -53,7 +55,7 @@ PROMPT = "<s>[INST]Describe the images.\n[IMG][IMG][IMG][IMG][/INST]"
 | 
			
		||||
 | 
			
		||||
inputs = processor(images=IMG_URLS, text=PROMPT, return_tensors="pt").to("cuda")
 | 
			
		||||
generate_ids = model.generate(**inputs, max_new_tokens=500)
 | 
			
		||||
ouptut = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
 | 
			
		||||
output = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
 | 
			
		||||
 | 
			
		||||
EXPECTED_GENERATION = """
 | 
			
		||||
Describe the images.
 | 
			
		||||
@ -83,9 +85,9 @@ Each image captures a different scene, from a close-up of a dog to expansive nat
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PixtralVisionConfig
 | 
			
		||||
 | 
			
		||||
## PixtralModel
 | 
			
		||||
## PixtralVisionModel
 | 
			
		||||
 | 
			
		||||
[[autodoc]] PixtralModel
 | 
			
		||||
[[autodoc]] PixtralVisionModel
 | 
			
		||||
    - forward
 | 
			
		||||
 | 
			
		||||
## PixtralImageProcessor
 | 
			
		||||
 | 
			
		||||
@ -139,7 +139,17 @@ processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokeniza
 | 
			
		||||
 | 
			
		||||
The model can be loaded in lower bits, significantly reducing memory burden while maintaining the performance of the original model. This allows for efficient deployment in resource-constrained environments.
 | 
			
		||||
 | 
			
		||||
First make sure to install bitsandbytes by running `pip install bitsandbytes` and to have access to a CUDA compatible GPU device. Load the quantized model by simply adding [`BitsAndBytesConfig`](../main_classes/quantization#transformers.BitsAndBytesConfig) as shown below:
 | 
			
		||||
First make sure to install bitsandbytes by running `pip install bitsandbytes` and to have access to a GPU/accelerator that is supported by the library.
 | 
			
		||||
 | 
			
		||||
<Tip>
 | 
			
		||||
 | 
			
		||||
bitsandbytes is being refactored to support multiple backends beyond CUDA. Currently, ROCm (AMD GPU) and Intel CPU implementations are mature, with Intel XPU in progress and Apple Silicon support expected by Q4/Q1. For installation instructions and the latest backend updates, visit [this link](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend).
 | 
			
		||||
 | 
			
		||||
We value your feedback to help identify bugs before the full release! Check out [these docs](https://huggingface.co/docs/bitsandbytes/main/en/non_cuda_backends) for more details and feedback links.
 | 
			
		||||
 | 
			
		||||
</Tip>
 | 
			
		||||
 | 
			
		||||
Load the quantized model by simply adding [`BitsAndBytesConfig`](../main_classes/quantization#transformers.BitsAndBytesConfig) as shown below:
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
 | 
			
		||||
@ -233,7 +233,7 @@ Let's look at the details.
 | 
			
		||||
**Optimizer States:**
 | 
			
		||||
 | 
			
		||||
- 8 bytes * number of parameters for normal AdamW (maintains 2 states)
 | 
			
		||||
- 2 bytes * number of parameters for 8-bit AdamW optimizers like [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)
 | 
			
		||||
- 2 bytes * number of parameters for 8-bit AdamW optimizers like [bitsandbytes](https://github.com/bitsandbytes-foundation/bitsandbytes)
 | 
			
		||||
- 4 bytes * number of parameters for optimizers like SGD with momentum (maintains only 1 state); the quick sketch below makes this arithmetic concrete
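A quick back-of-the-envelope sketch for a hypothetical 7B-parameter model:

```python
num_params = 7_000_000_000  # hypothetical 7B-parameter model

adamw_fp32   = 8 * num_params  # 2 fp32 states (momentum + variance), 4 bytes each
adamw_8bit   = 2 * num_params  # same 2 states, quantized to 1 byte each
sgd_momentum = 4 * num_params  # 1 fp32 state

for name, nbytes in [("AdamW", adamw_fp32), ("8-bit AdamW", adamw_8bit), ("SGD + momentum", sgd_momentum)]:
    print(f"{name}: {nbytes / 2**30:.1f} GiB of optimizer state")
```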
 | 
			
		||||
 | 
			
		||||
**Gradients**
 | 
			
		||||
 | 
			
		||||
							
								
								
									
docs/source/en/modular_transformers.md (new file, 121 lines)
@ -0,0 +1,121 @@
 | 
			
		||||
# Modular transformers
 | 
			
		||||
 | 
			
		||||
`transformers` is an opinionated framework; our philosophy is defined in the following [conceptual guide](./philosophy).
 | 
			
		||||
 | 
			
		||||
The core of that philosophy is exemplified by the [single model, single file](https://huggingface.co/blog/transformers-design-philosophy)
 | 
			
		||||
aspect of the library. The downside of this approach is that it limits the ability to inherit and import components from
one file to another in the toolkit.
 | 
			
		||||
 | 
			
		||||
As a result, model components tend to be repeated across many files. There are as many attention layers defined
 | 
			
		||||
in `transformers` as there are models, and a significant number of those are identical to each other. 
 | 
			
		||||
The unfortunate consequence is that independent implementations tend to diverge as fixes and changes get applied
 | 
			
		||||
to specific parts of the code.
 | 
			
		||||
 | 
			
		||||
To mitigate this issue, we introduced the concept of "copies" across the library. By adding a comment indicating
 | 
			
		||||
that code is a copy of another, we can enforce through CI and local commands that copies do not diverge. However,
 | 
			
		||||
while the complexity is low, this is often quite tedious to do.
 | 
			
		||||
 | 
			
		||||
Finally, this adds a significant overhead to contributing models, which we would like to remove.
 | 
			
		||||
This approach often requires model contributions to add modeling code (~1k lines), processor (~500 lines), tests, docs,
 | 
			
		||||
etc. Model contribution PRs rarely add less than 3-5k lines of code, with much of this code being boilerplate.
 | 
			
		||||
 | 
			
		||||
This raises the bar for contributions, and with Modular Transformers, we're aiming to lower the bar to a much more
 | 
			
		||||
acceptable point.
 | 
			
		||||
 | 
			
		||||
## What is it?
 | 
			
		||||
 | 
			
		||||
Modular Transformers introduces the concept of a "modular" file to a model folder. This modular file accepts code
 | 
			
		||||
that isn't typically accepted in modeling/processing files, as it allows importing from neighbouring models as well
 | 
			
		||||
as inheriting from classes defined in other models.
 | 
			
		||||
 | 
			
		||||
This modular file defines models, processors, and the configuration class that would otherwise be defined in their
 | 
			
		||||
respective modules.
 | 
			
		||||
 | 
			
		||||
Finally, this feature introduces a new `linter` which will "unravel" the modular file into the "single model, single 
 | 
			
		||||
file" directory structure. These files will get auto-generated every time the script is run; reducing the required
 | 
			
		||||
contributions to the modular file, and therefore only to the changes between the contributed model and others.
 | 
			
		||||
 | 
			
		||||
Model users will end up importing and using the single-file interface, so no change is expected here. Doing this, we
 | 
			
		||||
hope to combine the best of both worlds: enabling simple contributions while sticking to our philosophy.
 | 
			
		||||
 | 
			
		||||
This is therefore a replacement for the `# Copied from` markers, and previously contributed models can be expected to
 | 
			
		||||
be moved to the new Modular Transformers format in the coming months.
 | 
			
		||||
 | 
			
		||||
### Details 
 | 
			
		||||
 | 
			
		||||
The "linter", which unravels the inheritance and creates all single-files from the modular file, will flatten the 
 | 
			
		||||
inheritance while trying to be invisible to Python users. At this time, the linter flattens a **single** level of
 | 
			
		||||
inheritance.
 | 
			
		||||
 | 
			
		||||
For example:
 | 
			
		||||
- If a configuration class inherits from another and adds/deletes an argument, the generated file will either directly 
 | 
			
		||||
  reference it (in case of addition) or completely remove it (in case of deletion).
 | 
			
		||||
- If a class inherits from another, for example: class GemmaModel(LlamaModel):, dependencies are automatically 
 | 
			
		||||
  inferred. All submodules will be automatically inferred from the superclass.
 | 
			
		||||
 | 
			
		||||
You should be able to write everything (the tokenizer, the image processor, the model, the config) in this `modular` 
 | 
			
		||||
file, and the corresponding files will be created for you. 
 | 
			
		||||
 | 
			
		||||
### Enforcement
 | 
			
		||||
 | 
			
		||||
[TODO] We are introducing a new test, that makes sure the generated content matches what is present in the `modular_xxxx.py`
 | 
			
		||||
 | 
			
		||||
### Examples
 | 
			
		||||
 | 
			
		||||
Here is a quick example with BERT and RoBERTa. The two models are intimately related: their modeling implementation 
 | 
			
		||||
differs solely by a change in the embedding layer.
 | 
			
		||||
 | 
			
		||||
Instead of redefining the model entirely, here is what the `modular_roberta.py` file looks like for the modeling &
 | 
			
		||||
configuration classes (for the sake of the example, the tokenizer is ignored at this time as very different).
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from torch import nn
 | 
			
		||||
from ..bert.configuration_bert import BertConfig
 | 
			
		||||
from ..bert.modeling_bert import (
 | 
			
		||||
    BertModel,
 | 
			
		||||
    BertEmbeddings,
 | 
			
		||||
    BertForMaskedLM
 | 
			
		||||
)
 | 
			
		||||
 | 
			
		||||
# The RoBERTa config is identical to BERT's config
 | 
			
		||||
class RobertaConfig(BertConfig):
 | 
			
		||||
  model_type = 'roberta'
 | 
			
		||||
 | 
			
		||||
# We redefine the embeddings here to highlight the padding ID difference, and we redefine the position embeddings
 | 
			
		||||
class RobertaEmbeddings(BertEmbeddings):
 | 
			
		||||
    def __init__(self, config):
 | 
			
		||||
        super().__init__(config)
 | 
			
		||||
 | 
			
		||||
        self.padding_idx = config.pad_token_id
 | 
			
		||||
        self.position_embeddings = nn.Embedding(
 | 
			
		||||
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
# The RoBERTa model is identical to the BERT model, except for the embedding layer. 
 | 
			
		||||
# We redefine the embeddings above, so here there is no need to do additional work
 | 
			
		||||
class RobertaModel(BertModel):
 | 
			
		||||
  def __init__(self, config):
 | 
			
		||||
    super().__init__(config)
 | 
			
		||||
    self.embeddings = RobertaEmbeddings(config)
 | 
			
		||||
 | 
			
		||||
      
 | 
			
		||||
# The heads now only need to redefine the model inside to the correct `RobertaModel`
 | 
			
		||||
class RobertaForMaskedLM(BertForMaskedLM):
 | 
			
		||||
  def __init__(self, config):
 | 
			
		||||
    super().__init__(config)
 | 
			
		||||
    self.model = RobertaModel(config)
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Note that if you do not use the dependency that you defined, you will have the following error:
 | 
			
		||||
 | 
			
		||||
```bash
 | 
			
		||||
ValueError: You defined `RobertaEmbeddings` in the modular_roberta.py, it should be used
 | 
			
		||||
                                    when you define `BertModel`, as it is one of it's direct dependencies. Make sure
 | 
			
		||||
                                    you use it in the `__init__` function.
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Additionally, you may find a list of examples here:
 | 
			
		||||
 | 
			
		||||
## What it is not
 | 
			
		||||
 | 
			
		||||
It is not a replacement for the modeling code (yet?), and if your model is not based on anything else that ever existed, then you can add a `modeling` file as usual.
 | 
			
		||||
@ -52,6 +52,7 @@ FlashAttention-2 is currently supported for the following architectures:
 | 
			
		||||
* [GPTNeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox#transformers.GPTNeoXModel)
 | 
			
		||||
* [GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj#transformers.GPTJModel)
 | 
			
		||||
* [Granite](https://huggingface.co/docs/transformers/model_doc/granite#transformers.GraniteModel)
 | 
			
		||||
* [GraniteMoe](https://huggingface.co/docs/transformers/model_doc/granitemoe#transformers.GraniteMoeModel)
 | 
			
		||||
* [Idefics2](https://huggingface.co/docs/transformers/model_doc/idefics2#transformers.Idefics2Model)
 | 
			
		||||
* [Falcon](https://huggingface.co/docs/transformers/model_doc/falcon#transformers.FalconModel)
 | 
			
		||||
* [JetMoe](https://huggingface.co/docs/transformers/model_doc/jetmoe#transformers.JetMoeModel)
 | 
			
		||||
@ -208,6 +209,7 @@ For now, Transformers supports SDPA inference and training for the following arc
 | 
			
		||||
* [Audio Spectrogram Transformer](https://huggingface.co/docs/transformers/model_doc/audio-spectrogram-transformer#transformers.ASTModel)
 | 
			
		||||
* [Bart](https://huggingface.co/docs/transformers/model_doc/bart#transformers.BartModel)
 | 
			
		||||
* [Bert](https://huggingface.co/docs/transformers/model_doc/bert#transformers.BertModel)
 | 
			
		||||
* [BioGpt](https://huggingface.co/docs/transformers/model_doc/biogpt#transformers.BioGptModel)
 | 
			
		||||
* [CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert#transformers.CamembertModel)
 | 
			
		||||
* [Chameleon](https://huggingface.co/docs/transformers/model_doc/chameleon#transformers.Chameleon)
 | 
			
		||||
* [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPModel)
 | 
			
		||||
@ -215,6 +217,7 @@ For now, Transformers supports SDPA inference and training for the following arc
 | 
			
		||||
* [data2vec_audio](https://huggingface.co/docs/transformers/main/en/model_doc/data2vec#transformers.Data2VecAudioModel)
 | 
			
		||||
* [Dbrx](https://huggingface.co/docs/transformers/model_doc/dbrx#transformers.DbrxModel)
 | 
			
		||||
* [DeiT](https://huggingface.co/docs/transformers/model_doc/deit#transformers.DeiTModel)
 | 
			
		||||
* [Dinov2](https://huggingface.co/docs/transformers/en/model_doc/dinov2)
 | 
			
		||||
* [Dpr](https://huggingface.co/docs/transformers/model_doc/dpr#transformers.DprReader)
 | 
			
		||||
* [Falcon](https://huggingface.co/docs/transformers/model_doc/falcon#transformers.FalconModel)
 | 
			
		||||
* [Gemma](https://huggingface.co/docs/transformers/model_doc/gemma#transformers.GemmaModel)
 | 
			
		||||
@ -225,15 +228,19 @@ For now, Transformers supports SDPA inference and training for the following arc
 | 
			
		||||
* [Hubert](https://huggingface.co/docs/transformers/model_doc/hubert#transformers.HubertModel)
 | 
			
		||||
* [Idefics](https://huggingface.co/docs/transformers/model_doc/idefics#transformers.IdeficsModel)
 | 
			
		||||
* [Granite](https://huggingface.co/docs/transformers/model_doc/granite#transformers.GraniteModel)
 | 
			
		||||
* [GraniteMoe](https://huggingface.co/docs/transformers/model_doc/granitemoe#transformers.GraniteMoeModel)
 | 
			
		||||
* [JetMoe](https://huggingface.co/docs/transformers/model_doc/jetmoe#transformers.JetMoeModel)
 | 
			
		||||
* [Jamba](https://huggingface.co/docs/transformers/model_doc/jamba#transformers.JambaModel)
 | 
			
		||||
* [Llama](https://huggingface.co/docs/transformers/model_doc/llama#transformers.LlamaModel)
 | 
			
		||||
* [LLaVA-Onevision](https://huggingface.co/docs/transformers/model_doc/llava_onevision)
 | 
			
		||||
* [M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100#transformers.M2M100Model)
 | 
			
		||||
* [Mimi](https://huggingface.co/docs/transformers/model_doc/mimi)
 | 
			
		||||
* [Mistral](https://huggingface.co/docs/transformers/model_doc/mistral#transformers.MistralModel)
 | 
			
		||||
* [Mllama](https://huggingface.co/docs/transformers/model_doc/mllama#transformers.MllamaForConditionalGeneration)
 | 
			
		||||
* [Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral#transformers.MixtralModel)
 | 
			
		||||
* [Musicgen](https://huggingface.co/docs/transformers/model_doc/musicgen#transformers.MusicgenModel)
 | 
			
		||||
* [MusicGen Melody](https://huggingface.co/docs/transformers/model_doc/musicgen_melody#transformers.MusicgenMelodyModel)
 | 
			
		||||
* [NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)
 | 
			
		||||
* [OLMo](https://huggingface.co/docs/transformers/model_doc/olmo#transformers.OlmoModel)
 | 
			
		||||
* [OLMoE](https://huggingface.co/docs/transformers/model_doc/olmoe#transformers.OlmoeModel)
 | 
			
		||||
* [PaliGemma](https://huggingface.co/docs/transformers/model_doc/paligemma#transformers.PaliGemmaForConditionalGeneration)
 | 
			
		||||
@ -272,7 +279,6 @@ For now, Transformers supports SDPA inference and training for the following arc
 | 
			
		||||
* [XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLModel)
 | 
			
		||||
* [YOLOS](https://huggingface.co/docs/transformers/model_doc/yolos#transformers.YolosModel)
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
<Tip>
 | 
			
		||||
 | 
			
		||||
FlashAttention can only be used for models with the `fp16` or `bf16` torch type, so make sure to cast your model to the appropriate type first. The memory-efficient attention backend is able to handle `fp32` models.
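For illustration, a minimal load that casts the weights and opts into FlashAttention-2 could look like the following sketch (the checkpoint name is only an example):

```python
import torch

from transformers import AutoModelForCausalLM

# Load in bf16 (or fp16) so the FlashAttention-2 kernels can be used.
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-3.1-8B-Instruct",  # example checkpoint
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
)
```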
 | 
			
		||||
 | 
			
		||||
@ -186,7 +186,7 @@ If you prefer to use 🤗 Accelerate, find the 🤗 Accelerate example [further
 | 
			
		||||
 | 
			
		||||
If you have access to Ampere or newer hardware, you can use bf16 for mixed precision training and evaluation. While 
 | 
			
		||||
bf16 has a worse precision than fp16, it has a much bigger dynamic range. In fp16 the biggest number you can have 
 | 
			
		||||
is `65535` and any number above that will result in an overflow. A bf16 number can be as large as `3.39e+38` (!) which 
 | 
			
		||||
is `65504` and any number above that will result in an overflow. A bf16 number can be as large as `3.39e+38` (!) which 
 | 
			
		||||
is about the same as fp32, because both use 8 bits for the numerical range.
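These limits are easy to verify with `torch.finfo`:

```python
import torch

print(torch.finfo(torch.float16).max)   # 65504.0
print(torch.finfo(torch.bfloat16).max)  # ~3.39e+38
print(torch.finfo(torch.float32).max)   # ~3.40e+38
```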
 | 
			
		||||
 | 
			
		||||
You can enable BF16 in the 🤗 Trainer with:
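A minimal sketch of the relevant flag (the rest of the training setup is assumed):

```python
from transformers import TrainingArguments

# bf16=True switches mixed precision training and evaluation to bfloat16.
training_args = TrainingArguments(output_dir="out", bf16=True)
```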
 | 
			
		||||
@ -284,7 +284,7 @@ training_args = TrainingArguments(per_device_train_batch_size=4, optim="adamw_bn
 | 
			
		||||
 | 
			
		||||
However, we can also use a third-party implementation of the 8-bit optimizer for demonstration purposes to see how that can be integrated.
 | 
			
		||||
 | 
			
		||||
First, follow the installation guide in the GitHub [repo](https://github.com/TimDettmers/bitsandbytes) to install the `bitsandbytes` library 
 | 
			
		||||
First, follow the installation guide in the GitHub [repo](https://github.com/bitsandbytes-foundation/bitsandbytes) to install the `bitsandbytes` library 
 | 
			
		||||
that implements the 8-bit Adam optimizer.
 | 
			
		||||
 | 
			
		||||
Next you need to initialize the optimizer. This involves two steps: 
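As a rough sketch of what the result looks like (parameter grouping for weight decay is omitted here, and `model` is assumed to be already defined):

```python
import bitsandbytes as bnb

# 8-bit Adam from bitsandbytes, used as a drop-in replacement for torch.optim.AdamW.
adam_bnb_optim = bnb.optim.Adam8bit(model.parameters(), lr=2e-5, betas=(0.9, 0.999))
```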
 | 
			
		||||
 | 
			
		||||
@ -38,6 +38,14 @@ pip install --upgrade accelerate transformers
 | 
			
		||||
</hfoption>
 | 
			
		||||
</hfoptions>
 | 
			
		||||
 | 
			
		||||
<Tip>
 | 
			
		||||
 | 
			
		||||
bitsandbytes is being refactored to support multiple backends beyond CUDA. Currently, ROCm (AMD GPU) and Intel CPU implementations are mature, with Intel XPU in progress and Apple Silicon support expected by Q4/Q1. For installation instructions and the latest backend updates, visit [this link](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend).
 | 
			
		||||
 | 
			
		||||
We value your feedback to help identify bugs before the full release! Check out [these docs](https://huggingface.co/docs/bitsandbytes/main/en/non_cuda_backends) for more details and feedback links.
 | 
			
		||||
 | 
			
		||||
</Tip>
 | 
			
		||||
 | 
			
		||||
Now you can quantize a model by passing a `BitsAndBytesConfig` to the [`~PreTrainedModel.from_pretrained`] method. This works for any model in any modality, as long as it supports loading with Accelerate and contains `torch.nn.Linear` layers.
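For example, an 8-bit load can be as simple as the following sketch (the checkpoint name is only illustrative):

```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",  # example checkpoint
    quantization_config=quantization_config,
)
```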
 | 
			
		||||
 | 
			
		||||
<hfoptions id="bnb">
 | 
			
		||||
 | 
			
		||||
							
								
								
									
docs/source/en/quantization/compressed_tensors.md (new file, 230 lines)
@ -0,0 +1,230 @@
 | 
			
		||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
 | 
			
		||||
 | 
			
		||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
 | 
			
		||||
the License. You may obtain a copy of the License at
 | 
			
		||||
 | 
			
		||||
http://www.apache.org/licenses/LICENSE-2.0
 | 
			
		||||
 | 
			
		||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
 | 
			
		||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
 | 
			
		||||
specific language governing permissions and limitations under the License.
 | 
			
		||||
 | 
			
		||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
 | 
			
		||||
rendered properly in your Markdown viewer.
 | 
			
		||||
 | 
			
		||||
-->
 | 
			
		||||
# Compressed Tensors
 | 
			
		||||
 | 
			
		||||
The [`compressed-tensors`](https://github.com/neuralmagic/compressed-tensors) library provides a versatile and efficient way to store and manage compressed model checkpoints. This library supports various quantization and sparsity schemes, making it a unified format for handling different model optimizations like GPTQ, AWQ, SmoothQuant, INT8, FP8, SparseGPT, and more.
 | 
			
		||||
 | 
			
		||||
Some of the supported formats include:
 | 
			
		||||
1. `dense`
 | 
			
		||||
2. `int-quantized`: INT8 quantized models
 | 
			
		||||
    - sample [model/config](https://huggingface.co/nm-testing/tinyllama-w8a8-compressed-hf-quantizer)
 | 
			
		||||
3. `float-quantized`: FP8 quantized models; currently supports E4M3
 | 
			
		||||
    - sample [model/config](https://huggingface.co/nm-testing/Meta-Llama-3-8B-Instruct-fp8-hf_compat/tree/main)
 | 
			
		||||
4. `pack-quantized`: INT4 or INT8 weight-quantized models, packed into INT32. For INT4, the weights have an INT4 range but are stored as INT8 and then packed into INT32 (see the toy packing sketch after this list).
 | 
			
		||||
    - sample [model/config](https://huggingface.co/nm-testing/tinyllama-w4a16-compressed-hf-quantizer)
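To make the packing idea concrete, here is a toy sketch of fitting eight INT4 values into one 32-bit word (illustrative only, not the exact layout used by compressed-tensors):

```python
# Eight values in the int4 range [-8, 7].
int4_vals = [1, -2, 3, -4, 5, -6, 7, -8]

# Pack: keep the low 4 bits of each value and shift it into place.
packed = 0
for i, v in enumerate(int4_vals):
    packed |= (v & 0xF) << (4 * i)
print(f"packed 32-bit word: 0x{packed:08X}")

# Unpack: extract each nibble and sign-extend back to int4.
unpacked = [(packed >> (4 * i)) & 0xF for i in range(8)]
unpacked = [v - 16 if v >= 8 else v for v in unpacked]
assert unpacked == int4_vals
```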
 | 
			
		||||
 | 
			
		||||
Compressed models can be easily created using [llm-compressor](https://github.com/vllm-project/llm-compressor).
 | 
			
		||||
Alternatively, models can be created independently and serialized with a compressed-tensors config.
 | 
			
		||||
 | 
			
		||||
To find existing models on the Hugging Face Model Hub, search for the [`compressed-tensors` tag](https://huggingface.co/models?other=compressed-tensors).
 | 
			
		||||
 | 
			
		||||
#### Features:
 | 
			
		||||
 - Weight and activation precisions: FP8, INT4, INT8 (for Q/DQ arbitrary precision is allowed for INT)
 | 
			
		||||
 - Quantization scales and zero-points strategies: [tensor, channel, group, block, token](https://github.com/neuralmagic/compressed-tensors/blob/83b2e7a969d70606421a76b9a3d112646077c8de/src/compressed_tensors/quantization/quant_args.py#L43-L52)
 | 
			
		||||
 - Dynamic per-token activation quantization (or any static strategy)
 | 
			
		||||
 - Sparsity can be applied alongside quantization for further compression
 | 
			
		||||
 - Supports quantization of arbitrary modules, not just Linear modules
 | 
			
		||||
 - Targeted support or ignoring of modules by name or class
 | 
			
		||||
 | 
			
		||||
## Installation
 | 
			
		||||
 | 
			
		||||
It is recommended to install stable releases of compressed-tensors from [PyPI](https://pypi.org/project/compressed-tensors):
 | 
			
		||||
```bash
 | 
			
		||||
pip install compressed-tensors
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
Developers who want to experiment with the latest features can also install the package from source:
 | 
			
		||||
```bash
 | 
			
		||||
git clone https://github.com/neuralmagic/compressed-tensors
 | 
			
		||||
cd compressed-tensors
 | 
			
		||||
pip install -e .
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
## Quickstart Model Load
 | 
			
		||||
Quantized models can be easily loaded for inference as shown below. Only models that have already been quantized can be loaded at the moment. To quantize a model into the compressed-tensors format see [llm-compressor](https://github.com/vllm-project/llm-compressor).
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM
 | 
			
		||||
 | 
			
		||||
# Load the model in compressed-tensors format
 | 
			
		||||
ct_model = AutoModelForCausalLM.from_pretrained("nm-testing/Meta-Llama-3.1-8B-Instruct-FP8-hf")
 | 
			
		||||
 | 
			
		||||
# Measure memory usage
 | 
			
		||||
mem_params = sum([param.nelement()*param.element_size() for param in ct_model.parameters()])
 | 
			
		||||
print(f"{mem_params/2**30:.4f} GB")
 | 
			
		||||
# 8.4575 GB
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
As shown above, the compressed-tensors FP8 checkpoint of Llama 3.1 8B can be loaded for inference using about half the memory of the unquantized reference checkpoint.
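A quick back-of-the-envelope check (assuming roughly 8 billion parameters for Llama 3.1 8B):

```python
n_params = 8.0e9

print(f"BF16 (2 bytes/param): {n_params * 2 / 2**30:.1f} GiB")  # ~14.9 GiB
print(f"FP8  (1 byte/param):  {n_params * 1 / 2**30:.1f} GiB")  # ~7.5 GiB, plus BF16 scales and unquantized layers
```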
 | 
			
		||||
 | 
			
		||||
## Sample Use Cases - Load and run an FP8 model
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM, AutoTokenizer
 | 
			
		||||
 | 
			
		||||
prompt = [
 | 
			
		||||
    "Hello, my name is",
 | 
			
		||||
    "The capital of France is",
 | 
			
		||||
    "The future of AI is"
 | 
			
		||||
]
 | 
			
		||||
 | 
			
		||||
model_name = "nm-testing/Meta-Llama-3-8B-Instruct-fp8-hf_compat"
 | 
			
		||||
 | 
			
		||||
quantized_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
 | 
			
		||||
tokenizer = AutoTokenizer.from_pretrained(model_name)
 | 
			
		||||
 | 
			
		||||
inputs = tokenizer(prompt, return_tensors="pt")
 | 
			
		||||
generated_ids = quantized_model.generate(**inputs, max_length=50, do_sample=False)
 | 
			
		||||
outputs = tokenizer.batch_decode(generated_ids)
 | 
			
		||||
 | 
			
		||||
print(outputs)
 | 
			
		||||
 | 
			
		||||
"""
 | 
			
		||||
['<|begin_of_text|>Hello, my name is [Name]. I am a [Your Profession/Student] and I am here to learn about the [Course/Program] at [University/Institution]. I am excited to be here and I am looking forward to', '<|begin_of_text|>The capital of France is Paris, which is located in the north-central part of the country. Paris is the most populous city in France and is known for its stunning architecture, art museums, fashion, and romantic atmosphere. The city is home to', "<|begin_of_text|>The future of AI is here, and it's already changing the way we live and work. From virtual assistants to self-driving cars, AI is transforming industries and revolutionizing the way we interact with technology. But what does the future of AI hold"]
 | 
			
		||||
"""
 | 
			
		||||
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
The above shows a quick example of running generation with a `compressed-tensors`
model. Note that, once loaded, the model currently cannot be saved.
 | 
			
		||||
 | 
			
		||||
## Deep dive into a compressed-tensors model checkpoint
 | 
			
		||||
 | 
			
		||||
In this example, we will examine how the compressed-tensors model `nm-testing/Meta-Llama-3.1-8B-Instruct-FP8-hf` is defined through its configuration entry and see how this translates to the loaded model representation.
 | 
			
		||||
 | 
			
		||||
First, let us look at the [`quantization_config` of the model](https://huggingface.co/nm-testing/Meta-Llama-3.1-8B-Instruct-FP8-hf/blob/main/config.json). At a glance, the number of entries looks overwhelming, but this is because compressed-tensors is a format that allows for flexible expression both during and after model compression.
 | 
			
		||||
 | 
			
		||||
In practice, for checkpoint loading and inference, the configuration can be simplified to omit the default or empty entries, so we do that here to focus on the compression that is actually represented.
 | 
			
		||||
 | 
			
		||||
```yaml
 | 
			
		||||
"quantization_config": {
 | 
			
		||||
  "config_groups": {
 | 
			
		||||
    "group_0": {
 | 
			
		||||
      "input_activations": {
 | 
			
		||||
        "num_bits": 8,
 | 
			
		||||
        "strategy": "tensor",
 | 
			
		||||
        "type": "float"
 | 
			
		||||
      },
 | 
			
		||||
      "targets": ["Linear"],
 | 
			
		||||
      "weights": {
 | 
			
		||||
        "num_bits": 8,
 | 
			
		||||
        "strategy": "tensor",
 | 
			
		||||
        "type": "float"
 | 
			
		||||
      }
 | 
			
		||||
    }
 | 
			
		||||
  },
 | 
			
		||||
  "format": "naive-quantized",
 | 
			
		||||
  "ignore": ["lm_head"],
 | 
			
		||||
  "quant_method": "compressed-tensors",
 | 
			
		||||
  "quantization_status": "frozen"
 | 
			
		||||
},
 | 
			
		||||
```
 | 
			
		||||
 | 
			
		||||
From the above configuration, we can see that it specifies one config group with weight and activation quantization to FP8, using a static per-tensor strategy. It is also worth noting that the `ignore` list contains an entry to skip quantization of the `lm_head` module, so that module should be untouched in the checkpoint.
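The same entries can also be inspected programmatically. A small sketch (assuming the `quantization_config` entry is exposed as a plain dict on the loaded config):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("nm-testing/Meta-Llama-3.1-8B-Instruct-FP8-hf")
print(config.quantization_config["config_groups"]["group_0"]["weights"])
print(config.quantization_config["ignore"])  # ['lm_head']
```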
 | 
			
		||||
 | 
			
		||||
To see the result of the configuration in practice, we can simply use the [safetensors viewer](https://huggingface.co/nm-testing/Meta-Llama-3.1-8B-Instruct-FP8-hf?show_file_info=model.safetensors.index.json) on the model card to see the quantized weights, input_scale, and weight_scale for all of the Linear modules in the first model layer (and so on for the rest of the layers).
 | 
			
		||||
 | 
			
		||||
| Tensors | Shape | Precision |
| ------- | ----- | --------- |
| model.layers.0.input_layernorm.weight | [4 096] | BF16 |
| model.layers.0.mlp.down_proj.input_scale | [1] | BF16 |
| model.layers.0.mlp.down_proj.weight | [4 096, 14 336] | F8_E4M3 |
| model.layers.0.mlp.down_proj.weight_scale | [1] | BF16 |
| model.layers.0.mlp.gate_proj.input_scale | [1] | BF16 |
| model.layers.0.mlp.gate_proj.weight | [14 336, 4 096] | F8_E4M3 |
| model.layers.0.mlp.gate_proj.weight_scale | [1] | BF16 |
| model.layers.0.mlp.up_proj.input_scale | [1] | BF16 |
| model.layers.0.mlp.up_proj.weight | [14 336, 4 096] | F8_E4M3 |
| model.layers.0.mlp.up_proj.weight_scale | [1] | BF16 |
| model.layers.0.post_attention_layernorm.weight | [4 096] | BF16 |
| model.layers.0.self_attn.k_proj.input_scale | [1] | BF16 |
| model.layers.0.self_attn.k_proj.weight | [1 024, 4 096] | F8_E4M3 |
| model.layers.0.self_attn.k_proj.weight_scale | [1] | BF16 |
| model.layers.0.self_attn.o_proj.input_scale | [1] | BF16 |
| model.layers.0.self_attn.o_proj.weight | [4 096, 4 096] | F8_E4M3 |
| model.layers.0.self_attn.o_proj.weight_scale | [1] | BF16 |
| model.layers.0.self_attn.q_proj.input_scale | [1] | BF16 |
| model.layers.0.self_attn.q_proj.weight | [4 096, 4 096] | F8_E4M3 |
| model.layers.0.self_attn.q_proj.weight_scale | [1] | BF16 |
| model.layers.0.self_attn.v_proj.input_scale | [1] | BF16 |
| model.layers.0.self_attn.v_proj.weight | [1 024, 4 096] | F8_E4M3 |
| model.layers.0.self_attn.v_proj.weight_scale | [1] | BF16 |
 | 
			
		||||
 | 
			
		||||
When we load the model with the compressed-tensors HFQuantizer integration, we can see that all of the Linear modules that are specified within the quantization configuration have been replaced by `CompressedLinear` modules that manage the compressed weights and forward pass for inference. Note that the `lm_head` mentioned before in the ignore list is still kept as an unquantized Linear module.
 | 
			
		||||
 | 
			
		||||
```python
 | 
			
		||||
from transformers import AutoModelForCausalLM
 | 
			
		||||
 | 
			
		||||
ct_model = AutoModelForCausalLM.from_pretrained("nm-testing/Meta-Llama-3.1-8B-Instruct-FP8-hf")
 | 
			
		||||
print(ct_model)
 | 
			
		||||
"""
 | 
			
		||||
LlamaForCausalLM(
 | 
			
		||||
  (model): LlamaModel(
 | 
			
		||||
    (embed_tokens): Embedding(128256, 4096)
 | 
			
		||||
    (layers): ModuleList(
 | 
			
		||||
      (0-31): 32 x LlamaDecoderLayer(
 | 
			
		||||
        (self_attn): LlamaSdpaAttention(
 | 
			
		||||
          (q_proj): CompressedLinear(
 | 
			
		||||
            in_features=4096, out_features=4096, bias=False
 | 
			
		||||
            (input_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
            (weight_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
          )
 | 
			
		||||
          (k_proj): CompressedLinear(
 | 
			
		||||
            in_features=4096, out_features=1024, bias=False
 | 
			
		||||
            (input_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
            (weight_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
          )
 | 
			
		||||
          (v_proj): CompressedLinear(
 | 
			
		||||
            in_features=4096, out_features=1024, bias=False
 | 
			
		||||
            (input_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
            (weight_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
          )
 | 
			
		||||
          (o_proj): CompressedLinear(
 | 
			
		||||
            in_features=4096, out_features=4096, bias=False
 | 
			
		||||
            (input_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
            (weight_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
          )
 | 
			
		||||
          (rotary_emb): LlamaRotaryEmbedding()
 | 
			
		||||
        )
 | 
			
		||||
        (mlp): LlamaMLP(
 | 
			
		||||
          (gate_proj): CompressedLinear(
 | 
			
		||||
            in_features=4096, out_features=14336, bias=False
 | 
			
		||||
            (input_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
            (weight_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
          )
 | 
			
		||||
          (up_proj): CompressedLinear(
 | 
			
		||||
            in_features=4096, out_features=14336, bias=False
 | 
			
		||||
            (input_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
            (weight_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
          )
 | 
			
		||||
          (down_proj): CompressedLinear(
 | 
			
		||||
            in_features=14336, out_features=4096, bias=False
 | 
			
		||||
            (input_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
            (weight_observer): MovingAverageMinMaxObserver()
 | 
			
		||||
          )
 | 
			
		||||
          (act_fn): SiLU()
 | 
			
		||||
        )
 | 
			
		||||
        (input_layernorm): LlamaRMSNorm((4096,), eps=1e-05)
 | 
			
		||||
        (post_attention_layernorm): LlamaRMSNorm((4096,), eps=1e-05)
 | 
			
		||||
      )
 | 
			
		||||
    )
 | 
			
		||||
    (norm): LlamaRMSNorm((4096,), eps=1e-05)
 | 
			
		||||
    (rotary_emb): LlamaRotaryEmbedding()
 | 
			
		||||
  )
 | 
			
		||||
  (lm_head): Linear(in_features=4096, out_features=128256, bias=False)
 | 
			
		||||
)
 | 
			
		||||
"""
 | 
			
		||||
```
 | 
			
		||||
@ -49,7 +49,8 @@ Use the table below to help you decide which quantization method to use.
 | 
			
		||||
|-------------------------------------|-------------------------|-----|----------|----------------|-----------------------|-------------------------|----------------|-------------------------------------|--------------|------------------------|---------------------------------------------|
 | 
			
		||||
| [AQLM](./aqlm)                                | 🔴                       |  🟢   |     🟢     | 🔴              | 🔴                     | 🟢                      | 1 / 2          | 🟢                                   | 🟢            | 🟢                      | https://github.com/Vahe1994/AQLM            |
 | 
			
		||||
| [AWQ](./awq) | 🔴                       | 🔴   | 🟢        | 🟢              | 🔴                     | ?                       | 4              | 🟢                                   | 🟢            | 🟢                      | https://github.com/casper-hansen/AutoAWQ    |
 | 
			
		||||
| [bitsandbytes](./bitsandbytes)                        | 🟢                       | 🔴   |     🟢     | 🔴              | 🔴                     | 🔴                       | 4 / 8          | 🟢                                   | 🟢            | 🟢                      | https://github.com/TimDettmers/bitsandbytes |
 | 
			
		||||
| [bitsandbytes](./bitsandbytes)     | 🟢            | 🟡 *   |     🟢     | 🟡 *            | 🔴 **    | 🔴    (soon!)          | 4 / 8          | 🟢                                   | 🟢            | 🟢                      | https://github.com/bitsandbytes-foundation/bitsandbytes |
 | 
			
		||||
| [compressed-tensors](./compressed_tensors)                        | 🔴                       | 🟢   |     🟢     | 🟢              | 🔴                     | 🔴                       | 1 - 8          | 🟢                                   | 🟢            | 🟢                      | https://github.com/neuralmagic/compressed-tensors |
 | 
			
		||||
| [EETQ](./eetq)                                | 🟢                       | 🔴   | 🟢        | 🔴              | 🔴                     | ?                       | 8              | 🟢                                   | 🟢            | 🟢                      | https://github.com/NetEase-FuXi/EETQ        |
 | 
			
		||||
| GGUF / GGML (llama.cpp)             | 🟢                       | 🟢   | 🟢        | 🔴              | 🟢                     | 🔴                       | 1 - 8          | 🔴                                   | [See GGUF section](../gguf)                | [See GGUF section](../gguf)                      | https://github.com/ggerganov/llama.cpp      |
 | 
			
		||||
| [GPTQ](./gptq)                                | 🔴                       | 🔴   | 🟢        | 🟢              | 🔴                     | 🔴                       | 2 - 3 - 4 - 8          | 🟢                                   | 🟢            | 🟢                      | https://github.com/AutoGPTQ/AutoGPTQ        |
 | 
			
		||||
@ -57,3 +58,17 @@ Use the table below to help you decide which quantization method to use.
 | 
			
		||||
| [Quanto](./quanto)                              | 🟢                       | 🟢   | 🟢        | 🔴              | 🟢                     | 🟢                       | 2 / 4 / 8      | 🔴                                   | 🔴            | 🟢                      | https://github.com/huggingface/quanto       |
 | 
			
		||||
| [FBGEMM_FP8](./fbgemm_fp8.md)                              | 🟢                       | 🔴    | 🟢        | 🔴              | 🔴                      | 🔴                        | 8      | 🔴                                   | 🟢            | 🟢                      | https://github.com/pytorch/FBGEMM       |
 | 
			
		||||
| [torchao](./torchao.md)                              | 🟢                       |     | 🟢        | 🔴              | partial support (int4 weight only)       |                       | 4 / 8      |                                   | 🟢🔴           | 🟢                      | https://github.com/pytorch/ao       |
 | 
			
		||||
 | 
			
		||||
<Tip>
 | 
			
		||||
 | 
			
		||||
\* bitsandbytes is being refactored to support multiple backends beyond CUDA. Currently, ROCm (AMD GPU) and Intel CPU implementations are mature, with Intel XPU in progress and Apple Silicon support expected by Q4/Q1. For installation instructions and the latest backend updates, visit [this link](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend).
 | 
			
		||||
 | 
			
		||||
We value your feedback to help identify bugs before the full release! Check out [these docs](https://huggingface.co/docs/bitsandbytes/main/en/non_cuda_backends) for more details and feedback links.
 | 
			
		||||
 | 
			
		||||
</Tip>
 | 
			
		||||
 | 
			
		||||
<Tip>
 | 
			
		||||
 | 
			
		||||
\** bitsandbytes is seeking contributors to help develop and lead the Apple Silicon backend. Interested? Contact them directly via their repo. Stipends may be available through sponsorships.
 | 
			
		||||
 | 
			
		||||
</Tip>
 | 
			
		||||
 | 
			
		||||
@ -1,20 +0,0 @@
 | 
			
		||||
# Using the `diff_converter` linter
 | 
			
		||||
 | 
			
		||||
`pip install libcst` is a must!
 | 
			
		||||
 | 
			
		||||
# `sh examples/diff-conversion/convert_examples.sh` to get the converted outputs
 | 
			
		||||
 | 
			
		||||
The diff converter is a new `linter` specific to `transformers`. It allows us to unpack inheritance in python to convert a modular `diff` file like `diff_gemma.py` into a `single model single file`. 
 | 
			
		||||
 | 
			
		||||
Examples of possible usage are available in the `examples/diff-conversion`, or `diff_gemma` for a full model usage.
 | 
			
		||||
 | 
			
		||||
`python utils/diff_model_converter.py --files_to_parse "/Users/arthurzucker/Work/transformers/examples/diff-conversion/diff_my_new_model2.py"`
 | 
			
		||||
 | 
			
		||||
## How it works
 | 
			
		||||
We use the `libcst` parser to produce an AST representation of the `diff_xxx.py` file. For any imports that are made from `transformers.models.modeling_xxxx` we parse the source code of that module, and build a class dependency mapping, which allows us to unpack the difference dependencies.
 | 
			
		||||
 | 
			
		||||
The code from the `diff` file and the class dependency mapping are "merged" to produce the single model single file. 
 | 
			
		||||
We use ruff to automatically remove the potential duplicate imports.
 | 
			
		||||
 | 
			
		||||
## Why we use libcst instead of the native AST?
 | 
			
		||||
AST is super powerful, but it does not keep the `docstring`, `comment` or code formatting. Thus we decided to go with `libcst`
 | 
			
		||||
@ -61,7 +61,7 @@ from transformers.utils import check_min_version, send_example_telemetry
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
 | 
			
		||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
 | 
			
		||||
check_min_version("4.45.0.dev0")
 | 
			
		||||
check_min_version("4.45.0")
 | 
			
		||||
 | 
			
		||||
Array = Any
 | 
			
		||||
Dataset = datasets.arrow_dataset.Dataset
 | 
			
		||||
 | 
			
		||||
@ -60,7 +60,7 @@ from transformers.utils.versions import require_version
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
 | 
			
		||||
check_min_version("4.45.0.dev0")
 | 
			
		||||
check_min_version("4.45.0")
 | 
			
		||||
 | 
			
		||||
require_version("datasets>=2.14.0", "To fix: pip install -r examples/flax/speech-recognition/requirements.txt")
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@ -56,7 +56,7 @@ from transformers.utils import check_min_version, send_example_telemetry
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
 | 
			
		||||
check_min_version("4.45.0.dev0")
 | 
			
		||||
check_min_version("4.45.0")
 | 
			
		||||
 | 
			
		||||
Array = Any
 | 
			
		||||
Dataset = datasets.arrow_dataset.Dataset
 | 
			
		||||
 | 
			
		||||
@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
 | 
			
		||||
 | 
			
		||||
logger = logging.getLogger(__name__)
 | 
			
		||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
 | 
			
		||||
check_min_version("4.45.0.dev0")
 | 
			
		||||
check_min_version("4.45.0")
 | 
			
		||||
 | 
			
		||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
							
								
								
									
examples/modular-transformers/README.md (new file, 20 lines)
@ -0,0 +1,20 @@
 | 
			
		||||
# Using the `modular_converter` linter
 | 
			
		||||
 | 
			
		||||
`pip install libcst` is a must!
 | 
			
		||||
 | 
			
		||||
# `sh examples/modular-transformers/convert_examples.sh` to get the converted outputs
 | 
			
		||||
 | 
			
		||||
The modular converter is a new `linter` specific to `transformers`. It allows us to unpack inheritance in Python to convert a modular file like `modular_gemma.py` into a `single model single file`.
 | 
			
		||||
 | 
			
		||||
Examples of possible usage are available in `examples/modular-transformers`; see `modular_gemma` for a full model example.
 | 
			
		||||
 | 
			
		||||
`python utils/modular_model_converter.py --files_to_parse "/Users/arthurzucker/Work/transformers/examples/modular-transformers/modular_my_new_model2.py"`
 | 
			
		||||
 | 
			
		||||
## How it works
 | 
			
		||||
We use the `libcst` parser to produce an AST representation of the `modular_xxx.py` file. For any imports that are made from `transformers.models.modeling_xxxx`, we parse the source code of that module and build a class dependency mapping, which allows us to unpack the modular file's dependencies.
 | 
			
		||||
 | 
			
		||||
The code from the `modular` file and the class dependency mapping are "merged" to produce the single model single file. 
 | 
			
		||||
We use ruff to automatically remove the potential duplicate imports.
 | 
			
		||||
 | 
			
		||||
## Why we use libcst instead of the native AST?
 | 
			
		||||
AST is super powerful, but it does not keep the `docstring`, `comment`, or code formatting. Thus we decided to go with `libcst`.
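A small sketch of why this matters: `libcst` round-trips the source exactly, while the standard `ast` module drops comments:

```python
import ast

import libcst as cst

source = "x = 1  # an important comment\n"

print(ast.unparse(ast.parse(source)))  # "x = 1" -- the comment is gone
print(cst.parse_module(source).code)   # identical to `source`, comment included
```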
 | 
			
		||||
							
								
								
									
examples/modular-transformers/configuration_my_new_model.py (new file, 196 lines)
@ -0,0 +1,196 @@
 | 
			
		||||
#           🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
 | 
			
		||||
#               This file was automatically generated from <path_to_diff_file.py>.
 | 
			
		||||
#         Do NOT edit this file manually as any edits will be overwritten by the generation of
 | 
			
		||||
#         the file from the diff. If any change should be done, please apply the change to the
 | 
			
		||||
#                           diff.py file directly. One of our CI enforces this
 | 
			
		||||
#           🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
 | 
			
		||||
 | 
			
		||||
from ...configuration_utils import PretrainedConfig
 | 
			
		||||
from ...modeling_rope_utils import rope_config_validation
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class MyNewModelConfig(PretrainedConfig):
 | 
			
		||||
    r"""
 | 
			
		||||
    This is the configuration class to store the configuration of a [`MyNewModelModel`]. It is used to instantiate an MyNewModel
 | 
			
		||||
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
 | 
			
		||||
    defaults will yield a similar configuration to that of the MyNewModel-7B.
 | 
			
		||||
 | 
			
		||||
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
 | 
			
		||||
    documentation from [`PretrainedConfig`] for more information.
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
    Args:
 | 
			
		||||
        vocab_size (`int`, *optional*, defaults to 32000):
 | 
			
		||||
            Vocabulary size of the MyNewModel model. Defines the number of different tokens that can be represented by the
 | 
			
		||||
            `inputs_ids` passed when calling [`MyNewModelModel`]
 | 
			
		||||
        hidden_size (`int`, *optional*, defaults to 4096):
 | 
			
		||||
            Dimension of the hidden representations.
 | 
			
		||||
        intermediate_size (`int`, *optional*, defaults to 11008):
 | 
			
		||||
            Dimension of the MLP representations.
 | 
			
		||||
        num_hidden_layers (`int`, *optional*, defaults to 32):
 | 
			
		||||
            Number of hidden layers in the Transformer decoder.
 | 
			
		||||
        num_attention_heads (`int`, *optional*, defaults to 32):
 | 
			
		||||
            Number of attention heads for each attention layer in the Transformer decoder.
 | 
			
		||||
        num_key_value_heads (`int`, *optional*):
 | 
			
		||||
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
 | 
			
		||||
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
 | 
			
		||||
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
 | 
			
		||||
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
 | 
			
		||||
            by meanpooling all the original heads within that group. For more details checkout [this
 | 
			
		||||
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
 | 
			
		||||
            `num_attention_heads`.
 | 
			
		||||
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
 | 
			
		||||
            The non-linear activation function (function or string) in the decoder.
 | 
			
		||||
        max_position_embeddings (`int`, *optional*, defaults to 2048):
 | 
			
		||||
            The maximum sequence length that this model might ever be used with. MyNewModel 1 supports up to 2048 tokens,
 | 
			
		||||
            MyNewModel 2 up to 4096, CodeMyNewModel up to 16384.
 | 
			
		||||
        initializer_range (`float`, *optional*, defaults to 0.02):
 | 
			
		||||
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
 | 
			
		||||
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
 | 
			
		||||
            The epsilon used by the rms normalization layers.
 | 
			
		||||
        use_cache (`bool`, *optional*, defaults to `True`):
 | 
			
		||||
            Whether or not the model should return the last key/values attentions (not used by all models). Only
 | 
			
		||||
            relevant if `config.is_decoder=True`.
 | 
			
		||||
        pad_token_id (`int`, *optional*):
 | 
			
		||||
            Padding token id.
 | 
			
		||||
        bos_token_id (`int`, *optional*, defaults to 1):
 | 
			
		||||
            Beginning of stream token id.
 | 
			
		||||
        eos_token_id (`int`, *optional*, defaults to 2):
 | 
			
		||||
            End of stream token id.
 | 
			
		||||
        pretraining_tp (`int`, *optional*, defaults to 1):
 | 
			
		||||
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
 | 
			
		||||
            document](https://huggingface.co/docs/transformers/main/perf_train_gpu_many#tensor-parallelism) to
 | 
			
		||||
            understand more about it. This value is necessary to ensure exact reproducibility of the pretraining
 | 
			
		||||
            results. Please refer to [this issue](https://github.com/pytorch/pytorch/issues/76232).
 | 
			
		||||
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
 | 
			
		||||
            Whether to tie weight embeddings
 | 
			
		||||
        rope_theta (`float`, *optional*, defaults to 10000.0):
 | 
			
		||||
            The base period of the RoPE embeddings.
 | 
			
		||||
        rope_scaling (`Dict`, *optional*):
 | 
			
		||||
            Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply new rope type
 | 
			
		||||
            and you expect the model to work on longer `max_position_embeddings`, we recommend you to update this value
 | 
			
		||||
            accordingly.
 | 
			
		||||
            Expected contents:
 | 
			
		||||
                `rope_type` (`str`):
 | 
			
		||||
                    The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
 | 
			
		||||
                    'my_new_model3'], with 'default' being the original RoPE implementation.
 | 
			
		||||
                `factor` (`float`, *optional*):
 | 
			
		||||
                    Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
 | 
			
		||||
                    most scaling types, a `factor` of x will enable the model to handle sequences of length x *
 | 
			
		||||
                    original maximum pre-trained length.
 | 
			
		||||
                `original_max_position_embeddings` (`int`, *optional*):
 | 
			
		||||
                    Used with 'dynamic', 'longrope' and 'my_new_model3'. The original max position embeddings used during
 | 
			
		||||
                    pretraining.
 | 
			
		||||
                `attention_factor` (`float`, *optional*):
 | 
			
		||||
                    Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
 | 
			
		||||
                    computation. If unspecified, it defaults to value recommended by the implementation, using the
 | 
			
		||||
                    `factor` field to infer the suggested value.
 | 
			
		||||
                `beta_fast` (`float`, *optional*):
 | 
			
		||||
                    Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
 | 
			
		||||
                    ramp function. If unspecified, it defaults to 32.
 | 
			
		||||
                `beta_slow` (`float`, *optional*):
 | 
			
		||||
                    Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
 | 
			
		||||
                    ramp function. If unspecified, it defaults to 1.
 | 
			
		||||
                `short_factor` (`List[float]`, *optional*):
 | 
			
		||||
                    Only used with 'longrope'. The scaling factor to be applied to short contexts (<
 | 
			
		||||
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
 | 
			
		||||
                    size divided by the number of attention heads divided by 2
 | 
			
		||||
                `long_factor` (`List[float]`, *optional*):
 | 
			
		||||
                    Only used with 'longrope'. The scaling factor to be applied to long contexts (<
 | 
			
		||||
                    `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
 | 
			
		||||
                    size divided by the number of attention heads divided by 2
 | 
			
		||||
                `low_freq_factor` (`float`, *optional*):
 | 
			
		||||
                    Only used with 'my_new_model3'. Scaling factor applied to low frequency components of the RoPE
 | 
			
		||||
                `high_freq_factor` (`float`, *optional*):
 | 
			
		||||
                    Only used with 'my_new_model3'. Scaling factor applied to high frequency components of the RoPE
 | 
			
		||||
        attention_bias (`bool`, *optional*, defaults to `False`):
 | 
			
		||||
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
 | 
			
		||||
        attention_dropout (`float`, *optional*, defaults to 0.0):
 | 
			
		||||
            The dropout ratio for the attention probabilities.
 | 
			
		||||
        mlp_bias (`bool`, *optional*, defaults to `False`):
 | 
			
		||||
            Whether to use a bias in up_proj, down_proj and gate_proj layers in the MLP layers.
 | 
			
		||||
        head_dim (`int`, *optional*):
 | 
			
		||||
            The attention head dimension. If None, it will default to hidden_size // num_heads
 | 
			
		||||
        new_param (`int`, *optional*, defaults to 0):
 | 
			
		||||
            A fun new parameter
 | 
			
		||||
 | 
			
		||||
    ```python
 | 
			
		||||
    >>> from transformers import MyNewModelModel, MyNewModelConfig
 | 
			
		||||
 | 
			
		||||
    >>> # Initializing a MyNewModel my_new_model-7b style configuration
 | 
			
		||||
    >>> configuration = MyNewModelConfig()
 | 
			
		||||
 | 
			
		||||
    >>> # Initializing a model from the my_new_model-7b style configuration
 | 
			
		||||
    >>> model = MyNewModelModel(configuration)
 | 
			
		||||
 | 
			
		||||
    >>> # Accessing the model configuration
 | 
			
		||||
    >>> configuration = model.config
 | 
			
		||||
    ```"""
 | 
			
		||||
 | 
			
		||||
    model_type = "my_new_model"
 | 
			
		||||
    keys_to_ignore_at_inference = ["past_key_values"]
 | 
			
		||||
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self,
 | 
			
		||||
        vocab_size=32000,
 | 
			
		||||
        hidden_size=4096,
 | 
			
		||||
        intermediate_size=11008,
 | 
			
		||||
        num_hidden_layers=32,
 | 
			
		||||
        num_attention_heads=32,
 | 
			
		||||
        num_key_value_heads=None,
 | 
			
		||||
        hidden_act="silu",
 | 
			
		||||
        max_position_embeddings=2048,
 | 
			
		||||
        initializer_range=0.02,
 | 
			
		||||
        rms_norm_eps=1e-6,
 | 
			
		||||
        use_cache=True,
 | 
			
		||||
        pad_token_id=None,
 | 
			
		||||
        bos_token_id=1,
 | 
			
		||||
        eos_token_id=2,
 | 
			
		||||
        pretraining_tp=1,
 | 
			
		||||
        tie_word_embeddings=False,
 | 
			
		||||
        rope_theta=10000.0,
 | 
			
		||||
        rope_scaling=None,
 | 
			
		||||
        attention_bias=False,
 | 
			
		||||
        attention_dropout=0.0,
 | 
			
		||||
        mlp_bias=True,
 | 
			
		||||
        head_dim=None,
 | 
			
		||||
        new_param=0,
 | 
			
		||||
        **kwargs,
 | 
			
		||||
    ):
 | 
			
		||||
        self.vocab_size = vocab_size
 | 
			
		||||
        self.max_position_embeddings = max_position_embeddings
 | 
			
		||||
        self.hidden_size = hidden_size
 | 
			
		||||
        self.intermediate_size = intermediate_size
 | 
			
		||||
        self.num_hidden_layers = num_hidden_layers
 | 
			
		||||
        self.num_attention_heads = num_attention_heads
 | 
			
		||||
 | 
			
		||||
        # for backward compatibility
 | 
			
		||||
        if num_key_value_heads is None:
 | 
			
		||||
            num_key_value_heads = num_attention_heads
 | 
			
		||||
 | 
			
		||||
        self.num_key_value_heads = num_key_value_heads
 | 
			
		||||
        self.hidden_act = hidden_act
 | 
			
		||||
        self.initializer_range = initializer_range
 | 
			
		||||
        self.rms_norm_eps = rms_norm_eps
 | 
			
		||||
        self.pretraining_tp = pretraining_tp
 | 
			
		||||
        self.use_cache = use_cache
 | 
			
		||||
        self.rope_theta = rope_theta
 | 
			
		||||
        self.rope_scaling = rope_scaling
 | 
			
		||||
        self.attention_bias = attention_bias
 | 
			
		||||
        self.attention_dropout = attention_dropout
 | 
			
		||||
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
 | 
			
		||||
        # Validate the correctness of rotary position embeddings parameters
 | 
			
		||||
        # BC: if there is a 'type' field, copy it to 'rope_type'.
 | 
			
		||||
        if self.rope_scaling is not None and "type" in self.rope_scaling:
 | 
			
		||||
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
 | 
			
		||||
        rope_config_validation(self)
 | 
			
		||||
 | 
			
		||||
        super().__init__(
 | 
			
		||||
            pad_token_id=pad_token_id,
 | 
			
		||||
            bos_token_id=bos_token_id,
 | 
			
		||||
            eos_token_id=eos_token_id,
 | 
			
		||||
            tie_word_embeddings=tie_word_embeddings,
 | 
			
		||||
            **kwargs,
 | 
			
		||||
        )
 | 
			
		||||
        self.mlp_bias = mlp_bias
 | 
			
		||||
        self.new_param = new_param
 | 
			
		||||
							
								
								
									
examples/modular-transformers/configuration_my_new_model2.py (new file, 97 lines)
@ -0,0 +1,97 @@
 | 
			
		||||
#           🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
 | 
			
		||||
#               This file was automatically generated from <path_to_diff_file.py>.
 | 
			
		||||
#         Do NOT edit this file manually as any edits will be overwritten by the generation of
 | 
			
		||||
#         the file from the diff. If any change should be done, please apply the change to the
 | 
			
		||||
#                           diff.py file directly. One of our CI enforces this
 | 
			
		||||
#           🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
 | 
			
		||||
from ...configuration_utils import PretrainedConfig
 | 
			
		||||
from ...modeling_rope_utils import rope_config_validation
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class MyNewModel2Config(PretrainedConfig):
 | 
			
		||||
    r"""
 | 
			
		||||
    This is the configuration class to store the configuration of a [`GemmaModel`]. It is used to instantiate an Gemma
 | 
			
		||||
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
 | 
			
		||||
    defaults will yield a similar configuration to that of the Gemma-7B.
 | 
			
		||||
    e.g. [google/gemma-7b](https://huggingface.co/google/gemma-7b)
 | 
			
		||||
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
 | 
			
		||||
    documentation from [`PretrainedConfig`] for more information.
 | 
			
		||||
    Args:
 | 
			
		||||
        vocab_size (`int`, *optional*, defaults to 256000):
 | 
			
		||||
            Vocabulary size of the Gemma model. Defines the number of different tokens that can be represented by the
 | 
			
		||||
            `inputs_ids` passed when calling [`GemmaModel`]
 | 
			
		||||
    ```python
 | 
			
		||||
    >>> from transformers import GemmaModel, GemmaConfig
 | 
			
		||||
    >>> # Initializing a Gemma gemma-7b style configuration
 | 
			
		||||
    >>> configuration = GemmaConfig()
 | 
			
		||||
    >>> # Initializing a model from the gemma-7b style configuration
 | 
			
		||||
    >>> model = GemmaModel(configuration)
 | 
			
		||||
    >>> # Accessing the model configuration
 | 
			
		||||
    >>> configuration = model.config
 | 
			
		||||
    ```"""
 | 
			
		||||
 | 
			
		||||
    model_type = "my_new_model2"
 | 
			
		||||
    keys_to_ignore_at_inference = ["past_key_values"]
 | 
			
		||||
 | 
			
		||||
    def __init__(
 | 
			
		||||
        self,
 | 
			
		||||
        vocab_size=32000,
 | 
			
		||||
        hidden_size=4096,
 | 
			
		||||
        intermediate_size=11008,
 | 
			
		||||
        num_hidden_layers=32,
 | 
			
		||||
        num_attention_heads=32,
 | 
			
		||||
        num_key_value_heads=None,
 | 
			
		||||
        hidden_act="silu",
 | 
			
		||||
        max_position_embeddings=2048,
 | 
			
		||||
        initializer_range=0.02,
 | 
			
		||||
        rms_norm_eps=1e-6,
 | 
			
		||||
        use_cache=True,
 | 
			
		||||
        pad_token_id=None,
 | 
			
		||||
        bos_token_id=1,
 | 
			
		||||
        eos_token_id=2,
 | 
			
		||||
        pretraining_tp=1,
 | 
			
		||||
        tie_word_embeddings=False,
 | 
			
		||||
        rope_theta=10000.0,
 | 
			
		||||
        rope_scaling=None,
 | 
			
		||||
        attention_bias=False,
 | 
			
		||||
        attention_dropout=0.0,
 | 
			
		||||
        mlp_bias=False,
 | 
			
		||||
        head_dim=None,
 | 
			
		||||
        **kwargs,
 | 
			
		||||
    ):
 | 
			
		||||
        self.vocab_size = vocab_size
 | 
			
		||||
        self.max_position_embeddings = max_position_embeddings
 | 
			
		||||
        self.hidden_size = hidden_size
 | 
			
		||||
        self.intermediate_size = intermediate_size
 | 
			
		||||
        self.num_hidden_layers = num_hidden_layers
 | 
			
		||||
        self.num_attention_heads = num_attention_heads
 | 
			
		||||
 | 
			
		||||
        # for backward compatibility
 | 
			
		||||
        if num_key_value_heads is None:
 | 
			
		||||
            num_key_value_heads = num_attention_heads
 | 
			
		||||
 | 
			
		||||
        self.num_key_value_heads = num_key_value_heads
 | 
			
		||||
        self.hidden_act = hidden_act
 | 
			
		||||
        self.initializer_range = initializer_range
 | 
			
		||||
        self.rms_norm_eps = rms_norm_eps
 | 
			
		||||
        self.pretraining_tp = pretraining_tp
 | 
			
		||||
        self.use_cache = use_cache
 | 
			
		||||
        self.rope_theta = rope_theta
 | 
			
		||||
        self.rope_scaling = rope_scaling
 | 
			
		||||
        self.attention_bias = attention_bias
 | 
			
		||||
        self.attention_dropout = attention_dropout
 | 
			
		||||
        self.mlp_bias = mlp_bias
 | 
			
		||||
        self.head_dim = head_dim if head_dim is not None else self.hidden_size // self.num_attention_heads
 | 
			
		||||
        # Validate the correctness of rotary position embeddings parameters
 | 
			
		||||
        # BC: if there is a 'type' field, move it to 'rope_type'.
 | 
			
		||||
        if self.rope_scaling is not None and "type" in self.rope_scaling:
 | 
			
		||||
            self.rope_scaling["rope_type"] = self.rope_scaling["type"]
 | 
			
		||||
        rope_config_validation(self)
 | 
			
		||||
 | 
			
		||||
        super().__init__(
 | 
			
		||||
            pad_token_id=pad_token_id,
 | 
			
		||||
            bos_token_id=bos_token_id,
 | 
			
		||||
            eos_token_id=eos_token_id,
 | 
			
		||||
            tie_word_embeddings=tie_word_embeddings,
 | 
			
		||||
            **kwargs,
 | 
			
		||||
        )
 | 
			
		||||
							
								
								
									
examples/modular-transformers/configuration_new_model.py (new file, 134 lines)
@ -0,0 +1,134 @@
 | 
			
		||||
#           🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
 | 
			
		||||
#               This file was automatically generated from <path_to_diff_file.py>.
 | 
			
		||||
#         Do NOT edit this file manually as any edits will be overwritten by the generation of
 | 
			
		||||
#         the file from the diff. If any change should be done, please apply the change to the
 | 
			
		||||
#                           diff.py file directly. One of our CI enforces this
 | 
			
		||||
#           🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
 | 
			
		||||
# Example where we only want to overwrite the defaults of an init
 | 
			
		||||
 | 
			
		||||
from transformers import PretrainedConfig
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class NewModelConfig(PretrainedConfig):
 | 
			
    r"""
    This is the configuration class to store the configuration of a [`NewModelModel`]. It is used to instantiate a NewModel
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the NewModel-7B.
    e.g. [google/new_model-7b](https://huggingface.co/google/new_model-7b)
    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.
    Args:
        vocab_size (`int`, *optional*, defaults to 256000):
            Vocabulary size of the NewModel model. Defines the number of different tokens that can be represented by the
            `input_ids` passed when calling [`NewModelModel`]
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 24576):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 28):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 16):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 16):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA), otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details, check out [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        head_dim (`int`, *optional*, defaults to 256):
            The attention head dimension.
        hidden_act (`str` or `function`, *optional*, defaults to `"gelu_pytorch_tanh"`):
            The legacy activation function. It is overwritten by the `hidden_activation`.
        hidden_activation (`str` or `function`, *optional*):
            The non-linear activation function (function or string) in the decoder. Will default to `"gelu_pytorch_tanh"`
            if not specified. `"gelu_pytorch_tanh"` uses an approximation of the `"gelu"` activation function.
        max_position_embeddings (`int`, *optional*, defaults to 8192):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        bos_token_id (`int`, *optional*, defaults to 2):
            Beginning of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `True`):
            Whether to tie weight embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.
    ```python
    >>> from transformers import NewModelModel, NewModelConfig
    >>> # Initializing a NewModel new_model-7b style configuration
    >>> configuration = NewModelConfig()
    >>> # Initializing a model from the new_model-7b style configuration
    >>> model = NewModelModel(configuration)
    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "new_model"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=256030,
        hidden_size=64,
        intermediate_size=90,
        num_hidden_layers=28,
        num_attention_heads=16,
        num_key_value_heads=16,
        head_dim=256,
        hidden_act="gelu_pytorch_tanh",
        hidden_activation=None,
        max_position_embeddings=1500,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        bos_token_id=2,
        tie_word_embeddings=True,
        rope_theta=10000.0,
        attention_bias=False,
        attention_dropout=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.head_dim = head_dim
        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.hidden_activation = hidden_activation
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    @property
    def num_heads(self):
        return self.num_attention_heads
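# Illustrative sketch (not part of the generated configuration file): the
# `num_key_value_heads` docstring above describes converting a multi-head checkpoint to a
# GQA checkpoint by mean-pooling the key/value heads of each group. A hypothetical helper,
# assuming projection weights laid out as (num_heads * head_dim, hidden_size):
import torch


def meanpool_kv_heads(weight: torch.Tensor, num_heads: int, num_kv_heads: int, head_dim: int) -> torch.Tensor:
    hidden_size = weight.shape[1]
    # Group the original heads, then average each group into a single key/value head.
    grouped = weight.view(num_kv_heads, num_heads // num_kv_heads, head_dim, hidden_size)
    return grouped.mean(dim=1).reshape(num_kv_heads * head_dim, hidden_size)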
@@ -1,7 +1,7 @@
#!/bin/bash

# Iterate over each file in the current directory
-for file in examples/diff-conversion/diff_*; do
+for file in examples/modular-transformers/modular_*; do
    # Check if it's a regular file
    if [ -f "$file" ]; then
        # Call the Python script with the file name as an argument
 1053  examples/modular-transformers/modeling_dummy.py          (new file; diff suppressed because it is too large)
 1038  examples/modular-transformers/modeling_dummy_bert.py     (new file; diff suppressed because it is too large)
 1059  examples/modular-transformers/modeling_my_new_model2.py  (new file; diff suppressed because it is too large)
  953  examples/modular-transformers/modeling_super.py          (new file)
@@ -0,0 +1,953 @@
#           🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
#               This file was automatically generated from <path_to_diff_file.py>.
#         Do NOT edit this file manually as any edits will be overwritten by the generation of
#         the file from the diff. If any change should be done, please apply the change to the
#                           diff.py file directly. One of our CI enforces this
#           🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
import math
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn

from ...activations import ACT2FN
from ...cache_utils import Cache, StaticCache
from ...modeling_attn_mask_utils import AttentionMaskConverter
from ...modeling_flash_attention_utils import _flash_attention_forward
from ...modeling_outputs import (
    BaseModelOutputWithPast,
)
from ...modeling_rope_utils import ROPE_INIT_FUNCTIONS
from ...modeling_utils import PreTrainedModel
from ...utils import (
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    is_flash_attn_greater_or_equal_2_10,
    logging,
)
from .configuration_super import SuperConfig


logger = logging.get_logger(__name__)


def _prepare_4d_causal_attention_mask_with_cache_position(
    attention_mask: torch.Tensor,
    sequence_length: int,
    target_length: int,
    dtype: torch.dtype,
    device: torch.device,
    min_dtype: float,
    cache_position: torch.Tensor,
    batch_size: int,
):
    """
    Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
    `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

    Args:
        attention_mask (`torch.Tensor`):
            A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
        sequence_length (`int`):
            The sequence length being processed.
        target_length (`int`):
            The target length: when generating with static cache, the mask should be as long as the static cache, to
            account for the 0 padding (the part of the cache that is not filled yet).
        dtype (`torch.dtype`):
            The dtype to use for the 4D attention mask.
        device (`torch.device`):
            The device to place the 4D attention mask on.
        min_dtype (`float`):
            The minimum value representable with the dtype `dtype`.
        cache_position (`torch.Tensor`):
            Indices depicting the position of the input sequence tokens in the sequence.
        batch_size (`int`):
            Batch size.
    """
    if attention_mask is not None and attention_mask.dim() == 4:
        # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
        causal_mask = attention_mask
    else:
        causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device)
        if sequence_length != 1:
            causal_mask = torch.triu(causal_mask, diagonal=1)
        causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
        causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
        if attention_mask is not None:
            causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
            mask_length = attention_mask.shape[-1]
            padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
            padding_mask = padding_mask == 0
            causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                padding_mask, min_dtype
            )

    return causal_mask


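# Illustrative sketch (not part of the generated file): a typical call to the helper above,
# with hypothetical shapes. Padded and future positions end up at `min_dtype`, so they
# vanish after the softmax.
def _example_causal_mask():
    attention_mask = torch.tensor([[0, 1, 1, 1], [1, 1, 1, 1]])  # batch of 2, first row left-padded
    return _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask=attention_mask,
        sequence_length=4,
        target_length=4,
        dtype=torch.float32,
        device=torch.device("cpu"),
        min_dtype=torch.finfo(torch.float32).min,
        cache_position=torch.arange(4),
        batch_size=2,
    )  # -> shape (2, 1, 4, 4)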
class SuperRMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        """
        SuperRMSNorm is equivalent to T5LayerNorm
        """
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)

    def extra_repr(self):
        return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"


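# Illustrative check (not part of the generated file): SuperRMSNorm only rescales each
# vector by its root mean square over the hidden dimension; unlike LayerNorm, no mean is
# subtracted and no bias is added.
def _example_rms_norm_equivalence():
    x = torch.randn(2, 3, 8)
    norm = SuperRMSNorm(hidden_size=8, eps=1e-6)
    manual = x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + 1e-6) * norm.weight
    return torch.allclose(norm(x), manual, atol=1e-6)  # True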
class SuperRotaryEmbedding(nn.Module):
    def __init__(
        self,
        dim=None,
        max_position_embeddings=2048,
        base=10000,
        device=None,
        scaling_factor=1.0,
        rope_type="default",
        config: Optional[SuperConfig] = None,
    ):
        super().__init__()
        # TODO (joao): remove the `if` below, only used for BC
        self.rope_kwargs = {}
        if config is None:
            logger.warning_once(
                "`SuperRotaryEmbedding` can now be fully parameterized by passing the model config through the "
                "`config` argument. All other arguments will be removed in v4.45"
            )
            self.rope_kwargs = {
                "rope_type": rope_type,
                "factor": scaling_factor,
                "dim": dim,
                "base": base,
                "max_position_embeddings": max_position_embeddings,
            }
            self.rope_type = rope_type
            self.max_seq_len_cached = max_position_embeddings
            self.original_max_seq_len = max_position_embeddings
        else:
            # BC: "rope_type" was originally "type"
            if config.rope_scaling is not None:
                self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
            else:
                self.rope_type = "default"
            self.max_seq_len_cached = config.max_position_embeddings
            self.original_max_seq_len = config.max_position_embeddings

        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

    def _dynamic_frequency_update(self, position_ids, device):
        """
        dynamic RoPE layers should recompute `inv_freq` in the following situations:
        1 - growing beyond the cached sequence length (allow scaling)
        2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(
                self.config, device, seq_len=seq_len, **self.rope_kwargs
            )
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

    @torch.no_grad()
    def forward(self, x, position_ids):
        if "dynamic" in self.rope_type:
            self._dynamic_frequency_update(position_ids, device=x.device)

        # Core RoPE block
        inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
        position_ids_expanded = position_ids[:, None, :].float()
        # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
        device_type = x.device.type
        device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
        with torch.autocast(device_type=device_type, enabled=False):
            freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
            emb = torch.cat((freqs, freqs), dim=-1)
            cos = emb.cos()
            sin = emb.sin()

        # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
        cos = cos * self.attention_scaling
        sin = sin * self.attention_scaling

        return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcast to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


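# Illustrative sketch (not part of the generated file): applying RoPE to toy query/key
# tensors. The cos/sin tables are built inline (hypothetical head_dim=8, seq_len=5) so the
# example stays self-contained; in the model they come from SuperRotaryEmbedding above.
def _example_apply_rope():
    head_dim, seq_len = 8, 5
    inv_freq = 1.0 / (10000.0 ** (torch.arange(0, head_dim, 2).float() / head_dim))
    freqs = torch.outer(torch.arange(seq_len).float(), inv_freq)  # (seq_len, head_dim // 2)
    emb = torch.cat((freqs, freqs), dim=-1)[None]                 # (1, seq_len, head_dim)
    q = torch.randn(1, 4, seq_len, head_dim)                      # (batch, heads, seq_len, head_dim)
    k = torch.randn(1, 2, seq_len, head_dim)
    return apply_rotary_pos_emb(q, k, emb.cos(), emb.sin())       # shapes unchanged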
class SuperMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.hidden_size = config.hidden_size
        self.intermediate_size = config.intermediate_size
        self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
        self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
        self.act_fn = ACT2FN[config.hidden_act]

    def forward(self, x):
        if self.config.pretraining_tp > 1:
            slice = self.intermediate_size // self.config.pretraining_tp
            gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)
            up_proj_slices = self.up_proj.weight.split(slice, dim=0)
            down_proj_slices = self.down_proj.weight.split(slice, dim=1)

            gate_proj = torch.cat(
                [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1
            )
            up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)

            intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)
            down_proj = [
                F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)
            ]
            down_proj = sum(down_proj)
        else:
            down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))

        return down_proj


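# Illustrative sketch (not part of the generated file): SuperMLP is a gated MLP,
# down_proj(act_fn(gate_proj(x)) * up_proj(x)). The SimpleNamespace below is a stand-in
# carrying only the attributes the module reads; real code passes a SuperConfig.
def _example_gated_mlp():
    from types import SimpleNamespace

    cfg = SimpleNamespace(hidden_size=16, intermediate_size=32, mlp_bias=False, hidden_act="silu", pretraining_tp=1)
    mlp = SuperMLP(cfg)
    return mlp(torch.randn(2, 5, 16)).shape  # torch.Size([2, 5, 16])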
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


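# Illustrative check (not part of the generated file): repeat_kv matches
# torch.repeat_interleave along the head dimension; it expands the KV heads so GQA can be
# computed with the same matmuls as full multi-head attention.
def _example_repeat_kv():
    kv = torch.randn(2, 4, 5, 8)  # (batch, num_key_value_heads, seq_len, head_dim)
    return torch.equal(repeat_kv(kv, n_rep=3), torch.repeat_interleave(kv, repeats=3, dim=1))  # True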
class SuperAttention(nn.Module):
 | 
			
		||||
    """Multi-headed attention from 'Attention Is All You Need' paper"""
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config: SuperConfig, layer_idx: Optional[int] = None):
 | 
			
		||||
        super().__init__()
 | 
			
		||||
        self.config = config
 | 
			
		||||
        self.layer_idx = layer_idx
 | 
			
		||||
        if layer_idx is None:
 | 
			
		||||
            logger.warning_once(
 | 
			
		||||
                f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
 | 
			
		||||
                "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
 | 
			
		||||
                "when creating this class."
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
        self.attention_dropout = config.attention_dropout
 | 
			
		||||
        self.hidden_size = config.hidden_size
 | 
			
		||||
        self.num_heads = config.num_attention_heads
 | 
			
		||||
        self.head_dim = getattr(config, "head_dim", self.hidden_size // self.num_heads)
 | 
			
		||||
        self.num_key_value_heads = config.num_key_value_heads
 | 
			
		||||
        self.num_key_value_groups = self.num_heads // self.num_key_value_heads
 | 
			
		||||
        self.max_position_embeddings = config.max_position_embeddings
 | 
			
		||||
        self.rope_theta = config.rope_theta
 | 
			
		||||
        self.is_causal = True
 | 
			
		||||
 | 
			
		||||
        self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
 | 
			
		||||
        self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
 | 
			
		||||
        self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
 | 
			
		||||
        self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=config.attention_bias)
 | 
			
		||||
 | 
			
		||||
        # TODO (joao): remove in v4.45 (RoPE is computed in the model, not in the decoder layers)
 | 
			
		||||
        self.rotary_emb = SuperRotaryEmbedding(config=self.config)
 | 
			
		||||
 | 
			
		||||
    def forward(
 | 
			
		||||
        self,
 | 
			
		||||
        hidden_states: torch.Tensor,
 | 
			
		||||
        attention_mask: Optional[torch.Tensor] = None,
 | 
			
		||||
        position_ids: Optional[torch.LongTensor] = None,
 | 
			
		||||
        past_key_value: Optional[Cache] = None,
 | 
			
		||||
        output_attentions: bool = False,
 | 
			
		||||
        use_cache: bool = False,
 | 
			
		||||
        cache_position: Optional[torch.LongTensor] = None,
 | 
			
		||||
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.45
 | 
			
		||||
        **kwargs,
 | 
			
		||||
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
 | 
			
		||||
        bsz, q_len, _ = hidden_states.size()
 | 
			
		||||
 | 
			
		||||
        if self.config.pretraining_tp > 1:
 | 
			
		||||
            key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
 | 
			
		||||
            query_slices = self.q_proj.weight.split(
 | 
			
		||||
                (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0
 | 
			
		||||
            )
 | 
			
		||||
            key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
 | 
			
		||||
            value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)
 | 
			
		||||
 | 
			
		||||
            query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
 | 
			
		||||
            query_states = torch.cat(query_states, dim=-1)
 | 
			
		||||
 | 
			
		||||
            key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
 | 
			
		||||
            key_states = torch.cat(key_states, dim=-1)
 | 
			
		||||
 | 
			
		||||
            value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
 | 
			
		||||
            value_states = torch.cat(value_states, dim=-1)
 | 
			
		||||
 | 
			
		||||
        else:
 | 
			
		||||
            query_states = self.q_proj(hidden_states)
 | 
			
		||||
            key_states = self.k_proj(hidden_states)
 | 
			
		||||
            value_states = self.v_proj(hidden_states)
 | 
			
		||||
 | 
			
		||||
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
 | 
			
		||||
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
 | 
			
		||||
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
 | 
			
		||||
 | 
			
		||||
        if position_embeddings is None:
 | 
			
		||||
            logger.warning_once(
 | 
			
		||||
                "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
 | 
			
		||||
                "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
 | 
			
		||||
                "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be "
 | 
			
		||||
                "removed and `position_embeddings` will be mandatory."
 | 
			
		||||
            )
 | 
			
		||||
            cos, sin = self.rotary_emb(value_states, position_ids)
 | 
			
		||||
        else:
 | 
			
		||||
            cos, sin = position_embeddings
 | 
			
		||||
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
 | 
			
		||||
 | 
			
		||||
        if past_key_value is not None:
 | 
			
		||||
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
 | 
			
		||||
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
 | 
			
		||||
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
 | 
			
		||||
 | 
			
		||||
        key_states = repeat_kv(key_states, self.num_key_value_groups)
 | 
			
		||||
        value_states = repeat_kv(value_states, self.num_key_value_groups)
 | 
			
		||||
        attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
 | 
			
		||||
 | 
			
		||||
        if attention_mask is not None:  # no matter the length, we just slice it
 | 
			
		||||
            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
 | 
			
		||||
            attn_weights = attn_weights + causal_mask
 | 
			
		||||
 | 
			
		||||
        # upcast attention to fp32
 | 
			
		||||
        attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
 | 
			
		||||
        attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
 | 
			
		||||
        attn_output = torch.matmul(attn_weights, value_states)
 | 
			
		||||
 | 
			
		||||
        if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
 | 
			
		||||
            raise ValueError(
 | 
			
		||||
                f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
 | 
			
		||||
                f" {attn_output.size()}"
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
        attn_output = attn_output.transpose(1, 2).contiguous()
 | 
			
		||||
 | 
			
		||||
        attn_output = attn_output.reshape(bsz, q_len, -1)
 | 
			
		||||
 | 
			
		||||
        if self.config.pretraining_tp > 1:
 | 
			
		||||
            attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
 | 
			
		||||
            o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
 | 
			
		||||
            attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
 | 
			
		||||
        else:
 | 
			
		||||
            attn_output = self.o_proj(attn_output)
 | 
			
		||||
 | 
			
		||||
        if not output_attentions:
 | 
			
		||||
            attn_weights = None
 | 
			
		||||
 | 
			
		||||
        return attn_output, attn_weights, past_key_value
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
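# Illustrative sketch (not part of the generated file): the core eager-attention math used
# in SuperAttention.forward above, softmax(Q @ K^T / sqrt(head_dim) + causal_mask) @ V,
# on toy tensors with hypothetical sizes.
def _example_eager_attention_core():
    bsz, num_heads, q_len, head_dim = 1, 2, 4, 8
    q, k, v = (torch.randn(bsz, num_heads, q_len, head_dim) for _ in range(3))
    causal_mask = torch.triu(torch.full((q_len, q_len), torch.finfo(torch.float32).min), diagonal=1)
    attn_weights = torch.softmax(q @ k.transpose(2, 3) / math.sqrt(head_dim) + causal_mask, dim=-1)
    return (attn_weights @ v).shape  # torch.Size([1, 2, 4, 8])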
class SuperFlashAttention2(SuperAttention):
 | 
			
		||||
    """
 | 
			
		||||
    Super flash attention module. This module inherits from `SuperAttention`, as the weights of the module stay
 | 
			
		||||
    untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
 | 
			
		||||
    flash attention and deal with padding tokens in case the input contains any of them.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, *args, **kwargs):
 | 
			
		||||
        super().__init__(*args, **kwargs)
 | 
			
		||||
 | 
			
		||||
        # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
 | 
			
		||||
        # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
 | 
			
		||||
        # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
 | 
			
		||||
        self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
 | 
			
		||||
 | 
			
		||||
    def forward(
 | 
			
		||||
        self,
 | 
			
		||||
        hidden_states: torch.Tensor,
 | 
			
		||||
        attention_mask: Optional[torch.LongTensor] = None,
 | 
			
		||||
        position_ids: Optional[torch.LongTensor] = None,
 | 
			
		||||
        past_key_value: Optional[Cache] = None,
 | 
			
		||||
        output_attentions: bool = False,
 | 
			
		||||
        use_cache: bool = False,
 | 
			
		||||
        cache_position: Optional[torch.LongTensor] = None,
 | 
			
		||||
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.45
 | 
			
		||||
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
 | 
			
		||||
        if isinstance(past_key_value, StaticCache):
 | 
			
		||||
            raise ValueError(
 | 
			
		||||
                "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` "
 | 
			
		||||
                "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers"
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
        output_attentions = False
 | 
			
		||||
 | 
			
		||||
        bsz, q_len, _ = hidden_states.size()
 | 
			
		||||
 | 
			
		||||
        query_states = self.q_proj(hidden_states)
 | 
			
		||||
        key_states = self.k_proj(hidden_states)
 | 
			
		||||
        value_states = self.v_proj(hidden_states)
 | 
			
		||||
 | 
			
		||||
        # Flash attention requires the input to have the shape
 | 
			
		||||
        # batch_size x seq_length x head_dim x hidden_dim
 | 
			
		||||
        # therefore we just need to keep the original shape
 | 
			
		||||
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
 | 
			
		||||
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
 | 
			
		||||
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
 | 
			
		||||
 | 
			
		||||
        if position_embeddings is None:
 | 
			
		||||
            logger.warning_once(
 | 
			
		||||
                "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
 | 
			
		||||
                "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
 | 
			
		||||
                "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be "
 | 
			
		||||
                "removed and `position_embeddings` will be mandatory."
 | 
			
		||||
            )
 | 
			
		||||
            cos, sin = self.rotary_emb(value_states, position_ids)
 | 
			
		||||
        else:
 | 
			
		||||
            cos, sin = position_embeddings
 | 
			
		||||
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
 | 
			
		||||
 | 
			
		||||
        if past_key_value is not None:
 | 
			
		||||
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
 | 
			
		||||
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
 | 
			
		||||
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
 | 
			
		||||
 | 
			
		||||
        # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
 | 
			
		||||
        # to be able to avoid many of these transpose/reshape/view.
 | 
			
		||||
        query_states = query_states.transpose(1, 2)
 | 
			
		||||
        key_states = key_states.transpose(1, 2)
 | 
			
		||||
        value_states = value_states.transpose(1, 2)
 | 
			
		||||
 | 
			
		||||
        dropout_rate = self.attention_dropout if self.training else 0.0
 | 
			
		||||
 | 
			
		||||
        # In PEFT, we usually cast the layer norms to float32 for training stability reasons,
        # so the input hidden states get silently cast to float32. We therefore need to
        # cast them back to the correct dtype just to be sure everything works as expected.
        # This might slow down training & inference, so it is recommended to not cast the LayerNorms
        # to fp32 (SuperRMSNorm handles it correctly).
 | 
			
		||||
 | 
			
		||||
        input_dtype = query_states.dtype
 | 
			
		||||
        if input_dtype == torch.float32:
 | 
			
		||||
            if torch.is_autocast_enabled():
 | 
			
		||||
                target_dtype = torch.get_autocast_gpu_dtype()
 | 
			
		||||
            # Handle the case where the model is quantized
 | 
			
		||||
            elif hasattr(self.config, "_pre_quantization_dtype"):
 | 
			
		||||
                target_dtype = self.config._pre_quantization_dtype
 | 
			
		||||
            else:
 | 
			
		||||
                target_dtype = self.q_proj.weight.dtype
 | 
			
		||||
 | 
			
		||||
            logger.warning_once(
 | 
			
		||||
                f"The input hidden states seems to be silently casted in float32, this might be related to"
 | 
			
		||||
                f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
 | 
			
		||||
                f" {target_dtype}."
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
            query_states = query_states.to(target_dtype)
 | 
			
		||||
            key_states = key_states.to(target_dtype)
 | 
			
		||||
            value_states = value_states.to(target_dtype)
 | 
			
		||||
 | 
			
		||||
        attn_output = _flash_attention_forward(
 | 
			
		||||
            query_states,
 | 
			
		||||
            key_states,
 | 
			
		||||
            value_states,
 | 
			
		||||
            attention_mask,
 | 
			
		||||
            q_len,
 | 
			
		||||
            position_ids=position_ids,
 | 
			
		||||
            dropout=dropout_rate,
 | 
			
		||||
            sliding_window=getattr(self, "sliding_window", None),
 | 
			
		||||
            use_top_left_mask=self._flash_attn_uses_top_left_mask,
 | 
			
		||||
            is_causal=self.is_causal,
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
        attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
 | 
			
		||||
        attn_output = self.o_proj(attn_output)
 | 
			
		||||
 | 
			
		||||
        if not output_attentions:
 | 
			
		||||
            attn_weights = None
 | 
			
		||||
 | 
			
		||||
        return attn_output, attn_weights, past_key_value
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
class SuperSdpaAttention(SuperAttention):
 | 
			
		||||
    """
 | 
			
		||||
    Super attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
 | 
			
		||||
    `SuperAttention`, as the weights of the module stay untouched. The only changes are on the forward pass to adapt to
 | 
			
		||||
    SDPA API.
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    # Adapted from SuperAttention.forward
 | 
			
		||||
    def forward(
 | 
			
		||||
        self,
 | 
			
		||||
        hidden_states: torch.Tensor,
 | 
			
		||||
        attention_mask: Optional[torch.Tensor] = None,
 | 
			
		||||
        position_ids: Optional[torch.LongTensor] = None,
 | 
			
		||||
        past_key_value: Optional[Cache] = None,
 | 
			
		||||
        output_attentions: bool = False,
 | 
			
		||||
        use_cache: bool = False,
 | 
			
		||||
        cache_position: Optional[torch.LongTensor] = None,
 | 
			
		||||
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.45
 | 
			
		||||
        **kwargs,
 | 
			
		||||
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
 | 
			
		||||
        if output_attentions:
 | 
			
		||||
            # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
 | 
			
		||||
            logger.warning_once(
 | 
			
		||||
                "SuperModel is using SuperSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
 | 
			
		||||
                'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
 | 
			
		||||
            )
 | 
			
		||||
            return super().forward(
 | 
			
		||||
                hidden_states=hidden_states,
 | 
			
		||||
                attention_mask=attention_mask,
 | 
			
		||||
                position_ids=position_ids,
 | 
			
		||||
                past_key_value=past_key_value,
 | 
			
		||||
                output_attentions=output_attentions,
 | 
			
		||||
                use_cache=use_cache,
 | 
			
		||||
                cache_position=cache_position,
 | 
			
		||||
                position_embeddings=position_embeddings,
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
        bsz, q_len, _ = hidden_states.size()
 | 
			
		||||
 | 
			
		||||
        query_states = self.q_proj(hidden_states)
 | 
			
		||||
        key_states = self.k_proj(hidden_states)
 | 
			
		||||
        value_states = self.v_proj(hidden_states)
 | 
			
		||||
 | 
			
		||||
        query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
 | 
			
		||||
        key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
 | 
			
		||||
        value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
 | 
			
		||||
 | 
			
		||||
        if position_embeddings is None:
 | 
			
		||||
            logger.warning_once(
 | 
			
		||||
                "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
 | 
			
		||||
                "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
 | 
			
		||||
                "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be "
 | 
			
		||||
                "removed and `position_embeddings` will be mandatory."
 | 
			
		||||
            )
 | 
			
		||||
            cos, sin = self.rotary_emb(value_states, position_ids)
 | 
			
		||||
        else:
 | 
			
		||||
            cos, sin = position_embeddings
 | 
			
		||||
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
 | 
			
		||||
 | 
			
		||||
        if past_key_value is not None:
 | 
			
		||||
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
 | 
			
		||||
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
 | 
			
		||||
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
 | 
			
		||||
 | 
			
		||||
        key_states = repeat_kv(key_states, self.num_key_value_groups)
 | 
			
		||||
        value_states = repeat_kv(value_states, self.num_key_value_groups)
 | 
			
		||||
 | 
			
		||||
        causal_mask = attention_mask
 | 
			
		||||
        if attention_mask is not None:
 | 
			
		||||
            causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
 | 
			
		||||
 | 
			
		||||
        # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
 | 
			
		||||
        # Reference: https://github.com/pytorch/pytorch/issues/112577.
 | 
			
		||||
        if query_states.device.type == "cuda" and causal_mask is not None:
 | 
			
		||||
            query_states = query_states.contiguous()
 | 
			
		||||
            key_states = key_states.contiguous()
 | 
			
		||||
            value_states = value_states.contiguous()
 | 
			
		||||
 | 
			
		||||
        # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
 | 
			
		||||
        # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
 | 
			
		||||
        is_causal = True if causal_mask is None and q_len > 1 else False
 | 
			
		||||
 | 
			
		||||
        attn_output = torch.nn.functional.scaled_dot_product_attention(
 | 
			
		||||
            query_states,
 | 
			
		||||
            key_states,
 | 
			
		||||
            value_states,
 | 
			
		||||
            attn_mask=causal_mask,
 | 
			
		||||
            dropout_p=self.attention_dropout if self.training else 0.0,
 | 
			
		||||
            is_causal=is_causal,
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
        attn_output = attn_output.transpose(1, 2).contiguous()
 | 
			
		||||
        attn_output = attn_output.view(bsz, q_len, -1)
 | 
			
		||||
 | 
			
		||||
        attn_output = self.o_proj(attn_output)
 | 
			
		||||
 | 
			
		||||
        return attn_output, None, past_key_value
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
SUPER_ATTENTION_CLASSES = {
    "eager": SuperAttention,
    "flash_attention_2": SuperFlashAttention2,
    "sdpa": SuperSdpaAttention,
}


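# Illustrative sketch (not part of the generated file): the decoder layer below indexes
# this mapping with config._attn_implementation. The "sdpa" entry ultimately reduces to a
# call like the following one.
def _example_sdpa_call():
    q, k, v = (torch.randn(1, 2, 4, 8) for _ in range(3))
    return torch.nn.functional.scaled_dot_product_attention(q, k, v, is_causal=True).shape  # (1, 2, 4, 8)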
class SuperDecoderLayer(nn.Module):
 | 
			
		||||
    def __init__(self, config: SuperConfig, layer_idx: int):
 | 
			
		||||
        super().__init__()
 | 
			
		||||
        self.hidden_size = config.hidden_size
 | 
			
		||||
 | 
			
		||||
        self.self_attn = SUPER_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
 | 
			
		||||
 | 
			
		||||
        self.mlp = SuperMLP(config)
 | 
			
		||||
        self.input_layernorm = SuperRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
 | 
			
		||||
        self.post_attention_layernorm = SuperRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
 | 
			
		||||
 | 
			
		||||
    def forward(
 | 
			
		||||
        self,
 | 
			
		||||
        hidden_states: torch.Tensor,
 | 
			
		||||
        attention_mask: Optional[torch.Tensor] = None,
 | 
			
		||||
        position_ids: Optional[torch.LongTensor] = None,
 | 
			
		||||
        past_key_value: Optional[Cache] = None,
 | 
			
		||||
        output_attentions: Optional[bool] = False,
 | 
			
		||||
        use_cache: Optional[bool] = False,
 | 
			
		||||
        cache_position: Optional[torch.LongTensor] = None,
 | 
			
		||||
        position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.45
 | 
			
		||||
        **kwargs,
 | 
			
		||||
    ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
 | 
			
		||||
        """
 | 
			
		||||
        Args:
 | 
			
		||||
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
 | 
			
		||||
            attention_mask (`torch.FloatTensor`, *optional*):
 | 
			
		||||
                attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
 | 
			
		||||
                query_sequence_length, key_sequence_length)` if default attention is used.
 | 
			
		||||
            output_attentions (`bool`, *optional*):
 | 
			
		||||
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
 | 
			
		||||
                returned tensors for more detail.
 | 
			
		||||
            use_cache (`bool`, *optional*):
 | 
			
		||||
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
 | 
			
		||||
                (see `past_key_values`).
 | 
			
		||||
            past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
 | 
			
		||||
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
 | 
			
		||||
                Indices depicting the position of the input sequence tokens in the sequence
 | 
			
		||||
            position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
 | 
			
		||||
                Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
 | 
			
		||||
                with `head_dim` being the embedding dimension of each attention head.
 | 
			
		||||
            kwargs (`dict`, *optional*):
 | 
			
		||||
                Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
 | 
			
		||||
                into the model
 | 
			
		||||
        """
 | 
			
		||||
        residual = hidden_states
 | 
			
		||||
 | 
			
		||||
        hidden_states = self.input_layernorm(hidden_states)
 | 
			
		||||
 | 
			
		||||
        # Self Attention
 | 
			
		||||
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
 | 
			
		||||
            hidden_states=hidden_states,
 | 
			
		||||
            attention_mask=attention_mask,
 | 
			
		||||
            position_ids=position_ids,
 | 
			
		||||
            past_key_value=past_key_value,
 | 
			
		||||
            output_attentions=output_attentions,
 | 
			
		||||
            use_cache=use_cache,
 | 
			
		||||
            cache_position=cache_position,
 | 
			
		||||
            position_embeddings=position_embeddings,
 | 
			
		||||
            **kwargs,
 | 
			
		||||
        )
 | 
			
		||||
        hidden_states = residual + hidden_states
 | 
			
		||||
 | 
			
		||||
        # Fully Connected
 | 
			
		||||
        residual = hidden_states
 | 
			
		||||
        hidden_states = self.post_attention_layernorm(hidden_states)
 | 
			
		||||
        hidden_states = self.mlp(hidden_states)
 | 
			
		||||
        hidden_states = residual + hidden_states
 | 
			
		||||
 | 
			
		||||
        outputs = (hidden_states,)
 | 
			
		||||
 | 
			
		||||
        if output_attentions:
 | 
			
		||||
            outputs += (self_attn_weights,)
 | 
			
		||||
 | 
			
		||||
        if use_cache:
 | 
			
		||||
            outputs += (present_key_value,)
 | 
			
		||||
 | 
			
		||||
        return outputs
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
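# Illustrative sketch (not part of the generated file): how the tuple returned by
# SuperDecoderLayer.forward is unpacked. With output_attentions=True and use_cache=False
# it holds exactly (hidden_states, attn_weights); a prepared 4D causal mask is assumed.
def _example_unpack_layer_outputs(layer: SuperDecoderLayer, hidden_states: torch.Tensor, causal_mask: torch.Tensor):
    position_ids = torch.arange(hidden_states.shape[1])[None, :]
    outputs = layer(
        hidden_states,
        attention_mask=causal_mask,
        position_ids=position_ids,
        output_attentions=True,
        use_cache=False,
    )
    hidden_states, attn_weights = outputs
    return hidden_states, attn_weights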
SUPER_START_DOCSTRING = r"""
 | 
			
		||||
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
 | 
			
		||||
    library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
 | 
			
		||||
    etc.)
 | 
			
		||||
 | 
			
		||||
    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
 | 
			
		||||
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
 | 
			
		||||
    and behavior.
 | 
			
		||||
 | 
			
		||||
    Parameters:
 | 
			
		||||
        config ([`SuperConfig`]):
 | 
			
		||||
            Model configuration class with all the parameters of the model. Initializing with a config file does not
 | 
			
		||||
            load the weights associated with the model, only the configuration. Check out the
 | 
			
		||||
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
 | 
			
		||||
"""
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@add_start_docstrings(
    "The bare Super Model outputting raw hidden-states without any specific head on top.",
    SUPER_START_DOCSTRING,
)
class SuperPreTrainedModel(PreTrainedModel):
    config_class = SuperConfig
    base_model_prefix = "model"
    supports_gradient_checkpointing = True
    _no_split_modules = ["SuperDecoderLayer"]
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

    def _init_weights(self, module):
        std = self.config.initializer_range
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()


SUPER_INPUTS_DOCSTRING = r"""
 | 
			
		||||
    Args:
 | 
			
		||||
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
 | 
			
		||||
            Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
 | 
			
		||||
            it.
 | 
			
		||||
 | 
			
		||||
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
 | 
			
		||||
            [`PreTrainedTokenizer.__call__`] for details.
 | 
			
		||||
 | 
			
		||||
            [What are input IDs?](../glossary#input-ids)
 | 
			
		||||
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
 | 
			
		||||
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
 | 
			
		||||
 | 
			
		||||
            - 1 for tokens that are **not masked**,
 | 
			
		||||
            - 0 for tokens that are **masked**.
 | 
			
		||||
 | 
			
		||||
            [What are attention masks?](../glossary#attention-mask)
 | 
			
		||||
 | 
			
		||||
            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
 | 
			
		||||
            [`PreTrainedTokenizer.__call__`] for details.
 | 
			
		||||
 | 
			
		||||
            If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
 | 
			
		||||
            `past_key_values`).
 | 
			
		||||
 | 
			
		||||
            If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
 | 
			
		||||
            and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
 | 
			
		||||
            information on the default strategy.
 | 
			
		||||
 | 
			
		||||
            - 1 indicates the head is **not masked**,
 | 
			
		||||
            - 0 indicates the head is **masked**.
 | 
			
		||||
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
 | 
			
		||||
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
 | 
			
		||||
            config.n_positions - 1]`.
 | 
			
		||||
 | 
			
		||||
            [What are position IDs?](../glossary#position-ids)
 | 
			
		||||
        past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
 | 
			
		||||
            Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
 | 
			
		||||
            blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
 | 
			
		||||
            returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
 | 
			
		||||
 | 
			
		||||
            Two formats are allowed:
 | 
			
		||||
            - a [`~cache_utils.Cache`] instance;
 | 
			
		||||
            - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
 | 
			
		||||
            shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
 | 
			
		||||
            cache format.
 | 
			
		||||
 | 
			
		||||
            The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
 | 
			
		||||
            legacy cache format will be returned.
 | 
			
		||||
 | 
			
		||||
            If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
 | 
			
		||||
            have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
 | 
			
		||||
            of shape `(batch_size, sequence_length)`.
 | 
			
		||||
        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
 | 
			
		||||
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
 | 
			
		||||
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
 | 
			
		||||
            model's internal embedding lookup matrix.
 | 
			
		||||
        use_cache (`bool`, *optional*):
 | 
			
		||||
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
 | 
			
		||||
            `past_key_values`).
 | 
			
		||||
        output_attentions (`bool`, *optional*):
 | 
			
		||||
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
 | 
			
		||||
            tensors for more detail.
 | 
			
		||||
        output_hidden_states (`bool`, *optional*):
 | 
			
		||||
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
 | 
			
		||||
            more detail.
 | 
			
		||||
        return_dict (`bool`, *optional*):
 | 
			
		||||
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
 | 
			
		||||
        cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
 | 
			
		||||
            Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
 | 
			
		||||
            this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
 | 
			
		||||
            the complete sequence length.
 | 
			
		||||
"""
 | 
			
		||||
 | 
			
		||||
 | 
			
		||||
@add_start_docstrings(
 | 
			
		||||
    "The bare Super Model outputting raw hidden-states without any specific head on top.",
 | 
			
		||||
    SUPER_START_DOCSTRING,
 | 
			
		||||
)
 | 
			
		||||
class SuperModel(SuperPreTrainedModel):
 | 
			
		||||
    """
 | 
			
		||||
    Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`SuperDecoderLayer`]
 | 
			
		||||
 | 
			
		||||
    Args:
 | 
			
		||||
        config: SuperConfig
 | 
			
		||||
    """
 | 
			
		||||
 | 
			
		||||
    def __init__(self, config: SuperConfig):
 | 
			
		||||
        super().__init__(config)
 | 
			
		||||
        self.padding_idx = config.pad_token_id
 | 
			
		||||
        self.vocab_size = config.vocab_size
 | 
			
		||||
 | 
			
		||||
        self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
 | 
			
		||||
        self.layers = nn.ModuleList(
 | 
			
		||||
            [SuperDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
 | 
			
		||||
        )
 | 
			
		||||
        self.norm = SuperRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
 | 
			
		||||
        self.rotary_emb = SuperRotaryEmbedding(config=config)
 | 
			
		||||
        self.gradient_checkpointing = False
 | 
			
		||||
 | 
			
		||||
        # Initialize weights and apply final processing
 | 
			
		||||
        self.post_init()
 | 
			
		||||
 | 
			
		||||
    def get_input_embeddings(self):
 | 
			
		||||
        return self.embed_tokens
 | 
			
		||||
 | 
			
		||||
    def set_input_embeddings(self, value):
 | 
			
		||||
        self.embed_tokens = value
 | 
			
		||||
 | 
			
		||||
    @add_start_docstrings_to_model_forward(SUPER_INPUTS_DOCSTRING)
 | 
			
		||||
    def forward(
 | 
			
		||||
        self,
 | 
			
		||||
        input_ids: torch.LongTensor = None,
 | 
			
		||||
        attention_mask: Optional[torch.Tensor] = None,
 | 
			
		||||
        position_ids: Optional[torch.LongTensor] = None,
 | 
			
		||||
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
 | 
			
		||||
        inputs_embeds: Optional[torch.FloatTensor] = None,
 | 
			
		||||
        use_cache: Optional[bool] = None,
 | 
			
		||||
        output_attentions: Optional[bool] = None,
 | 
			
		||||
        output_hidden_states: Optional[bool] = None,
 | 
			
		||||
        return_dict: Optional[bool] = None,
 | 
			
		||||
        cache_position: Optional[torch.LongTensor] = None,
 | 
			
		||||
    ) -> Union[Tuple, BaseModelOutputWithPast]:
 | 
			
		||||
        out = super().forward(
 | 
			
		||||
            input_ids,
 | 
			
		||||
            attention_mask,
 | 
			
		||||
            position_ids,
 | 
			
		||||
            past_key_values,
 | 
			
		||||
            inputs_embeds,
 | 
			
		||||
            use_cache,
 | 
			
		||||
            output_attentions,
 | 
			
		||||
            output_hidden_states,
 | 
			
		||||
            return_dict,
 | 
			
		||||
            cache_position,
 | 
			
		||||
        )
 | 
			
		||||
        out.logits *= 2**4
 | 
			
		||||
        return out
 | 
			
		||||
 | 
			
		||||
    def _update_causal_mask(
 | 
			
		||||
        self,
 | 
			
		||||
        attention_mask: torch.Tensor,
 | 
			
		||||
        input_tensor: torch.Tensor,
 | 
			
		||||
        cache_position: torch.Tensor,
 | 
			
		||||
        past_key_values: Cache,
 | 
			
		||||
        output_attentions: bool,
 | 
			
		||||
    ):
 | 
			
		||||
        if self.config._attn_implementation == "flash_attention_2":
 | 
			
		||||
            if attention_mask is not None and 0.0 in attention_mask:
 | 
			
		||||
                return attention_mask
 | 
			
		||||
            return None
 | 
			
		||||
 | 
			
		||||
        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
 | 
			
		||||
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
 | 
			
		||||
        # to infer the attention mask.
 | 
			
		||||
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
 | 
			
		||||
        using_static_cache = isinstance(past_key_values, StaticCache)
 | 
			
		||||
 | 
			
		||||
        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
 | 
			
		||||
        if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
 | 
			
		||||
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
 | 
			
		||||
                attention_mask,
 | 
			
		||||
                inputs_embeds=input_tensor,
 | 
			
		||||
                past_key_values_length=past_seen_tokens,
 | 
			
		||||
                is_training=self.training,
 | 
			
		||||
            ):
 | 
			
		||||
                return None
 | 
			
		||||
 | 
			
		||||
        dtype, device = input_tensor.dtype, input_tensor.device
 | 
			
		||||
        min_dtype = torch.finfo(dtype).min
 | 
			
		||||
        sequence_length = input_tensor.shape[1]
 | 
			
		||||
        if using_static_cache:
 | 
			
		||||
            target_length = past_key_values.get_max_length()
 | 
			
		||||
        else:
 | 
			
		||||
            target_length = (
 | 
			
		||||
                attention_mask.shape[-1]
 | 
			
		||||
                if isinstance(attention_mask, torch.Tensor)
 | 
			
		||||
                else past_seen_tokens + sequence_length + 1
 | 
			
		||||
            )
 | 
			
		||||
 | 
			
		||||
        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
 | 
			
		||||
        causal_mask = _prepare_4d_causal_attention_mask_with_cache_position(
 | 
			
		||||
            attention_mask,
 | 
			
		||||
            sequence_length=sequence_length,
 | 
			
		||||
            target_length=target_length,
 | 
			
		||||
            dtype=dtype,
 | 
			
		||||
            device=device,
 | 
			
		||||
            min_dtype=min_dtype,
 | 
			
		||||
            cache_position=cache_position,
 | 
			
		||||
            batch_size=input_tensor.shape[0],
 | 
			
		||||
        )
 | 
			
		||||
 | 
			
		||||
        if (
 | 
			
		||||
            self.config._attn_implementation == "sdpa"
 | 
			
		||||
            and attention_mask is not None
 | 
			
		||||
            and attention_mask.device.type == "cuda"
 | 
			
		||||
            and not output_attentions
 | 
			
		||||
        ):
 | 
			
		||||
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
 | 
			
		||||
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
 | 
			
		||||
            # Details: https://github.com/pytorch/pytorch/issues/110213
 | 
			
		||||
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
 | 
			
		||||
 | 
			
		||||
        return causal_mask
 | 
			
		||||
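Aside: the two `past_key_values` formats described in `SUPER_INPUTS_DOCSTRING` above (a `Cache` instance versus the legacy tuple-of-tuples) can be converted into one another with the generic cache utilities. A minimal sketch, assuming a transformers version that ships `DynamicCache` (4.36 or later); the tensor shapes are placeholder values for illustration, not part of the diff:

# Sketch only: illustrates the two cache formats named in the docstring above.
import torch
from transformers import DynamicCache

# Legacy format: one (key, value) pair per layer, each of shape
# (batch_size, num_heads, sequence_length, embed_size_per_head).
legacy = tuple((torch.zeros(1, 4, 3, 8), torch.zeros(1, 4, 3, 8)) for _ in range(2))

# `Cache` format: e.g. a DynamicCache built from the legacy tuples.
cache = DynamicCache.from_legacy_cache(legacy)
assert cache.get_seq_length() == 3          # sequence_length recovered from the cached keys
assert len(cache.to_legacy_cache()) == 2    # round-trips back to the per-layer tuples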
@@ -3,10 +3,11 @@ from typing import List, Optional, Tuple, Union

import torch

from transformers import Cache
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.models.llama.modeling_llama import LlamaModel

from ...cache_utils import Cache


def _pre_process_input(input_ids):
    print(log(input_ids))

examples/modular-transformers/modular_dummy_bert.py (new file, 27 lines)
@@ -0,0 +1,27 @@
from typing import List, Optional, Tuple, Union

import torch

from transformers.models.bert.modeling_bert import BertModel

from ...modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions


class DummyBertModel(BertModel):
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
        return super().forward(input_ids)
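Aside: the pattern above, overriding `forward` and delegating only `input_ids` to the parent, can be tried standalone against `BertModel`. A rough sketch with a deliberately tiny, randomly initialized config so it runs quickly; the class name and config values are illustrative only, not part of the diff:

import torch
from transformers import BertConfig
from transformers.models.bert.modeling_bert import BertModel


class TinyDummyBert(BertModel):  # hypothetical stand-in for DummyBertModel
    def forward(self, input_ids=None, **kwargs):
        # Deliberately drop every other argument, as in the modular example above.
        return super().forward(input_ids)


config = BertConfig(vocab_size=64, hidden_size=32, num_hidden_layers=2,
                    num_attention_heads=2, intermediate_size=64,
                    max_position_embeddings=32)
model = TinyDummyBert(config)
out = model(input_ids=torch.randint(0, 64, (1, 8)), attention_mask=None)
print(out.last_hidden_state.shape)  # torch.Size([1, 8, 32])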
@@ -5,10 +5,11 @@ from transformers.models.llama.configuration_llama import LlamaConfig
# here there is no `ARG` so we are gonna take parent doc
class MyNewModelConfig(LlamaConfig):
    r"""
    mlp_bias (`bool`, *optional*, defaults to `False`)
    new_param (`int`, *optional*, defaults to `False`):
        A fun new parameter
    """

    def __init__(self, mlp_bias=True, new_param=0, **super_kwargs):
        super().__init__(self, **super_kwargs)
        self.mlp_bias = mlp_bias
        self.new_param = new_param
        super().__init__(self, **super_kwargs)
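For context on the config subclasses exercised in these hunks, a conventional config subclass built directly on `PretrainedConfig` usually sets its own attributes first and then forwards the remaining keyword arguments to the parent initializer. A minimal sketch; the class and attribute names are illustrative only and not taken from the diff:

from transformers import PretrainedConfig


class ToyConfig(PretrainedConfig):  # illustrative, not part of the diff
    model_type = "toy"

    def __init__(self, mlp_bias=True, new_param=0, **kwargs):
        # Set subclass-specific attributes, then let the parent handle the
        # common fields (pad_token_id, vocab_size, ...) passed through **kwargs.
        self.mlp_bias = mlp_bias
        self.new_param = new_param
        super().__init__(**kwargs)


config = ToyConfig(new_param=3, pad_token_id=0)
print(config.new_param, config.pad_token_id)  # 3 0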
@@ -26,5 +26,10 @@ class NewModelConfig(GemmaConfig):
        rope_theta=10000.0,
        attention_bias=False,
        attention_dropout=0.0,
        **kwargs,
    ):
        super().__init__(self)
        super().__init__(self, **kwargs)

    @property
    def num_heads(self):
        return self.num_attention_heads

examples/modular-transformers/modular_roberta.py (new file, 20 lines)
@@ -0,0 +1,20 @@
import torch.nn as nn

from transformers.models.bert.modeling_bert import BertEmbeddings, BertModel


class RobertaEmbeddings(BertEmbeddings):
    def __init__(self, config):
        super().__init__(config)
        self.pad_token_id = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, config.pad_token_id
        )


class RobertaModel(BertModel):
    def __init__(self, config):
        super().__init__(self, config)
        # Error out here. Why? Because `RobertaEmbeddings` is defined but not used.
        # no, because it's defined, and RobertaModel should use RobertaEmbedding
        # here if initialized that way it won't use the new embedding.
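The comments above debate whether the subclassed embeddings end up being used. Independent of what the modular converter generates, the explicit way to guarantee that in plain PyTorch/transformers code is to assign the redefined module in `__init__`. A rough sketch under that assumption; the class names and config values are illustrative only:

import torch.nn as nn
from transformers import BertConfig
from transformers.models.bert.modeling_bert import BertEmbeddings, BertModel


class MyEmbeddings(BertEmbeddings):  # illustrative stand-in for RobertaEmbeddings
    def __init__(self, config):
        super().__init__(config)
        self.pad_token_id = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, config.pad_token_id
        )


class MyModel(BertModel):  # illustrative stand-in for RobertaModel
    def __init__(self, config):
        super().__init__(config)
        # Replace the embeddings built by BertModel.__init__ so the subclass is used.
        self.embeddings = MyEmbeddings(config)


config = BertConfig(vocab_size=64, hidden_size=32, num_hidden_layers=2,
                    num_attention_heads=2, intermediate_size=64, pad_token_id=0)
model = MyModel(config)
print(type(model.embeddings).__name__)  # MyEmbeddings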
@@ -2,10 +2,11 @@ from typing import List, Optional, Tuple, Union

import torch

from transformers import Cache
from transformers.modeling_outputs import CausalLMOutputWithPast
from transformers.models.llama.modeling_llama import LlamaModel

from ...cache_utils import Cache


# example where we need some deps and some functions
class SuperModel(LlamaModel):
@@ -45,7 +45,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")

@@ -54,7 +54,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt")

@@ -56,7 +56,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
@@ -49,7 +49,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

logger = get_logger(__name__)

@@ -43,7 +43,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

@@ -48,7 +48,7 @@ Any model supported by the AutoModelForMaskedImageModeling API can be used.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

@@ -53,7 +53,7 @@ Any model supported by the AutoModelForMaskedImageModeling API can be used.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@@ -46,7 +46,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/instance-segmentation/requirements.txt")

@@ -52,7 +52,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/instance-segmentation/requirements.txt")

@@ -55,7 +55,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")

@@ -57,7 +57,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

logger = get_logger(__name__)
@@ -58,7 +58,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")

@@ -60,7 +60,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

logger = get_logger(__name__)

@@ -54,7 +54,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")

@@ -57,7 +57,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

logger = get_logger(__name__)
require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
@@ -47,7 +47,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")

@@ -47,7 +47,7 @@ from transformers.utils import PaddingStrategy, check_min_version, send_example_


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

logger = logging.getLogger(__name__)

@@ -56,7 +56,7 @@ from transformers.utils import PaddingStrategy, check_min_version, send_example_


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

logger = get_logger(__name__)
# You should update this to your particular problem to have better documentation of `model_type`

@@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/object-detection/requirements.txt")
@@ -51,7 +51,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

logging.basicConfig(level=logging.INFO)
logger = get_logger(__name__)

@@ -50,7 +50,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@@ -48,7 +48,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@@ -56,7 +56,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
@@ -57,7 +57,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@@ -46,7 +46,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@@ -51,7 +51,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt")

@@ -50,7 +50,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

logger = get_logger(__name__)
@@ -50,7 +50,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")

@@ -53,7 +53,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")

@@ -48,7 +48,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")

@@ -52,7 +52,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
@@ -56,7 +56,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")

@@ -47,7 +47,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

@@ -48,7 +48,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

@@ -49,7 +49,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

logger = get_logger(__name__)
@@ -48,7 +48,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

@@ -49,7 +49,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")

@@ -56,7 +56,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")

@@ -52,7 +52,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt")
@@ -57,7 +57,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt")

@@ -51,7 +51,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version(
    "datasets>=1.8.0", "To fix: pip install -r examples/tensorflow/contrastive-image-text/requirements.txt"

@@ -55,7 +55,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

@@ -50,7 +50,7 @@ from transformers.utils import PaddingStrategy, check_min_version, send_example_


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.45.0.dev0")
check_min_version("4.45.0")

logger = logging.getLogger(__name__)
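The version guards touched throughout these example scripts are plain runtime checks. A short sketch of how they behave; the version strings and requirement below mirror the hunks above but are only placeholders here:

from transformers.utils import check_min_version
from transformers.utils.versions import require_version

# Raises an error if the installed `transformers` is older than the pinned version.
check_min_version("4.45.0")

# Raises if the installed `datasets` does not satisfy the requirement, and includes
# the hint so users know which requirements file to install.
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")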
Some files were not shown because too many files have changed in this diff.