Mirror of https://github.com/huggingface/transformers.git (synced 2025-10-21 17:48:57 +08:00)

Compare commits: fix_test_e...v4.42.1 (44 commits)
Commits in this range (SHA1):

e3cb841ca8, b2455e5b81, 6c1d0b069d, 69b0f44b81, be50a0338b, 3a028101e9, 11138ca013, c9f191a0b7, dc76e9fa7f, 1de7dc7403, 1f9f57ab4c, a3fb96a42a, 492ee17ec3, e71f2863d7, b1ec745475, 3f93fd0694, ac52084bf2, 915cce39c9, b07770c5eb, 1218e439b5, 2daf2c3eaa, 0f67ba1d74, aab0829790, e73a97a2b3, fc689d75a0, a958c4a801, 7e86cb6c6f, 74b92c6256, 3a49ebe0d8, 2fc9d8e9b1, 2d4820284d, 0e23e60a5a, aac8ee4237, c54a8ca48e, 0dd65a0319, dce253f645, 3c2d4d60d7, 4b822560a1, 74a207404e, 8b7cd40273, 1e79eade41, 730a440734, 12b1620e61, d4564df1d4
.github/ISSUE_TEMPLATE/bug-report.yml (4 changed lines)

```diff
@@ -25,7 +25,7 @@ body:
 
       Models:
 
-        - text models: @ArthurZucker and @younesbelkada
+        - text models: @ArthurZucker
         - vision models: @amyeroberts
         - speech models: @sanchit-gandhi
         - graph models: @clefourrier
@@ -44,7 +44,7 @@ body:
       - deepspeed: HF Trainer/Accelerate: @muellerzr
       - ray/raytune: @richardliaw, @amogkam
       - Big Model Inference: @SunMarc
-      - quantization (bitsandbytes, autogpt): @SunMarc and @younesbelkada
+      - quantization (bitsandbytes, autogpt): @SunMarc
 
       Documentation: @stevhliu
 
```
.github/PULL_REQUEST_TEMPLATE.md (4 changed lines)

```diff
@@ -39,7 +39,7 @@ members/contributors who may be interested in your PR.
 
 Models:
 
-- text models: @ArthurZucker and @younesbelkada
+- text models: @ArthurZucker
 - vision models: @amyeroberts
 - speech models: @sanchit-gandhi
 - graph models: @clefourrier
@@ -58,7 +58,7 @@ Integrations:
 - deepspeed: HF Trainer/Accelerate: @muellerzr
 - ray/raytune: @richardliaw, @amogkam
 - Big Model Inference: @SunMarc
-- quantization (bitsandbytes, autogpt): @SunMarc and @younesbelkada
+- quantization (bitsandbytes, autogpt): @SunMarc
 
 Documentation: @stevhliu and @MKhalusova
 
```
```diff
@@ -15,16 +15,6 @@ jobs:
     name: "Nightly PyTorch + Stable TensorFlow"
     runs-on: [intel-cpu, 8-cpu, ci]
     steps:
-      - name: Cleanup disk
-        run: |
-          sudo ls -l /usr/local/lib/
-          sudo ls -l /usr/share/
-          sudo du -sh /usr/local/lib/
-          sudo du -sh /usr/share/
-          sudo rm -rf /usr/local/lib/android
-          sudo rm -rf /usr/share/dotnet
-          sudo du -sh /usr/local/lib/
-          sudo du -sh /usr/share/
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
@@ -52,16 +42,6 @@ jobs:
     name: "Nightly PyTorch + DeepSpeed"
     runs-on: [intel-cpu, 8-cpu, ci]
     steps:
-      - name: Cleanup disk
-        run: |
-          sudo ls -l /usr/local/lib/
-          sudo ls -l /usr/share/
-          sudo du -sh /usr/local/lib/
-          sudo du -sh /usr/share/
-          sudo rm -rf /usr/local/lib/android
-          sudo rm -rf /usr/share/dotnet
-          sudo du -sh /usr/local/lib/
-          sudo du -sh /usr/share/
       -
         name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v2
```
.github/workflows/model_jobs.yml (23 changed lines)

```diff
@@ -12,6 +12,12 @@ on:
       slice_id:
         required: true
         type: number
+      runner:
+        required: true
+        type: string
+      docker:
+        required: true
+        type: string
 
 env:
   HF_HOME: /mnt/cache
@@ -31,12 +37,13 @@ jobs:
   run_models_gpu:
     name: " "
     strategy:
+      max-parallel: 8
       fail-fast: false
       matrix:
         folders: ${{ fromJson(inputs.folder_slices)[inputs.slice_id] }}
-    runs-on: ['${{ inputs.machine_type }}', nvidia-gpu, t4, daily-ci]
+    runs-on: ['${{ inputs.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
     container:
-      image: huggingface/transformers-all-latest-gpu
+      image: ${{ inputs.docker }}
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
       - name: Echo input and matrix info
@@ -65,6 +72,18 @@ jobs:
         working-directory: /transformers
         run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
 
+      - name: Update / Install some packages (for Past CI)
+        if: ${{ contains(inputs.docker, '-past-') }}
+        working-directory: /transformers
+        run: |
+          python3 -m pip install -U datasets
+
+      - name: Update / Install some packages (for Past CI)
+        if: ${{ contains(inputs.docker, '-past-') && contains(inputs.docker, '-pytorch-') }}
+        working-directory: /transformers
+        run: |
+          python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
+
       - name: NVIDIA-SMI
         run: |
           nvidia-smi
```
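For context on the new `runner` and `docker` inputs: `model_jobs.yml` already selects its work through `folder_slices` (a JSON list of lists of test folders) and `slice_id`, via `${{ fromJson(inputs.folder_slices)[inputs.slice_id] }}`. A minimal Python sketch of that slicing and lookup, with illustrative folder names and slice count that are not taken from the repository:

```python
import json

def split_into_slices(folders: list[str], num_slices: int) -> str:
    """Split test folders into roughly equal slices; the caller passes this JSON as `folder_slices`."""
    slices = [folders[i::num_slices] for i in range(num_slices)]
    return json.dumps(slices)

folders = ["models/bert", "models/gpt2", "models/llama", "pipelines", "trainer"]
folder_slices = split_into_slices(folders, num_slices=2)

# Equivalent of `${{ fromJson(inputs.folder_slices)[inputs.slice_id] }}` for slice_id = 0:
slice_id = 0
print(json.loads(folder_slices)[slice_id])  # ['models/bert', 'models/llama', 'trainer']
```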
.github/workflows/self-nightly-caller.yml (new file, 43 lines)

```yaml
name: Self-hosted runner (nightly-ci)


on:
  repository_dispatch:
  schedule:
    - cron: "17 2 * * *"
  push:
    branches:
      - run_nightly_ci*

jobs:
  build_nightly_ci_images:
    name: Build Nightly CI Docker Images
    if: (github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_nightly_ci'))
    uses: ./.github/workflows/build-nightly-ci-docker-images.yml
    secrets: inherit

  model-ci:
    name: Model CI
    needs: [build_nightly_ci_images]
    uses: ./.github/workflows/self-scheduled.yml
    with:
      job: run_models_gpu
      slack_report_channel: "#transformers-ci-past-future"
      runner: ci
      docker: huggingface/transformers-all-latest-torch-nightly-gpu
      ci_event: Nightly CI
    secrets: inherit

  deepspeed-ci:
    name: DeepSpeed CI
    needs: [build_nightly_ci_images]
    uses: ./.github/workflows/self-scheduled.yml
    with:
      job: run_torch_cuda_extensions_gpu
      slack_report_channel: "#transformers-ci-past-future"
      runner: ci
      # test deepspeed nightly build with the latest release torch
      docker: huggingface/transformers-pytorch-deepspeed-latest-gpu
      ci_event: Nightly CI
      working-directory-prefix: /workspace
    secrets: inherit
```
```diff
@@ -2,32 +2,30 @@ name: Self-hosted runner (nightly-past-ci-caller)
 
 on:
   schedule:
-    # 2:17 am on each Sunday and Thursday
-    - cron: "17 2 * * 0,4"
+    - cron: "17 2,14 * * *"
   push:
     branches:
-      - run_nightly_ci*
       - run_past_ci*
 
 jobs:
-  build_nightly_ci_images:
-    name: Build Nightly CI Docker Images
-    if: (github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_nightly_ci'))
-    uses: ./.github/workflows/build-nightly-ci-docker-images.yml
-    secrets: inherit
-
-  run_nightly_ci:
-    name: Nightly CI
-    needs: [build_nightly_ci_images]
-    uses: ./.github/workflows/self-nightly-scheduled.yml
-    secrets: inherit
+  get_number:
+    name: Get number
+    runs-on: ubuntu-22.04
+    outputs:
+      run_number: ${{ steps.get_number.outputs.run_number }}
+    steps:
+      - name: Get number
+        id: get_number
+        run: |
+          echo "${{ github.run_number }}"
+          echo "$(python3 -c 'print(int(${{ github.run_number }}) % 10)')"
+          echo "run_number=$(python3 -c 'print(int(${{ github.run_number }}) % 10)')" >> $GITHUB_OUTPUT
 
   run_past_ci_pytorch_1-13:
     name: PyTorch 1.13
-    if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
-    needs: [run_nightly_ci]
-    uses: ./.github/workflows/self-past.yml
+    needs: get_number
+    if: needs.get_number.outputs.run_number == 0 && (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
+    uses: ./.github/workflows/self-past-caller.yml
     with:
       framework: pytorch
       version: "1.13"
@@ -36,9 +34,9 @@ jobs:
 
   run_past_ci_pytorch_1-12:
     name: PyTorch 1.12
-    if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
-    needs: [run_past_ci_pytorch_1-13]
-    uses: ./.github/workflows/self-past.yml
+    needs: get_number
+    if: needs.get_number.outputs.run_number == 1 && (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
+    uses: ./.github/workflows/self-past-caller.yml
     with:
       framework: pytorch
       version: "1.12"
@@ -47,9 +45,9 @@ jobs:
 
   run_past_ci_pytorch_1-11:
     name: PyTorch 1.11
-    if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
-    needs: [run_past_ci_pytorch_1-12]
-    uses: ./.github/workflows/self-past.yml
+    needs: get_number
+    if: needs.get_number.outputs.run_number == 2 && (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
+    uses: ./.github/workflows/self-past-caller.yml
     with:
       framework: pytorch
       version: "1.11"
@@ -58,9 +56,9 @@ jobs:
 
   run_past_ci_tensorflow_2-11:
     name: TensorFlow 2.11
-    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
-    needs: [run_past_ci_pytorch_1-11]
-    uses: ./.github/workflows/self-past.yml
+    needs: get_number
+    if: needs.get_number.outputs.run_number == 3 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    uses: ./.github/workflows/self-past-caller.yml
     with:
       framework: tensorflow
       version: "2.11"
@@ -69,9 +67,9 @@ jobs:
 
   run_past_ci_tensorflow_2-10:
     name: TensorFlow 2.10
-    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
-    needs: [run_past_ci_tensorflow_2-11]
-    uses: ./.github/workflows/self-past.yml
+    needs: get_number
+    if: needs.get_number.outputs.run_number == 4 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    uses: ./.github/workflows/self-past-caller.yml
     with:
       framework: tensorflow
       version: "2.10"
@@ -80,9 +78,9 @@ jobs:
 
   run_past_ci_tensorflow_2-9:
     name: TensorFlow 2.9
-    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
-    needs: [run_past_ci_tensorflow_2-10]
-    uses: ./.github/workflows/self-past.yml
+    needs: get_number
+    if: needs.get_number.outputs.run_number == 5 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    uses: ./.github/workflows/self-past-caller.yml
     with:
       framework: tensorflow
       version: "2.9"
@@ -91,9 +89,9 @@ jobs:
 
   run_past_ci_tensorflow_2-8:
     name: TensorFlow 2.8
-    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
-    needs: [run_past_ci_tensorflow_2-9]
-    uses: ./.github/workflows/self-past.yml
+    needs: get_number
+    if: needs.get_number.outputs.run_number == 6 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    uses: ./.github/workflows/self-past-caller.yml
     with:
       framework: tensorflow
       version: "2.8"
@@ -102,9 +100,9 @@ jobs:
 
   run_past_ci_tensorflow_2-7:
     name: TensorFlow 2.7
-    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
-    needs: [run_past_ci_tensorflow_2-8]
-    uses: ./.github/workflows/self-past.yml
+    needs: get_number
+    if: needs.get_number.outputs.run_number == 7 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    uses: ./.github/workflows/self-past-caller.yml
    with:
       framework: tensorflow
       version: "2.7"
@@ -113,9 +111,9 @@ jobs:
 
   run_past_ci_tensorflow_2-6:
     name: TensorFlow 2.6
-    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
-    needs: [run_past_ci_tensorflow_2-7]
-    uses: ./.github/workflows/self-past.yml
+    needs: get_number
+    if: needs.get_number.outputs.run_number == 8 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    uses: ./.github/workflows/self-past-caller.yml
     with:
       framework: tensorflow
       version: "2.6"
@@ -124,9 +122,9 @@ jobs:
 
   run_past_ci_tensorflow_2-5:
     name: TensorFlow 2.5
-    if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
-    needs: [run_past_ci_tensorflow_2-6]
-    uses: ./.github/workflows/self-past.yml
+    needs: get_number
+    if: needs.get_number.outputs.run_number == 9 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
+    uses: ./.github/workflows/self-past-caller.yml
     with:
       framework: tensorflow
       version: "2.5"
```
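The `get_number` job above replaces the old chain of `needs:` dependencies between past-CI jobs: each run computes `github.run_number % 10`, and only the job whose slot matches fires, so with the cron now triggering twice a day the ten slots cycle through over roughly five days of scheduled runs. A rough Python sketch of the dispatch; the slot table is copied from the `if:` conditions above, while treating the run number as a plain integer is an assumption:

```python
# Slot table taken from the `if: needs.get_number.outputs.run_number == N` conditions above.
PAST_CI_SLOTS = {
    0: ("pytorch", "1.13"),
    1: ("pytorch", "1.12"),
    2: ("pytorch", "1.11"),
    3: ("tensorflow", "2.11"),
    4: ("tensorflow", "2.10"),
    5: ("tensorflow", "2.9"),
    6: ("tensorflow", "2.8"),
    7: ("tensorflow", "2.7"),
    8: ("tensorflow", "2.6"),
    9: ("tensorflow", "2.5"),
}

def past_ci_job_for(run_number: int) -> tuple[str, str]:
    """Pick the (framework, version) whose past-CI job fires for this workflow run."""
    return PAST_CI_SLOTS[run_number % 10]

print(past_ci_job_for(1234))  # ('tensorflow', '2.10')
```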
.github/workflows/self-nightly-scheduled.yml (deleted, 290 lines). The removed file:

```yaml
name: Self-hosted runner (nightly-ci)

# Note that each job's dependencies go into a corresponding docker file.
#
# For example for `run_torch_cuda_extensions_gpu` the docker image is
# `huggingface/transformers-pytorch-deepspeed-latest-gpu`, which can be found at
# `docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile`

on:
  repository_dispatch:
  workflow_call:

env:
  HF_HOME: /mnt/cache
  TRANSFORMERS_IS_CI: yes
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  RUN_SLOW: yes
  HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
  TF_FORCE_GPU_ALLOW_GROWTH: true
  RUN_PT_TF_CROSS_TESTS: 1
  CUDA_VISIBLE_DEVICES: 0,1

jobs:
  setup:
    name: Setup
    strategy:
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
    container:
      image: huggingface/transformers-all-latest-torch-nightly-gpu
      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - name: Update clone
        working-directory: /transformers
        run: |
          git fetch && git checkout ${{ github.sha }}

      - name: Cleanup
        working-directory: /transformers
        run: |
          rm -rf tests/__pycache__
          rm -rf tests/models/__pycache__
          rm -rf reports

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - id: set-matrix
        name: Identify models to test
        working-directory: /transformers/tests
        run: |
          echo "matrix=$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')" >> $GITHUB_OUTPUT

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

  run_tests_single_gpu:
    name: Model tests
    strategy:
      fail-fast: false
      matrix:
        folders: ${{ fromJson(needs.setup.outputs.matrix) }}
        machine_type: [single-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
    container:
      image: huggingface/transformers-all-latest-torch-nightly-gpu
      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    needs: setup
    steps:
      - name: Echo folder ${{ matrix.folders }}
        shell: bash
        # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
        # set the artifact folder names (because the character `/` is not allowed).
        run: |
          echo "${{ matrix.folders }}"
          matrix_folders=${{ matrix.folders }}
          matrix_folders=${matrix_folders/'models/'/'models_'}
          echo "$matrix_folders"
          echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV

      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all tests on GPU
        working-directory: /transformers
        run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_nightly"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_nightly
          path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}

  run_tests_multi_gpu:
    name: Model tests
    strategy:
      fail-fast: false
      matrix:
        folders: ${{ fromJson(needs.setup.outputs.matrix) }}
        machine_type: [multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
    container:
      image: huggingface/transformers-all-latest-torch-nightly-gpu
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    needs: setup
    steps:
      - name: Echo folder ${{ matrix.folders }}
        shell: bash
        # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
        # set the artifact folder names (because the character `/` is not allowed).
        run: |
          echo "${{ matrix.folders }}"
          matrix_folders=${{ matrix.folders }}
          matrix_folders=${matrix_folders/'models/'/'models_'}
          echo "$matrix_folders"
          echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV

      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all tests on GPU
        working-directory: /transformers
        run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_nightly"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_nightly
          path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}

  run_torch_cuda_extensions_gpu:
    name: Torch CUDA extension tests
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
    needs: setup
    container:
      image: huggingface/transformers-pytorch-deepspeed-nightly-gpu
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: Update clone
        working-directory: /workspace/transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /workspace/transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: Remove cached torch extensions
        run: rm -rf /github/home/.cache/torch_extensions/

      # To avoid unknown test failures
      - name: Pre build DeepSpeed *again*
        working-directory: /workspace
        run: |
          python3 -m pip uninstall -y deepspeed
          rm -rf DeepSpeed
          git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build
          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Environment
        working-directory: /workspace/transformers
        run: |
          python utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /workspace/transformers
        run: pip freeze

      - name: Run all tests on GPU
        working-directory: /workspace/transformers
        run: |
          python -m pytest -v --make-reports=${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports tests/deepspeed tests/extended

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /workspace/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports_postfix_nightly"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports_postfix_nightly
          path: /workspace/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports

  send_results:
    name: Send results to webhook
    runs-on: ubuntu-22.04
    if: always()
    needs: [
      setup,
      run_tests_single_gpu,
      run_tests_multi_gpu,
      run_torch_cuda_extensions_gpu
    ]
    steps:
      - name: Preliminary job status
        shell: bash
        # For the meaning of these environment variables, see the job `Setup`
        run: |
          echo "Setup status: ${{ needs.setup.result }}"

      - uses: actions/checkout@v4
      - uses: actions/download-artifact@v4
      - name: Send message to Slack
        env:
          CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
          CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}
          CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
          CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
          CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }}
          ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
          CI_EVENT: Nightly CI
          SETUP_STATUS: ${{ needs.setup.result }}
        # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change
        # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
        run: |
          pip install slack_sdk
          pip show slack_sdk
          python utils/notification_service.py "${{ needs.setup.outputs.matrix }}"

      # delete-artifact
      - uses: geekyeggo/delete-artifact@v2
        with:
          name: |
            single-*
            multi-*
```
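The `Identify models to test` step in the file above (and in `self-past.yml` further down) packs folder discovery into a single `python3 -c` one-liner. Unfolded into ordinary Python it reads roughly as follows, assuming it is executed from the repository's `tests/` directory as the step's working directory implies:

```python
import os

# Executed from the repository's `tests/` directory, like the workflow step.
tests = os.getcwd()
model_tests = os.listdir(os.path.join(tests, "models"))

# Top-level test folders (the `models` umbrella folder is dropped below) ...
d1 = sorted(list(filter(os.path.isdir, os.listdir(tests))))
# ... and per-model folders such as `models/bert`.
d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests])))
d1.remove("models")

# Model folders first, then the remaining top-level folders; printed as the job's matrix.
d = d2 + d1
print(d)
```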
.github/workflows/self-past-caller.yml (new file, 40 lines)

```yaml
name: Self-hosted runner (past-ci)


on:
  workflow_call:
    inputs:
      framework:
        required: true
        type: string
      version:
        required: true
        type: string
      # Use this to control the commit to test against
      sha:
        default: 'main'
        required: false
        type: string

jobs:
  model-ci:
    name: Model CI
    uses: ./.github/workflows/self-scheduled.yml
    with:
      job: run_models_gpu
      slack_report_channel: "#transformers-ci-past-future"
      runner: past-ci
      docker: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
      ci_event: Past CI - ${{ inputs.framework }}-${{ inputs.version }}
    secrets: inherit

  deepspeed-ci:
    name: DeepSpeed CI
    uses: ./.github/workflows/self-scheduled.yml
    with:
      job: run_torch_cuda_extensions_gpu
      slack_report_channel: "#transformers-ci-past-future"
      runner: past-ci
      docker: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
      ci_event: Past CI - ${{ inputs.framework }}-${{ inputs.version }}
    secrets: inherit
```
.github/workflows/self-past.yml (deleted, 357 lines). The removed file:

```yaml
name: Self-hosted runner (past-ci)

# Note that each job's dependencies go into a corresponding docker file.
#
# For example for `run_torch_cuda_extensions_gpu` the docker image is
# `huggingface/transformers-pytorch-deepspeed-latest-gpu`, which can be found at
# `docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile`

on:
  workflow_call:
    inputs:
      framework:
        required: true
        type: string
      version:
        required: true
        type: string
      # Use this to control the commit to test against
      sha:
        default: 'main'
        required: false
        type: string

env:
  HF_HOME: /mnt/cache
  TRANSFORMERS_IS_CI: yes
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  RUN_SLOW: yes
  HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
  TF_FORCE_GPU_ALLOW_GROWTH: true
  RUN_PT_TF_CROSS_TESTS: 1
  CUDA_VISIBLE_DEVICES: 0,1

jobs:
  setup:
    name: Setup
    strategy:
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
    container:
      image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ inputs.sha }}

      - name: Cleanup
        working-directory: /transformers
        run: |
          rm -rf tests/__pycache__
          rm -rf tests/models/__pycache__
          rm -rf reports

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - id: set-matrix
        working-directory: /transformers
        name: Identify models to test
        run: |
          cd tests
          echo "matrix=$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')" >> $GITHUB_OUTPUT

  run_tests_single_gpu:
    name: Model tests
    strategy:
      fail-fast: false
      matrix:
        folders: ${{ fromJson(needs.setup.outputs.matrix) }}
        machine_type: [single-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
    container:
      image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
      options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    needs: setup
    steps:
      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ inputs.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: Update some packages
        working-directory: /transformers
        run: python3 -m pip install -U datasets

      - name: Echo folder ${{ matrix.folders }}
        shell: bash
        # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
        # set the artifact folder names (because the character `/` is not allowed).
        run: |
          echo "${{ matrix.folders }}"
          matrix_folders=${{ matrix.folders }}
          matrix_folders=${matrix_folders/'models/'/'models_'}
          echo "$matrix_folders"
          echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Install
        if: inputs.framework == 'pytorch'
        working-directory: /transformers
        run: |
          python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all tests on GPU
        working-directory: /transformers
        run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt

      - name: Save job name
        if: ${{ always() }}
        shell: bash
        run: |
          matrix_folders=${matrix_folders/'models_'/'models/'}
          job_name="Model tests ($matrix_folders, ${{ matrix.machine_type }})"
          echo "$job_name"
          echo "$job_name" > /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/job_name.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}
          path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}

  run_tests_multi_gpu:
    name: Model tests
    strategy:
      fail-fast: false
      matrix:
        folders: ${{ fromJson(needs.setup.outputs.matrix) }}
        machine_type: [multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
    container:
      image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    needs: setup
    steps:
      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ inputs.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: Update some packages
        working-directory: /transformers
        run: python3 -m pip install -U datasets

      - name: Echo folder ${{ matrix.folders }}
        shell: bash
        # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
        # set the artifact folder names (because the character `/` is not allowed).
        run: |
          echo "${{ matrix.folders }}"
          matrix_folders=${{ matrix.folders }}
          matrix_folders=${matrix_folders/'models/'/'models_'}
          echo "$matrix_folders"
          echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Install
        if: inputs.framework == 'pytorch'
        working-directory: /transformers
        run: |
          python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all tests on GPU
        working-directory: /transformers
        run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt

      - name: Save job name
        if: ${{ always() }}
        shell: bash
        run: |
          matrix_folders=${matrix_folders/'models_'/'models/'}
          job_name="Model tests ($matrix_folders, ${{ matrix.machine_type }})"
          echo "$job_name"
          echo "$job_name" > /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/job_name.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}
          path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}

  run_torch_cuda_extensions_gpu:
    name: Torch CUDA extension tests
    if: inputs.framework == 'pytorch'
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
    needs: setup
    container:
      image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: Update some packages
        working-directory: /transformers
        run: python3 -m pip install -U datasets

      - name: Install
        working-directory: /transformers
        run: |
          python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate

      - name: Remove cached torch extensions
        run: rm -rf /github/home/.cache/torch_extensions/

      # To avoid unknown test failures
      - name: Pre build DeepSpeed *again*
        working-directory: /
        run: |
          python3 -m pip uninstall -y deepspeed
          rm -rf DeepSpeed
          git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build
          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all tests on GPU
        working-directory: /transformers
        run: |
          python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports tests/deepspeed tests/extended

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}
          path: /transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports

  send_results:
    name: Send results to webhook
    runs-on: ubuntu-22.04
    if: always()
    needs: [
      setup,
      run_tests_single_gpu,
      run_tests_multi_gpu,
      run_torch_cuda_extensions_gpu
    ]
    steps:
      - name: Preliminary job status
        shell: bash
        # For the meaning of these environment variables, see the job `Setup`
        run: |
          echo "Setup status: ${{ needs.setup.result }}"

      - uses: actions/checkout@v4
      - uses: actions/download-artifact@v4

      # Create a directory to store test failure tables in the next step
      - name: Create directory
        run: mkdir test_failure_tables

      - name: Send message to Slack
        env:
          CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
          CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}
          CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
          CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
          CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }}
          ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
          CI_EVENT: Past CI - ${{ inputs.framework }}-${{ inputs.version }}
          SETUP_STATUS: ${{ needs.setup.result }}
        # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change
        # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
        run: |
          pip install slack_sdk
          pip show slack_sdk
          python utils/notification_service.py "${{ needs.setup.outputs.matrix }}"

      # Upload complete failure tables, as they might be big and only truncated versions could be sent to Slack.
      - name: Failure table artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: test_failure_tables_${{ inputs.framework }}-${{ inputs.version }}
          path: test_failure_tables

      # delete-artifact
      - uses: geekyeggo/delete-artifact@v2
        with:
          name: |
            single-*
            multi-*
```
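Several steps in the two workflows above map a matrix folder such as `models/bert` to `models_bert` with the bash substitution `${matrix_folders/'models/'/'models_'}`, because `/` is not allowed in artifact names, and the `Save job name` step maps it back. The round trip, sketched in Python for clarity (the function names are illustrative):

```python
def to_artifact_name(folder: str) -> str:
    """`models/bert` -> `models_bert`; folders like `trainer` pass through unchanged."""
    return folder.replace("models/", "models_", 1)

def to_folder(artifact_name: str) -> str:
    """Inverse mapping, used when reconstructing the human-readable job name."""
    return artifact_name.replace("models_", "models/", 1)

assert to_artifact_name("models/bert") == "models_bert"
assert to_folder("models_bert") == "models/bert"
assert to_artifact_name("trainer") == "trainer"
```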
.github/workflows/self-scheduled-caller.yml (19 changed lines)

```diff
@@ -16,6 +16,9 @@ jobs:
     with:
       job: run_models_gpu
       slack_report_channel: "#transformers-ci-daily-models"
+      runner: daily-ci
+      docker: huggingface/transformers-all-latest-gpu
+      ci_event: Daily CI
     secrets: inherit
 
   torch-pipeline:
@@ -24,6 +27,9 @@ jobs:
     with:
       job: run_pipelines_torch_gpu
       slack_report_channel: "#transformers-ci-daily-pipeline-torch"
+      runner: daily-ci
+      docker: huggingface/transformers-pytorch-gpu
+      ci_event: Daily CI
     secrets: inherit
 
   tf-pipeline:
@@ -32,6 +38,9 @@ jobs:
     with:
       job: run_pipelines_tf_gpu
       slack_report_channel: "#transformers-ci-daily-pipeline-tf"
+      runner: daily-ci
+      docker: huggingface/transformers-tensorflow-gpu
+      ci_event: Daily CI
     secrets: inherit
 
   example-ci:
@@ -40,6 +49,9 @@ jobs:
     with:
       job: run_examples_gpu
       slack_report_channel: "#transformers-ci-daily-examples"
+      runner: daily-ci
+      docker: huggingface/transformers-all-latest-gpu
+      ci_event: Daily CI
     secrets: inherit
 
   deepspeed-ci:
@@ -48,6 +60,10 @@ jobs:
     with:
       job: run_torch_cuda_extensions_gpu
       slack_report_channel: "#transformers-ci-daily-deepspeed"
+      runner: daily-ci
+      docker: huggingface/transformers-pytorch-deepspeed-latest-gpu
+      ci_event: Daily CI
+      working-directory-prefix: /workspace
     secrets: inherit
 
   quantization-ci:
@@ -56,4 +72,7 @@ jobs:
     with:
       job: run_quantization_torch_gpu
       slack_report_channel: "#transformers-ci-daily-quantization"
+      runner: daily-ci
+      docker: huggingface/transformers-quantization-latest-gpu
+      ci_event: Daily CI
     secrets: inherit
```
.github/workflows/self-scheduled.yml (73 changed lines)

```diff
@@ -15,6 +15,19 @@ on:
       slack_report_channel:
         required: true
         type: string
+      runner:
+        required: true
+        type: string
+      docker:
+        required: true
+        type: string
+      ci_event:
+        required: true
+        type: string
+      working-directory-prefix:
+        default: ''
+        required: false
+        type: string
 
 env:
   HF_HOME: /mnt/cache
@@ -38,7 +51,7 @@ jobs:
     strategy:
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
     container:
       image: huggingface/transformers-all-latest-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -96,6 +109,8 @@ jobs:
       folder_slices: ${{ needs.setup.outputs.folder_slices }}
       machine_type: ${{ matrix.machine_type }}
       slice_id: ${{ matrix.slice_id }}
+      runner: ${{ inputs.runner }}
+      docker: ${{ inputs.docker }}
     secrets: inherit
 
   run_pipelines_torch_gpu:
@@ -105,7 +120,7 @@ jobs:
       fail-fast: false
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
     container:
       image: huggingface/transformers-pytorch-gpu
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -155,7 +170,7 @@ jobs:
       fail-fast: false
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
     container:
       image: huggingface/transformers-tensorflow-gpu
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -206,7 +221,7 @@ jobs:
       fail-fast: false
       matrix:
         machine_type: [single-gpu]
-    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
     container:
       image: huggingface/transformers-all-latest-gpu
       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -257,69 +272,88 @@ jobs:
       fail-fast: false
       matrix:
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
     container:
-      image: huggingface/transformers-pytorch-deepspeed-latest-gpu
+      image: ${{ inputs.docker }}
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
     steps:
       - name: Update clone
-        working-directory: /workspace/transformers
+        working-directory: ${{ inputs.working-directory-prefix }}/transformers
         run: git fetch && git checkout ${{ github.sha }}
 
       - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
-        working-directory: /workspace/transformers
+        working-directory: ${{ inputs.working-directory-prefix }}/transformers
         run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
 
+      - name: Update / Install some packages (for Past CI)
+        if: ${{ contains(inputs.docker, '-past-') && contains(inputs.docker, '-pytorch-') }}
+        working-directory: ${{ inputs.working-directory-prefix }}/transformers
+        run: |
+          python3 -m pip install -U datasets
+          python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
+
       - name: Remove cached torch extensions
         run: rm -rf /github/home/.cache/torch_extensions/
 
       # To avoid unknown test failures
-      - name: Pre build DeepSpeed *again*
-        working-directory: /workspace
+      - name: Pre build DeepSpeed *again* (for daily CI)
+        if: ${{ contains(inputs.ci_event, 'Daily CI') }}
+        working-directory: ${{ inputs.working-directory-prefix }}/
         run: |
           python3 -m pip uninstall -y deepspeed
           DS_DISABLE_NINJA=1 DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
 
+      # To avoid unknown test failures
+      - name: Pre build DeepSpeed *again* (for nightly & Past CI)
+        if: ${{ contains(inputs.ci_event, 'Nightly CI') || contains(inputs.ci_event, 'Past CI') }}
+        working-directory: ${{ inputs.working-directory-prefix }}/
+        run: |
+          python3 -m pip uninstall -y deepspeed
+          rm -rf DeepSpeed
+          git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build
+          DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
+
       - name: NVIDIA-SMI
         run: |
           nvidia-smi
 
       - name: Environment
-        working-directory: /workspace/transformers
+        working-directory: ${{ inputs.working-directory-prefix }}/transformers
         run: |
-          python utils/print_env.py
+          python3 utils/print_env.py
 
       - name: Show installed libraries and their versions
-        working-directory: /workspace/transformers
+        working-directory: ${{ inputs.working-directory-prefix }}/transformers
         run: pip freeze
 
       - name: Run all tests on GPU
-        working-directory: /workspace/transformers
+        working-directory: ${{ inputs.working-directory-prefix }}/transformers
         run: |
-          python -m pytest -v --make-reports=${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports tests/deepspeed tests/extended
+          python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports tests/deepspeed tests/extended
 
       - name: Failure short reports
         if: ${{ failure() }}
         continue-on-error: true
-        run: cat /workspace/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports/failures_short.txt
+        run: cat ${{ inputs.working-directory-prefix }}/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports/failures_short.txt
 
       - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports"
         if: ${{ always() }}
         uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
-          path: /workspace/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
+          path: ${{ inputs.working-directory-prefix }}/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
 
   run_quantization_torch_gpu:
     if: ${{ inputs.job == 'run_quantization_torch_gpu' }}
     name: " "
     needs: setup
     strategy:
       max-parallel: 4
       fail-fast: false
       matrix:
         folders: ${{ fromJson(needs.setup.outputs.quantization_matrix) }}
         machine_type: [single-gpu, multi-gpu]
-    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
+    runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
     container:
       image: huggingface/transformers-quantization-latest-gpu
       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@@ -434,5 +468,6 @@
       # This would be an empty string if `setup` is skipped.
       folder_slices: ${{ needs.setup.outputs.folder_slices }}
       quantization_matrix: ${{ needs.setup.outputs.quantization_matrix }}
+      ci_event: ${{ inputs.ci_event }}
 
     secrets: inherit
```
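In the reworked `run_torch_cuda_extensions_gpu` job, which `Pre build DeepSpeed *again*` step runs is decided entirely by the `ci_event` input. A small Python sketch of that selection; the command strings are taken from the steps above, while the helper function itself is illustrative:

```python
def deepspeed_prebuild_commands(ci_event: str) -> list[str]:
    """Mirror the `if: contains(inputs.ci_event, ...)` guards on the two prebuild steps."""
    if "Daily CI" in ci_event:
        # Daily CI rebuilds the released wheel with the CPU/fused Adam ops precompiled.
        return [
            "python3 -m pip uninstall -y deepspeed",
            'DS_DISABLE_NINJA=1 DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed '
            '--global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check',
        ]
    if "Nightly CI" in ci_event or "Past CI" in ci_event:
        # Nightly and Past CI build DeepSpeed from a fresh clone instead.
        return [
            "python3 -m pip uninstall -y deepspeed",
            "rm -rf DeepSpeed",
            "git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build",
            'DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install . '
            '--global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check',
        ]
    return []  # any other event skips the prebuild

print("\n".join(deepspeed_prebuild_commands("Nightly CI")))
```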
.github/workflows/slack-report.yml (7 changed lines)

```diff
@@ -18,6 +18,9 @@ on:
       quantization_matrix:
         required: true
         type: string
+      ci_event:
+        required: true
+        type: string
 
 env:
   TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN: ${{ secrets.TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN }}
@@ -45,7 +48,7 @@
           CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
           SLACK_REPORT_CHANNEL: ${{ inputs.slack_report_channel }}
           ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
-          CI_EVENT: scheduled
+          CI_EVENT: ${{ inputs.ci_event }}
           CI_SHA: ${{ github.sha }}
           CI_WORKFLOW_REF: ${{ github.workflow_ref }}
           CI_TEST_JOB: ${{ inputs.job }}
@@ -76,7 +79,7 @@
           CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
           ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
           SLACK_REPORT_CHANNEL: ${{ inputs.slack_report_channel }}
-          CI_EVENT: scheduled
+          CI_EVENT: ${{ inputs.ci_event }}
           CI_SHA: ${{ github.sha }}
           CI_TEST_JOB: ${{ inputs.job }}
           SETUP_STATUS: ${{ inputs.setup_status }}
```
README.md (24 changed lines)

```diff
@@ -36,18 +36,18 @@ limitations under the License.
 <h4 align="center">
     <p>
         <b>English</b> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
     </p>
 </h4>
```

````diff
@@ -185,16 +185,16 @@ pytest -k "test and ada" tests/test_optimization.py
 
 Manchmal müssen Sie `accelerate` Tests für Ihre Modelle ausführen. Dazu fügen Sie einfach `-m accelerate_tests` zu Ihrem Befehl hinzu, wenn Sie diese Tests bei einem `OPT`-Lauf ausführen möchten:
 ```bash
-RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
+RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
 ```
 
-### Dokumentationstests ausführen
+### Dokumentationstests ausführen
 
-Um zu testen, ob die Dokumentationsbeispiele korrekt sind, sollten Sie überprüfen, ob die `doctests` erfolgreich sind.
-Lassen Sie uns als Beispiel den docstring von [WhisperModel.forward](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035) verwenden:
+Um zu testen, ob die Dokumentationsbeispiele korrekt sind, sollten Sie überprüfen, ob die `doctests` erfolgreich sind.
+Lassen Sie uns als Beispiel den docstring von [WhisperModel.forward](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035) verwenden:
 
-```python
+```python
 r"""
 Returns:
 
@@ -217,8 +217,8 @@ Example:
 
 ```
 
-Führen Sie einfach die folgende Zeile aus, um automatisch jedes docstring-Beispiel in der gewünschten Datei zu testen:
-```bash
+Führen Sie einfach die folgende Zeile aus, um automatisch jedes docstring-Beispiel in der gewünschten Datei zu testen:
+```bash
 pytest --doctest-modules <path_to_file_or_dir>
 ```
 Wenn die Datei eine Markdown-Erweiterung hat, sollten Sie das Argument `--doctest-glob="*.md"` hinzufügen.
@@ -862,7 +862,7 @@ Code, der fehlerhaft ist, einen schlechten Zustand verursacht, der sich auf ande
 - Hier sehen Sie, wie Sie einen ganzen Test bedingungslos überspringen können:
 
 ```python no-style
-@unittest.skip("this bug needs to be fixed")
+@unittest.skip(reason="this bug needs to be fixed")
 def test_feature_x():
 ```
````
@@ -627,6 +627,8 @@
  title: RegNet
- local: model_doc/resnet
  title: ResNet
+- local: model_doc/rt_detr
+  title: RT-DETR
- local: model_doc/segformer
  title: SegFormer
- local: model_doc/seggpt
@@ -774,6 +776,8 @@
  title: Idefics2
- local: model_doc/instructblip
  title: InstructBLIP
+- local: model_doc/instructblipvideo
+  title: InstructBlipVideo
- local: model_doc/kosmos-2
  title: KOSMOS-2
- local: model_doc/layoutlm
@@ -790,6 +794,8 @@
  title: Llava
- local: model_doc/llava_next
  title: LLaVA-NeXT
+- local: model_doc/llava-next-video
+  title: LLaVa-NeXT-Video
- local: model_doc/lxmert
  title: LXMERT
- local: model_doc/matcha
@@ -145,6 +145,7 @@ Flax), PyTorch, and/or TensorFlow.
| [Funnel Transformer](model_doc/funnel) | ✅ | ✅ | ❌ |
| [Fuyu](model_doc/fuyu) | ✅ | ❌ | ❌ |
| [Gemma](model_doc/gemma) | ✅ | ❌ | ✅ |
+| [Gemma2](model_doc/gemma2) | ✅ | ❌ | ❌ |
| [GIT](model_doc/git) | ✅ | ❌ | ❌ |
| [GLPN](model_doc/glpn) | ✅ | ❌ | ❌ |
| [GPT Neo](model_doc/gpt_neo) | ✅ | ❌ | ✅ |
@@ -165,6 +166,7 @@ Flax), PyTorch, and/or TensorFlow.
| [ImageGPT](model_doc/imagegpt) | ✅ | ❌ | ❌ |
| [Informer](model_doc/informer) | ✅ | ❌ | ❌ |
| [InstructBLIP](model_doc/instructblip) | ✅ | ❌ | ❌ |
+| [InstructBlipVideo](model_doc/instructblipvideo) | ✅ | ❌ | ❌ |
| [Jamba](model_doc/jamba) | ✅ | ❌ | ❌ |
| [JetMoe](model_doc/jetmoe) | ✅ | ❌ | ❌ |
| [Jukebox](model_doc/jukebox) | ✅ | ❌ | ❌ |
@@ -181,6 +183,7 @@ Flax), PyTorch, and/or TensorFlow.
| [Llama3](model_doc/llama3) | ✅ | ❌ | ✅ |
| [LLaVa](model_doc/llava) | ✅ | ❌ | ❌ |
| [LLaVA-NeXT](model_doc/llava_next) | ✅ | ❌ | ❌ |
+| [LLaVa-NeXT-Video](model_doc/llava-next-video) | ✅ | ❌ | ❌ |
| [Longformer](model_doc/longformer) | ✅ | ✅ | ❌ |
| [LongT5](model_doc/longt5) | ✅ | ❌ | ✅ |
| [LUKE](model_doc/luke) | ✅ | ❌ | ❌ |
@@ -262,6 +265,8 @@ Flax), PyTorch, and/or TensorFlow.
| [RoBERTa-PreLayerNorm](model_doc/roberta-prelayernorm) | ✅ | ✅ | ✅ |
| [RoCBert](model_doc/roc_bert) | ✅ | ❌ | ❌ |
| [RoFormer](model_doc/roformer) | ✅ | ✅ | ✅ |
+| [RT-DETR](model_doc/rt_detr) | ✅ | ❌ | ❌ |
+| [RT-DETR-ResNet](model_doc/rt_detr_resnet) | ✅ | ❌ | ❌ |
| [RWKV](model_doc/rwkv) | ✅ | ❌ | ❌ |
| [SAM](model_doc/sam) | ✅ | ✅ | ❌ |
| [SeamlessM4T](model_doc/seamless_m4t) | ✅ | ❌ | ❌ |
docs/source/en/model_doc/gemma2.md (new file, 58 lines)
@@ -0,0 +1,58 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Gemma2

## Overview

The Gemma2 model was proposed in [Gemma2: Open Models Based on Gemini Technology and Research](https://blog.google/technology/developers/Gemma2-open-models/) by the Gemma2 Team, Google.
Gemma2 models are trained on 6T tokens, and released with 2 versions, 2b and 7b.

The abstract from the paper is the following:

*This work introduces Gemma2, a new family of open language models demonstrating strong performance across academic benchmarks for language understanding, reasoning, and safety. We release two sizes of models (2 billion and 7 billion parameters), and provide both pretrained and fine-tuned checkpoints. Gemma2 outperforms similarly sized open models on 11 out of 18 text-based tasks, and we present comprehensive evaluations of safety and responsibility aspects of the models, alongside a detailed description of our model development. We believe the responsible release of LLMs is critical for improving the safety of frontier models, and for enabling the next wave of LLM innovations*

Tips:

- The original checkpoints can be converted using the conversion script `src/transformers/models/Gemma2/convert_Gemma2_weights_to_hf.py`

This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ), [Pedro Cuenca](https://huggingface.co/pcuenq) and [Tom Arsen]().
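A minimal text-generation sketch using the classes documented below; the hub checkpoint id and the generation settings are illustrative assumptions rather than part of this page:

```python
# Hedged sketch: load a converted Gemma2 checkpoint and generate a short completion.
# "google/gemma-2-9b" is an assumed hub id; substitute whichever converted checkpoint you have.
import torch
from transformers import AutoTokenizer, Gemma2ForCausalLM

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")
model = Gemma2ForCausalLM.from_pretrained(
    "google/gemma-2-9b", torch_dtype=torch.bfloat16, device_map="auto"
)

inputs = tokenizer("The capital of France is", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
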
## Gemma2Config

[[autodoc]] Gemma2Config

## Gemma2Model

[[autodoc]] Gemma2Model
    - forward

## Gemma2ForCausalLM

[[autodoc]] Gemma2ForCausalLM
    - forward

## Gemma2ForSequenceClassification

[[autodoc]] Gemma2ForSequenceClassification
    - forward

## Gemma2ForTokenClassification

[[autodoc]] Gemma2ForTokenClassification
    - forward
@@ -95,6 +95,68 @@ Below is an expected speedup diagram that compares pure inference time between t
<img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/gpt-neox-1.8b-speedup.jpg">
</div>


## Using Scaled Dot Product Attention (SDPA)
PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function
encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the
[official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)
or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)
page for more information.

SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set
`attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.

```python
import torch
from transformers import GPTNeoXForCausalLM

model = GPTNeoXForCausalLM.from_pretrained("EleutherAI/gpt-neox-20b", torch_dtype=torch.float16, attn_implementation="sdpa")
...
```

For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).
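If you want to confirm which backend was actually selected, a small check like the following works; note that `_attn_implementation` is an internal config attribute, so treat this as a debugging sketch rather than a stable API:

```python
# Sketch: verify the attention implementation resolved by from_pretrained().
# `_attn_implementation` is an internal attribute and may change between releases.
import torch
from transformers import GPTNeoXForCausalLM

model = GPTNeoXForCausalLM.from_pretrained(
    "EleutherAI/gpt-neox-20b", torch_dtype=torch.bfloat16, attn_implementation="sdpa"
)
print(model.config._attn_implementation)  # expected to print "sdpa"
```
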
On a local benchmark (rtx3080ti-16GB, PyTorch 2.2.1, OS Ubuntu 22.04) using `float16` with
[pythia-410m-deduped](https://huggingface.co/EleutherAI/pythia-410m-deduped), we saw the
following speedups during training and inference.

### Training
| Batch size | Seq len | Time per batch (Eager - s) | Time per batch (SDPA - s) | Speedup (%) | Eager peak mem (MB) | SDPA peak mem (MB) | Mem saving (%) |
|-----------:|-----------:|---------------------------:|-----------------------------:|------------:|--------------------:|-------------------:|------------------:|
| 1 | 128 | 0.024 | 0.019 | 28.945 | 1789.95 | 1789.95 | 0 |
| 1 | 256 | 0.039 | 0.031 | 23.18 | 1845.83 | 1844.84 | 0.053 |
| 1 | 512 | 0.08 | 0.055 | 45.524 | 2278.38 | 1953.76 | 16.615 |
| 1 | 1024 | 0.19 | 0.102 | 86.777 | 4772.36 | 2408.35 | 98.159 |
| 1 | 2048 | 0.565 | 0.204 | 177.098 | 13484.1 | 3882.01 | 247.348 |
| 2 | 128 | 0.037 | 0.032 | 15.121 | 1843.86 | 1844.78 | -0.05 |
| 2 | 256 | 0.067 | 0.055 | 21.706 | 1999.72 | 1951.67 | 2.462 |
| 2 | 512 | 0.144 | 0.096 | 50.046 | 3613.16 | 2406.77 | 50.125 |
| 2 | 1024 | 0.366 | 0.193 | 89.666 | 8707.55 | 3878.86 | 124.487 |
| 2 | 2048 | OOM | 0.379 | / | OOM | 6825.13 | SDPA does not OOM |
| 4 | 128 | 0.06 | 0.054 | 11.539 | 1947.6 | 1952.06 | -0.228 |
| 4 | 256 | 0.119 | 0.093 | 28.072 | 3008.39 | 2405.99 | 25.038 |
| 4 | 512 | 0.275 | 0.187 | 47.145 | 6290.58 | 3877.29 | 62.242 |
| 4 | 1024 | OOM | 0.36 | / | OOM | 6821.98 | SDPA does not OOM |
| 4 | 2048 | OOM | 0.731 | / | OOM | 12705.1 | SDPA does not OOM |

### Inference
| Batch size | Seq len | Per token latency Eager (ms) | Per token latency SDPA (ms) | Speedup (%) | Mem Eager (MB) | Mem SDPA (MB) | Mem saved (%) |
|--------------:|-------------:|--------------------------------:|-------------------------------:|---------------:|------------------:|----------------:|-----------------:|
| 1 | 128 | 6.569 | 5.858 | 12.14 | 974.831 | 974.826 | 0 |
| 1 | 256 | 7.009 | 5.863 | 19.542 | 1029.01 | 1028.08 | 0.09 |
| 1 | 512 | 7.157 | 5.965 | 19.983 | 1137.54 | 1137.52 | 0.001 |
| 1 | 1024 | 7.523 | 6.506 | 15.637 | 1329.3 | 1329.26 | 0.003 |
| 1 | 2048 | 9.271 | 9.205 | 0.713 | 1752.47 | 1734.51 | 1.036 |
| 2 | 128 | 7.239 | 5.959 | 21.493 | 1044.8 | 1028.37 | 1.597 |
| 2 | 256 | 7.228 | 6.036 | 19.757 | 1167.32 | 1137.73 | 2.601 |
| 2 | 512 | 7.538 | 6.693 | 12.628 | 1352.93 | 1329.55 | 1.758 |
| 2 | 1024 | 8.916 | 8.632 | 3.291 | 1752.56 | 1734.62 | 1.034 |
| 2 | 2048 | 12.628 | 12.606 | 0.181 | 2558.72 | 2545.8 | 0.508 |
| 4 | 128 | 7.278 | 6.046 | 20.373 | 1168.41 | 1137.79 | 2.691 |
| 4 | 256 | 7.614 | 6.588 | 15.574 | 1353.1 | 1329.79 | 1.753 |
| 4 | 512 | 8.798 | 8.144 | 8.028 | 1752.76 | 1734.85 | 1.032 |
| 4 | 1024 | 11.765 | 11.303 | 4.09 | 2558.96 | 2546.04 | 0.508 |
| 4 | 2048 | 19.568 | 17.735 | 10.33 | 4175.5 | 4165.26 | 0.246 |
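The exact benchmarking harness is not part of this page; a rough sketch of how such eager-vs-SDPA timings can be reproduced (model id, batch size and iteration count are illustrative) is:

```python
# Rough timing sketch (assumes a CUDA GPU): compare eager vs. SDPA attention for a
# forward pass of pythia-410m-deduped. Not the exact script behind the tables above.
import torch
from transformers import AutoTokenizer, GPTNeoXForCausalLM

model_id = "EleutherAI/pythia-410m-deduped"
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer(["Hello world"] * 2, return_tensors="pt").to("cuda")

for impl in ("eager", "sdpa"):
    model = GPTNeoXForCausalLM.from_pretrained(
        model_id, torch_dtype=torch.float16, attn_implementation=impl
    ).to("cuda")
    with torch.no_grad():
        model(**inputs)  # warmup
    torch.cuda.synchronize()
    start, end = torch.cuda.Event(enable_timing=True), torch.cuda.Event(enable_timing=True)
    start.record()
    with torch.no_grad():
        for _ in range(10):
            model(**inputs)
    end.record()
    torch.cuda.synchronize()
    print(impl, f"{start.elapsed_time(end) / 10:.2f} ms per forward pass")
```
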
## Resources

- [Causal language modeling task guide](../tasks/language_modeling)
@@ -50,6 +50,7 @@ InstructBLIP uses the same architecture as [BLIP-2](blip2) with a tiny but impor

[[autodoc]] InstructBlipProcessor


## InstructBlipVisionModel

[[autodoc]] InstructBlipVisionModel
docs/source/en/model_doc/instructblipvideo.md (new file, 74 lines)
@@ -0,0 +1,74 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# InstructBlipVideo

## Overview

The InstructBLIPVideo model is an extension of the models proposed in [InstructBLIP: Towards General-purpose Vision-Language Models with Instruction Tuning](https://arxiv.org/abs/2305.06500) by Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, Steven Hoi.
InstructBLIPVideo uses the same architecture as [InstructBLIP](instructblip) and works with the same checkpoints as [InstructBLIP](instructblip). The only difference is the ability to process videos.

The abstract from the paper is the following:

*General-purpose language models that can solve various language-domain tasks have emerged driven by the pre-training and instruction-tuning pipeline. However, building general-purpose vision-language models is challenging due to the increased task discrepancy introduced by the additional visual input. Although vision-language pre-training has been widely studied, vision-language instruction tuning remains relatively less explored. In this paper, we conduct a systematic and comprehensive study on vision-language instruction tuning based on the pre-trained BLIP-2 models. We gather a wide variety of 26 publicly available datasets, transform them into instruction tuning format and categorize them into two clusters for held-in instruction tuning and held-out zero-shot evaluation. Additionally, we introduce instruction-aware visual feature extraction, a crucial method that enables the model to extract informative features tailored to the given instruction. The resulting InstructBLIP models achieve state-of-the-art zero-shot performance across all 13 held-out datasets, substantially outperforming BLIP-2 and the larger Flamingo. Our models also lead to state-of-the-art performance when finetuned on individual downstream tasks (e.g., 90.7% accuracy on ScienceQA IMG). Furthermore, we qualitatively demonstrate the advantages of InstructBLIP over concurrent multimodal models.*

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/instructblip_architecture.jpg"
alt="drawing" width="600"/>

<small> InstructBLIPVideo architecture. Taken from the <a href="https://arxiv.org/abs/2305.06500">original paper.</a> </small>

This model was contributed by [RaushanTurganbay](https://huggingface.co/RaushanTurganbay).
The original code can be found [here](https://github.com/salesforce/LAVIS/tree/main/projects/instructblip).

## Usage tips

- The model was trained by sampling 4 frames per video, so it's recommended to sample 4 frames at inference time as well.
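A minimal sketch of that sampling step followed by a generation call is shown below; the PyAV-based helper, the checkpoint id (reusing an InstructBLIP checkpoint, per the note above) and the processor argument names are illustrative assumptions, not guaranteed API:

```python
# Hedged sketch: uniformly sample 4 frames with PyAV and run generation.
# The checkpoint id and the processor arguments are assumptions for illustration.
import av
import numpy as np
import torch
from transformers import InstructBlipVideoForConditionalGeneration, InstructBlipVideoProcessor

def sample_frames(path, num_frames=4):
    container = av.open(path)
    total = container.streams.video[0].frames
    indices = set(np.linspace(0, total - 1, num=num_frames, dtype=int).tolist())
    frames = [f.to_ndarray(format="rgb24") for i, f in enumerate(container.decode(video=0)) if i in indices]
    return np.stack(frames)  # (num_frames, height, width, 3)

processor = InstructBlipVideoProcessor.from_pretrained("Salesforce/instructblip-vicuna-7b")
model = InstructBlipVideoForConditionalGeneration.from_pretrained(
    "Salesforce/instructblip-vicuna-7b", torch_dtype=torch.float16, device_map="auto"
)

clip = sample_frames("video.mp4")  # 4 frames, as recommended above
inputs = processor(images=clip, text="What is happening in this video?", return_tensors="pt").to(model.device, dtype=torch.float16)
out = model.generate(**inputs, max_new_tokens=30)
print(processor.batch_decode(out, skip_special_tokens=True))
```
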
## InstructBlipVideoConfig

[[autodoc]] InstructBlipVideoConfig
    - from_vision_qformer_text_configs

## InstructBlipVideoVisionConfig

[[autodoc]] InstructBlipVideoVisionConfig

## InstructBlipVideoQFormerConfig

[[autodoc]] InstructBlipVideoQFormerConfig

## InstructBlipVideoProcessor

[[autodoc]] InstructBlipVideoProcessor

## InstructBlipVideoImageProcessor

[[autodoc]] InstructBlipVideoImageProcessor
    - preprocess

## InstructBlipVideoVisionModel

[[autodoc]] InstructBlipVideoVisionModel
    - forward

## InstructBlipVideoQFormerModel

[[autodoc]] InstructBlipVideoQFormerModel
    - forward

## InstructBlipVideoForConditionalGeneration

[[autodoc]] InstructBlipVideoForConditionalGeneration
    - forward
    - generate
docs/source/en/model_doc/llava-next-video.md (new file, 259 lines)
@@ -0,0 +1,259 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# LLaVa-NeXT-Video

## Overview

The LLaVa-NeXT-Video model was proposed in [LLaVA-NeXT: A Strong Zero-shot Video Understanding Model](https://llava-vl.github.io/blog/2024-04-30-llava-next-video/) by Yuanhan Zhang, Bo Li, Haotian Liu, Yong Jae Lee, Liangke Gui, Di Fu, Jiashi Feng, Ziwei Liu, Chunyuan Li. LLaVa-NeXT-Video improves upon [LLaVa-NeXT](llava_next) by fine-tuning on a mix of video and image data, thus increasing the model's performance on videos.

[LLaVA-NeXT](llava_next) surprisingly has strong performance in understanding video content in zero-shot fashion with the AnyRes technique that it uses. The AnyRes technique naturally represents a high-resolution image as multiple images. This technique is naturally generalizable to represent videos because videos can be considered as a set of frames (similar to a set of images in LLaVa-NeXT). The current version of LLaVA-NeXT makes use of AnyRes and trains with supervised fine-tuning (SFT) on top of LLaVA-Next on video data to achieve better video understanding capabilities. The model is a current SOTA among open-source models on [VideoMME bench](https://arxiv.org/abs/2405.21075).

The introduction from the blog is the following:

On January 30, 2024, we released LLaVA-NeXT, an open-source Large Multimodal Model (LMM) that has been trained exclusively on text-image data. With the proposed AnyRes technique, it boosts capabilities in reasoning, OCR, and world knowledge, demonstrating remarkable performance across a spectrum of image-based multimodal understanding tasks, and even exceeding Gemini-Pro on several image benchmarks, e.g. MMMU and MathVista.

**In today’s exploration, we delve into the performance of LLaVA-NeXT within the realm of video understanding tasks. We reveal that LLaVA-NeXT surprisingly has strong performance in understanding video content. The current version of LLaVA-NeXT for videos has several improvements:

- Zero-shot video representation capabilities with AnyRes: The AnyRes technique naturally represents a high-resolution image into multiple images that a pre-trained VIT is able to digest, and forms them into a concatenated sequence. This technique is naturally generalizable to represent videos (consisting of multiple frames), allowing the image-only-trained LLaVA-Next model to perform surprisingly well on video tasks. Notably, this is the first time that LMMs show strong zero-shot modality transfer ability.
- Inference with length generalization improves on longer videos. The linear scaling technique enables length generalization, allowing LLaVA-NeXT to effectively handle long-video beyond the limitation of the "max_token_length" of the LLM.
- Strong video understanding ability. (1) LLaVA-Next-Image, which combines the above two techniques, yields superior zero-shot performance than open-source LMMs tuned on videos. (2) LLaVA-Next-Video, further supervised fine-tuning (SFT) LLaVA-Next-Image on video data, achieves better video understanding capabilities compared to LLaVA-Next-Image. (3) LLaVA-Next-Video-DPO, which aligns the model response with AI feedback using direct preference optimization (DPO), showing significant performance boost.
- Efficient deployment and inference with SGLang. It allows 5x faster inference on video tasks, allowing more scalable serving such as million-level video re-captioning. See instructions in our repo.**

This model was contributed by [RaushanTurganbay](https://huggingface.co/RaushanTurganbay).
The original code can be found [here](https://github.com/LLaVA-VL/LLaVA-NeXT/tree/inference).
## Usage tips

- We advise users to use `padding_side="left"` when computing batched generation as it leads to more accurate results. Simply make sure to call `processor.tokenizer.padding_side = "left"` before generating.

- Note that each checkpoint has been trained with a specific prompt format, depending on which large language model (LLM) was used. You can use the tokenizer's `apply_chat_template` to format your prompts correctly. Below is an example of how to do that.

We will use [LLaVA-NeXT-Video-7B-hf](https://huggingface.co/llava-hf/LLaVA-NeXT-Video-7B-hf) and a conversation history of videos and images. Each content field has to be a list of dicts, as follows:

```python
from transformers import LlavaNextVideoProcessor

processor = LlavaNextVideoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")

conversation = [
    {
        "role": "system",
        "content": [
            {"type": "text", "text": "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions."},
        ],
    },
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "What’s shown in this image?"},
            {"type": "image"},
        ],
    },
    {
        "role": "assistant",
        "content": [{"type": "text", "text": "This image shows a red stop sign."},]
    },
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Why is this video funny?"},
            {"type": "video"},
        ],
    },
]

text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)

# Note that the template simply formats your prompt, you still have to tokenize it and obtain pixel values for your visuals
print(text_prompt)
```
## Usage example

### Single Media Mode

The model can accept both images and videos as input. Here's an example code for inference in half-precision (`torch.float16`):

```python
import av
import torch
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import LlavaNextVideoForConditionalGeneration, LlavaNextVideoProcessor

def read_video_pyav(container, indices):
    '''
    Decode the video with PyAV decoder.
    Args:
        container (`av.container.input.InputContainer`): PyAV container.
        indices (`List[int]`): List of frame indices to decode.
    Returns:
        result (np.ndarray): np array of decoded frames of shape (num_frames, height, width, 3).
    '''
    frames = []
    container.seek(0)
    start_index = indices[0]
    end_index = indices[-1]
    for i, frame in enumerate(container.decode(video=0)):
        if i > end_index:
            break
        if i >= start_index and i in indices:
            frames.append(frame)
    return np.stack([x.to_ndarray(format="rgb24") for x in frames])

# Load the model in half-precision
model = LlavaNextVideoForConditionalGeneration.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf", torch_dtype=torch.float16, device_map="auto")
processor = LlavaNextVideoProcessor.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf")

# Load the video as an np.array, sampling uniformly 8 frames (can sample more for longer videos)
video_path = hf_hub_download(repo_id="raushan-testing-hf/videos-test", filename="sample_demo_1.mp4", repo_type="dataset")
container = av.open(video_path)
total_frames = container.streams.video[0].frames
indices = np.arange(0, total_frames, total_frames / 8).astype(int)
video = read_video_pyav(container, indices)

conversation = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Why is this video funny?"},
            {"type": "video"},
        ],
    },
]

prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
inputs = processor(text=prompt, videos=video, return_tensors="pt")

out = model.generate(**inputs, max_new_tokens=60)
processor.batch_decode(out, skip_special_tokens=True, clean_up_tokenization_spaces=True)
```
### Mixed Media Mode

The model can also generate from interleaved image and video inputs. Note, however, that it was not trained in an interleaved image-video setting, which might affect the performance. Below is an example usage for mixed media input; add the following lines to the above code snippet:

```python
from PIL import Image
import requests

# Generate from image and video mixed inputs
# Load an image and write a new prompt
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "How many cats are there in the image?"},
            {"type": "image"},
        ],
    },
    {
        "role": "assistant",
        "content": [{"type": "text", "text": "There are two cats"}],
    },
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Why is this video funny?"},
            {"type": "video"},
        ],
    },
]
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
# `video` comes from the previous snippet
inputs = processor(text=prompt, images=image, videos=video, padding=True, return_tensors="pt")

# Generate
generate_ids = model.generate(**inputs, max_length=50)
processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
```
## Model optimization

### Quantization using Bitsandbytes for memory efficiency

The model can be loaded in lower bits, significantly reducing memory burden while maintaining the performance of the original model. This allows for efficient deployment on resource-constrained cases.

First make sure to install bitsandbytes by running `pip install bitsandbytes` and to have access to a CUDA compatible GPU device. Load the quantized model by simply adding [`BitsAndBytesConfig`](../main_classes/quantization#transformers.BitsAndBytesConfig) as shown below:

```python
import torch
from transformers import BitsAndBytesConfig, LlavaNextVideoForConditionalGeneration, LlavaNextVideoProcessor

# specify how to quantize the model
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.float16,
)

model = LlavaNextVideoForConditionalGeneration.from_pretrained("llava-hf/LLaVA-NeXT-Video-7B-hf", quantization_config=quantization_config, device_map="auto")
```
### Flash-Attention 2 to speed-up generation

Additionally, we can greatly speed-up model inference by using [Flash Attention](../perf_train_gpu_one.md#flash-attention-2), which is a faster implementation of the attention mechanism used inside the model.

First, make sure to install the latest version of Flash Attention 2:

```bash
pip install -U flash-attn --no-build-isolation
```

Also, your hardware should be compatible with Flash-Attention 2. Read more about it in the official documentation of the [flash attention repository](https://github.com/Dao-AILab/flash-attention). FlashAttention-2 can only be used when a model is loaded in `torch.float16` or `torch.bfloat16`.

To load and run a model using Flash Attention-2, simply add `attn_implementation="flash_attention_2"` when loading the model as follows:

```python
import torch
from transformers import LlavaNextVideoForConditionalGeneration

model = LlavaNextVideoForConditionalGeneration.from_pretrained(
    "llava-hf/LLaVA-NeXT-Video-7B-hf",
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",
).to(0)
```
## LlavaNextVideoConfig

[[autodoc]] LlavaNextVideoConfig

## LlavaNextVideoProcessor

[[autodoc]] LlavaNextVideoProcessor

## LlavaNextVideoImageProcessor

[[autodoc]] LlavaNextVideoImageProcessor

## LlavaNextVideoForConditionalGeneration

[[autodoc]] LlavaNextVideoForConditionalGeneration
    - forward
docs/source/en/model_doc/rt_detr.md (new file, 96 lines)
@@ -0,0 +1,96 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# RT-DETR

## Overview

The RT-DETR model was proposed in [DETRs Beat YOLOs on Real-time Object Detection](https://arxiv.org/abs/2304.08069) by Wenyu Lv, Yian Zhao, Shangliang Xu, Jinman Wei, Guanzhong Wang, Cheng Cui, Yuning Du, Qingqing Dang, Yi Liu.

RT-DETR is an object detection model that stands for "Real-Time DEtection Transformer." This model is designed to perform object detection tasks with a focus on achieving real-time performance while maintaining high accuracy. Leveraging the transformer architecture, which has gained significant popularity in various fields of deep learning, RT-DETR processes images to identify and locate multiple objects within them.

The abstract from the paper is the following:

*Recently, end-to-end transformer-based detectors (DETRs) have achieved remarkable performance. However, the issue of the high computational cost of DETRs has not been effectively addressed, limiting their practical application and preventing them from fully exploiting the benefits of no post-processing, such as non-maximum suppression (NMS). In this paper, we first analyze the influence of NMS in modern real-time object detectors on inference speed, and establish an end-to-end speed benchmark. To avoid the inference delay caused by NMS, we propose a Real-Time DEtection TRansformer (RT-DETR), the first real-time end-to-end object detector to our best knowledge. Specifically, we design an efficient hybrid encoder to efficiently process multi-scale features by decoupling the intra-scale interaction and cross-scale fusion, and propose IoU-aware query selection to improve the initialization of object queries. In addition, our proposed detector supports flexibly adjustment of the inference speed by using different decoder layers without the need for retraining, which facilitates the practical application of real-time object detectors. Our RT-DETR-L achieves 53.0% AP on COCO val2017 and 114 FPS on T4 GPU, while RT-DETR-X achieves 54.8% AP and 74 FPS, outperforming all YOLO detectors of the same scale in both speed and accuracy. Furthermore, our RT-DETR-R50 achieves 53.1% AP and 108 FPS, outperforming DINO-Deformable-DETR-R50 by 2.2% AP in accuracy and by about 21 times in FPS.*

The model version was contributed by [rafaelpadilla](https://huggingface.co/rafaelpadilla) and [sangbumchoi](https://github.com/SangbumChoi). The original code can be found [here](https://github.com/lyuwenyu/RT-DETR/).

## Usage tips

Initially, an image is processed using a pre-trained convolutional neural network, specifically a Resnet-D variant as referenced in the original code. This network extracts features from the final three layers of the architecture. Following this, a hybrid encoder is employed to convert the multi-scale features into a sequential array of image features. Then, a decoder, equipped with auxiliary prediction heads, is used to refine the object queries. This process facilitates the direct generation of bounding boxes, eliminating the need for any additional post-processing to acquire the logits and coordinates for the bounding boxes.

```py
>>> import torch
>>> import requests

>>> from PIL import Image
>>> from transformers import RTDetrForObjectDetection, RTDetrImageProcessor

>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> image_processor = RTDetrImageProcessor.from_pretrained("PekingU/rtdetr_r50vd")
>>> model = RTDetrForObjectDetection.from_pretrained("PekingU/rtdetr_r50vd")

>>> inputs = image_processor(images=image, return_tensors="pt")

>>> with torch.no_grad():
...     outputs = model(**inputs)

>>> results = image_processor.post_process_object_detection(outputs, target_sizes=torch.tensor([image.size[::-1]]), threshold=0.3)

>>> for result in results:
...     for score, label_id, box in zip(result["scores"], result["labels"], result["boxes"]):
...         score, label = score.item(), label_id.item()
...         box = [round(i, 2) for i in box.tolist()]
...         print(f"{model.config.id2label[label]}: {score:.2f} {box}")
sofa: 0.97 [0.14, 0.38, 640.13, 476.21]
cat: 0.96 [343.38, 24.28, 640.14, 371.5]
cat: 0.96 [13.23, 54.18, 318.98, 472.22]
remote: 0.95 [40.11, 73.44, 175.96, 118.48]
remote: 0.92 [333.73, 76.58, 369.97, 186.99]
```

## RTDetrConfig

[[autodoc]] RTDetrConfig

## RTDetrResNetConfig

[[autodoc]] RTDetrResNetConfig

## RTDetrImageProcessor

[[autodoc]] RTDetrImageProcessor
    - preprocess
    - post_process_object_detection

## RTDetrModel

[[autodoc]] RTDetrModel
    - forward

## RTDetrForObjectDetection

[[autodoc]] RTDetrForObjectDetection
    - forward

## RTDetrResNetBackbone

[[autodoc]] RTDetrResNetBackbone
    - forward
@@ -55,6 +55,7 @@ FlashAttention-2 is currently supported for the following architectures:
* [Llama](https://huggingface.co/docs/transformers/model_doc/llama#transformers.LlamaModel)
* [Llava](https://huggingface.co/docs/transformers/model_doc/llava)
* [Llava-NeXT](https://huggingface.co/docs/transformers/model_doc/llava_next)
+* [Llava-NeXT-Video](https://huggingface.co/docs/transformers/model_doc/llava_next_video)
* [VipLlava](https://huggingface.co/docs/transformers/model_doc/vipllava)
* [VideoLlava](https://huggingface.co/docs/transformers/model_doc/video_llava)
* [M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)
@@ -203,6 +204,7 @@ For now, Transformers supports SDPA inference and training for the following arc
* [Gemma](https://huggingface.co/docs/transformers/model_doc/gemma#transformers.GemmaModel)
* [GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)
* [GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode#transformers.GPTBigCodeModel)
+* [GPTNeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox#transformers.GPTNeoXModel)
* [JetMoe](https://huggingface.co/docs/transformers/model_doc/jetmoe#transformers.JetMoeModel)
* [Jamba](https://huggingface.co/docs/transformers/model_doc/jamba#transformers.JambaModel)
* [Llama](https://huggingface.co/docs/transformers/model_doc/llama#transformers.LlamaModel)
@@ -124,6 +124,7 @@ the processor.

```python
from transformers import SamModel, SamProcessor
+import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

@@ -147,7 +148,6 @@ masks = processor.image_processor.post_process_masks(outputs.pred_masks.cpu(), i
We can visualize the three masks in the `masks` output.

```python
-import torch
import matplotlib.pyplot as plt
import numpy as np

@@ -211,7 +211,7 @@ import matplotlib.patches as patches
fig, ax = plt.subplots()
ax.imshow(image)

-rectangle = patches.Rectangle((2350, 1600, 500, 500, linewidth=2, edgecolor='r', facecolor='none')
+rectangle = patches.Rectangle((2350, 1600), 500, 500, linewidth=2, edgecolor='r', facecolor='none')
ax.add_patch(rectangle)
ax.axis("off")
plt.show()
@@ -184,16 +184,16 @@ pytest -k "test and ada" tests/test_optimization.py

Sometimes you need to run `accelerate` tests on your models. For that you can just add `-m accelerate_tests` to your command, if, let's say, you want to run these tests on an `OPT` run:

```bash
RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
```


### Run documentation tests

In order to test whether the documentation examples are correct, you should check that the `doctests` are passing.
As an example, let's use [`WhisperModel.forward`'s docstring](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035):

```python
r"""
Returns:

@@ -216,8 +216,8 @@ Example:

```

Just run the following line to automatically test every docstring example in the desired file:

```bash
pytest --doctest-modules <path_to_file_or_dir>
```
If the file has a markdown extension, you should add the `--doctest-glob="*.md"` argument.

@@ -881,7 +881,7 @@ code that's buggy causes some bad state that will affect other tests, do not use

- Here is how to skip a whole test unconditionally:

```python no-style
-@unittest.skip("this bug needs to be fixed")
+@unittest.skip(reason="this bug needs to be fixed")
def test_feature_x():
```
@@ -171,16 +171,16 @@ pytest -k "test and ada" tests/test_optimization.py

時々、モデルに対して `accelerate` テストを実行する必要があります。たとえば、`OPT` 実行に対してこれらのテストを実行したい場合、コマンドに `-m accelerate_tests` を追加するだけで済みます:

```bash
RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
```

### Run documentation tests

ドキュメンテーションの例が正しいかどうかをテストするには、`doctests` が合格しているかを確認する必要があります。
例として、[`WhisperModel.forward` のドックストリング](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035)を使用しましょう。

```python
r"""
Returns:

@@ -205,7 +205,7 @@ Example:

指定したファイル内のすべてのドックストリング例を自動的にテストするために、以下の行を実行してください:

```bash
pytest --doctest-modules <path_to_file_or_dir>
```

@@ -809,7 +809,7 @@ with ExtendSysPath(f"{bindir}/.."):

```python no-style
-@unittest.skip("this bug needs to be fixed")
+@unittest.skip(reason="this bug needs to be fixed")
def test_feature_x():
```

@@ -1211,4 +1211,3 @@ cmd_that_may_fail || true

- [Github Actions:](https://github.com/actions/toolkit/issues/399)
- [CircleCI:](https://ideas.circleci.com/ideas/CCI-I-344)
@ -26,19 +26,19 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
## Transformers 테스트 방법[[how-transformers-are-tested]]
|
||||
|
||||
1. PR이 제출되면 9개의 CircleCi 작업으로 테스트가 진행됩니다. 해당 PR에 대해 새로운 커밋이 생성될 때마다 테스트는 다시 진행됩니다. 이 작업들은
|
||||
이 [config 파일](https://github.com/huggingface/transformers/tree/main/.circleci/config.yml)에 정의되어 있으므로 필요하다면
|
||||
1. PR이 제출되면 9개의 CircleCi 작업으로 테스트가 진행됩니다. 해당 PR에 대해 새로운 커밋이 생성될 때마다 테스트는 다시 진행됩니다. 이 작업들은
|
||||
이 [config 파일](https://github.com/huggingface/transformers/tree/main/.circleci/config.yml)에 정의되어 있으므로 필요하다면
|
||||
사용자의 로컬 환경에서 동일하게 재현해 볼 수 있습니다.
|
||||
|
||||
이 CI 작업은 `@slow` 테스트를 실행하지 않습니다.
|
||||
|
||||
2. [github actions](https://github.com/huggingface/transformers/actions)에 의해 실행되는 작업은 3개입니다:
|
||||
|
||||
- [torch hub integration](https://github.com/huggingface/transformers/tree/main/.github/workflows/github-torch-hub.yml):
|
||||
- [torch hub integration](https://github.com/huggingface/transformers/tree/main/.github/workflows/github-torch-hub.yml):
|
||||
torch hub integration이 작동하는지 확인합니다.
|
||||
|
||||
- [self-hosted (push)](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-push.yml): `main` 브랜치에서 커밋이 업데이트된 경우에만 GPU를 이용한 빠른 테스트를 실행합니다.
|
||||
이는 `src`, `tests`, `.github` 폴더 중 하나에 코드가 업데이트된 경우에만 실행됩니다.
|
||||
- [self-hosted (push)](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-push.yml): `main` 브랜치에서 커밋이 업데이트된 경우에만 GPU를 이용한 빠른 테스트를 실행합니다.
|
||||
이는 `src`, `tests`, `.github` 폴더 중 하나에 코드가 업데이트된 경우에만 실행됩니다.
|
||||
(model card, notebook, 기타 등등을 추가한 경우 실행되지 않도록 하기 위해서입니다)
|
||||
|
||||
- [self-hosted runner](https://github.com/huggingface/transformers/tree/main/.github/workflows/self-scheduled.yml): `tests` 및 `examples`에서
|
||||
@ -61,7 +61,7 @@ RUN_SLOW=1 pytest examples/
|
||||
|
||||
### 실행할 테스트 선택[[choosing-which-tests-to-run]]
|
||||
|
||||
이 문서는 테스트를 실행하는 다양한 방법에 대해 자세히 설명합니다.
|
||||
이 문서는 테스트를 실행하는 다양한 방법에 대해 자세히 설명합니다.
|
||||
모든 내용을 읽은 후에도, 더 자세한 내용이 필요하다면 [여기](https://docs.pytest.org/en/latest/usage.html)에서 확인할 수 있습니다.
|
||||
|
||||
다음은 가장 유용한 테스트 실행 방법 몇 가지입니다.
|
||||
@ -186,7 +186,7 @@ pytest -k "test and ada" tests/test_optimization.py
|
||||
모델에서 `accelerate` 테스트를 실행해야 할 때가 있습니다. 이를 위해서는 명령어에 `-m accelerate_tests`를 추가하면 됩니다.
|
||||
예를 들어, `OPT`에서 이러한 테스트를 실행하려면 다음과 같습니다:
|
||||
```bash
|
||||
RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
|
||||
RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
|
||||
```
|
||||
|
||||
### 문서 테스트 실행[[run-documentation-tests]]
|
||||
@ -194,7 +194,7 @@ RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
|
||||
예시 문서가 올바른지 테스트하려면 `doctests`가 통과하는지 확인해야 합니다.
|
||||
예를 들어, [`WhisperModel.forward`'s docstring](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035)를 사용해 봅시다:
|
||||
|
||||
```python
|
||||
```python
|
||||
r"""
|
||||
Returns:
|
||||
|
||||
@ -218,7 +218,7 @@ Example:
|
||||
```
|
||||
|
||||
원하는 파일의 모든 docstring 예제를 자동으로 테스트하려면 다음 명령을 실행하면 됩니다:
|
||||
```bash
|
||||
```bash
|
||||
pytest --doctest-modules <path_to_file_or_dir>
|
||||
```
|
||||
파일의 확장자가 markdown인 경우 `--doctest-glob="*.md"` 인수를 추가해야 합니다.
|
||||
@ -240,9 +240,9 @@ pytest --picked
|
||||
|
||||
### 소스 수정 시 실패한 테스트 자동 재실행[[automatically-rerun-failed-tests-on-source-modification]]
|
||||
|
||||
[pytest-xdist](https://github.com/pytest-dev/pytest-xdist)는 모든 실패한 테스트를 감지하고,
|
||||
[pytest-xdist](https://github.com/pytest-dev/pytest-xdist)는 모든 실패한 테스트를 감지하고,
|
||||
파일을 수정한 후에 파일을 계속 재실행하여 테스트가 성공할 때까지 기다리는 매우 유용한 기능을 제공합니다.
|
||||
따라서 수정한 내용을 확인한 후 pytest를 다시 시작할 필요가 없습니다.
|
||||
따라서 수정한 내용을 확인한 후 pytest를 다시 시작할 필요가 없습니다.
|
||||
모든 테스트가 통과될 때까지 이 과정을 반복한 후 다시 전체 실행이 이루어집니다.
|
||||
|
||||
```bash
|
||||
@ -252,7 +252,7 @@ pip install pytest-xdist
|
||||
재귀적 모드의 사용: `pytest -f` 또는 `pytest --looponfail`
|
||||
|
||||
파일의 변경 사항은 `looponfailroots` 루트 디렉터리와 해당 내용을 (재귀적으로) 확인하여 감지됩니다.
|
||||
이 값의 기본값이 작동하지 않는 경우,
|
||||
이 값의 기본값이 작동하지 않는 경우,
|
||||
`setup.cfg`의 설정 옵션을 변경하여 프로젝트에서 변경할 수 있습니다:
|
||||
|
||||
```ini
|
||||
@ -275,7 +275,7 @@ looponfailroots = transformers tests
|
||||
|
||||
### 특정 테스트 모듈 건너뛰기[[skip-a-test-module]]
|
||||
|
||||
모든 테스트 모듈을 실행하되 특정 모듈을 제외하려면, 실행할 테스트 목록을 명시적으로 지정할 수 있습니다.
|
||||
모든 테스트 모듈을 실행하되 특정 모듈을 제외하려면, 실행할 테스트 목록을 명시적으로 지정할 수 있습니다.
|
||||
예를 들어, `test_modeling_*.py` 테스트를 제외한 모든 테스트를 실행하려면 다음을 사용할 수 있습니다:
|
||||
|
||||
```bash
|
||||
@ -292,19 +292,19 @@ pytest --cache-clear tests
|
||||
|
||||
### 테스트를 병렬로 실행[[running-tests-in-parallel]]
|
||||
|
||||
이전에 언급한 것처럼 `make test`는 테스트를 병렬로 실행하기 위해
|
||||
이전에 언급한 것처럼 `make test`는 테스트를 병렬로 실행하기 위해
|
||||
`pytest-xdist` 플러그인(`-n X` 인수, 예를 들어 `-n 2`를 사용하여 2개의 병렬 작업 실행)을 통해 실행됩니다.
|
||||
|
||||
`pytest-xdist`의 `--dist=` 옵션을 사용하여 테스트를 어떻게 그룹화할지 제어할 수 있습니다.
|
||||
`pytest-xdist`의 `--dist=` 옵션을 사용하여 테스트를 어떻게 그룹화할지 제어할 수 있습니다.
|
||||
`--dist=loadfile`은 하나의 파일에 있는 테스트를 동일한 프로세스로 그룹화합니다.
|
||||
|
||||
실행된 테스트의 순서가 다르고 예측할 수 없기 때문에, `pytest-xdist`로 테스트 스위트를 실행하면 실패가 발생할 수 있습니다 (검출되지 않은 결합된 테스트가 있는 경우).
|
||||
이 경우 [pytest-replay](https://github.com/ESSS/pytest-replay)를 사용하면 동일한 순서로 테스트를 다시 실행해서
|
||||
이 경우 [pytest-replay](https://github.com/ESSS/pytest-replay)를 사용하면 동일한 순서로 테스트를 다시 실행해서
|
||||
실패하는 시퀀스를 최소화하는 데에 도움이 됩니다.
|
||||
|
||||
### 테스트 순서와 반복[[test-order-and-repetition]]
|
||||
|
||||
잠재적인 종속성 및 상태 관련 버그(tear down)를 감지하기 위해
|
||||
잠재적인 종속성 및 상태 관련 버그(tear down)를 감지하기 위해
|
||||
테스트를 여러 번, 연속으로, 무작위로 또는 세트로 반복하는 것이 좋습니다.
|
||||
그리고 직접적인 여러 번의 반복은 DL의 무작위성에 의해 발견되는 일부 문제를 감지하는 데에도 유용합니다.
|
||||
|
||||
@ -341,10 +341,10 @@ pytest --flake-finder --flake-runs=5 tests/test_failing_test.py
|
||||
pip install pytest-random-order
|
||||
```
|
||||
|
||||
중요: `pytest-random-order`가 설치되면 테스트가 자동으로 임의의 순서로 섞입니다.
|
||||
중요: `pytest-random-order`가 설치되면 테스트가 자동으로 임의의 순서로 섞입니다.
|
||||
구성 변경이나 커맨드 라인 옵션이 필요하지 않습니다.
|
||||
|
||||
앞서 설명한 것처럼 이를 통해 한 테스트의 상태가 다른 테스트의 상태에 영향을 미치는 결합된 테스트를 감지할 수 있습니다.
|
||||
앞서 설명한 것처럼 이를 통해 한 테스트의 상태가 다른 테스트의 상태에 영향을 미치는 결합된 테스트를 감지할 수 있습니다.
|
||||
`pytest-random-order`가 설치되면 해당 세션에서 사용된 랜덤 시드가 출력되며 예를 들어 다음과 같습니다:
|
||||
|
||||
```bash
|
||||
@ -364,7 +364,7 @@ Using --random-order-seed=573663
|
||||
```
|
||||
|
||||
정확히 동일한 테스트 목록(또는 목록이 없음)을 사용하는 경우에만 정확한 순서를 재현합니다.
|
||||
목록을 수동으로 좁히기 시작하면 더 이상 시드에 의존할 수 없고 실패했던 정확한 순서로 수동으로 목록을 나열해야합니다. 그리고 `--random-order-bucket=none`을 사용하여 pytest에게 순서를 임의로 설정하지 않도록 알려야 합니다.
|
||||
목록을 수동으로 좁히기 시작하면 더 이상 시드에 의존할 수 없고 실패했던 정확한 순서로 수동으로 목록을 나열해야합니다. 그리고 `--random-order-bucket=none`을 사용하여 pytest에게 순서를 임의로 설정하지 않도록 알려야 합니다.
|
||||
예를 들어 다음과 같습니다:
|
||||
|
||||
```bash
|
||||
@ -377,19 +377,19 @@ pytest --random-order-bucket=none tests/test_a.py tests/test_c.py tests/test_b.p
|
||||
pytest --random-order-bucket=none
|
||||
```
|
||||
|
||||
기본적으로 `--random-order-bucket=module`이 내재되어 있으므로, 모듈 수준에서 파일을 섞습니다.
|
||||
기본적으로 `--random-order-bucket=module`이 내재되어 있으므로, 모듈 수준에서 파일을 섞습니다.
|
||||
또한 `class`, `package`, `global` 및 `none` 수준에서도 섞을 수 있습니다.
|
||||
자세한 내용은 해당 [문서](https://github.com/jbasko/pytest-random-order)를 참조하세요.
|
||||
|
||||
또 다른 무작위화의 대안은 [`pytest-randomly`](https://github.com/pytest-dev/pytest-randomly)입니다.
|
||||
이 모듈은 매우 유사한 기능/인터페이스를 가지고 있지만, `pytest-random-order`에 있는 버킷 모드를 사용할 수는 없습니다.
|
||||
이 모듈은 매우 유사한 기능/인터페이스를 가지고 있지만, `pytest-random-order`에 있는 버킷 모드를 사용할 수는 없습니다.
|
||||
설치 후에는 자동으로 적용되는 문제도 동일하게 가집니다.
|
||||
|
||||
### 외관과 느낌을 변경[[look-and-feel-variations]
|
||||
|
||||
#### pytest-sugar 사용[[pytest-sugar]]
|
||||
|
||||
[pytest-sugar](https://github.com/Frozenball/pytest-sugar)는 테스트가 보여지는 형태를 개선하고,
|
||||
[pytest-sugar](https://github.com/Frozenball/pytest-sugar)는 테스트가 보여지는 형태를 개선하고,
|
||||
진행 상황 바를 추가하며, 실패한 테스트와 검증을 즉시 표시하는 플러그인입니다. 설치하면 자동으로 활성화됩니다.
|
||||
|
||||
```bash
|
||||
@ -416,7 +416,7 @@ pytest --pspec tests/test_optimization.py
|
||||
|
||||
#### 실패한 테스트 즉시 표시[[instantly-shows-failed-tests]]
|
||||
|
||||
[pytest-instafail](https://github.com/pytest-dev/pytest-instafail)은 테스트 세션의 끝까지 기다리지 않고
|
||||
[pytest-instafail](https://github.com/pytest-dev/pytest-instafail)은 테스트 세션의 끝까지 기다리지 않고
|
||||
실패 및 오류를 즉시 표시합니다.
|
||||
|
||||
```bash
|
||||
@ -435,7 +435,7 @@ GPU가 활성화된 환경에서, CPU 전용 모드로 테스트하려면 `CUDA_
|
||||
CUDA_VISIBLE_DEVICES="" pytest tests/utils/test_logging.py
|
||||
```
|
||||
|
||||
또는 다중 GPU가 있는 경우 `pytest`에서 사용할 GPU를 지정할 수도 있습니다.
|
||||
또는 다중 GPU가 있는 경우 `pytest`에서 사용할 GPU를 지정할 수도 있습니다.
|
||||
예를 들어, GPU `0` 및 `1`이 있는 경우 다음을 실행할 수 있습니다:
|
||||
|
||||
```bash
|
||||
@ -444,7 +444,7 @@ CUDA_VISIBLE_DEVICES="1" pytest tests/utils/test_logging.py
|
||||
|
||||
이렇게 하면 다른 GPU에서 다른 작업을 실행하려는 경우 유용합니다.
|
||||
|
||||
일부 테스트는 반드시 CPU 전용으로 실행해야 하며, 일부는 CPU 또는 GPU 또는 TPU에서 실행해야 하고, 일부는 여러 GPU에서 실행해야 합니다.
|
||||
일부 테스트는 반드시 CPU 전용으로 실행해야 하며, 일부는 CPU 또는 GPU 또는 TPU에서 실행해야 하고, 일부는 여러 GPU에서 실행해야 합니다.
|
||||
다음 스킵 데코레이터는 테스트의 요구 사항을 CPU/GPU/TPU별로 설정하는 데 사용됩니다:
|
||||
|
||||
- `require_torch` - 이 테스트는 torch에서만 실행됩니다.
|
||||
@ -480,7 +480,7 @@ def test_example_with_multi_gpu():
|
||||
def test_tf_thing_with_tensorflow():
|
||||
```
|
||||
|
||||
이러한 데코레이터는 중첩될 수 있습니다.
|
||||
이러한 데코레이터는 중첩될 수 있습니다.
|
||||
예를 들어, 느린 테스트로 진행되고 pytorch에서 적어도 하나의 GPU가 필요한 경우 다음과 같이 설정할 수 있습니다:
|
||||
|
||||
```python no-style
|
||||
@ -489,7 +489,7 @@ def test_tf_thing_with_tensorflow():
|
||||
def test_example_slow_on_gpu():
|
||||
```
|
||||
|
||||
`@parametrized`와 같은 일부 데코레이터는 테스트 이름을 다시 작성하기 때문에 `@require_*` 스킵 데코레이터는 올바르게 작동하려면 항상 맨 마지막에 나열되어야 합니다.
|
||||
`@parametrized`와 같은 일부 데코레이터는 테스트 이름을 다시 작성하기 때문에 `@require_*` 스킵 데코레이터는 올바르게 작동하려면 항상 맨 마지막에 나열되어야 합니다.
|
||||
다음은 올바른 사용 예입니다:
|
||||
|
||||
```python no-style
|
||||
@ -498,7 +498,7 @@ def test_example_slow_on_gpu():
|
||||
def test_integration_foo():
|
||||
```
|
||||
|
||||
`@pytest.mark.parametrize`에는 이러한 순서 문제는 없으므로 처음 혹은 마지막에 위치시킬 수 있고 이러한 경우에도 잘 작동할 것입니다.
|
||||
`@pytest.mark.parametrize`에는 이러한 순서 문제는 없으므로 처음 혹은 마지막에 위치시킬 수 있고 이러한 경우에도 잘 작동할 것입니다.
|
||||
하지만 unittest가 아닌 경우에만 작동합니다.
|
||||
|
||||
테스트 내부에서 다음을 사용할 수 있습니다:
|
||||
@ -513,7 +513,7 @@ n_gpu = get_gpu_count() #torch와 tf와 함께 작동
|
||||
|
||||
### 분산 훈련[[distributed-training]]
|
||||
|
||||
`pytest`는 분산 훈련을 직접적으로 다루지 못합니다.
|
||||
`pytest`는 분산 훈련을 직접적으로 다루지 못합니다.
|
||||
이를 시도하면 하위 프로세스가 올바른 작업을 수행하지 않고 `pytest`라고 생각하기에 테스트 스위트를 반복해서 실행하게 됩니다.
|
||||
그러나 일반 프로세스를 생성한 다음 여러 워커를 생성하고 IO 파이프를 관리하도록 하면 동작합니다.
|
||||
|
||||
@ -532,7 +532,7 @@ CUDA_VISIBLE_DEVICES=0,1 RUN_SLOW=1 pytest -sv tests/test_trainer_distributed.py
|
||||
|
||||
### 출력 캡처[[output-capture]]
|
||||
|
||||
테스트 실행 중 `stdout` 및 `stderr`로 전송된 모든 출력이 캡처됩니다.
|
||||
테스트 실행 중 `stdout` 및 `stderr`로 전송된 모든 출력이 캡처됩니다.
|
||||
테스트나 설정 메소드가 실패하면 캡처된 출력은 일반적으로 실패 추적 정보와 함께 표시됩니다.
|
||||
|
||||
출력 캡처를 비활성화하고 `stdout` 및 `stderr`를 정상적으로 받으려면 `-s` 또는 `--capture=no`를 사용하세요:
|
||||
@ -563,7 +563,7 @@ pytest --color=no tests/utils/test_logging.py
|
||||
pytest --pastebin=failed tests/utils/test_logging.py
|
||||
```
|
||||
|
||||
이렇게 하면 각 실패에 대한 URL을 제공하는 remote Paste service에 테스트 실행 정보를 제출합니다.
|
||||
이렇게 하면 각 실패에 대한 URL을 제공하는 remote Paste service에 테스트 실행 정보를 제출합니다.
|
||||
일반적인 테스트를 선택할 수도 있고 혹은 특정 실패만 보내려면 `-x`와 같이 추가할 수도 있습니다.
|
||||
|
||||
전체 테스트 세션 로그에 대한 URL을 생성합니다:
|
||||
@ -574,17 +574,17 @@ pytest --pastebin=all tests/utils/test_logging.py
|
||||
|
||||
## 테스트 작성[[writing-tests]]
|
||||
|
||||
🤗 transformers 테스트는 대부분 `unittest`를 기반으로 하지만,
|
||||
🤗 transformers 테스트는 대부분 `unittest`를 기반으로 하지만,
|
||||
`pytest`에서 실행되므로 대부분의 경우 두 시스템의 기능을 사용할 수 있습니다.
|
||||
|
||||
지원되는 기능에 대해 [여기](https://docs.pytest.org/en/stable/unittest.html)에서 확인할 수 있지만,
|
||||
지원되는 기능에 대해 [여기](https://docs.pytest.org/en/stable/unittest.html)에서 확인할 수 있지만,
|
||||
기억해야 할 중요한 점은 대부분의 `pytest` fixture가 작동하지 않는다는 것입니다.
|
||||
파라미터화도 작동하지 않지만, 우리는 비슷한 방식으로 작동하는 `parameterized` 모듈을 사용합니다.
|
||||
|
||||
|
||||
### 매개변수화[[parametrization]]
|
||||
|
||||
동일한 테스트를 다른 인수로 여러 번 실행해야 하는 경우가 종종 있습니다.
|
||||
동일한 테스트를 다른 인수로 여러 번 실행해야 하는 경우가 종종 있습니다.
|
||||
테스트 내에서 이 작업을 수행할 수 있지만, 그렇게 하면 하나의 인수 세트에 대해 테스트를 실행할 수 없습니다.
|
||||
|
||||
```python
|
||||
@ -605,7 +605,7 @@ class TestMathUnitTest(unittest.TestCase):
|
||||
assert_equal(math.floor(input), expected)
|
||||
```
|
||||
|
||||
이제 기본적으로 이 테스트는 `test_floor`의 마지막 3개 인수가
|
||||
이제 기본적으로 이 테스트는 `test_floor`의 마지막 3개 인수가
|
||||
매개변수 목록의 해당 인수에 할당되는 것으로 3번 실행될 것입니다.
|
||||
|
||||
그리고 `negative` 및 `integer` 매개변수 집합만 실행하려면 다음과 같이 실행할 수 있습니다:
|
||||
@ -620,7 +620,7 @@ pytest -k "negative and integer" tests/test_mytest.py
|
||||
pytest -k "not negative" tests/test_mytest.py
|
||||
```
|
||||
|
||||
앞에서 언급한 `-k` 필터를 사용하는 것 외에도,
|
||||
앞에서 언급한 `-k` 필터를 사용하는 것 외에도,
|
||||
각 서브 테스트의 정확한 이름을 확인한 후에 일부 혹은 전체 서브 테스트를 실행할 수 있습니다.
|
||||
|
||||
```bash
|
||||
@ -641,10 +641,10 @@ test_this1.py::TestMathUnitTest::test_floor_2_large_fraction
|
||||
pytest test_this1.py::TestMathUnitTest::test_floor_0_negative test_this1.py::TestMathUnitTest::test_floor_1_integer
|
||||
```
|
||||
|
||||
`transformers`의 개발자 종속성에 이미 있는 [parameterized](https://pypi.org/project/parameterized/) 모듈은
|
||||
`transformers`의 개발자 종속성에 이미 있는 [parameterized](https://pypi.org/project/parameterized/) 모듈은
|
||||
`unittests`와 `pytest` 테스트 모두에서 작동합니다.
|
||||
|
||||
그러나 테스트가 `unittest`가 아닌 경우 `pytest.mark.parametrize`를 사용할 수 있습니다(이미 있는 일부 테스트에서 사용되는 경우도 있습니다.
|
||||
그러나 테스트가 `unittest`가 아닌 경우 `pytest.mark.parametrize`를 사용할 수 있습니다(이미 있는 일부 테스트에서 사용되는 경우도 있습니다.
|
||||
주로 `examples` 하위에 있습니다).
|
||||
|
||||
다음은 `pytest`의 `parametrize` 마커를 사용한 동일한 예입니다:
|
||||
@ -666,8 +666,8 @@ def test_floor(name, input, expected):
|
||||
assert_equal(math.floor(input), expected)
|
||||
```
|
||||
|
||||
`parameterized`와 마찬가지로 `pytest.mark.parametrize`를 사용하면
|
||||
`-k` 필터가 작동하지 않는 경우에도 실행할 서브 테스트를 정확하게 지정할 수 있습니다.
|
||||
`parameterized`와 마찬가지로 `pytest.mark.parametrize`를 사용하면
|
||||
`-k` 필터가 작동하지 않는 경우에도 실행할 서브 테스트를 정확하게 지정할 수 있습니다.
|
||||
단, 이 매개변수화 함수는 서브 테스트의 이름 집합을 약간 다르게 생성합니다. 다음과 같은 모습입니다:
|
||||
|
||||
```bash
|
||||
@ -694,7 +694,7 @@ pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[i
|
||||
|
||||
### Files and directories[[files-and-directories]]

In tests we often need to know where things are relative to the current test file,
and it's not trivial since the test could be invoked from more than one directory or could reside in sub-directories with different depths.
A helper class `transformers.test_utils.TestCasePlus` solves this problem by sorting out all the basic paths and providing easy accessors to them:

@@ -717,7 +717,7 @@ pytest test_this2.py::test_floor[negative--1.5--2.0] test_this2.py::test_floor[i
- `repo_root_dir_str`
- `src_dir_str`

To start using those, all you need is to make sure that the test resides in a subclass of `transformers.test_utils.TestCasePlus`.
For example:

```python
@@ -729,7 +729,7 @@ class PathExampleTest(TestCasePlus):
data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro"
```

If you don't need to manipulate paths via `pathlib`, or you just need a path as a string, you can always call `str()` on the `pathlib` object or use the accessors ending with `_str`.
For example:

```python
@@ -743,14 +743,14 @@ class PathExampleTest(TestCasePlus):

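Both of those examples are mostly collapsed in this diff. A short sketch of what such a test typically looks like (assuming the `from transformers.testing_utils import TestCasePlus` import used throughout the test suite; the test name is illustrative):

```python
from transformers.testing_utils import TestCasePlus


class PathExampleTest(TestCasePlus):
    def test_example_paths(self):
        # pathlib accessors: tests_dir, examples_dir, repo_root_dir, src_dir
        data_dir = self.tests_dir / "fixtures/tests_samples/wmt_en_ro"
        assert data_dir.name == "wmt_en_ro"

        # the *_str accessors return plain strings instead of pathlib objects
        assert self.repo_root_dir_str == str(self.repo_root_dir)
```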
### Temporary files and directories[[temporary-files-and-directories]]

Using unique temporary files and directories is essential for parallel test running,
so that the tests won't overwrite each other's data. We also want the temporary files and directories removed at the end of each test that created them.
Therefore, using packages like `tempfile`, which address these needs, is essential.

However, when debugging tests, you need to be able to see what goes into the temporary file or directory, and
you want to know its exact path rather than having it randomized on every test re-run.

A helper class `transformers.test_utils.TestCasePlus` is best used for such purposes.
It's a sub-class of `unittest.TestCase`, so we can easily inherit from it in the test modules.

Here is an example of its usage:

@@ -773,7 +773,7 @@ def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir()
```

`tmp_dir` will contain the path to the created temporary dir.
It will be automatically removed at the end of the test.

- Create a temp dir of my choice, ensure it's empty before the test starts, and don't empty it after the test.

@@ -783,10 +783,10 @@ def test_whatever(self):
tmp_dir = self.get_auto_remove_tmp_dir("./xxx")
```

This is useful for debugging when you want to monitor a specific directory and
want to make sure the previous tests didn't leave any data in there.

- You can change the default behavior by directly overriding the `before` and `after` args,
which leads to one of the following behaviors:

- `before=True`: the temporary dir will always be cleared at the beginning of the test.

@@ -804,7 +804,7 @@ def test_whatever(self):

<Tip>

Each test can register multiple temporary directories,
and they will all be auto-removed, unless requested otherwise.

</Tip>

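The surrounding examples are mostly collapsed in this diff; a minimal sketch of the pattern they describe (the `before`/`after` keyword arguments are the ones discussed above) might be:

```python
from transformers.testing_utils import TestCasePlus


class TmpDirExampleTest(TestCasePlus):
    def test_whatever(self):
        # unique temp dir, removed automatically at the end of the test
        tmp_dir = self.get_auto_remove_tmp_dir()

        # fixed path, handy while debugging: cleared at the start, kept after the test
        debug_dir = self.get_auto_remove_tmp_dir("./xxx", before=True, after=False)
```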
@@ -826,17 +826,17 @@ with ExtendSysPath(f"{bindir}/.."):

### Skipping tests[[skipping-tests]]

This is useful when a bug is found and a new test is written, yet the bug is not fixed yet.
In order to be able to commit it to the main repository, we need to make sure it's skipped during `make test`.

Methods:

- A **skip** means that you expect your test to pass only if some conditions are met, otherwise pytest should skip running the test altogether.
Common examples are skipping windows-only tests on non-windows platforms, or skipping
tests that depend on an external resource which isn't available (for example a database).

- An **xfail** means that you expect a test to fail for some reason.
A common example is a test for a feature not yet implemented, or a bug not yet fixed.
When a test marked with `xfail` passes despite being expected to fail, it's an xpass and will be reported in the test summary.

One of the important differences between the two is that `skip` doesn't run the test, while `xfail` does.

@@ -847,7 +847,7 @@ with ExtendSysPath(f"{bindir}/.."):
- To skip the whole test unconditionally, you can do:

```python no-style
-@unittest.skip("this bug needs to be fixed")
+@unittest.skip(reason="this bug needs to be fixed")
def test_feature_x():
```

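The rest of the skip/xfail variants are collapsed in this diff. For reference, a few common forms consistent with what the text describes (test names are illustrative):

```python
import sys
import unittest

import pytest


@unittest.skip(reason="this bug needs to be fixed")
def test_feature_x():
    ...


@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
def test_posix_only():
    ...


@pytest.mark.xfail(reason="this feature is not implemented yet")
def test_feature_y():
    ...
```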
@@ -920,7 +920,7 @@ class TestClass():

### Slow tests[[slow-tests]]

The library of tests is ever-growing, and some of the tests take minutes to run,
so we can't afford waiting for an hour for the test suite to complete on CI.
Therefore, with some exceptions for essential tests, slow tests should be marked as in the example below:

@@ -936,7 +936,7 @@ def test_integration_foo():
RUN_SLOW=1 pytest tests
```

Some decorators like `@parameterized` rewrite test names,
therefore `@slow` and the rest of the skip decorators `@require_*` have to be listed last for them to work correctly. Here is an example of the correct usage:

```python no-style
@@ -945,25 +945,25 @@ RUN_SLOW=1 pytest tests
def test_integration_foo():
```

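The full example is collapsed above; a sketch of the ordering it describes (decorators that rewrite test names first, `@slow`/`@require_*` last, i.e. closest to the function) might be:

```python
import unittest

from parameterized import parameterized
from transformers.testing_utils import slow


class IntegrationTests(unittest.TestCase):
    @parameterized.expand([("case_a",), ("case_b",)])
    @slow  # skip-style decorators go last so name-rewriting decorators still see the real test
    def test_integration_foo(self, name):
        ...
```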
As explained at the beginning of this document, slow tests get to run on a scheduled basis, rather than in PR CI checks.
So it's possible that some problems will be missed during a PR submission and get merged.
Such problems will get caught during the next scheduled CI job.
But it also means that it's important to run the slow tests on your machine before submitting the PR.

Here is a rough decision making mechanism for choosing which tests should be marked as slow:

If the test is focused on one of the library's internal components (e.g. modeling files, tokenization files, pipelines),
then we should run that test in the non-slow test suite.
If it's focused on another aspect of the library, such as the documentation or the examples,
then we should run these tests in the slow test suite. And then, to refine this approach, we should have exceptions:

- All tests that need to download a heavy set of weights or a dataset larger than ~50MB (e.g. model integration tests, tokenizer integration tests, pipeline integration tests)
should be set to slow.
If you're adding a new model, you should create a tiny version of it (with random weights) and upload it to the hub for the integration tests.
This is discussed in the following paragraphs.
- All tests that need to do a training not specifically optimized to be fast should be set to slow.
- We can introduce exceptions if some of these should-be-non-slow tests are excruciatingly slow, and set them to `@slow`.
Auto-modeling tests, which save and load large files to disk, are a good example of tests that are marked as `@slow`.
- If a test completes within 1 second on CI (including downloads, if any), it should not be a slow test.

@@ -976,22 +976,22 @@ def test_integration_foo():
grep tiny tests examples
```

Here is an example of a [script](https://github.com/huggingface/transformers/tree/main/scripts/fsmt/fsmt-make-tiny-model.py)
that created the tiny model [stas/tiny-wmt19-en-de](https://huggingface.co/stas/tiny-wmt19-en-de).
You can easily adjust it to your specific model's architecture.

It's easy to measure the run-time incorrectly if, for example, there is an overhead of downloading a huge model,
but if you test it locally the downloaded files get cached and thus the download time is not measured.
Hence check the execution speed report in the CI logs instead (the output of `pytest --durations=0 tests`).

That report is also useful to find slow outliers that aren't marked as such, or which need to be re-written to be fast.
If you notice that the test suite starts getting slow on CI, the top listing of this report will show the slowest tests.

### Testing the stdout/stderr output[[testing-the-stdout/stderr-output]]

In order to test functions that write to `stdout` and/or `stderr`, the test can access those streams using pytest's [capsys system](https://docs.pytest.org/en/latest/capture.html).
Here is how this is accomplished:

```python
@@ -1019,7 +1019,7 @@ def test_result_and_stdout(capsys):
assert msg in err
```
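Most of that test is collapsed above; a sketch of a `capsys`-based test consistent with the visible fragment (the helper function names are illustrative):

```python
import sys


def print_to_stdout(s):
    print(s)


def print_to_stderr(s):
    sys.stderr.write(s)


def test_result_and_stdout(capsys):
    msg = "Hello"
    print_to_stdout(msg)
    print_to_stderr(msg)
    out, err = capsys.readouterr()  # consume the captured streams
    sys.stdout.write(out)  # optionally replay them for the test log
    sys.stderr.write(err)
    assert msg in out
    assert msg in err
```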
And, of course, most of the time `stderr` will come as part of an exception,
so try/except has to be used in such a case:

```python
@@ -1061,11 +1061,11 @@ def test_result_and_stdout():
```

An important potential issue with capturing `stdout` is that it may contain `\r` characters that, in normal `print`, reset everything that has been printed so far.
There is no problem with `pytest`, but with `pytest -s` these characters get included in the buffer, so
to be able to run the test with and without `-s`, you have to make an extra cleanup of the captured output, using
`re.sub(r'~.*\r', '', buf, 0, re.M)`.

But then we have a helper context manager wrapper
that automatically takes care of it all, regardless of whether the output contains some `\r`'s or not, so it's convenient:

```python
@@ -1108,7 +1108,7 @@ with CaptureStd() as cs:
print(cs.err, cs.out)
```

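Again the example is collapsed in this diff; a minimal sketch of the helper the text refers to (`CaptureStdout`/`CaptureStderr`/`CaptureStd` live in `transformers.testing_utils`):

```python
from transformers.testing_utils import CaptureStdout


def test_capture_stdout_scrubs_carriage_returns():
    msg = "Secret message\r"
    final = "Hello World"
    with CaptureStdout() as cs:
        print(msg + final)
    # the helper cleans up the `\r`-reset output, so only the final text remains
    assert cs.out == final + "\n", f"captured: {cs.out}"
```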
Also, to aid debugging test issues,
by default these context managers automatically replay the captured streams on exit from the context.


@@ -1130,7 +1130,7 @@ assert cl.out, msg + "\n"

### Testing with environment variables[[testing-with-environment-variables]]

If you want to test the impact of environment variables for a specific test, you can use the helper decorator
`transformers.testing_utils.mockenv`.

```python
@@ -1143,7 +1143,7 @@ class HfArgumentParserTest(unittest.TestCase):
env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
```
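The body of the example is collapsed above; a sketch of the pattern (the asserted value is illustrative):

```python
import os
import unittest

from transformers.testing_utils import mockenv


class HfArgumentParserTest(unittest.TestCase):
    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        self.assertEqual(env_level_str, "error")
```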
At times an external program needs to be called, which requires setting `PYTHONPATH` in `os.environ` to include multiple local paths.
The helper class `transformers.test_utils.TestCasePlus` comes to help:

```python
@@ -1156,8 +1156,8 @@ class EnvExampleTest(TestCasePlus):
# now call the external program, passing `env` to it
```
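A sketch of what such a test looks like, assuming the `get_env()` helper that `TestCasePlus` provides:

```python
import subprocess
import sys

from transformers.testing_utils import TestCasePlus


class EnvExampleTest(TestCasePlus):
    def test_external_prog(self):
        env = self.get_env()  # PYTHONPATH points at the right tests/ or examples/ dir plus src/
        # now call the external program, passing `env` to it
        subprocess.run([sys.executable, "-c", "import transformers"], env=env, check=True)
```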
Depending on whether the test file is under the `tests` test suite or under `examples`,
it will correctly set up `env[PYTHONPATH]` to include one of these two directories,
and also the `src` directory to ensure the testing is done against the current repo,
and finally whatever `env[PYTHONPATH]` was already set to before the test was called, if anything.

@@ -1166,7 +1166,7 @@ class EnvExampleTest(TestCasePlus):

### Getting reproducible results[[getting-reproducible-results]]

In some situations you may want to remove randomness from your tests, to get identical reproducible results.
To do this you will need to fix the seed:
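The actual block is collapsed in this diff; a typical seed-fixing snippet consistent with what the text describes (covering the python, numpy and pytorch RNGs; add others, e.g. tensorflow, as needed) looks like:

```python
import random

import numpy as np
import torch

seed = 42

# python RNG
random.seed(seed)

# numpy RNG
np.random.seed(seed)

# pytorch RNGs
torch.manual_seed(seed)
torch.backends.cudnn.deterministic = True
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)
```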
```python
@@ -1207,11 +1207,11 @@ pytest tests/utils/test_logging.py -W error::UserWarning --pdb
To trigger a self-push workflow CI job, you must:

1. Create a new branch on `transformers` origin (not a fork!).
2. The branch name has to start with either `ci_` or `ci-` (`main` triggers it too, but we can't do PRs on `main`).
It also gets triggered only for specific paths - you can find the up-to-date definition, in case it changed since this document was written,
[here](https://github.com/huggingface/transformers/blob/main/.github/workflows/self-push.yml) under *push:*.
3. Create a PR from this branch.
4. Then you can see the job appear [here](https://github.com/huggingface/transformers/actions/workflows/self-push.yml).
It may not run right away if there is a backlog.


@@ -1219,13 +1219,13 @@ pytest tests/utils/test_logging.py -W error::UserWarning --pdb

## Testing experimental CI features[[testing-Experimental-CI-Features]]

Testing CI features can be potentially problematic as it can interfere with the normal CI functioning.
Hence, if a new CI feature is to be added, it should be done as follows.

1. Create a new dedicated job that tests what needs to be tested.
2. The new job must always succeed so that it gives us a green ✓ (details below).
3. Let it run for some days, to check that a variety of different PR types get to run on it
(user fork branches, non-forked branches, branches originating from a direct github.com UI file edit, various forced pushes, etc. - there are so many),
while monitoring the experimental job's logs
(not the overall job status, which is purposefully always green).
4. When it's clear that everything is solid, merge the new changes into the existing jobs.

@@ -1234,7 +1234,7 @@ Testing CI features can be potentially problematic as it can interfere with the normal CI

But how can we make the job always succeed while the new CI feature is being developed?

Some CIs, like TravisCI, support `ignore-step-failure` and will report the overall job as successful,
but CircleCI and Github Actions, which we currently use, don't support that.

So the following workaround can be used:

@@ -1264,12 +1264,12 @@ Some CIs, like TravisCI, support `ignore-step-failure` and will report the overall job
cmd_that_may_fail || true
```

Now that you're satisfied with the results, you will of course want to integrate the experimental step or job with the rest of the normal jobs,
while removing `set +euo pipefail` or any other things you may have added, to ensure that the experimental job doesn't interfere with the normal CI functioning.

This whole process would have been much easier if we could just set something like `allow-failure` for the experimental step,
and let it fail without impacting the overall status of PRs.
But as mentioned earlier, CircleCI and Github Actions don't support it at the moment.

You can vote for this feature and see where it stands in these CI-specific threads:
@@ -61,7 +61,7 @@ from transformers.utils import check_min_version, send_example_telemetry
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
Array = Any
Dataset = datasets.arrow_dataset.Dataset

@@ -60,7 +60,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=2.14.0", "To fix: pip install -r examples/flax/speech-recognition/requirements.txt")

@@ -56,7 +56,7 @@ from transformers.utils import check_min_version, send_example_telemetry
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
Array = Any
Dataset = datasets.arrow_dataset.Dataset

@@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")

@@ -45,7 +45,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")

@@ -54,7 +54,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt")

@@ -56,7 +56,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

@@ -49,7 +49,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logger = get_logger(__name__)

@@ -43,7 +43,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

@@ -48,7 +48,7 @@ Any model supported by the AutoModelForMaskedImageModeling API can be used.
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")

@@ -53,7 +53,7 @@ Any model supported by the AutoModelForMaskedImageModeling API can be used.
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
@@ -46,7 +46,8 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/instance-segmentation/requirements.txt")

@@ -52,7 +52,8 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/instance-segmentation/requirements.txt")

@@ -55,7 +55,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")

@@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logger = get_logger(__name__)

@@ -58,7 +58,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")

@@ -60,7 +60,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logger = get_logger(__name__)

@@ -54,7 +54,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")

@@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logger = get_logger(__name__)
require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")

@@ -47,7 +47,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")

@@ -47,7 +47,7 @@ from transformers.utils import PaddingStrategy, check_min_version, send_example_
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logger = logging.getLogger(__name__)

@@ -56,7 +56,7 @@ from transformers.utils import PaddingStrategy, check_min_version, send_example_
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logger = get_logger(__name__)
# You should update this to your particular problem to have better documentation of `model_type`
@@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/object-detection/requirements.txt")

@@ -51,7 +51,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logging.basicConfig(level=logging.INFO)
logger = get_logger(__name__)

@@ -50,7 +50,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@@ -56,7 +56,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@@ -46,7 +46,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

@@ -51,7 +51,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt")

@@ -50,7 +50,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logger = get_logger(__name__)

@@ -50,7 +50,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")

@@ -53,7 +53,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")

@@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")

@@ -52,7 +52,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")

@@ -56,7 +56,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")

@@ -47,7 +47,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
@@ -133,6 +133,10 @@ class DataTrainingArguments:
)
},
)
+preprocessing_num_workers: Optional[int] = field(
+default=None,
+metadata={"help": "The number of processes to use for the preprocessing."},
+)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)

@@ -573,6 +577,7 @@ def main():
raw_datasets = raw_datasets.map(
preprocess_function,
batched=True,
+num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on dataset",
)
@@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

@@ -49,7 +49,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logger = get_logger(__name__)

@@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

@@ -49,7 +49,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")

@@ -56,7 +56,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")

@@ -52,7 +52,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt")

@@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt")

@@ -90,7 +90,7 @@ tornado==6.4.1
tqdm==4.66.3
traitlets
git+https://github.com/huggingface/transformers.git
-urllib3==1.26.18
+urllib3==1.26.19
wcwidth==0.2.5
webencodings==0.5.1
wget==3.2
@@ -51,7 +51,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version(
"datasets>=1.8.0", "To fix: pip install -r examples/tensorflow/contrastive-image-text/requirements.txt"

@@ -55,7 +55,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

@@ -50,7 +50,7 @@ from transformers.utils import PaddingStrategy, check_min_version, send_example_
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logger = logging.getLogger(__name__)

@@ -62,7 +62,7 @@ except (ModuleNotFoundError, ImportError):
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
logger = logging.getLogger(__name__)

@@ -53,7 +53,7 @@ from transformers.utils.versions import require_version
# region Checking dependencies
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")

@@ -47,7 +47,7 @@ from transformers.utils import check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
task_to_keys = {
"cola": ("sentence", None),

@@ -56,7 +56,7 @@ from transformers.utils.versions import require_version
# region Dependencies and constants
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
-check_min_version("4.42.0.dev0")
+check_min_version("4.42.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
@ -36,18 +36,18 @@ limitations under the License.
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<b>Deutsch</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -31,18 +31,18 @@ limitations under the License.
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<b>Español</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -36,18 +36,18 @@ limitations under the License.
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<b>Français</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -56,18 +56,18 @@ checkpoint: जाँच बिंदु
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<b>हिन्दी</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -66,18 +66,18 @@ user: ユーザ
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<b>日本語</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -31,18 +31,18 @@ limitations under the License.
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<b>한국어</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -36,18 +36,18 @@ limitations under the License.
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<b>Рortuguês</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -36,18 +36,18 @@ limitations under the License.
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<b>Русский</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<p>
|
||||
</h4>
|
||||
|
@ -38,18 +38,18 @@ limitations under the License.
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<b>తెలుగు</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
README_vi.md
@@ -36,17 +36,17 @@ limitations under the License.
 <h4 align="center">
     <p>
         <a href="https://github.com/huggingface/transformers/">English</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
         <b>Tiếng việt</b> |
     </p>
 </h4>
README_zh-hans.md
@@ -57,17 +57,17 @@ checkpoint: 检查点
     <p>
         <a href="https://github.com/huggingface/transformers/">English</a> |
         <b>简体中文</b> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
     </p>
 </h4>
README_zh-hant.md
@@ -68,18 +68,18 @@ user: 使用者
 <h4 align="center">
     <p>
         <a href="https://github.com/huggingface/transformers/">English</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
         <b>繁體中文</b> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
-        <a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
     </p>
 </h4>
setup.py
@@ -124,6 +124,7 @@ _deps = [
     "jax>=0.4.1,<=0.4.13",
     "jaxlib>=0.4.1,<=0.4.13",
     "jieba",
+    "jinja2>=3.1.0",
     "kenlm",
     # Keras pin - this is to make sure Keras 3 doesn't destroy us. Remove or change when we have proper support.
     "keras>2.9,<2.16",
@@ -131,7 +132,7 @@ _deps = [
     "librosa",
     "nltk",
     "natten>=0.14.6,<0.15.0",
-    "numpy>=1.17",
+    "numpy>=1.17,<2.0",
     "onnxconverter-common",
     "onnxruntime-tools>=1.4.2",
     "onnxruntime>=1.4.0",
@@ -429,7 +430,7 @@ install_requires = [

 setup(
     name="transformers",
-    version="4.42.0.dev0",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
+    version="4.42.1",  # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
     author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/transformers/graphs/contributors)",
     author_email="transformers@huggingface.co",
     description="State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow",
src/transformers/__init__.py
@@ -18,7 +18,7 @@
 # to defer the actual importing for when the objects are requested. This way `import transformers` provides the names
 # in the namespace without actually importing anything (and especially none of the backends).

-__version__ = "4.42.0.dev0"
+__version__ = "4.42.1"

 from typing import TYPE_CHECKING

@@ -435,6 +435,7 @@ _import_structure = {
     ],
     "models.fuyu": ["FuyuConfig"],
     "models.gemma": ["GemmaConfig"],
+    "models.gemma2": ["Gemma2Config"],
     "models.git": [
         "GitConfig",
         "GitProcessor",
@@ -473,6 +474,12 @@ _import_structure = {
         "InstructBlipQFormerConfig",
         "InstructBlipVisionConfig",
     ],
+    "models.instructblipvideo": [
+        "InstructBlipVideoConfig",
+        "InstructBlipVideoProcessor",
+        "InstructBlipVideoQFormerConfig",
+        "InstructBlipVideoVisionConfig",
+    ],
     "models.jamba": ["JambaConfig"],
     "models.jetmoe": ["JetMoeConfig"],
     "models.kosmos2": [
@@ -510,6 +517,10 @@ _import_structure = {
         "LlavaNextConfig",
         "LlavaNextProcessor",
     ],
+    "models.llava_next_video": [
+        "LlavaNextVideoConfig",
+        "LlavaNextVideoProcessor",
+    ],
     "models.longformer": [
         "LongformerConfig",
         "LongformerTokenizer",
@@ -654,6 +665,7 @@ _import_structure = {
         "RoFormerConfig",
         "RoFormerTokenizer",
     ],
+    "models.rt_detr": ["RTDetrConfig", "RTDetrResNetConfig"],
     "models.rwkv": ["RwkvConfig"],
     "models.sam": [
         "SamConfig",
@@ -1136,10 +1148,12 @@ else:
     _import_structure["models.idefics"].extend(["IdeficsImageProcessor"])
     _import_structure["models.idefics2"].extend(["Idefics2ImageProcessor"])
     _import_structure["models.imagegpt"].extend(["ImageGPTFeatureExtractor", "ImageGPTImageProcessor"])
+    _import_structure["models.instructblipvideo"].extend(["InstructBlipVideoImageProcessor"])
     _import_structure["models.layoutlmv2"].extend(["LayoutLMv2FeatureExtractor", "LayoutLMv2ImageProcessor"])
     _import_structure["models.layoutlmv3"].extend(["LayoutLMv3FeatureExtractor", "LayoutLMv3ImageProcessor"])
     _import_structure["models.levit"].extend(["LevitFeatureExtractor", "LevitImageProcessor"])
     _import_structure["models.llava_next"].append("LlavaNextImageProcessor")
+    _import_structure["models.llava_next_video"].append("LlavaNextVideoImageProcessor")
     _import_structure["models.mask2former"].append("Mask2FormerImageProcessor")
     _import_structure["models.maskformer"].extend(["MaskFormerFeatureExtractor", "MaskFormerImageProcessor"])
     _import_structure["models.mobilenet_v1"].extend(["MobileNetV1FeatureExtractor", "MobileNetV1ImageProcessor"])
@@ -1153,6 +1167,7 @@ else:
     _import_structure["models.pix2struct"].extend(["Pix2StructImageProcessor"])
     _import_structure["models.poolformer"].extend(["PoolFormerFeatureExtractor", "PoolFormerImageProcessor"])
     _import_structure["models.pvt"].extend(["PvtImageProcessor"])
+    _import_structure["models.rt_detr"].extend(["RTDetrImageProcessor"])
     _import_structure["models.sam"].extend(["SamImageProcessor"])
     _import_structure["models.segformer"].extend(["SegformerFeatureExtractor", "SegformerImageProcessor"])
     _import_structure["models.seggpt"].extend(["SegGptImageProcessor"])
@@ -2167,6 +2182,15 @@ else:
             "GemmaPreTrainedModel",
         ]
     )
+    _import_structure["models.gemma2"].extend(
+        [
+            "Gemma2ForCausalLM",
+            "Gemma2ForSequenceClassification",
+            "Gemma2ForTokenClassification",
+            "Gemma2Model",
+            "Gemma2PreTrainedModel",
+        ]
+    )
     _import_structure["models.git"].extend(
         [
             "GitForCausalLM",
@@ -2316,6 +2340,14 @@ else:
             "InstructBlipVisionModel",
         ]
     )
+    _import_structure["models.instructblipvideo"].extend(
+        [
+            "InstructBlipVideoForConditionalGeneration",
+            "InstructBlipVideoPreTrainedModel",
+            "InstructBlipVideoQFormerModel",
+            "InstructBlipVideoVisionModel",
+        ]
+    )
     _import_structure["models.jamba"].extend(
         [
             "JambaForCausalLM",
@@ -2415,6 +2447,12 @@ else:
             "LlavaNextPreTrainedModel",
         ]
     )
+    _import_structure["models.llava_next_video"].extend(
+        [
+            "LlavaNextVideoForConditionalGeneration",
+            "LlavaNextVideoPreTrainedModel",
+        ]
+    )
     _import_structure["models.longformer"].extend(
         [
             "LongformerForMaskedLM",
@@ -3004,6 +3042,15 @@ else:
             "load_tf_weights_in_roformer",
         ]
     )
+    _import_structure["models.rt_detr"].extend(
+        [
+            "RTDetrForObjectDetection",
+            "RTDetrModel",
+            "RTDetrPreTrainedModel",
+            "RTDetrResNetBackbone",
+            "RTDetrResNetPreTrainedModel",
+        ]
+    )
     _import_structure["models.rwkv"].extend(
         [
             "RwkvForCausalLM",
@@ -5025,6 +5072,7 @@ if TYPE_CHECKING:
     )
     from .models.fuyu import FuyuConfig
     from .models.gemma import GemmaConfig
+    from .models.gemma2 import Gemma2Config
     from .models.git import (
         GitConfig,
         GitProcessor,
@@ -5068,6 +5116,12 @@ if TYPE_CHECKING:
         InstructBlipQFormerConfig,
         InstructBlipVisionConfig,
     )
+    from .models.instructblipvideo import (
+        InstructBlipVideoConfig,
+        InstructBlipVideoProcessor,
+        InstructBlipVideoQFormerConfig,
+        InstructBlipVideoVisionConfig,
+    )
     from .models.jamba import JambaConfig
     from .models.jetmoe import JetMoeConfig
     from .models.kosmos2 import (
@@ -5105,6 +5159,10 @@ if TYPE_CHECKING:
         LlavaNextConfig,
         LlavaNextProcessor,
     )
+    from .models.llava_next_video import (
+        LlavaNextVideoConfig,
+        LlavaNextVideoProcessor,
+    )
     from .models.longformer import (
         LongformerConfig,
         LongformerTokenizer,
@@ -5270,6 +5328,10 @@ if TYPE_CHECKING:
         RoFormerConfig,
         RoFormerTokenizer,
    )
+    from .models.rt_detr import (
+        RTDetrConfig,
+        RTDetrResNetConfig,
+    )
     from .models.rwkv import RwkvConfig
     from .models.sam import (
         SamConfig,
@@ -5757,6 +5819,7 @@ if TYPE_CHECKING:
         from .models.idefics import IdeficsImageProcessor
         from .models.idefics2 import Idefics2ImageProcessor
         from .models.imagegpt import ImageGPTFeatureExtractor, ImageGPTImageProcessor
+        from .models.instructblipvideo import InstructBlipVideoImageProcessor
         from .models.layoutlmv2 import (
             LayoutLMv2FeatureExtractor,
             LayoutLMv2ImageProcessor,
@@ -5767,6 +5830,7 @@ if TYPE_CHECKING:
         )
         from .models.levit import LevitFeatureExtractor, LevitImageProcessor
         from .models.llava_next import LlavaNextImageProcessor
+        from .models.llava_next_video import LlavaNextVideoImageProcessor
         from .models.mask2former import Mask2FormerImageProcessor
         from .models.maskformer import (
             MaskFormerFeatureExtractor,
@@ -5792,6 +5856,7 @@ if TYPE_CHECKING:
             PoolFormerImageProcessor,
         )
         from .models.pvt import PvtImageProcessor
+        from .models.rt_detr import RTDetrImageProcessor
         from .models.sam import SamImageProcessor
         from .models.segformer import SegformerFeatureExtractor, SegformerImageProcessor
         from .models.seggpt import SegGptImageProcessor
@@ -6640,6 +6705,13 @@ if TYPE_CHECKING:
             GemmaModel,
             GemmaPreTrainedModel,
         )
+        from .models.gemma2 import (
+            Gemma2ForCausalLM,
+            Gemma2ForSequenceClassification,
+            Gemma2ForTokenClassification,
+            Gemma2Model,
+            Gemma2PreTrainedModel,
+        )
         from .models.git import (
             GitForCausalLM,
             GitModel,
@@ -6755,6 +6827,12 @@ if TYPE_CHECKING:
             InstructBlipQFormerModel,
             InstructBlipVisionModel,
         )
+        from .models.instructblipvideo import (
+            InstructBlipVideoForConditionalGeneration,
+            InstructBlipVideoPreTrainedModel,
+            InstructBlipVideoQFormerModel,
+            InstructBlipVideoVisionModel,
+        )
         from .models.jamba import (
             JambaForCausalLM,
             JambaForSequenceClassification,
@@ -6830,6 +6908,10 @@ if TYPE_CHECKING:
             LlavaNextForConditionalGeneration,
             LlavaNextPreTrainedModel,
         )
+        from .models.llava_next_video import (
+            LlavaNextVideoForConditionalGeneration,
+            LlavaNextVideoPreTrainedModel,
+        )
         from .models.longformer import (
             LongformerForMaskedLM,
             LongformerForMultipleChoice,
@@ -7295,6 +7377,13 @@ if TYPE_CHECKING:
             RoFormerPreTrainedModel,
             load_tf_weights_in_roformer,
         )
+        from .models.rt_detr import (
+            RTDetrForObjectDetection,
+            RTDetrModel,
+            RTDetrPreTrainedModel,
+            RTDetrResNetBackbone,
+            RTDetrResNetPreTrainedModel,
+        )
         from .models.rwkv import (
             RwkvForCausalLM,
             RwkvModel,
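The hunks above register four new model families (Gemma 2, InstructBLIP-Video, LLaVA-NeXT-Video, RT-DETR) in the lazy import structure. A minimal sketch of what that change enables — the new names resolve from the top-level package (the model classes still require a torch-enabled environment):

```python
# Minimal sketch: the v4.42 additions become importable from the top level.
from transformers import (
    Gemma2Config,
    Gemma2ForCausalLM,
    InstructBlipVideoConfig,
    InstructBlipVideoForConditionalGeneration,
    LlavaNextVideoConfig,
    LlavaNextVideoForConditionalGeneration,
    RTDetrConfig,
    RTDetrForObjectDetection,
)

print(Gemma2Config())  # default config instantiates without downloading any weights
```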
src/transformers/agents/prompts.py
@@ -123,7 +123,7 @@ caption = image_captioner(image)
 ```<end_action>

 ---
-Above example were using tools that might not exist for you. You only have acces to those Tools:
+Above example were using tools that might not exist for you. You only have access to those Tools:
 <<tool_names>>

 Remember to make sure that variables you use are all defined.
@@ -145,7 +145,7 @@ The $ACTION_JSON_BLOB should only contain a SINGLE action, do NOT return a list
   "action_input": $INPUT
 }<end_action>

-Make sure to have the $INPUT as a dictionnary in the right format for the tool you are using, and do not put variable names as input if you can find the right values.
+Make sure to have the $INPUT as a dictionary in the right format for the tool you are using, and do not put variable names as input if you can find the right values.

 You should ALWAYS use the following format:

@@ -250,7 +250,7 @@ Action:
 }<end_action>


-Above example were using notional tools that might not exist for you. You only have acces to those tools:
+Above example were using notional tools that might not exist for you. You only have access to those tools:
 <<tool_descriptions>>

 Here are the rules you should always follow to solve your task:
src/transformers/agents/python_interpreter.py
@@ -628,7 +628,7 @@ def evaluate_ast(

     Args:
         expression (`ast.AST`):
-            The code to evaluate, as an abastract syntax tree.
+            The code to evaluate, as an abstract syntax tree.
         state (`Dict[str, Any]`):
             A dictionary mapping variable names to values. The `state` is updated if need be when the evaluation
             encounters assignements.
@@ -640,7 +640,7 @@ def evaluate_ast(
         Add more at your own risk!
     """
     if isinstance(expression, ast.Assign):
-        # Assignement -> we evaluate the assignement which should update the state
+        # Assignement -> we evaluate the assignment which should update the state
         # We return the variable assigned as it may be used to determine the final result.
         return evaluate_assign(expression, state, tools)
     elif isinstance(expression, ast.AugAssign):
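For context, the docstring above describes evaluating one AST node at a time against a shared `state` dictionary. A rough usage sketch — the module path and the three-argument call are assumptions based on the signature shown in this diff:

```python
import ast

from transformers.agents.python_interpreter import evaluate_ast  # assumed import path

state, tools = {}, {}
for node in ast.parse("x = 2 + 3\ny = x * 10").body:
    result = evaluate_ast(node, state, tools)  # assignments update `state` in place

print(state)  # expected: {'x': 5, 'y': 50}
```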
src/transformers/audio_utils.py
@@ -18,7 +18,7 @@ and remove unnecessary dependencies.
 """

 import warnings
-from typing import Optional, Tuple, Union
+from typing import List, Optional, Tuple, Union

 import numpy as np

@@ -581,6 +581,213 @@ def spectrogram(
     return spectrogram


def spectrogram_batch(
    waveform_list: List[np.ndarray],
    window: np.ndarray,
    frame_length: int,
    hop_length: int,
    fft_length: Optional[int] = None,
    power: Optional[float] = 1.0,
    center: bool = True,
    pad_mode: str = "reflect",
    onesided: bool = True,
    preemphasis: Optional[float] = None,
    mel_filters: Optional[np.ndarray] = None,
    mel_floor: float = 1e-10,
    log_mel: Optional[str] = None,
    reference: float = 1.0,
    min_value: float = 1e-10,
    db_range: Optional[float] = None,
    remove_dc_offset: Optional[bool] = None,
    dtype: np.dtype = np.float32,
) -> List[np.ndarray]:
    """
    Calculates spectrograms for a list of waveforms using the Short-Time Fourier Transform, optimized for batch processing.
    This function extends the capabilities of the `spectrogram` function to handle multiple waveforms efficiently by leveraging broadcasting.

    It supports generating various types of spectrograms:

    - amplitude spectrogram (`power = 1.0`)
    - power spectrogram (`power = 2.0`)
    - complex-valued spectrogram (`power = None`)
    - log spectrogram (use `log_mel` argument)
    - mel spectrogram (provide `mel_filters`)
    - log-mel spectrogram (provide `mel_filters` and `log_mel`)

    How this works:

    1. The input waveform is split into frames of size `frame_length` that are partially overlapping by
       `frame_length - hop_length` samples.
    2. Each frame is multiplied by the window and placed into a buffer of size `fft_length`.
    3. The DFT is taken of each windowed frame.
    4. The results are stacked into a spectrogram.

    We make a distinction between the following "blocks" of sample data, each of which may have different lengths:

    - The analysis frame. This is the size of the time slices that the input waveform is split into.
    - The window. Each analysis frame is multiplied by the window to avoid spectral leakage.
    - The FFT input buffer. The length of this determines how many frequency bins are in the spectrogram.

    In this implementation, the window is assumed to be zero-padded to have the same size as the analysis frame. A
    padded window can be obtained from `window_function()`. The FFT input buffer may be larger than the analysis frame,
    typically the next power of two.

    Note: This function is designed for efficient batch processing of multiple waveforms but retains compatibility with individual waveform processing methods like `librosa.stft`.

    Args:
        waveform_list (`List[np.ndarray]` with arrays of shape `(length,)`):
            The list of input waveforms, each a single-channel (mono) signal.
        window (`np.ndarray` of shape `(frame_length,)`):
            The windowing function to apply, including zero-padding if necessary.
        frame_length (`int`):
            The length of each frame for analysis.
        hop_length (`int`):
            The step size between successive frames.
        fft_length (`int`, *optional*):
            The size of the FFT buffer, defining frequency bin resolution.
        power (`float`, *optional*, defaults to 1.0):
            Determines the type of spectrogram: 1.0 for amplitude, 2.0 for power, None for complex.
        center (`bool`, *optional*, defaults to `True`):
            Whether to center-pad the waveform frames.
        pad_mode (`str`, *optional*, defaults to `"reflect"`):
            The padding strategy when `center` is `True`.
        onesided (`bool`, *optional*, defaults to `True`):
            If True, returns a one-sided spectrogram for real input signals.
        preemphasis (`float`, *optional*):
            Applies a pre-emphasis filter to each frame.
        mel_filters (`np.ndarray`, *optional*):
            Mel filter bank for converting to mel spectrogram.
        mel_floor (`float`, *optional*, defaults to 1e-10):
            Floor value for mel spectrogram to avoid log(0).
        log_mel (`str`, *optional*):
            Specifies log scaling strategy; options are None, "log", "log10", "dB".
        reference (`float`, *optional*, defaults to 1.0):
            Reference value for dB conversion in log_mel.
        min_value (`float`, *optional*, defaults to 1e-10):
            Minimum floor value for log scale conversions.
        db_range (`float`, *optional*):
            Dynamic range for dB scale spectrograms.
        remove_dc_offset (`bool`, *optional*):
            Whether to remove the DC offset from each frame.
        dtype (`np.dtype`, *optional*, defaults to `np.float32`):
            Data type of the output spectrogram.

    Returns:
        List[`np.ndarray`]: A list of spectrogram arrays, one for each input waveform.
    """
    window_length = len(window)

    if fft_length is None:
        fft_length = frame_length

    if frame_length > fft_length:
        raise ValueError(f"frame_length ({frame_length}) may not be larger than fft_length ({fft_length})")

    if window_length != frame_length:
        raise ValueError(f"Length of the window ({window_length}) must equal frame_length ({frame_length})")

    if hop_length <= 0:
        raise ValueError("hop_length must be greater than zero")

    # Check the dimensions of the waveform
    for waveform in waveform_list:
        if waveform.ndim != 1:
            raise ValueError(f"Input waveform must have only one dimension, shape is {waveform.shape}")

    # Check if waveform is complex
    for waveform in waveform_list:
        if np.iscomplexobj(waveform):
            raise ValueError("Complex-valued input waveforms are not currently supported")

    # Center pad the waveform
    if center:
        padding = [(int(frame_length // 2), int(frame_length // 2))]
        waveform_list = [
            np.pad(
                waveform,
                padding,
                mode=pad_mode,
            )
            for waveform in waveform_list
        ]
    original_waveform_lengths = [
        len(waveform) for waveform in waveform_list
    ]  # these lengths will be used to remove padding later

    # Batch pad the waveform
    max_length = max(original_waveform_lengths)
    padded_waveform_batch = np.array(
        [
            np.pad(waveform, (0, max_length - len(waveform)), mode="constant", constant_values=0)
            for waveform in waveform_list
        ],
        dtype=dtype,
    )

    # Promote to float64, since np.fft uses float64 internally
    padded_waveform_batch = padded_waveform_batch.astype(np.float64)
    window = window.astype(np.float64)

    # Split waveform into frames of frame_length size
    num_frames = int(1 + np.floor((padded_waveform_batch.shape[1] - frame_length) / hop_length))
    # these lengths will be used to remove padding later
    true_num_frames = [int(1 + np.floor((length - frame_length) / hop_length)) for length in original_waveform_lengths]
    num_batches = padded_waveform_batch.shape[0]

    num_frequency_bins = (fft_length // 2) + 1 if onesided else fft_length
    spectrogram = np.empty((num_batches, num_frames, num_frequency_bins), dtype=np.complex64)

    # rfft is faster than fft
    fft_func = np.fft.rfft if onesided else np.fft.fft
    buffer = np.zeros((num_batches, fft_length))

    for frame_idx in range(num_frames):
        timestep = frame_idx * hop_length
        buffer[:, :frame_length] = padded_waveform_batch[:, timestep : timestep + frame_length]

        if remove_dc_offset:
            buffer[:, :frame_length] -= buffer[:, :frame_length].mean(axis=1, keepdims=True)

        if preemphasis is not None:
            buffer[:, 1:frame_length] -= preemphasis * buffer[:, : frame_length - 1]
            buffer[:, 0] *= 1 - preemphasis

        buffer[:, :frame_length] *= window

        spectrogram[:, frame_idx] = fft_func(buffer)

    # Note: ** is much faster than np.power
    if power is not None:
        spectrogram = np.abs(spectrogram, dtype=np.float64) ** power

    # Apply mel filters if provided
    if mel_filters is not None:
        result = np.tensordot(spectrogram, mel_filters.T, axes=([2], [1]))
        spectrogram = np.maximum(mel_floor, result)

    # Convert to log scale if specified
    if power is not None and log_mel is not None:
        if log_mel == "log":
            spectrogram = np.log(spectrogram)
        elif log_mel == "log10":
            spectrogram = np.log10(spectrogram)
        elif log_mel == "dB":
            if power == 1.0:
                spectrogram = amplitude_to_db_batch(spectrogram, reference, min_value, db_range)
            elif power == 2.0:
                spectrogram = power_to_db_batch(spectrogram, reference, min_value, db_range)
            else:
                raise ValueError(f"Cannot use log_mel option '{log_mel}' with power {power}")
        else:
            raise ValueError(f"Unknown log_mel option: {log_mel}")

    spectrogram = np.asarray(spectrogram, dtype)

    spectrogram_list = [spectrogram[i, : true_num_frames[i], :].T for i in range(len(true_num_frames))]

    return spectrogram_list
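The new helper mirrors `spectrogram` but accepts a list of waveforms of different lengths and trims each result back to its true number of frames. A minimal usage sketch, relying on the `window_function()` helper referenced in the docstring above:

```python
import numpy as np
from transformers.audio_utils import spectrogram_batch, window_function

sampling_rate = 16_000
waveforms = [
    np.sin(2 * np.pi * 440.0 * np.arange(sampling_rate) / sampling_rate),  # 1 s tone
    np.random.default_rng(0).standard_normal(sampling_rate // 2),          # 0.5 s noise
]

specs = spectrogram_batch(
    waveforms,
    window=window_function(400, "hann"),
    frame_length=400,
    hop_length=160,
    fft_length=512,
    power=2.0,
)

for spec in specs:
    print(spec.shape)  # (num_frequency_bins, num_frames); frame counts differ per input length
```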
 def power_to_db(
     spectrogram: np.ndarray,
     reference: float = 1.0,
@@ -632,6 +839,55 @@ def power_to_db(
     return spectrogram


def power_to_db_batch(
    spectrogram: np.ndarray,
    reference: float = 1.0,
    min_value: float = 1e-10,
    db_range: Optional[float] = None,
) -> np.ndarray:
    """
    Converts a batch of power spectrograms to the decibel scale. This computes `10 * log10(spectrogram / reference)`,
    using basic logarithm properties for numerical stability.

    This function supports batch processing, where each item in the batch is an individual power (mel) spectrogram.

    Args:
        spectrogram (`np.ndarray`):
            The input batch of power (mel) spectrograms. Expected shape is (batch_size, *spectrogram_shape).
            Note that a power spectrogram has the amplitudes squared!
        reference (`float`, *optional*, defaults to 1.0):
            Sets the input spectrogram value that corresponds to 0 dB. For example, use `np.max(spectrogram)` to set
            the loudest part to 0 dB. Must be greater than zero.
        min_value (`float`, *optional*, defaults to `1e-10`):
            The spectrogram will be clipped to this minimum value before conversion to decibels, to avoid taking
            `log(0)`. The default of `1e-10` corresponds to a minimum of -100 dB. Must be greater than zero.
        db_range (`float`, *optional*):
            Sets the maximum dynamic range in decibels. For example, if `db_range = 80`, the difference between the
            peak value and the smallest value will never be more than 80 dB. Must be greater than zero.

    Returns:
        `np.ndarray`: the batch of spectrograms in decibels
    """
    if reference <= 0.0:
        raise ValueError("reference must be greater than zero")
    if min_value <= 0.0:
        raise ValueError("min_value must be greater than zero")

    reference = max(min_value, reference)

    spectrogram = np.clip(spectrogram, a_min=min_value, a_max=None)
    spectrogram = 10.0 * (np.log10(spectrogram) - np.log10(reference))

    if db_range is not None:
        if db_range <= 0.0:
            raise ValueError("db_range must be greater than zero")
        # Apply db_range clipping per batch item
        max_values = spectrogram.max(axis=(1, 2), keepdims=True)
        spectrogram = np.clip(spectrogram, a_min=max_values - db_range, a_max=None)

    return spectrogram

 def amplitude_to_db(
     spectrogram: np.ndarray,
     reference: float = 1.0,
@@ -681,6 +937,51 @@ def amplitude_to_db(
     return spectrogram


def amplitude_to_db_batch(
    spectrogram: np.ndarray, reference: float = 1.0, min_value: float = 1e-5, db_range: Optional[float] = None
) -> np.ndarray:
    """
    Converts a batch of amplitude spectrograms to the decibel scale. This computes `20 * log10(spectrogram / reference)`,
    using basic logarithm properties for numerical stability.

    The function supports batch processing, where each item in the batch is an individual amplitude (mel) spectrogram.

    Args:
        spectrogram (`np.ndarray`):
            The input batch of amplitude (mel) spectrograms. Expected shape is (batch_size, *spectrogram_shape).
        reference (`float`, *optional*, defaults to 1.0):
            Sets the input spectrogram value that corresponds to 0 dB. For example, use `np.max(spectrogram)` to set
            the loudest part to 0 dB. Must be greater than zero.
        min_value (`float`, *optional*, defaults to `1e-5`):
            The spectrogram will be clipped to this minimum value before conversion to decibels, to avoid taking
            `log(0)`. The default of `1e-5` corresponds to a minimum of -100 dB. Must be greater than zero.
        db_range (`float`, *optional*):
            Sets the maximum dynamic range in decibels. For example, if `db_range = 80`, the difference between the
            peak value and the smallest value will never be more than 80 dB. Must be greater than zero.

    Returns:
        `np.ndarray`: the batch of spectrograms in decibels
    """
    if reference <= 0.0:
        raise ValueError("reference must be greater than zero")
    if min_value <= 0.0:
        raise ValueError("min_value must be greater than zero")

    reference = max(min_value, reference)

    spectrogram = np.clip(spectrogram, a_min=min_value, a_max=None)
    spectrogram = 20.0 * (np.log10(spectrogram) - np.log10(reference))

    if db_range is not None:
        if db_range <= 0.0:
            raise ValueError("db_range must be greater than zero")
        # Apply db_range clipping per batch item
        max_values = spectrogram.max(axis=(1, 2), keepdims=True)
        spectrogram = np.clip(spectrogram, a_min=max_values - db_range, a_max=None)

    return spectrogram


 ### deprecated functions below this line ###

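Both batched dB helpers clip each item before taking the log and, when `db_range` is set, bound the dynamic range relative to each item's own peak. A small numerical sketch of the two conversions:

```python
import numpy as np
from transformers.audio_utils import amplitude_to_db_batch, power_to_db_batch

# A batch of two tiny (freq, time) power spectrograms
power_specs = np.array(
    [
        [[1.0, 0.1], [0.01, 1e-12]],
        [[4.0, 2.0], [1.0, 0.5]],
    ]
)

print(power_to_db_batch(power_specs))               # 10 * log10(x), with 1e-12 floored at 1e-10 (-100 dB)
print(amplitude_to_db_batch(np.sqrt(power_specs)))  # 20 * log10(sqrt(x)) yields the same dB values
```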
@@ -773,7 +1074,7 @@ def stft(frames: np.array, windowing_function: np.array, fft_window_size: int =
         frames (`np.array` of dimension `(num_frames, fft_window_size)`):
             A framed audio signal obtained using `audio_utils.fram_wav`.
         windowing_function (`np.array` of dimension `(nb_frequency_bins, nb_mel_filters)`:
-            A array reprensenting the function that will be used to reduces the amplitude of the discontinuities at the
+            A array representing the function that will be used to reduces the amplitude of the discontinuities at the
             boundaries of each frame when computing the STFT. Each frame will be multiplied by the windowing_function.
             For more information on the discontinuities, called *Spectral leakage*, refer to [this
             tutorial]https://download.ni.com/evaluation/pxi/Understanding%20FFTs%20and%20Windowing.pdf
src/transformers/cache_utils.py
@@ -214,7 +214,7 @@ class QuantizedCacheConfig(CacheConfig):
         compute_dtype (`torch.dtype`, *optional*, defaults to `torch.float16`):
             The defualt dtype used for computations in the model. Keys and Values will be cast to this dtype after dequantization.
         device (`str`, *optional*, defaults to `"cpu"`):
-            Device on which to peform computations, should be same as the model's device.
+            Device on which to perform computations, should be same as the model's device.
     """

     def __init__(
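For context, `QuantizedCacheConfig` is what `generate()` builds when asked for a quantized KV cache. A rough sketch of that call — the checkpoint name is a placeholder and the `quanto` backend must be installed; only models that support the new cache classes accept this path:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "your-org/your-causal-lm"  # placeholder: any checkpoint supporting cache classes
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("The quantized cache keeps memory low", return_tensors="pt")
out = model.generate(
    **inputs,
    max_new_tokens=20,
    cache_implementation="quantized",
    cache_config={"backend": "quanto", "nbits": 4, "device": "cpu"},  # fields of QuantizedCacheConfig
)
print(tokenizer.decode(out[0], skip_special_tokens=True))
```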
@@ -395,21 +395,21 @@ class DynamicCache(Cache):
             cache.update(key_states, value_states, layer_idx)
         return cache

-    def crop(self, maximum_length: int):
-        """Crop the past key values up to a new `maximum_length` in terms of tokens. `maximum_length` can also be
-        negative to remove `maximum_length` tokens. This is used in assisted decoding and contrastive search."""
+    def crop(self, max_length: int):
+        """Crop the past key values up to a new `max_length` in terms of tokens. `max_length` can also be
+        negative to remove `max_length` tokens. This is used in assisted decoding and contrastive search."""

         # In case it is negative
-        if maximum_length < 0:
-            maximum_length = self.get_seq_length() - abs(maximum_length)
+        if max_length < 0:
+            max_length = self.get_seq_length() - abs(max_length)

-        if self.get_seq_length() <= maximum_length:
+        if self.get_seq_length() <= max_length:
             return

-        self._seen_tokens = maximum_length
+        self._seen_tokens = max_length
         for idx in range(len(self.key_cache)):
-            self.key_cache[idx] = self.key_cache[idx][..., :maximum_length, :]
-            self.value_cache[idx] = self.value_cache[idx][..., :maximum_length, :]
+            self.key_cache[idx] = self.key_cache[idx][..., :max_length, :]
+            self.value_cache[idx] = self.value_cache[idx][..., :max_length, :]

     def batch_split(self, full_batch_size: int, split_size: int) -> List["DynamicCache"]:
         """Split the current instance into a list of `DynamicCache` by the batch size. This will be used by
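A small sketch of the renamed `crop(max_length)` API, using synthetic tensors in place of real attention states:

```python
import torch
from transformers import DynamicCache

cache = DynamicCache()
for layer_idx in range(2):  # pretend model with 2 layers and 10 cached tokens each
    keys = torch.randn(1, 4, 10, 8)    # (batch, heads, seq_len, head_dim)
    values = torch.randn(1, 4, 10, 8)
    cache.update(keys, values, layer_idx)

cache.crop(-2)                 # negative: drop the last 2 tokens
print(cache.get_seq_length())  # 8
cache.crop(5)                  # positive: keep only the first 5 tokens
print(cache.get_seq_length())  # 5
```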
@@ -970,3 +970,125 @@ class SlidingWindowCache(StaticCache):
         # in theory there is no limit because the sliding window size is fixed
         # no matter how long the sentence is
         return None


class HybridCache(Cache):
    def __init__(self, config: PretrainedConfig, max_batch_size, max_cache_len, device="cpu", dtype=None) -> None:
        if not hasattr(config, "sliding_window") or config.sliding_window is None:
            raise ValueError(
                "Setting `cache_implementation` to 'sliding_window' requires the model config supporting "
                "sliding window attention, please check if there is a `sliding_window` field in the model "
                "config and it's not set to None."
            )
        self.max_cache_len = max_cache_len
        self.max_batch_size = max_batch_size
        # Some model define a custom `head_dim` != config.hidden_size // config.num_attention_heads
        self.head_dim = (
            config.head_dim if hasattr(config, "head_dim") else config.hidden_size // config.num_attention_heads
        )

        self.dtype = dtype if dtype is not None else torch.float32
        self.num_key_value_heads = (
            config.num_attention_heads if config.num_key_value_heads is None else config.num_key_value_heads
        )
        self.is_sliding = torch.tensor(
            [i % 2 for i in range(config.num_hidden_layers)], dtype=torch.bool, device=device
        )
        self.key_cache: List[torch.Tensor] = []
        self.value_cache: List[torch.Tensor] = []
        global_cache_shape = (max_batch_size, self.num_key_value_heads, max_cache_len, self.head_dim)
        sliding_cache_shape = (
            max_batch_size,
            self.num_key_value_heads,
            min(config.sliding_window, max_cache_len),
            self.head_dim,
        )
        for i in range(config.num_hidden_layers):
            # Note: `mark_static_address` is used to tag the cache as an fixed data pointer, preventing cuda graph
            # breaks when updating the cache.
            cache_shape = global_cache_shape if not self.is_sliding[i] else sliding_cache_shape
            new_layer_key_cache = torch.zeros(cache_shape, dtype=self.dtype, device=device)
            new_layer_value_cache = torch.zeros(cache_shape, dtype=self.dtype, device=device)
            torch._dynamo.mark_static_address(new_layer_key_cache)
            torch._dynamo.mark_static_address(new_layer_value_cache)
            self.key_cache.append(new_layer_key_cache)
            self.value_cache.append(new_layer_value_cache)

    def _sliding_update(self, cache_position, layer_idx, key_states, value_states, k_out, v_out, max_cache_len):
        if cache_position.shape[0] > max_cache_len:
            k_out = key_states[:, :, -max_cache_len:, :]
            v_out = value_states[:, :, -max_cache_len:, :]
            # Assumption: caches are all zeros at this point, `+=` is equivalent to `=` but compile-friendly
            self.key_cache[layer_idx] += k_out
            self.value_cache[layer_idx] += v_out
            # we should return the whole states instead of k_out, v_out to take the whole prompt
            # into consideration when building kv cache instead of just throwing away tokens outside of the window
            return key_states, value_states

        slicing = torch.ones(max_cache_len, dtype=torch.long, device=value_states.device).cumsum(0)
        cache_position = cache_position.clamp(0, max_cache_len - 1)
        to_shift = cache_position >= max_cache_len - 1
        indices = (slicing + to_shift[-1].int() - 1) % max_cache_len
        k_out = k_out[:, :, indices]
        v_out = v_out[:, :, indices]

        k_out[:, :, cache_position] = key_states
        v_out[:, :, cache_position] = value_states
        # `_.zero()` followed by `+=` is equivalent `=`, but compile-friendly (without graph breaks due to assignment)
        self.key_cache[layer_idx].zero_()
        self.value_cache[layer_idx].zero_()

        self.key_cache[layer_idx] += k_out
        self.value_cache[layer_idx] += v_out
        return k_out, v_out

    def _static_update(self, cache_position, layer_idx, key_states, value_states, k_out, v_out, max_cache_len):
        k_out[:, :, cache_position] = key_states
        v_out[:, :, cache_position] = value_states

        self.key_cache[layer_idx] = k_out
        self.value_cache[layer_idx] = v_out
        return k_out, v_out

    def update(
        self,
        key_states: torch.Tensor,
        value_states: torch.Tensor,
        layer_idx: int,
        cache_kwargs: Optional[Dict[str, Any]] = None,
        sliding_window: Optional[int] = None,
    ) -> Tuple[torch.Tensor]:
        cache_position = cache_kwargs.get("cache_position")
        self.key_cache[layer_idx] = self.key_cache[layer_idx].to(device=key_states.device)
        self.value_cache[layer_idx] = self.value_cache[layer_idx].to(device=value_states.device)
        k_out = self.key_cache[layer_idx]
        v_out = self.value_cache[layer_idx]
        if sliding_window:
            update_fn = self._sliding_update
        else:
            update_fn = self._static_update

        return update_fn(
            cache_position,
            layer_idx,
            key_states,
            value_states,
            k_out,
            v_out,
            k_out.shape[2],
        )

    def get_max_length(self) -> Optional[int]:
        # in theory there is no limit because the sliding window size is fixed
        # no matter how long the sentence is
        return self.max_cache_len

    def get_seq_length(self, layer_idx: Optional[int] = 0):
        return None

    def reset(self):
        """Resets the cache values while preserving the objects"""
        for layer_idx in range(len(self.key_cache)):
            # In-place ops prevent breaking the static address
            self.key_cache[layer_idx].zero_()
            self.value_cache[layer_idx].zero_()
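`HybridCache` pre-allocates a full-length buffer for the global-attention layers and a `sliding_window`-sized buffer for the alternating local layers (`is_sliding` marks every odd layer). A construction sketch using a deliberately tiny, made-up Gemma 2 config — no weights are involved, only the fields the cache reads:

```python
import torch
from transformers import Gemma2Config
from transformers.cache_utils import HybridCache

# Tiny illustrative config; only sliding_window, head_dim, head counts and layer count matter here.
config = Gemma2Config(
    num_hidden_layers=4,
    num_attention_heads=4,
    num_key_value_heads=2,
    head_dim=16,
    hidden_size=64,
    intermediate_size=128,
    sliding_window=128,
)
cache = HybridCache(config, max_batch_size=1, max_cache_len=256, device="cpu", dtype=torch.float16)

print(cache.key_cache[0].shape)  # torch.Size([1, 2, 256, 16]) -- even layer: global buffer
print(cache.key_cache[1].shape)  # torch.Size([1, 2, 128, 16]) -- odd layer: sliding-window buffer
```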
src/transformers/convert_slow_tokenizer.py
@@ -622,6 +622,17 @@ class SpmConverter(Converter):
     def converted(self) -> Tokenizer:
         tokenizer = self.tokenizer(self.proto)

+        # Add user defined symbols (type == 4) from sentnecepiece (https://github.com/google/sentencepiece/blob/6225e08edb2577757163b3f5dbba4c0b670ef445/src/sentencepiece_model.proto#L299C29-L299C33)
+        user_defined_symbols = [
+            AddedToken(token, normalized=False, special=False)
+            for token in [p.piece for p in self.proto.pieces if p.type == 4]
+        ]
+        control_symbols = [
+            AddedToken(token, normalized=False, special=True) for token in self.proto.trainer_spec.control_symbols
+        ]
+
+        tokenizer.add_tokens(user_defined_symbols + control_symbols)
+
         # Tokenizer assemble
         normalizer = self.normalizer(self.proto)
         if normalizer is not None:
@@ -1330,10 +1341,6 @@ class GemmaConvert(SpmConverter):
             raise Exception(
                 "You're trying to run a `Unigram` model but you're file was trained with a different algorithm"
             )
-        user_defined_symbols = [
-            AddedToken(token, normalized=True, special=False) for token in proto.trainer_spec.user_defined_symbols
-        ]
-        tokenizer.add_tokens(user_defined_symbols)
         return tokenizer
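The net effect of the `converted()` change is that user-defined symbols (piece type 4) and control symbols stored inside a sentencepiece model now survive slow-to-fast conversion as added tokens for every `SpmConverter`-based tokenizer, not just Gemma. A hedged sketch — the checkpoint name is a placeholder, and this only applies when the fast tokenizer is actually built by converting the slow sentencepiece model:

```python
from transformers import AutoTokenizer

# Placeholder checkpoint: substitute any sentencepiece-based model whose .model file
# carries user-defined symbols or trainer_spec control symbols.
tokenizer = AutoTokenizer.from_pretrained("your-org/spm-based-model", use_fast=True)
print(tokenizer.get_added_vocab())  # the preserved symbols show up among the added tokens
```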
src/transformers/dependency_versions_table.py
@@ -31,13 +31,14 @@ deps = {
     "jax": "jax>=0.4.1,<=0.4.13",
     "jaxlib": "jaxlib>=0.4.1,<=0.4.13",
     "jieba": "jieba",
+    "jinja2": "jinja2>=3.1.0",
     "kenlm": "kenlm",
     "keras": "keras>2.9,<2.16",
     "keras-nlp": "keras-nlp>=0.3.1",
     "librosa": "librosa",
     "nltk": "nltk",
     "natten": "natten>=0.14.6,<0.15.0",
-    "numpy": "numpy>=1.17",
+    "numpy": "numpy>=1.17,<2.0",
     "onnxconverter-common": "onnxconverter-common",
     "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
     "onnxruntime": "onnxruntime>=1.4.0",
src/transformers/generation/candidate_generator.py
@@ -111,24 +111,11 @@ class AssistedCandidateGenerator(CandidateGenerator):
         # Prepare the kwargs for the assistant model
         assistant_kwargs = {}
         for key, value in model_kwargs.items():  # deepcopy crashes if we attempt to copy encoder outputs with grads
-            if key not in ("encoder_outputs", "assistant_encoder_outputs"):
+            if key not in ("encoder_outputs", "assistant_encoder_outputs", "past_key_values"):
                 assistant_kwargs[key] = (
                     value.detach().to(device) if isinstance(value, torch.Tensor) else copy.deepcopy(value)
                 )

-        # Remove potential default DynamicCache if assistant does not support it
-        if "past_key_values" in assistant_kwargs.keys():
-            if (
-                isinstance(assistant_kwargs["past_key_values"], DynamicCache)
-                and not self.assistant_model._supports_cache_class
-            ):
-                # Cache is empty -> remove it from kwargs
-                if len(assistant_kwargs["past_key_values"]) == 0:
-                    del assistant_kwargs["past_key_values"]
-                # Cache is not empty -> convert to legacy
-                else:
-                    assistant_kwargs["past_key_values"] = assistant_kwargs["past_key_values"].to_legacy_cache()
-
         if "assistant_encoder_outputs" in model_kwargs:
             assistant_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"]
         elif assistant_model.config.is_encoder_decoder:
@@ -363,15 +350,15 @@ class PromptLookupCandidateGenerator(CandidateGenerator):
         return


-def _crop_past_key_values(model, past_key_values, maximum_length):
+def _crop_past_key_values(model, past_key_values, max_length):
     """Crops the past key values up to a certain maximum length."""
     new_past = []
     if model.config.is_encoder_decoder:
         for idx in range(len(past_key_values)):
             new_past.append(
                 (
-                    past_key_values[idx][0][:, :, :maximum_length, :],
-                    past_key_values[idx][1][:, :, :maximum_length, :],
+                    past_key_values[idx][0][:, :, :max_length, :],
+                    past_key_values[idx][1][:, :, :max_length, :],
                     past_key_values[idx][2],
                     past_key_values[idx][3],
                 )
@@ -384,8 +371,8 @@ def _crop_past_key_values(model, past_key_values, maximum_length):
         for idx in range(len(past_key_values)):
             new_past.append(
                 (
-                    past_key_values[idx][0][:, :, :maximum_length],
-                    past_key_values[idx][1][:, :maximum_length, :],
+                    past_key_values[idx][0][:, :, :max_length],
+                    past_key_values[idx][1][:, :max_length, :],
                 )
             )
         past_key_values = tuple(new_past)
@@ -395,19 +382,19 @@ def _crop_past_key_values(model, past_key_values, maximum_length):
     ):
         if model.config.multi_query:
             for idx in range(len(past_key_values)):
-                past_key_values[idx] = past_key_values[idx][:, :maximum_length, :]
+                past_key_values[idx] = past_key_values[idx][:, :max_length, :]
         else:
             for idx in range(len(past_key_values)):
-                past_key_values[idx] = past_key_values[idx][:, :, :maximum_length, :]
+                past_key_values[idx] = past_key_values[idx][:, :, :max_length, :]
     elif isinstance(past_key_values, DynamicCache):
-        past_key_values.crop(maximum_length)
+        past_key_values.crop(max_length)

     elif past_key_values is not None:
         for idx in range(len(past_key_values)):
             new_past.append(
                 (
-                    past_key_values[idx][0][:, :, :maximum_length, :],
-                    past_key_values[idx][1][:, :, :maximum_length, :],
+                    past_key_values[idx][0][:, :, :max_length, :],
+                    past_key_values[idx][1][:, :, :max_length, :],
                 )
             )
         past_key_values = tuple(new_past)
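These changes sit on the assisted-decoding path: the assistant's `past_key_values` is no longer copied into its kwargs, and the cropping helper adopts the `max_length` name used by `DynamicCache.crop`. For context, a minimal assisted-generation call that exercises this code (the checkpoints are illustrative; the draft model must share the main model's tokenizer):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2-medium")  # main model
assistant = AutoModelForCausalLM.from_pretrained("gpt2")     # smaller draft model

inputs = tokenizer("Assisted decoding drafts tokens with", return_tensors="pt")
output = model.generate(**inputs, assistant_model=assistant, max_new_tokens=20)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```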