Mirror of https://github.com/huggingface/transformers.git
Synced 2025-10-27 14:54:34 +08:00

Compare commits: enable_tf_... → v4.23.0 (115 commits)
| SHA1 | Author | Date | |
|---|---|---|---|
| 9ae22fe3c1 | |||
| df2f28120d | |||
| 5f5e264a12 | |||
| c6a928cadb | |||
| d739a707d9 | |||
| 870a9542be | |||
| 692c5be74e | |||
| a7bc4221c0 | |||
| 25cfd911d0 | |||
| 9df953a855 | |||
| d866b4858a | |||
| 4dd784c32f | |||
| b0b962ccca | |||
| e150c4e2fe | |||
| e3f028f3af | |||
| af69360bf9 | |||
| ba71bf4cae | |||
| 4824741c4c | |||
| 3080bb4754 | |||
| 298f6a98c2 | |||
| 7d5ce6802e | |||
| c523a86929 | |||
| 3410705730 | |||
| 83dc49b69b | |||
| 1241a4993b | |||
| 4107445a0f | |||
| cbb8a37929 | |||
| 8b6bba54a7 | |||
| d92e22d1f2 | |||
| 9ac586b3c8 | |||
| 983451a13e | |||
| de4d71ea07 | |||
| 34e0cc6d86 | |||
| 7418a48e34 | |||
| 6ef16f2b67 | |||
| 06514b3e1a | |||
| c2b83d540e | |||
| e6fc2016ad | |||
| 994b7a4eea | |||
| a26d71d6ae | |||
| 5fef17f490 | |||
| fa4bcd5274 | |||
| e9a49babee | |||
| 331ea019d7 | |||
| 56af8df359 | |||
| 41ec5d0ced | |||
| b29ebdf4d8 | |||
| e162cebfa3 | |||
| 969534af4b | |||
| 46fd04b481 | |||
| 7e348aac96 | |||
| ae3e3bc60a | |||
| ce2620194b | |||
| f0b490151e | |||
| 7e7f62bfa7 | |||
| bad353cebf | |||
| 45e14038f2 | |||
| 7598791c09 | |||
| ad98642a82 | |||
| d9101b71bc | |||
| 226b8ef063 | |||
| 071df6eb13 | |||
| c875a96eb1 | |||
| 4cbc797b27 | |||
| e794ca5b16 | |||
| 2f53ab5745 | |||
| 60db81ff60 | |||
| c54bb1ad79 | |||
| e12bbe3b4d | |||
| 512fa41c53 | |||
| 07e94bf159 | |||
| 6268694e27 | |||
| bf7eb0c9b3 | |||
| 971da2e6ec | |||
| 587d84b178 | |||
| 6dce9e0cdd | |||
| 6fd254a37d | |||
| a9782881a4 | |||
| d6e920449e | |||
| 2403dbd607 | |||
| f134d38553 | |||
| cd024da6f8 | |||
| ca3ebc44e0 | |||
| cc263e9bb4 | |||
| 9b630168a9 | |||
| ac5ea74ee8 | |||
| 3a1a56a8fe | |||
| fe10796f4f | |||
| 534cd8ff94 | |||
| 4c962d5e79 | |||
| c7ec0afce0 | |||
| ca26277e33 | |||
| 008531c18a | |||
| 68f50f3453 | |||
| 18c06208c4 | |||
| c28d04e9e2 | |||
| 36f52e9593 | |||
| 5cd16f01db | |||
| cfb777f27c | |||
| 6a08162ad4 | |||
| f33858d18a | |||
| 2fba98e585 | |||
| 3e2dd7f92d | |||
| dad578e4c3 | |||
| e396358104 | |||
| 582d085bb2 | |||
| 368b649af6 | |||
| 4fd32a1f49 | |||
| f3d2f7a6e0 | |||
| 49d62b0178 | |||
| 1a1893e5d8 | |||
| 163cd15279 | |||
| f16bbf1475 | |||
| cca6e6fea1 | |||
| 01eb34ab45 |
@@ -79,10 +79,19 @@ jobs:
          path: ~/transformers/tests_fetched_summary.txt
      - run: |
          if [ -f test_list.txt ]; then
-           mv test_list.txt test_preparation/test_list.txt
+           cp test_list.txt test_preparation/test_list.txt
          else
            touch test_preparation/test_list.txt
          fi
+     - run: python utils/tests_fetcher.py --filter_pipeline_tests
+     - run: |
+         if [ -f test_list.txt ]; then
+           mv test_list.txt test_preparation/filtered_test_list.txt
+         else
+           touch test_preparation/filtered_test_list.txt
+         fi
+     - store_artifacts:
+         path: ~/transformers/test_preparation/filtered_test_list.txt
      - run: python utils/tests_fetcher.py --filters tests examples | tee examples_tests_fetched_summary.txt
      - store_artifacts:
          path: ~/transformers/examples_tests_fetched_summary.txt
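For reference, the preparation steps in this hunk can be approximated outside CI. A minimal local sketch, assuming `utils/tests_fetcher.py` emits `test_list.txt` at the repository root as the steps above imply, run from a branch that differs from `main`:

```bash
#!/usr/bin/env bash
# Hedged sketch of the test-preparation flow above, run from a transformers checkout.
set -euo pipefail

mkdir -p test_preparation

# Figure out which tests the current diff touches (assumed to emit test_list.txt).
python utils/tests_fetcher.py | tee tests_fetched_summary.txt

if [ -f test_list.txt ]; then
  # cp (not mv) keeps test_list.txt around for the filtering step below.
  cp test_list.txt test_preparation/test_list.txt
else
  touch test_preparation/test_list.txt
fi

# Strip pipeline tests from the list, then stash the filtered variant.
python utils/tests_fetcher.py --filter_pipeline_tests
if [ -f test_list.txt ]; then
  mv test_list.txt test_preparation/filtered_test_list.txt
else
  touch test_preparation/filtered_test_list.txt
fi

cat test_preparation/filtered_test_list.txt
```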
@@ -97,6 +106,7 @@ jobs:
          root: test_preparation/
          paths:
            test_list.txt
+           filtered_test_list.txt
            examples_test_list.txt

# To run all tests for the nightly build
@@ -110,6 +120,8 @@ jobs:
          mkdir test_preparation
          echo "tests" > test_preparation/test_list.txt
          echo "tests" > test_preparation/examples_test_list.txt
+     - run: python utils/tests_fetcher.py --filter_pipeline_tests
+     - run: mv test_list.txt test_preparation/filtered_test_list.txt

      - persist_to_workspace:
          root: test_preparation/
@@ -132,7 +144,7 @@ jobs:
      - attach_workspace:
          at: ~/transformers/test_preparation
      - run: |
-         if [ ! -s test_preparation/test_list.txt ]; then
+         if [ ! -s test_preparation/filtered_test_list.txt ]; then
            echo "No tests to run, exiting early!"
            circleci-agent step halt
          fi
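The early-exit guard introduced here hinges on the shell's `-s` file test; a small bash illustration of why a missing list and an empty list both halt the job:

```bash
# [ -s FILE ] succeeds only when FILE exists and is non-empty, so
# "[ ! -s ... ]" is true for a missing test list as well as an empty one.
touch empty.txt
echo "tests/models/bert" > nonempty.txt

[ ! -s empty.txt ]    && echo "empty.txt: no tests to run, job would halt"
[ ! -s missing.txt ]  && echo "missing.txt: no tests to run, job would halt"
[ ! -s nonempty.txt ] || echo "nonempty.txt: tests found, job continues"
```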
@@ -152,7 +164,7 @@ jobs:
          key: v0.5-torch_and_tf-{{ checksum "setup.py" }}
          paths:
            - '~/.cache/pip'
-     - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_preparation/test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
+     - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_preparation/filtered_test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
      - store_artifacts:
          path: ~/transformers/tests_output.txt
      - store_artifacts:
@@ -174,7 +186,7 @@ jobs:
      - attach_workspace:
          at: ~/transformers/test_preparation
      - run: |
-         if [ ! -s test_preparation/test_list.txt ]; then
+         if [ ! -s test_preparation/filtered_test_list.txt ]; then
            echo "No tests to run, exiting early!"
            circleci-agent step halt
          fi
@@ -192,7 +204,7 @@ jobs:
          key: v0.5-torch_and_flax-{{ checksum "setup.py" }}
          paths:
            - '~/.cache/pip'
-     - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_preparation/test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt
+     - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_preparation/filtered_test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt
      - store_artifacts:
          path: ~/transformers/tests_output.txt
      - store_artifacts:
@@ -213,7 +225,7 @@ jobs:
      - attach_workspace:
          at: ~/transformers/test_preparation
      - run: |
-         if [ ! -s test_preparation/test_list.txt ]; then
+         if [ ! -s test_preparation/filtered_test_list.txt ]; then
            echo "No tests to run, exiting early!"
            circleci-agent step halt
          fi
@@ -231,7 +243,7 @@ jobs:
          key: v0.5-torch-{{ checksum "setup.py" }}
          paths:
            - '~/.cache/pip'
-     - run: python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch $(cat test_preparation/test_list.txt) | tee tests_output.txt
+     - run: python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt
      - store_artifacts:
          path: ~/transformers/tests_output.txt
      - store_artifacts:
@@ -252,7 +264,7 @@ jobs:
      - attach_workspace:
          at: ~/transformers/test_preparation
      - run: |
-         if [ ! -s test_preparation/test_list.txt ]; then
+         if [ ! -s test_preparation/filtered_test_list.txt ]; then
            echo "No tests to run, exiting early!"
            circleci-agent step halt
          fi
@@ -269,7 +281,7 @@ jobs:
          key: v0.5-tf-{{ checksum "setup.py" }}
          paths:
            - '~/.cache/pip'
-     - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf $(cat test_preparation/test_list.txt) | tee tests_output.txt
+     - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt
      - store_artifacts:
          path: ~/transformers/tests_output.txt
      - store_artifacts:
@@ -290,7 +302,7 @@ jobs:
      - attach_workspace:
          at: ~/transformers/test_preparation
      - run: |
-         if [ ! -s test_preparation/test_list.txt ]; then
+         if [ ! -s test_preparation/filtered_test_list.txt ]; then
            echo "No tests to run, exiting early!"
            circleci-agent step halt
          fi
@@ -306,7 +318,7 @@ jobs:
          key: v0.5-flax-{{ checksum "setup.py" }}
          paths:
            - '~/.cache/pip'
-     - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax $(cat test_preparation/test_list.txt) | tee tests_output.txt
+     - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt
      - store_artifacts:
          path: ~/transformers/tests_output.txt
      - store_artifacts:
@@ -318,7 +330,6 @@ jobs:
        - image: cimg/python:3.7.12
      environment:
        OMP_NUM_THREADS: 1
-       RUN_PIPELINE_TESTS: yes
        TRANSFORMERS_IS_CI: yes
        PYTEST_TIMEOUT: 120
      resource_class: xlarge
@@ -345,7 +356,7 @@ jobs:
          key: v0.5-torch-{{ checksum "setup.py" }}
          paths:
            - '~/.cache/pip'
-     - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch -m is_pipeline_test $(cat test_preparation/test_list.txt) | tee tests_output.txt
+     - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch tests/pipelines | tee tests_output.txt
      - store_artifacts:
          path: ~/transformers/tests_output.txt
      - store_artifacts:
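From this hunk on, the pipeline jobs stop selecting tests with the `is_pipeline_test` marker and instead point pytest straight at `tests/pipelines`. A hedged sketch of the two selection styles (run from a transformers checkout; `--collect-only` only previews what would be selected):

```bash
# Old style: collect the fetched test list, keep only tests carrying the
# is_pipeline_test marker (marker-based selection).
python -m pytest -m is_pipeline_test $(cat test_preparation/test_list.txt) --collect-only -q

# New style: collect the pipeline test files directly (path-based selection),
# so no marker or RUN_PIPELINE_TESTS toggle is needed.
python -m pytest tests/pipelines --collect-only -q
```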
@@ -357,7 +368,6 @@ jobs:
        - image: cimg/python:3.7.12
      environment:
        OMP_NUM_THREADS: 1
-       RUN_PIPELINE_TESTS: yes
        TRANSFORMERS_IS_CI: yes
        PYTEST_TIMEOUT: 120
      resource_class: xlarge
@@ -382,7 +392,7 @@ jobs:
          key: v0.5-tf-{{ checksum "setup.py" }}
          paths:
            - '~/.cache/pip'
-     - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf $(cat test_preparation/test_list.txt) -m is_pipeline_test | tee tests_output.txt
+     - run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf tests/pipelines | tee tests_output.txt
      - store_artifacts:
          path: ~/transformers/tests_output.txt
      - store_artifacts:
@@ -401,7 +411,7 @@ jobs:
      - attach_workspace:
          at: ~/transformers/test_preparation
      - run: |
-         if [ ! -s test_preparation/test_list.txt ]; then
+         if [ ! -s test_preparation/filtered_test_list.txt ]; then
            echo "No tests to run, exiting early!"
            circleci-agent step halt
          fi
@@ -409,6 +419,16 @@ jobs:
          keys:
            - v0.5-custom_tokenizers-{{ checksum "setup.py" }}
            - v0.5-custom_tokenizers-
+     - run: sudo apt-get -y update && sudo apt-get install -y cmake
+     - run:
+         name: install jumanpp
+         command: |
+           wget https://github.com/ku-nlp/jumanpp/releases/download/v2.0.0-rc3/jumanpp-2.0.0-rc3.tar.xz
+           tar xvf jumanpp-2.0.0-rc3.tar.xz
+           mkdir jumanpp-2.0.0-rc3/bld
+           cd jumanpp-2.0.0-rc3/bld
+           sudo cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local
+           sudo make install
      - run: pip install --upgrade pip
      - run: pip install .[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba]
      - run: python -m unidic download
@@ -547,7 +567,7 @@ jobs:
      - attach_workspace:
          at: ~/transformers/test_preparation
      - run: |
-         if [ ! -s test_preparation/test_list.txt ]; then
+         if [ ! -s test_preparation/filtered_test_list.txt ]; then
            echo "No tests to run, exiting early!"
            circleci-agent step halt
          fi
@@ -565,7 +585,7 @@ jobs:
          key: v0.5-hub-{{ checksum "setup.py" }}
          paths:
            - '~/.cache/pip'
-     - run: python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub $(cat test_preparation/test_list.txt) -m is_staging_test | tee tests_output.txt
+     - run: python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub $(cat test_preparation/filtered_test_list.txt) -m is_staging_test | tee tests_output.txt
      - store_artifacts:
          path: ~/transformers/tests_output.txt
      - store_artifacts:
@@ -586,7 +606,7 @@ jobs:
      - attach_workspace:
          at: ~/transformers/test_preparation
      - run: |
-         if [ ! -s test_preparation/test_list.txt ]; then
+         if [ ! -s test_preparation/filtered_test_list.txt ]; then
            echo "No tests to run, exiting early!"
            circleci-agent step halt
          fi
@@ -600,7 +620,7 @@ jobs:
          key: v0.5-onnx-{{ checksum "setup.py" }}
          paths:
            - '~/.cache/pip'
-     - run: python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx $(cat test_preparation/test_list.txt) -k onnx | tee tests_output.txt
+     - run: python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx $(cat test_preparation/filtered_test_list.txt) -k onnx | tee tests_output.txt

      - store_artifacts:
          path: ~/transformers/tests_output.txt
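The ONNX job above pairs the filtered list with pytest's `-k` option, which filters by test name rather than by marker; a quick hedged illustration:

```bash
# -k keeps only tests whose node IDs match the expression (here: names
# containing "onnx"); --collect-only previews the selection without running it.
python -m pytest $(cat test_preparation/filtered_test_list.txt) -k onnx --collect-only -q
```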
@@ -680,7 +700,7 @@ jobs:
    steps:
      - checkout
      - attach_workspace:
-         at: ~/transformers/test_preparation
+         at: ~/transformers/filtered_test_list.txt
      - run: |
          if [ ! -s test_preparation/test_list.txt ]; then
            echo "No tests to run, exiting early!"
.github/ISSUE_TEMPLATE/bug-report.yml (1 change, vendored)
@@ -1,6 +1,5 @@
name: "\U0001F41B Bug Report"
description: Submit a bug report to help us improve transformers
labels: [ "bug" ]
body:
  - type: textarea
    id: system-info
.github/workflows/build-docker-images.yml (2 changes, vendored)
@@ -27,7 +27,7 @@ jobs:
        uses: docker/setup-buildx-action@v1
      -
        name: Check out code
-       uses: actions/checkout@v2
+       uses: actions/checkout@v3
      -
        name: Login to DockerHub
        uses: docker/login-action@v1
.github/workflows/self-past.yml (36 changes, vendored)
@@ -15,6 +15,11 @@ on:
      version:
        required: true
        type: string
+     # Use this to control the commit to test against
+     sha:
+       default: 'main'
+       required: false
+       type: string

env:
  HF_HOME: /mnt/cache
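The new `sha` input defaults to `'main'`, so existing callers keep testing the tip of `main` while past-CI runs can pin an exact commit. As a hedged sketch, the shell equivalent of that fallback is:

```bash
# Rough stand-in for "default: 'main'" combined with
# "run: git fetch && git checkout ${{ inputs.sha }}".
SHA="${1:-main}"   # hypothetical positional argument playing the role of inputs.sha
git fetch
git checkout "$SHA"
```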
@@ -67,18 +72,19 @@ jobs:
    outputs:
      matrix: ${{ steps.set-matrix.outputs.matrix }}
    steps:
-     - name: Checkout transformers
-       uses: actions/checkout@v2
-       with:
-         fetch-depth: 2
+     - name: Update clone
+       working-directory: /transformers
+       run: git fetch && git checkout ${{ inputs.sha }}

      - name: Cleanup
+       working-directory: /transformers
        run: |
          rm -rf tests/__pycache__
          rm -rf tests/models/__pycache__
          rm -rf reports

      - id: set-matrix
+       working-directory: /transformers
        name: Identify models to test
        run: |
          cd tests
@@ -99,7 +105,7 @@ jobs:
    steps:
      - name: Update clone
        working-directory: /transformers
-       run: git fetch && git checkout ${{ github.sha }}
+       run: git fetch && git checkout ${{ inputs.sha }}

      - name: Echo folder ${{ matrix.folders }}
        shell: bash
@@ -130,6 +136,15 @@ jobs:
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt

+     - name: Save job name
+       if: ${{ always() }}
+       shell: bash
+       run: |
+         matrix_folders=${matrix_folders/'models_'/'models/'}
+         job_name="Model tests ($matrix_folders, ${{ matrix.machine_type }})"
+         echo "$job_name"
+         echo "$job_name" > /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/job_name.txt
+
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v2
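The `matrix_folders=${matrix_folders/'models_'/'models/'}` line added above uses bash pattern substitution to turn the sanitized matrix key back into a folder path; a short illustration:

```bash
# ${var/pattern/replacement} replaces the first occurrence of pattern in var,
# so a matrix key such as "models_bert" becomes the report path "models/bert".
matrix_folders="models_bert"
echo "${matrix_folders/'models_'/'models/'}"   # -> models/bert
```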
@@ -152,7 +167,7 @@ jobs:
    steps:
      - name: Update clone
        working-directory: /transformers
-       run: git fetch && git checkout ${{ github.sha }}
+       run: git fetch && git checkout ${{ inputs.sha }}

      - name: Echo folder ${{ matrix.folders }}
        shell: bash
@@ -183,6 +198,15 @@ jobs:
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt

+     - name: Save job name
+       if: ${{ always() }}
+       shell: bash
+       run: |
+         matrix_folders=${matrix_folders/'models_'/'models/'}
+         job_name="Model tests ($matrix_folders, ${{ matrix.machine_type }})"
+         echo "$job_name"
+         echo "$job_name" > /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/job_name.txt
+
      - name: Test suite reports artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@v2
.github/workflows/self-scheduled.yml (8 changes, vendored)
@@ -256,10 +256,8 @@ jobs:

      - name: Run all pipeline tests on GPU
        working-directory: /transformers
-       env:
-         RUN_PIPELINE_TESTS: yes
        run: |
-         python3 -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=${{ matrix.machine_type }}_tests_torch_pipeline_gpu tests
+         python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_torch_pipeline_gpu tests/pipelines

      - name: Failure short reports
        if: ${{ failure() }}
@@ -301,10 +299,8 @@ jobs:

      - name: Run all pipeline tests on GPU
        working-directory: /transformers
-       env:
-         RUN_PIPELINE_TESTS: yes
        run: |
-         python3 -m pytest -n 1 -v --dist=loadfile -m is_pipeline_test --make-reports=${{ matrix.machine_type }}_tests_tf_pipeline_gpu tests
+         python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_tests_tf_pipeline_gpu tests/pipelines

      - name: Failure short reports
        if: ${{ always() }}
.github/workflows/stale.yml (6 changes, vendored)
@@ -12,10 +12,10 @@ jobs:
    env:
      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    steps:
-     - uses: actions/checkout@v2
+     - uses: actions/checkout@v3

      - name: Setup Python
-       uses: actions/setup-python@v1
+       uses: actions/setup-python@v4
        with:
          python-version: 3.7
@@ -24,4 +24,4 @@ jobs:
        pip install PyGithub
      - name: Close stale issues
        run: |
-         python scripts/stale.py
+         python scripts/stale.py
README.md (18 changes)
@ -55,13 +55,13 @@ limitations under the License.
<a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a>
</h3>
🤗 Transformers provides thousands of pretrained models to perform tasks on different modalities such as text, vision, and audio.
🤗 Transformers provides thousands of pretrained models to perform tasks on different modalities such as text, vision, and audio.
These models can be applied on:
* 📝 Text, for tasks like text classification, information extraction, question answering, summarization, translation, text generation, in over 100 languages.
* 🖼️ Images, for tasks like image classification, object detection, and segmentation.
* 🗣️ Audio, for tasks like speech recognition and audio classification.
* 📝 Text, for tasks like text classification, information extraction, question answering, summarization, translation, text generation, in over 100 languages.
* 🖼️ Images, for tasks like image classification, object detection, and segmentation.
* 🗣️ Audio, for tasks like speech recognition and audio classification.
Transformer models can also perform tasks on **several modalities combined**, such as table question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering.
@@ -278,7 +278,7 @@ Current number of checkpoints:
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
@@ -300,6 +300,7 @@ Current number of checkpoints:
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
@@ -328,6 +329,7 @@ Current number of checkpoints:
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
@@ -373,6 +375,7 @@ Current number of checkpoints:
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
@@ -385,11 +388,12 @@ Current number of checkpoints:
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
@@ -405,7 +409,7 @@ Current number of checkpoints:
These implementations have been tested on several datasets (see the example scripts) and should match the performance of the original implementations. You can find more details on performance in the Examples section of the [documentation](https://huggingface.co/docs/transformers/examples).
These implementations have been tested on several datasets (see the example scripts) and should match the performance of the original implementations. You can find more details on performance in the Examples section of the [documentation](https://github.com/huggingface/transformers/tree/main/examples).
## Learn more
README_ko.md (28 changes)
@ -59,7 +59,7 @@ limitations under the License.
🤗 Transformers는 이러한 사전학습 모델을 빠르게 다운로드해 특정 텍스트에 사용하고, 원하는 데이터로 fine-tuning해 커뮤니티나 우리의 [모델 허브](https://huggingface.co/models)에 공유할 수 있도록 API를 제공합니다. 또한, 모델 구조를 정의하는 각 파이썬 모듈은 완전히 독립적이여서 연구 실험을 위해 손쉽게 수정할 수 있습니다.
🤗 Transformers는 가장 유명한 3개의 딥러닝 라이브러리를 지원합니다. 이들은 서로 완벽히 연동됩니다 — [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/). 간단하게 이 라이브러리 중 하나로 모델을 학습하고, 또 다른 라이브러리로 추론을 위해 모델을 불러올 수 있습니다.
🤗 Transformers는 가장 유명한 3개의 딥러닝 라이브러리를 지원합니다. 이들은 서로 완벽히 연동됩니다 — [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/), [TensorFlow](https://www.tensorflow.org/). 간단하게 이 라이브러리 중 하나로 모델을 학습하고, 또 다른 라이브러리로 추론을 위해 모델을 불러올 수 있습니다.
## 온라인 데모
@ -74,7 +74,7 @@ limitations under the License.
- [DistilBERT를 이용한 질문 답변](https://huggingface.co/distilbert-base-uncased-distilled-squad?text=Which+name+is+also+used+to+describe+the+Amazon+rainforest+in+English%3F&context=The+Amazon+rainforest+%28Portuguese%3A+Floresta+Amaz%C3%B4nica+or+Amaz%C3%B4nia%3B+Spanish%3A+Selva+Amaz%C3%B3nica%2C+Amazon%C3%ADa+or+usually+Amazonia%3B+French%3A+For%C3%AAt+amazonienne%3B+Dutch%3A+Amazoneregenwoud%29%2C+also+known+in+English+as+Amazonia+or+the+Amazon+Jungle%2C+is+a+moist+broadleaf+forest+that+covers+most+of+the+Amazon+basin+of+South+America.+This+basin+encompasses+7%2C000%2C000+square+kilometres+%282%2C700%2C000+sq+mi%29%2C+of+which+5%2C500%2C000+square+kilometres+%282%2C100%2C000+sq+mi%29+are+covered+by+the+rainforest.+This+region+includes+territory+belonging+to+nine+nations.+The+majority+of+the+forest+is+contained+within+Brazil%2C+with+60%25+of+the+rainforest%2C+followed+by+Peru+with+13%25%2C+Colombia+with+10%25%2C+and+with+minor+amounts+in+Venezuela%2C+Ecuador%2C+Bolivia%2C+Guyana%2C+Suriname+and+French+Guiana.+States+or+departments+in+four+nations+contain+%22Amazonas%22+in+their+names.+The+Amazon+represents+over+half+of+the+planet%27s+remaining+rainforests%2C+and+comprises+the+largest+and+most+biodiverse+tract+of+tropical+rainforest+in+the+world%2C+with+an+estimated+390+billion+individual+trees+divided+into+16%2C000+species)
- [T5로 번역하기](https://huggingface.co/t5-base?text=My+name+is+Wolfgang+and+I+live+in+Berlin)
**[Transformer와 글쓰기](https://transformer.huggingface.co)** 는 이 저장소의 텍스트 생성 능력에 관한 Hugging Face 팀의 공식 데모입니다.
**[Transformer와 글쓰기](https://transformer.huggingface.co)** 는 이 저장소의 텍스트 생성 능력에 관한 Hugging Face 팀의 공식 데모입니다.
## Hugging Face 팀의 커스텀 지원을 원한다면
@ -228,7 +228,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
@ -250,6 +250,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
@ -257,7 +258,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach
1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach
1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori.
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
@ -278,6 +279,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
@ -295,7 +297,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu.
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, Peter J. Liu.
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
@ -305,9 +307,9 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[RAG](https://huggingface.co/docs/transformers/model_doc/rag)** (from Facebook) released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela.
1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang.
1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Research) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár.
1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Research) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár.
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/pdf/2010.12821.pdf) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper a [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper a [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
@ -323,10 +325,11 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
|
||||
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
|
||||
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
|
||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
|
||||
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
|
||||
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
|
||||
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
|
||||
@ -335,13 +338,14 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
|
||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
|
||||
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
|
||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
|
||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
|
||||
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
|
||||
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
|
||||
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
|
||||
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
|
||||
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
|
||||
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
|
||||
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
|
||||
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
|
||||
@ -351,7 +355,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
|
||||
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
|
||||
1. **[YOLOS](https://huggingface.co/docs/transformers/model_doc/yolos)** (from Huazhong University of Science & Technology) released with the paper [You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection](https://arxiv.org/abs/2106.00666) by Yuxin Fang, Bencheng Liao, Xinggang Wang, Jiemin Fang, Jiyang Qi, Rui Wu, Jianwei Niu, Wenyu Liu.
|
||||
1. **[YOSO](https://huggingface.co/docs/transformers/model_doc/yoso)** (from the University of Wisconsin - Madison) released with the paper [You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling](https://arxiv.org/abs/2111.09714) by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh.
|
||||
1. Want to contribute a new model? We provide a **detailed guide and templates** to help you add one; you can find them in the [`templates`](./templates) folder of this repository. Be sure to read the [contribution guidelines](./CONTRIBUTING.md), and to contact the maintainers or open an issue to collect feedback before submitting your PR.
To check whether each model has an implementation in Flax, PyTorch or TensorFlow, or uses a tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/docs/transformers/index#supported-frameworks).
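As a minimal sketch of what that table means in practice: when a model is listed with both PyTorch and TensorFlow support, the same checkpoint can usually be loaded through either set of auto classes. The snippet below assumes `transformers`, PyTorch and TensorFlow are all installed, and uses `bert-base-uncased` purely as an illustrative checkpoint.

```python
from transformers import AutoTokenizer, AutoModel, TFAutoModel

# Illustrative checkpoint; pick any model that the table lists for both frameworks.
checkpoint = "bert-base-uncased"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)

# PyTorch: return_tensors="pt" produces torch.Tensor inputs for the PyTorch auto class.
pt_inputs = tokenizer("Hello world!", return_tensors="pt")
pt_model = AutoModel.from_pretrained(checkpoint)
pt_outputs = pt_model(**pt_inputs)
print(pt_outputs.last_hidden_state.shape)

# TensorFlow: the TF* auto class loads TensorFlow weights for the same checkpoint.
tf_inputs = tokenizer("Hello world!", return_tensors="tf")
tf_model = TFAutoModel.from_pretrained(checkpoint)
tf_outputs = tf_model(tf_inputs)
print(tf_outputs.last_hidden_state.shape)
```

If a framework column is empty for a given architecture, the corresponding auto class will generally raise an error for that checkpoint, so checking the table first is the quickest way to confirm support before writing framework-specific code.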
@ -245,14 +245,14 @@ conda install -c huggingface transformers
|
||||
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (来自 Google Research) 伴随论文 [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) 由 Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed 发布。
|
||||
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (来自 Facebook) 伴随论文 [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) 由 Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston 发布。
|
||||
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (来自 Facebook) 伴随论文 [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) 由 Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston 发布。
|
||||
1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigScience Workshop](https://bigscience.huggingface.co/).
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (来自 Alexa) 伴随论文 [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) 由 Adrian de Wynter and Daniel J. Perry 发布。
|
||||
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (来自 Google Research) 伴随论文 [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) 由 Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel 发布。
|
||||
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (来自 Inria/Facebook/Sorbonne) 伴随论文 [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) 由 Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot 发布。
|
||||
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (来自 Google Research) 伴随论文 [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) 由 Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting 发布。
|
||||
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (来自 OpenAI) 伴随论文 [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) 由 Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever 发布。
|
||||
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (来自 Salesforce) 伴随论文 [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) 由 Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong 发布。
|
||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (来自 Microsoft Research Asia) 伴随论文 [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) 由 Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang 发布。
|
||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (来自 Microsoft Research Asia) 伴随论文 [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) 由 Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang 发布。
|
||||
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (来自 YituTech) 伴随论文 [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) 由 Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan 发布。
|
||||
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (来自 Facebook AI) 伴随论文 [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) 由 Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie 发布。
|
||||
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (来自 Tsinghua University) 伴随论文 [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) 由 Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun 发布。
|
||||
@ -274,6 +274,7 @@ conda install -c huggingface transformers
|
||||
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。
|
||||
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (来自 Google Research) 伴随论文 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 由 Sascha Rothe, Shashi Narayan, Aliaksei Severyn 发布。
|
||||
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (来自 Baidu) 伴随论文 [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu 发布。
|
||||
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
||||
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (来自 CNRS) 伴随论文 [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) 由 Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab 发布。
|
||||
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (来自 Facebook AI) 伴随论文 [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) 由 Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela 发布。
|
||||
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (来自 Google Research) 伴随论文 [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) 由 James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon 发布。
|
||||
@ -281,7 +282,7 @@ conda install -c huggingface transformers
|
||||
1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (来自 KAIST) 伴随论文 [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) 由 Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim 发布。
|
||||
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (来自 OpenAI) 伴随论文 [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) 由 Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever 发布。
|
||||
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (来自 EleutherAI) 随仓库 [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) 发布。作者为 Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy 发布。
|
||||
1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach.
1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (来自 ABEJA) 由 Shinya Otani, Takayoshi Makabe, Anuj Arora, Kyo Hattori。
|
||||
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (来自 OpenAI) 伴随论文 [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) 由 Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever** 发布。
|
||||
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (来自 EleutherAI) 伴随论文 [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) 由 Ben Wang and Aran Komatsuzaki 发布。
|
||||
@ -302,7 +303,8 @@ conda install -c huggingface transformers
|
||||
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (来自 Facebook) 伴随论文 [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) 由 Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert 发布。
|
||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (来自 Facebook) 伴随论文 [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) 由 Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin 发布。
|
||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** 用 [OPUS](http://opus.nlpl.eu/) 数据训练的机器翻译模型由 Jörg Tiedemann 发布。[Marian Framework](https://marian-nmt.github.io/) 由微软翻译团队开发。
|
||||
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov
|
||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (来自 Microsoft Research Asia) 伴随论文 [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) 由 Junlong Li, Yiheng Xu, Lei Cui, Furu Wei 发布。
|
||||
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (来自 Facebook) 伴随论文 [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) 由 Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer 发布。
|
||||
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (来自 Facebook) 伴随论文 [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) 由 Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan 发布。
|
||||
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (来自 NVIDIA) 伴随论文 [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) 由 Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro 发布。
|
||||
@ -312,14 +314,14 @@ conda install -c huggingface transformers
|
||||
1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (来自 Apple) 伴随论文 [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) 由 Sachin Mehta and Mohammad Rastegari 发布。
|
||||
1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (来自 Microsoft Research) 伴随论文 [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) 由 Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu 发布。
|
||||
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (来自 Google AI) 伴随论文 [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) 由 Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel 发布。
|
||||
1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (来自 中国人民大学 AI Box) 伴随论文 [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) 由 Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen 发布。
1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (来自华为诺亚方舟实验室) 伴随论文 [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) 由 Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu 发布。
|
||||
1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (来自 Meta) 伴随论文 [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) 由 the NLLB team 发布。
|
||||
1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (来自 the University of Wisconsin - Madison) 伴随论文 [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) 由 Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh 发布。
|
||||
1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (来自 Meta AI) 伴随论文 [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) 由 Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al 发布。
|
||||
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (来自 Google AI) 伴随论文 [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) 由 Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby 发布。
|
||||
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (来自 Google) 伴随论文 [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) 由 Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu 发布。
|
||||
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (来自 Google) 伴随论文 [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) 由 Jason Phang, Yao Zhao, Peter J. Liu 发布。
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (来自 Deepmind) 伴随论文 [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) 由 Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira 发布。
|
||||
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (来自 VinAI Research) 伴随论文 [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) 由 Dat Quoc Nguyen and Anh Tuan Nguyen 发布。
|
||||
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (来自 UCLA NLP) 伴随论文 [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) 由 Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang 发布。
|
||||
@ -329,9 +331,9 @@ conda install -c huggingface transformers
|
||||
1. **[RAG](https://huggingface.co/docs/transformers/model_doc/rag)** (来自 Facebook) 伴随论文 [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) 由 Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela 发布。
|
||||
1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (来自 Google Research) 伴随论文 [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) 由 Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang 发布。
|
||||
1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (来自 Google Research) 伴随论文 [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) 由 Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya 发布。
|
||||
1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Research) released with the paper [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár.
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (来自 Google Research) 伴随论文 [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/pdf/2010.12821.pdf) 由 Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder 发布。
|
||||
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (来自 Facebook), 伴随论文 [Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) 由 Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov 发布。
|
||||
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (来自 ZhuiyiTechnology), 伴随论文 [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) 由 Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu 发布。
|
||||
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (来自 NVIDIA) 伴随论文 [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) 由 Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo 发布。
|
||||
@ -347,10 +349,11 @@ conda install -c huggingface transformers
|
||||
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (来自 Google AI) 伴随论文 [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) 由 Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu 发布。
|
||||
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (来自 Google AI) 伴随论文 [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) 由 Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos 发布。
|
||||
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (来自 Microsoft Research) 伴随论文 [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) 由 Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou 发布。
|
||||
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
|
||||
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
|
||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (来自 Google/CMU) 伴随论文 [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) 由 Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov 发布。
|
||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (来自 Microsoft) 伴随论文 [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 由 Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei 发布。
|
||||
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler.
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (来自 Microsoft Research) 伴随论文 [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) 由 Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang 发布。
|
||||
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (来自 Microsoft Research) 伴随论文 [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) 由 Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu 发布。
|
||||
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (来自 Tsinghua University and Nankai University) 伴随论文 [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) 由 Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu 发布。
|
||||
@ -359,13 +362,14 @@ conda install -c huggingface transformers
|
||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (来自 Google AI) 伴随论文 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 由 Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 发布。
|
||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (来自 UCLA NLP) 伴随论文 [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) 由 Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang 发布。
|
||||
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (来自 Meta AI) 伴随论文 [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) 由 Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick 发布。
|
||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (来自 Meta AI) 伴随论文 [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas 发布.
|
||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (来自 Meta AI) 伴随论文 [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas 发布.
|
||||
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (来自 Facebook AI) 伴随论文 [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) 由 Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli 发布。
|
||||
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (来自 Facebook AI) 伴随论文 [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) 由 Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino 发布。
|
||||
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (来自 Facebook AI) 伴随论文 [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) 由 Qiantong Xu, Alexei Baevski, Michael Auli 发布。
|
||||
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
|
||||
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (来自 OpenAI) 伴随论文 [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) 由 Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever 发布。
|
||||
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (来自 Microsoft Research) 伴随论文 [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) 由 Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling 发布。
|
||||
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (来自 Facebook) 伴随论文 [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) 由 Guillaume Lample and Alexis Conneau 发布。
|
||||
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (来自 Microsoft Research) 伴随论文 [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) 由 Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou 发布。
|
||||
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (来自 Facebook AI), 伴随论文 [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) 由 Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov 发布。
|
||||
|
||||
@ -257,14 +257,14 @@ conda install -c huggingface transformers
|
||||
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigScience Workshop](https://bigscience.huggingface.co/).
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
|
||||
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
|
||||
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
|
||||
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
|
||||
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
|
||||
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
|
||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
|
||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
|
||||
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
|
||||
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
|
||||
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
|
||||
@ -286,6 +286,7 @@ conda install -c huggingface transformers
|
||||
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
|
||||
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
|
||||
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
||||
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
||||
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
|
||||
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
|
||||
@ -293,7 +294,7 @@ conda install -c huggingface transformers
|
||||
1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
|
||||
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
|
||||
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
|
||||
1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach.
1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori.
|
||||
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
|
||||
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released with the paper [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
|
||||
@ -314,7 +315,8 @@ conda install -c huggingface transformers
|
||||
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
|
||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
|
||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
|
||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
|
||||
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov
|
||||
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
|
||||
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
|
||||
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
|
||||
@ -341,9 +343,9 @@ conda install -c huggingface transformers
|
||||
1. **[RAG](https://huggingface.co/docs/transformers/model_doc/rag)** (from Facebook) released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela.
|
||||
1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang.
|
||||
1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
|
||||
1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Research) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár.
|
||||
1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Research) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár.
|
||||
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/pdf/2010.12821.pdf) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
|
||||
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
|
||||
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
|
||||
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
|
||||
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864v1.pdf) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
|
||||
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
|
||||
@ -359,25 +361,27 @@ conda install -c huggingface transformers
|
||||
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released with the paper [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
|
||||
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
|
||||
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
|
||||
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
|
||||
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
|
||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
|
||||
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
|
||||
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
|
||||
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
|
||||
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
|
||||
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/pdf/2202.09741.pdf) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
|
||||
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
|
||||
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
|
||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
|
||||
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
|
||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
|
||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
|
||||
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
|
||||
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
|
||||
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
|
||||
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
|
||||
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
|
||||
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
|
||||
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
|
||||
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
|
||||
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
|
||||
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
|
||||
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
|
||||
|
||||
@ -32,7 +32,6 @@ warnings.simplefilter(action="ignore", category=FutureWarning)
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipeline are tested")
|
||||
config.addinivalue_line(
|
||||
"markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
|
||||
)
|
||||
|
||||
@ -16,7 +16,7 @@ limitations under the License.
|
||||
|
||||
# Generating the documentation
|
||||
|
||||
To generate the documentation, you first have to build it. Several packages are necessary to build the doc,
|
||||
To generate the documentation, you first have to build it. Several packages are necessary to build the doc,
|
||||
you can install them with the following command, at the root of the code repository:
|
||||
|
||||
```bash
|
||||
@ -33,7 +33,7 @@ pip install git+https://github.com/huggingface/doc-builder
|
||||
**NOTE**
|
||||
|
||||
You only need to generate the documentation to inspect it locally (if you're planning changes and want to
|
||||
check how they look like before committing for instance). You don't have to commit the built documentation.
|
||||
check how they look before committing for instance). You don't have to commit the built documentation.
|
||||
|
||||
---
|
||||
|
||||
@ -88,7 +88,7 @@ the filename without the extension in the [`_toctree.yml`](https://github.com/hu
|
||||
|
||||
## Renaming section headers and moving sections
|
||||
|
||||
It helps to keep the old links working when renaming section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums and Social media and it'd be make for a much more superior user experience if users reading those months later could still easily navigate to the originally intended information.
|
||||
It helps to keep the old links working when renaming the section header and/or moving sections from one document to another. This is because the old links are likely to be used in Issues, Forums, and Social media and it'd make for a much more superior user experience if users reading those months later could still easily navigate to the originally intended information.
|
||||
|
||||
Therefore we simply keep a little map of moved sections at the end of the document where the original section was. The key is to preserve the original anchor.
|
||||
|
||||
@ -99,7 +99,7 @@ Sections that were moved:
|
||||
|
||||
[ <a href="#section-b">Section A</a><a id="section-a"></a> ]
|
||||
```
|
||||
and of course if you moved it to another file, then:
|
||||
and of course, if you moved it to another file, then:
|
||||
|
||||
```
|
||||
Sections that were moved:
|
||||
@ -109,7 +109,7 @@ Sections that were moved:
|
||||
|
||||
Use the relative style to link to the new file so that the versioned docs continue to work.
|
||||
|
||||
For an example of a rich moved sections set please see the very end of [the Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/main_classes/trainer.mdx).
|
||||
For an example of a rich moved section set please see the very end of [the Trainer doc](https://github.com/huggingface/transformers/blob/main/docs/source/en/main_classes/trainer.mdx).
|
||||
|
||||
|
||||
## Writing Documentation - Specification
|
||||
@ -126,7 +126,7 @@ Adding a new tutorial or section is done in two steps:
|
||||
- Link that file in `./source/_toctree.yml` on the correct toc-tree.
|
||||
|
||||
Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so
|
||||
depending on the intended targets (beginners, more advanced users or researchers) it should go in section two, three or
|
||||
depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or
|
||||
four.
|
||||
|
||||
### Translating
|
||||
@ -177,8 +177,8 @@ not to be displayed in the documentation, you can do so by specifying which meth
|
||||
- save_vocabulary
|
||||
```
|
||||
|
||||
If you just want to add a method that is not documented (for instance magic method like `__call__` are not documented
|
||||
byt default) you can put the list of methods to add in a list that contains `all`:
|
||||
If you just want to add a method that is not documented (for instance magic methods like `__call__` are not documented
|
||||
by default) you can put the list of methods to add in a list that contains `all`:
|
||||
|
||||
```
|
||||
## XXXTokenizer
|
||||
@ -191,9 +191,9 @@ byt default) you can put the list of methods to add in a list that contains `all
|
||||
### Writing source documentation
|
||||
|
||||
Values that should be put in `code` should either be surrounded by backticks: \`like so\`. Note that argument names
|
||||
and objects like True, None or any strings should usually be put in `code`.
|
||||
and objects like True, None, or any strings should usually be put in `code`.
|
||||
|
||||
When mentioning a class, function or method, it is recommended to use our syntax for internal links so that our tool
|
||||
When mentioning a class, function, or method, it is recommended to use our syntax for internal links so that our tool
|
||||
adds a link to its documentation with this syntax: \[\`XXXClass\`\] or \[\`function\`\]. This requires the class or
|
||||
function to be in the main package.
|
||||
|
||||
@ -207,7 +207,7 @@ The same works for methods so you can either use \[\`XXXClass.method\`\] or \[~\
|
||||
#### Defining arguments in a method
|
||||
|
||||
Arguments should be defined with the `Args:` (or `Arguments:` or `Parameters:`) prefix, followed by a line return and
|
||||
an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon and its
|
||||
an indentation. The argument should be followed by its type, with its shape if it is a tensor, a colon, and its
|
||||
description:
|
||||
|
||||
```
|
||||
@ -216,7 +216,7 @@ description:
|
||||
```
|
||||
|
||||
If the description is too long to fit in one line, another indentation is necessary before writing the description
|
||||
after th argument.
|
||||
after the argument.
|
||||
|
||||
Here's an example showcasing everything so far:
|
||||
|
||||
@ -266,7 +266,7 @@ Multi-line code blocks can be useful for displaying examples. They are done betw
|
||||
````
|
||||
|
||||
We follow the [doctest](https://docs.python.org/3/library/doctest.html) syntax for the examples to automatically test
|
||||
the results stay consistent with the library.
|
||||
the results to stay consistent with the library.
|
||||
|
||||
#### Writing a return block
|
||||
|
||||
@ -274,27 +274,27 @@ The return block should be introduced with the `Returns:` prefix, followed by a
|
||||
The first line should be the type of the return, followed by a line return. No need to indent further for the elements
|
||||
building the return.
|
||||
|
||||
Here's an example for a single value return:
|
||||
Here's an example of a single value return:
|
||||
|
||||
```
|
||||
Returns:
|
||||
`List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token.
|
||||
```
|
||||
|
||||
Here's an example for tuple return, comprising several objects:
|
||||
Here's an example of a tuple return, comprising several objects:
|
||||
|
||||
```
|
||||
Returns:
|
||||
`tuple(torch.FloatTensor)` comprising various elements depending on the configuration ([`BertConfig`]) and inputs:
|
||||
- **loss** (*optional*, returned when `masked_lm_labels` is provided) `torch.FloatTensor` of shape `(1,)` --
|
||||
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
|
||||
Total loss is the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
|
||||
- **prediction_scores** (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`) --
|
||||
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
|
||||
```
|
||||
|
||||
#### Adding an image
|
||||
|
||||
Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos and other non-text files. We prefer to leverage a hf.co hosted `dataset` like
|
||||
Due to the rapidly growing repository, it is important to make sure that no files that would significantly weigh down the repository are added. This includes images, videos, and other non-text files. We prefer to leverage a hf.co hosted `dataset` like
|
||||
the ones hosted on [`hf-internal-testing`](https://huggingface.co/hf-internal-testing) in which to place these files and reference
|
||||
them by URL. We recommend putting them in the following dataset: [huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
|
||||
If this is an external contribution, feel free to add the images to your PR and ask a Hugging Face member to migrate your images
|
||||
@ -364,7 +364,7 @@ We use pytests' [doctest integration](https://docs.pytest.org/doctest.html) to v
|
||||
For Transformers, the doctests are run on a daily basis via GitHub Actions as can be
|
||||
seen [here](https://github.com/huggingface/transformers/actions/workflows/doctests.yml).
|
||||
|
||||
To include your example in the daily doctests, you need add the filename that
|
||||
To include your example in the daily doctests, you need to add the filename that
|
||||
contains the example docstring to the [documentation_tests.txt](../utils/documentation_tests.txt).
|
||||
|
||||
### For Python files
|
||||
@ -426,6 +426,6 @@ Here are a few tips to help you debug the doctests and make them pass:
|
||||
|
||||
- The outputs of the code need to match the expected output **exactly**, so make sure you have the same outputs. In particular doctest will see a difference between single quotes and double quotes, or a missing parenthesis. The only exceptions to that rule are:
|
||||
* whitespace: one given whitespace (space, tabulation, new line) is equivalent to any number of whitespace characters, so you can add new lines where there are spaces to make your output more readable.
|
||||
* numerical values: you should never put more than 4 or 5 digits to expected results as different setups or library versions might get you slightly different results. `doctest` is configure to ignore any difference lower than the precision to which you wrote (so 1e-4 if you write 4 digits).
|
||||
* numerical values: you should never put more than 4 or 5 digits to expected results as different setups or library versions might get you slightly different results. `doctest` is configured to ignore any difference lower than the precision to which you wrote (so 1e-4 if you write 4 digits).
|
||||
- Don't leave a block of code that is very long to execute. If you can't make it fast, you can either not use the doctest syntax on it (so that it's ignored), or if you want to use the doctest syntax to show the results, you can add a comment `# doctest: +SKIP` at the end of the lines of code too long to execute
|
||||
- Each line of code that produces a result needs to have that result written below. You can ignore an output if you don't want to show it in your code example by adding a comment ` # doctest: +IGNORE_RESULT` at the end of the line of code producing it.
|
||||
|
||||
@ -31,9 +31,11 @@
|
||||
- local: sagemaker
|
||||
title: Run training on Amazon SageMaker
|
||||
- local: converting_tensorflow_models
|
||||
title: Converting TensorFlow Checkpoints
|
||||
title: Converting from TensorFlow checkpoints
|
||||
- local: serialization
|
||||
title: Export 🤗 Transformers models
|
||||
title: Export to ONNX
|
||||
- local: torchscript
|
||||
title: Export to TorchScript
|
||||
- local: troubleshooting
|
||||
title: Troubleshoot
|
||||
title: General usage
|
||||
@ -109,6 +111,8 @@
|
||||
title: How to contribute to transformers?
|
||||
- local: add_new_model
|
||||
title: How to add a model to 🤗 Transformers?
|
||||
- local: add_tensorflow_model
|
||||
title: How to convert a 🤗 Transformers model to TensorFlow?
|
||||
- local: add_new_pipeline
|
||||
title: How to add a pipeline to 🤗 Transformers?
|
||||
- local: testing
|
||||
@ -241,6 +245,8 @@
|
||||
title: Encoder Decoder Models
|
||||
- local: model_doc/ernie
|
||||
title: ERNIE
|
||||
- local: model_doc/esm
|
||||
title: ESM
|
||||
- local: model_doc/flaubert
|
||||
title: FlauBERT
|
||||
- local: model_doc/fnet
|
||||
@ -279,6 +285,8 @@
|
||||
title: M2M100
|
||||
- local: model_doc/marian
|
||||
title: MarianMT
|
||||
- local: model_doc/markuplm
|
||||
title: MarkupLM
|
||||
- local: model_doc/mbart
|
||||
title: MBart and MBart-50
|
||||
- local: model_doc/megatron-bert
|
||||
@ -441,6 +449,8 @@
|
||||
title: Wav2Vec2Phoneme
|
||||
- local: model_doc/wavlm
|
||||
title: WavLM
|
||||
- local: model_doc/whisper
|
||||
title: Whisper
|
||||
- local: model_doc/xls_r
|
||||
title: XLS-R
|
||||
- local: model_doc/xlsr_wav2vec2
|
||||
@ -492,6 +502,11 @@
|
||||
- local: model_doc/trajectory_transformer
|
||||
title: Trajectory Transformer
|
||||
title: Reinforcement learning models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/time_series_transformer
|
||||
title: Time Series Transformer
|
||||
title: Time series models
|
||||
title: Models
|
||||
- sections:
|
||||
- local: internal/modeling_utils
|
||||
@ -507,4 +522,4 @@
|
||||
- local: internal/file_utils
|
||||
title: General Utilities
|
||||
title: Internal Helpers
|
||||
title: API
|
||||
title: API
|
||||
|
||||
@ -106,7 +106,7 @@ own regarding how code should be written :-)
|
||||
for a good example).
|
||||
2. The code should be fully understandable, even by a non-native English speaker. This means you should pick
|
||||
descriptive variable names and avoid abbreviations. As an example, `activation` is preferred to `act`.
|
||||
One-letter variable names are strongly discouraged unless it's an index in a for loop.
|
||||
One-letter variable names are strongly discouraged unless it's an index in a for loop.
|
||||
3. More generally we prefer longer explicit code to a short magical one.
|
||||
4. Avoid subclassing `nn.Sequential` in PyTorch but subclass `nn.Module` and write the forward pass, so that anyone
|
||||
using your code can quickly debug it by adding print statements or breaking points.
|
||||
@ -222,7 +222,7 @@ cd ..
|
||||
5. To port *brand_new_bert*, you will also need access to its original repository:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git
|
||||
git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git
|
||||
cd brand_new_bert
|
||||
pip install -e .
|
||||
```
|
||||
@ -683,10 +683,11 @@ work left to be done should be a cakewalk 😊.
|
||||
At this point, you have successfully added a new model. However, it is very much possible that the model does not yet
|
||||
fully comply with the required design. To make sure, the implementation is fully compatible with 🤗 Transformers, all
|
||||
common tests should pass. The Cookiecutter should have automatically added a test file for your model, probably under
|
||||
the same `tests/test_modeling_brand_new_bert.py`. Run this test file to verify that all common tests pass:
|
||||
the same `tests/models/brand_new_bert/test_modeling_brand_new_bert.py`. Run this test file to verify that all common
|
||||
tests pass:
|
||||
|
||||
```bash
|
||||
pytest tests/test_modeling_brand_new_bert.py
|
||||
pytest tests/models/brand_new_bert/test_modeling_brand_new_bert.py
|
||||
```
|
||||
|
||||
Having fixed all common tests, it is now crucial to ensure that all the nice work you have done is well tested, so that
|
||||
@ -700,7 +701,7 @@ Cookiecutter, called `BrandNewBertModelIntegrationTests` and only has to be fill
|
||||
tests are passing, run
|
||||
|
||||
```bash
|
||||
RUN_SLOW=1 pytest -sv tests/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests
|
||||
RUN_SLOW=1 pytest -sv tests/models/brand_new_bert/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests
|
||||
```
|
||||
|
||||
<Tip>
|
||||
@ -758,7 +759,8 @@ contain a couple of hard-coded integration tests.
|
||||
**10. Run End-to-end integration tests**
|
||||
|
||||
Having added the tokenizer, you should also add a couple of end-to-end integration tests using both the model and the
|
||||
tokenizer to `tests/test_modeling_brand_new_bert.py` in 🤗 Transformers. Such a test should show on a meaningful
|
||||
tokenizer to `tests/models/brand_new_bert/test_modeling_brand_new_bert.py` in 🤗 Transformers.
|
||||
Such a test should show on a meaningful
|
||||
text-to-text sample that the 🤗 Transformers implementation works as expected. A meaningful text-to-text sample can
|
||||
include *e.g.* a source-to-target-translation pair, an article-to-summary pair, a question-to-answer pair, etc… If none
|
||||
of the ported checkpoints has been fine-tuned on a downstream task it is enough to simply rely on the model tests. In a
|
||||
|
||||
346
docs/source/en/add_tensorflow_model.mdx
Normal file
@ -0,0 +1,346 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
-->
|
||||
|
||||
# How to convert a 🤗 Transformers model to TensorFlow?
|
||||
|
||||
Having multiple frameworks available to use with 🤗 Transformers gives you the flexibility to play to their strengths when
|
||||
designing your application, but it implies that compatibility must be added on a per-model basis. The good news is that
|
||||
adding TensorFlow compatibility to an existing model is simpler than [adding a new model from scratch](add_new_model)!
|
||||
Whether you wish to have a deeper understanding of large TensorFlow models, make a major open-source contribution, or
|
||||
enable TensorFlow for your model of choice, this guide is for you.
|
||||
|
||||
This guide empowers you, a member of our community, to contribute TensorFlow model weights and/or
|
||||
architectures to be used in 🤗 Transformers, with minimal supervision from the Hugging Face team. Writing a new model
|
||||
is no small feat, but hopefully this guide will make it less of a rollercoaster 🎢 and more of a walk in the park 🚶.
|
||||
Harnessing our collective experiences is absolutely critical to make this process increasingly easier, and thus we
|
||||
highly encourage you to suggest improvements to this guide!
|
||||
|
||||
Before you dive deeper, it is recommended that you check the following resources if you're new to 🤗 Transformers:
|
||||
- [General overview of 🤗 Transformers](add_new_model#general-overview-of-transformers)
|
||||
- [Hugging Face's TensorFlow Philosophy](https://huggingface.co/blog/tensorflow-philosophy)
|
||||
|
||||
In the remainder of this guide, you will learn what's needed to add a new TensorFlow model architecture, the
|
||||
procedure to convert PyTorch into TensorFlow model weights, and how to efficiently debug mismatches across ML
|
||||
frameworks. Let's get started!
|
||||
|
||||
<Tip>
|
||||
|
||||
Are you unsure whether the model you wish to use already has a corresponding TensorFlow architecture?
|
||||
|
||||
|
||||
|
||||
Check the `model_type` field of the `config.json` of your model of choice
|
||||
([example](https://huggingface.co/bert-base-uncased/blob/main/config.json#L14)). If the corresponding model folder in
|
||||
🤗 Transformers has a file whose name starts with "modeling_tf", it means that it has a corresponding TensorFlow
|
||||
architecture ([example](https://github.com/huggingface/transformers/tree/main/src/transformers/models/bert)).
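One quick way to check the `model_type` programmatically is with the public `AutoConfig` API (this requires network access to the Hub; the checkpoint below is only an example):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("bert-base-uncased")
print(config.model_type)  # "bert" -> then look for a modeling_tf_bert.py file in that model folder
```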
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
## Step-by-step guide to add TensorFlow model architecture code
|
||||
|
||||
There are many ways to design a large model architecture, and multiple ways of implementing said design. However,
|
||||
you might recall from our [general overview of 🤗 Transformers](add_new_model#general-overview-of-transformers)
|
||||
that we are an opinionated bunch - the ease of use of 🤗 Transformers relies on consistent design choices. From
|
||||
experience, we can tell you a few important things about adding TensorFlow models:
|
||||
|
||||
- Don't reinvent the wheel! More often than not, there are at least two reference implementations you should check: the
|
||||
PyTorch equivalent of the model you are implementing and other TensorFlow models for the same class of problems.
|
||||
- Great model implementations survive the test of time. This doesn't happen because the code is pretty, but rather
|
||||
because the code is clear, easy to debug and build upon. If you make the life of the maintainers easy with your
|
||||
TensorFlow implementation, by replicating the same patterns as in other TensorFlow models and minimizing the mismatch
|
||||
to the PyTorch implementation, you ensure your contribution will be long lived.
|
||||
- Ask for help when you're stuck! The 🤗 Transformers team is here to help, and we've probably found solutions to the same
|
||||
problems you're facing.
|
||||
|
||||
Here's an overview of the steps needed to add a TensorFlow model architecture:
|
||||
1. Select the model you wish to convert
|
||||
2. Prepare transformers dev environment
|
||||
3. (Optional) Understand theoretical aspects and the existing implementation
|
||||
4. Implement the model architecture
|
||||
5. Implement model tests
|
||||
6. Submit the pull request
|
||||
7. (Optional) Build demos and share with the world
|
||||
|
||||
### 1.-3. Prepare your model contribution
|
||||
|
||||
**1. Select the model you wish to convert**
|
||||
|
||||
Let's start off with the basics: the first thing you need to know is the architecture you want to convert. If you
|
||||
don't have your eyes set on a specific architecture, asking the 🤗 Transformers team for suggestions is a great way to
|
||||
maximize your impact - we will guide you towards the most prominent architectures that are missing on the TensorFlow
|
||||
side. If the specific model you want to use with TensorFlow already has a TensorFlow architecture implementation in
|
||||
🤗 Transformers but is lacking weights, feel free to jump straight into the
|
||||
[weight conversion section](#adding-tensorflow-weights-to-hub)
|
||||
of this page.
|
||||
|
||||
For simplicity, the remainder of this guide assumes you've decided to contribute with the TensorFlow version of
|
||||
*BrandNewBert* (the same example as in the [guide](add_new_model) to add a new model from scratch).
|
||||
|
||||
<Tip>
|
||||
|
||||
Before starting the work on a TensorFlow model architecture, double-check that there is no ongoing effort to do so.
|
||||
You can search for `BrandNewBert` on the
|
||||
[pull request GitHub page](https://github.com/huggingface/transformers/pulls?q=is%3Apr) to confirm that there is no
|
||||
TensorFlow-related pull request.
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
**2. Prepare transformers dev environment**
|
||||
|
||||
Having selected the model architecture, open a draft PR to signal your intention to work on it. Follow the
|
||||
instructions below to set up your environment and open a draft PR.
|
||||
|
||||
1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the 'Fork' button on the
|
||||
repository's page. This creates a copy of the code under your GitHub user account.
|
||||
|
||||
2. Clone your `transformers` fork to your local disk, and add the base repository as a remote:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/[your Github handle]/transformers.git
|
||||
cd transformers
|
||||
git remote add upstream https://github.com/huggingface/transformers.git
|
||||
```
|
||||
|
||||
3. Set up a development environment, for instance by running the following command:
|
||||
|
||||
```bash
|
||||
python -m venv .env
|
||||
source .env/bin/activate
|
||||
pip install -e ".[dev]"
|
||||
```
|
||||
|
||||
**Note:** You don't need to have CUDA installed. Making the new model work on CPU is sufficient.
|
||||
|
||||
4. Create a branch with a descriptive name from your main branch
|
||||
|
||||
```bash
|
||||
git checkout -b add_tf_brand_new_bert
|
||||
```
|
||||
|
||||
5. Fetch and rebase to current main
|
||||
|
||||
```bash
|
||||
git fetch upstream
|
||||
git rebase upstream/main
|
||||
```
|
||||
|
||||
6. Add an empty `.py` file in `src/transformers/models/brand_new_bert/` named `modeling_tf_brand_new_bert.py`. This will
|
||||
be your TensorFlow model file.
|
||||
|
||||
7. Push the changes to your account using:
|
||||
|
||||
```bash
|
||||
git add .
|
||||
git commit -m "initial commit"
|
||||
git push -u origin add_tf_brand_new_bert
|
||||
```
|
||||
|
||||
8. Once you are satisfied, go to the webpage of your fork on GitHub. Click on “Pull request”. Make sure to add the
|
||||
GitHub handle of some members of the Hugging Face team as reviewers, so that the Hugging Face team gets notified for
|
||||
future changes.
|
||||
|
||||
9. Change the PR into a draft by clicking on “Convert to draft” on the right of the GitHub pull request web page.
|
||||
|
||||
|
||||
Now you have set up a development environment to port *BrandNewBert* to TensorFlow in 🤗 Transformers.
|
||||
|
||||
|
||||
**3. (Optional) Understand theoretical aspects and the existing implementation**
|
||||
|
||||
You should take some time to read *BrandNewBert's* paper, if such descriptive work exists. There might be large
|
||||
sections of the paper that are difficult to understand. If this is the case, this is fine - don't worry! The goal is
|
||||
not to get a deep theoretical understanding of the paper, but to extract the necessary information required to
|
||||
effectively re-implement the model in 🤗 Transformers using TensorFlow. That being said, you don't have to spend too
|
||||
much time on the theoretical aspects, but rather focus on the practical ones, namely the existing model documentation
|
||||
page (e.g. [model docs for BERT](model_doc/bert)).
|
||||
|
||||
After you've grasped the basics of the models you are about to implement, it's important to understand the existing
|
||||
implementation. This is a great chance to confirm that a working implementation matches your expectations for the
|
||||
model, as well as to foresee technical challenges on the TensorFlow side.
|
||||
|
||||
It's perfectly natural that you feel overwhelmed with the amount of information that you've just absorbed. It is
|
||||
definitely not a requirement that you understand all facets of the model at this stage. Nevertheless, we highly
|
||||
encourage you to clear any pressing questions in our [forum](https://discuss.huggingface.co/).
|
||||
|
||||
|
||||
### 4. Model implementation
|
||||
|
||||
Now it's time to finally start coding. Our suggested starting point is the PyTorch file itself: copy the contents of
|
||||
`modeling_brand_new_bert.py` inside `src/transformers/models/brand_new_bert/` into
|
||||
`modeling_tf_brand_new_bert.py`. The goal of this section is to modify the file and update the import structure of
|
||||
🤗 Transformers such that you can import `TFBrandNewBert` and
|
||||
`TFBrandNewBert.from_pretrained(model_repo, from_pt=True)` successfully loads a working TensorFlow *BrandNewBert* model.
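In practice, the end state you are aiming for looks roughly like the sketch below. Note that `TFBrandNewBert` and the checkpoint name are just this guide's placeholders, not real objects in the library:

```python
import tensorflow as tf

from transformers import TFBrandNewBert  # placeholder class used throughout this guide

# Cross-load the existing PyTorch weights into the new TensorFlow architecture.
# "model_repo" stands for whichever Hub repository hosts the PyTorch checkpoint.
model = TFBrandNewBert.from_pretrained("model_repo", from_pt=True)

# Quick smoke test with dummy input ids (the exact inputs and outputs depend on the model).
outputs = model(tf.constant([[1, 2, 3, 4]]), training=False)
print(outputs)
```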
|
||||
|
||||
Sadly, there is no prescription to convert a PyTorch model into TensorFlow. You can, however, follow our selection of
|
||||
tips to make the process as smooth as possible (a short illustrative sketch follows the list):
|
||||
- Prepend `TF` to the name of all classes (e.g. `BrandNewBert` becomes `TFBrandNewBert`).
|
||||
- Most PyTorch operations have a direct TensorFlow replacement. For example, `torch.nn.Linear` corresponds to
|
||||
`tf.keras.layers.Dense`, `torch.nn.Dropout` corresponds to `tf.keras.layers.Dropout`, etc. If you're not sure
|
||||
about a specific operation, you can use the [TensorFlow documentation](https://www.tensorflow.org/api_docs/python/tf)
|
||||
or the [PyTorch documentation](https://pytorch.org/docs/stable/).
|
||||
- Look for patterns in the 🤗 Transformers codebase. If you come across a certain operation that doesn't have a direct
|
||||
replacement, the odds are that someone else already had the same problem.
|
||||
- By default, keep the same variable names and structure as in PyTorch. This will make it easier to debug, track
|
||||
issues, and add fixes down the line.
|
||||
- Some layers have different default values in each framework. A notable example is the batch normalization layer's
|
||||
epsilon (`1e-5` in [PyTorch](https://pytorch.org/docs/stable/generated/torch.nn.BatchNorm2d.html#torch.nn.BatchNorm2d)
|
||||
and `1e-3` in [TensorFlow](https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization)).
|
||||
Double-check the documentation!
|
||||
- PyTorch's `nn.Parameter` variables typically need to be initialized within TF Layer's `build()`. See the following
|
||||
example: [PyTorch](https://github.com/huggingface/transformers/blob/655f72a6896c0533b1bdee519ed65a059c2425ac/src/transformers/models/vit_mae/modeling_vit_mae.py#L212) /
|
||||
[TensorFlow](https://github.com/huggingface/transformers/blob/655f72a6896c0533b1bdee519ed65a059c2425ac/src/transformers/models/vit_mae/modeling_tf_vit_mae.py#L220)
|
||||
- If the PyTorch model has a `#copied from ...` on top of a function, the odds are that your TensorFlow model can also
|
||||
borrow that function from the architecture it was copied from, assuming it has a TensorFlow architecture.
|
||||
- Assigning the `name` attribute correctly in TensorFlow functions is critical to do the `from_pt=True` weight
|
||||
cross-loading. `name` is almost always the name of the corresponding variable in the PyTorch code. If `name` is not
|
||||
properly set, you will see it in the error message when loading the model weights.
|
||||
- The logic of the base model class, `BrandNewBertModel`, will actually reside in `TFBrandNewBertMainLayer`, a Keras
|
||||
layer subclass ([example](https://github.com/huggingface/transformers/blob/4fd32a1f499e45f009c2c0dea4d81c321cba7e02/src/transformers/models/bert/modeling_tf_bert.py#L719)).
|
||||
`TFBrandNewBertModel` will simply be a wrapper around this layer.
|
||||
- Keras models need to be built in order to load pretrained weights. For that reason, `TFBrandNewBertPreTrainedModel`
|
||||
will need to hold an example of inputs to the model, the `dummy_inputs`
|
||||
([example](https://github.com/huggingface/transformers/blob/4fd32a1f499e45f009c2c0dea4d81c321cba7e02/src/transformers/models/bert/modeling_tf_bert.py#L916)).
|
||||
- If you get stuck, ask for help - we're here to help you! 🤗
|
||||
|
||||
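To make several of the tips above more concrete, here is a deliberately simplified sketch of the pattern. The class names, sizes and layer structure are placeholder assumptions; the real modeling files additionally inherit from the library's TF base classes and use the `@unpack_inputs` and `@keras_serializable` decorators mentioned in the checklist further down:

```python
import tensorflow as tf


class TFBrandNewBertMainLayer(tf.keras.layers.Layer):
    # The logic of the (hypothetical) PyTorch `BrandNewBertModel` lives in a Keras
    # layer subclass, as described in the tips above.
    def __init__(self, hidden_size=64, dropout_rate=0.1, **kwargs):
        super().__init__(**kwargs)
        # torch.nn.Linear -> tf.keras.layers.Dense; `name` should match the PyTorch
        # variable name so that `from_pt=True` weight cross-loading can find it.
        self.dense = tf.keras.layers.Dense(hidden_size, name="dense")
        self.dropout = tf.keras.layers.Dropout(dropout_rate)

    def call(self, hidden_states, training=False):
        # `training` is propagated from the top-level model so that layers such as
        # Dropout behave correctly at train vs. inference time.
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        return hidden_states


class TFBrandNewBertModel(tf.keras.Model):
    # Thin wrapper around the main layer, mirroring the structure described above.
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.brand_new_bert = TFBrandNewBertMainLayer(name="brand_new_bert")

    def call(self, inputs, training=False):
        return self.brand_new_bert(inputs, training=training)


model = TFBrandNewBertModel()
print(model(tf.random.uniform((2, 16, 64)), training=False).shape)  # (2, 16, 64)
```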
In addition to the model file itself, you will also need to add the pointers to the model classes and related
|
||||
documentation pages. You can complete this part entirely following the patterns in other PRs
|
||||
([example](https://github.com/huggingface/transformers/pull/18020/files)). Here's a list of the needed manual
|
||||
changes:
|
||||
- Include all public classes of *BrandNewBert* in `src/transformers/__init__.py`
|
||||
- Add *BrandNewBert* classes to the corresponding Auto classes in `src/transformers/models/auto/modeling_tf_auto.py`
|
||||
- Include the modeling file in the documentation test file list in `utils/documentation_tests.txt`
|
||||
- Add the lazy loading classes related to *BrandNewBert* in `src/transformers/utils/dummy_tf_objects.py`
|
||||
- Update the import structures for the public classes in `src/transformers/models/brand_new_bert/__init__.py`
|
||||
- Add the documentation pointers to the public methods of *BrandNewBert* in `docs/source/en/model_doc/brand_new_bert.mdx`
|
||||
- Add yourself to the list of contributors to *BrandNewBert* in `docs/source/en/model_doc/brand_new_bert.mdx`
|
||||
- Finally, add a green tick ✅ to the TensorFlow column of *BrandNewBert* in `docs/source/en/index.mdx`
|
||||
|
||||
When you're happy with your implementation, run the following checklist to confirm that your model architecture is
|
||||
ready:
|
||||
1. All layers that behave differently at train time (e.g. Dropout) are called with a `training` argument, which is
|
||||
propagated all the way from the top-level classes
|
||||
2. You have used `#copied from ...` whenever possible
|
||||
3. `TFBrandNewBertMainLayer` and all classes that use it have their `call` function decorated with `@unpack_inputs`
|
||||
4. `TFBrandNewBertMainLayer` is decorated with `@keras_serializable`
|
||||
5. A TensorFlow model can be loaded from PyTorch weights using `TFBrandNewBert.from_pretrained(model_repo, from_pt=True)`
|
||||
6. You can call the TensorFlow model using the expected input format
|
||||
|
||||
|
||||
### 5. Add model tests
|
||||
|
||||
Hurray, you've implemented a TensorFlow model! Now it's time to add tests to make sure that your model behaves as
|
||||
expected. As in the previous section, we suggest you start by copying the `test_modeling_brand_new_bert.py` file in
|
||||
`tests/models/brand_new_bert/` into `test_modeling_tf_brand_new_bert.py`, and continue by making the necessary
|
||||
TensorFlow replacements. For now, in all `.from_pretrained()` calls, you should use the `from_pt=True` flag to load
|
||||
the existing PyTorch weights.
|
||||
|
||||
After you're done, it's time for the moment of truth: run the tests! 😬
|
||||
|
||||
```bash
|
||||
NVIDIA_TF32_OVERRIDE=0 RUN_SLOW=1 RUN_PT_TF_CROSS_TESTS=1 \
|
||||
py.test -vv tests/models/brand_new_bert/test_modeling_tf_brand_new_bert.py
|
||||
```
|
||||
|
||||
The most likely outcome is that you'll see a bunch of errors. Don't worry, this is expected! Debugging ML models is
|
||||
notoriously hard, and the key ingredient to success is patience (and `breakpoint()`). In our experience, the hardest
|
||||
problems arise from subtle mismatches between ML frameworks, for which we have a few pointers at the end of this guide.
|
||||
In other cases, a general test might not be directly applicable to your model, in which case we suggest an override
|
||||
at the model test class level. Regardless of the issue, don't hesitate to ask for help in your draft pull request if
|
||||
you're stuck.
|
||||
|
||||
When all tests pass, congratulations, your model is nearly ready to be added to the 🤗 Transformers library! 🎉
|
||||
|
||||
### 6.-7. Ensure everyone can use your model
|
||||
|
||||
**6. Submit the pull request**
|
||||
|
||||
Once you're done with the implementation and the tests, it's time to submit a pull request. Before pushing your code,
|
||||
run our code formatting utility, `make fixup` 🪄. This will automatically fix any formatting issues, which would cause
|
||||
our automatic checks to fail.
|
||||
|
||||
It's now time to convert your draft pull request into a real pull request. To do so, click on the "Ready for
|
||||
review" button and add Joao (`@gante`) and Matt (`@Rocketknight1`) as reviewers. A model pull request will need
|
||||
at least 3 reviewers, but they will take care of finding appropriate additional reviewers for your model.
|
||||
|
||||
After all reviewers are happy with the state of your PR, the final action point is to remove the `from_pt=True` flag in
|
||||
`.from_pretrained()` calls. Since there are no TensorFlow weights, you will have to add them! Check the section
|
||||
below for instructions on how to do it.
|
||||
|
||||
Finally, when the TensorFlow weights get merged, you have at least 3 reviewer approvals, and all CI checks are
|
||||
green, double-check the tests locally one last time
|
||||
|
||||
```bash
|
||||
NVIDIA_TF32_OVERRIDE=0 RUN_SLOW=1 RUN_PT_TF_CROSS_TESTS=1 \
|
||||
py.test -vv tests/models/brand_new_bert/test_modeling_tf_brand_new_bert.py
|
||||
```
|
||||
|
||||
and we will merge your PR! Congratulations on the milestone 🎉
|
||||
|
||||
**7. (Optional) Build demos and share with the world**
|
||||
|
||||
One of the hardest parts about open-source is discovery. How can the other users learn about the existence of your
|
||||
fabulous TensorFlow contribution? With proper communication, of course! 📣
|
||||
|
||||
There are two main ways to share your model with the community:
|
||||
- Build demos. These include Gradio demos, notebooks, and other fun ways to show off your model. We highly
|
||||
encourage you to add a notebook to our [community-driven demos](https://huggingface.co/docs/transformers/community).
|
||||
- Share stories on social media like Twitter and LinkedIn. You should be proud of your work and share
|
||||
your achievement with the community - your model can now be used by thousands of engineers and researchers around
|
||||
the world 🌍! We will be happy to retweet your posts and help you share your work with the community.
|
||||
|
||||
|
||||
## Adding TensorFlow weights to 🤗 Hub
|
||||
|
||||
Assuming that the TensorFlow model architecture is available in 🤗 Transformers, converting PyTorch weights into
|
||||
TensorFlow weights is a breeze!
|
||||
|
||||
Here's how to do it:
|
||||
1. Make sure you are logged into your Hugging Face account in your terminal. You can log in using the command
|
||||
`huggingface-cli login` (you can find your access tokens [here](https://huggingface.co/settings/tokens))
|
||||
2. Run `transformers-cli pt-to-tf --model-name foo/bar`, where `foo/bar` is the name of the model repository
|
||||
containing the PyTorch weights you want to convert
|
||||
3. Tag `@joaogante` and `@Rocketknight1` in the 🤗 Hub PR the command above has just created
|
||||
|
||||
That's it! 🎉
|
||||
|
||||
|
||||
## Debugging mismatches across ML frameworks 🐛
|
||||
|
||||
At some point, when adding a new architecture or when creating TensorFlow weights for an existing architecture, you
|
||||
might come across errors complaining about mismatches between PyTorch and TensorFlow. You might even decide to open the
|
||||
model architecture code for the two frameworks, and find that they look identical. What's going on? 🤔
|
||||
|
||||
First of all, let's talk about why understanding these mismatches matters. Many community members will use 🤗
|
||||
Transformers models out of the box, and trust that our models behave as expected. When there is a large mismatch
|
||||
between the two frameworks, it implies that the model is not following the reference implementation for at least one
|
||||
of the frameworks. This might lead to silent failures, in which the model runs but has poor performance. This is
|
||||
arguably worse than a model that fails to run at all! To that end, we aim at having a framework mismatch smaller than
|
||||
`1e-5` at all stages of the model.
|
||||
|
||||
As in other numerical problems, the devil is in the details. And as in any detail-oriented craft, the secret
|
||||
ingredient here is patience. Here is our suggested workflow for when you come across this type of issue (a short numerical-comparison sketch follows the list):
|
||||
1. Locate the source of mismatches. The model you're converting probably has near identical inner variables up to a
|
||||
certain point. Place `breakpoint()` statements in the two frameworks' architectures, and compare the values of the
|
||||
numerical variables in a top-down fashion until you find the source of the problems.
|
||||
2. Now that you've pinpointed the source of the issue, get in touch with the 🤗 Transformers team. It is possible
|
||||
that we've seen a similar problem before and can promptly provide a solution. As a fallback, scan popular pages
|
||||
like StackOverflow and GitHub issues.
|
||||
3. If there is no solution in sight, it means you'll have to go deeper. The good news is that you've located the
|
||||
issue, so you can focus on the problematic instruction, abstracting away the rest of the model! The bad news is
|
||||
that you'll have to venture into the source implementation of said instruction. In some cases, you might find an
|
||||
issue with a reference implementation - don't abstain from opening an issue in the upstream repository.
|
||||
|
||||
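For step 1., a small helper along the following lines can make the comparison systematic. It is only a sketch, not part of any 🤗 Transformers API, and how you capture the tensors from the two `breakpoint()` sessions is up to you:

```python
import numpy as np


def report_max_abs_diff(pt_tensor, tf_tensor, name):
    # pt_tensor / tf_tensor: arrays captured at the same point of the forward pass
    # in the two frameworks (e.g. `tensor.detach().numpy()` in PyTorch and
    # `tensor.numpy()` in TensorFlow).
    diff = np.max(np.abs(np.asarray(pt_tensor) - np.asarray(tf_tensor)))
    status = "OK" if diff < 1e-5 else "MISMATCH"  # the 1e-5 target mentioned above
    print(f"{name}: max abs diff = {diff:.2e} [{status}]")
    return diff


# Stand-in values for illustration; in practice, pass the captured hidden states.
report_max_abs_diff(np.ones((2, 4)), np.ones((2, 4)) + 1e-7, "encoder.layer.0.output")
```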
In some cases, in discussion with the 🤗 Transformers team, we might find that fixing the mismatch is infeasible.
|
||||
When the mismatch is very small in the output layers of the model (but potentially large in the hidden states), we
|
||||
might decide to ignore it in favor of distributing the model. The `pt-to-tf` CLI mentioned above has a `--max-error`
|
||||
flag to override the error message at weight conversion time.
|
||||
@ -10,7 +10,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Converting Tensorflow Checkpoints
|
||||
# Converting From Tensorflow Checkpoints
|
||||
|
||||
A command-line interface is provided to convert original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints to models
|
||||
that can be loaded using the `from_pretrained` methods of the library.
|
||||
|
||||
@ -90,6 +90,7 @@ The documentation is organized into five sections:
|
||||
1. **[ELECTRA](model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
|
||||
1. **[EncoderDecoder](model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[ERNIE](model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
|
||||
1. **[ESM](model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
||||
1. **[FlauBERT](model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
||||
1. **[FLAVA](model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
|
||||
1. **[FNet](model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
|
||||
@ -118,6 +119,7 @@ The documentation is organized into five sections:
|
||||
1. **[M-CTC-T](model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
|
||||
1. **[M2M100](model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
|
||||
1. **[MarianMT](model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
|
||||
1. **[MarkupLM](model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
|
||||
1. **[MaskFormer](model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
|
||||
1. **[mBART](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
|
||||
1. **[mBART-50](model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
|
||||
@ -163,6 +165,7 @@ The documentation is organized into five sections:
|
||||
1. **[T5v1.1](model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[TAPAS](model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
|
||||
1. **[TAPEX](model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
|
||||
1. **[Time Series Transformer](model_doc/time_series_transformer)** (from HuggingFace).
|
||||
1. **[Trajectory Transformer](model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
|
||||
1. **[Transformer-XL](model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||
1. **[TrOCR](model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
|
||||
@ -180,6 +183,7 @@ The documentation is organized into five sections:
|
||||
1. **[Wav2Vec2-Conformer](model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
|
||||
1. **[Wav2Vec2Phoneme](model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
|
||||
1. **[WavLM](model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
|
||||
1. **[Whisper](model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
|
||||
1. **[X-CLIP](model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
|
||||
1. **[XGLM](model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
|
||||
1. **[XLM](model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
|
||||
@ -238,6 +242,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| ELECTRA | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| Encoder decoder | ❌ | ❌ | ✅ | ✅ | ✅ |
|
||||
| ERNIE | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| ESM | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| FairSeq Machine-Translation | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| FlauBERT | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| FLAVA | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
@ -264,6 +269,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| M-CTC-T | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| M2M100 | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| Marian | ✅ | ❌ | ✅ | ✅ | ✅ |
|
||||
| MarkupLM | ✅ | ✅ | ✅ | ❌ | ❌ |
|
||||
| MaskFormer | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| mBART | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| Megatron-BERT | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
@ -306,6 +312,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| Swin Transformer V2 | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| T5 | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| TAPAS | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| Time Series Transformer | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| Trajectory Transformer | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| Transformer-XL | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| TrOCR | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
@ -323,6 +330,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| Wav2Vec2 | ✅ | ❌ | ✅ | ✅ | ✅ |
|
||||
| Wav2Vec2-Conformer | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| WavLM | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| Whisper | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| X-CLIP | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| XGLM | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| XLM | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
|
||||
@ -43,6 +43,7 @@ There are two categories of pipeline abstractions to be aware about:
|
||||
- [`VisualQuestionAnsweringPipeline`]
|
||||
- [`ZeroShotClassificationPipeline`]
|
||||
- [`ZeroShotImageClassificationPipeline`]
|
||||
- [`ZeroShotObjectDetectionPipeline`]
|
||||
|
||||
## The pipeline abstraction
|
||||
|
||||
@ -456,6 +457,12 @@ See [`TokenClassificationPipeline`] for all details.
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### ZeroShotObjectDetectionPipeline
|
||||
|
||||
[[autodoc]] ZeroShotObjectDetectionPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
## Parent class: `Pipeline`
|
||||
|
||||
[[autodoc]] Pipeline
|
||||
|
||||
@ -174,6 +174,10 @@ Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its
|
||||
|
||||
[[autodoc]] AutoModelForInstanceSegmentation
|
||||
|
||||
## AutoModelForZeroShotObjectDetection
|
||||
|
||||
[[autodoc]] AutoModelForZeroShotObjectDetection
|
||||
|
||||
## TFAutoModel
|
||||
|
||||
[[autodoc]] TFAutoModel
|
||||
|
||||
@ -55,3 +55,8 @@ Several smaller versions of the models have been trained on the same dataset. BL
|
||||
|
||||
[[autodoc]] BloomForTokenClassification
|
||||
- forward
|
||||
|
||||
## BloomForQuestionAnswering
|
||||
|
||||
[[autodoc]] BloomForQuestionAnswering
|
||||
- forward
|
||||
|
||||
@ -171,9 +171,10 @@ mean Average Precision (mAP) and Panoptic Quality (PQ). The latter objects are i
|
||||
[[autodoc]] DetrFeatureExtractor
|
||||
- __call__
|
||||
- pad_and_create_pixel_mask
|
||||
- post_process
|
||||
- post_process_segmentation
|
||||
- post_process_panoptic
|
||||
- post_process_object_detection
|
||||
- post_process_semantic_segmentation
|
||||
- post_process_instance_segmentation
|
||||
- post_process_panoptic_segmentation
|
||||
|
||||
## DetrModel
|
||||
|
||||
|
||||
109
docs/source/en/model_doc/esm.mdx
Normal file
@ -0,0 +1,109 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# ESM
|
||||
|
||||
## Overview
|
||||
This page provides code and pre-trained weights for Transformer protein language models from Meta AI's Fundamental
AI Research Team, including the state-of-the-art ESM-2 and the previously released ESM-1b and ESM-1v. Transformer
|
||||
protein language models were introduced in the paper [Biological structure and function emerge from scaling
|
||||
unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by
|
||||
Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott,
|
||||
C. Lawrence Zitnick, Jerry Ma, and Rob Fergus.
|
||||
The first version of this paper was [preprinted in 2019](https://www.biorxiv.org/content/10.1101/622803v1?versioned=true).
|
||||
|
||||
ESM-2 outperforms all tested single-sequence protein language models across a range of structure prediction tasks,
|
||||
and enables atomic resolution structure prediction.
|
||||
It was released with the paper [Language models of protein sequences at the scale of evolution enable accurate
|
||||
structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie,
|
||||
Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido and Alexander Rives.
|
||||
|
||||
|
||||
The abstract from
|
||||
"Biological structure and function emerge from scaling unsupervised learning to 250
|
||||
million protein sequences" is
|
||||
|
||||
|
||||
*In the field of artificial intelligence, a combination of scale in data and model capacity enabled by unsupervised
|
||||
learning has led to major advances in representation learning and statistical generation. In the life sciences, the
|
||||
anticipated growth of sequencing promises unprecedented data on natural sequence diversity. Protein language modeling
|
||||
at the scale of evolution is a logical step toward predictive and generative artificial intelligence for biology. To
|
||||
this end, we use unsupervised learning to train a deep contextual language model on 86 billion amino acids across 250
|
||||
million protein sequences spanning evolutionary diversity. The resulting model contains information about biological
|
||||
properties in its representations. The representations are learned from sequence data alone. The learned representation
|
||||
space has a multiscale organization reflecting structure from the level of biochemical properties of amino acids to
|
||||
remote homology of proteins. Information about secondary and tertiary structure is encoded in the representations and
|
||||
can be identified by linear projections. Representation learning produces features that generalize across a range of
|
||||
applications, enabling state-of-the-art supervised prediction of mutational effect and secondary structure and
|
||||
improving state-of-the-art features for long-range contact prediction.*
|
||||
|
||||
|
||||
The abstract from
|
||||
"Language models of protein sequences at the scale of evolution enable accurate structure prediction" is
|
||||
|
||||
*Large language models have recently been shown to develop emergent capabilities with scale, going beyond
|
||||
simple pattern matching to perform higher level reasoning and generate lifelike images and text. While
|
||||
language models trained on protein sequences have been studied at a smaller scale, little is known about
|
||||
what they learn about biology as they are scaled up. In this work we train models up to 15 billion parameters,
|
||||
the largest language models of proteins to be evaluated to date. We find that as models are scaled they learn
|
||||
information enabling the prediction of the three-dimensional structure of a protein at the resolution of
|
||||
individual atoms. We present ESMFold for high accuracy end-to-end atomic level structure prediction directly
|
||||
from the individual sequence of a protein. ESMFold has similar accuracy to AlphaFold2 and RoseTTAFold for
|
||||
sequences with low perplexity that are well understood by the language model. ESMFold inference is an
|
||||
order of magnitude faster than AlphaFold2, enabling exploration of the structural space of metagenomic
|
||||
proteins in practical timescales.*
|
||||
|
||||
|
||||
|
||||
|
||||
Tips:
|
||||
|
||||
- ESM models are trained with a masked language modeling (MLM) objective.
|
||||
|
||||
The original code can be found [here](https://github.com/facebookresearch/esm) and was
developed by the Fundamental AI Research team at Meta AI.
|
||||
This model was contributed to huggingface by [jasonliu](https://huggingface.co/jasonliu)
|
||||
and [Matt](https://huggingface.co/Rocketknight1).
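
As a quick illustration of the masked language modeling objective mentioned in the tips, the snippet below scores a masked residue with [`EsmForMaskedLM`]. It is only a sketch: the `facebook/esm2_t6_8M_UR50D` checkpoint is assumed to be available on the Hub, and the protein fragment is made up.

```python
import torch
from transformers import AutoTokenizer, EsmForMaskedLM

checkpoint = "facebook/esm2_t6_8M_UR50D"  # assumed small ESM-2 checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = EsmForMaskedLM.from_pretrained(checkpoint)

# Made-up protein fragment with a single masked residue
sequence = "MKTAYIAKQR<mask>ISFVKSHFSRQLEERLGLIEVQ"
inputs = tokenizer(sequence, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Most likely amino acid for the masked position
mask_positions = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))
```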
|
||||
|
||||
## EsmConfig
|
||||
|
||||
[[autodoc]] EsmConfig
|
||||
- all
|
||||
|
||||
## EsmTokenizer
|
||||
|
||||
[[autodoc]] EsmTokenizer
|
||||
- build_inputs_with_special_tokens
|
||||
- get_special_tokens_mask
|
||||
- create_token_type_ids_from_sequences
|
||||
- save_vocabulary
|
||||
|
||||
|
||||
## EsmModel
|
||||
|
||||
[[autodoc]] EsmModel
|
||||
- forward
|
||||
|
||||
## EsmForMaskedLM
|
||||
|
||||
[[autodoc]] EsmForMaskedLM
|
||||
- forward
|
||||
|
||||
## EsmForSequenceClassification
|
||||
|
||||
[[autodoc]] EsmForSequenceClassification
|
||||
- forward
|
||||
|
||||
## EsmForTokenClassification
|
||||
|
||||
[[autodoc]] EsmForTokenClassification
|
||||
- forward
|
||||
246
docs/source/en/model_doc/markuplm.mdx
Normal file
@ -0,0 +1,246 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# MarkupLM
|
||||
|
||||
## Overview
|
||||
|
||||
The MarkupLM model was proposed in [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document
|
||||
Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. MarkupLM is BERT, but
|
||||
applied to HTML pages instead of raw text documents. The model incorporates additional embedding layers to improve
|
||||
performance, similar to [LayoutLM](layoutlm).
|
||||
|
||||
The model can be used for tasks like question answering on web pages or information extraction from web pages. It obtains
|
||||
state-of-the-art results on 2 important benchmarks:
|
||||
- [WebSRC](https://x-lance.github.io/WebSRC/), a dataset for Web-Based Structural Reading Comprehension (a bit like SQuAD but for web pages)
|
||||
- [SWDE](https://www.researchgate.net/publication/221299838_From_one_tree_to_a_forest_a_unified_solution_for_structured_web_data_extraction), a dataset
|
||||
for information extraction from web pages (basically named-entity recogntion on web pages)
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Multimodal pre-training with text, layout, and image has made significant progress for Visually-rich Document
|
||||
Understanding (VrDU), especially the fixed-layout documents such as scanned document images. While, there are still a
|
||||
large number of digital documents where the layout information is not fixed and needs to be interactively and
|
||||
dynamically rendered for visualization, making existing layout-based pre-training approaches not easy to apply. In this
|
||||
paper, we propose MarkupLM for document understanding tasks with markup languages as the backbone such as
|
||||
HTML/XML-based documents, where text and markup information is jointly pre-trained. Experiment results show that the
|
||||
pre-trained MarkupLM significantly outperforms the existing strong baseline models on several document understanding
|
||||
tasks. The pre-trained model and code will be publicly available.*
|
||||
|
||||
Tips:
|
||||
- In addition to `input_ids`, [`~MarkupLMModel.forward`] expects 2 additional inputs, namely `xpath_tags_seq` and `xpath_subs_seq`.
|
||||
These are the XPATH tags and subscripts respectively for each token in the input sequence.
|
||||
- One can use [`MarkupLMProcessor`] to prepare all data for the model. Refer to the [usage guide](#usage-markuplmprocessor) for more info.
|
||||
- Demo notebooks can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/MarkupLM).
|
||||
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/markuplm_architecture.jpg"
|
||||
alt="drawing" width="600"/>
|
||||
|
||||
<small> MarkupLM architecture. Taken from the <a href="https://arxiv.org/abs/2110.08518">original paper.</a> </small>
|
||||
|
||||
This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/markuplm).
|
||||
|
||||
## Usage: MarkupLMProcessor
|
||||
|
||||
The easiest way to prepare data for the model is to use [`MarkupLMProcessor`], which internally combines a feature extractor
|
||||
([`MarkupLMFeatureExtractor`]) and a tokenizer ([`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]). The feature extractor is
|
||||
used to extract all nodes and xpaths from the HTML strings, which are then provided to the tokenizer, which turns them into the
|
||||
token-level inputs of the model (`input_ids` etc.). Note that you can still use the feature extractor and tokenizer separately,
|
||||
if you only want to handle one of the two tasks.
|
||||
|
||||
```python
|
||||
from transformers import MarkupLMFeatureExtractor, MarkupLMTokenizerFast, MarkupLMProcessor
|
||||
|
||||
feature_extractor = MarkupLMFeatureExtractor()
|
||||
tokenizer = MarkupLMTokenizerFast.from_pretrained("microsoft/markuplm-base")
|
||||
processor = MarkupLMProcessor(feature_extractor, tokenizer)
|
||||
```
|
||||
|
||||
In short, one can provide HTML strings (and possibly additional data) to [`MarkupLMProcessor`],
|
||||
and it will create the inputs expected by the model. Internally, the processor first uses
|
||||
[`MarkupLMFeatureExtractor`] to get a list of nodes and corresponding xpaths. The nodes and
|
||||
xpaths are then provided to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`], which converts them
|
||||
to token-level `input_ids`, `attention_mask`, `token_type_ids`, `xpath_subs_seq`, `xpath_tags_seq`.
|
||||
Optionally, one can provide node labels to the processor, which are turned into token-level `labels`.
|
||||
|
||||
[`MarkupLMFeatureExtractor`] uses [Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/), a Python library for
|
||||
pulling data out of HTML and XML files, under the hood. Note that you can still use your own parsing solution of
|
||||
choice, and provide the nodes and xpaths yourself to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`].
|
||||
|
||||
In total, there are 5 use cases that are supported by the processor. Below, we list them all. Note that each of these
|
||||
use cases work for both batched and non-batched inputs (we illustrate them for non-batched inputs).
|
||||
|
||||
**Use case 1: web page classification (training, inference) + token classification (inference), parse_html = True**
|
||||
|
||||
This is the simplest case, in which the processor will use the feature extractor to get all nodes and xpaths from the HTML.
|
||||
|
||||
```python
|
||||
>>> from transformers import MarkupLMProcessor
|
||||
|
||||
>>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base")
|
||||
|
||||
>>> html_string = """
|
||||
... <!DOCTYPE html>
|
||||
... <html>
|
||||
... <head>
|
||||
... <title>Hello world</title>
|
||||
... </head>
|
||||
... <body>
|
||||
|
||||
... <h1>Welcome</h1>
|
||||
... <p>Here is my website.</p>
|
||||
|
||||
... </body>
|
||||
... </html>"""
|
||||
|
||||
>>> # note that you can also provide all tokenizer parameters here, such as padding and truncation
|
||||
>>> encoding = processor(html_string, return_tensors="pt")
|
||||
>>> print(encoding.keys())
|
||||
dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq'])
|
||||
```
|
||||
|
||||
**Use case 2: web page classification (training, inference) + token classification (inference), parse_html=False**
|
||||
|
||||
In case one already has obtained all nodes and xpaths, one doesn't need the feature extractor. In that case, one should
|
||||
provide the nodes and corresponding xpaths themselves to the processor, and make sure to set `parse_html` to `False`.
|
||||
|
||||
```python
|
||||
>>> from transformers import MarkupLMProcessor
|
||||
|
||||
>>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base")
|
||||
>>> processor.parse_html = False
|
||||
|
||||
>>> nodes = ["hello", "world", "how", "are"]
|
||||
>>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"]
|
||||
>>> encoding = processor(nodes=nodes, xpaths=xpaths, return_tensors="pt")
|
||||
>>> print(encoding.keys())
|
||||
dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq'])
|
||||
```
|
||||
|
||||
**Use case 3: token classification (training), parse_html=False**
|
||||
|
||||
For token classification tasks (such as [SWDE](https://paperswithcode.com/dataset/swde)), one can also provide the
|
||||
corresponding node labels in order to train a model. The processor will then convert these into token-level `labels`.
|
||||
By default, it will only label the first wordpiece of a word, and label the remaining wordpieces with -100, which is the
|
||||
`ignore_index` of PyTorch's CrossEntropyLoss. In case you want all wordpieces of a word to be labeled, you can
|
||||
initialize the tokenizer with `only_label_first_subword` set to `False`.
|
||||
|
||||
```python
|
||||
>>> from transformers import MarkupLMProcessor
|
||||
|
||||
>>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base")
|
||||
>>> processor.parse_html = False
|
||||
|
||||
>>> nodes = ["hello", "world", "how", "are"]
|
||||
>>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"]
|
||||
>>> node_labels = [1, 2, 2, 1]
|
||||
>>> encoding = processor(nodes=nodes, xpaths=xpaths, node_labels=node_labels, return_tensors="pt")
|
||||
>>> print(encoding.keys())
|
||||
dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq', 'labels'])
|
||||
```
|
||||
|
||||
**Use case 4: web page question answering (inference), parse_html=True**
|
||||
|
||||
For question answering tasks on web pages, you can provide a question to the processor. By default, the
|
||||
processor will use the feature extractor to get all nodes and xpaths, and create [CLS] question tokens [SEP] word tokens [SEP].
|
||||
|
||||
```python
|
||||
>>> from transformers import MarkupLMProcessor
|
||||
|
||||
>>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base")
|
||||
|
||||
>>> html_string = """
|
||||
... <!DOCTYPE html>
|
||||
... <html>
|
||||
... <head>
|
||||
... <title>Hello world</title>
|
||||
... </head>
|
||||
... <body>
|
||||
|
||||
... <h1>Welcome</h1>
|
||||
... <p>My name is Niels.</p>
|
||||
|
||||
... </body>
|
||||
... </html>"""
|
||||
|
||||
>>> question = "What's his name?"
|
||||
>>> encoding = processor(html_string, questions=question, return_tensors="pt")
|
||||
>>> print(encoding.keys())
|
||||
dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq'])
|
||||
```
|
||||
|
||||
**Use case 5: web page question answering (inference), parse_html=False**
|
||||
|
||||
For question answering tasks (such as WebSRC), you can provide a question to the processor. If you have extracted
|
||||
all nodes and xpaths yourself, you can provide them directly to the processor. Make sure to set `parse_html` to `False`.
|
||||
|
||||
```python
|
||||
>>> from transformers import MarkupLMProcessor
|
||||
|
||||
>>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base")
|
||||
>>> processor.parse_html = False
|
||||
|
||||
>>> nodes = ["hello", "world", "how", "are"]
|
||||
>>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"]
|
||||
>>> question = "What's his name?"
|
||||
>>> encoding = processor(nodes=nodes, xpaths=xpaths, questions=question, return_tensors="pt")
|
||||
>>> print(encoding.keys())
|
||||
dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq'])
|
||||
```
|
||||
|
||||
## MarkupLMConfig
|
||||
|
||||
[[autodoc]] MarkupLMConfig
|
||||
- all
|
||||
|
||||
## MarkupLMFeatureExtractor
|
||||
|
||||
[[autodoc]] MarkupLMFeatureExtractor
|
||||
- __call__
|
||||
|
||||
## MarkupLMTokenizer
|
||||
|
||||
[[autodoc]] MarkupLMTokenizer
|
||||
- build_inputs_with_special_tokens
|
||||
- get_special_tokens_mask
|
||||
- create_token_type_ids_from_sequences
|
||||
- save_vocabulary
|
||||
|
||||
## MarkupLMTokenizerFast
|
||||
|
||||
[[autodoc]] MarkupLMTokenizerFast
|
||||
- all
|
||||
|
||||
## MarkupLMProcessor
|
||||
|
||||
[[autodoc]] MarkupLMProcessor
|
||||
- __call__
|
||||
|
||||
## MarkupLMModel
|
||||
|
||||
[[autodoc]] MarkupLMModel
|
||||
- forward
|
||||
|
||||
## MarkupLMForSequenceClassification
|
||||
|
||||
[[autodoc]] MarkupLMForSequenceClassification
|
||||
- forward
|
||||
|
||||
## MarkupLMForTokenClassification
|
||||
|
||||
[[autodoc]] MarkupLMForTokenClassification
|
||||
- forward
|
||||
|
||||
## MarkupLMForQuestionAnswering
|
||||
|
||||
[[autodoc]] MarkupLMForQuestionAnswering
|
||||
- forward
|
||||
@ -58,6 +58,7 @@ This model was contributed by [francesco](https://huggingface.co/francesco). The
|
||||
- encode_inputs
|
||||
- post_process_segmentation
|
||||
- post_process_semantic_segmentation
|
||||
- post_process_instance_segmentation
|
||||
- post_process_panoptic_segmentation
|
||||
|
||||
## MaskFormerModel
|
||||
|
||||
@ -59,6 +59,11 @@ The original code can be found [here](https://github.com/facebookresearch/metase
|
||||
[[autodoc]] OPTForSequenceClassification
|
||||
- forward
|
||||
|
||||
## OPTForQuestionAnswering
|
||||
|
||||
[[autodoc]] OPTForQuestionAnswering
|
||||
- forward
|
||||
|
||||
## FlaxOPTModel
|
||||
|
||||
[[autodoc]] FlaxOPTModel
|
||||
|
||||
73
docs/source/en/model_doc/time_series_transformer.mdx
Normal file
@ -0,0 +1,73 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Time Series Transformer
|
||||
|
||||
<Tip>
|
||||
|
||||
This is a recently introduced model so the API hasn't been tested extensively. There may be some bugs or slight
breaking changes to fix in the future. If you see something strange, file a [GitHub Issue](https://github.com/huggingface/transformers/issues/new?assignees=&labels=&template=bug-report.md&title).
|
||||
|
||||
</Tip>
|
||||
|
||||
## Overview
|
||||
|
||||
The Time Series Transformer model is a vanilla encoder-decoder Transformer for time series forecasting.
|
||||
|
||||
Tips:
|
||||
|
||||
- Similar to other models in the library, [`TimeSeriesTransformerModel`] is the raw Transformer without any head on top, and [`TimeSeriesTransformerForPrediction`]
|
||||
adds a distribution head on top of the former, which can be used for time-series forecasting. Note that this is a so-called probabilistic forecasting model, not a
|
||||
point forecasting model. This means that the model learns a distribution, from which one can sample. The model doesn't directly output values.
|
||||
- [`TimeSeriesTransformerForPrediction`] consists of 2 blocks: an encoder, which takes a `context_length` of time series values as input (called `past_values`),
|
||||
and a decoder, which predicts a `prediction_length` of time series values into the future (called `future_values`). During training, one needs to provide
|
||||
pairs of (`past_values` and `future_values`) to the model.
|
||||
- In addition to the raw (`past_values` and `future_values`), one typically provides additional features to the model. These can be the following:
|
||||
- `past_time_features`: temporal features which the model will add to `past_values`. These serve as "positional encodings" for the Transformer encoder.
|
||||
Examples are "day of the month", "month of the year", etc. as scalar values (and then stacked together as a vector).
|
||||
e.g. if a given time-series value was obtained on the 11th of August, then one could have [11, 8] as time feature vector (11 being "day of the month", 8 being "month of the year").
|
||||
- `future_time_features`: temporal features which the model will add to `future_values`. These serve as "positional encodings" for the Transformer decoder.
|
||||
Examples are "day of the month", "month of the year", etc. as scalar values (and then stacked together as a vector).
|
||||
e.g. if a given time-series value was obtained on the 11th of August, then one could have [11, 8] as time feature vector (11 being "day of the month", 8 being "month of the year").
|
||||
- `static_categorical_features`: categorical features which are static over time (i.e., have the same value for all `past_values` and `future_values`).
|
||||
An example here is the store ID or region ID that identifies a given time-series.
|
||||
Note that these features need to be known for ALL data points (also those in the future).
|
||||
- `static_real_features`: real-valued features which are static over time (i.e., have the same value for all `past_values` and `future_values`).
|
||||
An example here is the image representation of the product for which you have the time-series values (like the [ResNet](resnet) embedding of a "shoe" picture,
|
||||
if your time-series is about the sales of shoes).
|
||||
Note that these features need to be known for ALL data points (also those in the future).
|
||||
- The model is trained using "teacher-forcing", similar to how a Transformer is trained for machine translation. This means that, during training, one shifts the
|
||||
`future_values` one position to the right as input to the decoder, prepended by the last value of `past_values`. At each time step, the model needs to predict the
|
||||
next target. So the set-up of training is similar to a GPT model for language, except that there's no notion of `decoder_start_token_id` (we just use the last value
|
||||
of the context as initial input for the decoder).
|
||||
- At inference time, we give the final value of the `past_values` as input to the decoder. Next, we can sample from the model to make a prediction at the next time step,
|
||||
which is then fed to the decoder in order to make the next prediction (also called autoregressive generation).
|
||||
|
||||
|
||||
This model was contributed by [kashif](https://huggingface.co/kashif).
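
Building on the tips above, the following is only a minimal sketch of how the classes fit together. It instantiates a randomly initialized probabilistic forecaster from a hypothetical configuration (a context window of 48 past values, forecasting 24 future values) instead of loading pretrained weights, and it leaves out the feature tensors that a real training or inference pass would also require.

```python
from transformers import TimeSeriesTransformerConfig, TimeSeriesTransformerForPrediction

# Hypothetical setup: forecast 24 future values from 48 past values
config = TimeSeriesTransformerConfig(
    context_length=48,
    prediction_length=24,
)
model = TimeSeriesTransformerForPrediction(config)
print(model.config.context_length, model.config.prediction_length)
```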
|
||||
|
||||
|
||||
## TimeSeriesTransformerConfig
|
||||
|
||||
[[autodoc]] TimeSeriesTransformerConfig
|
||||
|
||||
|
||||
## TimeSeriesTransformerModel
|
||||
|
||||
[[autodoc]] TimeSeriesTransformerModel
|
||||
- forward
|
||||
|
||||
|
||||
## TimeSeriesTransformerForPrediction
|
||||
|
||||
[[autodoc]] TimeSeriesTransformerForPrediction
|
||||
- forward
|
||||
79
docs/source/en/model_doc/whisper.mdx
Normal file
@ -0,0 +1,79 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Whisper
|
||||
|
||||
## Overview
|
||||
|
||||
The Whisper model was proposed in [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*We study the capabilities of speech processing systems trained simply to predict large amounts of transcripts of audio on the internet. When scaled to 680,000 hours of multilingual and multitask supervision, the resulting models generalize well to standard benchmarks and are often competitive with prior fully supervised results but in a zeroshot transfer setting without the need for any finetuning. When compared to humans, the models approach their accuracy and robustness. We are releasing models and inference code to serve as a foundation for further work on robust speech processing.*
|
||||
|
||||
|
||||
Tips:
|
||||
|
||||
- The model usually performs well without requiring any finetuning.
|
||||
- The model follows a classic encoder-decoder architecture, which means that it relies on the [`~generation_utils.GenerationMixin.generate`] function for inference.
|
||||
- One can use [`WhisperProcessor`] to prepare audio for the model, and decode the predicted IDs back into text.
|
||||
|
||||
This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). The Tensorflow version of this model was contributed by [amyeroberts](https://huggingface.co/amyeroberts).
|
||||
The original code can be found [here](https://github.com/openai/whisper).
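
A minimal sketch of that workflow is shown below. It assumes the `openai/whisper-tiny.en` checkpoint and the small LibriSpeech dummy dataset from 🤗 Datasets are available; swap in your own audio as needed.

```python
from datasets import load_dataset
from transformers import WhisperForConditionalGeneration, WhisperProcessor

checkpoint = "openai/whisper-tiny.en"  # assumed English-only checkpoint on the Hub
processor = WhisperProcessor.from_pretrained(checkpoint)
model = WhisperForConditionalGeneration.from_pretrained(checkpoint)

# A short English speech sample, used here only for illustration
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
audio = dataset[0]["audio"]

# Turn the waveform into log-Mel input features, generate token IDs, then decode them back into text
inputs = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt")
predicted_ids = model.generate(inputs.input_features)
print(processor.batch_decode(predicted_ids, skip_special_tokens=True)[0])
```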
|
||||
|
||||
|
||||
## WhisperConfig
|
||||
|
||||
[[autodoc]] WhisperConfig
|
||||
|
||||
## WhisperTokenizer
|
||||
|
||||
[[autodoc]] WhisperTokenizer
|
||||
- build_inputs_with_special_tokens
|
||||
- get_special_tokens_mask
|
||||
- create_token_type_ids_from_sequences
|
||||
- save_vocabulary
|
||||
|
||||
## WhisperFeatureExtractor
|
||||
|
||||
[[autodoc]] WhisperFeatureExtractor
|
||||
- __call__
|
||||
|
||||
## WhisperProcessor
|
||||
|
||||
[[autodoc]] WhisperProcessor
|
||||
- __call__
|
||||
- from_pretrained
|
||||
- save_pretrained
|
||||
- batch_decode
|
||||
- decode
|
||||
|
||||
## WhisperModel
|
||||
|
||||
[[autodoc]] WhisperModel
|
||||
- forward
|
||||
|
||||
## WhisperForConditionalGeneration
|
||||
|
||||
[[autodoc]] WhisperForConditionalGeneration
|
||||
- forward
|
||||
|
||||
|
||||
## TFWhisperModel
|
||||
|
||||
[[autodoc]] TFWhisperModel
|
||||
- call
|
||||
|
||||
## TFWhisperForConditionalGeneration
|
||||
|
||||
[[autodoc]] TFWhisperForConditionalGeneration
|
||||
- call
|
||||
@ -10,36 +10,36 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Export 🤗 Transformers Models
|
||||
# Export to ONNX
|
||||
|
||||
If you need to deploy 🤗 Transformers models in production environments, we
|
||||
recommend exporting them to a serialized format that can be loaded and executed
|
||||
on specialized runtimes and hardware. In this guide, we'll show you how to
|
||||
export 🤗 Transformers models in two widely used formats: ONNX and TorchScript.
|
||||
If you need to deploy 🤗 Transformers models in production environments, we recommend
|
||||
exporting them to a serialized format that can be loaded and executed on specialized
|
||||
runtimes and hardware. In this guide, we'll show you how to export 🤗 Transformers
|
||||
models to [ONNX (Open Neural Network eXchange)](http://onnx.ai).
|
||||
|
||||
Once exported, a model can optimized for inference via techniques such as
|
||||
quantization and pruning. If you are interested in optimizing your models to run
|
||||
with maximum efficiency, check out the [🤗 Optimum
|
||||
<Tip>
|
||||
|
||||
Once exported, a model can be optimized for inference via techniques such as
|
||||
quantization and pruning. If you are interested in optimizing your models to run with
|
||||
maximum efficiency, check out the [🤗 Optimum
|
||||
library](https://github.com/huggingface/optimum).
|
||||
|
||||
## ONNX
|
||||
</Tip>
|
||||
|
||||
The [ONNX (Open Neural Network eXchange)](http://onnx.ai) project is an open
|
||||
standard that defines a common set of operators and a common file format to
|
||||
represent deep learning models in a wide variety of frameworks, including
|
||||
PyTorch and TensorFlow. When a model is exported to the ONNX format, these
|
||||
operators are used to construct a computational graph (often called an
|
||||
_intermediate representation_) which represents the flow of data through the
|
||||
neural network.
|
||||
ONNX is an open standard that defines a common set of operators and a common file format
|
||||
to represent deep learning models in a wide variety of frameworks, including PyTorch and
|
||||
TensorFlow. When a model is exported to the ONNX format, these operators are used to
|
||||
construct a computational graph (often called an _intermediate representation_) which
|
||||
represents the flow of data through the neural network.
|
||||
|
||||
By exposing a graph with standardized operators and data types, ONNX makes it
|
||||
easy to switch between frameworks. For example, a model trained in PyTorch can
|
||||
be exported to ONNX format and then imported in TensorFlow (and vice versa).
|
||||
By exposing a graph with standardized operators and data types, ONNX makes it easy to
|
||||
switch between frameworks. For example, a model trained in PyTorch can be exported to
|
||||
ONNX format and then imported in TensorFlow (and vice versa).
|
||||
|
||||
🤗 Transformers provides a `transformers.onnx` package that enables you to
|
||||
convert model checkpoints to an ONNX graph by leveraging configuration objects.
|
||||
These configuration objects come ready made for a number of model architectures,
|
||||
and are designed to be easily extendable to other architectures.
|
||||
🤗 Transformers provides a [`transformers.onnx`](main_classes/onnx) package that enables
|
||||
you to convert model checkpoints to an ONNX graph by leveraging configuration objects.
|
||||
These configuration objects come ready made for a number of model architectures, and are
|
||||
designed to be easily extendable to other architectures.
|
||||
|
||||
Ready-made configurations include the following architectures:
|
||||
|
||||
@ -94,7 +94,9 @@ Ready-made configurations include the following architectures:
|
||||
- RoFormer
|
||||
- SegFormer
|
||||
- SqueezeBERT
|
||||
- Swin Transformer
|
||||
- T5
|
||||
- Vision Encoder decoder
|
||||
- ViT
|
||||
- XLM
|
||||
- XLM-RoBERTa
|
||||
@ -106,10 +108,10 @@ In the next two sections, we'll show you how to:
|
||||
* Export a supported model using the `transformers.onnx` package.
|
||||
* Export a custom model for an unsupported architecture.
|
||||
|
||||
### Exporting a model to ONNX
|
||||
## Exporting a model to ONNX
|
||||
|
||||
To export a 🤗 Transformers model to ONNX, you'll first need to install some
|
||||
extra dependencies:
|
||||
To export a 🤗 Transformers model to ONNX, you'll first need to install some extra
|
||||
dependencies:
|
||||
|
||||
```bash
|
||||
pip install transformers[onnx]
|
||||
@ -141,7 +143,7 @@ Exporting a checkpoint using a ready-made configuration can be done as follows:
|
||||
python -m transformers.onnx --model=distilbert-base-uncased onnx/
|
||||
```
|
||||
|
||||
which should show the following logs:
|
||||
You should see the following logs:
|
||||
|
||||
```bash
|
||||
Validating ONNX model...
|
||||
@ -152,13 +154,13 @@ Validating ONNX model...
|
||||
All good, model saved at: onnx/model.onnx
|
||||
```
|
||||
|
||||
This exports an ONNX graph of the checkpoint defined by the `--model` argument.
|
||||
In this example it is `distilbert-base-uncased`, but it can be any checkpoint on
|
||||
the Hugging Face Hub or one that's stored locally.
|
||||
This exports an ONNX graph of the checkpoint defined by the `--model` argument. In this
|
||||
example, it is `distilbert-base-uncased`, but it can be any checkpoint on the Hugging
|
||||
Face Hub or one that's stored locally.
|
||||
|
||||
The resulting `model.onnx` file can then be run on one of the [many
|
||||
accelerators](https://onnx.ai/supported-tools.html#deployModel) that support the
|
||||
ONNX standard. For example, we can load and run the model with [ONNX
|
||||
accelerators](https://onnx.ai/supported-tools.html#deployModel) that support the ONNX
|
||||
standard. For example, we can load and run the model with [ONNX
|
||||
Runtime](https://onnxruntime.ai/) as follows:
|
||||
|
||||
```python
|
||||
@ -172,9 +174,8 @@ Runtime](https://onnxruntime.ai/) as follows:
|
||||
>>> outputs = session.run(output_names=["last_hidden_state"], input_feed=dict(inputs))
|
||||
```
|
||||
|
||||
The required output names (i.e. `["last_hidden_state"]`) can be obtained by
|
||||
taking a look at the ONNX configuration of each model. For example, for
|
||||
DistilBERT we have:
|
||||
The required output names (like `["last_hidden_state"]`) can be obtained by taking a
|
||||
look at the ONNX configuration of each model. For example, for DistilBERT we have:
|
||||
|
||||
```python
|
||||
>>> from transformers.models.distilbert import DistilBertConfig, DistilBertOnnxConfig
|
||||
@ -185,20 +186,19 @@ DistilBERT we have:
|
||||
["last_hidden_state"]
|
||||
```
|
||||
|
||||
The process is identical for TensorFlow checkpoints on the Hub. For example, we
|
||||
can export a pure TensorFlow checkpoint from the [Keras
|
||||
The process is identical for TensorFlow checkpoints on the Hub. For example, we can
|
||||
export a pure TensorFlow checkpoint from the [Keras
|
||||
organization](https://huggingface.co/keras-io) as follows:
|
||||
|
||||
```bash
|
||||
python -m transformers.onnx --model=keras-io/transformers-qa onnx/
|
||||
```
|
||||
|
||||
To export a model that's stored locally, you'll need to have the model's weights
|
||||
and tokenizer files stored in a directory. For example, we can load and save a
|
||||
checkpoint as follows:
|
||||
To export a model that's stored locally, you'll need to have the model's weights and
|
||||
tokenizer files stored in a directory. For example, we can load and save a checkpoint as
|
||||
follows:
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
<frameworkcontent> <pt>
|
||||
```python
|
||||
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
||||
|
||||
@ -216,8 +216,7 @@ argument of the `transformers.onnx` package to the desired directory:
|
||||
```bash
|
||||
python -m transformers.onnx --model=local-pt-checkpoint onnx/
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
</pt> <tf>
|
||||
```python
|
||||
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
|
||||
|
||||
@ -235,14 +234,13 @@ argument of the `transformers.onnx` package to the desired directory:
|
||||
```bash
|
||||
python -m transformers.onnx --model=local-tf-checkpoint onnx/
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
</tf> </frameworkcontent>
|
||||
|
||||
### Selecting features for different model topologies
|
||||
## Selecting features for different model tasks
|
||||
|
||||
Each ready-made configuration comes with a set of _features_ that enable you to
|
||||
export models for different types of topologies or tasks. As shown in the table
|
||||
below, each feature is associated with a different auto class:
|
||||
Each ready-made configuration comes with a set of _features_ that enable you to export
|
||||
models for different types of tasks. As shown in the table below, each feature is
|
||||
associated with a different `AutoClass`:
|
||||
|
||||
| Feature | Auto Class |
|
||||
| ------------------------------------ | ------------------------------------ |
|
||||
@ -255,7 +253,7 @@ below, each feature is associated with a different auto class:
|
||||
| `token-classification` | `AutoModelForTokenClassification` |
|
||||
|
||||
For each configuration, you can find the list of supported features via the
|
||||
`FeaturesManager`. For example, for DistilBERT we have:
|
||||
[`~transformers.onnx.FeaturesManager`]. For example, for DistilBERT we have:
|
||||
|
||||
```python
|
||||
>>> from transformers.onnx.features import FeaturesManager
|
||||
@ -266,15 +264,15 @@ For each configuration, you can find the list of supported features via the
|
||||
```
|
||||
|
||||
You can then pass one of these features to the `--feature` argument in the
|
||||
`transformers.onnx` package. For example, to export a text-classification model
|
||||
we can pick a fine-tuned model from the Hub and run:
|
||||
`transformers.onnx` package. For example, to export a text-classification model we can
|
||||
pick a fine-tuned model from the Hub and run:
|
||||
|
||||
```bash
|
||||
python -m transformers.onnx --model=distilbert-base-uncased-finetuned-sst-2-english \
|
||||
--feature=sequence-classification onnx/
|
||||
```
|
||||
|
||||
which will display the following logs:
|
||||
This displays the following logs:
|
||||
|
||||
```bash
|
||||
Validating ONNX model...
|
||||
@ -285,37 +283,42 @@ Validating ONNX model...
|
||||
All good, model saved at: onnx/model.onnx
|
||||
```
|
||||
|
||||
Notice that in this case, the output names from the fine-tuned model are
|
||||
`logits` instead of the `last_hidden_state` we saw with the
|
||||
`distilbert-base-uncased` checkpoint earlier. This is expected since the
|
||||
fine-tuned model has a sequence classification head.
|
||||
Notice that in this case, the output names from the fine-tuned model are `logits`
|
||||
instead of the `last_hidden_state` we saw with the `distilbert-base-uncased` checkpoint
|
||||
earlier. This is expected since the fine-tuned model has a sequence classification head.
|
||||
|
||||
<Tip>
|
||||
|
||||
The features that have a `with-past` suffix (e.g. `causal-lm-with-past`)
|
||||
correspond to model topologies with precomputed hidden states (key and values
|
||||
in the attention blocks) that can be used for fast autoregressive decoding.
|
||||
The features that have a `with-past` suffix (like `causal-lm-with-past`) correspond to
|
||||
model classes with precomputed hidden states (key and values in the attention blocks)
|
||||
that can be used for fast autoregressive decoding.
|
||||
|
||||
</Tip>
|
||||
|
||||
<Tip>
|
||||
|
||||
For `VisionEncoderDecoder` type models, the encoder and decoder parts are
|
||||
exported separately as two ONNX files named `encoder_model.onnx` and `decoder_model.onnx` respectively.
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
### Exporting a model for an unsupported architecture
|
||||
## Exporting a model for an unsupported architecture
|
||||
|
||||
If you wish to export a model whose architecture is not natively supported by
|
||||
the library, there are three main steps to follow:
|
||||
If you wish to export a model whose architecture is not natively supported by the
|
||||
library, there are three main steps to follow:
|
||||
|
||||
1. Implement a custom ONNX configuration.
|
||||
2. Export the model to ONNX.
|
||||
3. Validate the outputs of the PyTorch and exported models.
|
||||
|
||||
In this section, we'll look at how DistilBERT was implemented to show what's
|
||||
involved with each step.
|
||||
In this section, we'll look at how DistilBERT was implemented to show what's involved
|
||||
with each step.
|
||||
|
||||
#### Implementing a custom ONNX configuration
|
||||
### Implementing a custom ONNX configuration
|
||||
|
||||
Let's start with the ONNX configuration object. We provide three abstract
|
||||
classes that you should inherit from, depending on the type of model
|
||||
architecture you wish to export:
|
||||
Let's start with the ONNX configuration object. We provide three abstract classes that
|
||||
you should inherit from, depending on the type of model architecture you wish to export:
|
||||
|
||||
* Encoder-based models inherit from [`~onnx.config.OnnxConfig`]
|
||||
* Decoder-based models inherit from [`~onnx.config.OnnxConfigWithPast`]
|
||||
@ -347,25 +350,24 @@ Since DistilBERT is an encoder-based model, its configuration inherits from
|
||||
... )
|
||||
```
|
||||
|
||||
Every configuration object must implement the `inputs` property and return a
|
||||
mapping, where each key corresponds to an expected input, and each value
|
||||
indicates the axis of that input. For DistilBERT, we can see that two inputs are
|
||||
required: `input_ids` and `attention_mask`. These inputs have the same shape of
|
||||
`(batch_size, sequence_length)` which is why we see the same axes used in the
|
||||
configuration.
|
||||
Every configuration object must implement the `inputs` property and return a mapping,
|
||||
where each key corresponds to an expected input, and each value indicates the axis of
|
||||
that input. For DistilBERT, we can see that two inputs are required: `input_ids` and
|
||||
`attention_mask`. These inputs have the same shape of `(batch_size, sequence_length)`
|
||||
which is why we see the same axes used in the configuration.
|
||||
|
||||
<Tip>
|
||||
|
||||
Notice that `inputs` property for `DistilBertOnnxConfig` returns an
|
||||
`OrderedDict`. This ensures that the inputs are matched with their relative
|
||||
position within the `PreTrainedModel.forward()` method when tracing the graph.
|
||||
We recommend using an `OrderedDict` for the `inputs` and `outputs` properties
|
||||
when implementing custom ONNX configurations.
|
||||
Notice that `inputs` property for `DistilBertOnnxConfig` returns an `OrderedDict`. This
|
||||
ensures that the inputs are matched with their relative position within the
|
||||
`PreTrainedModel.forward()` method when tracing the graph. We recommend using an
|
||||
`OrderedDict` for the `inputs` and `outputs` properties when implementing custom ONNX
|
||||
configurations.
|
||||
|
||||
</Tip>
|
||||
|
||||
Once you have implemented an ONNX configuration, you can instantiate it by
|
||||
providing the base model's configuration as follows:
|
||||
Once you have implemented an ONNX configuration, you can instantiate it by providing the
|
||||
base model's configuration as follows:
|
||||
|
||||
```python
|
||||
>>> from transformers import AutoConfig
|
||||
@ -374,8 +376,8 @@ providing the base model's configuration as follows:
|
||||
>>> onnx_config = DistilBertOnnxConfig(config)
|
||||
```
|
||||
|
||||
The resulting object has several useful properties. For example you can view the
|
||||
ONNX operator set that will be used during the export:
|
||||
The resulting object has several useful properties. For example, you can view the ONNX
|
||||
operator set that will be used during the export:
|
||||
|
||||
```python
|
||||
>>> print(onnx_config.default_onnx_opset)
|
||||
@ -389,15 +391,14 @@ You can also view the outputs associated with the model as follows:
|
||||
OrderedDict([("last_hidden_state", {0: "batch", 1: "sequence"})])
|
||||
```
|
||||

Notice that the outputs property follows the same structure as the inputs; it returns an
`OrderedDict` of named outputs and their shapes. The output structure is linked to the
choice of feature that the configuration is initialised with. By default, the ONNX
configuration is initialized with the `default` feature that corresponds to exporting a
model loaded with the `AutoModel` class. If you want to export a model for another task,
just provide a different feature to the `task` argument when you initialize the ONNX
configuration. For example, if we wished to export DistilBERT with a sequence
classification head, we could use:

```python
>>> from transformers import AutoConfig
```

@@ -410,18 +411,18 @@ OrderedDict([('logits', {0: 'batch'})])
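
The middle of this snippet is elided by the hunk above. A minimal sketch of what such a task-specific configuration could look like (the checkpoint name is illustrative), which would produce the `logits` output shown in the hunk context:

```python
>>> from transformers import AutoConfig

>>> config = AutoConfig.from_pretrained("distilbert-base-uncased")
>>> onnx_config_for_seq_clf = DistilBertOnnxConfig(config, task="sequence-classification")
>>> print(onnx_config_for_seq_clf.outputs)
OrderedDict([('logits', {0: 'batch'})])
```
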
<Tip>

All of the base properties and methods associated with [`~onnx.config.OnnxConfig`] and
the other configuration classes can be overridden if needed. Check out [`BartOnnxConfig`]
for an advanced example.

</Tip>

### Exporting the model

Once you have implemented the ONNX configuration, the next step is to export the model.
Here we can use the `export()` function provided by the `transformers.onnx` package.
This function expects the ONNX configuration, along with the base model and tokenizer,
and the path to save the exported file:

```python
>>> from pathlib import Path

@@ -436,10 +437,9 @@ with the base model and tokenizer, and the path to save the exported file:

>>> onnx_inputs, onnx_outputs = export(tokenizer, base_model, onnx_config, onnx_config.default_onnx_opset, onnx_path)
```
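
The setup lines are elided by the hunk above. A minimal sketch of the full export call, assuming an illustrative DistilBERT checkpoint and reusing the `onnx_config` created earlier:

```python
>>> from pathlib import Path
>>> from transformers import AutoModel, AutoTokenizer
>>> from transformers.onnx import export

>>> onnx_path = Path("model.onnx")
>>> model_ckpt = "distilbert-base-uncased"
>>> base_model = AutoModel.from_pretrained(model_ckpt)
>>> tokenizer = AutoTokenizer.from_pretrained(model_ckpt)

>>> onnx_inputs, onnx_outputs = export(tokenizer, base_model, onnx_config, onnx_config.default_onnx_opset, onnx_path)
```
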

The `onnx_inputs` and `onnx_outputs` returned by the `export()` function are lists of
the keys defined in the `inputs` and `outputs` properties of the configuration. Once the
model is exported, you can test that the model is well formed as follows:

```python
>>> import onnx
```

@@ -450,21 +450,20 @@ formed as follows:
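
The rest of this snippet is elided by the hunk above. A minimal sketch of such a well-formedness check with the `onnx` package (the file name is illustrative):

```python
>>> import onnx

>>> onnx_model = onnx.load("model.onnx")
>>> onnx.checker.check_model(onnx_model)
```
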

<Tip>

If your model is larger than 2GB, you will see that many additional files are created
during the export. This is _expected_ because ONNX uses [Protocol
Buffers](https://developers.google.com/protocol-buffers/) to store the model and these
have a size limit of 2GB. See the [ONNX
documentation](https://github.com/onnx/onnx/blob/master/docs/ExternalData.md) for
instructions on how to load models with external data.

</Tip>


### Validating the model outputs

The final step is to validate that the outputs from the base and exported model agree
within some absolute tolerance. Here we can use the `validate_model_outputs()` function
provided by the `transformers.onnx` package as follows:

```python
>>> from transformers.onnx import validate_model_outputs

@@ -474,220 +473,23 @@ as follows:

... )
```
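
Only fragments of the call survive the hunk above. A minimal sketch of what a complete call could look like, reusing the objects from the export step and the tolerance defined on the configuration:

```python
>>> from transformers.onnx import validate_model_outputs

>>> validate_model_outputs(
...     onnx_config, tokenizer, base_model, onnx_path, onnx_outputs, onnx_config.atol_for_validation
... )
```
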

This function uses the [`~transformers.onnx.OnnxConfig.generate_dummy_inputs`] method to
generate inputs for the base and exported model, and the absolute tolerance can be
defined in the configuration. We generally find numerical agreement in the 1e-6 to 1e-4
range, although anything smaller than 1e-3 is likely to be OK.

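For reference, a minimal sketch of generating such dummy inputs directly (shown here for PyTorch tensors; the printed keys assume the DistilBERT configuration used above):

```python
>>> from transformers import TensorType

>>> dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
>>> print(list(dummy_inputs.keys()))
['input_ids', 'attention_mask']
```
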

## Contributing a new configuration to 🤗 Transformers


We are looking to expand the set of ready-made configurations and welcome contributions
from the community! If you would like to contribute your addition to the library, you
will need to:

* Implement the ONNX configuration in the corresponding `configuration_<model_name>.py`
  file
* Include the model architecture and corresponding features in
  [`~onnx.features.FeatureManager`]
* Add your model architecture to the tests in `test_onnx_v2.py`

Check out how the configuration for [IBERT was
contributed](https://github.com/huggingface/transformers/pull/14868/files) to get an
idea of what's involved.

225 docs/source/en/torchscript.mdx Normal file
@@ -0,0 +1,225 @@

<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Export to TorchScript

<Tip>

This is the very beginning of our experiments with TorchScript and we are still
exploring its capabilities with variable-input-size models. It is a focus of interest to
us and we will deepen our analysis in upcoming releases, with more code examples, a more
flexible implementation, and benchmarks comparing Python-based code with compiled
TorchScript.

</Tip>

According to the [TorchScript documentation](https://pytorch.org/docs/stable/jit.html):

> TorchScript is a way to create serializable and optimizable models from PyTorch code.

There are two PyTorch modules, [JIT and
TRACE](https://pytorch.org/docs/stable/jit.html), that allow developers to export their
models to be reused in other programs like efficiency-oriented C++ programs.

We provide an interface that allows you to export 🤗 Transformers models to TorchScript
so they can be reused in a different environment than PyTorch-based Python programs.
Here, we explain how to export and use our models using TorchScript.

Exporting a model requires two things:

- model instantiation with the `torchscript` flag
- a forward pass with dummy inputs

These necessities imply several things developers should be careful about, as detailed
below.

## TorchScript flag and tied weights

The `torchscript` flag is necessary because most of the 🤗 Transformers language models
have tied weights between their `Embedding` layer and their `Decoding` layer.
TorchScript does not allow you to export models that have tied weights, so it is
necessary to untie and clone the weights beforehand.

Models instantiated with the `torchscript` flag have their `Embedding` layer and
`Decoding` layer separated, which means that they should not be trained down the line.
Training would desynchronize the two layers, leading to unexpected results.

This is not the case for models that do not have a language model head, as those do not
have tied weights. These models can be safely exported without the `torchscript` flag.

## Dummy inputs and standard lengths

The dummy inputs are used for a model's forward pass. While the inputs' values are
propagated through the layers, PyTorch keeps track of the different operations executed
on each tensor. These recorded operations are then used to create the *trace* of the
model.

The trace is created relative to the inputs' dimensions. It is therefore constrained by
the dimensions of the dummy input, and will not work for any other sequence length or
batch size. When trying with a different size, the following error is raised:

```
The expanded size of the tensor (3) must match the existing size (7) at non-singleton dimension 2
```

We recommend you trace the model with a dummy input size at least as large as the
largest input that will be fed to the model during inference. Padding can help fill the
missing values. However, since the model is traced with a larger input size, the
dimensions of the matrix will also be large, resulting in more calculations.

Be careful of the total number of operations done on each input and follow the
performance closely when exporting varying sequence-length models.
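
For example, a minimal sketch (assuming a BERT-style tokenizer; the 512-token length is only illustrative) of padding the dummy input to a fixed maximum length before tracing:

```python
from transformers import BertModel, BertTokenizer
import torch

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertModel.from_pretrained("bert-base-uncased", torchscript=True)
model.eval()

# Pad every input to the same fixed length so the trace covers the largest expected size
encoded = tokenizer(
    "Who was Jim Henson?",
    padding="max_length",
    max_length=512,
    truncation=True,
    return_tensors="pt",
)

traced_model = torch.jit.trace(model, [encoded["input_ids"], encoded["attention_mask"]])
```
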

## Using TorchScript in Python

This section demonstrates how to save and load models as well as how to use the trace
for inference.

### Saving a model

To export a `BertModel` with TorchScript, instantiate `BertModel` from the `BertConfig`
class and then save it to disk under the filename `traced_bert.pt`:

```python
from transformers import BertModel, BertTokenizer, BertConfig
import torch

enc = BertTokenizer.from_pretrained("bert-base-uncased")

# Tokenizing input text
text = "[CLS] Who was Jim Henson ? [SEP] Jim Henson was a puppeteer [SEP]"
tokenized_text = enc.tokenize(text)

# Masking one of the input tokens
masked_index = 8
tokenized_text[masked_index] = "[MASK]"
indexed_tokens = enc.convert_tokens_to_ids(tokenized_text)
segments_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1]

# Creating a dummy input
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
dummy_input = [tokens_tensor, segments_tensors]

# Initializing the model with the torchscript flag
# Flag set to True even though it is not necessary as this model does not have an LM Head.
config = BertConfig(
    vocab_size_or_config_json_file=32000,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    torchscript=True,
)

# Instantiating the model
model = BertModel(config)

# The model needs to be in evaluation mode
model.eval()

# If you are instantiating the model with *from_pretrained* you can also easily set the TorchScript flag
model = BertModel.from_pretrained("bert-base-uncased", torchscript=True)

# Creating the trace
traced_model = torch.jit.trace(model, [tokens_tensor, segments_tensors])
torch.jit.save(traced_model, "traced_bert.pt")
```

### Loading a model

Now you can load the previously saved `BertModel`, `traced_bert.pt`, from disk and use
it on the previously initialised `dummy_input`:

```python
loaded_model = torch.jit.load("traced_bert.pt")
loaded_model.eval()

all_encoder_layers, pooled_output = loaded_model(*dummy_input)
```

### Using a traced model for inference

Use the traced model for inference by using its `__call__` dunder method:

```python
traced_model(tokens_tensor, segments_tensors)
```

## Deploy Hugging Face TorchScript models to AWS with the Neuron SDK

AWS introduced the [Amazon EC2 Inf1](https://aws.amazon.com/ec2/instance-types/inf1/)
instance family for low cost, high performance machine learning inference in the cloud.
The Inf1 instances are powered by the AWS Inferentia chip, a custom-built hardware
accelerator, specializing in deep learning inferencing workloads. [AWS
Neuron](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/#) is the SDK for
Inferentia that supports tracing and optimizing transformers models for deployment on
Inf1. The Neuron SDK provides:

1. Easy-to-use API with one line of code change to trace and optimize a TorchScript
   model for inference in the cloud.
2. Out of the box performance optimizations for [improved
   cost-performance](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/benchmark/).
3. Support for Hugging Face transformers models built with either
   [PyTorch](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/bert_tutorial/tutorial_pretrained_bert.html)
   or
   [TensorFlow](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/tensorflow/huggingface_bert/huggingface_bert.html).

### Implications

Transformers models based on the [BERT (Bidirectional Encoder Representations from
Transformers)](https://huggingface.co/docs/transformers/main/model_doc/bert)
architecture, or its variants such as
[distilBERT](https://huggingface.co/docs/transformers/main/model_doc/distilbert) and
[roBERTa](https://huggingface.co/docs/transformers/main/model_doc/roberta), run best on
Inf1 for non-generative tasks such as extractive question answering, sequence
classification, and token classification. However, text generation tasks can still be
adapted to run on Inf1 according to this [AWS Neuron MarianMT
tutorial](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/src/examples/pytorch/transformers-marianmt.html).
More information about models that can be converted out of the box on Inferentia can be
found in the [Model Architecture
Fit](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/models/models-inferentia.html#models-inferentia)
section of the Neuron documentation.

### Dependencies

Using AWS Neuron to convert models requires a [Neuron SDK
environment](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/neuron-guide/neuron-frameworks/pytorch-neuron/index.html#installation-guide)
which comes preconfigured on [AWS Deep Learning
AMI](https://docs.aws.amazon.com/dlami/latest/devguide/tutorial-inferentia-launching.html).

### Converting a model for AWS Neuron

Convert a model for AWS Neuron using the same code from [Using TorchScript in
Python](serialization#using-torchscript-in-python) to trace a `BertModel`. Import the
`torch.neuron` framework extension to access the components of the Neuron SDK through a
Python API:

```python
from transformers import BertModel, BertTokenizer, BertConfig
import torch
import torch.neuron
```

You only need to modify the following line:

```diff
- torch.jit.trace(model, [tokens_tensor, segments_tensors])
+ torch.neuron.trace(model, [tokens_tensor, segments_tensors])
```

This enables the Neuron SDK to trace the model and optimize it for Inf1 instances.
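
A minimal sketch of the full call (assuming the `torch.neuron` extension from the Neuron SDK is installed; a Neuron-traced model is still a TorchScript module, so it can be saved the usual way):

```python
import torch
import torch.neuron  # provided by the AWS Neuron SDK

# Reusing `model`, `tokens_tensor` and `segments_tensors` from the tracing example above
neuron_model = torch.neuron.trace(model, [tokens_tensor, segments_tensors])

# The result is a TorchScript module and can be saved and reloaded like any other trace
torch.jit.save(neuron_model, "neuron_bert.pt")
```
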

To learn more about AWS Neuron SDK features, tools, example tutorials and latest
updates, please see the [AWS Neuron SDK
documentation](https://awsdocs-neuron.readthedocs-hosted.com/en/latest/index.html).

@@ -65,7 +65,7 @@ Cualquier parámetro adicional para tu tarea también se puede incluir en el [`p

### Selecciona un modelo y un tokenizador

El [`pipeline`] acepta cualquier modelo del [Model Hub](https://huggingface.co/models). Hay etiquetas en el Model Hub que te permiten filtrar por el modelo que te gustaría utilizar para tu tarea. Una vez que hayas elegido un modelo apropiado, cárgalo con la clase `AutoModelFor` y [`AutoTokenizer`] correspondientes. Por ejemplo, carga la clase [`AutoModelForCausalLM`] para una tarea de modelado de lenguaje causal:

```py
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
```
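
El resto del fragmento queda elidido en el diff; un esbozo mínimo de cómo podría continuar (el nombre del checkpoint es solo ilustrativo):

```py
>>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
>>> model = AutoModelForCausalLM.from_pretrained("distilgpt2")
```
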
@@ -39,7 +39,7 @@ Comienza cargando el dataset de [Yelp Reviews](https://huggingface.co/datasets/y
'text': 'My expectations for McDonalds are t rarely high. But for one to still fail so spectacularly...that takes something special!\\nThe cashier took my friends\'s order, then promptly ignored me. I had to force myself in front of a cashier who opened his register to wait on the person BEHIND me. I waited over five minutes for a gigantic order that included precisely one kid\'s meal. After watching two people who ordered after me be handed their food, I asked where mine was. The manager started yelling at the cashiers for \\"serving off their orders\\" when they didn\'t have their food. But neither cashier was anywhere near those controls, and the manager was the one serving food to customers and clearing the boards.\\nThe manager was rude when giving me my order. She didn\'t make sure that I had everything ON MY RECEIPT, and never even had the decency to apologize that I felt I was getting poor service.\\nI\'ve eaten at various McDonalds restaurants for over 30 years. I\'ve worked at more than one location. I expect bad days, bad moods, and the occasional mistake. But I have yet to have a decent experience at this store. It will remain a place I avoid unless someone in my party needs to avoid illness from low blood sugar. Perhaps I should go back to the racially biased service of Steak n Shake instead!'}
```

Como ya sabes, necesitas un tokenizador para procesar el texto e incluir una estrategia para el padding y el truncamiento para manejar cualquier longitud de secuencia variable. Para procesar tu dataset en un solo paso, utiliza el método de 🤗 Datasets map para aplicar una función de preprocesamiento sobre todo el dataset:

```py
>>> from transformers import AutoTokenizer
```

@@ -80,7 +80,7 @@ Comienza cargando tu modelo y especifica el número de labels previstas. A parti

<Tip>

Verás una advertencia acerca de que algunos de los pesos pre-entrenados no están siendo utilizados y que algunos pesos están siendo inicializados al azar. No te preocupes, esto es completamente normal.
El head/cabezal pre-entrenado del modelo BERT se descarta y se sustituye por un head de clasificación inicializado aleatoriamente. Puedes aplicar fine-tuning a este nuevo head del modelo en tu tarea de clasificación de secuencias haciendo transfer learning del modelo pre-entrenado.

</Tip>

@@ -39,3 +39,7 @@

- sections:
  - local: add_new_pipeline
    title: Come aggiungere una pipeline a 🤗 Transformers?
  - local: add_new_model
    title: Come aggiungere un modello a 🤗 Transformers?
  title: Guide How-to

775 docs/source/it/add_new_model.mdx Normal file
@@ -0,0 +1,775 @@
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
-->
|
||||
|
||||
# Come aggiungere un modello a 🤗 Transformers?
|
||||
|
||||
Aggiungere un nuovo modello é spesso difficile e richiede una profonda conoscenza della libreria 🤗 Transformers e anche
|
||||
della repository originale del modello. A Hugging Face cerchiamo di dare alla community sempre piú poteri per aggiungere
|
||||
modelli independentemente. Quindi, per alcuni nuovi modelli che la community vuole aggiungere a 🤗 Transformers, abbiamo
|
||||
creato una specifica *call-for-model-addition* che spiega passo dopo passo come aggiungere il modello richiesto. Con
|
||||
questo *call-for-model-addition* vogliamo insegnare a volenterosi e esperti collaboratori della community come implementare
|
||||
un modello in 🤗 Transformers.
|
||||
|
||||
Se questo é qualcosa che può interessarvi, siete liberi di controllare l'attuale “calls-for-model-addition” [qui](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model/open_model_proposals/README.md)
|
||||
e contattarci.
|
||||
|
||||
Se il modello sarà selezionato, allora potrete lavorare insieme a un membro di Hugging Face per integrare il modello in 🤗
|
||||
Transformers. Così facendo, ci guadagnerai in una comprensione totale, sia teorica che pratica, del modello proposto. Inoltre,
|
||||
sarai l'artefice di un importante contributo open-source a 🤗 Transformers. Durante l'implementazione avrai l'opportunità di:
|
||||
|
||||
- ottenere più comprensione delle best practices in open-source
|
||||
- capire i principi di design di una della librerie NLP più popolari
|
||||
- capire come efficientemente testare complessi modelli NLP
|
||||
- capire come integrare utilità Python come `black`, `isort`, `make fix-copies` in una libreria per garantire sempre di avere un codice leggibile e pulito
|
||||
|
||||
Siamo anche contenti se vuoi aggiungere un modello che non può essere trovato nella cartella “calls-for-model-addition”.
|
||||
Le seguenti sezioni spiegano in dettaglio come aggiungere un nuovo modello. Può anche essere molto utile controllare modelli
|
||||
già aggiunti [qui](https://github.com/huggingface/transformers/pulls?q=is%3Apr+label%3A%22PR+for+Model+Addition%22+is%3Aclosed),
|
||||
per capire se richiamano il modello che vorreste aggiungere.
|
||||
|
||||
Per cominciare, vediamo una panoramica generale della libreria Transformers.
|
||||
|
||||
## Panoramica generale su 🤗 Transformers
|
||||
|
||||
Prima di tutto, vediamo in generale 🤗 Transformers. 🤗 Transformers é una libreria molto strutturata, quindi
|
||||
può essere che a volte ci sia un disaccordo con alcune filosofie della libreria o scelte di design. Dalla nostra esperienza,
|
||||
tuttavia, abbiamo trovato che le scelte fondamentali di design della libreria sono cruciali per usare 🤗 Transformers efficacemente
|
||||
su larga scala, mantenendo i costi a un livello accettabile.
|
||||
|
||||
Un buon primo punto di partenza per capire al meglio la libreria é leggere la [documentazione sulla nostra filosofia](filosofia)
|
||||
Da qui, ci sono alcune scelte sul modo di lavorare che cerchiamo di applicare a tutti i modelli:
|
||||
|
||||
- La composizione é generalmente favorita sulla sovra-astrazione
|
||||
- Duplicare il codice non é sempre male, soprattutto se migliora notevolmente la leggibilità e accessibilità del modello
|
||||
- Tutti i file creati per il nuovo modello devono essere il più possibile "compatti". Questo vuol dire che quando qualcuno leggerà il codice
|
||||
di uno specifico modello, potrá vedere solo il corrispettivo file `modeling_....py` senza avere multiple dipendenze.
|
||||
|
||||
|
||||
La cosa piú importante, é che consideriamo la libreria non solo un mezzo per dare un prodotto, *per esempio* dare la possibilità
|
||||
di usare BERT per inferenza, ma é anche il prodotto reale che noi vogliamo migliorare sempre più. Quindi, quando aggiungi
|
||||
un modello, non sei solo la persona che userà il modello, ma rappresenti anche tutti coloro che leggeranno,
|
||||
cercheranno di capire e modificare il tuo modello.
|
||||
|
||||
Tenendo questi principi in mente, immergiamoci nel design generale della libreria.
|
||||
|
||||
### Panoramica sui modelli
|
||||
|
||||
Per aggiungere con successo un modello, é importante capire l'interazione tra il tuo modello e la sua configurazione,
|
||||
[`PreTrainedModel`], e [`PretrainedConfig`]. Per dare un esempio, chiameremo il modello da aggiungere a 🤗 Transformers
|
||||
`BrandNewBert`.
|
||||
|
||||
Diamo un'occhiata:
|
||||
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_overview.png"/>
|
||||
|
||||
Come potete vedere, ci basiamo sull'ereditarietà in 🤗 Transformers, tenendo però il livello di astrazione a un minimo
|
||||
assoluto. Non ci sono mai più di due livelli di astrazione per ogni modello nella libreria. `BrandNewBertModel` eredita
|
||||
da `BrandNewBertPreTrainedModel` che, a sua volta, eredita da [`PreTrainedModel`] - semplice no?
|
||||
Come regola generale, vogliamo essere sicuri che un nuovo modello dipenda solo da [`PreTrainedModel`]. Le funzionalità
|
||||
importanti che sono automaticamente conferite a ogni nuovo modello sono [`~PreTrainedModel.from_pretrained`]
|
||||
e [`~PreTrainedModel.save_pretrained`], che sono usate per serializzazione e deserializzazione. Tutte le altre importanti
|
||||
funzionalità, come ad esempio `BrandNewBertModel.forward` devono essere definite completamente nel nuovo script
|
||||
`modeling_brand_new_bert.py`. Inoltre, vogliamo essere sicuri che un modello con uno specifico head layer, come
|
||||
`BrandNewBertForMaskedLM` non erediti da `BrandNewBertModel`, ma piuttosto usi `BrandNewBertModel`
|
||||
come componente che può essere chiamata nel passaggio forward per mantenere il livello di astrazione basso. Ogni
|
||||
nuovo modello richiede una classe di configurazione, chiamata `BrandNewBertConfig`. Questa configurazione é sempre
|
||||
mantenuta come un attributo in [`PreTrainedModel`], e quindi può essere accessibile tramite l'attributo `config`
|
||||
per tutte le classi che ereditano da `BrandNewBertPreTrainedModel`:
|
||||
|
||||
```python
|
||||
model = BrandNewBertModel.from_pretrained("brandy/brand_new_bert")
|
||||
model.config # il modello ha accesso al suo config
|
||||
```
|
||||
|
||||
Analogamente al modello, la configurazione eredita le funzionalità base di serializzazione e deserializzazione da
|
||||
[`PretrainedConfig`]. É da notare che la configurazione e il modello sono sempre serializzati in due formati differenti -
|
||||
il modello é serializzato in un file *pytorch_model.bin* mentre la configurazione con *config.json*. Chiamando
|
||||
[`~PreTrainedModel.save_pretrained`] automaticamente chiamerà [`~PretrainedConfig.save_pretrained`], cosicché sia il
|
||||
modello che la configurazione siano salvati.
|
||||
|
||||
|
||||
### Stile per il codice
|
||||
|
||||
Quando codifichi un nuovo modello, tieni presente che Transformers ha una sua struttura di fondo come libreria, perciò
|
||||
ci sono alcuni fatti da considerare su come scrivere un codice :-)
|
||||
|
||||
1. Il forward pass del tuo modello dev'essere scritto completamente nel file del modello, mentre dev'essere indipendente
|
||||
da altri modelli nella libreria. Se vuoi riutilizzare un blocco di codice da un altro modello, copia e incolla il codice con un commento `# Copied from` in cima al codice (guarda [qui](https://github.com/huggingface/transformers/blob/v4.17.0/src/transformers/models/roberta/modeling_roberta.py#L160)
|
||||
per un ottimo esempio).
|
||||
2. Il codice dev'essere interamente comprensibile, anche da persone che non parlano in inglese. Questo significa che le
|
||||
variabili devono avere un nome descrittivo e bisogna evitare abbreviazioni. Per esempio, `activation` é molto meglio
|
||||
che `act`. Le variabili con una sola lettera sono da evitare fortemente, a meno che non sia per un indice in un for loop.
|
||||
3. Generamente é meglio avere un codice esplicito e piú lungo che un codice corto e magico.
|
||||
4. Evita di subclassare `nn.Sequential` in Pytorch, puoi subclassare `nn.Module` e scrivere il forward pass, cosicché
|
||||
chiunque può effettuare debug sul tuo codice, aggiungendo print o breaking points.
|
||||
5. La tua function-signature dev'essere type-annoted. Per il resto, é meglio preferire variabili con un nome accettabile
|
||||
piuttosto che annotazioni per aumentare la comprensione e leggibilità del codice.
|
||||
|
||||
### Panoramica sui tokenizers
|
||||
|
||||
Questa sezione sarà creata al piu presto :-(
|
||||
|
||||
## Aggiungere un modello a 🤗 Transformers passo dopo passo
|
||||
|
||||
Ci sono differenti modi per aggiungere un modello a Hugging Face. Qui trovi una lista di blog posts da parte della community su come aggiungere un modello:
|
||||
|
||||
1. [Aggiungere GPT2](https://medium.com/huggingface/from-tensorflow-to-pytorch-265f40ef2a28) scritto da [Thomas](https://huggingface.co/thomwolf)
|
||||
2. [Aggiungere WMT19 MT](https://huggingface.co/blog/porting-fsmt) scritto da [Stas](https://huggingface.co/stas)
|
||||
|
||||
Per esperienza, possiamo dirti che quando si aggiunge un modello é meglio tenere a mente le seguenti considerazioni:
|
||||
|
||||
- Non sfondare una porta giá aperta! La maggior parte del codice che aggiungerai per un nuovo modello 🤗 Transformers
|
||||
esiste già da qualche parte in 🤗 Transformers. Prendi un po' di tempo per trovare codici simili in modelli e tokenizers esistenti e fare un copia-incolla. Ricorda che [grep](https://www.gnu.org/software/grep/) e [rg](https://github.com/BurntSushi/ripgrep) sono tuoi buoni amici. Inoltre, ricorda che puó essere molto probabile che il tokenizer per il tuo modello sia basato sull'implementazione di un altro modello, e il codice del tuo modello stesso su un altro ancora. *Per esempio* il modello FSMT é basato su BART, mentre il tokenizer di FSMT é basato su XLM.
|
||||
- Ricorda che qui é piu una sfida ingegneristica che scientifica. Spendi piú tempo per create un efficiente ambiente di debugging piuttosto che cercare di capire tutti gli aspetti teorici dell'articolo del modello.
|
||||
- Chiedi aiuto se sei in panne! I modelli sono la parte principale di 🤗 Transformers, perciò qui a Hugging Face siamo più che contenti di aiutarti in ogni passo per aggiungere il tuo modello. Non esitare a chiedere se vedi che non riesci a progredire.
|
||||
|
||||
Di seguito, diamo una ricetta generale per aiutare a portare un modello in 🤗 Transformers.
|
||||
|
||||
La lista seguente é un sommario di tutto quello che é stato fatto per aggiungere un modello, e può essere usata come To-Do List:
|
||||
|
||||
- 1. ☐ (Opzionale) Capire gli aspetti teorici del modello
|
||||
- 2. ☐ Preparare l'ambiente dev per transformers
|
||||
- 3. ☐ Preparare l'ambiente debugging della repository originale
|
||||
- 4. ☐ Create uno script che gestisca con successo il forward pass usando la repository originale e checkpoint
|
||||
- 5. ☐ Aggiungere con successo lo scheletro del modello a Transformers
|
||||
- 6. ☐ Convertire i checkpoint original a Transformers checkpoint
|
||||
- 7. ☐ Effettuare con successo la forward pass in Transformers, di modo che dia un output identico al checkpoint originale
|
||||
- 8. ☐ Finire i tests per il modello in Transformers
|
||||
- 9. ☐ Aggiungere con successo Tokenizer in Transformers
|
||||
- 10. ☐ Testare e provare gli integration tests da capo a fine
|
||||
- 11. ☐ Completare i docs
|
||||
- 12. ☐ Caricare i model weights all'hub
|
||||
- 13. ☐ Sottomettere una pull request
|
||||
- 14. ☐ (Opzionale) Aggiungere un notebook con una demo
|
||||
|
||||
Per cominciare di solito consigliamo `BrandNewBert`, partendo dalla teoria, di modo da avere una buona comprensione della teoria generale. TUttavia, se preferisci imparare l'aspetto teorico del modello mentre *lavori* sul modello é ok immergersi direttamente nel codice di `BrandNewBert`. Questa opzione puó essere buona se le tue skills ingegneristiche sono meglio che quelle teoriche, o se il paper `BrandNewBert` ti dá problemi, o se semplicemente ti piace programmare piú che leggere articoli scientifici.
|
||||
|
||||
### 1. (Opzionale) Aspetti teorici di BrandNewBert
|
||||
|
||||
Allora con calma, prendi un po' di tempo per leggere l'articolo su *BrandNewBert* . Sicuramente, alcune sezioni dell'articolo sono molto complesse, ma non preoccuparti! L'obiettivo non é avere una compresione immensa della teoria alla base, ma estrarre le informazioni necessarie per re-implementare con successo il modello in 🤗 Transformers. Quindi, non impazzire sugli aspetti teorici, ma piuttosto focalizzati su quelli pratici, ossia:
|
||||
|
||||
- Che tipo di modello é *brand_new_bert*? É solo un encoder in stile BERT? O tipo decoder come GPT2? O encoder e decoder stile BART? Dai un'occhiata a [model_summary](model_summary) se non sei famigliare con le differenze tra questi modelli
|
||||
- Quali sono le applicazioni di *brand_new_bert*? Classificazione di testo? Generazione di testo? O per tasks del genere seq2seq?
|
||||
- Quali sono le nuove aggiunte al modello che lo rendono diverso da BERT/GPT-2/BART?
|
||||
- Quali modelli esistenti in [🤗 Transformers models](https://huggingface.co/transformers/#contents) sono molto simili a *brand_new_bert*?
|
||||
- Che tipo di tokenizer si usa in questo caso? Un sentencepiece tokenizer? O un word piece tokenizer? Il tokenizer é lo stesso di BERT o BART?
|
||||
|
||||
Una volta che senti che hai avuto una bella overview dell'architettura del modello, puoi scrivere senza problemi al team di Hugging Face per ogni domanda che tu hai. Questo puó includere domande sull'architettura del modello, o sull'attention layer, etc. Saremo molto felici di aiutarti :)
|
||||
|
||||
|
||||
### 2. Prepara il tuo ambiente
|
||||
|
||||
1. Forka la [repository](https://github.com/huggingface/transformers) cliccando sul tasto ‘Fork' nella pagina della repository. Questo crea una copia del codice nel tuo account GitHub
|
||||
|
||||
2. Clona il tuo fork di `transformers` sul tuo disco locale, e aggiungi la repository base come remota:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/[your Github handle]/transformers.git
|
||||
cd transformers
|
||||
git remote add upstream https://github.com/huggingface/transformers.git
|
||||
```
|
||||
|
||||
|
||||
3. Crea un ambiente di sviluppo, per esempio tramite questo comando:
|
||||
|
||||
```bash
|
||||
python -m venv .env
|
||||
source .env/bin/activate
|
||||
pip install -e ".[dev]"
|
||||
```
|
||||
|
||||
quindi torna alla directory principale:
|
||||
|
||||
```bash
|
||||
cd ..
|
||||
```
|
||||
|
||||
|
||||
4. Attenzione, raccomandiamo di aggiungere la versione di PyTorch di *brand_new_bert* a Transformers. Per installare PyTorch, basta seguire queste istruzioni https://pytorch.org/get-started/locally/.
|
||||
|
||||
**Nota bene:** Non c'é bisogno di installare o avere installato CUDA. Il nuovo modello può funzionare senza problemi su una CPU.
|
||||
|
||||
|
||||
5. Per trasferire *brand_new_bert* avrai bisogno anche dell'accesso alla sua repository originale:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/org_that_created_brand_new_bert_org/brand_new_bert.git
|
||||
cd brand_new_bert
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
Ok, ora hai un ambiente di sviluppo per portare *brand_new_bert* in 🤗 Transformers.
|
||||
|
||||
|
||||
### 3.-4. Provare un pretrained checkpoint usando la repo originale
|
||||
|
||||
Per cominciare, comincerai a lavorare sulla repo originale di *brand_new_bert*. Come spesso accade, l'implementazione originale é molto sullo stile "ricerca". Questo significa che a volte la documentazione non é al top, magari manca qualche cosa e il codice puó essere difficile da capire. Tuttavia, questa é e dev'essere la motivazione per reimplementare *brand_new_bert*. In Hugging Face, uno degli obiettivi principali é di *mettere le persone sulle spalle dei giganti*, il che si traduce, in questo contesto, di prendere un modello funzionante e riscriverlo e renderlo il piú possibile **accessibile, user-friendly, e leggibile**. Questa é la top motivazione per re-implementare modelli in 🤗 Transformers - cercare di creare nuove complesse tecnologie NLP accessibili a **chiunque**.
|
||||
|
||||
Riuscire a far girare il modello pretrained originale dalla repository ufficiale é spesso il passo **piu arduo**. Dalla nostra esperienza, é molto importante spendere un p' di tempo per diventare familiari con il codice base originale. Come test, prova a capire i seguenti punti:
|
||||
|
||||
- Dove si trovano i pretrained weights?
|
||||
- Come caricare i pretrained weights nel modello corrispondente?
|
||||
- Come girare un tokenizer independentemente dal modello?
|
||||
- Prova a tracciare un singolo forward pass, cosicché potrai sapere che classi e funzioni sono richieste per un semplice forward pass. Di solito, dovrai reimplementare queste funzioni e basta
|
||||
- Prova a localizzare i componenti importanti del modello: Dove si trova la classe del modello? Ci sono sotto classi nel modello, *per esempio* EncoderModel, DecoderModel? Dove si trova il self-attention layer? Ci sono molteplici differenti layer di attention, *per esempio* *self-attention*, *cross-attention*...?
|
||||
- Come puoi fare debug sul modello nell'ambiente originale della repo? Devi aggiungere dei *print* o puoi usare *ipdb* come debugger interattivo, o vabene anche un IDE efficiente per debug come PyCharm?
|
||||
|
||||
É molto importante che prima di cominciare a trasferire il modello nuovo tu spenda tempo a fare debug del codice originale in maniera **efficiente**! Inoltre, ricorda che tutta la library é open-soruce, quindi non temere di aprire issue o fare una pull request nella repo originale. Tutti coloro che mantengono la repository saranno piú che felici di avere qualcuno che guarda e gioca con i loro codici!
|
||||
|
||||
A questo punto, sta a te decidere quale ambiente per debug vuoi usare. Noi consilgiamo di evitare setup con GPU, che potrebbero costare assai, lavorare su una CPU puó essere un ottimo punto di partenza per indagare la repository originale e per cominciare a scrivere il codice per 🤗 Transformers. Solo alla fine, quando il modello é stato portato con successo in 🤗 Transformers, allora si potrá verificare il suo funzionamento su GPU.
|
||||
|
||||
In generale ci sono due possibili ambienti di debug per il testare il modello originale:
|
||||
|
||||
- [Jupyter notebooks](https://jupyter.org/) / [google colab](https://colab.research.google.com/notebooks/intro.ipynb)
|
||||
- Scripts locali in Python
|
||||
|
||||
Il vantaggio dei Jupyter notebooks é la possibilità di eseguire cella per cella, il che può essere utile per decomporre tutte le componenti logiche, cosi da a vere un ciclo di debug più rapido, siccome si possono salvare i risultati da steps intermedi. Inoltre, i notebooks spesso sono molto facili da condividere con altri contributors, il che può essere molto utile se vuoi chiedere aiuto al team di Hugging Face. Se sei famigliare con Jupyter notebooks allora racommandiamo di lavorare in questa maniera.
|
||||
|
||||
Ovviamente se non siete abituati a lavorare con i notebook, questo può essere uno svantaggio nell'usare questa tecnologia, sprecando un sacco di tempo per setup e portare tutto al nuovo ambiente, siccome non potreste neanche usare dei tools di debug come `ipdb`.
|
||||
|
||||
Per ogni pratica code-base, é sempre meglio come primo step caricare un **piccolo** checkpoint pretrained e cercare di riprodurre un singolo forward pass usando un vettore fittizio di IDs fatti da numeri interi. Un esempio per uno script simile, in pseudocodice é:
|
||||
|
||||
```python
|
||||
model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/")
|
||||
input_ids = [0, 4, 5, 2, 3, 7, 9] # vector of input ids
|
||||
original_output = model.predict(input_ids)
|
||||
```
|
||||
|
||||
Per quanto riguarda la strategia di debugging, si può scegliere tra:
|
||||
|
||||
- Decomporre il modello originario in piccole componenenti e testare ognuna di esse
|
||||
- Decomporre il modello originario nel *tokenizer* originale e nel *modello* originale, testare un forward pass su questi,
|
||||
e usare dei print statement o breakpoints intermedi per verificare
|
||||
|
||||
Ancora una volta, siete liberi di scegliere quale strategia sia ottimale per voi. Spesso una strategia é piu
|
||||
avvantaggiosa di un'altra, ma tutto dipende dall'code-base originario.
|
||||
|
||||
Se il code-base vi permette di decomporre il modello in piccole sub-componenenti, *per esempio* se il code-base
|
||||
originario può essere facilmente testato in eager mode, allora vale la pena effettuare un debugging di questo genere.
|
||||
Ricordate che ci sono dei vantaggi nel decidere di prendere la strada piu impegnativa sin da subito:
|
||||
|
||||
- negli stage piu finali, quando bisognerà comparare il modello originario all'implementazione in Hugging Face, potrete verificare
|
||||
automaticamente ogni componente, individualmente, di modo che ci sia una corrispondenza 1:1
|
||||
- avrete l'opportunità di decomporre un problema molto grande in piccoli passi, così da strutturare meglio il vostro lavoro
|
||||
- separare il modello in componenti logiche vi aiuterà ad avere un'ottima overview sul design del modello, quindi una migliore
|
||||
comprensione del modello stesso
|
||||
- verso gli stage finali i test fatti componente per componente vi aiuterà ad essere sicuri di non andare avanti e indietro
|
||||
nell'implementazione, così da continuare la modifica del codice senza interruzione
|
||||
|
||||
Un ottimo esempio di come questo può essere fatto é dato da [Lysandre](https://gist.github.com/LysandreJik/db4c948f6b4483960de5cbac598ad4ed)
|
||||
per il modello ELECTRA
|
||||
|
||||
Tuttavia, se il code-base originale é molto complesso o le componenti intermedie possono essere testate solo in tramite
|
||||
compilazione, potrebbe richiedere parecchio tempo o addirittura essere impossibile separare il modello in piccole sotto-componenti.
|
||||
Un buon esempio é [MeshTensorFlow di T5](https://github.com/tensorflow/mesh/tree/master/mesh_tensorflow). Questa libreria
|
||||
é molto complessa e non offre un metodo semplice di decomposizione in sotto-componenti. Per simili librerie, potrete fare
|
||||
affidamento ai print statements.
|
||||
|
||||
In ogni caso, indipendentemente da quale strategia scegliete, la procedura raccomandata é di cominciare a fare debug dal
|
||||
primo layer al layer finale.
|
||||
É consigliato recuperare gli output dai layers, tramite print o sotto-componenti, nel seguente ordine:
|
||||
|
||||
1. Recuperare gli IDs di input dati al modello
|
||||
2. Recuperare i word embeddings
|
||||
3. Recuperare l'input del primo Transformer layer
|
||||
4. Recuperare l'output del primo Transformer layer
|
||||
5. Recuperare l'output dei seguenti `n - 1` Transformer layers
|
||||
6. Recuperare l'output dell'intero BrandNewBert Model
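
Per esempio, un piccolo sketch (puramente ipotetico, in PyTorch) che usa dei *forward hooks* per recuperare gli output intermedi dei punti 2-6, assumendo che il modello originale sia un `nn.Module` (i nomi dei sottomoduli sono illustrativi):

```python
import torch

intermediate_outputs = {}


def save_output(name):
    def hook(module, inputs, output):
        # salva l'output del layer per confrontarlo in seguito con la versione 🤗 Transformers
        intermediate_outputs[name] = output

    return hook


# i nomi `embeddings` e `encoder.layer[0]` dipendono dal modello originale
model.embeddings.register_forward_hook(save_output("word_embeddings"))
model.encoder.layer[0].register_forward_hook(save_output("first_layer"))

with torch.no_grad():
    final_output = model(torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]]))
```
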
|
||||
|
||||
Gli IDs in input dovrebbero essere un arrary di interi, *per esempio* `input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]`
|
||||
|
||||
Gli output dei seguenti layer di solito dovrebbero essere degli array di float multi-dimensionali come questo:
|
||||
|
||||
```
|
||||
[[
|
||||
[-0.1465, -0.6501, 0.1993, ..., 0.1451, 0.3430, 0.6024],
|
||||
[-0.4417, -0.5920, 0.3450, ..., -0.3062, 0.6182, 0.7132],
|
||||
[-0.5009, -0.7122, 0.4548, ..., -0.3662, 0.6091, 0.7648],
|
||||
...,
|
||||
[-0.5613, -0.6332, 0.4324, ..., -0.3792, 0.7372, 0.9288],
|
||||
[-0.5416, -0.6345, 0.4180, ..., -0.3564, 0.6992, 0.9191],
|
||||
[-0.5334, -0.6403, 0.4271, ..., -0.3339, 0.6533, 0.8694]]],
|
||||
```
|
||||
|
||||
We expect every model added to 🤗 Transformers to pass a couple of integration tests. This means that the original model and its 🤗 Transformers reimplementation have to produce the same outputs up to a precision of 0.001! Since it is normal for the exact same model written in different libraries to give slightly different outputs, we accept a tolerance of 1e-3 (0.001). The two models have to give nearly identical outputs, so you will compare the intermediate outputs of the 🤗 Transformers version against the intermediate outputs of the original *brand_new_bert* implementation many times. Here are a few tips to make your debugging environment as efficient as possible:
- Find the best strategy to debug intermediate results. Is the original repository written in PyTorch? Then you will most likely have to spend some time writing a longer script that decomposes the original model into smaller sub-components so that you can retrieve intermediate values. Is the original repository written in TensorFlow 1? Then you may have to rely on TensorFlow's print operations, such as [tf.print](https://www.tensorflow.org/api_docs/python/tf/print), to output intermediate values. Is it written in Jax? Then make sure the model is **not jitted** when running the forward pass, *e.g.* see [this link](https://github.com/google/jax/issues/196).
- Use the smallest pretrained checkpoint you can find. The smaller the checkpoint, the faster your debug cycle. It is not efficient to work with a pretrained model that takes more than 10 seconds per forward pass. If only very large checkpoints are available, it is often better to create a dummy model in the new environment with randomly initialized weights and save those weights, so you can compare them against the 🤗 Transformers version of your model.
- Make sure you are using the simplest way of calling a forward pass in the original repository. Ideally you want to find the function that calls **only** a single forward pass, often called `predict`, `evaluate`, `forward` or `__call__`. Do not debug a function that calls `forward` multiple times, *e.g.* to generate text, such as `autoregressive_sample` or `generate`.
- Try to separate tokenization from the model's forward pass. If the original repository only shows examples where a string is passed as input, find out where in the forward call the string is turned into input ids and start debugging from that point. This gives you a good starting point to write a small script of your own that feeds input ids to the model instead of an input string.
- Make sure that the model in your debugging setup is **not** in training mode, which often makes the model produce random outputs because of the various dropout layers. Make sure the forward pass in your debugging environment is **deterministic**, so that dropout is never used. Alternatively, use *transformers.utils.set_seed* if the old and new implementations live in the same framework (see the sketch right after this list).
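As a concrete illustration of the last two points, here is a minimal sketch of what a deterministic debug forward pass could look like on the 🤗 Transformers side. It assumes the converted checkpoint already loads with `from_pretrained`; the checkpoint path is a placeholder.

```python
import torch

from transformers import BrandNewBertModel, set_seed

set_seed(0)  # the same seed can also be set in the original code-base if it is PyTorch

model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder")
model.eval()  # disables dropout so the forward pass is deterministic

input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]])
with torch.no_grad():  # no gradients are needed for debugging
    output = model(input_ids).last_hidden_state

print(output.shape, output[0, :3, :3])
```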
The following section gives you more specific details and hints on how you can do all of this for *brand_new_bert*.
### 5.-14. Port BrandNewBert to 🤗 Transformers
Next, you can finally start adding new code to 🤗 Transformers. Go into the clone of your 🤗 Transformers fork:

```bash
cd transformers
```

In the special case that you are adding a model whose architecture exactly matches that of an existing model, you only have to add a conversion script, as described [here](#write-a-conversion-script). In that case, you can simply reuse the whole model architecture of the already existing model.

If that is not the case, let's start generating a new model. You have two choices here:

- `transformers-cli add-new-model-like` to add a new model like an existing one
- `transformers-cli add-new-model` to add a new model from our template (it will look like BERT or Bart, depending on the type of model you select)
In both cases, you will be prompted with a questionnaire to fill in basic information about your model. The second command requires `cookiecutter` to be installed - more information [here](https://github.com/huggingface/transformers/tree/main/templates/adding_a_new_model).
**Open a Pull Request on the main huggingface/transformers repo**

Before you start adapting the automatically generated code, open a "Work in progress (WIP)" pull request, *e.g.* "[WIP] Add *brand_new_bert*", so that the Hugging Face team can work side by side with you while integrating the model into 🤗 Transformers.
These are the general steps to follow:

1. Create a branch with a descriptive name from your main branch

```bash
git checkout -b add_brand_new_bert
```

2. Commit the automatically generated code

```bash
git add .
git commit
```

3. Fetch and rebase on the current main

```bash
git fetch upstream
git rebase upstream/main
```

4. Push your changes to your account:

```bash
git push -u origin a-descriptive-name-for-my-changes
```

5. Once you are satisfied with your changes, go to the webpage of your fork on GitHub and click on "Pull request". Make sure to add some members of the Hugging Face team as reviewers, in the panel on the right of the PR page, so that the Hugging Face team also gets notified about future changes.

6. Change the PR into a draft by clicking on "Convert to draft" on the right of the PR page
From then on, remember to commit every bit of progress, so that it shows up in the PR. In addition, keep your work up to date with the current main:

```bash
git fetch upstream
git merge upstream/main
```

In general, all questions you have regarding the model or your implementation should be asked and discussed/resolved in your PR. This way, the Hugging Face team will always be notified when you commit new code or when you have a question. It is often very helpful to point the Hugging Face team to the code you are referring to in your question, so that the team can easily understand the problem or question.

To do so, go to the "Files changed" tab, where you can see all of your changes, go to the line you want to ask a question about, and click on the "+" symbol to add a comment. Whenever a question or problem has been resolved, click on the "Resolve" button of the comment.

In the same way, the Hugging Face team will open comments and questions when reviewing your code. We recommend asking as many questions as possible directly on the PR page. If you have very general questions that are not very useful for the public, feel free to reach out to the Hugging Face team directly on Slack or via email.


**5. Adapt the code for brand_new_bert**

At first, we will focus only on the model itself and not care about the tokenizer. All the relevant code should be found in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` and `src/transformers/models/brand_new_bert/configuration_brand_new_bert.py`.

Now you can finally start coding :). The generated code in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` will have the same architecture as BERT if it is an encoder-only model, or as BART if it is an encoder-decoder model. At this point, remind yourself of what you learned about the theoretical aspects of the model at the beginning: *How is the model I'm implementing different from BERT or BART?* Implementing those differences often means changing the *self-attention* layer, the order of the normalization layers, and so on... Once more, it is very useful to look at similar architectures of models that already exist in Transformers to get a better idea of how your model should be implemented.
**Note** that at this point you don't have to have code that is fully correct or clean. On the contrary, it is advised to start with an unclean, copy-pasted first draft of the original code in `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` until you feel that all the necessary code has been added. In our experience, it is much more efficient to quickly add a first draft of the required code and then improve/correct it iteratively. The only thing that has to work at this point is the following instantiation:

```python
from transformers import BrandNewBertModel, BrandNewBertConfig

model = BrandNewBertModel(BrandNewBertConfig())
```

This command will create a model with the default parameters defined in `BrandNewBertConfig()` and random weights, which verifies that the `init()` methods of all components work correctly.


**6. Write a conversion script**

The next step is to write a conversion script that converts the checkpoint you used to debug *brand_new_bert* in the original repository into a checkpoint for the new 🤗 Transformers implementation of *brand_new_bert*. It is not advised to write the conversion script from scratch; instead, look through existing conversion scripts in 🤗 Transformers to find one that was used for a model similar to yours. Usually it is enough to copy an already existing conversion script and slightly adapt it to your use case. Don't hesitate to ask the Hugging Face team about this.

- If you are converting a model from TensorFlow to PyTorch, a good starting point is [BERT's conversion script](https://github.com/huggingface/transformers/blob/7acfa95afb8194f8f9c1f4d2c6028224dbed35a2/src/transformers/models/bert/modeling_bert.py#L91)
- If you are converting a model from PyTorch to PyTorch, [BART's conversion script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py) can be useful
In the following, we quickly explain how PyTorch models store their layer weights and how layer names are defined. In PyTorch, the name of a layer is defined by the name of the class attribute you assign the layer to. Let's define a dummy PyTorch model called `SimpleModel`:

```python
from torch import nn


class SimpleModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(10, 10)
        self.intermediate = nn.Linear(10, 10)
        self.layer_norm = nn.LayerNorm(10)
```

Now we can create an instance of this model definition, which fills all of its weights (`dense`, `intermediate`, `layer_norm`) with random values. We can print the model to see its architecture:

```python
model = SimpleModel()

print(model)
```

This will print out the following:

```
SimpleModel(
  (dense): Linear(in_features=10, out_features=10, bias=True)
  (intermediate): Linear(in_features=10, out_features=10, bias=True)
  (layer_norm): LayerNorm((10,), eps=1e-05, elementwise_affine=True)
)
```

We can see that the layer names are defined by the name of the class attribute in PyTorch. You can print out the weight values of a specific layer:

```python
print(model.dense.weight.data)
```

which gives, for example:

```
tensor([[-0.0818,  0.2207, -0.0749, -0.0030,  0.0045, -0.1569, -0.1598,  0.0212,
         -0.2077,  0.2157],
        [ 0.1044,  0.0201,  0.0990,  0.2482,  0.3116,  0.2509,  0.2866, -0.2190,
          0.2166, -0.0212],
        [-0.2000,  0.1107, -0.1999, -0.3119,  0.1559,  0.0993,  0.1776, -0.1950,
         -0.1023, -0.0447],
        [-0.0888, -0.1092,  0.2281,  0.0336,  0.1817, -0.0115,  0.2096,  0.1415,
         -0.1876, -0.2467],
        [ 0.2208, -0.2352, -0.1426, -0.2636, -0.2889, -0.2061, -0.2849, -0.0465,
          0.2577,  0.0402],
        [ 0.1502,  0.2465,  0.2566,  0.0693,  0.2352, -0.0530,  0.1859, -0.0604,
          0.2132,  0.1680],
        [ 0.1733, -0.2407, -0.1721,  0.1484,  0.0358, -0.0633, -0.0721, -0.0090,
          0.2707, -0.2509],
        [-0.1173,  0.1561,  0.2945,  0.0595, -0.1996,  0.2988, -0.0802,  0.0407,
          0.1829, -0.1568],
        [-0.1164, -0.2228, -0.0403,  0.0428,  0.1339,  0.0047,  0.1967,  0.2923,
          0.0333, -0.0536],
        [-0.1492, -0.1616,  0.1057,  0.1950, -0.2807, -0.2710, -0.1586,  0.0739,
          0.2220,  0.2358]]).
```

In the conversion script, you should fill those randomly initialized values with the exact weights of the corresponding layer in the checkpoint. *E.g.*

```python
# retrieve matching layer weights, e.g. by
# recursive algorithm
layer_name = "dense"
pretrained_weight = array_of_dense_layer

model_pointer = getattr(model, "dense")

model_pointer.weight.data = torch.from_numpy(pretrained_weight)
```

While doing so, you must verify that each randomly initialized weight of your PyTorch model and its corresponding pretrained checkpoint weight exactly match in both **shape and name**. To do so, it is **necessary** to add an `assert` statement for the shape and name:

```python
assert (
    model_pointer.weight.shape == pretrained_weight.shape
), f"Pointer shape of random weight {model_pointer.weight.shape} and array shape of checkpoint weight {pretrained_weight.shape} mismatched"
```

Besides that, you should also print out the names of both weights to make sure they match, *e.g.*

```python
logger.info(f"Initialize PyTorch weight {layer_name} from {pretrained_weight.name}")
```

If either the shape or the name doesn't match, you have probably assigned the wrong checkpoint weight to a randomly initialized layer of the 🤗 Transformers implementation.

An incorrect shape is most likely due to a wrong setting of the parameters in `BrandNewBertConfig()`. However, it could also be that PyTorch's implementation of the layer requires the weight matrix to be transposed beforehand.
Finally, check that **all** weights are initialized and print out all the checkpoint weights that were not used for initialization, to make sure the model is correctly converted. It is completely normal for the conversion to fail at first, whether because of an error in `BrandNewBertConfig()`, an error in the 🤗 Transformers architecture, or a bug in `init()`.
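To make that final check systematic, the conversion script can keep track of which checkpoint entries were actually consumed. The following is only a sketch: it assumes the original checkpoint has already been loaded into a flat dict of NumPy arrays whose keys have been renamed to the 🤗 Transformers parameter names; the renaming itself is model-specific.

```python
import torch


def load_original_weights(model, original_state_dict):
    """Copy mapped checkpoint arrays into `model` and report anything left over."""
    unused_keys = set(original_state_dict.keys())
    for name, parameter in model.named_parameters():
        if name not in original_state_dict:
            print(f"{name} not found in the original checkpoint, keeping the random init")
            continue
        pretrained_weight = torch.from_numpy(original_state_dict[name])
        assert (
            parameter.shape == pretrained_weight.shape
        ), f"Shape mismatch for {name}: {parameter.shape} vs {pretrained_weight.shape}"
        parameter.data = pretrained_weight
        unused_keys.remove(name)
    # weights that were never used usually point at a naming or architecture mistake
    print("Checkpoint weights that were never used:", sorted(unused_keys))
    return model
```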
This step has to be iterated until all checkpoint weights are loaded with the correct values. Once the checkpoint has been correctly loaded into 🤗 Transformers, you can save the model to a folder of your choice `/path/to/converted/checkpoint/folder`, which should then contain both a `pytorch_model.bin` file and a `config.json` file:

```python
model.save_pretrained("/path/to/converted/checkpoint/folder")
```


**7. Implement the forward pass**

Once the pretrained weights are correctly loaded into the 🤗 Transformers implementation, you should make sure that the forward pass is correctly implemented. [Here](#provare-un-pretrained-checkpoint-usando-la-repo-originale), you already created and ran a script that tests a forward pass of the model using the original repository. Now you should write an analogous script that uses the 🤗 Transformers implementation instead of the original one. It should look something like this:

```python
model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder")
input_ids = [0, 4, 4, 3, 2, 4, 1, 7, 19]
output = model(input_ids).last_hidden_state
```

It is very likely that the 🤗 Transformers output and the original output are not exactly identical, especially the first time. Don't be discouraged - it's expected! First, make sure that the forward pass does not raise any errors. It often happens that wrong dimensions or a wrong data type is used, *e.g.* `torch.long` instead of `torch.float32`. Don't hesitate to ask the Hugging Face team for help!

In the final part of this step, make sure the 🤗 Transformers implementation works correctly by checking that the outputs are equivalent to a precision of `1e-3`. First, check that `outputs.shape` is the same for the 🤗 Transformers implementation and the original one. Then, check that the output values are identical as well. This is without doubt the hardest part; here is a list of common mistakes when the outputs are not identical:

- Some layers were not added, *e.g.* an *activation* layer was not added, or a residual connection was forgotten
- The word embedding matrix was not tied
- The wrong positional embeddings are used because the original implementation uses an offset
- Dropout is applied during the forward pass. To fix this, make sure *model.training = False* and that no dropout layer is falsely activated during the forward pass, *e.g.* pass *self.training* to [PyTorch's functional dropout](https://pytorch.org/docs/stable/nn.functional.html?highlight=dropout#torch.nn.functional.dropout)
The best way to fix the problem is usually to look at the forward pass of the original implementation and the 🤗 Transformers implementation side by side and check whether there are any differences. Ideally, debug/print the intermediate outputs of both implementations at exactly the same positions in the network; this should quickly show you where the two frameworks diverge. As a first step, make sure that the hard-coded `input_ids` are identical in both scripts. From there, work your way up to the very last layer; at some point you will notice the difference between the two implementations.

Once the same output is reached, verify the outputs with `torch.allclose(original_output, output, atol=1e-3)`. If everything checks out at this point: congratulations! The following parts will be a walk in the park 😊.
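A small comparison helper can make this check repeatable while you iterate on the implementation. This is only a sketch: `original_output` is assumed to be the tensor you saved from the original repository (e.g. with `torch.save`), and the checkpoint path is a placeholder.

```python
import torch

from transformers import BrandNewBertModel


def compare_to_original(original_output, converted_checkpoint="/path/to/converted/checkpoint/folder"):
    model = BrandNewBertModel.from_pretrained(converted_checkpoint)
    model.eval()  # keep the forward pass deterministic

    input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]])
    with torch.no_grad():
        output = model(input_ids).last_hidden_state

    assert output.shape == original_output.shape, f"{output.shape} != {original_output.shape}"
    max_diff = (output - original_output).abs().max().item()
    print(f"Max absolute difference: {max_diff}")
    assert torch.allclose(original_output, output, atol=1e-3), "Outputs differ by more than 1e-3"
```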
**8. Add all the necessary model tests**

At this point, you have successfully added your new model. However, it is very likely that the model does not yet fully comply with the required design. To make sure the implementation is fully compatible with 🤗 Transformers, all common tests should pass. The Cookiecutter should have automatically added a test file for your model, usually under `tests/test_modeling_brand_new_bert.py`. Run it to verify that the most common tests pass:

```bash
pytest tests/test_modeling_brand_new_bert.py
```

Once the common tests pass, you have to make sure that your work is properly tested, so that:

- a) The community can easily understand your work by looking at the tests specific to *brand_new_bert*
- b) Future changes to your model will not break any important feature of the model.
First, add integration tests. These are essential because they do the same job as the debugging scripts you used earlier. A template for these tests is already added by the Cookiecutter, under the name `BrandNewBertModelIntegrationTests`; you only have to fill it in (a hypothetical sketch of such a test follows the tip below). Once these tests pass, run:

```bash
RUN_SLOW=1 pytest -sv tests/test_modeling_brand_new_bert.py::BrandNewBertModelIntegrationTests
```

<Tip>

In case you are using Windows, replace `RUN_SLOW=1` with `SET RUN_SLOW=1`

</Tip>

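For illustration, such an integration test typically hard-codes a small input and an expected output slice produced by the original implementation. The sketch below only shows the general shape of such a test; the checkpoint name, the expected shape, and the expected values are placeholders that you would replace with numbers coming from the original repository.

```python
import unittest

import torch

from transformers import BrandNewBertModel
from transformers.testing_utils import require_torch, slow, torch_device


@require_torch
class BrandNewBertModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        # "author/brand_new_bert_base" is a placeholder checkpoint name
        model = BrandNewBertModel.from_pretrained("author/brand_new_bert_base").to(torch_device)
        input_ids = torch.tensor([[0, 4, 4, 3, 2, 4, 1, 7, 19]], device=torch_device)

        with torch.no_grad():
            output = model(input_ids).last_hidden_state

        expected_shape = torch.Size((1, 9, 768))  # placeholder hidden size
        self.assertEqual(output.shape, expected_shape)

        # values taken from the original implementation (placeholders here)
        expected_slice = torch.tensor(
            [[[-0.1465, -0.6501, 0.1993], [-0.4417, -0.5920, 0.3450], [-0.5009, -0.7122, 0.4548]]],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3))
```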
Second, all features that are special to *brand_new_bert* should be tested in separate tests under `BrandNewBertModelTester`/`BrandNewBertModelTest`. This part is often forgotten, but it is useful in two ways:

- It helps users understand your code better by drawing attention to these special features
- Future developers and contributors can quickly test changes to the model by running these special test cases.


**9. Implement the tokenizer**

Next, we need a tokenizer for *brand_new_bert*. Usually, the tokenizer is equivalent or very similar to an already existing tokenizer in 🤗 Transformers.

It is important that you find the original tokenizer file and manage to load it into the 🤗 Transformers implementation of the tokenizer.

To make sure the tokenizer works correctly, first create a script in the original repository that takes a string as input and returns the `input_ids`. It could look something like this:

```python
input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."
model = BrandNewBertModel.load_pretrained_checkpoint("/path/to/checkpoint/")
input_ids = model.tokenize(input_str)
```

It might take some time, but look through the original repository again to find the correct tokenization function. Sometimes you even have to modify the original repository so that it outputs the `input_ids`. At that point, an analogous script is needed in 🤗 Transformers:

```python
from transformers import BrandNewBertTokenizer

input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."

tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/")

input_ids = tokenizer(input_str).input_ids
```

When both implementations produce the same `input_ids`, a tokenizer test should be added as well.

The tokenizer test file of *brand_new_bert* should contain a couple of hard-coded integration tests.
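A hard-coded integration test of that kind can be as simple as checking a fixed string against the ids produced by the original tokenizer. The sketch below assumes the expected ids were printed with the original repository beforehand; the values shown here are placeholders.

```python
import unittest

from transformers import BrandNewBertTokenizer


class BrandNewBertTokenizationIntegrationTest(unittest.TestCase):
    def test_tokenizer_integration(self):
        tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/tokenizer/folder/")

        input_str = "This is a long example input string containing special characters .$?-, numbers 2872 234 12 and words."
        # ids produced by the original tokenizer implementation (placeholder values)
        expected_ids = [0, 713, 16, 10, 251, 1246, 8135, 6755, 519, 2]

        self.assertListEqual(tokenizer(input_str).input_ids, expected_ids)
```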
**10. Run end-to-end integration tests**

Now that you have the tokenizer, you should add end-to-end integration tests that use both the model and the tokenizer to `tests/test_modeling_brand_new_bert.py` in 🤗 Transformers. Such a test should show, on a meaningful text-to-text sample, that the 🤗 Transformers implementation works as expected. *For example*, you could use a source-to-target translation pair, an article-to-summary pair, or a question-to-answer pair. If none of the ported checkpoints has been fine-tuned on such a downstream task, the model tests are more than sufficient. As a final step, you should make sure the model is fully functional, and we also recommend running these tests on a GPU. It can happen that a `.to(self.device)` statement was forgotten somewhere, for instance. If you have no access to a GPU, the Hugging Face team can run this part of the testing for you.
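To catch a forgotten `.to(self.device)`, it can also help to run a quick GPU smoke test of the full pipeline before handing it over. This is a minimal sketch, assuming a converted checkpoint and tokenizer are available locally at the placeholder path:

```python
import torch

from transformers import BrandNewBertModel, BrandNewBertTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = BrandNewBertTokenizer.from_pretrained("/path/to/converted/checkpoint/folder")
model = BrandNewBertModel.from_pretrained("/path/to/converted/checkpoint/folder").to(device)
model.eval()

inputs = tokenizer("A short end-to-end smoke test.", return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)

print(outputs.last_hidden_state.device)  # should be a CUDA device when a GPU is available
```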
**11. Add a Docstring**

You are almost at the end! The last thing to add is a nice docstring and a doc page. The Cookiecutter should already have provided a template called `docs/source/model_doc/brand_new_bert.rst` that you should fill in. The first thing a user will do to use your model is read the docs, so the documentation should be clear and concise. It is also very useful for the community to add some *Tips* showing how the model should be used. Don't hesitate to ask the Hugging Face team about the docstrings.

Next, make sure the docstring added to `src/transformers/models/brand_new_bert/modeling_brand_new_bert.py` is correct and includes all the necessary inputs and outputs. We have a detailed guide about writing documentation and docstrings.


**Refactor the code**

Great! Now that you have all the code for *brand_new_bert*, check that your code style is correct by running:

```bash
make style
```

and that the code passes the quality checks:

```bash
make quality
```

Sometimes missing information in a docstring or a wrong name will make the checks above fail. Once more: feel free to ask the Hugging Face team, we will be happy to help you.

Finally, it is always a good idea to refactor your code once it is working.

You are done with coding now, congratulations! 🎉 You are awesome! 😎
**12. Upload the model to the model hub**

In this last part, you should convert and upload the model, with all its checkpoints, to the model hub and add a model card for each uploaded checkpoint. Read our [Model sharing and uploading Page](model_sharing) to get familiar with the hub. Here, you will usually work alongside the Hugging Face team to decide on a fitting name for each checkpoint and to get the required permissions to upload the model under the organization of the author of *brand_new_bert*. The `push_to_hub` method, present in all `transformers` models, is a quick and painless way to push your checkpoint to the hub:

```python
brand_new_bert.push_to_hub(
    repo_path_or_name="brand_new_bert",
    # Uncomment the following line to push to an organization
    # organization="<ORGANIZATION>",
    commit_message="Add model",
    use_temp_dir=True,
)
```

It is worth spending some time to create a fitting model card for each checkpoint. The model cards should highlight the specific characteristics of that particular checkpoint, *e.g.*, which dataset the checkpoint was pretrained or fine-tuned on and which kind of task the model is intended for. It is also good practice to include some code showing how to use the model correctly.


**13. (Optional) Add a notebook**

It is very helpful to add a notebook that shows in detail how *brand_new_bert* can be used for inference and/or fine-tuned on a downstream task. This is not required to get your PR merged, but it is very useful for the community.
**14. Submit your PR**

The very last step! That is, getting your PR merged into main. Usually the Hugging Face team will have already helped you by this point, but it is worth taking some time to clean up the description and the comments in your code.


### Share your work!!

Now it's time to get some credit from the community for your work! Completing a model addition is a major contribution to Transformers and the whole NLP community. Your code and the converted pre-trained models will certainly be used by hundreds, possibly thousands, of developers and researchers. Be proud of your work and share your achievement with the whole community :)

**You have created another model that is super easy to use for everyone in the community! 🤯**

@ -349,7 +349,7 @@ class FlaxDataCollatorForT5MLM:
if batch["input_ids"].shape[-1] != self.input_length:
raise ValueError(
f"`input_ids` are incorrectly preprocessed. `input_ids` length is {batch['input_ids'].shape[-1]}, but"
f" should be {self.target_length}."
f" should be {self.input_length}."
)

if batch["labels"].shape[-1] != self.target_length:

@ -61,7 +61,7 @@ from utils_qa import postprocess_qa_predictions
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.23.0.dev0")
check_min_version("4.23.0")

Array = Any
Dataset = datasets.arrow_dataset.Dataset
@ -54,7 +54,7 @@ from transformers.utils import check_min_version, get_full_repo_name, send_examp
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
Array = Any
|
||||
Dataset = datasets.arrow_dataset.Dataset
|
||||
|
||||
@ -55,7 +55,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")
|
||||
|
||||
|
||||
@ -45,7 +45,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
|
||||
|
||||
|
||||
@ -54,7 +54,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt")
|
||||
|
||||
|
||||
@ -23,8 +23,8 @@ This directory contains 2 scripts that showcase how to fine-tune any model suppo
|
||||
Try out the inference widget here: https://huggingface.co/google/vit-base-patch16-224
|
||||
|
||||
Content:
|
||||
- [PyTorch version, Trainer](#pytorch-version-no-trainer)
|
||||
- [PyTorch version, no Trainer](#pytorch-version-trainer)
|
||||
- [PyTorch version, Trainer](#pytorch-version-trainer)
|
||||
- [PyTorch version, no Trainer](#pytorch-version-no-trainer)
|
||||
|
||||
## PyTorch version, Trainer
|
||||
|
||||
@ -208,4 +208,4 @@ This command is the same and will work for:
|
||||
|
||||
Note that this library is in alpha release so your feedback is more than welcome if you encounter any problem using it.
|
||||
|
||||
Regarding using custom data with this script, we refer to [using your own data](#using-your-own-data).
|
||||
@ -55,7 +55,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
|
||||
|
||||
|
||||
@ -53,7 +53,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
@ -43,7 +43,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
|
||||
|
||||
|
||||
@ -48,7 +48,7 @@ Any model supported by the AutoModelForMaskedImageModeling API can be used.
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
|
||||
|
||||
|
||||
@ -54,7 +54,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
|
||||
@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
@ -53,7 +53,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
|
||||
@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
@ -47,7 +47,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
|
||||
@ -47,7 +47,7 @@ from transformers.utils import PaddingStrategy, check_min_version, send_example_
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -56,7 +56,7 @@ from transformers.utils import PaddingStrategy, check_min_version, get_full_repo
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
# You should update this to your particular problem to have better documentation of `model_type`
|
||||
|
||||
@ -49,7 +49,7 @@ from utils_qa import postprocess_qa_predictions
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
|
||||
|
||||
|
||||
@ -48,7 +48,7 @@ from utils_qa import postprocess_qa_predictions_with_beam_search
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
|
||||
|
||||
|
||||
@ -56,7 +56,7 @@ from utils_qa import postprocess_qa_predictions_with_beam_search
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
|
||||
|
||||
|
||||
@ -57,7 +57,7 @@ from utils_qa import postprocess_qa_predictions
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
|
||||
|
||||
|
||||
@ -45,7 +45,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
|
||||
|
||||
|
||||
@ -51,7 +51,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt")
|
||||
|
||||
|
||||
@ -50,7 +50,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
@ -50,7 +50,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
|
||||
|
||||
@ -317,6 +317,8 @@ class DataCollatorCTCWithPadding:
|
||||
labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
|
||||
|
||||
batch["labels"] = labels
|
||||
if "attention_mask" in batch:
|
||||
batch["attention_mask"] = batch["attention_mask"].to(torch.long)
|
||||
|
||||
return batch
|
||||
|
||||
|
||||
@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
|
||||
|
||||
|
||||
@ -52,7 +52,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
|
||||
|
||||
|
||||
@ -56,7 +56,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
|
||||
@ -669,7 +669,6 @@ def main():
|
||||
"max_length": args.val_max_target_length if args is not None else config.max_length,
|
||||
"num_beams": args.num_beams,
|
||||
}
|
||||
samples_seen = 0
|
||||
for step, batch in enumerate(eval_dataloader):
|
||||
with torch.no_grad():
|
||||
generated_tokens = accelerator.unwrap_model(model).generate(
|
||||
@ -686,7 +685,7 @@ def main():
|
||||
# If we did not pad to max length, we need to pad the labels too
|
||||
labels = accelerator.pad_across_processes(batch["labels"], dim=1, pad_index=tokenizer.pad_token_id)
|
||||
|
||||
generated_tokens, labels = accelerator.gather((generated_tokens, labels))
|
||||
generated_tokens, labels = accelerator.gather_for_metrics((generated_tokens, labels))
|
||||
generated_tokens = generated_tokens.cpu().numpy()
|
||||
labels = labels.cpu().numpy()
|
||||
|
||||
@ -699,14 +698,8 @@ def main():
|
||||
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
|
||||
|
||||
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
|
||||
# If we are in a multiprocess environment, the last batch has duplicates
|
||||
if accelerator.num_processes > 1:
|
||||
if step == len(eval_dataloader) - 1:
|
||||
decoded_preds = decoded_preds[: len(eval_dataloader.dataset) - samples_seen]
|
||||
decoded_labels = decoded_labels[: len(eval_dataloader.dataset) - samples_seen]
|
||||
else:
|
||||
samples_seen += len(decoded_labels)
|
||||
|
||||
decoded_preds, decoded_labels = accelerator.gather_for_metrics(decoded_preds, decoded_labels)
|
||||
metric.add_batch(
|
||||
predictions=decoded_preds,
|
||||
references=decoded_labels,
|
||||
|
||||
@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
|
||||
|
||||
|
||||
@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
|
||||
|
||||
|
||||
@ -49,7 +49,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")
|
||||
|
||||
|
||||
@ -55,7 +55,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")
|
||||
|
||||
@ -52,7 +52,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt")
|
||||
|
||||
|
||||
@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt")
|
||||
|
||||
@ -95,7 +95,7 @@ jedi==0.18.1
|
||||
Jinja2==2.11.3
|
||||
jinja2-time==0.2.0
|
||||
jmespath==0.10.0
|
||||
joblib==1.1.0
|
||||
joblib==1.2.0
|
||||
jsonschema==4.4.0
|
||||
keras==2.8.0
|
||||
Keras-Preprocessing==1.1.2
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
|
||||
Author: [@vasudevgupta7](https://github.com/vasudevgupta7)
|
||||
Author: [@vasudevgupta7](https://github.com/thevasudevgupta/)
|
||||
|
||||
## Intro
|
||||
|
||||
@ -57,4 +57,4 @@ wget https://huggingface.co/datasets/vasudevgupta/natural-questions-validation/r
|
||||
python3 evaluate.py
|
||||
```
|
||||
|
||||
You can find our checkpoint on HuggingFace Hub ([see this](https://huggingface.co/vasudevgupta/flax-bigbird-natural-questions)). In case you are interested in PyTorch BigBird fine-tuning, you can refer to [this repositary](https://github.com/vasudevgupta7/bigbird).
|
||||
You can find our checkpoint on HuggingFace Hub ([see this](https://huggingface.co/vasudevgupta/flax-bigbird-natural-questions)). In case you are interested in PyTorch BigBird fine-tuning, you can refer to [this repositary](https://github.com/thevasudevgupta/bigbird).
|
||||
|
||||
@ -29,7 +29,7 @@ ipython-genutils==0.2.0
|
||||
ipywidgets==7.5.1
|
||||
jedi==0.17.2
|
||||
Jinja2>=2.11.3
|
||||
joblib==0.16.0
|
||||
joblib==1.2.0
|
||||
jsonschema==3.2.0
|
||||
jupyter==1.0.0
|
||||
jupyter-client==6.1.7
|
||||
|
||||
@ -29,7 +29,7 @@ ipython-genutils==0.2.0
|
||||
ipywidgets==7.5.1
|
||||
jedi==0.17.2
|
||||
Jinja2>=2.11.3
|
||||
joblib==0.16.0
|
||||
joblib==1.2.0
|
||||
jsonschema==3.2.0
|
||||
jupyter==1.0.0
|
||||
jupyter-client==6.1.7
|
||||
|
||||
@ -50,7 +50,7 @@ from transformers.utils import PaddingStrategy, check_min_version, send_example_
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -48,7 +48,7 @@ from utils_qa import postprocess_qa_predictions
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@ -53,7 +53,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
# region Checking dependencies
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
|
||||
|
||||
|
||||
@ -47,7 +47,7 @@ from transformers.utils import check_min_version, send_example_telemetry
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
task_to_keys = {
|
||||
"cola": ("sentence", None),
|
||||
|
||||
@ -56,7 +56,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
# region Dependencies and constants
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.23.0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
|
||||
|
||||
|
||||
@ -18,9 +18,9 @@ limitations under the License.
|
||||
|
||||
You can find here a list of the official notebooks provided by Hugging Face.
|
||||
|
||||
Also, we would like to list here interesting content created by the community.
If you wrote some notebook(s) leveraging 🤗 Transformers and would like be listed here, please open a
Pull Request so it can be included under the Community notebooks.
|
||||
|
||||
## Hugging Face's notebooks 🤗
|
||||
@ -31,53 +31,53 @@ You can open any page of the documentation as a notebook in colab (there is a bu
|
||||
|
||||
| Notebook | Description | | |
|
||||
|:----------|:-------------|:-------------|------:|
|
||||
| [Quicktour of the library](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/quicktour.ipynb) | A presentation of the various APIs in Transformers |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/quicktour.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/en/transformers_doc/quicktour.ipynb)|
|
||||
| [Summary of the tasks](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/task_summary.ipynb) | How to run the models of the Transformers library task by task |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/task_summary.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/task_summary.ipynb)|
|
||||
| [Preprocessing data](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/preprocessing.ipynb) | How to use a tokenizer to preprocess your data |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/preprocessing.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/preprocessing.ipynb)|
|
||||
| [Fine-tuning a pretrained model](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/training.ipynb) | How to use the Trainer to fine-tune a pretrained model |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/training.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/training.ipynb)|
|
||||
| [Summary of the tokenizers](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/tokenizer_summary.ipynb) | The differences between the tokenizers algorithm |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/tokenizer_summary.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/tokenizer_summary.ipynb)|
|
||||
| [Multilingual models](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/multilingual.ipynb) | How to use the multilingual models of the library |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/multilingual.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/multilingual.ipynb)|
### PyTorch Examples
|
||||
|
||||
| Notebook | Description | | |
|
||||
|:----------|:-------------|:-------------|------:|
|
||||
| [Train your tokenizer](https://github.com/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb) | How to train and use your very own tokenizer |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)|
|
||||
| [Train your language model](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch.ipynb) | How to easily start using transformers |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch.ipynb)|
|
||||
| [How to fine-tune a model on text classification](https://github.com/huggingface/notebooks/blob/main/examples/text_classification.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on any GLUE task. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)|
|
||||
| [How to fine-tune a model on language modeling](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on a causal or masked LM task. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)|
| [How to fine-tune a model on token classification](https://github.com/huggingface/notebooks/blob/main/examples/token_classification.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on a token classification task (NER, PoS). | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)|
| [How to fine-tune a model on question answering](https://github.com/huggingface/notebooks/blob/main/examples/question_answering.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on SQUAD. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb)|
| [How to fine-tune a model on multiple choice](https://github.com/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on SWAG. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)|
| [How to fine-tune a model on translation](https://github.com/huggingface/notebooks/blob/main/examples/translation.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on WMT. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/translation.ipynb)|
| [How to fine-tune a model on summarization](https://github.com/huggingface/notebooks/blob/main/examples/summarization.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on XSUM. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/summarization.ipynb)|
| [How to fine-tune a speech recognition model in English](https://github.com/huggingface/notebooks/blob/main/examples/speech_recognition.ipynb)| Show how to preprocess the data and fine-tune a pretrained Speech model on TIMIT | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/speech_recognition.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/speech_recognition.ipynb)|
| [How to fine-tune a speech recognition model in any language](https://github.com/huggingface/notebooks/blob/main/examples/multi_lingual_speech_recognition.ipynb)| Show how to preprocess the data and fine-tune a multi-lingually pretrained speech model on Common Voice | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multi_lingual_speech_recognition.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/multi_lingual_speech_recognition.ipynb)|
| [How to fine-tune a model on audio classification](https://github.com/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)| Show how to preprocess the data and fine-tune a pretrained Speech model on Keyword Spotting | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)|
| [How to train a language model from scratch](https://github.com/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)| Highlight all the steps to effectively train Transformer model on custom data | [](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)|
| [How to generate text](https://github.com/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)| How to use different decoding methods for language generation with transformers | [](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)|
| [How to generate text (with constraints)](https://github.com/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)| How to guide language generation with user-provided constraints | [](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)|
| [How to export model to ONNX](https://github.com/huggingface/notebooks/blob/main/examples/onnx-export.ipynb)| Highlight how to export and run inference workloads through ONNX |
| [How to use Benchmarks](https://github.com/huggingface/notebooks/blob/main/examples/benchmark.ipynb)| How to benchmark models with transformers | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/benchmark.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/benchmark.ipynb)|
| [Reformer](https://github.com/huggingface/blog/blob/main/notebooks/03_reformer.ipynb)| How Reformer pushes the limits of language modeling | [](https://colab.research.google.com/github/patrickvonplaten/blog/blob/main/notebooks/03_reformer.ipynb)| [](https://studiolab.sagemaker.aws/import/github/patrickvonplaten/blog/blob/main/notebooks/03_reformer.ipynb)|
| [How to fine-tune a model on image classification (Torchvision)](https://github.com/huggingface/notebooks/blob/main/examples/image_classification.ipynb) | Show how to preprocess the data using Torchvision and fine-tune any pretrained Vision model on Image Classification | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)|
| [How to fine-tune a model on image classification (Albumentations)](https://github.com/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb) | Show how to preprocess the data using Albumentations and fine-tune any pretrained Vision model on Image Classification | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb)|
| [How to perform zero-shot object detection with OWL-ViT](https://github.com/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb) | Show how to perform zero-shot object detection on images with text queries| [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb)|
### TensorFlow Examples
| Notebook | Description | | |
|:----------|:-------------|:-------------|------:|
| [Train your tokenizer](https://github.com/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb) | How to train and use your very own tokenizer |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)|
| [Train your language model](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch-tf.ipynb) | How to easily start using transformers |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch-tf.ipynb)|
| [How to fine-tune a model on text classification](https://github.com/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on any GLUE task. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)|
| [How to fine-tune a model on language modeling](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on a causal or masked LM task. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)|
| [How to fine-tune a model on token classification](https://github.com/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on a token classification task (NER, PoS). | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)|
| [How to fine-tune a model on question answering](https://github.com/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on SQUAD. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)|
| [How to fine-tune a model on multiple choice](https://github.com/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on SWAG. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)|
| [How to fine-tune a model on translation](https://github.com/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on WMT. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)|
| [How to fine-tune a model on summarization](https://github.com/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb)| Show how to preprocess the data and fine-tune a pretrained model on XSUM. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb)|
### Optimum notebooks
@ -86,8 +86,10 @@ You can open any page of the documentation as a notebook in colab (there is a bu
| Notebook | Description | | |
|:----------|:-------------|:-------------|------:|
| [How to quantize a model with ONNX Runtime for text classification](https://github.com/huggingface/notebooks/blob/main/examples/text_classification_quantization_ort.ipynb)| Show how to apply static and dynamic quantization on a model using [ONNX Runtime](https://github.com/microsoft/onnxruntime) for any GLUE task. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_quantization_ort.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification_quantization_ort.ipynb)|
| [How to quantize a model with Intel Neural Compressor for text classification](https://github.com/huggingface/notebooks/blob/main/examples/text_classification_quantization_inc.ipynb)| Show how to apply static, dynamic and aware training quantization on a model using [Intel Neural Compressor (INC)](https://github.com/intel/neural-compressor) for any GLUE task. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_quantization_inc.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification_quantization_inc.ipynb)|
| [How to fine-tune a model on text classification with ONNX Runtime](https://github.com/huggingface/notebooks/blob/main/examples/text_classification_ort.ipynb)| Show how to preprocess the data and fine-tune a model on any GLUE task using [ONNX Runtime](https://github.com/microsoft/onnxruntime). | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_ort.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification_ort.ipynb)|
| [How to fine-tune a model on summarization with ONNX Runtime](https://github.com/huggingface/notebooks/blob/main/examples/summarization_ort.ipynb)| Show how to preprocess the data and fine-tune a model on XSUM using [ONNX Runtime](https://github.com/microsoft/onnxruntime). | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization_ort.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/summarization_ort.ipynb)|
## Community notebooks:
More notebooks developed by the community are available [here](https:hf.co/docs/transformers/community#community-notebooks).
More notebooks developed by the community are available [here](https://hf.co/docs/transformers/community#community-notebooks).
setup.py
@ -133,7 +133,7 @@ _deps = [
"packaging>=20.0",
"parameterized",
"phonemizer",
"protobuf<=3.20.1",
"protobuf<=3.20.2",
"psutil",
"pyyaml>=5.1",
"pydantic",

@ -148,6 +148,7 @@ _deps = [
"rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"sacrebleu>=1.4.12,<2.0.0",
"sacremoses",
"safetensors>=0.2.1",
"sagemaker>=2.31.0",
"scikit-learn",
"sentencepiece>=0.1.91,!=0.1.92",

@ -168,6 +169,10 @@ _deps = [
"unidic>=1.0.2",
"unidic_lite>=1.0.7",
"uvicorn",
"beautifulsoup4",
"sudachipy>=0.6.6",
"sudachidict_core>=20220729",
"pyknp>=0.6.1",
]

@ -237,7 +242,7 @@ class DepsTableUpdateCommand(Command):
extras = {}
extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic")
extras["ja"] = deps_list("fugashi", "ipadic", "unidic_lite", "unidic", "sudachipy", "sudachidict_core", "pyknp")
extras["sklearn"] = deps_list("scikit-learn")
extras["tf"] = deps_list("tensorflow", "onnxconverter-common", "tf2onnx", "tensorflow-text")

@ -279,6 +284,7 @@ extras["vision"] = deps_list("Pillow")
extras["timm"] = deps_list("timm")
extras["codecarbon"] = deps_list("codecarbon")
extras["sentencepiece"] = deps_list("sentencepiece", "protobuf")
extras["testing"] = (
deps_list(

@ -300,6 +306,8 @@ extras["testing"] = (
"protobuf", # Can be removed once we can unpin protobuf
"sacremoses",
"rjieba",
"safetensors",
"beautifulsoup4",
)
+ extras["retrieval"]
+ extras["modelcreation"]

@ -400,7 +408,7 @@ install_requires = [
setup(
name="transformers",
version="4.23.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
version="4.23.0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/transformers/graphs/contributors)",
author_email="transformers@huggingface.co",
description="State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow",
||||
@ -22,7 +22,7 @@
|
||||
# to defer the actual importing for when the objects are requested. This way `import transformers` provides the names
|
||||
# in the namespace without actually importing anything (and especially none of the backends).
|
||||
|
||||
__version__ = "4.23.0.dev0"
|
||||
__version__ = "4.23.0"
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
@ -210,6 +210,7 @@ _import_structure = {
|
||||
"ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP",
|
||||
"ErnieConfig",
|
||||
],
|
||||
"models.esm": ["ESM_PRETRAINED_CONFIG_ARCHIVE_MAP", "EsmConfig", "EsmTokenizer"],
|
||||
"models.flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertTokenizer"],
|
||||
"models.flava": [
|
||||
"FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP",
|
||||
@ -262,6 +263,13 @@ _import_structure = {
|
||||
"models.lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig", "LxmertTokenizer"],
|
||||
"models.m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config"],
|
||||
"models.marian": ["MarianConfig"],
|
||||
"models.markuplm": [
|
||||
"MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP",
|
||||
"MarkupLMConfig",
|
||||
"MarkupLMFeatureExtractor",
|
||||
"MarkupLMProcessor",
|
||||
"MarkupLMTokenizer",
|
||||
],
|
||||
"models.maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
|
||||
"models.mbart": ["MBartConfig"],
|
||||
"models.mbart50": [],
|
||||
@ -328,6 +336,10 @@ _import_structure = {
|
||||
"models.t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5Config"],
|
||||
"models.tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig", "TapasTokenizer"],
|
||||
"models.tapex": ["TapexTokenizer"],
|
||||
"models.time_series_transformer": [
|
||||
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
|
||||
"TimeSeriesTransformerConfig",
|
||||
],
|
||||
"models.trajectory_transformer": [
|
||||
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
|
||||
"TrajectoryTransformerConfig",
|
||||
@ -378,6 +390,13 @@ _import_structure = {
|
||||
"WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP",
|
||||
"WavLMConfig",
|
||||
],
|
||||
"models.whisper": [
|
||||
"WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP",
|
||||
"WhisperConfig",
|
||||
"WhisperFeatureExtractor",
|
||||
"WhisperProcessor",
|
||||
"WhisperTokenizer",
|
||||
],
|
||||
"models.x_clip": [
|
||||
"XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
|
||||
"XCLIPConfig",
|
||||
@ -423,6 +442,7 @@ _import_structure = {
|
||||
"VisualQuestionAnsweringPipeline",
|
||||
"ZeroShotClassificationPipeline",
|
||||
"ZeroShotImageClassificationPipeline",
|
||||
"ZeroShotObjectDetectionPipeline",
|
||||
"pipeline",
|
||||
],
|
||||
"processing_utils": ["ProcessorMixin"],
|
||||
@ -470,6 +490,7 @@ _import_structure = {
|
||||
"is_psutil_available",
|
||||
"is_py3nvml_available",
|
||||
"is_pyctcdecode_available",
|
||||
"is_safetensors_available",
|
||||
"is_scipy_available",
|
||||
"is_sentencepiece_available",
|
||||
"is_sklearn_available",
|
||||
@ -570,6 +591,7 @@ else:
|
||||
_import_structure["models.led"].append("LEDTokenizerFast")
|
||||
_import_structure["models.longformer"].append("LongformerTokenizerFast")
|
||||
_import_structure["models.lxmert"].append("LxmertTokenizerFast")
|
||||
_import_structure["models.markuplm"].append("MarkupLMTokenizerFast")
|
||||
_import_structure["models.mbart"].append("MBartTokenizerFast")
|
||||
_import_structure["models.mbart50"].append("MBart50TokenizerFast")
|
||||
_import_structure["models.mobilebert"].append("MobileBertTokenizerFast")
|
||||
@ -805,6 +827,15 @@ else:
|
||||
_import_structure["modeling_utils"] = ["PreTrainedModel"]
|
||||
|
||||
# PyTorch models structure
|
||||
|
||||
_import_structure["models.time_series_transformer"].extend(
|
||||
[
|
||||
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
|
||||
"TimeSeriesTransformerForPrediction",
|
||||
"TimeSeriesTransformerModel",
|
||||
"TimeSeriesTransformerPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.albert"].extend(
|
||||
[
|
||||
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
|
||||
@ -848,6 +879,7 @@ else:
|
||||
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING",
|
||||
"MODEL_MAPPING",
|
||||
"MODEL_WITH_LM_HEAD_MAPPING",
|
||||
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING",
|
||||
"AutoModel",
|
||||
"AutoModelForAudioClassification",
|
||||
"AutoModelForAudioFrameClassification",
|
||||
@ -875,6 +907,7 @@ else:
|
||||
"AutoModelForVision2Seq",
|
||||
"AutoModelForVisualQuestionAnswering",
|
||||
"AutoModelWithLMHead",
|
||||
"AutoModelForZeroShotObjectDetection",
|
||||
]
|
||||
)
|
||||
_import_structure["models.bart"].extend(
|
||||
@ -970,6 +1003,7 @@ else:
|
||||
"BloomPreTrainedModel",
|
||||
"BloomForSequenceClassification",
|
||||
"BloomForTokenClassification",
|
||||
"BloomForQuestionAnswering",
|
||||
]
|
||||
)
|
||||
_import_structure["models.blenderbot"].extend(
|
||||
@ -1000,6 +1034,7 @@ else:
|
||||
"CamembertForSequenceClassification",
|
||||
"CamembertForTokenClassification",
|
||||
"CamembertModel",
|
||||
"CamembertPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.canine"].extend(
|
||||
@ -1212,6 +1247,16 @@ else:
|
||||
"ErniePreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.esm"].extend(
|
||||
[
|
||||
"ESM_PRETRAINED_MODEL_ARCHIVE_LIST",
|
||||
"EsmForMaskedLM",
|
||||
"EsmForSequenceClassification",
|
||||
"EsmForTokenClassification",
|
||||
"EsmModel",
|
||||
"EsmPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.flaubert"].extend(
|
||||
[
|
||||
"FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
|
||||
@ -1488,6 +1533,16 @@ else:
|
||||
"MaskFormerPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.markuplm"].extend(
|
||||
[
|
||||
"MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST",
|
||||
"MarkupLMForQuestionAnswering",
|
||||
"MarkupLMForSequenceClassification",
|
||||
"MarkupLMForTokenClassification",
|
||||
"MarkupLMModel",
|
||||
"MarkupLMPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.mbart"].extend(
|
||||
[
|
||||
"MBartForCausalLM",
|
||||
@ -1606,6 +1661,7 @@ else:
|
||||
"OPTModel",
|
||||
"OPTPreTrainedModel",
|
||||
"OPTForSequenceClassification",
|
||||
"OPTForQuestionAnswering",
|
||||
]
|
||||
)
|
||||
_import_structure["models.owlvit"].extend(
|
||||
@ -1818,6 +1874,14 @@ else:
|
||||
"Speech2TextPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.whisper"].extend(
|
||||
[
|
||||
"WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
|
||||
"WhisperForConditionalGeneration",
|
||||
"WhisperModel",
|
||||
"WhisperPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.speech_to_text_2"].extend(["Speech2Text2ForCausalLM", "Speech2Text2PreTrainedModel"])
|
||||
_import_structure["models.splinter"].extend(
|
||||
[
|
||||
@ -2251,6 +2315,7 @@ else:
|
||||
"TFCamembertForSequenceClassification",
|
||||
"TFCamembertForTokenClassification",
|
||||
"TFCamembertModel",
|
||||
"TFCamembertPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.clip"].extend(
|
||||
@ -2689,6 +2754,14 @@ else:
|
||||
"TFWav2Vec2PreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.whisper"].extend(
|
||||
[
|
||||
"TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST",
|
||||
"TFWhisperForConditionalGeneration",
|
||||
"TFWhisperModel",
|
||||
"TFWhisperPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.xglm"].extend(
|
||||
[
|
||||
"TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
|
||||
@ -3140,6 +3213,7 @@ if TYPE_CHECKING:
|
||||
from .models.electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraTokenizer
|
||||
from .models.encoder_decoder import EncoderDecoderConfig
|
||||
from .models.ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig
|
||||
from .models.esm import ESM_PRETRAINED_CONFIG_ARCHIVE_MAP, EsmConfig, EsmTokenizer
|
||||
from .models.flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertTokenizer
|
||||
from .models.flava import (
|
||||
FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP,
|
||||
@ -3192,6 +3266,13 @@ if TYPE_CHECKING:
|
||||
from .models.lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig, LxmertTokenizer
|
||||
from .models.m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config
|
||||
from .models.marian import MarianConfig
|
||||
from .models.markuplm import (
|
||||
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
|
||||
MarkupLMConfig,
|
||||
MarkupLMFeatureExtractor,
|
||||
MarkupLMProcessor,
|
||||
MarkupLMTokenizer,
|
||||
)
|
||||
from .models.maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
|
||||
from .models.mbart import MBartConfig
|
||||
from .models.mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig, MCTCTProcessor
|
||||
@ -3248,6 +3329,10 @@ if TYPE_CHECKING:
|
||||
from .models.t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config
|
||||
from .models.tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig, TapasTokenizer
|
||||
from .models.tapex import TapexTokenizer
|
||||
from .models.time_series_transformer import (
|
||||
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
|
||||
TimeSeriesTransformerConfig,
|
||||
)
|
||||
from .models.trajectory_transformer import (
|
||||
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
|
||||
TrajectoryTransformerConfig,
|
||||
@ -3282,6 +3367,13 @@ if TYPE_CHECKING:
|
||||
from .models.wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
|
||||
from .models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
|
||||
from .models.wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
|
||||
from .models.whisper import (
|
||||
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP,
|
||||
WhisperConfig,
|
||||
WhisperFeatureExtractor,
|
||||
WhisperProcessor,
|
||||
WhisperTokenizer,
|
||||
)
|
||||
from .models.x_clip import (
|
||||
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
|
||||
XCLIPConfig,
|
||||
@ -3328,6 +3420,7 @@ if TYPE_CHECKING:
|
||||
VisualQuestionAnsweringPipeline,
|
||||
ZeroShotClassificationPipeline,
|
||||
ZeroShotImageClassificationPipeline,
|
||||
ZeroShotObjectDetectionPipeline,
|
||||
pipeline,
|
||||
)
|
||||
from .processing_utils import ProcessorMixin
|
||||
@ -3380,6 +3473,7 @@ if TYPE_CHECKING:
|
||||
is_psutil_available,
|
||||
is_py3nvml_available,
|
||||
is_pyctcdecode_available,
|
||||
is_safetensors_available,
|
||||
is_scipy_available,
|
||||
is_sentencepiece_available,
|
||||
is_sklearn_available,
|
||||
@ -3465,6 +3559,7 @@ if TYPE_CHECKING:
|
||||
from .models.led import LEDTokenizerFast
|
||||
from .models.longformer import LongformerTokenizerFast
|
||||
from .models.lxmert import LxmertTokenizerFast
|
||||
from .models.markuplm import MarkupLMTokenizerFast
|
||||
from .models.mbart import MBartTokenizerFast
|
||||
from .models.mbart50 import MBart50TokenizerFast
|
||||
from .models.mobilebert import MobileBertTokenizerFast
|
||||
@ -3691,6 +3786,7 @@ if TYPE_CHECKING:
|
||||
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
|
||||
MODEL_FOR_VISION_2_SEQ_MAPPING,
|
||||
MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING,
|
||||
MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING,
|
||||
MODEL_MAPPING,
|
||||
MODEL_WITH_LM_HEAD_MAPPING,
|
||||
AutoModel,
|
||||
@ -3719,6 +3815,7 @@ if TYPE_CHECKING:
|
||||
AutoModelForVideoClassification,
|
||||
AutoModelForVision2Seq,
|
||||
AutoModelForVisualQuestionAnswering,
|
||||
AutoModelForZeroShotObjectDetection,
|
||||
AutoModelWithLMHead,
|
||||
)
|
||||
from .models.bart import (
|
||||
@ -3800,6 +3897,7 @@ if TYPE_CHECKING:
|
||||
from .models.bloom import (
|
||||
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
BloomForCausalLM,
|
||||
BloomForQuestionAnswering,
|
||||
BloomForSequenceClassification,
|
||||
BloomForTokenClassification,
|
||||
BloomModel,
|
||||
@ -3814,6 +3912,7 @@ if TYPE_CHECKING:
|
||||
CamembertForSequenceClassification,
|
||||
CamembertForTokenClassification,
|
||||
CamembertModel,
|
||||
CamembertPreTrainedModel,
|
||||
)
|
||||
from .models.canine import (
|
||||
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
@ -3984,6 +4083,14 @@ if TYPE_CHECKING:
|
||||
ErnieModel,
|
||||
ErniePreTrainedModel,
|
||||
)
|
||||
from .models.esm import (
|
||||
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
EsmForMaskedLM,
|
||||
EsmForSequenceClassification,
|
||||
EsmForTokenClassification,
|
||||
EsmModel,
|
||||
EsmPreTrainedModel,
|
||||
)
|
||||
from .models.flaubert import (
|
||||
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
FlaubertForMultipleChoice,
|
||||
@ -4196,6 +4303,14 @@ if TYPE_CHECKING:
|
||||
M2M100PreTrainedModel,
|
||||
)
|
||||
from .models.marian import MarianForCausalLM, MarianModel, MarianMTModel
|
||||
from .models.markuplm import (
|
||||
MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
MarkupLMForQuestionAnswering,
|
||||
MarkupLMForSequenceClassification,
|
||||
MarkupLMForTokenClassification,
|
||||
MarkupLMModel,
|
||||
MarkupLMPreTrainedModel,
|
||||
)
|
||||
from .models.maskformer import (
|
||||
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
MaskFormerForInstanceSegmentation,
|
||||
@ -4302,6 +4417,7 @@ if TYPE_CHECKING:
|
||||
from .models.opt import (
|
||||
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
OPTForCausalLM,
|
||||
OPTForQuestionAnswering,
|
||||
OPTForSequenceClassification,
|
||||
OPTModel,
|
||||
OPTPreTrainedModel,
|
||||
@ -4521,6 +4637,12 @@ if TYPE_CHECKING:
|
||||
T5PreTrainedModel,
|
||||
load_tf_weights_in_t5,
|
||||
)
|
||||
from .models.time_series_transformer import (
|
||||
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
TimeSeriesTransformerForPrediction,
|
||||
TimeSeriesTransformerModel,
|
||||
TimeSeriesTransformerPreTrainedModel,
|
||||
)
|
||||
from .models.trajectory_transformer import (
|
||||
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
TrajectoryTransformerModel,
|
||||
@ -4641,6 +4763,12 @@ if TYPE_CHECKING:
|
||||
WavLMModel,
|
||||
WavLMPreTrainedModel,
|
||||
)
|
||||
from .models.whisper import (
|
||||
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
WhisperForConditionalGeneration,
|
||||
WhisperModel,
|
||||
WhisperPreTrainedModel,
|
||||
)
|
||||
from .models.x_clip import (
|
||||
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
XCLIPModel,
|
||||
@ -4864,6 +4992,7 @@ if TYPE_CHECKING:
|
||||
TFCamembertForSequenceClassification,
|
||||
TFCamembertForTokenClassification,
|
||||
TFCamembertModel,
|
||||
TFCamembertPreTrainedModel,
|
||||
)
|
||||
from .models.clip import (
|
||||
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
@ -5182,6 +5311,12 @@ if TYPE_CHECKING:
|
||||
TFWav2Vec2Model,
|
||||
TFWav2Vec2PreTrainedModel,
|
||||
)
|
||||
from .models.whisper import (
|
||||
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
TFWhisperForConditionalGeneration,
|
||||
TFWhisperModel,
|
||||
TFWhisperPreTrainedModel,
|
||||
)
|
||||
from .models.xglm import (
|
||||
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
TFXGLMForCausalLM,
|
||||
|
||||
@ -299,6 +299,8 @@ class PretrainedConfig(PushToHubMixin):
self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None)
self.suppress_tokens = kwargs.pop("suppress_tokens", None)
self.begin_suppress_tokens = kwargs.pop("begin_suppress_tokens", None)

# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
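The two new config fields mirror the generation arguments of the same name, so a model config (such as the newly added Whisper config) can ship defaults for them. A tiny sketch, with arbitrary illustrative values:

```python
from transformers import PretrainedConfig

# suppress_tokens / begin_suppress_tokens are plain kwargs popped by __init__ (see hunk above)
config = PretrainedConfig(suppress_tokens=[1, 2], begin_suppress_tokens=[3])
print(config.suppress_tokens, config.begin_suppress_tokens)  # [1, 2] [3]
```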
@ -133,7 +133,7 @@ def check_onnxruntime_requirements(minimum_version: Version):
def ensure_valid_input(model, tokens, input_names):
"""
Ensure input are presented in the correct order, without any Non
Ensure inputs are presented in the correct order, without any Non

Args:
model: The model used to forward the input data
@ -1043,6 +1043,44 @@ class XGLMConverter(SpmConverter):
)

class MarkupLMConverter(Converter):
def converted(self) -> Tokenizer:
ot = self.original_tokenizer
vocab = ot.encoder
merges = list(ot.bpe_ranks.keys())

tokenizer = Tokenizer(
BPE(
vocab=vocab,
merges=merges,
dropout=None,
continuing_subword_prefix="",
end_of_word_suffix="",
fuse_unk=False,
unk_token=self.original_tokenizer.unk_token,
)
)

tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space)
tokenizer.decoder = decoders.ByteLevel()

cls = str(self.original_tokenizer.cls_token)
sep = str(self.original_tokenizer.sep_token)
cls_token_id = self.original_tokenizer.cls_token_id
sep_token_id = self.original_tokenizer.sep_token_id

tokenizer.post_processor = processors.TemplateProcessing(
single=f"{cls} $A {sep}",
pair=f"{cls} $A {sep} $B {sep}",
special_tokens=[
(cls, cls_token_id),
(sep, sep_token_id),
],
)

return tokenizer

SLOW_TO_FAST_CONVERTERS = {
"AlbertTokenizer": AlbertConverter,
"BartTokenizer": RobertaConverter,

@ -1072,6 +1110,7 @@ SLOW_TO_FAST_CONVERTERS = {
"LongformerTokenizer": RobertaConverter,
"LEDTokenizer": RobertaConverter,
"LxmertTokenizer": BertConverter,
"MarkupLMTokenizer": MarkupLMConverter,
"MBartTokenizer": MBartConverter,
"MBart50Tokenizer": MBart50Converter,
"MPNetTokenizer": MPNetConverter,
@ -39,7 +39,7 @@ deps = {
"packaging": "packaging>=20.0",
"parameterized": "parameterized",
"phonemizer": "phonemizer",
"protobuf": "protobuf<=3.20.1",
"protobuf": "protobuf<=3.20.2",
"psutil": "psutil",
"pyyaml": "pyyaml>=5.1",
"pydantic": "pydantic",

@ -54,6 +54,7 @@ deps = {
"rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
"sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
"sacremoses": "sacremoses",
"safetensors": "safetensors>=0.2.1",
"sagemaker": "sagemaker>=2.31.0",
"scikit-learn": "scikit-learn",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",

@ -74,4 +75,8 @@ deps = {
"unidic": "unidic>=1.0.2",
"unidic_lite": "unidic_lite>=1.0.7",
"uvicorn": "uvicorn",
"beautifulsoup4": "beautifulsoup4",
"sudachipy": "sudachipy>=0.6.6",
"sudachidict_core": "sudachidict_core>=20220729",
"pyknp": "pyknp>=0.6.1",
}
@ -79,6 +79,7 @@ from .utils import (
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
@ -702,3 +702,49 @@ class LogitNormalization(LogitsProcessor, LogitsWarper):
def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor:
scores = scores.log_softmax(dim=-1)
return scores

class SuppressTokensAtBeginLogitsProcessor(LogitsProcessor):
r"""
[`SuppressTokensAtBeginLogitsProcessor`] supresses a list of tokens as soon as the `generate` function starts
generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` at not
sampled at the begining of the generation.
"""

def __init__(self, begin_suppress_tokens, begin_index):
self.begin_suppress_tokens = list(begin_suppress_tokens)
self.begin_index = begin_index

def __call__(self, input_ids, scores):
if input_ids.shape[1] == self.begin_index:
scores[:, self.begin_suppress_tokens] = -float("inf")
return scores

class SuppressTokensLogitsProcessor(LogitsProcessor):
r"""This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so that they
are not sampled."""

def __init__(self, suppress_tokens):
self.suppress_tokens = list(suppress_tokens)

def __call__(self, input_ids, scores):
scores[:, self.suppress_tokens] = -float("inf")
return scores

class ForceTokensLogitsProcessor(LogitsProcessor):
r"""This processor can be used to force a list of tokens. The processor will set their log probs to `inf` so that they
are sampled at their corresponding index."""

def __init__(self, force_token_map):
self.force_token_map = dict(force_token_map)

def __call__(self, input_ids, scores):
generation_idx = input_ids.shape[-1]
current_token = self.force_token_map.get(generation_idx, None)
if current_token is not None:
scores[:, :] = -float("inf")
scores[:, current_token] = 0
return scores
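These three processors back the new `suppress_tokens`, `begin_suppress_tokens` and `forced_decoder_ids` generation options. A minimal sketch of applying them by hand to a batch of scores; the module path, tensor shapes and token ids are illustrative assumptions, not part of the diff:

```python
import torch
from transformers.generation_logits_process import (
    LogitsProcessorList,
    SuppressTokensLogitsProcessor,
    SuppressTokensAtBeginLogitsProcessor,
    ForceTokensLogitsProcessor,
)

input_ids = torch.tensor([[0, 5, 7]])  # three tokens generated so far
scores = torch.randn(1, 10)            # toy vocabulary of 10 tokens

processors = LogitsProcessorList(
    [
        SuppressTokensLogitsProcessor([9]),                           # token 9 is never sampled
        SuppressTokensAtBeginLogitsProcessor([2, 3], begin_index=3),  # tokens 2 and 3 blocked on the first step
        ForceTokensLogitsProcessor({3: 4}),                           # position 3 must emit token 4
    ]
)
scores = processors(input_ids, scores)
```

In practice `generate` builds this list from the corresponding arguments (or from the model config), which is how the newly added Whisper models pin their task and language tokens.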
@ -504,3 +504,84 @@ class TFForcedEOSTokenLogitsProcessor(TFLogitsProcessor):
|
||||
axis=-1,
|
||||
)
|
||||
return scores
|
||||
|
||||
|
||||
class TFSuppressTokensAtBeginLogitsProcessor(TFLogitsProcessor):
|
||||
r"""
|
||||
[`TFSuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts
|
||||
generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` at not
|
||||
sampled at the begining of the generation.
|
||||
"""
|
||||
|
||||
def __init__(self, begin_suppress_tokens, begin_index):
|
||||
self.begin_suppress_tokens = list(begin_suppress_tokens)
|
||||
self.begin_index = begin_index
|
||||
|
||||
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
|
||||
scores = tf.cond(
|
||||
tf.equal(cur_len, self.begin_index),
|
||||
lambda: tf.tensor_scatter_nd_update(
|
||||
scores,
|
||||
indices=[[i, token] for i in range(scores.shape[0]) for token in self.begin_suppress_tokens],
|
||||
updates=[-float("inf") for _ in range(scores.shape[0] * len(self.begin_suppress_tokens))],
|
||||
),
|
||||
lambda: scores,
|
||||
)
|
||||
return scores
|
||||
|
||||
|
||||
class TFSuppressTokensLogitsProcessor(TFLogitsProcessor):
|
||||
r"""This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so that they
|
||||
are not sampled."""
|
||||
|
||||
def __init__(self, suppress_tokens):
|
||||
self.suppress_tokens = list(suppress_tokens)
|
||||
|
||||
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
|
||||
scores = tf.tensor_scatter_nd_update(
|
||||
scores,
|
||||
indices=[[i, token] for i in range(scores.shape[0]) for token in self.suppress_tokens],
|
||||
updates=[-float("inf") for _ in range(scores.shape[0] * len(self.suppress_tokens))],
|
||||
)
|
||||
return scores
|
||||
|
||||
|
||||
class TFForceTokensLogitsProcessor(TFLogitsProcessor):
|
||||
r"""This processor can be used to force a list of tokens. The processor will set their log probs to `0` and all
|
||||
other tokens to `-inf` so that they are sampled at their corresponding index."""
|
||||
|
||||
def __init__(self, force_token_map):
|
||||
force_token_map = dict(force_token_map)
|
||||
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
|
||||
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
|
||||
# Indexes without forced tokens will have an negative value.
|
||||
force_token_array = np.ones((max(force_token_map.keys()) + 1), dtype=np.int32) * -1
|
||||
for index, token in force_token_map.items():
|
||||
force_token_array[index] = token
|
||||
self.force_token_array = tf.convert_to_tensor(force_token_array, dtype=tf.int32)
|
||||
|
||||
def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
|
||||
def _force_token(generation_idx):
|
||||
batch_size = scores.shape[0]
|
||||
current_token = self.force_token_array[generation_idx]
|
||||
|
||||
new_scores = tf.ones_like(scores, dtype=scores.dtype) * -float("inf")
|
||||
indices = tf.stack((tf.range(batch_size), tf.tile([current_token], [batch_size])), axis=1)
|
||||
updates = tf.zeros((batch_size,), dtype=scores.dtype)
|
||||
new_scores = tf.tensor_scatter_nd_update(new_scores, indices, updates)
|
||||
return new_scores
|
||||
|
||||
scores = tf.cond(
|
||||
tf.greater_equal(cur_len, tf.shape(self.force_token_array)[0]),
|
||||
# If the current length is geq than the length of force_token_array, the processor does nothing.
|
||||
lambda: tf.identity(scores),
|
||||
# Otherwise, it may force a certain token.
|
||||
lambda: tf.cond(
|
||||
tf.greater_equal(self.force_token_array[cur_len], 0),
|
||||
# Only valid (positive) tokens are forced
|
||||
lambda: _force_token(cur_len),
|
||||
# Otherwise, the processor does nothing.
|
||||
lambda: scores,
|
||||
),
|
||||
)
|
||||
return scores
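The TF processors mirror the PyTorch ones but take an explicit `cur_len` argument so they can run under XLA. A small sketch; the module path and shapes are assumptions for illustration:

```python
import tensorflow as tf
from transformers.generation_tf_logits_process import TFSuppressTokensLogitsProcessor

scores = tf.random.normal((2, 10))            # (batch, vocab) toy logits
input_ids = tf.zeros((2, 3), dtype=tf.int32)  # three tokens generated so far
processor = TFSuppressTokensLogitsProcessor(suppress_tokens=[4, 5])
scores = processor(input_ids, scores, cur_len=3)  # columns 4 and 5 are now -inf
```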
@ -26,11 +26,14 @@ from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice
|
||||
from .generation_tf_logits_process import (
|
||||
TFForcedBOSTokenLogitsProcessor,
|
||||
TFForcedEOSTokenLogitsProcessor,
|
||||
TFForceTokensLogitsProcessor,
|
||||
TFLogitsProcessorList,
|
||||
TFMinLengthLogitsProcessor,
|
||||
TFNoBadWordsLogitsProcessor,
|
||||
TFNoRepeatNGramLogitsProcessor,
|
||||
TFRepetitionPenaltyLogitsProcessor,
|
||||
TFSuppressTokensAtBeginLogitsProcessor,
|
||||
TFSuppressTokensLogitsProcessor,
|
||||
TFTemperatureLogitsWarper,
|
||||
TFTopKLogitsWarper,
|
||||
TFTopPLogitsWarper,
|
||||
@ -401,6 +404,9 @@ class TFGenerationMixin:
|
||||
return_dict_in_generate=None,
|
||||
forced_bos_token_id=None,
|
||||
forced_eos_token_id=None,
|
||||
suppress_tokens: Optional[List[int]] = None,
|
||||
begin_suppress_tokens: Optional[List[int]] = None,
|
||||
forced_decoder_ids: Optional[List[int]] = None,
|
||||
**model_kwargs,
|
||||
) -> Union[TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]:
|
||||
r"""
|
||||
@ -494,6 +500,14 @@ class TFGenerationMixin:
|
||||
the target language token.
|
||||
forced_eos_token_id (`int`, *optional*):
|
||||
The id of the token to force as the last generated token when `max_length` is reached.
|
||||
suppress_tokens (`List[int]`, *optional*, defaults to `model.config.suppress_tokens`):
|
||||
A list of tokens that will be supressed at generation. The `SupressTokens` logit processor will set
|
||||
their log probs to `-inf` so that they are not sampled.
|
||||
begin_suppress_tokens (`List[int]`, *optional*, defaults to `model.config.begin_suppress_tokens`):
|
||||
A list of tokens that will be supressed at the begining of the generation. The `SupressBeginTokens`
|
||||
logit processor will set their log probs to `-inf` so that they are not sampled.
|
||||
forced_decoder_ids (`List[int]`, *optional*, defaults to `model.config.forced_decoder_ids`):
|
||||
A list of tokens that will be forced as beginning tokens, before sampling.
|
||||
model_specific_kwargs:
|
||||
Additional model specific kwargs will be forwarded to the `forward` function of the model.
|
||||
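A hedged usage sketch of the new arguments documented above (not part of the diff itself): `t5-small` is only an example checkpoint, and the suppressed token ids are purely illustrative.

```python
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = TFAutoModelForSeq2SeqLM.from_pretrained("t5-small")

inputs = tokenizer("translate English to German: The house is small.", return_tensors="tf")
outputs = model.generate(
    inputs["input_ids"],
    max_length=20,
    suppress_tokens=[tokenizer.unk_token_id],        # these ids can never be sampled
    begin_suppress_tokens=[tokenizer.eos_token_id],  # ...and this one not as the first generated token
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```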

@ -609,6 +623,9 @@ class TFGenerationMixin:
            return_dict_in_generate=return_dict_in_generate,
            forced_bos_token_id=forced_bos_token_id,
            forced_eos_token_id=forced_eos_token_id,
            suppress_tokens=suppress_tokens,
            begin_suppress_tokens=begin_suppress_tokens,
            forced_decoder_ids=forced_decoder_ids,
            **model_kwargs,
        )

@ -648,6 +665,12 @@ class TFGenerationMixin:
        forced_eos_token_id = (
            forced_eos_token_id if forced_eos_token_id is not None else self.config.forced_eos_token_id
        )
        suppress_tokens = suppress_tokens if suppress_tokens is not None else self.config.suppress_tokens
        begin_suppress_tokens = (
            begin_suppress_tokens if begin_suppress_tokens is not None else self.config.begin_suppress_tokens
        )
        if forced_decoder_ids is None and hasattr(self.config, "forced_decoder_ids"):
            forced_decoder_ids = self.config.forced_decoder_ids

        output_scores = output_scores if output_scores is not None else self.config.output_scores
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
@ -1368,6 +1391,9 @@ class TFGenerationMixin:
        return_dict_in_generate=None,
        forced_bos_token_id=None,
        forced_eos_token_id=None,
        suppress_tokens=None,
        begin_suppress_tokens=None,
        forced_decoder_ids=None,
        **model_kwargs,
    ) -> Union[TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]:
        r"""
@ -1461,6 +1487,15 @@ class TFGenerationMixin:
                the target language token.
            forced_eos_token_id (`int`, *optional*):
                The id of the token to force as the last generated token when `max_length` is reached.
            suppress_tokens (`List[int]`, *optional*, defaults to `model.config.suppress_tokens`):
                A list of tokens that will be suppressed at generation. The `SuppressTokens` logit processor will set
                their log probs to `-inf` so that they are not sampled.
            begin_suppress_tokens (`List[int]`, *optional*, defaults to `model.config.begin_suppress_tokens`):
                A list of tokens that will be suppressed at the beginning of the generation. The `SuppressBeginTokens`
                logit processor will set their log probs to `-inf` so that they are not sampled.
            forced_decoder_ids (`List[int]`, *optional*, defaults to `model.config.forced_decoder_ids`):
                A list of tokens that will be forced as beginning tokens.

            model_kwargs:
                Additional model specific kwargs will be forwarded to the `call` function of the model.

@ -1533,11 +1568,35 @@ class TFGenerationMixin:
        # generate sequences without allowing bad_words to be generated
        outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids)
        ```"""

        # 0. Validate the `.generate()` call
        self._validate_model_class()
        self._validate_model_kwargs(model_kwargs.copy())

        # 1. Set generation parameters if not already defined
        # 1. Cast input dtypes to tf.int32 unless they're floats (which happens for some image models)
        if input_ids is not None:
            if isinstance(input_ids, tf.Tensor) and input_ids.dtype.is_floating:
                pass
            elif isinstance(input_ids, np.ndarray) and np.issubdtype(input_ids.dtype, np.floating):
                pass
            else:
                input_ids = tf.cast(input_ids, tf.int32)
        if attention_mask is not None:
            attention_mask = tf.cast(attention_mask, tf.int32)
        if "decoder_input_ids" in model_kwargs:
            if (
                isinstance(model_kwargs["decoder_input_ids"], tf.Tensor)
                and model_kwargs["decoder_input_ids"].dtype.is_floating
            ):
                pass
            elif isinstance(model_kwargs["decoder_input_ids"], np.ndarray) and np.issubdtype(
                model_kwargs["decoder_input_ids"].dtype, np.floating
            ):
                pass
            else:
                model_kwargs["decoder_input_ids"] = tf.cast(model_kwargs["decoder_input_ids"], tf.int32)
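The casting rule above boils down to a small helper; this is a sketch under the same assumptions (TensorFlow 2.x, NumPy), not the actual implementation: integer-like inputs become `tf.int32`, floating inputs such as pixel values are left untouched.

```python
import numpy as np
import tensorflow as tf

def maybe_cast_to_int32(tensor):
    # Mirrors the branching above: only non-floating inputs are cast.
    if isinstance(tensor, tf.Tensor) and tensor.dtype.is_floating:
        return tensor
    if isinstance(tensor, np.ndarray) and np.issubdtype(tensor.dtype, np.floating):
        return tensor
    return tf.cast(tensor, tf.int32)

print(maybe_cast_to_int32(np.array([1, 2, 3], dtype=np.int64)).dtype)  # <dtype: 'int32'>
print(maybe_cast_to_int32(tf.constant([[0.1, 0.2]])).dtype)            # <dtype: 'float32'>
```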
|
||||
# 2. Set generation parameters if not already defined
|
||||
length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
|
||||
early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping
|
||||
|
||||
@ -1582,12 +1641,12 @@ class TFGenerationMixin:
|
||||
"The selected model does not support Graph mode nor XLA generation (e.g. from tf.function())"
|
||||
)
|
||||
|
||||
# 2. Define model inputs
|
||||
# 3. Define model inputs
|
||||
input_ids = self._prepare_model_inputs(input_ids, bos_token_id)
|
||||
# inputs_ids now has to be defined and cannot be None anymore
|
||||
batch_size = shape_list(input_ids)[0]
|
||||
|
||||
# 3. Prepare other model kwargs
|
||||
# 4. Prepare other model kwargs
|
||||
if output_attentions is not None:
|
||||
model_kwargs["output_attentions"] = output_attentions
|
||||
if output_hidden_states is not None:
|
||||
@ -1613,7 +1672,7 @@ class TFGenerationMixin:
|
||||
"generation results, please set `padding_side='left'` when initializing the tokenizer."
|
||||
)
|
||||
|
||||
# 4. Prepare model inputs which will be used for auto-regressive generation
|
||||
# 5. Prepare model inputs which will be used for auto-regressive generation
|
||||
if self.config.is_encoder_decoder:
|
||||
# if encoder-decoder, we create encoder_outputs and add to `model_kwargs`
|
||||
model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, model_kwargs)
|
||||
@ -1625,7 +1684,7 @@ class TFGenerationMixin:
|
||||
model_kwargs=model_kwargs,
|
||||
)
|
||||
|
||||
# 5. Prepare `max_length` depending on other stopping criteria.
|
||||
# 6. Prepare `max_length` depending on other stopping criteria.
|
||||
input_ids_seq_length = input_ids.shape[-1]
|
||||
if max_length is None and max_new_tokens is None:
|
||||
warnings.warn(
|
||||
@ -1661,25 +1720,29 @@ class TFGenerationMixin:
|
||||
"`max_new_tokens`."
|
||||
)
|
||||
|
||||
# 6. determine generation mode
|
||||
# 7. determine generation mode
|
||||
# TODO(Matt, Joao, Patrick) - add more use cases here
|
||||
is_greedy_gen_mode = (num_beams == 1) and do_sample is False
|
||||
is_sample_gen_mode = (num_beams == 1) and do_sample is True
|
||||
is_beam_gen_mode = (num_beams > 1) and do_sample is False
|
||||
|
||||
# 7. prepare distribution pre_processing samplers
|
||||
# 8. prepare distribution pre_processing samplers
|
||||
logits_processor = self._get_logits_processor(
|
||||
repetition_penalty=repetition_penalty,
|
||||
no_repeat_ngram_size=no_repeat_ngram_size,
|
||||
input_ids_seq_length=input_ids_seq_length,
|
||||
bad_words_ids=bad_words_ids,
|
||||
min_length=min_length,
|
||||
max_length=max_length,
|
||||
eos_token_id=eos_token_id,
|
||||
forced_bos_token_id=forced_bos_token_id,
|
||||
forced_eos_token_id=forced_eos_token_id,
|
||||
suppress_tokens=suppress_tokens,
|
||||
begin_suppress_tokens=begin_suppress_tokens,
|
||||
forced_decoder_ids=forced_decoder_ids,
|
||||
)
|
||||
|
||||
# 8. go into different generation modes
|
||||
# 9. go into different generation modes
|
||||
if is_greedy_gen_mode:
|
||||
if num_return_sequences > 1:
|
||||
raise ValueError(
|
||||
@ -1697,10 +1760,10 @@ class TFGenerationMixin:
|
||||
**model_kwargs,
|
||||
)
|
||||
elif is_sample_gen_mode:
|
||||
# 9. prepare logits warper
|
||||
# 10. prepare logits warper
|
||||
logits_warper = self._get_logits_warper(top_k=top_k, top_p=top_p, temperature=temperature)
|
||||
|
||||
# 10. expand input_ids with `num_return_sequences` additional sequences per batch
|
||||
# 11. expand input_ids with `num_return_sequences` additional sequences per batch
|
||||
input_ids, model_kwargs = self._expand_inputs_for_generation(
|
||||
input_ids,
|
||||
expand_size=num_return_sequences,
|
||||
@ -1708,7 +1771,7 @@ class TFGenerationMixin:
|
||||
**model_kwargs,
|
||||
)
|
||||
|
||||
# 11. run sample
|
||||
# 12. run sample
|
||||
return self.sample(
|
||||
input_ids,
|
||||
logits_processor=logits_processor,
|
||||
@ -1729,7 +1792,7 @@ class TFGenerationMixin:
|
||||
f"num_beams >= num_return_sequences, got {num_beams} and {num_return_sequences} (respectivelly)"
|
||||
)
|
||||
|
||||
# 9. broadcast inputs to the desired number of beams
|
||||
# 10. broadcast inputs to the desired number of beams
|
||||
input_ids = self._expand_to_num_beams(input_ids, num_beams=num_beams)
|
||||
|
||||
if "encoder_outputs" in model_kwargs:
|
||||
@ -1742,7 +1805,7 @@ class TFGenerationMixin:
|
||||
model_kwargs["attention_mask"], num_beams=num_beams
|
||||
)
|
||||
|
||||
# 10. run beam search
|
||||
# 11. run beam search
|
||||
return self.beam_search(
|
||||
input_ids,
|
||||
max_length=max_length,
|
||||
@ -1970,7 +2033,7 @@ class TFGenerationMixin:
|
||||
def _initialize_past(past, num_padding_values, batch_axis):
|
||||
"""initialize past with zeros -- the structure depends on `batch_axis`"""
|
||||
if batch_axis == 0:
|
||||
padding_values = tf.scatter_nd(indices=[[2, 1]], updates=[num_padding_values], shape=(4, 2))
|
||||
padding_values = tf.constant([[0, 0], [0, 0], [0, num_padding_values], [0, 0]], dtype=tf.int32)
|
||||
new_past = ()
|
||||
for past_layer in past:
|
||||
new_past_layer = list(past_layer)
|
||||
@ -2075,12 +2138,16 @@ class TFGenerationMixin:
        self,
        repetition_penalty: float,
        no_repeat_ngram_size: int,
        input_ids_seq_length: int,
        bad_words_ids: List[List[int]],
        min_length: int,
        max_length: int,
        eos_token_id: int,
        forced_bos_token_id: int,
        forced_eos_token_id: int,
        suppress_tokens: Optional[List[int]] = None,
        begin_suppress_tokens: Optional[List[int]] = None,
        forced_decoder_ids: Optional[List[int]] = None,
    ) -> TFLogitsProcessorList:
        """
        This class returns a [`TFLogitsProcessorList`] list object that contains all relevant [`TFLogitsProcessor`]
@ -2094,6 +2161,12 @@ class TFGenerationMixin:
        )
        bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids
        eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id
        suppress_tokens = suppress_tokens if suppress_tokens is not None else self.config.suppress_tokens
        begin_suppress_tokens = (
            begin_suppress_tokens if begin_suppress_tokens is not None else self.config.begin_suppress_tokens
        )
        if forced_decoder_ids is None and hasattr(self.config, "forced_decoder_ids"):
            forced_decoder_ids = self.config.forced_decoder_ids

        # instantiate processors list
        if repetition_penalty is not None and repetition_penalty != 1.0:
@ -2108,7 +2181,16 @@ class TFGenerationMixin:
            processors.append(TFForcedBOSTokenLogitsProcessor(forced_bos_token_id))
        if forced_eos_token_id is not None:
            processors.append(TFForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id))

        if suppress_tokens is not None:
            processors.append(TFSuppressTokensLogitsProcessor(suppress_tokens))
        if begin_suppress_tokens is not None:
            begin_index = input_ids_seq_length
            begin_index = begin_index if (input_ids_seq_length > 1 or forced_bos_token_id is None) else begin_index + 1
            if forced_decoder_ids is not None:
                begin_index += forced_decoder_ids[-1][0]  # generation starts after the last token that is forced
            processors.append(TFSuppressTokensAtBeginLogitsProcessor(begin_suppress_tokens, begin_index))
        if forced_decoder_ids is not None:
            processors.append(TFForceTokensLogitsProcessor(forced_decoder_ids))
        return processors

    def greedy_search(

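The `begin_index` arithmetic used in `_get_logits_processor` above (and mirrored in the PyTorch version further down) can be checked in isolation; a sketch with made-up forced ids, not library code:

```python
def compute_begin_index(input_ids_seq_length, forced_bos_token_id, forced_decoder_ids):
    begin_index = input_ids_seq_length
    if not (input_ids_seq_length > 1 or forced_bos_token_id is None):
        begin_index += 1
    if forced_decoder_ids is not None:
        begin_index += forced_decoder_ids[-1][0]  # generation starts after the last forced token
    return begin_index

# A decoder prompt of length 1, a forced BOS token, and tokens forced at positions 1 and 2:
print(compute_begin_index(1, forced_bos_token_id=0, forced_decoder_ids=[[1, 50259], [2, 50359]]))  # 4
```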
@ -30,6 +30,7 @@ from .generation_logits_process import (
|
||||
ExponentialDecayLengthPenalty,
|
||||
ForcedBOSTokenLogitsProcessor,
|
||||
ForcedEOSTokenLogitsProcessor,
|
||||
ForceTokensLogitsProcessor,
|
||||
HammingDiversityLogitsProcessor,
|
||||
InfNanRemoveLogitsProcessor,
|
||||
LogitNormalization,
|
||||
@ -39,6 +40,8 @@ from .generation_logits_process import (
|
||||
NoRepeatNGramLogitsProcessor,
|
||||
PrefixConstrainedLogitsProcessor,
|
||||
RepetitionPenaltyLogitsProcessor,
|
||||
SuppressTokensAtBeginLogitsProcessor,
|
||||
SuppressTokensLogitsProcessor,
|
||||
TemperatureLogitsWarper,
|
||||
TopKLogitsWarper,
|
||||
TopPLogitsWarper,
|
||||
@ -691,6 +694,9 @@ class GenerationMixin:
|
||||
exponential_decay_length_penalty: Tuple,
|
||||
logits_processor: Optional[LogitsProcessorList],
|
||||
renormalize_logits: Optional[bool],
|
||||
suppress_tokens: Optional[List[int]] = None,
|
||||
begin_suppress_tokens: Optional[List[int]] = None,
|
||||
forced_decoder_ids: Optional[List[int]] = None,
|
||||
) -> LogitsProcessorList:
|
||||
"""
|
||||
This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsProcessor`]
|
||||
@ -725,6 +731,12 @@ class GenerationMixin:
|
||||
if exponential_decay_length_penalty is not None
|
||||
else self.config.exponential_decay_length_penalty
|
||||
)
|
||||
suppress_tokens = suppress_tokens if suppress_tokens is not None else self.config.suppress_tokens
|
||||
begin_suppress_tokens = (
|
||||
begin_suppress_tokens if begin_suppress_tokens is not None else self.config.begin_suppress_tokens
|
||||
)
|
||||
if forced_decoder_ids is None and hasattr(self.config, "forced_decoder_ids"):
|
||||
forced_decoder_ids = self.config.forced_decoder_ids
|
||||
# instantiate processors list
|
||||
|
||||
# the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files
|
||||
@ -762,6 +774,16 @@ class GenerationMixin:
|
||||
processors.append(
|
||||
ExponentialDecayLengthPenalty(exponential_decay_length_penalty, eos_token_id, input_ids_seq_length)
|
||||
)
|
||||
if suppress_tokens is not None:
|
||||
processors.append(SuppressTokensLogitsProcessor(suppress_tokens))
|
||||
if begin_suppress_tokens is not None:
|
||||
begin_index = input_ids_seq_length
|
||||
begin_index = begin_index if (input_ids_seq_length > 1 or forced_bos_token_id is None) else begin_index + 1
|
||||
if forced_decoder_ids is not None:
|
||||
begin_index += forced_decoder_ids[-1][0] # generation starts after the last token that is forced
|
||||
processors.append(SuppressTokensAtBeginLogitsProcessor(begin_suppress_tokens, begin_index))
|
||||
if forced_decoder_ids is not None:
|
||||
processors.append(ForceTokensLogitsProcessor(forced_decoder_ids))
|
||||
processors = self._merge_criteria_processor_list(processors, logits_processor)
|
||||
# `LogitNormalization` should always be the last logit processor, when present
|
||||
if renormalize_logits is True:
|
||||
@ -931,7 +953,10 @@ class GenerationMixin:
|
||||
forced_eos_token_id: Optional[int] = None,
|
||||
remove_invalid_values: Optional[bool] = None,
|
||||
synced_gpus: Optional[bool] = False,
|
||||
exponential_decay_length_penalty: Optional[Tuple[Union[int, float]]] = None,
|
||||
exponential_decay_length_penalty: Optional[Tuple[int, float]] = None,
|
||||
suppress_tokens: Optional[List[int]] = None,
|
||||
begin_suppress_tokens: Optional[List[int]] = None,
|
||||
forced_decoder_ids: Optional[List[int]] = None,
|
||||
**model_kwargs,
|
||||
) -> Union[GreedySearchOutput, SampleOutput, BeamSearchOutput, BeamSampleOutput, torch.LongTensor]:
|
||||
r"""
|
||||
@ -1090,6 +1115,14 @@ class GenerationMixin:
|
||||
This Tuple adds an exponentially increasing length penalty, after a certain amount of tokens have been
|
||||
generated. The tuple shall consist of: `(start_index, decay_factor)` where `start_index` indicates
|
||||
where penalty starts and `decay_factor` represents the factor of exponential decay
|
||||
suppress_tokens (`List[int]`, *optional*, defaults to `model.config.suppress_tokens`):
A list of tokens that will be suppressed at generation. The `SuppressTokens` logit processor will set
their log probs to `-inf` so that they are not sampled.
begin_suppress_tokens (`List[int]`, *optional*, defaults to `model.config.begin_suppress_tokens`):
A list of tokens that will be suppressed at the beginning of the generation. The `SuppressBeginTokens`
logit processor will set their log probs to `-inf` so that they are not sampled.
forced_decoder_ids (`List[int]`, *optional*, defaults to `model.config.forced_decoder_ids`):
A list of tokens that will be forced as beginning tokens, before sampling.
|
||||
|
||||
model_kwargs:
|
||||
Additional model specific kwargs will be forwarded to the `forward` function of the model. If the model
|
||||
@ -1337,13 +1370,15 @@ class GenerationMixin:
|
||||
exponential_decay_length_penalty=exponential_decay_length_penalty,
|
||||
logits_processor=logits_processor,
|
||||
renormalize_logits=renormalize_logits,
|
||||
suppress_tokens=suppress_tokens,
|
||||
begin_suppress_tokens=begin_suppress_tokens,
|
||||
forced_decoder_ids=forced_decoder_ids,
|
||||
)
|
||||
|
||||
# 8. prepare stopping criteria
|
||||
stopping_criteria = self._get_stopping_criteria(
|
||||
max_length=max_length, max_time=max_time, stopping_criteria=stopping_criteria
|
||||
)
|
||||
|
||||
# 9. go into different generation modes
|
||||
if is_greedy_gen_mode:
|
||||
if num_return_sequences > 1:
|
||||
|
||||
@ -26,6 +26,7 @@ from typing import Any, Dict, List, Optional, Union
import requests
import yaml
from huggingface_hub import model_info
from huggingface_hub.utils import HFValidationError

from . import __version__
from .models.auto.modeling_auto import (
@ -378,7 +379,7 @@ class TrainingSummary:
            for tag in info.tags:
                if tag.startswith("license:"):
                    self.license = tag[8:]
        except requests.exceptions.HTTPError:
        except (requests.exceptions.HTTPError, HFValidationError):
            pass
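As a quick illustration of the tag parsing kept in this hunk (the only change here is the extra `HFValidationError` catch), `tag[8:]` simply strips the `"license:"` prefix:

```python
tags = ["transformers", "pytorch", "license:apache-2.0"]
license_id = next((tag[len("license:"):] for tag in tags if tag.startswith("license:")), None)
print(license_id)  # apache-2.0
```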
|
||||
|
||||
def create_model_index(self, metric_mapping):
|
||||
|
||||
@ -274,6 +274,9 @@ class TFSequenceClassificationLoss:
|
||||
def hf_compute_loss(self, labels, logits):
|
||||
if logits.shape.rank == 1 or logits.shape[1] == 1:
|
||||
loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
|
||||
if labels.shape.rank == 1:
|
||||
# MeanSquaredError returns a scalar loss if the labels are 1D, so avoid that
|
||||
labels = tf.expand_dims(labels, axis=-1)
|
||||
else:
|
||||
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
|
||||
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
|
||||
@ -1046,6 +1049,8 @@ class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, Pu
|
||||
# Save config and origin of the pretrained weights if given in model
|
||||
self.config = config
|
||||
self.name_or_path = config.name_or_path
|
||||
# Set the serving spec quickly to ensure that Keras doesn't use the specific dummy input shapes as the spec
|
||||
self._set_save_spec(self.serving.input_signature[0])
|
||||
|
||||
def get_config(self):
|
||||
return self.config.to_dict()
|
||||
@ -1094,29 +1099,6 @@ class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, Pu
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def save(
|
||||
self,
|
||||
filepath,
|
||||
overwrite=True,
|
||||
include_optimizer=True,
|
||||
save_format=None,
|
||||
signatures=None,
|
||||
options=None,
|
||||
save_traces=True,
|
||||
):
|
||||
# Very simple wrapper that ensures we set the correct serving signature when saving
|
||||
if signatures is None and hasattr(self, "serving"):
|
||||
signatures = self.serving
|
||||
super().save(
|
||||
filepath,
|
||||
overwrite=overwrite,
|
||||
include_optimizer=include_optimizer,
|
||||
save_format=save_format,
|
||||
signatures=signatures,
|
||||
options=options,
|
||||
save_traces=save_traces,
|
||||
)
|
||||
|
||||
def get_input_embeddings(self) -> tf.keras.layers.Layer:
|
||||
"""
|
||||
Returns the model's input embeddings layer.
|
||||
|
||||
@ -50,6 +50,8 @@ from .pytorch_utils import ( # noqa: F401
|
||||
from .utils import (
|
||||
DUMMY_INPUTS,
|
||||
FLAX_WEIGHTS_NAME,
|
||||
SAFE_WEIGHTS_INDEX_NAME,
|
||||
SAFE_WEIGHTS_NAME,
|
||||
TF2_WEIGHTS_NAME,
|
||||
TF_WEIGHTS_NAME,
|
||||
WEIGHTS_INDEX_NAME,
|
||||
@ -65,6 +67,7 @@ from .utils import (
|
||||
is_bitsandbytes_available,
|
||||
is_offline_mode,
|
||||
is_remote_url,
|
||||
is_safetensors_available,
|
||||
logging,
|
||||
replace_return_docstrings,
|
||||
)
|
||||
@ -86,6 +89,10 @@ if is_accelerate_available():
|
||||
else:
|
||||
get_balanced_memory = None
|
||||
|
||||
if is_safetensors_available():
|
||||
from safetensors import safe_open
|
||||
from safetensors.torch import load_file as safe_load_file
|
||||
from safetensors.torch import save_file as safe_save_file
|
||||
|
||||
logger = logging.get_logger(__name__)
|
||||
|
||||
@ -241,7 +248,9 @@ def dtype_byte_size(dtype):
|
||||
return bit_size // 8
|
||||
|
||||
|
||||
def shard_checkpoint(state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB"):
|
||||
def shard_checkpoint(
|
||||
state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME
|
||||
):
|
||||
"""
|
||||
Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
|
||||
given size.
|
||||
@ -263,6 +272,8 @@ def shard_checkpoint(state_dict: Dict[str, torch.Tensor], max_shard_size: Union[
|
||||
max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
|
||||
The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
|
||||
(like `"5MB"`).
|
||||
weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`):
|
||||
The name of the model save file.
|
||||
"""
|
||||
max_shard_size = convert_file_size_to_int(max_shard_size)
|
||||
|
||||
@ -289,13 +300,16 @@ def shard_checkpoint(state_dict: Dict[str, torch.Tensor], max_shard_size: Union[
|
||||
|
||||
# If we only have one shard, we return it
|
||||
if len(sharded_state_dicts) == 1:
|
||||
return {WEIGHTS_NAME: sharded_state_dicts[0]}, None
|
||||
return {weights_name: sharded_state_dicts[0]}, None
|
||||
|
||||
# Otherwise, let's build the index
|
||||
weight_map = {}
|
||||
shards = {}
|
||||
for idx, shard in enumerate(sharded_state_dicts):
|
||||
shard_file = WEIGHTS_NAME.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
|
||||
shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
|
||||
shard_file = shard_file.replace(
|
||||
".safetensors", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors"
|
||||
)
|
||||
shards[shard_file] = shard
|
||||
for key in shard.keys():
|
||||
weight_map[key] = shard_file
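A quick check of the shard naming scheme above, outside the library (assuming the default safetensors weights name `model.safetensors`):

```python
weights_name = "model.safetensors"  # or "pytorch_model.bin"
num_shards = 3
for idx in range(num_shards):
    shard_file = weights_name.replace(".bin", f"-{idx + 1:05d}-of-{num_shards:05d}.bin")
    shard_file = shard_file.replace(".safetensors", f"-{idx + 1:05d}-of-{num_shards:05d}.safetensors")
    print(shard_file)
# model-00001-of-00003.safetensors
# model-00002-of-00003.safetensors
# model-00003-of-00003.safetensors
```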
|
||||
@ -367,6 +381,20 @@ def load_state_dict(checkpoint_file: Union[str, os.PathLike]):
|
||||
"""
|
||||
Reads a PyTorch checkpoint file, returning properly formatted errors if they arise.
|
||||
"""
|
||||
if checkpoint_file.endswith(".safetensors") and is_safetensors_available():
|
||||
# Check format of the archive
|
||||
with safe_open(checkpoint_file, framework="pt") as f:
|
||||
metadata = f.metadata()
|
||||
if metadata.get("format") not in ["pt", "tf", "flax"]:
|
||||
raise OSError(
|
||||
f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure "
|
||||
"you save your model with the `save_pretrained` method."
|
||||
)
|
||||
elif metadata["format"] != "pt":
|
||||
raise NotImplementedError(
|
||||
f"Conversion from a {metadata['format']} safetensors archive to PyTorch is not implemented yet."
|
||||
)
|
||||
return safe_load_file(checkpoint_file)
|
||||
try:
|
||||
return torch.load(checkpoint_file, map_location="cpu")
|
||||
except Exception as e:
|
||||
@ -1468,6 +1496,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
|
||||
save_function: Callable = torch.save,
|
||||
push_to_hub: bool = False,
|
||||
max_shard_size: Union[int, str] = "10GB",
|
||||
safe_serialization: bool = False,
|
||||
**kwargs,
|
||||
):
|
||||
"""
|
||||
@ -1503,6 +1532,9 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
|
||||
|
||||
</Tip>
|
||||
|
||||
safe_serialization (`bool`, *optional*, defaults to `False`):
|
||||
Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
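A hedged end-to-end sketch of the new flag described above (requires `pip install safetensors`; `bert-base-uncased` is only an example checkpoint):

```python
from transformers import AutoModel

model = AutoModel.from_pretrained("bert-base-uncased")
model.save_pretrained("./bert-safetensors", safe_serialization=True)
# The directory now holds the weights as a *.safetensors file instead of pytorch_model.bin,
# and from_pretrained("./bert-safetensors") picks it up automatically.
```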
|
||||
|
||||
kwargs:
|
||||
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
|
||||
"""
|
||||
@ -1511,6 +1543,8 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
|
||||
"`save_config` is deprecated and will be removed in v5 of Transformers. Use `is_main_process` instead."
|
||||
)
|
||||
is_main_process = kwargs.pop("save_config")
|
||||
if safe_serialization and not is_safetensors_available():
|
||||
raise ImportError("`safe_serialization` requires the `safetensors` library: `pip install safetensors`.")
|
||||
|
||||
if os.path.isfile(save_directory):
|
||||
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
|
||||
@ -1560,15 +1594,17 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
|
||||
del state_dict[ignore_key]
|
||||
|
||||
# Shard the model if it is too big.
|
||||
shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size)
|
||||
weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
|
||||
shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name)
|
||||
|
||||
# Clean the folder from a previous save
|
||||
for filename in os.listdir(save_directory):
|
||||
full_filename = os.path.join(save_directory, filename)
|
||||
# If we have a shard file that is not going to be replaced, we delete it, but only from the main process
|
||||
# in distributed settings to avoid race conditions.
|
||||
weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "")
|
||||
if (
|
||||
filename.startswith(WEIGHTS_NAME[:-4])
|
||||
filename.startswith(weights_no_suffix)
|
||||
and os.path.isfile(full_filename)
|
||||
and filename not in shards.keys()
|
||||
and is_main_process
|
||||
@ -1577,12 +1613,18 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
|
||||
|
||||
# Save the model
|
||||
for shard_file, shard in shards.items():
|
||||
save_function(shard, os.path.join(save_directory, shard_file))
|
||||
if safe_serialization:
|
||||
# At some point we will need to deal better with save_function (used for TPU and other distributed
|
||||
# joyfulness), but for now this is enough.
|
||||
safe_save_file(shard, os.path.join(save_directory, shard_file), metadata={"format": "pt"})
|
||||
else:
|
||||
save_function(shard, os.path.join(save_directory, shard_file))
|
||||
|
||||
if index is None:
|
||||
logger.info(f"Model weights saved in {os.path.join(save_directory, WEIGHTS_NAME)}")
|
||||
else:
|
||||
save_index_file = os.path.join(save_directory, WEIGHTS_INDEX_NAME)
|
||||
save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME
|
||||
save_index_file = os.path.join(save_directory, save_index_file)
|
||||
# Save the index as well
|
||||
with open(save_index_file, "w", encoding="utf-8") as f:
|
||||
content = json.dumps(index, indent=2, sort_keys=True) + "\n"
|
||||
@ -1735,7 +1777,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
|
||||
|
||||
To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
|
||||
more information about each option see [designing a device
|
||||
map](https://hf.co/docs/accelerate/main/big_modeling#designing-a-device-map).
|
||||
map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
|
||||
max_memory (`Dict`, *optional*):
|
||||
A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
|
||||
GPU and the available CPU RAM if unset.
|
||||
@ -1966,6 +2008,17 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
|
||||
):
|
||||
# Load from a Flax checkpoint in priority if from_flax
|
||||
archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
|
||||
elif is_safetensors_available() and os.path.isfile(
|
||||
os.path.join(pretrained_model_name_or_path, subfolder, SAFE_WEIGHTS_NAME)
|
||||
):
|
||||
# Load from a safetensors checkpoint
|
||||
archive_file = os.path.join(pretrained_model_name_or_path, subfolder, SAFE_WEIGHTS_NAME)
|
||||
elif is_safetensors_available() and os.path.isfile(
|
||||
os.path.join(pretrained_model_name_or_path, subfolder, SAFE_WEIGHTS_INDEX_NAME)
|
||||
):
|
||||
# Load from a sharded safetensors checkpoint
|
||||
archive_file = os.path.join(pretrained_model_name_or_path, subfolder, SAFE_WEIGHTS_INDEX_NAME)
|
||||
is_sharded = True
|
||||
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)):
|
||||
# Load from a PyTorch checkpoint
|
||||
archive_file = os.path.join(pretrained_model_name_or_path, subfolder, WEIGHTS_NAME)
|
||||
@ -2013,6 +2066,8 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
|
||||
filename = TF2_WEIGHTS_NAME
|
||||
elif from_flax:
|
||||
filename = FLAX_WEIGHTS_NAME
|
||||
elif is_safetensors_available():
|
||||
filename = SAFE_WEIGHTS_NAME
|
||||
else:
|
||||
filename = WEIGHTS_NAME
|
||||
|
||||
@ -2033,8 +2088,21 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMix
|
||||
)
|
||||
resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)
|
||||
|
||||
# Since we set _raise_exceptions_for_missing_entries=False, we don't get an expection but a None
|
||||
# Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None
|
||||
# result when internet is up, the repo and revision exist, but the file does not.
|
||||
if resolved_archive_file is None and filename == SAFE_WEIGHTS_NAME:
|
||||
# Maybe the checkpoint is sharded, we try to grab the index name in this case.
|
||||
resolved_archive_file = cached_file(
|
||||
pretrained_model_name_or_path, SAFE_WEIGHTS_INDEX_NAME, **cached_file_kwargs
|
||||
)
|
||||
if resolved_archive_file is not None:
|
||||
is_sharded = True
|
||||
else:
|
||||
# This repo has no safetensors file of any kind, we switch to PyTorch.
|
||||
filename = WEIGHTS_NAME
|
||||
resolved_archive_file = cached_file(
|
||||
pretrained_model_name_or_path, WEIGHTS_NAME, **cached_file_kwargs
|
||||
)
|
||||
if resolved_archive_file is None and filename == WEIGHTS_NAME:
|
||||
# Maybe the checkpoint is sharded, we try to grab the index name in this case.
|
||||
resolved_archive_file = cached_file(
|
||||
|
||||
@ -60,6 +60,7 @@ from . import (
|
||||
electra,
|
||||
encoder_decoder,
|
||||
ernie,
|
||||
esm,
|
||||
flaubert,
|
||||
flava,
|
||||
fnet,
|
||||
@ -88,6 +89,7 @@ from . import (
|
||||
lxmert,
|
||||
m2m_100,
|
||||
marian,
|
||||
markuplm,
|
||||
maskformer,
|
||||
mbart,
|
||||
mbart50,
|
||||
@ -137,6 +139,7 @@ from . import (
|
||||
t5,
|
||||
tapas,
|
||||
tapex,
|
||||
time_series_transformer,
|
||||
trajectory_transformer,
|
||||
transfo_xl,
|
||||
trocr,
|
||||
@ -156,6 +159,7 @@ from . import (
|
||||
wav2vec2_phoneme,
|
||||
wav2vec2_with_lm,
|
||||
wavlm,
|
||||
whisper,
|
||||
x_clip,
|
||||
xglm,
|
||||
xlm,
|
||||
|
||||
@ -69,6 +69,7 @@ else:
|
||||
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING",
|
||||
"MODEL_MAPPING",
|
||||
"MODEL_WITH_LM_HEAD_MAPPING",
|
||||
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING",
|
||||
"AutoModel",
|
||||
"AutoModelForAudioClassification",
|
||||
"AutoModelForAudioFrameClassification",
|
||||
@ -96,6 +97,7 @@ else:
|
||||
"AutoModelForVisualQuestionAnswering",
|
||||
"AutoModelForDocumentQuestionAnswering",
|
||||
"AutoModelWithLMHead",
|
||||
"AutoModelForZeroShotObjectDetection",
|
||||
]
|
||||
|
||||
try:
|
||||
@ -215,6 +217,7 @@ if TYPE_CHECKING:
|
||||
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
|
||||
MODEL_FOR_VISION_2_SEQ_MAPPING,
|
||||
MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING,
|
||||
MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING,
|
||||
MODEL_MAPPING,
|
||||
MODEL_WITH_LM_HEAD_MAPPING,
|
||||
AutoModel,
|
||||
@ -243,6 +246,7 @@ if TYPE_CHECKING:
|
||||
AutoModelForVideoClassification,
|
||||
AutoModelForVision2Seq,
|
||||
AutoModelForVisualQuestionAnswering,
|
||||
AutoModelForZeroShotObjectDetection,
|
||||
AutoModelWithLMHead,
|
||||
)
|
||||
|
||||
|
||||
@ -64,6 +64,7 @@ CONFIG_MAPPING_NAMES = OrderedDict(
|
||||
("electra", "ElectraConfig"),
|
||||
("encoder-decoder", "EncoderDecoderConfig"),
|
||||
("ernie", "ErnieConfig"),
|
||||
("esm", "EsmConfig"),
|
||||
("flaubert", "FlaubertConfig"),
|
||||
("flava", "FlavaConfig"),
|
||||
("fnet", "FNetConfig"),
|
||||
@ -90,6 +91,7 @@ CONFIG_MAPPING_NAMES = OrderedDict(
|
||||
("lxmert", "LxmertConfig"),
|
||||
("m2m_100", "M2M100Config"),
|
||||
("marian", "MarianConfig"),
|
||||
("markuplm", "MarkupLMConfig"),
|
||||
("maskformer", "MaskFormerConfig"),
|
||||
("mbart", "MBartConfig"),
|
||||
("mctct", "MCTCTConfig"),
|
||||
@ -132,6 +134,7 @@ CONFIG_MAPPING_NAMES = OrderedDict(
|
||||
("swinv2", "Swinv2Config"),
|
||||
("t5", "T5Config"),
|
||||
("tapas", "TapasConfig"),
|
||||
("time_series_transformer", "TimeSeriesTransformerConfig"),
|
||||
("trajectory_transformer", "TrajectoryTransformerConfig"),
|
||||
("transfo-xl", "TransfoXLConfig"),
|
||||
("trocr", "TrOCRConfig"),
|
||||
@ -149,6 +152,7 @@ CONFIG_MAPPING_NAMES = OrderedDict(
|
||||
("wav2vec2", "Wav2Vec2Config"),
|
||||
("wav2vec2-conformer", "Wav2Vec2ConformerConfig"),
|
||||
("wavlm", "WavLMConfig"),
|
||||
("whisper", "WhisperConfig"),
|
||||
("xclip", "XCLIPConfig"),
|
||||
("xglm", "XGLMConfig"),
|
||||
("xlm", "XLMConfig"),
|
||||
@ -196,6 +200,7 @@ CONFIG_ARCHIVE_MAP_MAPPING_NAMES = OrderedDict(
|
||||
("dpt", "DPT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("electra", "ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("ernie", "ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("esm", "ESM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("flaubert", "FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("flava", "FLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("fnet", "FNET_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
@ -221,6 +226,7 @@ CONFIG_ARCHIVE_MAP_MAPPING_NAMES = OrderedDict(
|
||||
("luke", "LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("lxmert", "LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("m2m_100", "M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("markuplm", "MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("maskformer", "MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("mbart", "MBART_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("mctct", "MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
@ -258,6 +264,7 @@ CONFIG_ARCHIVE_MAP_MAPPING_NAMES = OrderedDict(
|
||||
("swinv2", "SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("t5", "T5_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("tapas", "TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("time_series_transformer", "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("transfo-xl", "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("unispeech", "UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("unispeech-sat", "UNISPEECH_SAT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
@ -270,6 +277,7 @@ CONFIG_ARCHIVE_MAP_MAPPING_NAMES = OrderedDict(
|
||||
("vit_msn", "VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("wav2vec2", "WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("wav2vec2-conformer", "WAV2VEC2_CONFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("whisper", "WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("xclip", "X_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("xglm", "XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
("xlm", "XLM_PRETRAINED_CONFIG_ARCHIVE_MAP"),
|
||||
@ -329,6 +337,7 @@ MODEL_NAMES_MAPPING = OrderedDict(
|
||||
("electra", "ELECTRA"),
|
||||
("encoder-decoder", "Encoder decoder"),
|
||||
("ernie", "ERNIE"),
|
||||
("esm", "ESM"),
|
||||
("flaubert", "FlauBERT"),
|
||||
("flava", "FLAVA"),
|
||||
("fnet", "FNet"),
|
||||
@ -357,6 +366,7 @@ MODEL_NAMES_MAPPING = OrderedDict(
|
||||
("lxmert", "LXMERT"),
|
||||
("m2m_100", "M2M100"),
|
||||
("marian", "Marian"),
|
||||
("markuplm", "MarkupLM"),
|
||||
("maskformer", "MaskFormer"),
|
||||
("mbart", "mBART"),
|
||||
("mbart50", "mBART-50"),
|
||||
@ -406,6 +416,7 @@ MODEL_NAMES_MAPPING = OrderedDict(
|
||||
("t5v1.1", "T5v1.1"),
|
||||
("tapas", "TAPAS"),
|
||||
("tapex", "TAPEX"),
|
||||
("time_series_transformer", "Time Series Transformer"),
|
||||
("trajectory_transformer", "Trajectory Transformer"),
|
||||
("transfo-xl", "Transformer-XL"),
|
||||
("trocr", "TrOCR"),
|
||||
@ -425,6 +436,7 @@ MODEL_NAMES_MAPPING = OrderedDict(
|
||||
("wav2vec2-conformer", "Wav2Vec2-Conformer"),
|
||||
("wav2vec2_phoneme", "Wav2Vec2Phoneme"),
|
||||
("wavlm", "WavLM"),
|
||||
("whisper", "Whisper"),
|
||||
("xclip", "X-CLIP"),
|
||||
("xglm", "XGLM"),
|
||||
("xlm", "XLM"),
|
||||
|
||||
@ -77,6 +77,7 @@ FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
|
||||
("vit_msn", "ViTFeatureExtractor"),
|
||||
("wav2vec2", "Wav2Vec2FeatureExtractor"),
|
||||
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
|
||||
("whisper", "WhisperFeatureExtractor"),
|
||||
("xclip", "CLIPFeatureExtractor"),
|
||||
("yolos", "YolosFeatureExtractor"),
|
||||
]
|
||||
|
||||
@ -63,6 +63,7 @@ MODEL_MAPPING_NAMES = OrderedDict(
|
||||
("dpt", "DPTModel"),
|
||||
("electra", "ElectraModel"),
|
||||
("ernie", "ErnieModel"),
|
||||
("esm", "EsmModel"),
|
||||
("flaubert", "FlaubertModel"),
|
||||
("flava", "FlavaModel"),
|
||||
("fnet", "FNetModel"),
|
||||
@ -89,6 +90,7 @@ MODEL_MAPPING_NAMES = OrderedDict(
|
||||
("lxmert", "LxmertModel"),
|
||||
("m2m_100", "M2M100Model"),
|
||||
("marian", "MarianModel"),
|
||||
("markuplm", "MarkupLMModel"),
|
||||
("maskformer", "MaskFormerModel"),
|
||||
("mbart", "MBartModel"),
|
||||
("mctct", "MCTCTModel"),
|
||||
@ -128,6 +130,7 @@ MODEL_MAPPING_NAMES = OrderedDict(
|
||||
("swinv2", "Swinv2Model"),
|
||||
("t5", "T5Model"),
|
||||
("tapas", "TapasModel"),
|
||||
("time_series_transformer", "TimeSeriesTransformerModel"),
|
||||
("trajectory_transformer", "TrajectoryTransformerModel"),
|
||||
("transfo-xl", "TransfoXLModel"),
|
||||
("unispeech", "UniSpeechModel"),
|
||||
@ -143,6 +146,7 @@ MODEL_MAPPING_NAMES = OrderedDict(
|
||||
("wav2vec2", "Wav2Vec2Model"),
|
||||
("wav2vec2-conformer", "Wav2Vec2ConformerModel"),
|
||||
("wavlm", "WavLMModel"),
|
||||
("whisper", "WhisperModel"),
|
||||
("xclip", "XCLIPModel"),
|
||||
("xglm", "XGLMModel"),
|
||||
("xlm", "XLMModel"),
|
||||
@ -230,6 +234,7 @@ MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
|
||||
("electra", "ElectraForMaskedLM"),
|
||||
("encoder-decoder", "EncoderDecoderModel"),
|
||||
("ernie", "ErnieForMaskedLM"),
|
||||
("esm", "EsmForMaskedLM"),
|
||||
("flaubert", "FlaubertWithLMHeadModel"),
|
||||
("fnet", "FNetForMaskedLM"),
|
||||
("fsmt", "FSMTForConditionalGeneration"),
|
||||
@ -247,6 +252,7 @@ MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
|
||||
("luke", "LukeForMaskedLM"),
|
||||
("m2m_100", "M2M100ForConditionalGeneration"),
|
||||
("marian", "MarianMTModel"),
|
||||
("markuplm", "MarkupLMForMaskedLM"),
|
||||
("megatron-bert", "MegatronBertForCausalLM"),
|
||||
("mobilebert", "MobileBertForMaskedLM"),
|
||||
("mpnet", "MPNetForMaskedLM"),
|
||||
@ -268,6 +274,7 @@ MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
|
||||
("tapas", "TapasForMaskedLM"),
|
||||
("transfo-xl", "TransfoXLLMHeadModel"),
|
||||
("wav2vec2", "Wav2Vec2ForMaskedLM"),
|
||||
("whisper", "WhisperForConditionalGeneration"),
|
||||
("xlm", "XLMWithLMHeadModel"),
|
||||
("xlm-roberta", "XLMRobertaForMaskedLM"),
|
||||
("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"),
|
||||
@ -465,6 +472,13 @@ MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict(
|
||||
]
|
||||
)
|
||||
|
||||
MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict(
|
||||
[
|
||||
# Model for Zero Shot Object Detection mapping
|
||||
("owlvit", "OwlViTForObjectDetection")
|
||||
]
|
||||
)
|
||||
|
||||
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
|
||||
[
|
||||
# Model for Seq2Seq Causal LM mapping
|
||||
@ -495,6 +509,7 @@ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
    [
        ("speech-encoder-decoder", "SpeechEncoderDecoderModel"),
        ("speech_to_text", "Speech2TextForConditionalGeneration"),
        ("whisper", "WhisperForConditionalGeneration"),
    ]
)
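Since Whisper is now registered for speech seq2seq, it resolves through the auto classes; a hedged sketch (example checkpoint, audio loading omitted):

```python
from transformers import AutoProcessor, AutoModelForSpeechSeq2Seq

processor = AutoProcessor.from_pretrained("openai/whisper-tiny")
model = AutoModelForSpeechSeq2Seq.from_pretrained("openai/whisper-tiny")
# processor(raw_audio, sampling_rate=16_000, return_tensors="pt") yields input_features for model.generate(...)
```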
|
||||
|
||||
@ -517,6 +532,7 @@ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
|
||||
("distilbert", "DistilBertForSequenceClassification"),
|
||||
("electra", "ElectraForSequenceClassification"),
|
||||
("ernie", "ErnieForSequenceClassification"),
|
||||
("esm", "EsmForSequenceClassification"),
|
||||
("flaubert", "FlaubertForSequenceClassification"),
|
||||
("fnet", "FNetForSequenceClassification"),
|
||||
("funnel", "FunnelForSequenceClassification"),
|
||||
@ -530,6 +546,7 @@ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
|
||||
("led", "LEDForSequenceClassification"),
|
||||
("longformer", "LongformerForSequenceClassification"),
|
||||
("luke", "LukeForSequenceClassification"),
|
||||
("markuplm", "MarkupLMForSequenceClassification"),
|
||||
("mbart", "MBartForSequenceClassification"),
|
||||
("megatron-bert", "MegatronBertForSequenceClassification"),
|
||||
("mobilebert", "MobileBertForSequenceClassification"),
|
||||
@ -565,6 +582,7 @@ MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
|
||||
("bert", "BertForQuestionAnswering"),
|
||||
("big_bird", "BigBirdForQuestionAnswering"),
|
||||
("bigbird_pegasus", "BigBirdPegasusForQuestionAnswering"),
|
||||
("bloom", "BloomForQuestionAnswering"),
|
||||
("camembert", "CamembertForQuestionAnswering"),
|
||||
("canine", "CanineForQuestionAnswering"),
|
||||
("convbert", "ConvBertForQuestionAnswering"),
|
||||
@ -585,6 +603,7 @@ MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
|
||||
("longformer", "LongformerForQuestionAnswering"),
|
||||
("luke", "LukeForQuestionAnswering"),
|
||||
("lxmert", "LxmertForQuestionAnswering"),
|
||||
("markuplm", "MarkupLMForQuestionAnswering"),
|
||||
("mbart", "MBartForQuestionAnswering"),
|
||||
("megatron-bert", "MegatronBertForQuestionAnswering"),
|
||||
("mobilebert", "MobileBertForQuestionAnswering"),
|
||||
@ -592,6 +611,7 @@ MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
|
||||
("mvp", "MvpForQuestionAnswering"),
|
||||
("nezha", "NezhaForQuestionAnswering"),
|
||||
("nystromformer", "NystromformerForQuestionAnswering"),
|
||||
("opt", "OPTForQuestionAnswering"),
|
||||
("qdqbert", "QDQBertForQuestionAnswering"),
|
||||
("reformer", "ReformerForQuestionAnswering"),
|
||||
("rembert", "RemBertForQuestionAnswering"),
|
||||
@ -644,6 +664,7 @@ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
|
||||
("distilbert", "DistilBertForTokenClassification"),
|
||||
("electra", "ElectraForTokenClassification"),
|
||||
("ernie", "ErnieForTokenClassification"),
|
||||
("esm", "EsmForTokenClassification"),
|
||||
("flaubert", "FlaubertForTokenClassification"),
|
||||
("fnet", "FNetForTokenClassification"),
|
||||
("funnel", "FunnelForTokenClassification"),
|
||||
@ -654,6 +675,7 @@ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
|
||||
("layoutlmv3", "LayoutLMv3ForTokenClassification"),
|
||||
("longformer", "LongformerForTokenClassification"),
|
||||
("luke", "LukeForTokenClassification"),
|
||||
("markuplm", "MarkupLMForTokenClassification"),
|
||||
("megatron-bert", "MegatronBertForTokenClassification"),
|
||||
("mobilebert", "MobileBertForTokenClassification"),
|
||||
("mpnet", "MPNetForTokenClassification"),
|
||||
@ -816,6 +838,9 @@ MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping(
|
||||
CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES
|
||||
)
|
||||
MODEL_FOR_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES)
|
||||
MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(
|
||||
CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES
|
||||
)
|
||||
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
|
||||
CONFIG_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
|
||||
)
|
||||
@ -1002,6 +1027,15 @@ class AutoModelForObjectDetection(_BaseAutoModelClass):
|
||||
AutoModelForObjectDetection = auto_class_update(AutoModelForObjectDetection, head_doc="object detection")
|
||||
|
||||
|
||||
class AutoModelForZeroShotObjectDetection(_BaseAutoModelClass):
    _model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING


AutoModelForZeroShotObjectDetection = auto_class_update(
    AutoModelForZeroShotObjectDetection, head_doc="zero-shot object detection"
)
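A hedged loading sketch for the new auto class registered above (the checkpoint name is an example; OWL-ViT is the only architecture in the zero-shot object detection mapping at this point):

```python
from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection

processor = AutoProcessor.from_pretrained("google/owlvit-base-patch32")
model = AutoModelForZeroShotObjectDetection.from_pretrained("google/owlvit-base-patch32")
# `processor` prepares images plus free-text queries; the model scores boxes for each query.
```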
|
||||
|
||||
|
||||
class AutoModelForVideoClassification(_BaseAutoModelClass):
|
||||
_model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
|
||||
|
||||
|
||||
@ -80,6 +80,7 @@ TF_MODEL_MAPPING_NAMES = OrderedDict(
|
||||
("vit", "TFViTModel"),
|
||||
("vit_mae", "TFViTMAEModel"),
|
||||
("wav2vec2", "TFWav2Vec2Model"),
|
||||
("whisper", "TFWhisperModel"),
|
||||
("xglm", "TFXGLMModel"),
|
||||
("xlm", "TFXLMModel"),
|
||||
("xlm-roberta", "TFXLMRobertaModel"),
|
||||
@ -145,6 +146,7 @@ TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
|
||||
("t5", "TFT5ForConditionalGeneration"),
|
||||
("tapas", "TFTapasForMaskedLM"),
|
||||
("transfo-xl", "TFTransfoXLLMHeadModel"),
|
||||
("whisper", "TFWhisperForConditionalGeneration"),
|
||||
("xlm", "TFXLMWithLMHeadModel"),
|
||||
("xlm-roberta", "TFXLMRobertaForMaskedLM"),
|
||||
("xlnet", "TFXLNetLMHeadModel"),
|
||||
@ -253,6 +255,7 @@ TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
|
||||
TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
|
||||
[
|
||||
("speech_to_text", "TFSpeech2TextForConditionalGeneration"),
|
||||
("whisper", "TFWhisperForConditionalGeneration"),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
@ -46,6 +46,7 @@ PROCESSOR_MAPPING_NAMES = OrderedDict(
|
||||
("layoutlmv2", "LayoutLMv2Processor"),
|
||||
("layoutlmv3", "LayoutLMv3Processor"),
|
||||
("layoutxlm", "LayoutXLMProcessor"),
|
||||
("markuplm", "MarkupLMProcessor"),
|
||||
("owlvit", "OwlViTProcessor"),
|
||||
("sew", "Wav2Vec2Processor"),
|
||||
("sew-d", "Wav2Vec2Processor"),
|
||||
@ -60,6 +61,7 @@ PROCESSOR_MAPPING_NAMES = OrderedDict(
|
||||
("wav2vec2-conformer", "Wav2Vec2Processor"),
|
||||
("wav2vec2_with_lm", "Wav2Vec2ProcessorWithLM"),
|
||||
("wavlm", "Wav2Vec2Processor"),
|
||||
("whisper", "WhisperProcessor"),
|
||||
("xclip", "CLIPProcessor"),
|
||||
]
|
||||
)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.