Mirror of https://github.com/huggingface/transformers.git (synced 2025-10-22 02:08:58 +08:00)

Compare commits: v4.23.1 ... fix-pipeli (142 commits)
.circleci/config.yml

@@ -1,65 +1,12 @@
version: 2.1
setup: true
orbs:
gcp-gke: circleci/gcp-gke@1.0.4
go: circleci/go@1.3.0

# TPU REFERENCES
references:
checkout_ml_testing: &checkout_ml_testing
run:
name: Checkout ml-testing-accelerators
command: |
git clone https://github.com/GoogleCloudPlatform/ml-testing-accelerators.git
cd ml-testing-accelerators
git fetch origin 5e88ac24f631c27045e62f0e8d5dfcf34e425e25:stable
git checkout stable
build_push_docker: &build_push_docker
run:
name: Configure Docker
command: |
gcloud --quiet auth configure-docker
cd docker/transformers-pytorch-tpu
if [ -z "$CIRCLE_PR_NUMBER" ]; then docker build --tag "$GCR_IMAGE_PATH:$CIRCLE_WORKFLOW_JOB_ID" -f Dockerfile --build-arg "TEST_IMAGE=1" . ; else docker build --tag "$GCR_IMAGE_PATH:$CIRCLE_WORKFLOW_JOB_ID" -f Dockerfile --build-arg "TEST_IMAGE=1" --build-arg "GITHUB_REF=pull/$CIRCLE_PR_NUMBER/head" . ; fi
docker push "$GCR_IMAGE_PATH:$CIRCLE_WORKFLOW_JOB_ID"
deploy_cluster: &deploy_cluster
run:
name: Deploy the job on the kubernetes cluster
command: |
go get github.com/google/go-jsonnet/cmd/jsonnet && \
export PATH=$PATH:$HOME/go/bin && \
kubectl create -f docker/transformers-pytorch-tpu/dataset.yaml || true && \
job_name=$(jsonnet -J ml-testing-accelerators/ docker/transformers-pytorch-tpu/bert-base-cased.jsonnet --ext-str image=$GCR_IMAGE_PATH --ext-str image-tag=$CIRCLE_WORKFLOW_JOB_ID | kubectl create -f -) && \
job_name=${job_name#job.batch/} && \
job_name=${job_name% created} && \
echo "Waiting on kubernetes job: $job_name" && \
i=0 && \
# 30 checks spaced 30s apart = 900s total.
max_checks=30 && \
status_code=2 && \
# Check on the job periodically. Set the status code depending on what
# happened to the job in Kubernetes. If we try max_checks times and
# still the job hasn't finished, give up and return the starting
# non-zero status code.
while [ $i -lt $max_checks ]; do ((i++)); if kubectl get jobs $job_name -o jsonpath='Failed:{.status.failed}' | grep "Failed:1"; then status_code=1 && break; elif kubectl get jobs $job_name -o jsonpath='Succeeded:{.status.succeeded}' | grep "Succeeded:1" ; then status_code=0 && break; else echo "Job not finished yet"; fi; sleep 30; done && \
echo "Done waiting. Job status code: $status_code" && \
pod_name=$(kubectl get po -l controller-uid=`kubectl get job $job_name -o "jsonpath={.metadata.labels.controller-uid}"` | awk 'match($0,!/NAME/) {print $1}') && \
echo "GKE pod name: $pod_name" && \
kubectl logs -f $pod_name --container=train
echo "Done with log retrieval attempt." && \
gcloud container images delete "$GCR_IMAGE_PATH:$CIRCLE_WORKFLOW_JOB_ID" --force-delete-tags && \
exit $status_code
delete_gke_jobs: &delete_gke_jobs
run:
name: Delete GKE Jobs
command: |
# Match jobs whose age matches patterns like '1h' or '1d', i.e. any job
# that has been around longer than 1hr. First print all columns for
# matches, then execute the delete.
kubectl get job | awk 'match($4,/[0-9]+[dh]/) {print $0}'
kubectl delete job $(kubectl get job | awk 'match($4,/[0-9]+[dh]/) {print $1}')


continuation: circleci/continuation@0.1.0

parameters:
nightly:
type: boolean
default: false

jobs:
# Fetch the tests to run
@@ -76,7 +23,7 @@ jobs:
- run: mkdir -p test_preparation
- run: python utils/tests_fetcher.py | tee tests_fetched_summary.txt
- store_artifacts:
path: ~/transformers/tests_fetched_summary.txt
path: ~/transformers/tests_fetched_summary.txt
- run: |
if [ -f test_list.txt ]; then
cp test_list.txt test_preparation/test_list.txt
@@ -90,24 +37,30 @@ jobs:
else
touch test_preparation/filtered_test_list.txt
fi
- run: python utils/tests_fetcher.py --filters tests examples | tee examples_tests_fetched_summary.txt
- run: |
if [ -f test_list.txt ]; then
mv test_list.txt test_preparation/examples_test_list.txt
else
touch test_preparation/examples_test_list.txt
fi
- store_artifacts:
path: test_preparation/test_list.txt
- store_artifacts:
path: ~/transformers/test_preparation/filtered_test_list.txt
- run: python utils/tests_fetcher.py --filters tests examples | tee examples_tests_fetched_summary.txt
- store_artifacts:
path: ~/transformers/examples_tests_fetched_summary.txt
path: test_preparation/examples_test_list.txt
- run: python .circleci/create_circleci_config.py --fetcher_folder test_preparation
- run: |
if [ -f test_list.txt ]; then
mv test_list.txt test_preparation/examples_test_list.txt
else
touch test_preparation/examples_test_list.txt
fi

- persist_to_workspace:
root: test_preparation/
paths:
test_list.txt
filtered_test_list.txt
examples_test_list.txt
if [ ! -s test_preparation/generated_config.yml ]; then
echo "No tests to run, exiting early!"
circleci-agent step halt
fi
- run: cp test_preparation/generated_config.yml test_preparation/generated_config.txt
- store_artifacts:
path: test_preparation/generated_config.txt
- continuation/continue:
configuration_path: test_preparation/generated_config.yml
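The `fetch_tests` job above is the setup half of a CircleCI dynamic config: `utils/tests_fetcher.py` decides which tests to run, `.circleci/create_circleci_config.py` turns those lists into `test_preparation/generated_config.yml`, and the `continuation/continue` step launches that file as a follow-up pipeline (halting early when nothing was generated). As a rough illustration only, and not part of the diff, the generated file has approximately the shape sketched below; the job body and test path are made up:

```python
# Illustrative sketch of the kind of config the continuation step consumes.
# The real file is produced by .circleci/create_circleci_config.py further down.
import pathlib

import yaml

pathlib.Path("test_preparation").mkdir(exist_ok=True)

generated = {
    "version": "2.1",
    # Default value is the test list computed by the fetcher (made up here).
    "parameters": {"tests_to_run": {"type": "string", "default": "tests/models/bert"}},
    # A real generated job also carries cache, install and artifact steps.
    "jobs": {"tests_torch": {"docker": [{"image": "cimg/python:3.7.12"}], "steps": ["checkout"]}},
    "workflows": {"version": 2, "run_tests": {"jobs": ["tests_torch"]}},
}

with open("test_preparation/generated_config.yml", "w") as f:
    f.write(yaml.dump(generated, indent=2, sort_keys=False))
```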
|
||||
# To run all tests for the nightly build
|
||||
fetch_all_tests:
|
||||
@ -116,516 +69,20 @@ jobs:
|
||||
- image: cimg/python:3.7.12
|
||||
parallelism: 1
|
||||
steps:
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .
|
||||
- run: |
|
||||
mkdir test_preparation
|
||||
echo "tests" > test_preparation/test_list.txt
|
||||
echo "tests" > test_preparation/examples_test_list.txt
|
||||
- run: python utils/tests_fetcher.py --filter_pipeline_tests
|
||||
- run: mv test_list.txt test_preparation/filtered_test_list.txt
|
||||
|
||||
- persist_to_workspace:
|
||||
root: test_preparation/
|
||||
paths:
|
||||
test_list.txt
|
||||
|
||||
run_tests_torch_and_tf:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
OMP_NUM_THREADS: 1
|
||||
RUN_PT_TF_CROSS_TESTS: yes
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/filtered_test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-torch_and_tf-{{ checksum "setup.py" }}
|
||||
- v0.5-torch_and_tf-
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng git-lfs
|
||||
- run: git lfs install
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html
|
||||
- run: pip install tensorflow_probability
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- run: pip install git+https://github.com/huggingface/accelerate
|
||||
- save_cache:
|
||||
key: v0.5-torch_and_tf-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_tf $(cat test_preparation/filtered_test_list.txt) -m is_pt_tf_cross_test --durations=0 | tee tests_output.txt
|
||||
- run: python .circleci/create_circleci_config.py --fetcher_folder test_preparation
|
||||
- run: cp test_preparation/generated_config.yml test_preparation/generated_config.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
run_tests_torch_and_flax:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
OMP_NUM_THREADS: 1
|
||||
RUN_PT_FLAX_CROSS_TESTS: yes
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/filtered_test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-torch_and_flax-{{ checksum "setup.py" }}
|
||||
- v0.5-torch_and_flax-
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision]
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- run: pip install git+https://github.com/huggingface/accelerate
|
||||
- save_cache:
|
||||
key: v0.5-torch_and_flax-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_torch_and_flax $(cat test_preparation/filtered_test_list.txt) -m is_pt_flax_cross_test --durations=0 | tee tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
run_tests_torch:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
OMP_NUM_THREADS: 1
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/filtered_test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-torch-{{ checksum "setup.py" }}
|
||||
- v0.5-torch-
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- run: pip install git+https://github.com/huggingface/accelerate
|
||||
- save_cache:
|
||||
key: v0.5-torch-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest -n 3 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_torch $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
run_tests_tf:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
OMP_NUM_THREADS: 1
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/filtered_test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-tf-{{ checksum "setup.py" }}
|
||||
- v0.5-tf-
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]
|
||||
- run: pip install tensorflow_probability
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- save_cache:
|
||||
key: v0.5-tf-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_tf $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
run_tests_flax:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
OMP_NUM_THREADS: 1
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/filtered_test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-flax-{{ checksum "setup.py" }}
|
||||
- v0.5-flax-
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[flax,testing,sentencepiece,flax-speech,vision]
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- save_cache:
|
||||
key: v0.5-flax-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_flax $(cat test_preparation/filtered_test_list.txt) | tee tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
run_tests_pipelines_torch:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
OMP_NUM_THREADS: 1
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-torch-{{ checksum "setup.py" }}
|
||||
- v0.5-torch-
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]
|
||||
- run: pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html
|
||||
- run: pip install https://github.com/kpu/kenlm/archive/master.zip
|
||||
- save_cache:
|
||||
key: v0.5-torch-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_torch tests/pipelines | tee tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
run_tests_pipelines_tf:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
OMP_NUM_THREADS: 1
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-tf-{{ checksum "setup.py" }}
|
||||
- v0.5-tf-
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,tf-cpu,testing,sentencepiece]
|
||||
- run: pip install tensorflow_probability
|
||||
- save_cache:
|
||||
key: v0.5-tf-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -rA -s --make-reports=tests_pipelines_tf tests/pipelines | tee tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
run_tests_custom_tokenizers:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
RUN_CUSTOM_TOKENIZERS: yes
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/filtered_test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-custom_tokenizers-{{ checksum "setup.py" }}
|
||||
- v0.5-custom_tokenizers-
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y cmake
|
||||
- run:
|
||||
name: install jumanpp
|
||||
command: |
|
||||
wget https://github.com/ku-nlp/jumanpp/releases/download/v2.0.0-rc3/jumanpp-2.0.0-rc3.tar.xz
|
||||
tar xvf jumanpp-2.0.0-rc3.tar.xz
|
||||
mkdir jumanpp-2.0.0-rc3/bld
|
||||
cd jumanpp-2.0.0-rc3/bld
|
||||
sudo cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local
|
||||
sudo make install
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba]
|
||||
- run: python -m unidic download
|
||||
- save_cache:
|
||||
key: v0.5-custom_tokenizers-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest --max-worker-restart=0 -s --make-reports=tests_custom_tokenizers ./tests/models/bert_japanese/test_tokenization_bert_japanese.py ./tests/models/openai/test_tokenization_openai.py ./tests/models/clip/test_tokenization_clip.py | tee tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
run_examples_torch:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
OMP_NUM_THREADS: 1
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/examples_test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-torch_examples-{{ checksum "setup.py" }}
|
||||
- v0.5-torch_examples-
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,torch,sentencepiece,testing,torch-speech]
|
||||
- run: pip install -r examples/pytorch/_tests_requirements.txt
|
||||
- save_cache:
|
||||
key: v0.5-torch_examples-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_torch ./examples/pytorch/ | tee tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/examples_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
run_examples_tensorflow:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
OMP_NUM_THREADS: 1
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/examples_test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-tensorflow_examples-{{ checksum "setup.py" }}
|
||||
- v0.5-tensorflow_examples-
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[sklearn,tensorflow,sentencepiece,testing]
|
||||
- run: pip install -r examples/tensorflow/_tests_requirements.txt
|
||||
- save_cache:
|
||||
key: v0.5-tensorflow_examples-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_tensorflow ./examples/tensorflow/ | tee tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tensorflow_examples_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
run_examples_flax:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
OMP_NUM_THREADS: 1
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/examples_test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-flax_examples-{{ checksum "setup.py" }}
|
||||
- v0.5-flax_examples-
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[flax,testing,sentencepiece]
|
||||
- run: pip install -r examples/flax/_tests_requirements.txt
|
||||
- save_cache:
|
||||
key: v0.5-flax_examples-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest -n 8 --max-worker-restart=0 --dist=loadfile -s --make-reports=examples_flax ./examples/flax/ | tee tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/flax_examples_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
run_tests_hub:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
HUGGINGFACE_CO_STAGING: yes
|
||||
RUN_GIT_LFS_TESTS: yes
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/filtered_test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-hub-{{ checksum "setup.py" }}
|
||||
- v0.5-hub-
|
||||
- run: sudo apt-get -y update && sudo apt-get install git-lfs
|
||||
- run: |
|
||||
git config --global user.email "ci@dummy.com"
|
||||
git config --global user.name "ci"
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[torch,sentencepiece,testing]
|
||||
- save_cache:
|
||||
key: v0.5-hub-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest --max-worker-restart=0 -sv --make-reports=tests_hub $(cat test_preparation/filtered_test_list.txt) -m is_staging_test | tee tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
run_tests_onnxruntime:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
OMP_NUM_THREADS: 1
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/filtered_test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-onnx-{{ checksum "setup.py" }}
|
||||
- v0.5-onnx-
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[torch,tf,testing,sentencepiece,onnxruntime,vision,rjieba]
|
||||
- save_cache:
|
||||
key: v0.5-onnx-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest -n 1 --max-worker-restart=0 --dist=loadfile -s --make-reports=tests_onnx $(cat test_preparation/filtered_test_list.txt) -k onnx | tee tests_output.txt
|
||||
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
path: test_preparation/generated_config.txt
|
||||
- continuation/continue:
|
||||
configuration_path: test_preparation/generated_config.yml
|
||||
|
||||
check_code_quality:
|
||||
working_directory: ~/transformers
|
||||
@ -641,7 +98,7 @@ jobs:
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-code_quality-{{ checksum "setup.py" }}
|
||||
- v0.5-code_quality-
|
||||
- v0.5-code-quality
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[all,quality]
|
||||
- save_cache:
|
||||
@ -670,7 +127,7 @@ jobs:
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-repository_consistency-{{ checksum "setup.py" }}
|
||||
- v0.5-repository_consistency-
|
||||
- v0.5-repository_consistency
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[all,quality]
|
||||
- save_cache:
|
||||
@ -687,196 +144,19 @@ jobs:
|
||||
- run: python utils/tests_fetcher.py --sanity_check
|
||||
- run: python utils/update_metadata.py --check-only
|
||||
|
||||
run_tests_layoutlmv2_and_v3:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
OMP_NUM_THREADS: 1
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
PYTEST_TIMEOUT: 120
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: ~/transformers/filtered_test_list.txt
|
||||
- run: |
|
||||
if [ ! -s test_preparation/test_list.txt ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
- restore_cache:
|
||||
keys:
|
||||
- v0.5-torch-{{ checksum "setup.py" }}
|
||||
- v0.5-torch-
|
||||
- run: sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev
|
||||
- run: pip install --upgrade pip
|
||||
- run: pip install .[torch,testing,vision]
|
||||
- run: pip install torchvision
|
||||
# The commit `36a65a0907d90ed591479b2ebaa8b61cfa0b4ef0` in `detectron2` break things.
|
||||
# See https://github.com/facebookresearch/detectron2/commit/36a65a0907d90ed591479b2ebaa8b61cfa0b4ef0#comments.
|
||||
# TODO: Revert this change back once the above issue is fixed.
|
||||
- run: python -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
|
||||
- run: sudo apt install tesseract-ocr
|
||||
- run: pip install pytesseract
|
||||
- save_cache:
|
||||
key: v0.5-torch-{{ checksum "setup.py" }}
|
||||
paths:
|
||||
- '~/.cache/pip'
|
||||
- run: python -m pytest -n 1 --max-worker-restart=0 tests/models/*layoutlmv* --dist=loadfile -s --make-reports=tests_layoutlmv2_and_v3 --durations=100
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tests_output.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/reports
|
||||
|
||||
# TPU JOBS
|
||||
run_examples_tpu:
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
environment:
|
||||
OMP_NUM_THREADS: 1
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
resource_class: xlarge
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
- go/install
|
||||
- *checkout_ml_testing
|
||||
- gcp-gke/install
|
||||
- gcp-gke/update-kubeconfig-with-credentials:
|
||||
cluster: $GKE_CLUSTER
|
||||
perform-login: true
|
||||
- setup_remote_docker
|
||||
- *build_push_docker
|
||||
- *deploy_cluster
|
||||
|
||||
cleanup-gke-jobs:
|
||||
docker:
|
||||
- image: cimg/python:3.7.12
|
||||
steps:
|
||||
- gcp-gke/install
|
||||
- gcp-gke/update-kubeconfig-with-credentials:
|
||||
cluster: $GKE_CLUSTER
|
||||
perform-login: true
|
||||
- *delete_gke_jobs
|
||||
|
||||
workflow_filters: &workflow_filters
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- main
|
||||
workflows:
|
||||
version: 2
|
||||
build_and_test:
|
||||
setup_and_quality:
|
||||
when:
|
||||
not: <<pipeline.parameters.nightly>>
|
||||
jobs:
|
||||
- check_code_quality
|
||||
- check_repository_consistency
|
||||
- fetch_tests
|
||||
- run_examples_torch:
|
||||
requires:
|
||||
- fetch_tests
|
||||
- run_examples_tensorflow:
|
||||
requires:
|
||||
- fetch_tests
|
||||
- run_examples_flax:
|
||||
requires:
|
||||
- fetch_tests
|
||||
- run_tests_custom_tokenizers:
|
||||
requires:
|
||||
- fetch_tests
|
||||
- run_tests_torch_and_tf:
|
||||
requires:
|
||||
- fetch_tests
|
||||
- run_tests_torch_and_flax:
|
||||
requires:
|
||||
- fetch_tests
|
||||
- run_tests_torch:
|
||||
requires:
|
||||
- fetch_tests
|
||||
- run_tests_tf:
|
||||
requires:
|
||||
- fetch_tests
|
||||
- run_tests_flax:
|
||||
requires:
|
||||
- fetch_tests
|
||||
- run_tests_pipelines_torch:
|
||||
requires:
|
||||
- fetch_tests
|
||||
- run_tests_pipelines_tf:
|
||||
requires:
|
||||
- fetch_tests
|
||||
- run_tests_onnxruntime:
|
||||
requires:
|
||||
- fetch_tests
|
||||
- run_tests_hub:
|
||||
requires:
|
||||
- fetch_tests
|
||||
- run_tests_layoutlmv2_and_v3:
|
||||
requires:
|
||||
- fetch_tests
|
||||
nightly:
|
||||
triggers:
|
||||
- schedule:
|
||||
cron: "0 0 * * *"
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- main
|
||||
jobs:
|
||||
- fetch_all_tests
|
||||
- run_examples_torch:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
- run_examples_tensorflow:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
- run_examples_flax:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
- run_tests_custom_tokenizers:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
- run_tests_torch_and_tf:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
- run_tests_torch_and_flax:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
- run_tests_torch:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
- run_tests_tf:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
- run_tests_flax:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
- run_tests_pipelines_torch:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
- run_tests_pipelines_tf:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
- run_tests_onnxruntime:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
- run_tests_hub:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
- run_tests_layoutlmv2_and_v3:
|
||||
requires:
|
||||
- fetch_all_tests
|
||||
|
||||
# tpu_testing_jobs:
|
||||
# triggers:
|
||||
# - schedule:
|
||||
# # Set to run at the first minute of every hour.
|
||||
# cron: "0 8 * * *"
|
||||
# filters:
|
||||
# branches:
|
||||
# only:
|
||||
# - main
|
||||
# jobs:
|
||||
# - cleanup-gke-jobs
|
||||
# - run_examples_tpu
|
||||
nightly:
|
||||
when: <<pipeline.parameters.nightly>>
|
||||
jobs:
|
||||
- check_code_quality
|
||||
- check_repository_consistency
|
||||
- fetch_all_tests
|
.circleci/create_circleci_config.py (new file, 391 lines)
@@ -0,0 +1,391 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import copy
import os
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

import yaml


COMMON_ENV_VARIABLES = {"OMP_NUM_THREADS": 1, "TRANSFORMERS_IS_CI": True, "PYTEST_TIMEOUT": 120}
COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "dist": "loadfile", "s": None}
DEFAULT_DOCKER_IMAGE = [{"image": "cimg/python:3.7.12"}]
TORCH_SCATTER_INSTALL = "pip install torch-scatter -f https://pytorch-geometric.com/whl/torch-1.12.0+cpu.html"


@dataclass
class CircleCIJob:
    name: str
    additional_env: Dict[str, Any] = None
    cache_name: str = None
    cache_version: str = "0.5"
    docker_image: List[Dict[str, str]] = None
    install_steps: List[str] = None
    marker: Optional[str] = None
    parallelism: Optional[int] = 1
    pytest_num_workers: int = 8
    pytest_options: Dict[str, Any] = None
    resource_class: Optional[str] = "xlarge"
    tests_to_run: Optional[List[str]] = None
    working_directory: str = "~/transformers"

    def __post_init__(self):
        # Deal with defaults for mutable attributes.
        if self.additional_env is None:
            self.additional_env = {}
        if self.cache_name is None:
            self.cache_name = self.name
        if self.docker_image is None:
            # Let's avoid changing the default list and make a copy.
            self.docker_image = copy.deepcopy(DEFAULT_DOCKER_IMAGE)
        if self.install_steps is None:
            self.install_steps = []
        if self.pytest_options is None:
            self.pytest_options = {}
        if isinstance(self.tests_to_run, str):
            self.tests_to_run = [self.tests_to_run]

    def to_dict(self):
        job = {
            "working_directory": self.working_directory,
            "docker": self.docker_image,
            "environment": {**COMMON_ENV_VARIABLES, **self.additional_env},
        }
        if self.resource_class is not None:
            job["resource_class"] = self.resource_class
        if self.parallelism is not None:
            job["parallelism"] = self.parallelism
        steps = [
            "checkout",
            {"attach_workspace": {"at": "~/transformers/test_preparation"}},
            {
                "restore_cache": {
                    "keys": [
                        f"v{self.cache_version}-{self.cache_name}-" + '{{ checksum "setup.py" }}',
                        f"v{self.cache_version}-{self.cache_name}-",
                    ]
                }
            },
        ]
        steps.extend([{"run": l} for l in self.install_steps])
        steps.append(
            {
                "save_cache": {
                    "key": f"v{self.cache_version}-{self.cache_name}-" + '{{ checksum "setup.py" }}',
                    "paths": ["~/.cache/pip"],
                }
            }
        )

        all_options = {**COMMON_PYTEST_OPTIONS, **self.pytest_options}
        pytest_flags = [f"--{key}={value}" if value is not None else f"-{key}" for key, value in all_options.items()]
        pytest_flags.append(
            f"--make-reports={self.name}" if "examples" in self.name else f"--make-reports=tests_{self.name}"
        )
        test_command = f"python -m pytest -n {self.pytest_num_workers} " + " ".join(pytest_flags)
        if self.tests_to_run is None:
            test_command += " << pipeline.parameters.tests_to_run >>"
        else:
            test_command += " " + " ".join(self.tests_to_run)
        if self.marker is not None:
            test_command += f" -m {self.marker}"
        test_command += " | tee tests_output.txt"
        steps.append({"run": {"name": "Run tests", "command": test_command}})
        steps.append({"store_artifacts": {"path": "~/transformers/tests_output.txt"}})
        steps.append({"store_artifacts": {"path": "~/transformers/reports"}})
        job["steps"] = steps
        return job

    @property
    def job_name(self):
        return self.name if "examples" in self.name else f"tests_{self.name}"

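A hedged usage sketch of the `CircleCIJob` dataclass above, assuming the file is importable as a module named `create_circleci_config` (for example when run from the `.circleci/` directory); the job name and install steps below are illustrative, not from the diff:

```python
# Minimal sketch: turn one CircleCIJob definition into the YAML that would
# appear under "jobs" in generated_config.yml.
import yaml

from create_circleci_config import CircleCIJob  # assumes .circleci/ is on the path

demo_job = CircleCIJob(
    "demo",  # hypothetical job name, for illustration only
    install_steps=["pip install --upgrade pip", "pip install .[torch,testing]"],
    pytest_num_workers=2,
    tests_to_run="tests/models/bert",
)

# to_dict() yields the CircleCI job definition: docker image, environment,
# restore/save cache steps, the install steps, and the final pytest command.
print(yaml.dump({demo_job.job_name: demo_job.to_dict()}, sort_keys=False))
```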
# JOBS
torch_and_tf_job = CircleCIJob(
    "torch_and_tf",
    additional_env={"RUN_PT_TF_CROSS_TESTS": True},
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng git-lfs",
        "git lfs install",
        "pip install --upgrade pip",
        "pip install .[sklearn,tf-cpu,torch,testing,sentencepiece,torch-speech,vision]",
        TORCH_SCATTER_INSTALL,
        "pip install tensorflow_probability",
        "pip install https://github.com/kpu/kenlm/archive/master.zip",
        "pip install git+https://github.com/huggingface/accelerate",
    ],
    marker="is_pt_tf_cross_test",
    pytest_options={"rA": None, "durations": 0},
)


torch_and_flax_job = CircleCIJob(
    "torch_and_flax",
    additional_env={"RUN_PT_FLAX_CROSS_TESTS": True},
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
        "pip install --upgrade pip",
        "pip install .[sklearn,flax,torch,testing,sentencepiece,torch-speech,vision]",
        TORCH_SCATTER_INSTALL,
        "pip install https://github.com/kpu/kenlm/archive/master.zip",
        "pip install git+https://github.com/huggingface/accelerate",
    ],
    marker="is_pt_flax_cross_test",
    pytest_options={"rA": None, "durations": 0},
)


torch_job = CircleCIJob(
    "torch",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng time",
        "pip install --upgrade pip",
        "pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]",
        TORCH_SCATTER_INSTALL,
        "pip install https://github.com/kpu/kenlm/archive/master.zip",
        "pip install git+https://github.com/huggingface/accelerate",
    ],
    pytest_num_workers=3,
)


tf_job = CircleCIJob(
    "tf",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
        "pip install --upgrade pip",
        "pip install .[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]",
        "pip install tensorflow_probability",
        "pip install https://github.com/kpu/kenlm/archive/master.zip",
    ],
    pytest_options={"rA": None},
)


flax_job = CircleCIJob(
    "flax",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
        "pip install --upgrade pip",
        "pip install .[flax,testing,sentencepiece,flax-speech,vision]",
        "pip install https://github.com/kpu/kenlm/archive/master.zip",
    ],
    pytest_options={"rA": None},
)


pipelines_torch_job = CircleCIJob(
    "pipelines_torch",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
        "pip install --upgrade pip",
        "pip install .[sklearn,torch,testing,sentencepiece,torch-speech,vision,timm]",
        TORCH_SCATTER_INSTALL,
        "pip install https://github.com/kpu/kenlm/archive/master.zip",
    ],
    pytest_options={"rA": None},
    tests_to_run="tests/pipelines/"
)


pipelines_tf_job = CircleCIJob(
    "pipelines_tf",
    install_steps=[
        "pip install --upgrade pip",
        "pip install .[sklearn,tf-cpu,testing,sentencepiece]",
        "pip install tensorflow_probability",
    ],
    pytest_options={"rA": None},
    tests_to_run="tests/pipelines/"
)


custom_tokenizers_job = CircleCIJob(
    "custom_tokenizers",
    additional_env={"RUN_CUSTOM_TOKENIZERS": True},
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y cmake",
        {
            "name": "install jumanpp",
            "command":
                "wget https://github.com/ku-nlp/jumanpp/releases/download/v2.0.0-rc3/jumanpp-2.0.0-rc3.tar.xz\n"
                "tar xvf jumanpp-2.0.0-rc3.tar.xz\n"
                "mkdir jumanpp-2.0.0-rc3/bld\n"
                "cd jumanpp-2.0.0-rc3/bld\n"
                "sudo cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=/usr/local\n"
                "sudo make install\n",
        },
        "pip install --upgrade pip",
        "pip install .[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba]",
        "python -m unidic download",
    ],
    parallelism=None,
    resource_class=None,
    tests_to_run=[
        "./tests/models/bert_japanese/test_tokenization_bert_japanese.py",
        "./tests/models/openai/test_tokenization_openai.py",
        "./tests/models/clip/test_tokenization_clip.py",
    ],
)


examples_torch_job = CircleCIJob(
    "examples_torch",
    cache_name="torch_examples",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev espeak-ng",
        "pip install --upgrade pip",
        "pip install .[sklearn,torch,sentencepiece,testing,torch-speech]",
        "pip install -r examples/pytorch/_tests_requirements.txt",
    ],
    tests_to_run="./examples/pytorch/",
)


examples_tensorflow_job = CircleCIJob(
    "examples_tensorflow",
    cache_name="tensorflow_examples",
    install_steps=[
        "pip install --upgrade pip",
        "pip install .[sklearn,tensorflow,sentencepiece,testing]",
        "pip install -r examples/tensorflow/_tests_requirements.txt",
    ],
    tests_to_run="./examples/tensorflow/",
)


examples_flax_job = CircleCIJob(
    "examples_flax",
    cache_name="flax_examples",
    install_steps=[
        "pip install --upgrade pip",
        "pip install .[flax,testing,sentencepiece]",
        "pip install -r examples/flax/_tests_requirements.txt",
    ],
    tests_to_run="./examples/flax/",
)


hub_job = CircleCIJob(
    "hub",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install git-lfs",
        'git config --global user.email "ci@dummy.com"',
        'git config --global user.name "ci"',
        "pip install --upgrade pip",
        "pip install .[torch,sentencepiece,testing]",
    ],
    marker="is_staging_test",
    pytest_num_workers=1,
)


onnx_job = CircleCIJob(
    "onnx",
    install_steps=[
        "pip install --upgrade pip",
        "pip install .[torch,tf,testing,sentencepiece,onnxruntime,vision,rjieba]",
    ],
    pytest_options={"k onnx": None},
    pytest_num_workers=1,
)


layoutlm_job = CircleCIJob(
    "layoutlmv2_and_v3",
    install_steps=[
        "sudo apt-get -y update && sudo apt-get install -y libsndfile1-dev",
        "pip install --upgrade pip",
        "pip install .[torch,testing,vision]",
        "pip install torchvision",
        "pip install 'git+https://github.com/facebookresearch/detectron2.git'",
        "sudo apt install tesseract-ocr",
        "pip install pytesseract",
    ],
    tests_to_run="tests/models/*layoutlmv*",
    pytest_num_workers=1,
    pytest_options={"durations": 100},
)


REGULAR_TESTS = [
    torch_and_tf_job,
    torch_and_flax_job,
    torch_job,
    tf_job,
    flax_job,
    custom_tokenizers_job,
    hub_job,
    onnx_job,
    layoutlm_job,
]
EXAMPLES_TESTS = [
    examples_torch_job,
    examples_tensorflow_job,
    examples_flax_job,
]
PIPELINE_TESTS = [
    pipelines_torch_job,
    pipelines_tf_job,
]

def create_circleci_config(folder=None):
    if folder is None:
        folder = os.getcwd()
    jobs = []
    all_test_file = os.path.join(folder, "test_list.txt")
    if os.path.exists(all_test_file):
        with open(all_test_file) as f:
            all_test_list = f.read()
    else:
        all_test_list = []
    if len(all_test_list) > 0:
        jobs.extend(PIPELINE_TESTS)

    test_file = os.path.join(folder, "filtered_test_list.txt")
    if os.path.exists(test_file):
        with open(test_file) as f:
            test_list = f.read()
    else:
        test_list = []
    if len(test_list) > 0:
        jobs.extend(REGULAR_TESTS)

    example_file = os.path.join(folder, "examples_test_list.txt")
    if os.path.exists(example_file) and os.path.getsize(example_file) > 0:
        jobs.extend(EXAMPLES_TESTS)

    if len(jobs) > 0:
        config = {"version": "2.1"}
        config["parameters"] = {"tests_to_run": {"type": "string", "default": test_list}}
        config["jobs"] = {j.job_name: j.to_dict() for j in jobs}
        config["workflows"] = {"version": 2, "run_tests": {"jobs": [j.job_name for j in jobs]}}
        with open(os.path.join(folder, "generated_config.yml"), "w") as f:
            f.write(yaml.dump(config, indent=2, width=1000000, sort_keys=False))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--fetcher_folder", type=str, default=None, help="Only test that all tests and modules are accounted for."
    )
    args = parser.parse_args()

    create_circleci_config(args.fetcher_folder)
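Outside CI, the generator can be driven the same way the `fetch_tests` job drives it. The sketch below fakes the list files that `utils/tests_fetcher.py` normally writes (the sample test path is illustrative) and assumes PyYAML is installed and the repository root is the working directory:

```python
# Local dry run of the generator, mirroring the fetch_tests job shown earlier.
import pathlib
import subprocess

prep = pathlib.Path("test_preparation")
prep.mkdir(exist_ok=True)
# In CI these files come from utils/tests_fetcher.py; here they are faked.
(prep / "test_list.txt").write_text("tests/models/bert\n")
(prep / "filtered_test_list.txt").write_text("tests/models/bert\n")
(prep / "examples_test_list.txt").write_text("")

subprocess.run(
    ["python", ".circleci/create_circleci_config.py", "--fetcher_folder", "test_preparation"],
    check=True,
)
# The continuation step would launch this file as the follow-up pipeline.
print((prep / "generated_config.yml").read_text())
```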
.github/workflows/build-docker-images.yml (vendored, 26 lines changed)
@@ -43,6 +43,19 @@ jobs:
REF=main
push: true
tags: huggingface/transformers-all-latest-gpu${{ inputs.image_postfix }}
# Push CI images still need to be re-built daily
-
name: Build and push (for Push CI) in a daily basis
# This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
# The later case is useful for manual image building for debugging purpose. Use another tag in this case!
if: inputs.image_postfix != '-push-ci'
uses: docker/build-push-action@v2
with:
context: ./docker/transformers-all-latest-gpu
build-args: |
REF=main
push: true
tags: huggingface/transformers-all-latest-gpu-push-ci

latest-with-torch-nightly-docker:
name: "Nightly PyTorch + Stable TensorFlow"
@@ -98,6 +111,19 @@ jobs:
REF=main
push: true
tags: huggingface/transformers-pytorch-deepspeed-latest-gpu${{ inputs.image_postfix }}
# Push CI images still need to be re-built daily
-
name: Build and push (for Push CI) in a daily basis
# This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
# The later case is useful for manual image building for debugging purpose. Use another tag in this case!
if: inputs.image_postfix != '-push-ci'
uses: docker/build-push-action@v2
with:
context: ./docker/transformers-pytorch-deepspeed-latest-gpu
build-args: |
REF=main
push: true
tags: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci

nightly-torch-deepspeed-docker:
name: "Nightly PyTorch + DeepSpeed"
.github/workflows/self-push.yml (vendored, 7 lines changed)
@@ -117,7 +117,7 @@ jobs:
# TODO: add `git-python` in the docker images
run: |
pip install --upgrade git-python
python utils/tests_fetcher.py --diff_with_last_commit | tee test_preparation.txt
python3 utils/tests_fetcher.py --diff_with_last_commit | tee test_preparation.txt

- name: Report fetched tests
uses: actions/upload-artifact@v2
@@ -526,6 +526,11 @@ jobs:
echo "env.CI_SHA = ${{ env.CI_SHA }}"

- uses: actions/checkout@v2
# To avoid failure when multiple commits are merged into `main` in a short period of time.
# Checking out to an old commit beyond the fetch depth will get an error `fatal: reference is not a tree: ...
# (Only required for `workflow_run` event, where we get the latest HEAD on `main` instead of the event commit)
with:
fetch-depth: 20

- name: Update clone using environment variables
run: |
README.md (20 lines changed)
@@ -43,7 +43,8 @@ limitations under the License.
<b>English</b> |
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a>
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a>
<p>
</h4>

@@ -133,7 +134,7 @@ Many tasks have a pre-trained `pipeline` ready to go, in NLP but also in compute
>>> image = Image.open(image_data)

# Allocate a pipeline for object detection
>>> object_detector = pipeline('object_detection')
>>> object_detector = pipeline('object-detection')
>>> object_detector(image)
[{'score': 0.9982201457023621,
'label': 'remote',
@@ -227,7 +228,7 @@ You should install 🤗 Transformers in a [virtual environment](https://docs.pyt
First, create a virtual environment with the version of Python you're going to use and activate it.

Then, you will need to install at least one of Flax, PyTorch or TensorFlow.
Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/), [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or [Flax](https://github.com/google/flax#quick-install) and [Jax](https://github.com/google/jax#installation) installation pages regarding the specific install command for your platform.
Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/), [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or [Flax](https://github.com/google/flax#quick-install) and [Jax](https://github.com/google/jax#installation) installation pages regarding the specific installation command for your platform.

When one of those backends has been installed, 🤗 Transformers can be installed using pip as follows:

@ -278,7 +279,7 @@ Current number of checkpoints: ** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
|
||||
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
|
||||
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
|
||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
|
||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
|
||||
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
|
||||
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
|
||||
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
|
||||
@ -300,7 +301,7 @@ Current number of checkpoints: ** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
|
||||
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
|
||||
1. **[ESM](https://huggingface.co/docs/transformers/main/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
||||
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
||||
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
||||
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
|
||||
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
|
||||
@@ -322,6 +323,7 @@ Current number of checkpoints:
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
|
||||
1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
|
||||
1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze.
|
||||
1. **[LiLT](https://huggingface.co/docs/transformers/main/model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
|
||||
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
|
||||
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
|
||||
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
|
||||
@@ -329,7 +331,7 @@ Current number of checkpoints:
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
|
||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
|
||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
|
||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/main/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
|
||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
|
||||
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
|
||||
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
|
||||
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
|
||||
@@ -375,7 +377,7 @@ Current number of checkpoints:
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
|
||||
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
|
||||
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace).
|
||||
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
|
||||
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
|
||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
|
||||
@@ -388,12 +390,12 @@ Current number of checkpoints:
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
|
||||
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
|
||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
|
||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
|
||||
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
|
||||
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
|
||||
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
|
||||
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
|
||||
1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
|
||||
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
|
||||
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
|
||||
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
|
||||
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
|
||||
|
444
README_es.md
Normal file
@@ -0,0 +1,444 @@
|
||||
<!---
|
||||
Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
-->
|
||||
|
||||
<p align="center">
|
||||
<br>
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers_logo_name.png" width="400"/>
|
||||
<br>
|
||||
<p>
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<b>Español</b>
|
||||
<p>
|
||||
</h4>
|
||||
|
||||
<h3 align="center">
|
||||
<p>Lo último de Machine Learning para JAX, PyTorch y TensorFlow</p>
|
||||
</h3>
|
||||
|
||||
<h3 align="center">
|
||||
<a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a>
|
||||
</h3>
|
||||
|
||||
🤗 Transformers aporta miles de modelos preentrenados para realizar tareas en diferentes modalidades como texto, visión y audio.
|
||||
|
||||
Estos modelos pueden ser aplicados en:
|
||||
|
||||
* 📝 Texto, para tareas como clasificación de texto, extracción de información, respuesta a preguntas, resumen, traducción y generación de texto, en más de 100 idiomas.
|
||||
* 🖼️ Imágenes, para tareas como clasificación de imágenes, detección de objetos y segmentación.
|
||||
* 🗣️ Audio, para tareas como reconocimiento de voz y clasificación de audio.
|
||||
|
||||
Los modelos de Transformer también pueden realizar tareas en **muchas modalidades combinadas**, como responder preguntas, reconocimiento óptico de caracteres, extracción de información de documentos escaneados, clasificación de video y respuesta a preguntas visuales.
|
||||
|
||||
🤗 Transformers aporta APIs para descargar rápidamente y usar estos modelos preentrenados en un texto dado, afinarlos en tus propios sets de datos y compartirlos con la comunidad en nuestro [centro de modelos](https://huggingface.co/models). Al mismo tiempo, cada módulo de Python que define una arquitectura es completamente independiente y se puede modificar para permitir experimentos de investigación rápidos.
|
||||
|
||||
🤗 Transformers está respaldado por las tres bibliotecas de deep learning más populares — [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/) y [TensorFlow](https://www.tensorflow.org/) — con una integración perfecta entre ellas. Es sencillo entrenar tus modelos con una antes de cargarlos para la inferencia con la otra.
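
Como ilustración, aquí hay un esquema mínimo de esa interoperabilidad (el directorio `./mi-modelo-local` es solo un nombre de ejemplo): se guarda un modelo con PyTorch y se vuelve a cargar en TensorFlow con `from_pt=True`.

```python
>>> from transformers import AutoModel, TFAutoModel

# Se carga (o entrena) un modelo en PyTorch y se guarda en disco
>>> pt_model = AutoModel.from_pretrained("bert-base-uncased")
>>> pt_model.save_pretrained("./mi-modelo-local")

# Los mismos pesos se pueden cargar después en TensorFlow para la inferencia
>>> tf_model = TFAutoModel.from_pretrained("./mi-modelo-local", from_pt=True)
```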
|
||||
|
||||
## Demostraciones en línea
|
||||
|
||||
Puedes probar la mayoría de nuestros modelos directamente en sus páginas desde el [centro de modelos](https://huggingface.co/models). También ofrecemos [alojamiento de modelos privados, control de versiones y una API de inferencia](https://huggingface.co/pricing) para modelos públicos y privados.
|
||||
|
||||
Aquí hay algunos ejemplos:
|
||||
|
||||
En procesamiento del lenguaje natural:
|
||||
- [Terminación de palabras enmascaradas con BERT](https://huggingface.co/bert-base-uncased?text=Paris+is+the+%5BMASK%5D+of+France)
|
||||
- [Reconocimiento de entidades nombradas con Electra](https://huggingface.co/dbmdz/electra-large-discriminator-finetuned-conll03-english?text=My+name+is+Sarah+and+I+live+in+London+city)
|
||||
- [Generación de texto con GPT-2](https://huggingface.co/gpt2?text=A+long+time+ago%2C+)
|
||||
- [Inferencia del lenguaje natural con RoBERTa](https://huggingface.co/roberta-large-mnli?text=The+dog+was+lost.+Nobody+lost+any+animal)
|
||||
- [Resumen con BART](https://huggingface.co/facebook/bart-large-cnn?text=The+tower+is+324+metres+%281%2C063+ft%29+tall%2C+about+the+same+height+as+an+81-storey+building%2C+and+the+tallest+structure+in+Paris.+Its+base+is+square%2C+measuring+125+metres+%28410+ft%29+on+each+side.+During+its+construction%2C+the+Eiffel+Tower+surpassed+the+Washington+Monument+to+become+the+tallest+man-made+structure+in+the+world%2C+a+title+it+held+for+41+years+until+the+Chrysler+Building+in+New+York+City+was+finished+in+1930.+It+was+the+first+structure+to+reach+a+height+of+300+metres.+Due+to+the+addition+of+a+broadcasting+aerial+at+the+top+of+the+tower+in+1957%2C+it+is+now+taller+than+the+Chrysler+Building+by+5.2+metres+%2817+ft%29.+Excluding+transmitters%2C+the+Eiffel+Tower+is+the+second+tallest+free-standing+structure+in+France+after+the+Millau+Viaduct)
|
||||
- [Responder a preguntas con DistilBERT](https://huggingface.co/distilbert-base-uncased-distilled-squad?text=Which+name+is+also+used+to+describe+the+Amazon+rainforest+in+English%3F&context=The+Amazon+rainforest+%28Portuguese%3A+Floresta+Amaz%C3%B4nica+or+Amaz%C3%B4nia%3B+Spanish%3A+Selva+Amaz%C3%B3nica%2C+Amazon%C3%ADa+or+usually+Amazonia%3B+French%3A+For%C3%AAt+amazonienne%3B+Dutch%3A+Amazoneregenwoud%29%2C+also+known+in+English+as+Amazonia+or+the+Amazon+Jungle%2C+is+a+moist+broadleaf+forest+that+covers+most+of+the+Amazon+basin+of+South+America.+This+basin+encompasses+7%2C000%2C000+square+kilometres+%282%2C700%2C000+sq+mi%29%2C+of+which+5%2C500%2C000+square+kilometres+%282%2C100%2C000+sq+mi%29+are+covered+by+the+rainforest.+This+region+includes+territory+belonging+to+nine+nations.+The+majority+of+the+forest+is+contained+within+Brazil%2C+with+60%25+of+the+rainforest%2C+followed+by+Peru+with+13%25%2C+Colombia+with+10%25%2C+and+with+minor+amounts+in+Venezuela%2C+Ecuador%2C+Bolivia%2C+Guyana%2C+Suriname+and+French+Guiana.+States+or+departments+in+four+nations+contain+%22Amazonas%22+in+their+names.+The+Amazon+represents+over+half+of+the+planet%27s+remaining+rainforests%2C+and+comprises+the+largest+and+most+biodiverse+tract+of+tropical+rainforest+in+the+world%2C+with+an+estimated+390+billion+individual+trees+divided+into+16%2C000+species)
|
||||
- [Traducción con T5](https://huggingface.co/t5-base?text=My+name+is+Wolfgang+and+I+live+in+Berlin)
|
||||
|
||||
En visión por ordenador:
|
||||
- [Clasificación de imágenes con ViT](https://huggingface.co/google/vit-base-patch16-224)
|
||||
- [Detección de objetos con DETR](https://huggingface.co/facebook/detr-resnet-50)
|
||||
- [Segmentación semántica con SegFormer](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512)
|
||||
- [Segmentación panóptica con DETR](https://huggingface.co/facebook/detr-resnet-50-panoptic)
|
||||
|
||||
En audio:
|
||||
- [Reconocimiento de voz automático con Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base-960h)
|
||||
- [Detección de palabras clave con Wav2Vec2](https://huggingface.co/superb/wav2vec2-base-superb-ks)
|
||||
|
||||
En tareas multimodales:
|
||||
- [Respuesta visual a preguntas con ViLT](https://huggingface.co/dandelin/vilt-b32-finetuned-vqa)
|
||||
|
||||
**[Escribe con Transformer](https://transformer.huggingface.co)**, construido por el equipo de Hugging Face, es la demostración oficial de las capacidades de generación de texto de este repositorio.
|
||||
|
||||
## Si está buscando soporte personalizado del equipo de Hugging Face
|
||||
|
||||
<a target="_blank" href="https://huggingface.co/support">
|
||||
<img alt="HuggingFace Expert Acceleration Program" src="https://cdn-media.huggingface.co/marketing/transformers/new-support-improved.png" style="max-width: 600px; border: 1px solid #eee; border-radius: 4px; box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);">
|
||||
</a><br>
|
||||
|
||||
## Tour rápido
|
||||
|
||||
Para usar inmediatamente un modelo en una entrada determinada (texto, imagen, audio, ...), proporcionamos la API de `pipeline`. Los pipelines agrupan un modelo previamente entrenado con el preprocesamiento que se usó durante el entrenamiento de ese modelo. Aquí se explica cómo usar rápidamente un pipeline para clasificar textos positivos frente a negativos:
|
||||
|
||||
```python
|
||||
>>> from transformers import pipeline
|
||||
|
||||
# Allocate a pipeline for sentiment-analysis
|
||||
>>> classifier = pipeline('sentiment-analysis')
|
||||
>>> classifier('We are very happy to introduce pipeline to the transformers repository.')
|
||||
[{'label': 'POSITIVE', 'score': 0.9996980428695679}]
|
||||
```
|
||||
|
||||
La segunda línea de código descarga y almacena en caché el modelo preentrenado que usa el pipeline, mientras que la tercera lo evalúa sobre el texto dado. Aquí la respuesta es "positiva" con una confianza del 99,97%.
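
El mismo `classifier` también acepta una lista de frases. A modo de esquema (las etiquetas y puntuaciones exactas dependen del modelo preentrenado que se descargue):

```python
>>> frases = [
...     "We are very happy to introduce pipeline to the transformers repository.",
...     "We hope you don't hate it.",
... ]
>>> resultados = classifier(frases)  # una lista de entradas devuelve una lista de diccionarios
>>> [r["label"] for r in resultados]  # cada resultado tiene las claves 'label' y 'score'
```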
|
||||
|
||||
Muchas tareas tienen un `pipeline` preentrenado listo para funcionar, no solo en NLP sino también en visión por ordenador y voz. Por ejemplo, podemos extraer fácilmente los objetos detectados en una imagen:
|
||||
|
||||
``` python
|
||||
>>> import requests
|
||||
>>> from PIL import Image
|
||||
>>> from transformers import pipeline
|
||||
|
||||
# Download an image with cute cats
|
||||
>>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png"
|
||||
>>> image_data = requests.get(url, stream=True).raw
|
||||
>>> image = Image.open(image_data)
|
||||
|
||||
# Allocate a pipeline for object detection
|
||||
>>> object_detector = pipeline('object_detection')
|
||||
>>> object_detector(image)
|
||||
[{'score': 0.9982201457023621,
|
||||
'label': 'remote',
|
||||
'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
|
||||
{'score': 0.9960021376609802,
|
||||
'label': 'remote',
|
||||
'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
|
||||
{'score': 0.9954745173454285,
|
||||
'label': 'couch',
|
||||
'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
|
||||
{'score': 0.9988006353378296,
|
||||
'label': 'cat',
|
||||
'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
|
||||
{'score': 0.9986783862113953,
|
||||
'label': 'cat',
|
||||
'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}]
|
||||
```
|
||||
|
||||
Aquí obtenemos una lista de objetos detectados en la imagen, con un cuadro que rodea cada objeto y una puntuación de confianza. La imagen original se muestra a la izquierda y las predicciones a la derecha:
|
||||
|
||||
<h3 align="center">
|
||||
<a><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png" width="400"></a>
|
||||
<a><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample_post_processed.png" width="400"></a>
|
||||
</h3>
|
||||
|
||||
Puedes obtener más información sobre las tareas admitidas por la API de `pipeline` en [este tutorial](https://huggingface.co/docs/transformers/task_summary).
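
Además del modelo por defecto, puedes indicar explícitamente qué checkpoint del Hub quieres usar dentro del `pipeline`. Como esquema (el checkpoint `nlptown/bert-base-multilingual-uncased-sentiment` se usa aquí solo a modo de ejemplo de modelo multilingüe):

```python
>>> from transformers import pipeline

# Se indica explícitamente un checkpoint del Hub en lugar del modelo por defecto
>>> classifier = pipeline("sentiment-analysis", model="nlptown/bert-base-multilingual-uncased-sentiment")
>>> classifier("¡Estamos muy contentos de presentar pipeline a la comunidad!")
```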
|
||||
|
||||
Además de `pipeline`, para descargar y usar cualquiera de los modelos preentrenados en tu tarea, todo lo que necesitas son tres líneas de código. Aquí está la versión de PyTorch:
|
||||
```python
|
||||
>>> from transformers import AutoTokenizer, AutoModel
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
|
||||
>>> model = AutoModel.from_pretrained("bert-base-uncased")
|
||||
|
||||
>>> inputs = tokenizer("Hello world!", return_tensors="pt")
|
||||
>>> outputs = model(**inputs)
|
||||
```
|
||||
|
||||
Y aquí está el código equivalente para TensorFlow:
|
||||
```python
|
||||
>>> from transformers import AutoTokenizer, TFAutoModel
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
|
||||
>>> model = TFAutoModel.from_pretrained("bert-base-uncased")
|
||||
|
||||
>>> inputs = tokenizer("Hello world!", return_tensors="tf")
|
||||
>>> outputs = model(**inputs)
|
||||
```
|
||||
|
||||
El tokenizador es responsable de todo el preprocesamiento que espera el modelo preentrenado y se puede llamar directamente sobre una sola cadena (como en los ejemplos anteriores) o sobre una lista. Dará como resultado un diccionario que puedes usar en el código posterior o pasar directamente a tu modelo usando el operador de desempaquetado de argumentos **.
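
Por ejemplo, un esquema mínimo (reutilizando el checkpoint `bert-base-uncased` de los ejemplos anteriores) de cómo llamar al tokenizador sobre una lista de frases con `padding` y `truncation` y pasar el resultado al modelo:

```python
>>> from transformers import AutoTokenizer, AutoModel

>>> tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
>>> model = AutoModel.from_pretrained("bert-base-uncased")

# El tokenizador acepta una lista de cadenas; padding y truncation igualan la longitud de las secuencias
>>> inputs = tokenizer(["Hello world!", "Hello transformers!"], padding=True, truncation=True, return_tensors="pt")
>>> outputs = model(**inputs)  # el diccionario se desempaqueta con el operador **
```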
|
||||
|
||||
El modelo en sí es un [PyTorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) normal o un [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) (dependiendo de tu backend) que puedes usar de forma habitual. [Este tutorial](https://huggingface.co/docs/transformers/training) explica cómo integrar un modelo de este tipo en un ciclo de entrenamiento clásico de PyTorch o TensorFlow, o cómo usar nuestra API `Trainer` para ajustarlo rápidamente en un nuevo conjunto de datos.
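
Como referencia, un esquema mínimo (no un ejemplo completo: `mi_dataset_tokenizado` es un marcador de posición que debes sustituir por tu propio conjunto de datos ya tokenizado) de cómo se usa la API `Trainer`:

```python
>>> from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments

>>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-uncased", num_labels=2)
>>> training_args = TrainingArguments(output_dir="./resultados", num_train_epochs=3)

# `mi_dataset_tokenizado` es un marcador de posición: sustitúyelo por tu dataset ya tokenizado
>>> trainer = Trainer(model=model, args=training_args, train_dataset=mi_dataset_tokenizado)
>>> trainer.train()
```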
|
||||
|
||||
## ¿Por qué debo usar transformers?
|
||||
|
||||
1. Modelos de última generación fáciles de usar:
|
||||
- Alto rendimiento en comprensión y generación de lenguaje natural, visión artificial y tareas de audio.
|
||||
- Baja barrera de entrada para educadores y profesionales.
|
||||
- Pocas abstracciones de cara al usuario con solo tres clases para aprender.
|
||||
- Una API unificada para usar todos nuestros modelos preentrenados.
|
||||
|
||||
1. Menores costes de cómputo, menor huella de carbono:
|
||||
- Los investigadores pueden compartir modelos entrenados en lugar de siempre volver a entrenar.
|
||||
- Los profesionales pueden reducir el tiempo de cómputo y los costos de producción.
|
||||
- Docenas de arquitecturas con más de 60 000 modelos preentrenados en todas las modalidades.
|
||||
|
||||
1. Elija el marco adecuado para cada parte de la vida útil de un modelo:
|
||||
- Entrene modelos de última generación en 3 líneas de código.
|
||||
- Mueva un solo modelo entre los marcos TF2.0/PyTorch/JAX a voluntad.
|
||||
- Elija sin problemas el marco adecuado para el entrenamiento, la evaluación y la producción.
|
||||
|
||||
1. Personalice fácilmente un modelo o un ejemplo según sus necesidades:
|
||||
- Proporcionamos ejemplos de cada arquitectura para reproducir los resultados publicados por sus autores originales.
|
||||
- Las partes internas del modelo se exponen de la forma más consistente posible.
|
||||
- Los archivos de modelo se pueden usar de forma independiente de la biblioteca para experimentos rápidos.
|
||||
|
||||
## ¿Por qué no debería usar transformers?
|
||||
|
||||
- Esta biblioteca no es una caja de herramientas modular de bloques de construcción para redes neuronales. El código en los archivos del modelo no se refactoriza con abstracciones adicionales a propósito, de modo que los investigadores puedan iterar rápidamente en cada uno de los modelos sin sumergirse en abstracciones/archivos adicionales.
|
||||
- La API de entrenamiento no está pensada para funcionar con cualquier modelo, sino que está optimizada para los modelos proporcionados por la biblioteca. Para bucles genéricos de aprendizaje automático, deberías usar otra biblioteca (posiblemente, [Accelerate](https://huggingface.co/docs/accelerate)).
|
||||
- Si bien nos esforzamos por presentar tantos casos de uso como sea posible, los scripts en nuestra [carpeta de ejemplos](https://github.com/huggingface/transformers/tree/main/examples) son solo eso: ejemplos. Se espera que no funcionen de forma inmediata en tu problema específico y que tengas que cambiar algunas líneas de código para adaptarlos a tus necesidades.
|
||||
|
||||
## Instalación
|
||||
|
||||
### Con pip
|
||||
|
||||
Este repositorio está probado en Python 3.6+, Flax 0.3.2+, PyTorch 1.3.1+ y TensorFlow 2.3+.
|
||||
|
||||
Deberías instalar 🤗 Transformers en un [entorno virtual](https://docs.python.org/3/library/venv.html). Si no estás familiarizado con los entornos virtuales de Python, consulta la [guía de usuario](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
|
||||
|
||||
Primero, crea un entorno virtual con la versión de Python que vas a usar y actívalo.
|
||||
|
||||
Luego, deberás instalar al menos uno de Flax, PyTorch o TensorFlow.
|
||||
Por favor, consulta la [página de instalación de TensorFlow](https://www.tensorflow.org/install/), la [página de instalación de PyTorch](https://pytorch.org/get-started/locally/#start-locally) y/o las páginas de instalación de [Flax](https://github.com/google/flax#quick-install) y [Jax](https://github.com/google/jax#installation) para ver el comando de instalación específico para tu plataforma.
|
||||
|
||||
Una vez instalado uno de esos backends, 🤗 Transformers se puede instalar usando pip de la siguiente manera:
|
||||
|
||||
```bash
|
||||
pip install transformers
|
||||
```
|
||||
|
||||
Si deseas jugar con los ejemplos o necesitas la última versión del código y no puedes esperar a una nueva versión, tienes que [instalar la biblioteca desde la fuente](https://huggingface.co/docs/transformers/installation#installing-from-source).
|
||||
|
||||
### Con conda
|
||||
|
||||
Desde la versión v4.0.0 de Transformers, ahora tenemos un canal conda: `huggingface`.
|
||||
|
||||
🤗 Transformers se puede instalar usando conda de la siguiente manera:
|
||||
|
||||
```bash
|
||||
conda install -c huggingface transformers
|
||||
```
|
||||
|
||||
Sigue las páginas de instalación de Flax, PyTorch o TensorFlow para ver cómo instalarlos con conda.
|
||||
|
||||
> **_NOTA:_** En Windows, es posible que se le pida que active el modo de desarrollador para beneficiarse del almacenamiento en caché. Si esta no es una opción para usted, háganoslo saber en [esta issue](https://github.com/huggingface/huggingface_hub/issues/1062).
|
||||
|
||||
## Arquitecturas de modelos
|
||||
|
||||
**[Todos los puntos de control del modelo](https://huggingface.co/models)** aportados por 🤗 Transformers están perfectamente integrados con el [centro de modelos](https://huggingface.co) de huggingface.co, donde los suben directamente los [usuarios](https://huggingface.co/users) y las [organizaciones](https://huggingface.co/organizations).
|
||||
|
||||
Número actual de puntos de control: 
|
||||
|
||||
🤗 Transformers actualmente proporciona las siguientes arquitecturas (ver [aquí](https://huggingface.co/docs/transformers/model_summary) para un resumen de alto nivel de cada una de ellas):
|
||||
|
||||
1. **[ALBERT](https://huggingface.co/docs/transformers/model_doc/albert)** (from Google Research and the Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.
|
||||
1. **[BART](https://huggingface.co/docs/transformers/model_doc/bart)** (from Facebook) released with the paper [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461) by Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov and Luke Zettlemoyer.
|
||||
1. **[BARThez](https://huggingface.co/docs/transformers/model_doc/barthez)** (from École polytechnique) released with the paper [BARThez: a Skilled Pretrained French Sequence-to-Sequence Model](https://arxiv.org/abs/2010.12321) by Moussa Kamal Eddine, Antoine J.-P. Tixier, Michalis Vazirgiannis.
|
||||
1. **[BARTpho](https://huggingface.co/docs/transformers/model_doc/bartpho)** (from VinAI Research) released with the paper [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen.
|
||||
1. **[BEiT](https://huggingface.co/docs/transformers/model_doc/beit)** (from Microsoft) released with the paper [BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254) by Hangbo Bao, Li Dong, Furu Wei.
|
||||
1. **[BERT](https://huggingface.co/docs/transformers/model_doc/bert)** (from Google) released with the paper [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova.
|
||||
1. **[BERT For Sequence Generation](https://huggingface.co/docs/transformers/model_doc/bert-generation)** (from Google) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[BERTweet](https://huggingface.co/docs/transformers/model_doc/bertweet)** (from VinAI Research) released with the paper [BERTweet: A pre-trained language model for English Tweets](https://aclanthology.org/2020.emnlp-demos.2/) by Dat Quoc Nguyen, Thanh Vu and Anh Tuan Nguyen.
|
||||
1. **[BigBird-Pegasus](https://huggingface.co/docs/transformers/model_doc/bigbird_pegasus)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[BigBird-RoBERTa](https://huggingface.co/docs/transformers/model_doc/big_bird)** (from Google Research) released with the paper [Big Bird: Transformers for Longer Sequences](https://arxiv.org/abs/2007.14062) by Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, Amr Ahmed.
|
||||
1. **[Blenderbot](https://huggingface.co/docs/transformers/model_doc/blenderbot)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BlenderbotSmall](https://huggingface.co/docs/transformers/model_doc/blenderbot-small)** (from Facebook) released with the paper [Recipes for building an open-domain chatbot](https://arxiv.org/abs/2004.13637) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu, Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston.
|
||||
1. **[BLOOM](https://huggingface.co/docs/transformers/model_doc/bloom)** (from BigScience workshop) released by the [BigScience Workshop](https://bigscience.huggingface.co/).
|
||||
1. **[BORT](https://huggingface.co/docs/transformers/model_doc/bort)** (from Alexa) released with the paper [Optimal Subarchitecture Extraction For BERT](https://arxiv.org/abs/2010.10499) by Adrian de Wynter and Daniel J. Perry.
|
||||
1. **[ByT5](https://huggingface.co/docs/transformers/model_doc/byt5)** (from Google Research) released with the paper [ByT5: Towards a token-free future with pre-trained byte-to-byte models](https://arxiv.org/abs/2105.13626) by Linting Xue, Aditya Barua, Noah Constant, Rami Al-Rfou, Sharan Narang, Mihir Kale, Adam Roberts, Colin Raffel.
|
||||
1. **[CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert)** (from Inria/Facebook/Sorbonne) released with the paper [CamemBERT: a Tasty French Language Model](https://arxiv.org/abs/1911.03894) by Louis Martin*, Benjamin Muller*, Pedro Javier Ortiz Suárez*, Yoann Dupont, Laurent Romary, Éric Villemonte de la Clergerie, Djamé Seddah and Benoît Sagot.
|
||||
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
|
||||
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
|
||||
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
|
||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
|
||||
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
|
||||
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
|
||||
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
|
||||
1. **[CTRL](https://huggingface.co/docs/transformers/model_doc/ctrl)** (from Salesforce) released with the paper [CTRL: A Conditional Transformer Language Model for Controllable Generation](https://arxiv.org/abs/1909.05858) by Nitish Shirish Keskar*, Bryan McCann*, Lav R. Varshney, Caiming Xiong and Richard Socher.
|
||||
1. **[CvT](https://huggingface.co/docs/transformers/model_doc/cvt)** (from Microsoft) released with the paper [CvT: Introducing Convolutions to Vision Transformers](https://arxiv.org/abs/2103.15808) by Haiping Wu, Bin Xiao, Noel Codella, Mengchen Liu, Xiyang Dai, Lu Yuan, Lei Zhang.
|
||||
1. **[Data2Vec](https://huggingface.co/docs/transformers/model_doc/data2vec)** (from Facebook) released with the paper [Data2Vec: A General Framework for Self-supervised Learning in Speech, Vision and Language](https://arxiv.org/abs/2202.03555) by Alexei Baevski, Wei-Ning Hsu, Qiantong Xu, Arun Babu, Jiatao Gu, Michael Auli.
|
||||
1. **[DeBERTa](https://huggingface.co/docs/transformers/model_doc/deberta)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[DeBERTa-v2](https://huggingface.co/docs/transformers/model_doc/deberta-v2)** (from Microsoft) released with the paper [DeBERTa: Decoding-enhanced BERT with Disentangled Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen.
|
||||
1. **[Decision Transformer](https://huggingface.co/docs/transformers/model_doc/decision_transformer)** (from Berkeley/Facebook/Google) released with the paper [Decision Transformer: Reinforcement Learning via Sequence Modeling](https://arxiv.org/abs/2106.01345) by Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laskin, Pieter Abbeel, Aravind Srinivas, Igor Mordatch.
|
||||
1. **[Deformable DETR](https://huggingface.co/docs/transformers/model_doc/deformable_detr)** (from SenseTime Research) released with the paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) by Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, Jifeng Dai.
|
||||
1. **[DeiT](https://huggingface.co/docs/transformers/model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
|
||||
1. **[DETR](https://huggingface.co/docs/transformers/model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
|
||||
1. **[DialoGPT](https://huggingface.co/docs/transformers/model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
|
||||
1. **[DistilBERT](https://huggingface.co/docs/transformers/model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT.
|
||||
1. **[DiT](https://huggingface.co/docs/transformers/model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
|
||||
1. **[Donut](https://huggingface.co/docs/transformers/model_doc/donut)** (from NAVER), released together with the paper [OCR-free Document Understanding Transformer](https://arxiv.org/abs/2111.15664) by Geewook Kim, Teakgyu Hong, Moonbin Yim, Jeongyeon Nam, Jinyoung Park, Jinyeong Yim, Wonseok Hwang, Sangdoo Yun, Dongyoon Han, Seunghyun Park.
|
||||
1. **[DPR](https://huggingface.co/docs/transformers/model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
|
||||
1. **[DPT](https://huggingface.co/docs/transformers/master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
|
||||
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
|
||||
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
|
||||
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
||||
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
||||
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
|
||||
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
|
||||
1. **[Funnel Transformer](https://huggingface.co/docs/transformers/model_doc/funnel)** (from CMU/Google Brain) released with the paper [Funnel-Transformer: Filtering out Sequential Redundancy for Efficient Language Processing](https://arxiv.org/abs/2006.03236) by Zihang Dai, Guokun Lai, Yiming Yang, Quoc V. Le.
|
||||
1. **[GLPN](https://huggingface.co/docs/transformers/model_doc/glpn)** (from KAIST) released with the paper [Global-Local Path Networks for Monocular Depth Estimation with Vertical CutDepth](https://arxiv.org/abs/2201.07436) by Doyeon Kim, Woonghyun Ga, Pyungwhan Ahn, Donggyu Joo, Sehwan Chun, Junmo Kim.
|
||||
1. **[GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt)** (from OpenAI) released with the paper [Improving Language Understanding by Generative Pre-Training](https://blog.openai.com/language-unsupervised/) by Alec Radford, Karthik Narasimhan, Tim Salimans and Ilya Sutskever.
|
||||
1. **[GPT Neo](https://huggingface.co/docs/transformers/model_doc/gpt_neo)** (from EleutherAI) released in the repository [EleutherAI/gpt-neo](https://github.com/EleutherAI/gpt-neo) by Sid Black, Stella Biderman, Leo Gao, Phil Wang and Connor Leahy.
|
||||
1. **[GPT NeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox)** (from EleutherAI) released with the paper [GPT-NeoX-20B: An Open-Source Autoregressive Language Model](https://arxiv.org/abs/2204.06745) by Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, Samuel Weinbach
|
||||
1. **[GPT NeoX Japanese](https://huggingface.co/docs/transformers/model_doc/gpt_neox_japanese)** (from ABEJA) released by Shinya Otani, Takayoshi Makabe, Anuj Arora, and Kyo Hattori.
|
||||
1. **[GPT-2](https://huggingface.co/docs/transformers/model_doc/gpt2)** (from OpenAI) released with the paper [Language Models are Unsupervised Multitask Learners](https://blog.openai.com/better-language-models/) by Alec Radford*, Jeffrey Wu*, Rewon Child, David Luan, Dario Amodei** and Ilya Sutskever**.
|
||||
1. **[GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj)** (from EleutherAI) released in the repository [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/) by Ben Wang and Aran Komatsuzaki.
|
||||
1. **[GroupViT](https://huggingface.co/docs/transformers/model_doc/groupvit)** (from UCSD, NVIDIA) released with the paper [GroupViT: Semantic Segmentation Emerges from Text Supervision](https://arxiv.org/abs/2202.11094) by Jiarui Xu, Shalini De Mello, Sifei Liu, Wonmin Byeon, Thomas Breuel, Jan Kautz, Xiaolong Wang.
|
||||
1. **[Hubert](https://huggingface.co/docs/transformers/model_doc/hubert)** (from Facebook) released with the paper [HuBERT: Self-Supervised Speech Representation Learning by Masked Prediction of Hidden Units](https://arxiv.org/abs/2106.07447) by Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, Abdelrahman Mohamed.
|
||||
1. **[I-BERT](https://huggingface.co/docs/transformers/model_doc/ibert)** (from Berkeley) released with the paper [I-BERT: Integer-only BERT Quantization](https://arxiv.org/abs/2101.01321) by Sehoon Kim, Amir Gholami, Zhewei Yao, Michael W. Mahoney, Kurt Keutzer.
|
||||
1. **[ImageGPT](https://huggingface.co/docs/transformers/model_doc/imagegpt)** (from OpenAI) released with the paper [Generative Pretraining from Pixels](https://openai.com/blog/image-gpt/) by Mark Chen, Alec Radford, Rewon Child, Jeffrey Wu, Heewoo Jun, David Luan, Ilya Sutskever.
|
||||
1. **[LayoutLM](https://huggingface.co/docs/transformers/model_doc/layoutlm)** (from Microsoft Research Asia) released with the paper [LayoutLM: Pre-training of Text and Layout for Document Image Understanding](https://arxiv.org/abs/1912.13318) by Yiheng Xu, Minghao Li, Lei Cui, Shaohan Huang, Furu Wei, Ming Zhou.
|
||||
1. **[LayoutLMv2](https://huggingface.co/docs/transformers/model_doc/layoutlmv2)** (from Microsoft Research Asia) released with the paper [LayoutLMv2: Multi-modal Pre-training for Visually-Rich Document Understanding](https://arxiv.org/abs/2012.14740) by Yang Xu, Yiheng Xu, Tengchao Lv, Lei Cui, Furu Wei, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Wanxiang Che, Min Zhang, Lidong Zhou.
|
||||
1. **[LayoutLMv3](https://huggingface.co/docs/transformers/model_doc/layoutlmv3)** (from Microsoft Research Asia) released with the paper [LayoutLMv3: Pre-training for Document AI with Unified Text and Image Masking](https://arxiv.org/abs/2204.08387) by Yupan Huang, Tengchao Lv, Lei Cui, Yutong Lu, Furu Wei.
|
||||
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
|
||||
1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
|
||||
1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze.
|
||||
1. **[LiLT](https://huggingface.co/docs/transformers/main/model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
1. **[LXMERT](https://huggingface.co/docs/transformers/model_doc/lxmert)** (from UNC Chapel Hill) released with the paper [LXMERT: Learning Cross-Modality Encoder Representations from Transformers for Open-Domain Question Answering](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal.
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
1. **[Megatron-BERT](https://huggingface.co/docs/transformers/model_doc/megatron-bert)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[Megatron-GPT2](https://huggingface.co/docs/transformers/model_doc/megatron_gpt2)** (from NVIDIA) released with the paper [Megatron-LM: Training Multi-Billion Parameter Language Models Using Model Parallelism](https://arxiv.org/abs/1909.08053) by Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper and Bryan Catanzaro.
1. **[mLUKE](https://huggingface.co/docs/transformers/model_doc/mluke)** (from Studio Ousia) released with the paper [mLUKE: The Power of Entity Representations in Multilingual Pretrained Language Models](https://arxiv.org/abs/2110.08151) by Ryokan Ri, Ikuya Yamada, and Yoshimasa Tsuruoka.
1. **[MobileBERT](https://huggingface.co/docs/transformers/model_doc/mobilebert)** (from CMU/Google Brain) released with the paper [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou.
1. **[MobileViT](https://huggingface.co/docs/transformers/model_doc/mobilevit)** (from Apple) released with the paper [MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer](https://arxiv.org/abs/2110.02178) by Sachin Mehta and Mohammad Rastegari.
1. **[MPNet](https://huggingface.co/docs/transformers/model_doc/mpnet)** (from Microsoft Research) released with the paper [MPNet: Masked and Permuted Pre-training for Language Understanding](https://arxiv.org/abs/2004.09297) by Kaitao Song, Xu Tan, Tao Qin, Jianfeng Lu, Tie-Yan Liu.
1. **[MT5](https://huggingface.co/docs/transformers/model_doc/mt5)** (from Google AI) released with the paper [mT5: A massively multilingual pre-trained text-to-text transformer](https://arxiv.org/abs/2010.11934) by Linting Xue, Noah Constant, Adam Roberts, Mihir Kale, Rami Al-Rfou, Aditya Siddhant, Aditya Barua, Colin Raffel.
1. **[MVP](https://huggingface.co/docs/transformers/model_doc/mvp)** (from RUC AI Box) released with the paper [MVP: Multi-task Supervised Pre-training for Natural Language Generation](https://arxiv.org/abs/2206.12131) by Tianyi Tang, Junyi Li, Wayne Xin Zhao and Ji-Rong Wen.
1. **[Nezha](https://huggingface.co/docs/transformers/model_doc/nezha)** (from Huawei Noah’s Ark Lab) released with the paper [NEZHA: Neural Contextualized Representation for Chinese Language Understanding](https://arxiv.org/abs/1909.00204) by Junqiu Wei, Xiaozhe Ren, Xiaoguang Li, Wenyong Huang, Yi Liao, Yasheng Wang, Jiashu Lin, Xin Jiang, Xiao Chen and Qun Liu.
1. **[NLLB](https://huggingface.co/docs/transformers/model_doc/nllb)** (from Meta) released with the paper [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by the NLLB team.
1. **[Nyströmformer](https://huggingface.co/docs/transformers/model_doc/nystromformer)** (from the University of Wisconsin - Madison) released with the paper [Nyströmformer: A Nyström-Based Algorithm for Approximating Self-Attention](https://arxiv.org/abs/2102.03902) by Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, Vikas Singh.
1. **[OPT](https://huggingface.co/docs/transformers/master/model_doc/opt)** (from Meta AI) released with the paper [OPT: Open Pre-trained Transformer Language Models](https://arxiv.org/abs/2205.01068) by Susan Zhang, Stephen Roller, Naman Goyal, Mikel Artetxe, Moya Chen, Shuohui Chen et al.
1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
1. **[Pegasus](https://huggingface.co/docs/transformers/model_doc/pegasus)** (from Google) released with the paper [PEGASUS: Pre-training with Extracted Gap-sentences for Abstractive Summarization](https://arxiv.org/abs/1912.08777) by Jingqing Zhang, Yao Zhao, Mohammad Saleh and Peter J. Liu.
1. **[PEGASUS-X](https://huggingface.co/docs/transformers/model_doc/pegasus_x)** (from Google) released with the paper [Investigating Efficiently Extending Transformers for Long Input Summarization](https://arxiv.org/abs/2208.04347) by Jason Phang, Yao Zhao, and Peter J. Liu.
1. **[Perceiver IO](https://huggingface.co/docs/transformers/model_doc/perceiver)** (from Deepmind) released with the paper [Perceiver IO: A General Architecture for Structured Inputs & Outputs](https://arxiv.org/abs/2107.14795) by Andrew Jaegle, Sebastian Borgeaud, Jean-Baptiste Alayrac, Carl Doersch, Catalin Ionescu, David Ding, Skanda Koppula, Daniel Zoran, Andrew Brock, Evan Shelhamer, Olivier Hénaff, Matthew M. Botvinick, Andrew Zisserman, Oriol Vinyals, João Carreira.
1. **[PhoBERT](https://huggingface.co/docs/transformers/model_doc/phobert)** (from VinAI Research) released with the paper [PhoBERT: Pre-trained language models for Vietnamese](https://www.aclweb.org/anthology/2020.findings-emnlp.92/) by Dat Quoc Nguyen and Anh Tuan Nguyen.
1. **[PLBart](https://huggingface.co/docs/transformers/model_doc/plbart)** (from UCLA NLP) released with the paper [Unified Pre-training for Program Understanding and Generation](https://arxiv.org/abs/2103.06333) by Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, Kai-Wei Chang.
1. **[PoolFormer](https://huggingface.co/docs/transformers/model_doc/poolformer)** (from Sea AI Labs) released with the paper [MetaFormer is Actually What You Need for Vision](https://arxiv.org/abs/2111.11418) by Yu, Weihao and Luo, Mi and Zhou, Pan and Si, Chenyang and Zhou, Yichen and Wang, Xinchao and Feng, Jiashi and Yan, Shuicheng.
1. **[ProphetNet](https://huggingface.co/docs/transformers/model_doc/prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[QDQBert](https://huggingface.co/docs/transformers/model_doc/qdqbert)** (from NVIDIA) released with the paper [Integer Quantization for Deep Learning Inference: Principles and Empirical Evaluation](https://arxiv.org/abs/2004.09602) by Hao Wu, Patrick Judd, Xiaojie Zhang, Mikhail Isaev and Paulius Micikevicius.
1. **[RAG](https://huggingface.co/docs/transformers/model_doc/rag)** (from Facebook) released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandara Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Küttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, Sebastian Riedel, Douwe Kiela.
1. **[REALM](https://huggingface.co/docs/transformers/model_doc/realm.html)** (from Google Research) released with the paper [REALM: Retrieval-Augmented Language Model Pre-Training](https://arxiv.org/abs/2002.08909) by Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat and Ming-Wei Chang.
1. **[Reformer](https://huggingface.co/docs/transformers/model_doc/reformer)** (from Google Research) released with the paper [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya.
1. **[RegNet](https://huggingface.co/docs/transformers/model_doc/regnet)** (from META Platforms) released with the paper [Designing Network Design Space](https://arxiv.org/abs/2003.13678) by Ilija Radosavovic, Raj Prateek Kosaraju, Ross Girshick, Kaiming He, Piotr Dollár.
1. **[RemBERT](https://huggingface.co/docs/transformers/model_doc/rembert)** (from Google Research) released with the paper [Rethinking embedding coupling in pre-trained language models](https://arxiv.org/abs/2010.12821) by Hyung Won Chung, Thibault Févry, Henry Tsai, M. Johnson, Sebastian Ruder.
1. **[ResNet](https://huggingface.co/docs/transformers/model_doc/resnet)** (from Microsoft Research) released with the paper [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
1. **[RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta)** (from Facebook), released together with the paper [RoBERTa: A Robustly Optimized BERT Pretraining Approach](https://arxiv.org/abs/1907.11692) by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, Veselin Stoyanov.
1. **[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer)** (from ZhuiyiTechnology), released together with the paper [RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/abs/2104.09864) by Jianlin Su and Yu Lu and Shengfeng Pan and Bo Wen and Yunfeng Liu.
1. **[SegFormer](https://huggingface.co/docs/transformers/model_doc/segformer)** (from NVIDIA) released with the paper [SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203) by Enze Xie, Wenhai Wang, Zhiding Yu, Anima Anandkumar, Jose M. Alvarez, Ping Luo.
1. **[SEW](https://huggingface.co/docs/transformers/model_doc/sew)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SEW-D](https://huggingface.co/docs/transformers/model_doc/sew_d)** (from ASAPP) released with the paper [Performance-Efficiency Trade-offs in Unsupervised Pre-training for Speech Recognition](https://arxiv.org/abs/2109.06870) by Felix Wu, Kwangyoun Kim, Jing Pan, Kyu Han, Kilian Q. Weinberger, Yoav Artzi.
1. **[SpeechToTextTransformer](https://huggingface.co/docs/transformers/model_doc/speech_to_text)** (from Facebook), released together with the paper [fairseq S2T: Fast Speech-to-Text Modeling with fairseq](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, Juan Pino.
1. **[SpeechToTextTransformer2](https://huggingface.co/docs/transformers/model_doc/speech_to_text_2)** (from Facebook), released together with the paper [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau.
1. **[Splinter](https://huggingface.co/docs/transformers/model_doc/splinter)** (from Tel Aviv University), released together with the paper [Few-Shot Question Answering by Pretraining Span Selection](https://arxiv.org/abs/2101.00438) by Ori Ram, Yuval Kirstain, Jonathan Berant, Amir Globerson, Omer Levy.
1. **[SqueezeBERT](https://huggingface.co/docs/transformers/model_doc/squeezebert)** (from Berkeley) released with the paper [SqueezeBERT: What can computer vision teach NLP about efficient neural networks?](https://arxiv.org/abs/2006.11316) by Forrest N. Iandola, Albert E. Shaw, Ravi Krishna, and Kurt W. Keutzer.
1. **[Swin Transformer](https://huggingface.co/docs/transformers/model_doc/swin)** (from Microsoft) released with the paper [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030) by Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, Baining Guo.
1. **[Swin Transformer V2](https://huggingface.co/docs/transformers/model_doc/swinv2)** (from Microsoft) released with the paper [Swin Transformer V2: Scaling Up Capacity and Resolution](https://arxiv.org/abs/2111.09883) by Ze Liu, Han Hu, Yutong Lin, Zhuliang Yao, Zhenda Xie, Yixuan Wei, Jia Ning, Yue Cao, Zheng Zhang, Li Dong, Furu Wei, Baining Guo.
1. **[T5](https://huggingface.co/docs/transformers/model_doc/t5)** (from Google AI) released with the paper [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/abs/1910.10683) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
1. **[UL2](https://huggingface.co/docs/transformers/model_doc/ul2)** (from Google Research) released with the paper [Unifying Language Learning Paradigms](https://arxiv.org/abs/2205.05131v1) by Yi Tay, Mostafa Dehghani, Vinh Q. Tran, Xavier Garcia, Dara Bahri, Tal Schuster, Huaixiu Steven Zheng, Neil Houlsby, Donald Metzler
1. **[UniSpeech](https://huggingface.co/docs/transformers/model_doc/unispeech)** (from Microsoft Research) released with the paper [UniSpeech: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang.
1. **[UniSpeechSat](https://huggingface.co/docs/transformers/model_doc/unispeech-sat)** (from Microsoft Research) released with the paper [UNISPEECH-SAT: UNIVERSAL SPEECH REPRESENTATION LEARNING WITH SPEAKER AWARE PRE-TRAINING](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu.
1. **[VAN](https://huggingface.co/docs/transformers/model_doc/van)** (from Tsinghua University and Nankai University) released with the paper [Visual Attention Network](https://arxiv.org/abs/2202.09741) by Meng-Hao Guo, Cheng-Ze Lu, Zheng-Ning Liu, Ming-Ming Cheng, Shi-Min Hu.
1. **[VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)** (from Multimedia Computing Group, Nanjing University) released with the paper [VideoMAE: Masked Autoencoders are Data-Efficient Learners for Self-Supervised Video Pre-Training](https://arxiv.org/abs/2203.12602) by Zhan Tong, Yibing Song, Jue Wang, Limin Wang.
1. **[ViLT](https://huggingface.co/docs/transformers/model_doc/vilt)** (from NAVER AI Lab/Kakao Enterprise/Kakao Brain) released with the paper [ViLT: Vision-and-Language Transformer Without Convolution or Region Supervision](https://arxiv.org/abs/2102.03334) by Wonjae Kim, Bokyung Son, Ildoo Kim.
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
1. **[XLM-ProphetNet](https://huggingface.co/docs/transformers/model_doc/xlm-prophetnet)** (from Microsoft Research) released with the paper [ProphetNet: Predicting Future N-gram for Sequence-to-Sequence Pre-training](https://arxiv.org/abs/2001.04063) by Yu Yan, Weizhen Qi, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang and Ming Zhou.
1. **[XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta)** (from Facebook AI), released together with the paper [Unsupervised Cross-lingual Representation Learning at Scale](https://arxiv.org/abs/1911.02116) by Alexis Conneau*, Kartikay Khandelwal*, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer and Veselin Stoyanov.
1. **[XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl)** (from Facebook AI), released together with the paper [Larger-Scale Transformers for Multilingual Masked Language Modeling](https://arxiv.org/abs/2105.00572) by Naman Goyal, Jingfei Du, Myle Ott, Giri Anantharaman, Alexis Conneau.
1. **[XLNet](https://huggingface.co/docs/transformers/model_doc/xlnet)** (from Google/CMU) released with the paper [XLNet: Generalized Autoregressive Pretraining for Language Understanding](https://arxiv.org/abs/1906.08237) by Zhilin Yang*, Zihang Dai*, Yiming Yang, Jaime Carbonell, Ruslan Salakhutdinov, Quoc V. Le.
1. **[XLS-R](https://huggingface.co/docs/transformers/model_doc/xls_r)** (from Facebook AI) released with the paper [XLS-R: Self-supervised Cross-lingual Speech Representation Learning at Scale](https://arxiv.org/abs/2111.09296) by Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, Michael Auli.
1. **[XLSR-Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/xlsr_wav2vec2)** (from Facebook AI) released with the paper [Unsupervised Cross-Lingual Representation Learning For Speech Recognition](https://arxiv.org/abs/2006.13979) by Alexis Conneau, Alexei Baevski, Ronan Collobert, Abdelrahman Mohamed, Michael Auli.
1. **[YOLOS](https://huggingface.co/docs/transformers/model_doc/yolos)** (from Huazhong University of Science & Technology) released with the paper [You Only Look at One Sequence: Rethinking Transformer in Vision through Object Detection](https://arxiv.org/abs/2106.00666) by Yuxin Fang, Bencheng Liao, Xinggang Wang, Jiemin Fang, Jiyang Qi, Rui Wu, Jianwei Niu, Wenyu Liu.
1. **[YOSO](https://huggingface.co/docs/transformers/model_doc/yoso)** (from the University of Wisconsin - Madison) released with the paper [You Only Sample (Almost) Once: Linear Cost Self-Attention Via Bernoulli Sampling](https://arxiv.org/abs/2111.09714) by Zhanpeng Zeng, Yunyang Xiong, Sathya N. Ravi, Shailesh Acharya, Glenn Fung, Vikas Singh.
1. Want to contribute a new model? We have added a **detailed guide and templates** to walk you through the process of adding a new model. You can find them in the [`templates`](./templates) folder of the repository. Be sure to check the [contributing guidelines](./CONTRIBUTING.md) and reach out to the maintainers or open an issue to gather feedback before starting your PR.

To check whether each model has an implementation in Flax, PyTorch or TensorFlow, or has an associated tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/docs/transformers/index#supported-frameworks).

These implementations have been tested on several datasets (see the example scripts) and should match the performance of the original implementations. You can find more details on performance in the Examples section of the [documentation](https://github.com/huggingface/transformers/tree/main/examples).

## Learn more

| Section | Description |
|-|-|
| [Documentation](https://huggingface.co/docs/transformers/) | Full API documentation and tutorials |
| [Task summary](https://huggingface.co/docs/transformers/task_summary) | Tasks supported by 🤗 Transformers |
| [Preprocessing tutorial](https://huggingface.co/docs/transformers/preprocessing) | Using the `Tokenizer` class to prepare data for the models (see the short sketch after this table) |
| [Training and fine-tuning](https://huggingface.co/docs/transformers/training) | Using the models provided by 🤗 Transformers in a PyTorch/TensorFlow training loop and with the `Trainer` API |
| [Quick tour: fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/main/examples) | Example scripts for fine-tuning models on a wide range of tasks |
| [Model sharing and uploading](https://huggingface.co/docs/transformers/model_sharing) | Upload and share your fine-tuned models with the community |
| [Migration](https://huggingface.co/docs/transformers/migration) | Migrate to 🤗 Transformers from `pytorch-transformers` or `pytorch-pretrained-bert` |
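
As a quick illustration of the preprocessing and inference workflow referenced in the table above, here is a minimal sketch. It assumes PyTorch and 🤗 Transformers are installed; the checkpoint name is only an example, and any sequence-classification checkpoint from the Hub works the same way.

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

# Example checkpoint (chosen for illustration); any Hub checkpoint
# with a sequence-classification head can be substituted.
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

# The tokenizer turns raw text into the tensors the model expects.
inputs = tokenizer("Transformers makes multilingual NLP straightforward.", return_tensors="pt")
outputs = model(**inputs)

# The pipeline API bundles tokenization, inference and post-processing in one call.
classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)
print(classifier("Transformers makes multilingual NLP straightforward."))
```

The `Trainer` API mentioned in the training row builds on these same model and tokenizer objects when fine-tuning on your own data.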

## Citation

We now have a [paper](https://www.aclweb.org/anthology/2020.emnlp-demos.6/) that you can cite for the 🤗 Transformers library:
```bibtex
@inproceedings{wolf-etal-2020-transformers,
    title = "Transformers: State-of-the-Art Natural Language Processing",
    author = "Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and Rémi Louf and Morgan Funtowicz and Joe Davison and Sam Shleifer and Patrick von Platen and Clara Ma and Yacine Jernite and Julien Plu and Canwen Xu and Teven Le Scao and Sylvain Gugger and Mariama Drame and Quentin Lhoest and Alexander M. Rush",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
    month = oct,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-demos.6",
    pages = "38--45"
}
```

16 README_ko.md
@ -43,7 +43,8 @@ limitations under the License.
<a href="https://github.com/huggingface/transformers/">English</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
<b>한국어</b>
<b>한국어</b> |
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a>
<p>
</h4>
@ -228,7 +229,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
@ -250,7 +251,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
1. **[ESM](https://huggingface.co/docs/transformers/main/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
@ -272,6 +273,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze.
1. **[LiLT](https://huggingface.co/docs/transformers/main/model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
@ -279,7 +281,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
1. **[MarkupLM](https://huggingface.co/docs/transformers/main/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov.
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
@ -325,7 +327,7 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released in the repository [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace).
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft), released together with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
@ -338,12 +340,12 @@ Flax, PyTorch, TensorFlow 설치 페이지에서 이들을 conda로 설치하는
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
README_zh-hans.md
@ -68,7 +68,8 @@ checkpoint: 检查点
<a href="https://github.com/huggingface/transformers/">English</a> |
<b>简体中文</b> |
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a>
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a>
<p>
</h4>
@ -252,7 +253,7 @@ conda install -c huggingface transformers
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (来自 Google Research) 伴随论文 [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) 由 Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting 发布。
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (来自 OpenAI) 伴随论文 [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) 由 Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever 发布。
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (来自 Salesforce) 伴随论文 [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) 由 Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong 发布。
1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (来自 Microsoft Research Asia) 伴随论文 [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) 由 Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang 发布。
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (来自 Microsoft Research Asia) 伴随论文 [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) 由 Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang 发布。
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (来自 YituTech) 伴随论文 [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) 由 Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan 发布。
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (来自 Facebook AI) 伴随论文 [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) 由 Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie 发布。
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (来自 Tsinghua University) 伴随论文 [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) 由 Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun 发布。
@ -274,7 +275,7 @@ conda install -c huggingface transformers
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (来自 Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning 发布。
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (来自 Google Research) 伴随论文 [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) 由 Sascha Rothe, Shashi Narayan, Aliaksei Severyn 发布。
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (来自 Baidu) 伴随论文 [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu 发布。
1. **[ESM](https://huggingface.co/docs/transformers/main/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
||||
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (来自 CNRS) 伴随论文 [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) 由 Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab 发布。
|
||||
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (来自 Facebook AI) 伴随论文 [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) 由 Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela 发布。
|
||||
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (来自 Google Research) 伴随论文 [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) 由 James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon 发布。
|
||||
@ -296,6 +297,7 @@ conda install -c huggingface transformers
|
||||
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (来自 Microsoft Research Asia) 伴随论文 [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) 由 Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei 发布。
|
||||
1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (来自 AllenAI) 伴随论文 [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) 由 Iz Beltagy, Matthew E. Peters, Arman Cohan 发布。
|
||||
1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (来自 Meta AI) 伴随论文 [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) 由 Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze 发布。
|
||||
1. **[LiLT](https://huggingface.co/docs/transformers/main/model_doc/lilt)** (来自 South China University of Technology) 伴随论文 [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) 由 Jiapeng Wang, Lianwen Jin, Kai Ding 发布。
|
||||
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (来自 AllenAI) 伴随论文 [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) 由 Iz Beltagy, Matthew E. Peters, Arman Cohan 发布。
|
||||
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (来自 Google AI) 伴随论文 [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) 由 Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang 发布。
|
||||
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (来自 Studio Ousia) 伴随论文 [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) 由 Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto 发布。
|
||||
@ -303,7 +305,7 @@ conda install -c huggingface transformers
|
||||
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (来自 Facebook) 伴随论文 [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) 由 Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert 发布。
|
||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (来自 Facebook) 伴随论文 [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) 由 Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin 发布。
|
||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** 用 [OPUS](http://opus.nlpl.eu/) 数据训练的机器翻译模型由 Jörg Tiedemann 发布。[Marian Framework](https://marian-nmt.github.io/) 由微软翻译团队开发。
|
||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/main/model_doc/markuplm)** (来自 Microsoft Research Asia) 伴随论文 [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) 由 Junlong Li, Yiheng Xu, Lei Cui, Furu Wei 发布。
|
||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (来自 Microsoft Research Asia) 伴随论文 [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) 由 Junlong Li, Yiheng Xu, Lei Cui, Furu Wei 发布。
|
||||
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov
|
||||
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (来自 Facebook) 伴随论文 [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) 由 Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer 发布。
|
||||
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (来自 Facebook) 伴随论文 [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) 由 Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan 发布。
|
||||
@ -349,7 +351,7 @@ conda install -c huggingface transformers
|
||||
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (来自 Google AI) 伴随论文 [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) 由 Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu 发布。
|
||||
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (来自 Google AI) 伴随论文 [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) 由 Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos 发布。
|
||||
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (来自 Microsoft Research) 伴随论文 [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) 由 Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou 发布。
|
||||
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace).
|
||||
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
|
||||
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
|
||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (来自 Google/CMU) 伴随论文 [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) 由 Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov 发布。
|
||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (来自 Microsoft) 伴随论文 [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) 由 Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei 发布。
|
||||
@ -362,12 +364,12 @@ conda install -c huggingface transformers
|
||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (来自 Google AI) 伴随论文 [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) 由 Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby 发布。
|
||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (来自 UCLA NLP) 伴随论文 [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) 由 Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang 发布。
|
||||
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (来自 Meta AI) 伴随论文 [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) 由 Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick 发布。
|
||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (来自 Meta AI) 伴随论文 [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) 由 Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas 发布。
|
||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (来自 Meta AI) 伴随论文 [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) 由 Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas 发布。
|
||||
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (来自 Facebook AI) 伴随论文 [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) 由 Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli 发布。
|
||||
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (来自 Facebook AI) 伴随论文 [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) 由 Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino 发布。
|
||||
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (来自 Facebook AI) 伴随论文 [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) 由 Qiantong Xu, Alexei Baevski, Michael Auli 发布。
|
||||
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
|
||||
1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (来自 OpenAI) 伴随论文 [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) 由 Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever 发布。
|
||||
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (来自 OpenAI) 伴随论文 [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) 由 Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever 发布。
|
||||
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (来自 Microsoft Research) 伴随论文 [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) 由 Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling 发布。
|
||||
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
|
||||
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (来自 Facebook) 伴随论文 [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) 由 Guillaume Lample and Alexis Conneau 发布。
|
||||
|
@ -80,7 +80,8 @@ user: 使用者
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<b>繁體中文</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a>
|
||||
<p>
|
||||
</h4>
|
||||
|
||||
@ -264,7 +265,7 @@ conda install -c huggingface transformers
|
||||
1. **[CANINE](https://huggingface.co/docs/transformers/model_doc/canine)** (from Google Research) released with the paper [CANINE: Pre-training an Efficient Tokenization-Free Encoder for Language Representation](https://arxiv.org/abs/2103.06874) by Jonathan H. Clark, Dan Garrette, Iulia Turc, John Wieting.
|
||||
1. **[CLIP](https://huggingface.co/docs/transformers/model_doc/clip)** (from OpenAI) released with the paper [Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020) by Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, Ilya Sutskever.
|
||||
1. **[CodeGen](https://huggingface.co/docs/transformers/model_doc/codegen)** (from Salesforce) released with the paper [A Conversational Paradigm for Program Synthesis](https://arxiv.org/abs/2203.13474) by Erik Nijkamp, Bo Pang, Hiroaki Hayashi, Lifu Tu, Huan Wang, Yingbo Zhou, Silvio Savarese, Caiming Xiong.
|
||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/main/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
|
||||
1. **[Conditional DETR](https://huggingface.co/docs/transformers/model_doc/conditional_detr)** (from Microsoft Research Asia) released with the paper [Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) by Depu Meng, Xiaokang Chen, Zejia Fan, Gang Zeng, Houqiang Li, Yuhui Yuan, Lei Sun, Jingdong Wang.
|
||||
1. **[ConvBERT](https://huggingface.co/docs/transformers/model_doc/convbert)** (from YituTech) released with the paper [ConvBERT: Improving BERT with Span-based Dynamic Convolution](https://arxiv.org/abs/2008.02496) by Zihang Jiang, Weihao Yu, Daquan Zhou, Yunpeng Chen, Jiashi Feng, Shuicheng Yan.
|
||||
1. **[ConvNeXT](https://huggingface.co/docs/transformers/model_doc/convnext)** (from Facebook AI) released with the paper [A ConvNet for the 2020s](https://arxiv.org/abs/2201.03545) by Zhuang Liu, Hanzi Mao, Chao-Yuan Wu, Christoph Feichtenhofer, Trevor Darrell, Saining Xie.
|
||||
1. **[CPM](https://huggingface.co/docs/transformers/model_doc/cpm)** (from Tsinghua University) released with the paper [CPM: A Large-scale Generative Chinese Pre-trained Language Model](https://arxiv.org/abs/2012.00413) by Zhengyan Zhang, Xu Han, Hao Zhou, Pei Ke, Yuxian Gu, Deming Ye, Yujia Qin, Yusheng Su, Haozhe Ji, Jian Guan, Fanchao Qi, Xiaozhi Wang, Yanan Zheng, Guoyang Zeng, Huanqi Cao, Shengqi Chen, Daixuan Li, Zhenbo Sun, Zhiyuan Liu, Minlie Huang, Wentao Han, Jie Tang, Juanzi Li, Xiaoyan Zhu, Maosong Sun.
|
||||
@ -286,7 +287,7 @@ conda install -c huggingface transformers
|
||||
1. **[ELECTRA](https://huggingface.co/docs/transformers/model_doc/electra)** (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang Luong, Quoc V. Le, Christopher D. Manning.
|
||||
1. **[EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder-decoder)** (from Google Research) released with the paper [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
|
||||
1. **[ERNIE](https://huggingface.co/docs/transformers/model_doc/ernie)** (from Baidu) released with the paper [ERNIE: Enhanced Representation through Knowledge Integration](https://arxiv.org/abs/1904.09223) by Yu Sun, Shuohuan Wang, Yukun Li, Shikun Feng, Xuyi Chen, Han Zhang, Xin Tian, Danxiang Zhu, Hao Tian, Hua Wu.
|
||||
1. **[ESM](https://huggingface.co/docs/transformers/main/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
||||
1. **[ESM](https://huggingface.co/docs/transformers/model_doc/esm)** (from Meta AI) are transformer protein language models. **ESM-1b** was released with the paper [Biological structure and function emerge from scaling unsupervised learning to 250 million protein sequences](https://www.pnas.org/content/118/15/e2016239118) by Alexander Rives, Joshua Meier, Tom Sercu, Siddharth Goyal, Zeming Lin, Jason Liu, Demi Guo, Myle Ott, C. Lawrence Zitnick, Jerry Ma, and Rob Fergus. **ESM-1v** was released with the paper [Language models enable zero-shot prediction of the effects of mutations on protein function](https://doi.org/10.1101/2021.07.09.450648) by Joshua Meier, Roshan Rao, Robert Verkuil, Jason Liu, Tom Sercu and Alexander Rives. **ESM-2** was released with the paper [Language models of protein sequences at the scale of evolution enable accurate structure prediction](https://doi.org/10.1101/2022.07.20.500902) by Zeming Lin, Halil Akin, Roshan Rao, Brian Hie, Zhongkai Zhu, Wenting Lu, Allan dos Santos Costa, Maryam Fazel-Zarandi, Tom Sercu, Sal Candido, Alexander Rives.
|
||||
1. **[FlauBERT](https://huggingface.co/docs/transformers/model_doc/flaubert)** (from CNRS) released with the paper [FlauBERT: Unsupervised Language Model Pre-training for French](https://arxiv.org/abs/1912.05372) by Hang Le, Loïc Vial, Jibril Frej, Vincent Segonne, Maximin Coavoux, Benjamin Lecouteux, Alexandre Allauzen, Benoît Crabbé, Laurent Besacier, Didier Schwab.
|
||||
1. **[FLAVA](https://huggingface.co/docs/transformers/model_doc/flava)** (from Facebook AI) released with the paper [FLAVA: A Foundational Language And Vision Alignment Model](https://arxiv.org/abs/2112.04482) by Amanpreet Singh, Ronghang Hu, Vedanuj Goswami, Guillaume Couairon, Wojciech Galuba, Marcus Rohrbach, and Douwe Kiela.
|
||||
1. **[FNet](https://huggingface.co/docs/transformers/model_doc/fnet)** (from Google Research) released with the paper [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon.
|
||||
@ -308,6 +309,7 @@ conda install -c huggingface transformers
|
||||
1. **[LayoutXLM](https://huggingface.co/docs/transformers/model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
|
||||
1. **[LED](https://huggingface.co/docs/transformers/model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
|
||||
1. **[LeViT](https://huggingface.co/docs/transformers/model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze.
|
||||
1. **[LiLT](https://huggingface.co/docs/transformers/main/model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
|
||||
1. **[Longformer](https://huggingface.co/docs/transformers/model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
|
||||
1. **[LongT5](https://huggingface.co/docs/transformers/model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
|
||||
1. **[LUKE](https://huggingface.co/docs/transformers/model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
|
||||
@ -315,7 +317,7 @@ conda install -c huggingface transformers
|
||||
1. **[M-CTC-T](https://huggingface.co/docs/transformers/model_doc/mctct)** (from Facebook) released with the paper [Pseudo-Labeling For Massively Multilingual Speech Recognition](https://arxiv.org/abs/2111.00161) by Loren Lugosch, Tatiana Likhomanenko, Gabriel Synnaeve, and Ronan Collobert.
|
||||
1. **[M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100)** (from Facebook) released with the paper [Beyond English-Centric Multilingual Machine Translation](https://arxiv.org/abs/2010.11125) by Angela Fan, Shruti Bhosale, Holger Schwenk, Zhiyi Ma, Ahmed El-Kishky, Siddharth Goyal, Mandeep Baines, Onur Celebi, Guillaume Wenzek, Vishrav Chaudhary, Naman Goyal, Tom Birch, Vitaliy Liptchinsky, Sergey Edunov, Edouard Grave, Michael Auli, Armand Joulin.
|
||||
1. **[MarianMT](https://huggingface.co/docs/transformers/model_doc/marian)** Machine translation models trained using [OPUS](http://opus.nlpl.eu/) data by Jörg Tiedemann. The [Marian Framework](https://marian-nmt.github.io/) is being developed by the Microsoft Translator Team.
|
||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/main/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
|
||||
1. **[MarkupLM](https://huggingface.co/docs/transformers/model_doc/markuplm)** (from Microsoft Research Asia) released with the paper [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei.
|
||||
1. **[MaskFormer](https://huggingface.co/docs/transformers/model_doc/maskformer)** (from Meta and UIUC) released with the paper [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) by Bowen Cheng, Alexander G. Schwing, Alexander Kirillov
|
||||
1. **[mBART](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Denoising Pre-training for Neural Machine Translation](https://arxiv.org/abs/2001.08210) by Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, Luke Zettlemoyer.
|
||||
1. **[mBART-50](https://huggingface.co/docs/transformers/model_doc/mbart)** (from Facebook) released with the paper [Multilingual Translation with Extensible Multilingual Pretraining and Finetuning](https://arxiv.org/abs/2008.00401) by Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, Angela Fan.
|
||||
@ -361,7 +363,7 @@ conda install -c huggingface transformers
|
||||
1. **[T5v1.1](https://huggingface.co/docs/transformers/model_doc/t5v1.1)** (from Google AI) released with the paper [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) by Colin Raffel and Noam Shazeer and Adam Roberts and Katherine Lee and Sharan Narang and Michael Matena and Yanqi Zhou and Wei Li and Peter J. Liu.
|
||||
1. **[TAPAS](https://huggingface.co/docs/transformers/model_doc/tapas)** (from Google AI) released with the paper [TAPAS: Weakly Supervised Table Parsing via Pre-training](https://arxiv.org/abs/2004.02349) by Jonathan Herzig, Paweł Krzysztof Nowak, Thomas Müller, Francesco Piccinno and Julian Martin Eisenschlos.
|
||||
1. **[TAPEX](https://huggingface.co/docs/transformers/model_doc/tapex)** (from Microsoft Research) released with the paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/abs/2107.07653) by Qian Liu, Bei Chen, Jiaqi Guo, Morteza Ziyadi, Zeqi Lin, Weizhu Chen, Jian-Guang Lou.
|
||||
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/main/model_doc/time_series_transformer)** (from HuggingFace).
|
||||
1. **[Time Series Transformer](https://huggingface.co/docs/transformers/model_doc/time_series_transformer)** (from HuggingFace).
|
||||
1. **[Trajectory Transformer](https://huggingface.co/docs/transformers/model_doc/trajectory_transformers)** (from the University of California at Berkeley) released with the paper [Offline Reinforcement Learning as One Big Sequence Modeling Problem](https://arxiv.org/abs/2106.02039) by Michael Janner, Qiyang Li, Sergey Levine
|
||||
1. **[Transformer-XL](https://huggingface.co/docs/transformers/model_doc/transfo-xl)** (from Google/CMU) released with the paper [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai*, Zhilin Yang*, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan Salakhutdinov.
|
||||
1. **[TrOCR](https://huggingface.co/docs/transformers/model_doc/trocr)** (from Microsoft) released with the paper [TrOCR: Transformer-based Optical Character Recognition with Pre-trained Models](https://arxiv.org/abs/2109.10282) by Minghao Li, Tengchao Lv, Lei Cui, Yijuan Lu, Dinei Florencio, Cha Zhang, Zhoujun Li, Furu Wei.
|
||||
@ -374,12 +376,12 @@ conda install -c huggingface transformers
|
||||
1. **[Vision Transformer (ViT)](https://huggingface.co/docs/transformers/model_doc/vit)** (from Google AI) released with the paper [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby.
|
||||
1. **[VisualBERT](https://huggingface.co/docs/transformers/model_doc/visual_bert)** (from UCLA NLP) released with the paper [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
|
||||
1. **[ViTMAE](https://huggingface.co/docs/transformers/model_doc/vit_mae)** (from Meta AI) released with the paper [Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377) by Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, Ross Girshick.
|
||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/main/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
|
||||
1. **[ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn)** (from Meta AI) released with the paper [Masked Siamese Networks for Label-Efficient Learning](https://arxiv.org/abs/2204.07141) by Mahmoud Assran, Mathilde Caron, Ishan Misra, Piotr Bojanowski, Florian Bordes, Pascal Vincent, Armand Joulin, Michael Rabbat, Nicolas Ballas.
|
||||
1. **[Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2)** (from Facebook AI) released with the paper [wav2vec 2.0: A Framework for Self-Supervised Learning of Speech Representations](https://arxiv.org/abs/2006.11477) by Alexei Baevski, Henry Zhou, Abdelrahman Mohamed, Michael Auli.
|
||||
1. **[Wav2Vec2-Conformer](https://huggingface.co/docs/transformers/model_doc/wav2vec2-conformer)** (from Facebook AI) released with the paper [FAIRSEQ S2T: Fast Speech-to-Text Modeling with FAIRSEQ](https://arxiv.org/abs/2010.05171) by Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Sravya Popuri, Dmytro Okhonko, Juan Pino.
|
||||
1. **[Wav2Vec2Phoneme](https://huggingface.co/docs/transformers/model_doc/wav2vec2_phoneme)** (from Facebook AI) released with the paper [Simple and Effective Zero-shot Cross-lingual Phoneme Recognition](https://arxiv.org/abs/2109.11680) by Qiantong Xu, Alexei Baevski, Michael Auli.
|
||||
1. **[WavLM](https://huggingface.co/docs/transformers/model_doc/wavlm)** (from Microsoft Research) released with the paper [WavLM: Large-Scale Self-Supervised Pre-Training for Full Stack Speech Processing](https://arxiv.org/abs/2110.13900) by Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, Jian Wu, Long Zhou, Shuo Ren, Yanmin Qian, Yao Qian, Jian Wu, Michael Zeng, Furu Wei.
|
||||
1. **[Whisper](https://huggingface.co/docs/transformers/main/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
|
||||
1. **[Whisper](https://huggingface.co/docs/transformers/model_doc/whisper)** (from OpenAI) released with the paper [Robust Speech Recognition via Large-Scale Weak Supervision](https://cdn.openai.com/papers/whisper.pdf) by Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, Ilya Sutskever.
|
||||
1. **[X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)** (from Microsoft Research) released with the paper [Expanding Language-Image Pretrained Models for General Video Recognition](https://arxiv.org/abs/2208.02816) by Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, Haibin Ling.
|
||||
1. **[XGLM](https://huggingface.co/docs/transformers/model_doc/xglm)** (From Facebook AI) released with the paper [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668) by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal, Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo, Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
|
||||
1. **[XLM](https://huggingface.co/docs/transformers/model_doc/xlm)** (from Facebook) released together with the paper [Cross-lingual Language Model Pretraining](https://arxiv.org/abs/1901.07291) by Guillaume Lample and Alexis Conneau.
|
||||
|
@ -33,6 +33,7 @@ RUN echo torch=$VERSION
|
||||
RUN [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA || python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir -U tensorflow
|
||||
RUN python3 -m pip install --no-cache-dir -U tensorflow_probability
|
||||
RUN python3 -m pip uninstall -y flax jax
|
||||
|
||||
# Use installed torch version for `torch-scatter` to avoid having to deal with PYTORCH='pre'.
|
||||
|
@ -275,6 +275,8 @@
|
||||
title: LayoutLM
|
||||
- local: model_doc/led
|
||||
title: LED
|
||||
- local: model_doc/lilt
|
||||
title: LiLT
|
||||
- local: model_doc/longformer
|
||||
title: Longformer
|
||||
- local: model_doc/longt5
|
||||
@ -519,6 +521,8 @@
|
||||
title: Utilities for Trainer
|
||||
- local: internal/generation_utils
|
||||
title: Utilities for Generation
|
||||
- local: internal/image_processing_utils
|
||||
title: Utilities for Image Processors
|
||||
- local: internal/file_utils
|
||||
title: General Utilities
|
||||
title: Internal Helpers
|
||||
|
@ -116,5 +116,5 @@ You could define your own compute_objective function, if not defined, the defaul
|
||||
... )
|
||||
```
|
||||
|
||||
## Hyperparameter search For DDP refinetune
|
||||
## Hyperparameter search For DDP finetune
|
||||
Currently, Hyperparameter search for DDP is enabled for optuna and sigopt. Only the rank-zero process will generate the search trial and pass the argument to other ranks.
|
@ -112,6 +112,7 @@ The documentation is organized into five sections:
|
||||
1. **[LayoutXLM](model_doc/layoutxlm)** (from Microsoft Research Asia) released with the paper [LayoutXLM: Multimodal Pre-training for Multilingual Visually-rich Document Understanding](https://arxiv.org/abs/2104.08836) by Yiheng Xu, Tengchao Lv, Lei Cui, Guoxin Wang, Yijuan Lu, Dinei Florencio, Cha Zhang, Furu Wei.
|
||||
1. **[LED](model_doc/led)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
|
||||
1. **[LeViT](model_doc/levit)** (from Meta AI) released with the paper [LeViT: A Vision Transformer in ConvNet's Clothing for Faster Inference](https://arxiv.org/abs/2104.01136) by Ben Graham, Alaaeldin El-Nouby, Hugo Touvron, Pierre Stock, Armand Joulin, Hervé Jégou, Matthijs Douze.
|
||||
1. **[LiLT](model_doc/lilt)** (from South China University of Technology) released with the paper [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
|
||||
1. **[Longformer](model_doc/longformer)** (from AllenAI) released with the paper [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan.
|
||||
1. **[LongT5](model_doc/longt5)** (from Google AI) released with the paper [LongT5: Efficient Text-To-Text Transformer for Long Sequences](https://arxiv.org/abs/2112.07916) by Mandy Guo, Joshua Ainslie, David Uthus, Santiago Ontanon, Jianmo Ni, Yun-Hsuan Sung, Yinfei Yang.
|
||||
1. **[LUKE](model_doc/luke)** (from Studio Ousia) released with the paper [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda, Yuji Matsumoto.
|
||||
@ -225,7 +226,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| ConvBERT | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| ConvNeXT | ❌ | ❌ | ✅ | ✅ | ❌ |
|
||||
| CTRL | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| CvT | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| CvT | ❌ | ❌ | ✅ | ✅ | ❌ |
|
||||
| Data2VecAudio | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| Data2VecText | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| Data2VecVision | ❌ | ❌ | ✅ | ✅ | ❌ |
|
||||
@ -242,7 +243,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| ELECTRA | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| Encoder decoder | ❌ | ❌ | ✅ | ✅ | ✅ |
|
||||
| ERNIE | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| ESM | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| ESM | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| FairSeq Machine-Translation | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
| FlauBERT | ✅ | ❌ | ✅ | ✅ | ❌ |
|
||||
| FLAVA | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
@ -262,6 +263,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| LayoutLMv3 | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| LED | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| LeViT | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| LiLT | ❌ | ❌ | ✅ | ❌ | ❌ |
|
||||
| Longformer | ✅ | ✅ | ✅ | ✅ | ❌ |
|
||||
| LongT5 | ❌ | ❌ | ✅ | ❌ | ✅ |
|
||||
| LUKE | ✅ | ❌ | ✅ | ❌ | ❌ |
|
||||
|
32
docs/source/en/internal/image_processing_utils.mdx
Normal file
@ -0,0 +1,32 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Utilities for Image Processors
|
||||
|
||||
This page lists all the utility functions used by the image processors, mainly the functional
|
||||
transformations used to process the images.
|
||||
|
||||
Most of those are only useful if you are studying the code of the image processors in the library.
|
||||
|
||||
## Image Transformations
|
||||
|
||||
[[autodoc]] image_transforms.normalize
|
||||
|
||||
[[autodoc]] image_transforms.rescale
|
||||
|
||||
[[autodoc]] image_transforms.resize
|
||||
|
||||
[[autodoc]] image_transforms.to_pil_image
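
As a rough sketch of how these functional transforms chain together (the image shape, ordering, and mean/std values below are illustrative assumptions, not library defaults):

```python
import numpy as np

from transformers.image_transforms import normalize, rescale, resize, to_pil_image

# a dummy channels-last image standing in for a decoded picture
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

resized = resize(image, size=(224, 224))    # interpolate to the target resolution
pil_image = to_pil_image(resized)           # convert back to PIL for visual inspection
pixels = rescale(resized, scale=1 / 255)    # map pixel values from [0, 255] to [0, 1]
pixels = normalize(pixels, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
print(pixels.shape, pixels.dtype)
```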
|
||||
|
||||
## ImageProcessorMixin
|
||||
|
||||
[[autodoc]] image_processing_utils.ImageProcessorMixin
|
@ -25,6 +25,7 @@ There are two categories of pipeline abstractions to be aware about:
|
||||
- [`AudioClassificationPipeline`]
|
||||
- [`AutomaticSpeechRecognitionPipeline`]
|
||||
- [`ConversationalPipeline`]
|
||||
- [`DepthEstimationPipeline`]
|
||||
- [`DocumentQuestionAnsweringPipeline`]
|
||||
- [`FeatureExtractionPipeline`]
|
||||
- [`FillMaskPipeline`]
|
||||
@ -344,12 +345,16 @@ That should enable you to do all the custom code you want.
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### DepthEstimationPipeline
|
||||
[[autodoc]] DepthEstimationPipeline
|
||||
- __call__
|
||||
- all
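
As a quick illustration, the depth-estimation task works like any other pipeline; the checkpoint name and test image below are assumptions, not defaults of the pipeline:

```python
from transformers import pipeline

depth_estimator = pipeline(task="depth-estimation", model="Intel/dpt-large")
outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")

print(outputs["predicted_depth"].shape)  # raw depth tensor predicted by the model
outputs["depth"].save("depth.png")       # PIL image visualizing the depth map
```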
|
||||
|
||||
### DocumentQuestionAnsweringPipeline
|
||||
|
||||
[[autodoc]] DocumentQuestionAnsweringPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### FeatureExtractionPipeline
|
||||
|
||||
[[autodoc]] FeatureExtractionPipeline
|
||||
|
@ -82,6 +82,10 @@ Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its
|
||||
|
||||
[[autodoc]] AutoModelForCausalLM
|
||||
|
||||
## AutoModelForDepthEstimation
|
||||
|
||||
[[autodoc]] AutoModelForDepthEstimation
|
||||
|
||||
## AutoModelForMaskedLM
|
||||
|
||||
[[autodoc]] AutoModelForMaskedLM
|
||||
|
@ -51,3 +51,14 @@ This model was contributed by [anugunj](https://huggingface.co/anugunj). The ori
|
||||
|
||||
[[autodoc]] CvtForImageClassification
|
||||
- forward
|
||||
|
||||
## TFCvtModel
|
||||
|
||||
[[autodoc]] TFCvtModel
|
||||
- call
|
||||
|
||||
## TFCvtForImageClassification
|
||||
|
||||
[[autodoc]] TFCvtForImageClassification
|
||||
- call
|
||||
|
||||
|
@ -107,3 +107,23 @@ and [Matt](https://huggingface.co/Rocketknight1).
|
||||
|
||||
[[autodoc]] EsmForTokenClassification
|
||||
- forward
|
||||
|
||||
## TFEsmModel
|
||||
|
||||
[[autodoc]] TFEsmModel
|
||||
- call
|
||||
|
||||
## TFEsmForMaskedLM
|
||||
|
||||
[[autodoc]] TFEsmForMaskedLM
|
||||
- call
|
||||
|
||||
## TFEsmForSequenceClassification
|
||||
|
||||
[[autodoc]] TFEsmForSequenceClassification
|
||||
- call
|
||||
|
||||
## TFEsmForTokenClassification
|
||||
|
||||
[[autodoc]] TFEsmForTokenClassification
|
||||
- call
|
||||
|
73
docs/source/en/model_doc/lilt.mdx
Normal file
@ -0,0 +1,73 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# LiLT
|
||||
|
||||
## Overview
|
||||
|
||||
The LiLT model was proposed in [LiLT: A Simple yet Effective Language-Independent Layout Transformer for Structured Document Understanding](https://arxiv.org/abs/2202.13669) by Jiapeng Wang, Lianwen Jin, Kai Ding.
|
||||
LiLT makes it possible to combine any pre-trained RoBERTa text encoder with a lightweight Layout Transformer, to enable [LayoutLM](layoutlm)-like document understanding for many
|
||||
languages.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Structured document understanding has attracted considerable attention and made significant progress recently, owing to its crucial role in intelligent document processing. However, most existing related models can only deal with the document data of specific language(s) (typically English) included in the pre-training collection, which is extremely limited. To address this issue, we propose a simple yet effective Language-independent Layout Transformer (LiLT) for structured document understanding. LiLT can be pre-trained on the structured documents of a single language and then directly fine-tuned on other languages with the corresponding off-the-shelf monolingual/multilingual pre-trained textual models. Experimental results on eight languages have shown that LiLT can achieve competitive or even superior performance on diverse widely-used downstream benchmarks, which enables language-independent benefit from the pre-training of document layout structure.*
|
||||
|
||||
Tips:
|
||||
|
||||
- To combine the Language-Independent Layout Transformer with a new RoBERTa checkpoint from the [hub](https://huggingface.co/models?search=roberta), refer to [this guide](https://github.com/jpWang/LiLT#or-generate-your-own-checkpoint-optional).
|
||||
The script will result in `config.json` and `pytorch_model.bin` files being stored locally. After doing this, one can do the following (assuming you're logged in with your HuggingFace account):
|
||||
|
||||
```python
|
||||
from transformers import LiltModel
|
||||
|
||||
model = LiltModel.from_pretrained("path_to_your_files")
|
||||
model.push_to_hub("name_of_repo_on_the_hub")
|
||||
```
|
||||
|
||||
- When preparing data for the model, make sure to use the token vocabulary that corresponds to the RoBERTa checkpoint you combined with the Layout Transformer.
|
||||
- As [lilt-roberta-en-base](https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base) uses the same vocabulary as [LayoutLMv3](layoutlmv3), one can use [`LayoutLMv3TokenizerFast`] to prepare data for the model (see the sketch below the tips).
|
||||
The same is true for [lilt-infoxlm-base](https://huggingface.co/SCUT-DLVCLab/lilt-infoxlm-base): one can use [`LayoutXLMTokenizerFast`] for that model.
|
||||
- Demo notebooks for LiLT can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LiLT).
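
A minimal sketch of the tokenizer tip above, assuming the `SCUT-DLVCLab/lilt-roberta-en-base` checkpoint and the `microsoft/layoutlmv3-base` tokenizer files are available on the Hub; the words and bounding boxes are made-up OCR output:

```python
import torch

from transformers import LayoutLMv3TokenizerFast, LiltModel

tokenizer = LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base")
model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")

# words and normalized (0-1000) bounding boxes would normally come from your own OCR step
words = ["Invoice", "Number", "0042"]
boxes = [[48, 84, 120, 100], [126, 84, 190, 100], [196, 84, 230, 100]]

encoding = tokenizer(words, boxes=boxes, return_tensors="pt")
with torch.no_grad():
    outputs = model(**encoding)
print(outputs.last_hidden_state.shape)
```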
|
||||
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/lilt_architecture.jpg"
|
||||
alt="drawing" width="600"/>
|
||||
|
||||
<small> LiLT architecture. Taken from the <a href="https://arxiv.org/abs/2202.13669">original paper</a>. </small>
|
||||
|
||||
This model was contributed by [nielsr](https://huggingface.co/nielsr).
|
||||
The original code can be found [here](https://github.com/jpwang/lilt).
|
||||
|
||||
|
||||
## LiltConfig
|
||||
|
||||
[[autodoc]] LiltConfig
|
||||
|
||||
## LiltModel
|
||||
|
||||
[[autodoc]] LiltModel
|
||||
- forward
|
||||
|
||||
## LiltForSequenceClassification
|
||||
|
||||
[[autodoc]] LiltForSequenceClassification
|
||||
- forward
|
||||
|
||||
## LiltForTokenClassification
|
||||
|
||||
[[autodoc]] LiltForTokenClassification
|
||||
- forward
|
||||
|
||||
## LiltForQuestionAnswering
|
||||
|
||||
[[autodoc]] LiltForQuestionAnswering
|
||||
- forward
|
@ -178,7 +178,7 @@ processor will use the feature extractor to get all nodes and xpaths, and create
|
||||
dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq'])
|
||||
```
|
||||
|
||||
**Use case 5: web page question answering (inference), apply_ocr=False**
|
||||
**Use case 5: web page question answering (inference), parse_html=False**
|
||||
|
||||
For question answering tasks (such as WebSRC), you can provide a question to the processor. If you have extracted
|
||||
all nodes and xpaths yourself, you can provide them directly to the processor. Make sure to set `parse_html` to `False`.
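
For instance, a rough sketch (the node/xpath values and the question are invented for illustration):

```python
from transformers import MarkupLMProcessor

processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base")
processor.parse_html = False

# nodes and xpaths extracted beforehand by your own HTML parsing step
nodes = ["hello", "world"]
xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span"]
question = "What's the name of the website?"

encoding = processor(nodes=nodes, xpaths=xpaths, questions=question, return_tensors="pt")
print(encoding.keys())
```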
|
||||
@ -243,4 +243,4 @@ dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'x
|
||||
## MarkupLMForQuestionAnswering
|
||||
|
||||
[[autodoc]] MarkupLMForQuestionAnswering
|
||||
- forward
|
||||
- forward
|
||||
|
@ -25,6 +25,7 @@ Tips:
|
||||
|
||||
- The model usually performs well without requiring any finetuning.
|
||||
- The model follows a classic encoder-decoder architecture and relies on the [`~generation_utils.GenerationMixin.generate`] function for inference.
|
||||
- Inference is currently only implemented for short-form audio, i.e. audio pre-segmented into <=30s segments. Long-form transcription (including timestamps) will be implemented in a future release.
|
||||
- One can use [`WhisperProcessor`] to prepare audio for the model and decode the predicted IDs back into text, as sketched below.
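
A minimal transcription sketch, assuming the `openai/whisper-base` checkpoint and the dummy LibriSpeech split used elsewhere in the docs:

```python
from datasets import load_dataset
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

# a short (<30s) validation clip; longer audio would need to be segmented first
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
inputs = processor(ds[0]["audio"]["array"], sampling_rate=16_000, return_tensors="pt")

generated_ids = model.generate(inputs.input_features)
print(processor.batch_decode(generated_ids, skip_special_tokens=True))
```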
|
||||
|
||||
This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). The Tensorflow version of this model was contributed by [amyeroberts](https://huggingface.co/amyeroberts).
|
||||
|
@ -27,6 +27,7 @@ Wheel files are available for the following Python versions:
|
||||
|
||||
| Extension Version | Python 3.6 | Python 3.7 | Python 3.8 | Python 3.9 | Python 3.10 |
|
||||
| :---------------: | :--------: | :--------: | :--------: | :--------: | :---------: |
|
||||
| 1.12.100 | | √ | √ | √ | √ |
|
||||
| 1.12.0 | | √ | √ | √ | √ |
|
||||
| 1.11.0 | | √ | √ | √ | √ |
|
||||
| 1.10.0 | √ | √ | √ | √ | |
|
||||
@ -41,6 +42,7 @@ Versions of oneCCL and PyTorch must match.
|
||||
<Tip warning={true}>
|
||||
|
||||
oneccl_bindings_for_pytorch 1.12.0 prebuilt wheel does not work with PyTorch 1.12.1 (it is for PyTorch 1.12.0)
|
||||
PyTorch 1.12.1 should work with oneccl_bindings_for_pytorch 1.12.100
|
||||
|
||||
</Tip>
|
||||
|
||||
@ -49,7 +51,7 @@ Use this standards-based MPI implementation to deliver flexible, efficient, scal
|
||||
|
||||
oneccl_bindings_for_pytorch is installed along with the MPI tool set. You need to source the environment before using it.
|
||||
|
||||
for Intel® oneCCL 1.12.0
|
||||
for Intel® oneCCL >= 1.12.0
|
||||
```
|
||||
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
|
||||
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
|
||||
|
@ -25,7 +25,7 @@ In this section we have a look at a few tricks to reduce the memory footprint an
|
||||
| DataLoader | Yes | No |
|
||||
| DeepSpeed Zero | No | Yes |
|
||||
|
||||
A bracket means that it might not be strictly the case but is usually either not a main concern or negligable. Before we start make sure you have installed the following libraries:
|
||||
A bracket means that it might not be strictly the case but is usually either not a main concern or negligible. Before we start make sure you have installed the following libraries:
|
||||
|
||||
```bash
|
||||
pip install transformers datasets accelerate nvidia-ml-py3
|
||||
@ -732,4 +732,4 @@ TrainingArguments(torchdynamo="fx2trt-f16") #enable tensorRT fp16
|
||||
This feature involves 3 different libraries. To install them, please follow the instructions below:
|
||||
- [Torchdynamo installation](https://github.com/pytorch/torchdynamo#requirements-and-setup)
|
||||
- [Functorch installation](https://github.com/pytorch/functorch#install)
|
||||
- [Torch-TensorRT(FX) installation](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst#installation)
|
||||
- [Torch-TensorRT(FX) installation](https://github.com/pytorch/TensorRT/blob/master/docsrc/tutorials/getting_started_with_fx_path.rst#installation)
|
||||
|
@ -193,8 +193,8 @@ Pass your text to the tokenizer:
|
||||
|
||||
The tokenizer returns a dictionary containing:
|
||||
|
||||
* [input_ids](./glossary#input-ids): numerical representions of your tokens.
|
||||
* [atttention_mask](.glossary#attention-mask): indicates which tokens should be attended to.
|
||||
* [input_ids](./glossary#input-ids): numerical representations of your tokens.
|
||||
* [attention_mask](.glossary#attention-mask): indicates which tokens should be attended to.
|
||||
|
||||
A tokenizer can also accept a list of inputs, and pad and truncate the text to return a batch with uniform length:
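
For example, a minimal sketch using `bert-base-cased` purely as a stand-in checkpoint:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
batch = tokenizer(
    ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."],
    padding=True,
    truncation=True,
    max_length=512,
    return_tensors="pt",
)
print(batch["input_ids"].shape)    # both sequences are padded to the same length
print(batch["attention_mask"][1])  # zeros mark the padding positions
```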
|
||||
|
||||
@ -525,4 +525,4 @@ All models are a standard [`tf.keras.Model`](https://www.tensorflow.org/api_docs
|
||||
|
||||
## What's next?
|
||||
|
||||
Now that you've completed the 🤗 Transformers quick tour, check out our guides and learn how to do more specific things like writing a custom model, fine-tuning a model for a task, and how to train a model with a script. If you're interested in learning more about 🤗 Transformers core concepts, grab a cup of coffee and take a look at our Conceptual Guides!
|
||||
Now that you've completed the 🤗 Transformers quick tour, check out our guides and learn how to do more specific things like writing a custom model, fine-tuning a model for a task, and how to train a model with a script. If you're interested in learning more about 🤗 Transformers core concepts, grab a cup of coffee and take a look at our Conceptual Guides!
|
||||
|
@ -176,6 +176,47 @@ If you want to include only tests that include both patterns, `and` is to be use
|
||||
```bash
|
||||
pytest -k "test and ada" tests/test_optimization.py
|
||||
```
|
||||
### Run documentation tests
|
||||
|
||||
In order to test whether the documentation examples are correct, you should check that the `doctests` are passing.
|
||||
As an example, let's use [`WhisperModel.forward`'s docstring](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035):
|
||||
|
||||
```python
|
||||
r"""
|
||||
Returns:
|
||||
|
||||
Example:
|
||||
```python
|
||||
>>> import torch
|
||||
>>> from transformers import WhisperModel, WhisperFeatureExtractor
|
||||
>>> from datasets import load_dataset
|
||||
|
||||
>>> model = WhisperModel.from_pretrained("openai/whisper-base")
|
||||
>>> feature_extractor = WhisperFeatureExtractor.from_pretrained("openai/whisper-base")
|
||||
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
|
||||
>>> inputs = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt")
|
||||
>>> input_features = inputs.input_features
|
||||
>>> decoder_input_ids = torch.tensor([[1, 1]]) * model.config.decoder_start_token_id
|
||||
>>> last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state
|
||||
>>> list(last_hidden_state.shape)
|
||||
[1, 2, 512]
|
||||
```"""
|
||||
|
||||
```
|
||||
Three steps are required to debug the docstring examples:
|
||||
1. In order to properly run the test, **an extra line has to be added** at the end of the docstring. This can be automatically done on any file using:
|
||||
```bash
|
||||
python utils/prepare_for_doc_test.py <path_to_file_or_dir>
|
||||
```
|
||||
|
||||
2. Then, you can use the following line to automatically test every docstring example in the desired file:
|
||||
```bash
|
||||
pytest --doctest-modules <path_to_file_or_dir>
|
||||
```
|
||||
3. Once you are done debugging, you need to remove the extra line added in step **1.** by running the following:
|
||||
```bash
|
||||
python utils/prepare_for_doc_test.py <path_to_file_or_dir> --remove_new_line
|
||||
```
|
||||
|
||||
### Run only modified tests
|
||||
|
||||
|
@ -119,7 +119,7 @@ Carga los atributos de tu configuración personalizada en el modelo de la siguie
|
||||
>>> model = DistilBertModel(my_config)
|
||||
```
|
||||
|
||||
Esto crea un modelo con valores aleatorios, en lugar de crearlo con los pesos del preentramiento, por lo que no serás capaz de usar este modelo para nada útil hasta que no lo entrenes. El entrenamiento es un proceso costoso, tanto en cuestión de recursos como de tiempo, por lo que generalmente es mejor usar un modelo preentrenado para obtener mejores resultados más rápido, consumiendo una fracción de los recursos que un entrenamiento completo hubiera requerido.
|
||||
Esto crea un modelo con valores aleatorios, en lugar de crearlo con los pesos del preentrenamiento, por lo que no serás capaz de usar este modelo para nada útil hasta que no lo entrenes. El entrenamiento es un proceso costoso, tanto en cuestión de recursos como de tiempo, por lo que generalmente es mejor usar un modelo preentrenado para obtener mejores resultados más rápido, consumiendo una fracción de los recursos que un entrenamiento completo hubiera requerido.
|
||||
|
||||
Puedes crear un modelo preentrenado con [`~PreTrainedModel.from_pretrained`]:
|
||||
|
||||
@ -127,7 +127,7 @@ Puedes crear un modelo preentrenado con [`~PreTrainedModel.from_pretrained`]:
|
||||
>>> model = DistilBertModel.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Cuando cargues tus pesos del preentramiento, el modelo por defecto se carga automáticamente si nos lo proporciona 🤗 Transformers. Sin embargo, siempre puedes reemplazar (todos o algunos de) los atributos del modelo por defecto por los tuyos:
|
||||
Cuando cargues tus pesos del preentrenamiento, el modelo por defecto se carga automáticamente si nos lo proporciona 🤗 Transformers. Sin embargo, siempre puedes reemplazar (todos o algunos de) los atributos del modelo por defecto por los tuyos:
|
||||
|
||||
```py
|
||||
>>> model = DistilBertModel.from_pretrained("distilbert-base-uncased", config=my_config)
|
||||
@ -144,7 +144,7 @@ Carga los atributos de tu configuración personalizada en el modelo de la siguie
|
||||
>>> tf_model = TFDistilBertModel(my_config)
|
||||
```
|
||||
|
||||
Esto crea un modelo con valores aleatorios, en lugar de crearlo con los pesos del preentramiento, por lo que no serás capaz de usar este modelo para nada útil hasta que no lo entrenes. El entrenamiento es un proceso costoso, tanto en cuestión de recursos como de tiempo, por lo que generalmente es mejor usar un modelo preentrenado para obtener mejores resultados más rápido, consumiendo solo una fracción de los recursos que un entrenamiento completo hubiera requerido.
|
||||
Esto crea un modelo con valores aleatorios, en lugar de crearlo con los pesos del preentrenamiento, por lo que no serás capaz de usar este modelo para nada útil hasta que no lo entrenes. El entrenamiento es un proceso costoso, tanto en cuestión de recursos como de tiempo, por lo que generalmente es mejor usar un modelo preentrenado para obtener mejores resultados más rápido, consumiendo solo una fracción de los recursos que un entrenamiento completo hubiera requerido.
|
||||
|
||||
Puedes crear un modelo preentrenado con [`~TFPreTrainedModel.from_pretrained`]:
|
||||
|
||||
@ -152,7 +152,7 @@ Puedes crear un modelo preentrenado con [`~TFPreTrainedModel.from_pretrained`]:
|
||||
>>> tf_model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Cuando cargues tus pesos del preentramiento, el modelo por defecto se carga automáticamente si este nos lo proporciona 🤗 Transformers. Sin embargo, siempre puedes reemplazar (todos o algunos de) los atributos del modelo por defecto por los tuyos:
|
||||
Cuando cargues tus pesos del preentrenamiento, el modelo por defecto se carga automáticamente si este nos lo proporciona 🤗 Transformers. Sin embargo, siempre puedes reemplazar (todos o algunos de) los atributos del modelo por defecto por los tuyos:
|
||||
|
||||
```py
|
||||
>>> tf_model = TFDistilBertModel.from_pretrained("distilbert-base-uncased", config=my_config)
|
||||
@ -217,7 +217,7 @@ Ambos *tokenizers* son compatibles con los métodos comunes, como los de encodif
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
No todos los modelos son compatibles con un *tokenizer* rápido. Échale un vistazo a esta [tabla](index#supported-frameworks) para comprobar si un modelo en específico es compatible con un *tokenizer* rápido.
|
||||
No todos los modelos son compatibles con un *tokenizer* rápido. Échale un vistazo a esta [tabla](index#supported-frameworks) para comprobar si un modelo específico es compatible con un *tokenizer* rápido.
|
||||
|
||||
</Tip>
|
||||
|
||||
@ -229,7 +229,7 @@ Si has entrenado tu propio *tokenizer*, puedes crear uno desde tu archivo de “
|
||||
>>> my_tokenizer = DistilBertTokenizer(vocab_file="my_vocab_file.txt", do_lower_case=False, padding_side="left")
|
||||
```
|
||||
|
||||
Es importante recordar que los vocabularios que provienen de un *tokenizer* personalizado serán diferentes a los vocabularios generados por el *tokenizer* de un modelo preentrenado. Debes usar el vocabulario de un *tokenizer* preentrenado si vas a usar un modelo preentrenado, de lo contrario las entradas no tendrán sentido. Crea un *tokenizer* con el vocabulario de un modelo preentrenado usado la clase [`DistilBertTokenizer`]:
|
||||
Es importante recordar que los vocabularios que provienen de un *tokenizer* personalizado serán diferentes a los vocabularios generados por el *tokenizer* de un modelo preentrenado. Debes usar el vocabulario de un *tokenizer* preentrenado si vas a usar un modelo preentrenado, de lo contrario las entradas no tendrán sentido. Crea un *tokenizer* con el vocabulario de un modelo preentrenado usando la clase [`DistilBertTokenizer`]:
|
||||
|
||||
|
||||
```py
|
||||
@ -249,7 +249,7 @@ Crea un *tokenizer* rápido con la clase [`DistilBertTokenizerFast`]:
|
||||
|
||||
<Tip>
|
||||
|
||||
Por defecto, el [`AutoTokenizer`] intentará cargar un *tokenizer* rápido. Puedes desactivar este compartimiento cambiando el parámetro `use_fast=False` de `from_pretrained`.
|
||||
Por defecto, el [`AutoTokenizer`] intentará cargar un *tokenizer* rápido. Puedes desactivar este comportamiento cambiando el parámetro `use_fast=False` de `from_pretrained`.
|
||||
|
||||
|
||||
</Tip>
|
||||
@ -258,7 +258,7 @@ Por defecto, el [`AutoTokenizer`] intentará cargar un *tokenizer* rápido. Pued
|
||||
|
||||
Un extractor de características procesa entradas de audio e imagen. Hereda de la clase base [`~feature_extraction_utils.FeatureExtractionMixin`] y también puede heredar de la clase [`ImageFeatureExtractionMixin`] para el procesamiento de características de las imágenes o de la clase [`SequenceFeatureExtractor`] para el procesamiento de entradas de audio.
|
||||
|
||||
Dependiendo de si trabajas en una tarea de audio o de video, puedes crear un extractor de características asociado al modelo que estes usando. Por ejemplo, podrías crear un [`ViTFeatureExtractor`] por defecto si estas usando [ViT](model_doc/vit) para clasificación de imágenes:
|
||||
Dependiendo de si trabajas en una tarea de audio o de video, puedes crear un extractor de características asociado al modelo que estés usando. Por ejemplo, podrías crear un [`ViTFeatureExtractor`] por defecto si estás usando [ViT](model_doc/vit) para clasificación de imágenes:
|
||||
|
||||
```py
|
||||
>>> from transformers import ViTFeatureExtractor
|
||||
|
@ -494,7 +494,7 @@ tres argumentos que necesitas conocer para ello son `padding`, `truncation` y `m
|
||||
|
||||
- `padding` controla el aplicarle padding al texto. Puede ser un booleano o una cadena que debe ser:
|
||||
|
||||
- `True` o `'longest'` para aplicar el pad hasta la secuencia más larga del batch (no apliques el padding si sólo se proporcionas
|
||||
- `True` o `'longest'` para aplicar el pad hasta la secuencia más larga del batch (no apliques el padding si sólo le proporcionas
|
||||
una sola secuencia).
|
||||
- `'max_length'` para aplicar el pad hasta la longitud especificada por el argumento `max_length` o la longitud máxima aceptada
|
||||
por el modelo si no le proporcionas `longitud_máxima` (`longitud_máxima=None`). Si sólo le proporcionas una única secuencia
|
||||
@ -523,7 +523,7 @@ padding/truncamiento a `longitud_máxima` se desactiva.
|
||||
|
||||
A continuación te mostramos una tabla que resume la forma recomendada de configurar el padding y el truncamiento. Si utilizas un par de secuencias de entrada en
|
||||
algunos de los siguientes ejemplos, puedes sustituir `truncation=True` por una `STRATEGY` seleccionada en
|
||||
`['only_first', 'only_second', 'longest_first']`, es decir, `truncation='only_second'` o `truncation= 'longest_first'` para controlar cómo se trunquen ambas secuencias del par como lo has detallado anteriormente.
|
||||
`['only_first', 'only_second', 'longest_first']`, es decir, `truncation='only_second'` o `truncation= 'longest_first'` para controlar cómo se truncan ambas secuencias del par como se ha detallado anteriormente.
|
||||
|
||||
| Truncation | Padding | Instrucciones |
|
||||
|--------------------------------------|-----------------------------------|---------------------------------------------------------------------------------------------|
|
||||
@ -539,7 +539,7 @@ algunos de los siguientes ejemplos, puedes sustituir `truncation=True` por una `
|
||||
| | padding long max de input model | `tokenizer(batch_sentences, padding='max_length', truncation=True)` or |
|
||||
| | | `tokenizer(batch_sentences, padding='max_length', truncation=STRATEGY)` |
|
||||
| | padding a una long especifica | Not possible |
|
||||
| truncationa una long especifica | no padding | `tokenizer(batch_sentences, truncation=True, max_length=42)` or |
|
||||
| truncation a una long especifica | no padding | `tokenizer(batch_sentences, truncation=True, max_length=42)` or |
|
||||
| | | `tokenizer(batch_sentences, truncation=STRATEGY, max_length=42)` |
|
||||
| | padding secuencia max del batch | `tokenizer(batch_sentences, padding=True, truncation=True, max_length=42)` or |
|
||||
| | | `tokenizer(batch_sentences, padding=True, truncation=STRATEGY, max_length=42)` |
|
||||
|
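As a quick illustration of the `padding` and `truncation` combinations summarized in the table above, here is a minimal sketch reusing the `distilbert-base-uncased` checkpoint mentioned earlier in this guide (the sentences are placeholders):

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
batch_sentences = ["Hola", "Una segunda frase bastante más larga que la primera"]

# Pad to the longest sequence in the batch and truncate to the model's maximum input length.
encoded = tokenizer(batch_sentences, padding=True, truncation=True)

# Pad and truncate to an explicit length instead.
encoded = tokenizer(batch_sentences, padding="max_length", truncation=True, max_length=42)
```
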
@ -123,7 +123,7 @@ python examples/tensorflow/summarization/run_summarization.py \
|
||||
[Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) admite un entrenamiento distribuido y de precisión mixta, lo que significa que también puedes usarlo en un script. Para habilitar ambas características:
|
||||
|
||||
- Agrega el argumento `fp16` para habilitar la precisión mixta.
|
||||
- Establece la cantidad de GPU que se usarás con el argumento `nproc_per_node`.
|
||||
- Establece la cantidad de GPU que se usará con el argumento `nproc_per_node`.
|
||||
|
||||
```bash
|
||||
python -m torch.distributed.launch \
|
||||
@ -200,7 +200,7 @@ En lugar del script `run_summarization.py`, debes usar el script `run_summarizat
|
||||
accelerate config
|
||||
```
|
||||
|
||||
Prueba tu configuración para asegurarte que esta configurada correctamente:
|
||||
Prueba tu configuración para asegurarte que está configurada correctamente:
|
||||
|
||||
```bash
|
||||
accelerate test
|
||||
@ -344,4 +344,4 @@ python examples/pytorch/summarization/run_summarization.py
|
||||
--per_device_eval_batch_size=4 \
|
||||
--overwrite_output_dir \
|
||||
--predict_with_generate
|
||||
```
|
||||
```
|
||||
|
@ -335,7 +335,6 @@ def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuf
|
||||
batch_idx = np.arange(len(dataset))
|
||||
|
||||
for idx in range(steps):
|
||||
|
||||
start_idx = batch_size * idx
|
||||
end_idx = batch_size * (idx + 1)
|
||||
|
||||
@ -347,7 +346,6 @@ def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuf
|
||||
|
||||
|
||||
def write_metric(summary_writer, metrics, train_time, step, metric_key_prefix="train"):
|
||||
|
||||
if train_time:
|
||||
summary_writer.scalar("train_time", train_time, step)
|
||||
|
||||
@ -782,11 +780,9 @@ def main():
|
||||
num_splits = steps // steps_per_block + int(steps % steps_per_block > 0)
|
||||
|
||||
for idx in range(num_splits):
|
||||
|
||||
if not block_size:
|
||||
_ds = ds
|
||||
else:
|
||||
|
||||
start_idx = block_size * idx
|
||||
end_idx = block_size * (idx + 1)
|
||||
|
||||
@ -926,8 +922,9 @@ def main():
|
||||
|
||||
# ignore padded tokens from loss
|
||||
loss = loss * padding_mask
|
||||
loss = loss.sum() / padding_mask.sum()
|
||||
return loss
|
||||
loss = loss.sum()
|
||||
num_labels = padding_mask.sum()
|
||||
return loss, num_labels
|
||||
|
||||
# Define gradient update step fn
|
||||
def train_step(state, batch, label_smoothing_factor=0.0):
|
||||
@ -936,29 +933,38 @@ def main():
|
||||
def compute_loss(params):
|
||||
labels = batch.pop("labels")
|
||||
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
|
||||
loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
|
||||
return loss
|
||||
loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
|
||||
return loss, num_labels
|
||||
|
||||
grad_fn = jax.value_and_grad(compute_loss)
|
||||
loss, grad = grad_fn(state.params)
|
||||
grad = jax.lax.pmean(grad, "batch")
|
||||
grad_fn = jax.value_and_grad(compute_loss, has_aux=True)
|
||||
(loss, num_labels), grad = grad_fn(state.params)
|
||||
num_labels = jax.lax.psum(num_labels, "batch")
|
||||
|
||||
# true loss = total loss / total samples
|
||||
loss = jax.lax.psum(loss, "batch")
|
||||
loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss)
|
||||
|
||||
# true grad = total grad / total samples
|
||||
grad = jax.lax.psum(grad, "batch")
|
||||
grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad)
|
||||
new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)
|
||||
|
||||
metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
|
||||
metrics = jax.lax.pmean(metrics, axis_name="batch")
|
||||
|
||||
return new_state, metrics
|
||||
|
||||
# Define eval fn
|
||||
def eval_step(params, batch, label_smoothing_factor=0.0):
|
||||
labels = batch.pop("labels")
|
||||
logits = model(**batch, params=params, train=False)[0]
|
||||
loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
|
||||
|
||||
# summarize metrics
|
||||
loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
|
||||
num_labels = jax.lax.psum(num_labels, "batch")
|
||||
|
||||
# true loss = total loss / total samples
|
||||
loss = jax.lax.psum(loss, "batch")
|
||||
loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss)
|
||||
|
||||
metrics = {"loss": loss}
|
||||
metrics = jax.lax.pmean(metrics, axis_name="batch")
|
||||
return metrics
|
||||
|
||||
# Define generation function
|
||||
@ -1024,7 +1030,6 @@ def main():
|
||||
ckpt_dir: str = "",
|
||||
is_prediction=False,
|
||||
):
|
||||
|
||||
logger.info(f"*** {'Predict' if is_prediction else 'Evaluate'} ***")
|
||||
|
||||
metrics = []
|
||||
@ -1103,12 +1108,10 @@ def main():
|
||||
logger.info(desc)
|
||||
|
||||
if jax.process_index() == 0:
|
||||
|
||||
if not os.path.isdir(os.path.join(training_args.output_dir, ckpt_dir)):
|
||||
os.makedirs(os.path.join(training_args.output_dir, ckpt_dir), exist_ok=True)
|
||||
|
||||
if metrics:
|
||||
|
||||
# Save metrics (only for the evaluation/prediction being done along with training)
|
||||
if has_tensorboard and training_args.do_train:
|
||||
write_metric(
|
||||
@ -1143,7 +1146,6 @@ def main():
|
||||
input_rng = None
|
||||
|
||||
if training_args.do_train:
|
||||
|
||||
cur_step = 0
|
||||
train_time = 0
|
||||
epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
|
||||
@ -1166,7 +1168,6 @@ def main():
|
||||
|
||||
# train
|
||||
for batch_idx, _ in enumerate(tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False)):
|
||||
|
||||
cur_step += 1
|
||||
batch = next(train_batches)
|
||||
batch_start = time.time()
|
||||
@ -1177,7 +1178,6 @@ def main():
|
||||
|
||||
# log and save info
|
||||
if training_args.logging_steps > 0 and cur_step % training_args.logging_steps == 0:
|
||||
|
||||
_train_metric = unreplicate(train_metric)
|
||||
desc = (
|
||||
f"Epoch... ({epoch + 1}/{num_epochs} | Step: {cur_step} | Loss: {_train_metric['loss']} |"
|
||||
@ -1217,7 +1217,6 @@ def main():
|
||||
|
||||
# log and save info
|
||||
if training_args.logging_steps <= 0:
|
||||
|
||||
logger.info(desc)
|
||||
|
||||
with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp:
|
||||
|
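The recurring change in these Flax example scripts replaces per-device loss averaging with a global normalization: the loss function now returns the summed loss together with the number of real (non-padded) labels, both are summed across devices with `psum`, and the division happens once. Below is a minimal sketch of that pattern, not the scripts' exact code; it assumes a Flax `TrainState` whose `apply_fn` returns the logits first and a batch dict with `labels` and `decoder_attention_mask`, and it leaves out dropout RNG handling and label smoothing:

```python
import jax
import jax.numpy as jnp


def loss_fn(logits, labels, padding_mask):
    # Per-device sums: cross-entropy over real tokens and the count of real tokens.
    log_probs = jax.nn.log_softmax(logits, axis=-1)
    per_token = -jnp.take_along_axis(log_probs, labels[..., None], axis=-1).squeeze(-1)
    loss = (per_token * padding_mask).sum()
    num_labels = padding_mask.sum()
    return loss, num_labels


def train_step(state, batch):
    labels = batch.pop("labels")
    padding_mask = batch.pop("decoder_attention_mask")

    def compute_loss(params):
        logits = state.apply_fn(**batch, params=params, train=True)[0]
        return loss_fn(logits, labels, padding_mask)

    grad_fn = jax.value_and_grad(compute_loss, has_aux=True)
    (loss, num_labels), grad = grad_fn(state.params)

    # Aggregate across devices first, then normalize once: true loss = total loss / total labels.
    num_labels = jax.lax.psum(num_labels, "batch")
    loss = jax.lax.psum(loss, "batch") / num_labels
    grad = jax.tree_util.tree_map(lambda g: g / num_labels, jax.lax.psum(grad, "batch"))

    new_state = state.apply_gradients(grads=grad)
    return new_state, {"loss": loss}


# The step is meant to run under pmap, which supplies the "batch" axis used by psum:
# p_train_step = jax.pmap(train_step, axis_name="batch")
```
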
@ -351,7 +351,7 @@ The example script uses the 🤗 Datasets library. You can easily customize them
|
||||
To set up all relevant files for training, let's create a directory.
|
||||
|
||||
```bash
|
||||
mkdir ./norwegian-roberta-base
|
||||
mkdir ./norwegian-bart-base
|
||||
```
|
||||
|
||||
### Train tokenizer
|
||||
|
@ -799,19 +799,25 @@ def main():
|
||||
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask
|
||||
|
||||
# take average
|
||||
loss = loss.sum() / label_mask.sum()
|
||||
loss = loss.sum()
|
||||
num_labels = label_mask.sum()
|
||||
|
||||
return loss
|
||||
return loss, num_labels
|
||||
|
||||
grad_fn = jax.value_and_grad(loss_fn)
|
||||
loss, grad = grad_fn(state.params)
|
||||
grad = jax.lax.pmean(grad, "batch")
|
||||
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
|
||||
(loss, num_labels), grad = grad_fn(state.params)
|
||||
num_labels = jax.lax.psum(num_labels, "batch")
|
||||
|
||||
# true loss = total loss / total samples
|
||||
loss = jax.lax.psum(loss, "batch")
|
||||
loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss)
|
||||
|
||||
# true grad = total grad / total samples
|
||||
grad = jax.lax.psum(grad, "batch")
|
||||
grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad)
|
||||
new_state = state.apply_gradients(grads=grad)
|
||||
|
||||
metrics = jax.lax.pmean(
|
||||
{"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch"
|
||||
)
|
||||
|
||||
metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
|
||||
return new_state, metrics, new_dropout_rng
|
||||
|
||||
# Create parallel version of the train step
|
||||
@ -888,7 +894,7 @@ def main():
|
||||
num_eval_samples = len(tokenized_datasets["validation"])
|
||||
# Avoid using jax.numpy here in case of TPU training
|
||||
eval_samples_idx = np.arange(num_eval_samples)
|
||||
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False)
|
||||
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)
|
||||
|
||||
eval_metrics = []
|
||||
for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
|
||||
@ -903,9 +909,9 @@ def main():
|
||||
|
||||
# normalize eval metrics
|
||||
eval_metrics = get_metrics(eval_metrics)
|
||||
eval_metrics = jax.tree_map(jnp.sum, eval_metrics)
|
||||
eval_metrics = jax.tree_util.tree_map(jnp.sum, eval_metrics)
|
||||
eval_normalizer = eval_metrics.pop("normalizer")
|
||||
eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics)
|
||||
eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics)
|
||||
|
||||
# Update progress bar
|
||||
epochs.desc = f"Step... ({cur_step} | Loss: {eval_metrics['loss']}, Acc: {eval_metrics['accuracy']})"
|
||||
@ -917,7 +923,7 @@ def main():
|
||||
if cur_step % training_args.save_steps == 0 and cur_step > 0:
|
||||
# save checkpoint after each epoch and push checkpoint to the hub
|
||||
if jax.process_index() == 0:
|
||||
params = jax.device_get(jax.tree_map(lambda x: x[0], state.params))
|
||||
params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params))
|
||||
model.save_pretrained(training_args.output_dir, params=params)
|
||||
tokenizer.save_pretrained(training_args.output_dir)
|
||||
if training_args.push_to_hub:
|
||||
@ -928,7 +934,7 @@ def main():
|
||||
num_eval_samples = len(tokenized_datasets["validation"])
|
||||
# Avoid using jax.numpy here in case of TPU training
|
||||
eval_samples_idx = np.arange(num_eval_samples)
|
||||
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size, drop_last=False)
|
||||
eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size)
|
||||
|
||||
eval_metrics = []
|
||||
for _, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)):
|
||||
@ -943,9 +949,9 @@ def main():
|
||||
|
||||
# normalize eval metrics
|
||||
eval_metrics = get_metrics(eval_metrics)
|
||||
eval_metrics = jax.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics)
|
||||
eval_metrics = jax.tree_util.tree_map(lambda metric: jnp.sum(metric).item(), eval_metrics)
|
||||
eval_normalizer = eval_metrics.pop("normalizer")
|
||||
eval_metrics = jax.tree_map(lambda x: x / eval_normalizer, eval_metrics)
|
||||
eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics)
|
||||
|
||||
try:
|
||||
perplexity = math.exp(eval_metrics["loss"])
|
||||
|
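Several hunks above also switch metric aggregation from the top-level `jax.tree_map` alias to the namespaced `jax.tree_util.tree_map` (the alias was later deprecated in JAX). The behaviour is unchanged; a small self-contained example with placeholder values:

```python
import jax
import jax.numpy as jnp

eval_metrics = {"loss": jnp.array([1.5, 2.5]), "normalizer": jnp.array([10.0, 10.0])}

# Same result as the older spelling `jax.tree_map(jnp.sum, eval_metrics)`:
eval_metrics = jax.tree_util.tree_map(jnp.sum, eval_metrics)
eval_normalizer = eval_metrics.pop("normalizer")
eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics)
```
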
@ -723,18 +723,25 @@ def main():
|
||||
loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask
|
||||
|
||||
# take average
|
||||
loss = loss.sum() / label_mask.sum()
|
||||
loss = loss.sum()
|
||||
num_labels = label_mask.sum()
|
||||
|
||||
return loss
|
||||
return loss, num_labels
|
||||
|
||||
grad_fn = jax.value_and_grad(loss_fn)
|
||||
loss, grad = grad_fn(state.params)
|
||||
grad = jax.lax.pmean(grad, "batch")
|
||||
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
|
||||
(loss, num_labels), grad = grad_fn(state.params)
|
||||
num_labels = jax.lax.psum(num_labels, "batch")
|
||||
|
||||
# true loss = total loss / total samples
|
||||
loss = jax.lax.psum(loss, "batch")
|
||||
loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss)
|
||||
|
||||
# true grad = total grad / total samples
|
||||
grad = jax.lax.psum(grad, "batch")
|
||||
grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad)
|
||||
new_state = state.apply_gradients(grads=grad)
|
||||
|
||||
metrics = jax.lax.pmean(
|
||||
{"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch"
|
||||
)
|
||||
metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
|
||||
|
||||
return new_state, metrics, new_dropout_rng
|
||||
|
||||
|
@ -328,7 +328,6 @@ class FlaxDataCollatorForT5MLM:
|
||||
decoder_start_token_id: int
|
||||
|
||||
def __call__(self, examples: List[Dict[str, np.ndarray]]) -> BatchEncoding:
|
||||
|
||||
# convert list to dict and tensorize input
|
||||
batch = BatchEncoding(
|
||||
{k: np.array([examples[i][k] for i in range(len(examples))]) for k, v in examples[0].items()}
|
||||
@ -397,7 +396,6 @@ class FlaxDataCollatorForT5MLM:
|
||||
return input_ids
|
||||
|
||||
def random_spans_noise_mask(self, length):
|
||||
|
||||
"""This function is copy of `random_spans_helper <https://github.com/google-research/text-to-text-transfer-transformer/blob/84f8bcc14b5f2c03de51bd3587609ba8f6bbd1cd/t5/data/preprocessors.py#L2682>`__ .
|
||||
|
||||
Noise mask consisting of random spans of noise tokens.
|
||||
|
@ -61,7 +61,7 @@ from utils_qa import postprocess_qa_predictions
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
Array = Any
|
||||
Dataset = datasets.arrow_dataset.Dataset
|
||||
|
@ -784,8 +784,9 @@ def main():
|
||||
|
||||
# ignore padded tokens from loss
|
||||
loss = loss * padding_mask
|
||||
loss = loss.sum() / padding_mask.sum()
|
||||
return loss
|
||||
loss = loss.sum()
|
||||
num_labels = padding_mask.sum()
|
||||
return loss, num_labels
|
||||
|
||||
# Define gradient update step fn
|
||||
def train_step(state, batch, label_smoothing_factor=0.0):
|
||||
@ -794,29 +795,38 @@ def main():
|
||||
def compute_loss(params):
|
||||
labels = batch.pop("labels")
|
||||
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
|
||||
loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
|
||||
return loss
|
||||
loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
|
||||
return loss, num_labels
|
||||
|
||||
grad_fn = jax.value_and_grad(compute_loss)
|
||||
loss, grad = grad_fn(state.params)
|
||||
grad = jax.lax.pmean(grad, "batch")
|
||||
grad_fn = jax.value_and_grad(compute_loss, has_aux=True)
|
||||
(loss, num_labels), grad = grad_fn(state.params)
|
||||
num_labels = jax.lax.psum(num_labels, "batch")
|
||||
|
||||
# true loss = total loss / total samples
|
||||
loss = jax.lax.psum(loss, "batch")
|
||||
loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss)
|
||||
|
||||
# true grad = total grad / total samples
|
||||
grad = jax.lax.psum(grad, "batch")
|
||||
grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad)
|
||||
new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)
|
||||
|
||||
metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
|
||||
metrics = jax.lax.pmean(metrics, axis_name="batch")
|
||||
|
||||
return new_state, metrics
|
||||
|
||||
# Define eval fn
|
||||
def eval_step(params, batch, label_smoothing_factor=0.0):
|
||||
labels = batch.pop("labels")
|
||||
logits = model(**batch, params=params, train=False)[0]
|
||||
loss = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
|
||||
|
||||
# summarize metrics
|
||||
loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
|
||||
num_labels = jax.lax.psum(num_labels, "batch")
|
||||
|
||||
# true loss = total loss / total samples
|
||||
loss = jax.lax.psum(loss, "batch")
|
||||
loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss)
|
||||
|
||||
metrics = {"loss": loss}
|
||||
metrics = jax.lax.pmean(metrics, axis_name="batch")
|
||||
return metrics
|
||||
|
||||
# Define generation function
|
||||
|
@ -54,7 +54,7 @@ from transformers.utils import check_min_version, get_full_repo_name, send_examp
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
Array = Any
|
||||
Dataset = datasets.arrow_dataset.Dataset
|
||||
|
@ -55,7 +55,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")
|
||||
|
||||
|
@ -45,7 +45,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
|
||||
|
||||
|
@ -54,7 +54,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt")
|
||||
|
||||
|
@ -55,7 +55,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
|
||||
|
||||
|
@ -53,7 +53,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
@ -43,7 +43,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
|
||||
|
||||
|
@ -48,7 +48,7 @@ Any model supported by the AutoModelForMaskedImageModeling API can be used.
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
|
||||
|
||||
|
@ -54,7 +54,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
|
@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
@ -53,7 +53,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
|
@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
@ -47,7 +47,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
|
@ -47,7 +47,7 @@ from transformers.utils import PaddingStrategy, check_min_version, send_example_
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -56,7 +56,7 @@ from transformers.utils import PaddingStrategy, check_min_version, get_full_repo
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
# You should update this to your particular problem to have better documentation of `model_type`
|
||||
|
@ -115,7 +115,7 @@ python run_seq2seq_qa.py \
|
||||
--dataset_name squad_v2 \
|
||||
--context_column context \
|
||||
--question_column question \
|
||||
--answer_column answer \
|
||||
--answer_column answers \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--per_device_train_batch_size 12 \
|
||||
|
@ -49,7 +49,7 @@ from utils_qa import postprocess_qa_predictions
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
|
||||
|
||||
|
@ -48,7 +48,7 @@ from utils_qa import postprocess_qa_predictions_with_beam_search
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
|
||||
|
||||
|
@ -56,7 +56,7 @@ from utils_qa import postprocess_qa_predictions_with_beam_search
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
|
||||
|
||||
|
@ -57,7 +57,7 @@ from utils_qa import postprocess_qa_predictions
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
|
||||
|
||||
|
@ -45,7 +45,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
|
||||
|
||||
@ -484,26 +484,12 @@ def main():
|
||||
max_length=max_seq_length,
|
||||
padding=padding,
|
||||
truncation=True,
|
||||
return_offsets_mapping=True,
|
||||
return_overflowing_tokens=True,
|
||||
return_offsets_mapping=True,
|
||||
)
|
||||
|
||||
# Tokenize targets with the `text_target` keyword argument
|
||||
labels = tokenizer(text_target=targets, max_length=max_answer_length, padding=padding, truncation=True)
|
||||
|
||||
# Since one example might give us several features if it has a long context, we need a map from a feature to
|
||||
# its corresponding example. This key gives us just that.
|
||||
sample_mapping = model_inputs.pop("overflow_to_sample_mapping")
|
||||
|
||||
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
|
||||
# corresponding example_id and we will store the offset mappings.
|
||||
model_inputs["example_id"] = []
|
||||
|
||||
for i in range(len(model_inputs["input_ids"])):
|
||||
# One example can give several spans, this is the index of the example containing this span of text.
|
||||
sample_index = sample_mapping[i]
|
||||
model_inputs["example_id"].append(examples["id"][sample_index])
|
||||
|
||||
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
|
||||
# padding in the loss.
|
||||
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
|
||||
@ -511,8 +497,23 @@ def main():
|
||||
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
|
||||
]
|
||||
|
||||
model_inputs["labels"] = labels["input_ids"]
|
||||
# Since one example might give us several features if it has a long context, we need a map from a feature to
|
||||
# its corresponding example. This key gives us just that.
|
||||
sample_mapping = model_inputs.pop("overflow_to_sample_mapping")
|
||||
|
||||
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
|
||||
# corresponding example_id and we will store the offset mappings.
|
||||
model_inputs["example_id"] = []
|
||||
# Augment the overflowing tokens to the labels
|
||||
labels_out = []
|
||||
|
||||
for i in range(len(model_inputs["input_ids"])):
|
||||
# One example can give several spans, this is the index of the example containing this span of text.
|
||||
sample_index = sample_mapping[i]
|
||||
model_inputs["example_id"].append(examples["id"][sample_index])
|
||||
labels_out.append(labels["input_ids"][sample_index])
|
||||
|
||||
model_inputs["labels"] = labels_out
|
||||
return model_inputs
|
||||
|
||||
if training_args.do_train:
|
||||
|
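The reworked preprocessing above tokenizes the answers once per example and then duplicates them per feature, so every overflowing span of a long context keeps the labels of the example it came from. In isolation the mapping looks like this (toy values, not the script's real data):

```python
# Example 0 was split into two overflowing features, example 1 into one.
labels_input_ids = [[10, 11, 12], [20, 21]]
sample_mapping = [0, 0, 1]  # feature index -> originating example index

labels_out = [labels_input_ids[sample_index] for sample_index in sample_mapping]
assert labels_out == [[10, 11, 12], [10, 11, 12], [20, 21]]
```
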
@ -52,7 +52,8 @@ class QuestionAnsweringTrainer(Trainer):
|
||||
finally:
|
||||
self.compute_metrics = compute_metrics
|
||||
|
||||
if self.post_process_function is not None and self.compute_metrics is not None:
|
||||
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
|
||||
# Only the main node write the results by default
|
||||
eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
|
||||
metrics = self.compute_metrics(eval_preds)
|
||||
|
||||
@ -60,11 +61,13 @@ class QuestionAnsweringTrainer(Trainer):
|
||||
for key in list(metrics.keys()):
|
||||
if not key.startswith(f"{metric_key_prefix}_"):
|
||||
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
|
||||
|
||||
self.log(metrics)
|
||||
else:
|
||||
metrics = {}
|
||||
|
||||
if self.args.should_log:
|
||||
# Only the main node log the results by default
|
||||
self.log(metrics)
|
||||
|
||||
if self.args.tpu_metrics_debug or self.args.debug:
|
||||
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
|
||||
xm.master_print(met.metrics_report())
|
||||
|
@ -84,7 +84,8 @@ class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
|
||||
)
|
||||
)
|
||||
|
||||
if self.post_process_function is not None and self.compute_metrics is not None:
|
||||
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
|
||||
# Only the main node write the results by default
|
||||
eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
|
||||
metrics = self.compute_metrics(eval_preds)
|
||||
|
||||
@ -94,8 +95,12 @@ class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
|
||||
metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
|
||||
|
||||
output.metrics.update(metrics)
|
||||
else:
|
||||
metrics = {}
|
||||
|
||||
self.log(metrics)
|
||||
if self.args.should_log:
|
||||
# Only the main node log the results by default
|
||||
self.log(metrics)
|
||||
|
||||
if self.args.tpu_metrics_debug or self.args.debug:
|
||||
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
|
||||
|
@ -51,7 +51,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt")
|
||||
|
||||
|
@ -50,7 +50,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
@ -50,7 +50,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
|
||||
|
||||
|
@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt")
|
||||
|
||||
@ -195,7 +195,7 @@ class DataCollatorSpeechSeq2SeqWithPadding:
|
||||
Data collator that will dynamically pad the inputs received.
|
||||
Args:
|
||||
processor ([`Wav2Vec2Processor`])
|
||||
The processor used for proccessing the data.
|
||||
The processor used for processing the data.
|
||||
decoder_start_token_id (`int`)
|
||||
The begin-of-sentence of the decoder.
|
||||
"""
|
||||
@ -204,7 +204,7 @@ class DataCollatorSpeechSeq2SeqWithPadding:
|
||||
decoder_start_token_id: int
|
||||
|
||||
def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
|
||||
# split inputs and labels since they have to be of different lenghts and need
|
||||
# split inputs and labels since they have to be of different lengths and need
|
||||
# different padding methods
|
||||
input_features = [{"input_values": feature["input_values"]} for feature in features]
|
||||
label_features = [{"input_ids": feature["labels"]} for feature in features]
|
||||
@ -271,7 +271,7 @@ def main():
|
||||
transformers.utils.logging.set_verbosity_info()
|
||||
logger.info("Training/evaluation parameters %s", training_args)
|
||||
|
||||
# 3. Detecting last checkpoint and eventualy continue from last checkpoint
|
||||
# 3. Detecting last checkpoint and eventually continue from last checkpoint
|
||||
last_checkpoint = None
|
||||
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
|
||||
last_checkpoint = get_last_checkpoint(training_args.output_dir)
|
||||
@ -360,7 +360,7 @@ def main():
|
||||
if model_args.freeze_feature_encoder:
|
||||
model.freeze_feature_encoder()
|
||||
|
||||
# 6. Resample speech dataset if necassary
|
||||
# 6. Resample speech dataset if necessary
|
||||
dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate
|
||||
if dataset_sampling_rate != feature_extractor.sampling_rate:
|
||||
raw_datasets = raw_datasets.cast_column(
|
||||
|
@ -52,7 +52,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
|
||||
|
||||
|
@ -56,7 +56,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
|
||||
|
@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
|
||||
|
||||
|
@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
|
||||
|
||||
|
@ -49,7 +49,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")
|
||||
|
||||
|
@ -55,7 +55,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")
|
||||
|
@ -52,7 +52,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt")
|
||||
|
||||
|
@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt")
|
||||
|
@ -1,2 +1,3 @@
|
||||
datasets
|
||||
seqeval
|
||||
seqeval
|
||||
pillow
|
||||
|
@ -50,7 +50,7 @@ from transformers.utils import PaddingStrategy, check_min_version, send_example_
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -48,7 +48,7 @@ from utils_qa import postprocess_qa_predictions
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
@ -53,7 +53,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
# region Checking dependencies
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
|
||||
|
||||
|
@ -47,7 +47,7 @@ from transformers.utils import check_min_version, send_example_telemetry
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
task_to_keys = {
|
||||
"cola": ("sentence", None),
|
||||
|
@ -56,7 +56,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
# region Dependencies and constants
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.23.0.dev0")
|
||||
check_min_version("4.24.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
|
||||
|
||||
|
setup.py
@ -408,7 +408,7 @@ install_requires = [
|
||||
|
||||
setup(
|
||||
name="transformers",
|
||||
version="4.23.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
|
||||
version="4.24.0.dev0", # expected format is one of x.y.z.dev0, or x.y.z.rc1 or x.y.z (no to dashes, yes to dots)
|
||||
author="The Hugging Face team (past and future) with the help of all our contributors (https://github.com/huggingface/transformers/graphs/contributors)",
|
||||
author_email="transformers@huggingface.co",
|
||||
description="State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow",
|
||||
|
@ -22,7 +22,7 @@
|
||||
# to defer the actual importing for when the objects are requested. This way `import transformers` provides the names
|
||||
# in the namespace without actually importing anything (and especially none of the backends).
|
||||
|
||||
__version__ = "4.23.0.dev0"
|
||||
__version__ = "4.24.0.dev0"
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
@ -257,6 +257,7 @@ _import_structure = {
|
||||
"models.layoutxlm": ["LayoutXLMProcessor"],
|
||||
"models.led": ["LED_PRETRAINED_CONFIG_ARCHIVE_MAP", "LEDConfig", "LEDTokenizer"],
|
||||
"models.levit": ["LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LevitConfig"],
|
||||
"models.lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
|
||||
"models.longformer": ["LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongformerConfig", "LongformerTokenizer"],
|
||||
"models.longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config"],
|
||||
"models.luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig", "LukeTokenizer"],
|
||||
@ -419,6 +420,7 @@ _import_structure = {
|
||||
"Conversation",
|
||||
"ConversationalPipeline",
|
||||
"CsvPipelineDataFormat",
|
||||
"DepthEstimationPipeline",
|
||||
"DocumentQuestionAnsweringPipeline",
|
||||
"FeatureExtractionPipeline",
|
||||
"FillMaskPipeline",
|
||||
@ -678,6 +680,8 @@ except OptionalDependencyNotAvailable:
|
||||
name for name in dir(dummy_vision_objects) if not name.startswith("_")
|
||||
]
|
||||
else:
|
||||
_import_structure["image_processing_utils"] = ["ImageProcessorMixin"]
|
||||
_import_structure["image_transforms"] = ["rescale", "resize", "to_pil_image"]
|
||||
_import_structure["image_utils"] = ["ImageFeatureExtractionMixin"]
|
||||
_import_structure["models.beit"].append("BeitFeatureExtractor")
|
||||
_import_structure["models.clip"].append("CLIPFeatureExtractor")
|
||||
@ -858,6 +862,7 @@ else:
|
||||
"MODEL_FOR_CAUSAL_LM_MAPPING",
|
||||
"MODEL_FOR_CTC_MAPPING",
|
||||
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
|
||||
"MODEL_FOR_DEPTH_ESTIMATION_MAPPING",
|
||||
"MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
|
||||
"MODEL_FOR_IMAGE_SEGMENTATION_MAPPING",
|
||||
"MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING",
|
||||
@ -887,6 +892,7 @@ else:
|
||||
"AutoModelForCausalLM",
|
||||
"AutoModelForCTC",
|
||||
"AutoModelForDocumentQuestionAnswering",
|
||||
"AutoModelForDepthEstimation",
|
||||
"AutoModelForImageClassification",
|
||||
"AutoModelForImageSegmentation",
|
||||
"AutoModelForInstanceSegmentation",
|
||||
@ -1821,6 +1827,16 @@ else:
|
||||
"RobertaPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.lilt"].extend(
|
||||
[
|
||||
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
|
||||
"LiltForQuestionAnswering",
|
||||
"LiltForSequenceClassification",
|
||||
"LiltForTokenClassification",
|
||||
"LiltModel",
|
||||
"LiltPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.roformer"].extend(
|
||||
[
|
||||
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
|
||||
@ -2113,6 +2129,7 @@ else:
|
||||
"XLMProphetNetForCausalLM",
|
||||
"XLMProphetNetForConditionalGeneration",
|
||||
"XLMProphetNetModel",
|
||||
"XLMProphetNetPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.xlm_roberta"].extend(
|
||||
@ -2125,6 +2142,7 @@ else:
|
||||
"XLMRobertaForSequenceClassification",
|
||||
"XLMRobertaForTokenClassification",
|
||||
"XLMRobertaModel",
|
||||
"XLMRobertaPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.xlm_roberta_xl"].extend(
|
||||
@ -2356,6 +2374,14 @@ else:
|
||||
"TFCTRLPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.cvt"].extend(
|
||||
[
|
||||
"TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST",
|
||||
"TFCvtForImageClassification",
|
||||
"TFCvtModel",
|
||||
"TFCvtPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.data2vec"].extend(
|
||||
[
|
||||
"TFData2VecVisionForImageClassification",
|
||||
@ -2436,6 +2462,16 @@ else:
|
||||
]
|
||||
)
|
||||
_import_structure["models.encoder_decoder"].append("TFEncoderDecoderModel")
|
||||
_import_structure["models.esm"].extend(
|
||||
[
|
||||
"ESM_PRETRAINED_MODEL_ARCHIVE_LIST",
|
||||
"TFEsmForMaskedLM",
|
||||
"TFEsmForSequenceClassification",
|
||||
"TFEsmForTokenClassification",
|
||||
"TFEsmModel",
|
||||
"TFEsmPreTrainedModel",
|
||||
]
|
||||
)
|
||||
_import_structure["models.flaubert"].extend(
|
||||
[
|
||||
"TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
|
||||
@ -3260,6 +3296,7 @@ if TYPE_CHECKING:
|
||||
from .models.layoutxlm import LayoutXLMProcessor
|
||||
from .models.led import LED_PRETRAINED_CONFIG_ARCHIVE_MAP, LEDConfig, LEDTokenizer
|
||||
from .models.levit import LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, LevitConfig
|
||||
from .models.lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
|
||||
from .models.longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerTokenizer
|
||||
from .models.longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config
|
||||
from .models.luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig, LukeTokenizer
|
||||
@ -3397,6 +3434,7 @@ if TYPE_CHECKING:
|
||||
Conversation,
|
||||
ConversationalPipeline,
|
||||
CsvPipelineDataFormat,
|
||||
DepthEstimationPipeline,
|
||||
DocumentQuestionAnsweringPipeline,
|
||||
FeatureExtractionPipeline,
|
||||
FillMaskPipeline,
|
||||
@ -3622,6 +3660,8 @@ if TYPE_CHECKING:
|
||||
except OptionalDependencyNotAvailable:
|
||||
from .utils.dummy_vision_objects import *
|
||||
else:
|
||||
from .image_processing_utils import ImageProcessorMixin
|
||||
from .image_transforms import rescale, resize, to_pil_image
|
||||
from .image_utils import ImageFeatureExtractionMixin
|
||||
from .models.beit import BeitFeatureExtractor
|
||||
from .models.clip import CLIPFeatureExtractor
|
||||
@ -3766,6 +3806,7 @@ if TYPE_CHECKING:
|
||||
MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING,
|
||||
MODEL_FOR_CAUSAL_LM_MAPPING,
|
||||
MODEL_FOR_CTC_MAPPING,
|
||||
MODEL_FOR_DEPTH_ESTIMATION_MAPPING,
|
||||
MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING,
|
||||
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
|
||||
MODEL_FOR_IMAGE_SEGMENTATION_MAPPING,
|
||||
@ -3795,6 +3836,7 @@ if TYPE_CHECKING:
|
||||
AutoModelForAudioXVector,
|
||||
AutoModelForCausalLM,
|
||||
AutoModelForCTC,
|
||||
AutoModelForDepthEstimation,
|
||||
AutoModelForDocumentQuestionAnswering,
|
||||
AutoModelForImageClassification,
|
||||
AutoModelForImageSegmentation,
|
||||
@ -4256,6 +4298,14 @@ if TYPE_CHECKING:
|
||||
LevitModel,
|
||||
LevitPreTrainedModel,
|
||||
)
|
||||
from .models.lilt import (
|
||||
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
LiltForQuestionAnswering,
|
||||
LiltForSequenceClassification,
|
||||
LiltForTokenClassification,
|
||||
LiltModel,
|
||||
LiltPreTrainedModel,
|
||||
)
|
||||
from .models.longformer import (
|
||||
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
LongformerForMaskedLM,
|
||||
@ -4795,6 +4845,7 @@ if TYPE_CHECKING:
|
||||
XLMProphetNetForCausalLM,
|
||||
XLMProphetNetForConditionalGeneration,
|
||||
XLMProphetNetModel,
|
||||
XLMProphetNetPreTrainedModel,
|
||||
)
|
||||
from .models.xlm_roberta import (
|
||||
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
@ -4805,6 +4856,7 @@ if TYPE_CHECKING:
|
||||
XLMRobertaForSequenceClassification,
|
||||
XLMRobertaForTokenClassification,
|
||||
XLMRobertaModel,
|
||||
XLMRobertaPreTrainedModel,
|
||||
)
|
||||
from .models.xlm_roberta_xl import (
|
||||
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
@ -5020,6 +5072,12 @@ if TYPE_CHECKING:
|
||||
TFCTRLModel,
|
||||
TFCTRLPreTrainedModel,
|
||||
)
|
||||
from .models.cvt import (
|
||||
TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
TFCvtForImageClassification,
|
||||
TFCvtModel,
|
||||
TFCvtPreTrainedModel,
|
||||
)
|
||||
from .models.data2vec import (
|
||||
TFData2VecVisionForImageClassification,
|
||||
TFData2VecVisionForSemanticSegmentation,
|
||||
@ -5086,6 +5144,14 @@ if TYPE_CHECKING:
|
||||
TFElectraPreTrainedModel,
|
||||
)
|
||||
from .models.encoder_decoder import TFEncoderDecoderModel
|
||||
from .models.esm import (
|
||||
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
TFEsmForMaskedLM,
|
||||
TFEsmForSequenceClassification,
|
||||
TFEsmForTokenClassification,
|
||||
TFEsmModel,
|
||||
TFEsmPreTrainedModel,
|
||||
)
|
||||
from .models.flaubert import (
|
||||
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
|
||||
TFFlaubertForMultipleChoice,
|
||||
|
@@ -18,7 +18,6 @@ from argparse import ArgumentParser, Namespace
from importlib import import_module

import numpy as np
from datasets import load_dataset
from packaging import version

import huggingface_hub
@@ -31,6 +30,7 @@ from .. (
    AutoFeatureExtractor,
    AutoProcessor,
    AutoTokenizer,
    is_datasets_available,
    is_tf_available,
    is_torch_available,
)
@@ -46,6 +46,9 @@ if is_tf_available():
if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset


MAX_ERROR = 5e-5  # larger error tolerance than in our internal tests, to avoid flaky user-facing errors

@@ -194,24 +197,6 @@ class PTtoTFCommand(BaseTransformersCLICommand):
            raw_samples = [x["array"] for x in speech_samples]
            return raw_samples

        model_forward_signature = set(inspect.signature(pt_model.forward).parameters.keys())
        processor_inputs = {}
        if "input_ids" in model_forward_signature:
            processor_inputs.update(
                {
                    "text": ["Hi there!", "I am a batch with more than one row and different input lengths."],
                    "padding": True,
                    "truncation": True,
                }
            )
        if "pixel_values" in model_forward_signature:
            sample_images = load_dataset("cifar10", "plain_text", split="test")[:2]["img"]
            processor_inputs.update({"images": sample_images})
        if "input_features" in model_forward_signature:
            processor_inputs.update({"raw_speech": _get_audio_input(), "padding": True})
        if "input_values" in model_forward_signature:  # Wav2Vec2 audio input
            processor_inputs.update({"raw_speech": _get_audio_input(), "padding": True})

        model_config_class = type(pt_model.config)
        if model_config_class in PROCESSOR_MAPPING:
            processor = AutoProcessor.from_pretrained(self._local_dir)
@@ -226,6 +211,34 @@ class PTtoTFCommand(BaseTransformersCLICommand):
        else:
            raise ValueError(f"Unknown data processing type (model config type: {model_config_class})")

        model_forward_signature = set(inspect.signature(pt_model.forward).parameters.keys())
        processor_inputs = {}
        if "input_ids" in model_forward_signature:
            processor_inputs.update(
                {
                    "text": ["Hi there!", "I am a batch with more than one row and different input lengths."],
                    "padding": True,
                    "truncation": True,
                }
            )
        if "pixel_values" in model_forward_signature:
            sample_images = load_dataset("cifar10", "plain_text", split="test")[:2]["img"]
            processor_inputs.update({"images": sample_images})
        if "input_features" in model_forward_signature:
            feature_extractor_signature = inspect.signature(processor.feature_extractor).parameters
            # Pad to the largest input length by default but take feature extractor default
            # padding value if it exists e.g. "max_length" and is not False or None
            if "padding" in feature_extractor_signature:
                default_strategy = feature_extractor_signature["padding"].default
                if default_strategy is not False and default_strategy is not None:
                    padding_strategy = default_strategy
                else:
                    padding_strategy = True
            else:
                padding_strategy = True
            processor_inputs.update({"audio": _get_audio_input(), "padding": padding_strategy})
        if "input_values" in model_forward_signature:  # Wav2Vec2 audio input
            processor_inputs.update({"audio": _get_audio_input(), "padding": True})
        pt_input = processor(**processor_inputs, return_tensors="pt")
        tf_input = processor(**processor_inputs, return_tensors="tf")
@@ -735,10 +735,11 @@ class SuppressTokensLogitsProcessor(LogitsProcessor):


class ForceTokensLogitsProcessor(LogitsProcessor):
    r"""This processor can be used to force a list of tokens. The processor will set their log probs to `inf` so that they
    are sampled at their corresponding index."""
    r"""This processor takes a list of pairs of integers which indicates a mapping from generation indices to token
    indices that will be forced before sampling. The processor will set their log probs to `inf` so that they are
    sampled at their corresponding index."""

    def __init__(self, force_token_map):
    def __init__(self, force_token_map: List[List[int]]):
        self.force_token_map = dict(force_token_map)

    def __call__(self, input_ids, scores):
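To make the new `force_token_map` format concrete, here is a minimal sketch (not part of the diff, toy values only) of how a list of `[generation_index, token_id]` pairs becomes the per-step lookup the processor uses:

```python
# forced_decoder_ids / force_token_map is a list of [generation_index, token_id] pairs,
# e.g. [[1, 123], [2, 456]] forces token 123 at step 1 and token 456 at step 2.
force_token_map = [[1, 123], [2, 456]]

# The processor stores it as a dict so each generation step can be looked up directly.
force_token_map = dict(force_token_map)  # {1: 123, 2: 456}

current_step = 1
forced_token = force_token_map.get(current_step)  # 123 at step 1, None when nothing is forced
print(forced_token)
```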
@@ -547,10 +547,11 @@ class TFSuppressTokensLogitsProcessor(TFLogitsProcessor):


class TFForceTokensLogitsProcessor(TFLogitsProcessor):
    r"""This processor can be used to force a list of tokens. The processor will set their log probs to `0` and all
    other tokens to `-inf` so that they are sampled at their corresponding index."""
    r"""This processor takes a list of pairs of integers which indicates a mapping from generation indices to token
    indices that will be forced before sampling. The processor will set their log probs to `0` and all other tokens to
    `-inf` so that they are sampled at their corresponding index."""

    def __init__(self, force_token_map):
    def __init__(self, force_token_map: List[List[int]]):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
@@ -406,7 +406,7 @@ class TFGenerationMixin:
        forced_eos_token_id=None,
        suppress_tokens: Optional[List[int]] = None,
        begin_suppress_tokens: Optional[List[int]] = None,
        forced_decoder_ids: Optional[List[int]] = None,
        forced_decoder_ids: Optional[List[List[int]]] = None,
        **model_kwargs,
    ) -> Union[TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]:
        r"""
@@ -506,8 +506,10 @@ class TFGenerationMixin:
            begin_suppress_tokens (`List[int]`, *optional*, defaults to `model.config.begin_suppress_tokens`):
                A list of tokens that will be suppressed at the beginning of the generation. The `SupressBeginTokens`
                logit processor will set their log probs to `-inf` so that they are not sampled.
            forced_decoder_ids (`List[int]`, *optional*, defaults to `model.config.forced_decoder_ids`):
                A list of tokens that will be forced as beginning tokens, before sampling.
            forced_decoder_ids (`List[List[int]]`, *optional*, defaults to `model.config.forced_decoder_ids`):
                A list of pairs of integers which indicates a mapping from generation indices to token indices that
                will be forced before sampling. For example, `[[1, 123]]` means the second generated token will always
                be a token of index 123.
            model_specific_kwargs:
                Additional model-specific kwargs will be forwarded to the `forward` function of the model.
@@ -1493,9 +1495,10 @@ class TFGenerationMixin:
            begin_suppress_tokens (`List[int]`, *optional*, defaults to `model.config.begin_suppress_tokens`):
                A list of tokens that will be suppressed at the beginning of the generation. The `SupressBeginTokens`
                logit processor will set their log probs to `-inf` so that they are not sampled.
            forced_decoder_ids (`List[int]`, *optional*, defaults to `model.config.forced_decoder_ids`):
                A list of tokens that will be forced as beginning tokens.
            forced_decoder_ids (`List[List[int]]`, *optional*, defaults to `model.config.forced_decoder_ids`):
                A list of pairs of integers which indicates a mapping from generation indices to token indices that
                will be forced before sampling. For example, `[[1, 123]]` means the second generated token will always
                be a token of index 123.
            model_kwargs:
                Additional model-specific kwargs will be forwarded to the `call` function of the model.
@@ -2147,7 +2150,7 @@ class TFGenerationMixin:
        forced_eos_token_id: int,
        suppress_tokens: Optional[List[int]] = None,
        begin_suppress_tokens: Optional[List[int]] = None,
        forced_decoder_ids: Optional[List[int]] = None,
        forced_decoder_ids: Optional[List[List[int]]] = None,
    ) -> TFLogitsProcessorList:
        """
        This class returns a [`TFLogitsProcessorList`] list object that contains all relevant [`TFLogitsProcessor`]
@@ -696,7 +696,7 @@ class GenerationMixin:
        renormalize_logits: Optional[bool],
        suppress_tokens: Optional[List[int]] = None,
        begin_suppress_tokens: Optional[List[int]] = None,
        forced_decoder_ids: Optional[List[int]] = None,
        forced_decoder_ids: Optional[List[List[int]]] = None,
    ) -> LogitsProcessorList:
        """
        This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsProcessor`]
@@ -956,7 +956,7 @@ class GenerationMixin:
        exponential_decay_length_penalty: Optional[Tuple[int, float]] = None,
        suppress_tokens: Optional[List[int]] = None,
        begin_suppress_tokens: Optional[List[int]] = None,
        forced_decoder_ids: Optional[List[int]] = None,
        forced_decoder_ids: Optional[List[List[int]]] = None,
        **model_kwargs,
    ) -> Union[GreedySearchOutput, SampleOutput, BeamSearchOutput, BeamSampleOutput, torch.LongTensor]:
        r"""
@@ -1121,9 +1121,10 @@ class GenerationMixin:
            begin_suppress_tokens (`List[int]`, *optional*, defaults to `model.config.begin_suppress_tokens`):
                A list of tokens that will be suppressed at the beginning of the generation. The `SupressBeginTokens`
                logit processor will set their log probs to `-inf` so that they are not sampled.
            forced_decoder_ids (`List[int]`, *optional*, defaults to `model.config.forced_decoder_ids`):
                A list of tokens that will be forced as beginning tokens, before sampling.
            forced_decoder_ids (`List[List[int]]`, *optional*, defaults to `model.config.forced_decoder_ids`):
                A list of pairs of integers which indicates a mapping from generation indices to token indices that
                will be forced before sampling. For example, `[[1, 123]]` means the second generated token will always
                be a token of index 123.
            model_kwargs:
                Additional model-specific kwargs will be forwarded to the `forward` function of the model. If the model
                is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs
@@ -1349,6 +1350,17 @@ class GenerationMixin:
                "Diverse beam search cannot be used in sampling mode. Make sure that `do_sample` is set to `False`."
            )

        if self.device.type != input_ids.device.type:
            warnings.warn(
                "You are calling .generate() with the `input_ids` being on a device type different"
                f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model"
                f" is on {self.device.type}. You may experience unexpected behaviors or slower generation."
                " Please make sure that you have put `input_ids` to the"
                f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before"
                " running `.generate()`.",
                UserWarning,
            )

        # 7. prepare distribution pre_processing samplers
        logits_processor = self._get_logits_processor(
            repetition_penalty=repetition_penalty,
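The warning added above fires when the inputs and the model sit on different device types; a minimal sketch (not part of the diff, checkpoint name is a placeholder) of the fix it suggests:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to("cuda" if torch.cuda.is_available() else "cpu")

inputs = tokenizer("Today is a beautiful day", return_tensors="pt")
# Move the inputs to the model's device before calling .generate() to avoid the warning above.
input_ids = inputs.input_ids.to(model.device)
outputs = model.generate(input_ids, max_new_tokens=20)
```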
@@ -1976,7 +1988,6 @@ class GenerationMixin:
        >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
        ['Today is a beautiful day, and a wonderful day.\n\nI was lucky enough to meet the']
        ```"""

        # init values
        logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
        stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
src/transformers/image_processing_utils.py (new file, 54 lines)
@@ -0,0 +1,54 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .feature_extraction_utils import BatchFeature as BaseBatchFeature
from .feature_extraction_utils import FeatureExtractionMixin
from .utils import logging


logger = logging.get_logger(__name__)


# TODO: Move BatchFeature to be imported by both feature_extraction_utils and image_processing_utils
# We override the class string here, but logic is the same.
class BatchFeature(BaseBatchFeature):
    r"""
    Holds the output of the image processor specific `__call__` methods.

    This class is derived from a python dictionary and can be used as a dictionary.

    Args:
        data (`dict`):
            Dictionary of lists/arrays/tensors returned by the __call__ method ('pixel_values', etc.).
        tensor_type (`Union[None, str, TensorType]`, *optional*):
            You can give a tensor_type here to convert the lists of integers into PyTorch/TensorFlow/NumPy tensors at
            initialization.
    """


# We use aliasing whilst we phase out the old API. Once feature extractors for vision models
# are deprecated, ImageProcessor mixin will be implemented. Any shared logic will be abstracted out.
ImageProcessorMixin = FeatureExtractionMixin


class BaseImageProcessor(ImageProcessorMixin):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def __call__(self, images, **kwargs) -> BatchFeature:
        return self.preprocess(images, **kwargs)

    def preprocess(self, images, **kwargs) -> BatchFeature:
        raise NotImplementedError("Each image processor must implement its own preprocess method")
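As a rough illustration (not part of the new file), a subclass only needs to implement `preprocess`, since `__call__` forwards to it; the processor below is hypothetical:

```python
import numpy as np


class ToyImageProcessor(BaseImageProcessor):
    # Hypothetical processor: rescales pixel values and wraps them in a BatchFeature.
    def preprocess(self, images, scale=1 / 255, **kwargs) -> BatchFeature:
        if not isinstance(images, (list, tuple)):
            images = [images]
        pixel_values = [np.asarray(image, dtype=np.float32) * scale for image in images]
        return BatchFeature(data={"pixel_values": pixel_values})


# processor = ToyImageProcessor()
# batch = processor(np.zeros((16, 16, 3), dtype=np.uint8))  # calls preprocess under the hood
```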
src/transformers/image_transforms.py (new file, 318 lines)
@@ -0,0 +1,318 @@
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import warnings
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union

import numpy as np

from transformers.utils.import_utils import is_flax_available, is_tf_available, is_torch_available, is_vision_available


if is_vision_available():
    import PIL

from .image_utils import (
    ChannelDimension,
    get_channel_dimension_axis,
    get_image_size,
    infer_channel_dimension_format,
    is_jax_tensor,
    is_tf_tensor,
    is_torch_tensor,
    to_numpy_array,
)


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf
    if is_flax_available():
        import jax.numpy as jnp


def to_channel_dimension_format(image: np.ndarray, channel_dim: Union[ChannelDimension, str]) -> np.ndarray:
    """
    Converts `image` to the channel dimension format specified by `channel_dim`.

    Args:
        image (`numpy.ndarray`):
            The image to have its channel dimension set.
        channel_dim (`ChannelDimension`):
            The channel dimension format to use.

    Returns:
        `np.ndarray`: The image with the channel dimension set to `channel_dim`.
    """
    if not isinstance(image, np.ndarray):
        raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")

    current_channel_dim = infer_channel_dimension_format(image)
    target_channel_dim = ChannelDimension(channel_dim)
    if current_channel_dim == target_channel_dim:
        return image

    if target_channel_dim == ChannelDimension.FIRST:
        image = image.transpose((2, 0, 1))
    elif target_channel_dim == ChannelDimension.LAST:
        image = image.transpose((1, 2, 0))
    else:
        raise ValueError("Unsupported channel dimension format: {}".format(channel_dim))

    return image


def rescale(
    image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, dtype=np.float32
) -> np.ndarray:
    """
    Rescales `image` by `scale`.

    Args:
        image (`np.ndarray`):
            The image to rescale.
        scale (`float`):
            The scale to use for rescaling the image.
        data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the image. If not provided, it will be the same as the input image.
        dtype (`np.dtype`, *optional*, defaults to `np.float32`):
            The dtype of the output image. Defaults to `np.float32`. Used for backwards compatibility with feature
            extractors.

    Returns:
        `np.ndarray`: The rescaled image.
    """
    if not isinstance(image, np.ndarray):
        raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}")

    rescaled_image = image * scale
    if data_format is not None:
        rescaled_image = to_channel_dimension_format(rescaled_image, data_format)
    rescaled_image = rescaled_image.astype(dtype)
    return rescaled_image
def to_pil_image(
    image: Union[np.ndarray, PIL.Image.Image, "torch.Tensor", "tf.Tensor", "jnp.Tensor"],
    do_rescale: Optional[bool] = None,
) -> PIL.Image.Image:
    """
    Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
    needed.

    Args:
        image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor` or `tf.Tensor`):
            The image to convert to the `PIL.Image` format.
        do_rescale (`bool`, *optional*):
            Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default
            to `True` if the image type is a floating type, `False` otherwise.

    Returns:
        `PIL.Image.Image`: The converted image.
    """
    if isinstance(image, PIL.Image.Image):
        return image

    # Convert all tensors to numpy arrays before converting to PIL image
    if is_torch_tensor(image) or is_tf_tensor(image):
        image = image.numpy()
    elif is_jax_tensor(image):
        image = np.array(image)
    elif not isinstance(image, np.ndarray):
        raise ValueError("Input image type not supported: {}".format(type(image)))

    # If the channel has been moved to first dim, we put it back at the end.
    image = to_channel_dimension_format(image, ChannelDimension.LAST)

    # PIL.Image can only store uint8 values, so we rescale the image to be between 0 and 255 if needed.
    do_rescale = isinstance(image.flat[0], float) if do_rescale is None else do_rescale
    if do_rescale:
        image = rescale(image, 255)
    image = image.astype(np.uint8)
    return PIL.Image.fromarray(image)
def get_resize_output_image_size(
    input_image: np.ndarray,
    size: Union[int, Tuple[int, int], List[int], Tuple[int]],
    default_to_square: bool = True,
    max_size: Optional[int] = None,
) -> tuple:
    """
    Find the target (height, width) dimension of the output image after resizing given the input image and the desired
    size.

    Args:
        input_image (`np.ndarray`):
            The image to resize.
        size (`int` or `Tuple[int, int]` or `List[int]` or `Tuple[int]`):
            The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to
            this.

            If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If
            `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this
            number, i.e. if height > width, then image will be rescaled to (size * height / width, size).
        default_to_square (`bool`, *optional*, defaults to `True`):
            How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square
            (`size`, `size`). If set to `False`, will replicate
            [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)
            with support for resizing only the smallest edge and providing an optional `max_size`.
        max_size (`int`, *optional*):
            The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater
            than `max_size` after being resized according to `size`, then the image is resized again so that the longer
            edge is equal to `max_size`. As a result, `size` might be overruled, i.e. the smaller edge may be shorter
            than `size`. Only used if `default_to_square` is `False`.

    Returns:
        `tuple`: The target (height, width) dimension of the output image after resizing.
    """
    if isinstance(size, (tuple, list)):
        if len(size) == 2:
            return tuple(size)
        elif len(size) == 1:
            # Perform same logic as if size was an int
            size = size[0]
        else:
            raise ValueError("size must have 1 or 2 elements if it is a list or tuple")

    if default_to_square:
        return (size, size)

    height, width = get_image_size(input_image)
    short, long = (width, height) if width <= height else (height, width)
    requested_new_short = size

    if short == requested_new_short:
        return (height, width)

    new_short, new_long = requested_new_short, int(requested_new_short * long / short)

    if max_size is not None:
        if max_size <= requested_new_short:
            raise ValueError(
                f"max_size = {max_size} must be strictly greater than the requested "
                f"size for the smaller edge size = {size}"
            )
        if new_long > max_size:
            new_short, new_long = int(max_size * new_short / new_long), max_size

    return (new_long, new_short) if width <= height else (new_short, new_long)
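A short worked example of the sizing rules above (values chosen purely for illustration):

```python
import numpy as np

image = np.zeros((3, 480, 640))  # (channels, height, width)

# size given as an int with default_to_square=True -> a square output
print(get_resize_output_image_size(image, size=224))  # (224, 224)

# default_to_square=False -> the smaller edge (height=480) is matched to 224 and the
# other edge is scaled by the same ratio: int(640 * 224 / 480) = 298
print(get_resize_output_image_size(image, size=224, default_to_square=False))  # (224, 298)

# with max_size, the longer edge is capped at 256 and the shorter edge shrinks accordingly
print(get_resize_output_image_size(image, size=224, default_to_square=False, max_size=256))  # (192, 256)
```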
def resize(
    image,
    size: Tuple[int, int],
    resample=PIL.Image.BILINEAR,
    data_format: Optional[ChannelDimension] = None,
    return_numpy: bool = True,
) -> np.ndarray:
    """
    Resizes `image` to (h, w) specified by `size` using the PIL library.

    Args:
        image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
            The image to resize.
        size (`Tuple[int, int]`):
            The size to use for resizing the image.
        resample (`int`, *optional*, defaults to `PIL.Image.BILINEAR`):
            The filter to use for resampling.
        data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the output image. If `None`, will use the inferred format from the input.
        return_numpy (`bool`, *optional*, defaults to `True`):
            Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is
            returned.

    Returns:
        `np.ndarray`: The resized image.
    """
    if not len(size) == 2:
        raise ValueError("size must have 2 elements")

    # For all transformations, we want to keep the same data format as the input image unless otherwise specified.
    # The resized image from PIL will always have channels last, so find the input format first.
    data_format = infer_channel_dimension_format(image) if data_format is None else data_format

    # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use
    # the pillow library to resize the image and then convert back to numpy
    if not isinstance(image, PIL.Image.Image):
        # PIL expects image to have channels last
        image = to_channel_dimension_format(image, ChannelDimension.LAST)
        image = to_pil_image(image)
    height, width = size
    # PIL images are in the format (width, height)
    resized_image = image.resize((width, height), resample=resample)

    if return_numpy:
        resized_image = np.array(resized_image)
        resized_image = to_channel_dimension_format(resized_image, data_format)
    return resized_image
def normalize(
    image: np.ndarray,
    mean: Union[float, Iterable[float]],
    std: Union[float, Iterable[float]],
    data_format: Optional[ChannelDimension] = None,
) -> np.ndarray:
    """
    Normalizes `image` using the mean and standard deviation specified by `mean` and `std`.

    image = (image - mean) / std

    Args:
        image (`np.ndarray`):
            The image to normalize.
        mean (`float` or `Iterable[float]`):
            The mean to use for normalization.
        std (`float` or `Iterable[float]`):
            The standard deviation to use for normalization.
        data_format (`ChannelDimension`, *optional*):
            The channel dimension format of the output image. If `None`, will use the inferred format from the input.
    """
    if isinstance(image, PIL.Image.Image):
        warnings.warn(
            "PIL.Image.Image inputs are deprecated and will be removed in v4.26.0. Please use numpy arrays instead.",
            FutureWarning,
        )
        # Convert PIL image to numpy array with the same logic as in the previous feature extractor normalize -
        # casting to numpy array and dividing by 255.
        image = to_numpy_array(image)
        image = rescale(image, scale=1 / 255)

    input_data_format = infer_channel_dimension_format(image)
    channel_axis = get_channel_dimension_axis(image)
    num_channels = image.shape[channel_axis]

    if isinstance(mean, Iterable):
        if len(mean) != num_channels:
            raise ValueError(f"mean must have {num_channels} elements if it is an iterable, got {len(mean)}")
    else:
        mean = [mean] * num_channels

    if isinstance(std, Iterable):
        if len(std) != num_channels:
            raise ValueError(f"std must have {num_channels} elements if it is an iterable, got {len(std)}")
    else:
        std = [std] * num_channels

    if input_data_format == ChannelDimension.LAST:
        image = (image - mean) / std
    else:
        image = ((image.T - mean) / std).T

    image = to_channel_dimension_format(image, data_format) if data_format is not None else image
    return image
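Taken together, the helpers above can replace the ad-hoc per-extractor logic; a minimal sketch (not part of the new file) of a typical preprocessing chain, using the ImageNet mean/std constants the library ships elsewhere:

```python
import numpy as np

image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)  # channels-last uint8 image

resized = resize(image, size=(224, 224))            # PIL-backed resize, returned as numpy
rescaled = rescale(resized, scale=1 / 255)          # uint8 [0, 255] -> float32 [0, 1]
normalized = normalize(rescaled, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
pixel_values = to_channel_dimension_format(normalized, ChannelDimension.FIRST)  # (3, 224, 224)
```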
@@ -14,33 +14,147 @@
# limitations under the License.

import os
from typing import List, Union
from typing import TYPE_CHECKING, List, Tuple, Union

import numpy as np
import PIL.Image
import PIL.ImageOps

import requests

from .utils import is_torch_available
from .utils import is_flax_available, is_tf_available, is_torch_available, is_vision_available
from .utils.constants import (  # noqa: F401
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
)
from .utils.generic import _is_torch
from .utils.generic import ExplicitEnum, _is_jax, _is_tensorflow, _is_torch, to_numpy


if is_vision_available():
    import PIL.Image
    import PIL.ImageOps


if TYPE_CHECKING:
    if is_torch_available():
        import torch


ImageInput = Union[
    PIL.Image.Image, np.ndarray, "torch.Tensor", List[PIL.Image.Image], List[np.ndarray], List["torch.Tensor"]  # noqa
]
    "PIL.Image.Image", np.ndarray, "torch.Tensor", List["PIL.Image.Image"], List[np.ndarray], List["torch.Tensor"]
]  # noqa


class ChannelDimension(ExplicitEnum):
    FIRST = "channels_first"
    LAST = "channels_last"


def is_torch_tensor(obj):
    return _is_torch(obj) if is_torch_available() else False


def is_tf_tensor(obj):
    return _is_tensorflow(obj) if is_tf_available() else False


def is_jax_tensor(obj):
    return _is_jax(obj) if is_flax_available() else False


def is_valid_image(img):
    return (
        isinstance(img, (PIL.Image.Image, np.ndarray))
        or is_torch_tensor(img)
        or is_tf_tensor(img)
        or is_jax_tensor(img)
    )


def valid_images(imgs):
    return all(is_valid_image(img) for img in imgs)


def is_batched(img):
    if isinstance(img, (list, tuple)):
        return is_valid_image(img[0])
    return False


def to_numpy_array(img) -> np.ndarray:
    if isinstance(img, PIL.Image.Image):
        return np.array(img)
    return to_numpy(img)


def infer_channel_dimension_format(image: np.ndarray) -> ChannelDimension:
    """
    Infers the channel dimension format of `image`.

    Args:
        image (`np.ndarray`):
            The image to infer the channel dimension of.

    Returns:
        The channel dimension of the image.
    """
    if image.ndim == 3:
        first_dim, last_dim = 0, 2
    elif image.ndim == 4:
        first_dim, last_dim = 1, 3
    else:
        raise ValueError(f"Unsupported number of image dimensions: {image.ndim}")

    if image.shape[first_dim] in (1, 3):
        return ChannelDimension.FIRST
    elif image.shape[last_dim] in (1, 3):
        return ChannelDimension.LAST
    raise ValueError("Unable to infer channel dimension format")


def get_channel_dimension_axis(image: np.ndarray) -> int:
    """
    Returns the channel dimension axis of the image.

    Args:
        image (`np.ndarray`):
            The image to get the channel dimension axis of.

    Returns:
        The channel dimension axis of the image.
    """
    channel_dim = infer_channel_dimension_format(image)
    if channel_dim == ChannelDimension.FIRST:
        return image.ndim - 3
    elif channel_dim == ChannelDimension.LAST:
        return image.ndim - 1
    raise ValueError(f"Unsupported data format: {channel_dim}")


def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:
    """
    Returns the (height, width) dimensions of the image.

    Args:
        image (`np.ndarray`):
            The image to get the dimensions of.
        channel_dim (`ChannelDimension`, *optional*):
            Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.

    Returns:
        A tuple of the image's height and width.
    """
    if channel_dim is None:
        channel_dim = infer_channel_dimension_format(image)

    if channel_dim == ChannelDimension.FIRST:
        return image.shape[-2], image.shape[-1]
    elif channel_dim == ChannelDimension.LAST:
        return image.shape[-3], image.shape[-2]
    else:
        raise ValueError(f"Unsupported data format: {channel_dim}")
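A quick illustration (not part of the diff) of how the two helpers above behave on channels-first versus channels-last arrays:

```python
import numpy as np

channels_first = np.zeros((3, 480, 640))
channels_last = np.zeros((480, 640, 3))

print(infer_channel_dimension_format(channels_first))  # ChannelDimension.FIRST
print(infer_channel_dimension_format(channels_last))   # ChannelDimension.LAST

# get_image_size always reports (height, width), whatever the layout
print(get_image_size(channels_first))  # (480, 640)
print(get_image_size(channels_last))   # (480, 640)
```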
def load_image(image: Union[str, "PIL.Image.Image"]) -> "PIL.Image.Image":
    """
    Loads `image` to a PIL Image.
@@ -236,7 +350,7 @@ class ImageFeatureExtractionMixin:
        else:
            return (image - mean) / std

    def resize(self, image, size, resample=PIL.Image.BILINEAR, default_to_square=True, max_size=None):
    def resize(self, image, size, resample=None, default_to_square=True, max_size=None):
        """
        Resizes `image`. Enforces conversion of input to PIL.Image.

@@ -266,6 +380,8 @@ class ImageFeatureExtractionMixin:
        Returns:
            image: A resized `PIL.Image.Image`.
        """
        resample = resample if resample is not None else PIL.Image.BILINEAR

        self._ensure_format_supported(image)

        if not isinstance(image, PIL.Image.Image):
@@ -393,7 +509,7 @@ class ImageFeatureExtractionMixin:

        return image[::-1, :, :]

    def rotate(self, image, angle, resample=PIL.Image.NEAREST, expand=0, center=None, translate=None, fillcolor=None):
    def rotate(self, image, angle, resample=None, expand=0, center=None, translate=None, fillcolor=None):
        """
        Returns a rotated copy of `image`. This method returns a copy of `image`, rotated the given number of degrees
        counter clockwise around its centre.
@@ -406,6 +522,8 @@ class ImageFeatureExtractionMixin:
        Returns:
            image: A rotated `PIL.Image.Image`.
        """
        resample = resample if resample is not None else PIL.Image.NEAREST

        self._ensure_format_supported(image)

        if not isinstance(image, PIL.Image.Image):
@@ -707,8 +707,15 @@ def load_tf_sharded_weights(model, shard_files, ignore_mismatched_sizes=False, s

    # Since TF adds the name of the class to its weights, and uses the index and not the name of the layer to load
    # the weight, we have to get rid of the first prefix of the name of the layer.
    model_keys = set("/".join(k.name.split("/")[1:]) for k in model.weights)
    model_layer_map = {"/".join(k.name.split("/")[1:]): i for i, k in enumerate(model.weights)}
    model_keys = set()
    model_layer_map = dict()
    for i, k in enumerate(model.weights):
        if "model." in k.name or len(k.name.split("/")) == 1:
            layer_name = k.name
        else:
            layer_name = "/".join(k.name.split("/")[1:])
        model_keys.add(layer_name)
        model_layer_map[layer_name] = i

    for shard_file in shard_files:
        state_dict = tf.io.read_file(shard_file)
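For context, a minimal sketch (illustrative weight names only, not the library's own helper) of the layer-name normalization applied above: the leading class scope that Keras prepends is stripped unless the name already carries the sharded "model." prefix or has no scope at all.

```python
def _normalized_layer_name(weight_name: str) -> str:
    # Mirrors the rule used above: keep names that contain "model." or have no scope prefix,
    # otherwise drop the first "<class scope>/" component.
    if "model." in weight_name or len(weight_name.split("/")) == 1:
        return weight_name
    return "/".join(weight_name.split("/")[1:])


print(_normalized_layer_name("tf_bert_model/bert/embeddings/word_embeddings/weight:0"))
# -> "bert/embeddings/word_embeddings/weight:0"
print(_normalized_layer_name("model.shared/embeddings:0"))  # kept as-is
```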
@@ -2059,12 +2066,23 @@ class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, Pu
        Return:
            `tf.keras.layers.Embedding`: Resized Embedding layer.
        """

        # Get the initialization range for the embeddings
        init_range = 0.02  # default value
        potential_initialization_variable_names = [
            "initializer_range",  # most common
            "initializer_factor",  # e.g. T5
            "init_std",  # e.g BART
        ]
        for var_name in potential_initialization_variable_names:
            if hasattr(self.config, var_name):
                init_range = getattr(self.config, var_name)

        # Get a new (initialized) embeddings layer
        init_range = getattr(self.config, "initializer_range", 0.02)
        new_embeddings = tf.keras.layers.Embedding(
            input_dim=new_num_tokens,
            output_dim=old_embeddings.output_dim,
            embeddings_initializer=get_initializer(init_range),
            embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=init_range),
            name=old_embeddings.embeddings.name[:-13],  # exact same scoped name except "/embeddings:0"
        )
        new_embeddings(tf.constant([[0]]))
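The code path above is exercised whenever the public resizing API is called, for example after adding tokens to a tokenizer; a minimal sketch (checkpoint name is a placeholder):

```python
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # placeholder checkpoint
model = TFAutoModelForSequenceClassification.from_pretrained("bert-base-uncased")

tokenizer.add_tokens(["<new_token>"])
# Triggers the resized-embeddings logic above, which now reads the init range from the model config.
model.resize_token_embeddings(len(tokenizer))
```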
@@ -2097,6 +2115,7 @@ class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, Pu
        saved_model=False,
        version=1,
        push_to_hub=False,
        signatures=None,
        max_shard_size: Union[int, str] = "10GB",
        create_pr: bool = False,
        **kwargs
@@ -2118,6 +2137,8 @@ class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, Pu
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
            signatures (`dict` or `tf.function`, *optional*):
                Model's signature used for serving. This will be passed to the `signatures` argument of model.save().
            max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
                The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be smaller
                than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`).
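A minimal sketch (not part of the diff) of how the new arguments combine; `model` stands for any TFPreTrainedModel instance, and the directory and shard size are arbitrary:

```python
# `signatures` overrides the default serving signature when exporting a SavedModel,
# and `max_shard_size` controls when the h5 weights are split into shards.
model.save_pretrained(
    "./my-tf-model",
    saved_model=True,
    signatures=model.serving,   # optional: a custom tf.function or dict of signatures
    max_shard_size="2GB",
)
```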
@@ -2148,8 +2169,10 @@ class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, Pu
        files_timestamps = self._get_files_timestamps(save_directory)

        if saved_model:
            if signatures is None:
                signatures = self.serving
            saved_model_dir = os.path.join(save_directory, "saved_model", str(version))
            self.save(saved_model_dir, include_optimizer=False, signatures=self.serving)
            self.save(saved_model_dir, include_optimizer=False, signatures=signatures)
            logger.info(f"Saved model created in {saved_model_dir}")

        # Save configuration file
@@ -2195,17 +2218,19 @@ class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, Pu
            )
            for shard_file, shard in shards.items():
                with h5py.File(os.path.join(save_directory, shard_file), mode="w") as shard_file:
                    save_attributes_to_hdf5_group(
                        shard_file,
                        "layer_names",
                        ["/".join(layer.name.split("/")[1:]).encode("utf8") for layer in shard],
                    )

                    layers = []
                    for layer in sorted(shard, key=lambda x: x.name):
                        if "model." in layer.name or len(layer.name.split("/")) == 1:
                            layer_name = layer.name
                            print(layer_name)
                        else:
                            layer_name = "/".join(layer.name.split("/")[1:])
                        param_dset = shard_file.create_dataset(
                            "/".join(layer.name.split("/")[1:]), layer.numpy().shape, dtype=layer.numpy().dtype
                            layer_name, layer.numpy().shape, dtype=layer.numpy().dtype
                        )
                        param_dset[:] = layer.numpy()
                        layers.append(layer_name.encode("utf8"))
                    save_attributes_to_hdf5_group(shard_file, "layer_names", layers)

        if push_to_hub:
            self._upload_modified_files(
@@ -3022,36 +3047,3 @@ def get_initializer(initializer_range: float = 0.02) -> tf.initializers.Truncate
        `tf.initializers.TruncatedNormal`: The truncated normal initializer.
    """
    return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)


class TFWrappedEmbeddings:
    """
    This class wraps the TFSharedEmbeddingTokens layer into a python 'no-keras-layer' class to avoid problems with
    weight restoring. Also it makes sure that the layer is called from the correct scope to avoid problems with
    saving/storing the correct weights.
    """

    # TODO (joao): flagged for deletion due to embeddings refactor

    def __init__(self, layer, abs_scope_name=None):
        self._layer = layer
        self._abs_scope_name = abs_scope_name
        self.vocab_size = self._layer.vocab_size

    def call(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer.call(inputs, mode)

        # if an abs scope name is given to the embedding variable, call variable from absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer.call(inputs, mode)

    def __call__(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer(inputs, mode)

        # if an abs scope name is given to the embedding variable, call variable from absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer(inputs, mode)
@@ -83,6 +83,7 @@ from . import (
    layoutxlm,
    led,
    levit,
    lilt,
    longformer,
    longt5,
    luke,
@@ -101,7 +101,7 @@ class AlbertConfig(PretrainedConfig):
    ...     intermediate_size=3072,
    ... )

    >>> # Initializing a model from the ALBERT-base style configuration
    >>> # Initializing a model (with random weights) from the ALBERT-base style configuration
    >>> model = AlbertModel(albert_xxlarge_configuration)

    >>> # Accessing the model configuration
@@ -48,6 +48,7 @@ else:
        "MODEL_FOR_CAUSAL_LM_MAPPING",
        "MODEL_FOR_CTC_MAPPING",
        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
        "MODEL_FOR_DEPTH_ESTIMATION_MAPPING",
        "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
        "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING",
        "MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING",
@@ -76,6 +77,7 @@ else:
        "AutoModelForAudioXVector",
        "AutoModelForCausalLM",
        "AutoModelForCTC",
        "AutoModelForDepthEstimation",
        "AutoModelForImageClassification",
        "AutoModelForImageSegmentation",
        "AutoModelForInstanceSegmentation",
@@ -197,6 +199,7 @@ if TYPE_CHECKING:
        MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING,
        MODEL_FOR_CAUSAL_LM_MAPPING,
        MODEL_FOR_CTC_MAPPING,
        MODEL_FOR_DEPTH_ESTIMATION_MAPPING,
        MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
        MODEL_FOR_IMAGE_SEGMENTATION_MAPPING,
@@ -226,6 +229,7 @@ if TYPE_CHECKING:
        AutoModelForAudioXVector,
        AutoModelForCausalLM,
        AutoModelForCTC,
        AutoModelForDepthEstimation,
        AutoModelForDocumentQuestionAnswering,
        AutoModelForImageClassification,
        AutoModelForImageSegmentation,
@@ -555,7 +555,14 @@ def getattribute_from_module(module, attr):
    # Some of the mappings have entries model_type -> object of another model type. In that case we try to grab the
    # object at the top level.
    transformers_module = importlib.import_module("transformers")
    return getattribute_from_module(transformers_module, attr)

    if module != transformers_module:
        try:
            return getattribute_from_module(transformers_module, attr)
        except ValueError:
            raise ValueError(f"Could not find {attr} neither in {module} nor in {transformers_module}!")
    else:
        raise ValueError(f"Could not find {attr} in {transformers_module}!")


class _LazyAutoMapping(OrderedDict):
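Illustratively, the fallback above means a lookup first tries the model-specific module and then the top-level `transformers` package before giving up. A conceptual sketch (not the library's actual helper; it assumes PyTorch is installed so the example class is importable):

```python
import importlib


def resolve(module_name: str, attr: str):
    # Look in the model's own module first, then fall back to the top-level transformers module.
    module = importlib.import_module(module_name)
    if hasattr(module, attr):
        return getattr(module, attr)
    transformers_module = importlib.import_module("transformers")
    if hasattr(transformers_module, attr):
        return getattr(transformers_module, attr)
    raise ValueError(f"Could not find {attr} in {module_name} or in transformers")


# e.g. a mapping entry that points at a class living elsewhere still resolves:
cls = resolve("transformers.models.lilt", "LiltModel")
```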
@@ -85,6 +85,7 @@ CONFIG_MAPPING_NAMES = OrderedDict(
        ("layoutlmv3", "LayoutLMv3Config"),
        ("led", "LEDConfig"),
        ("levit", "LevitConfig"),
        ("lilt", "LiltConfig"),
        ("longformer", "LongformerConfig"),
        ("longt5", "LongT5Config"),
        ("luke", "LukeConfig"),
@@ -221,6 +222,7 @@ CONFIG_ARCHIVE_MAP_MAPPING_NAMES = OrderedDict(
        ("layoutlmv3", "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("led", "LED_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("levit", "LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("lilt", "LILT_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("longformer", "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("longt5", "LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP"),
        ("luke", "LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP"),
@@ -360,6 +362,7 @@ MODEL_NAMES_MAPPING = OrderedDict(
        ("layoutxlm", "LayoutXLM"),
        ("led", "LED"),
        ("levit", "LeViT"),
        ("lilt", "LiLT"),
        ("longformer", "Longformer"),
        ("longt5", "LongT5"),
        ("luke", "LUKE"),
@@ -84,6 +84,7 @@ MODEL_MAPPING_NAMES = OrderedDict(
        ("layoutlmv3", "LayoutLMv3Model"),
        ("led", "LEDModel"),
        ("levit", "LevitModel"),
        ("lilt", "LiltModel"),
        ("longformer", "LongformerModel"),
        ("longt5", "LongT5Model"),
        ("luke", "LukeModel"),
@@ -252,7 +253,6 @@ MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
        ("luke", "LukeForMaskedLM"),
        ("m2m_100", "M2M100ForConditionalGeneration"),
        ("marian", "MarianMTModel"),
        ("markuplm", "MarkupLMForMaskedLM"),
        ("megatron-bert", "MegatronBertForCausalLM"),
        ("mobilebert", "MobileBertForMaskedLM"),
        ("mpnet", "MPNetForMaskedLM"),
@@ -479,6 +479,13 @@ MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict(
    ]
)

MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for depth estimation mapping
        ("dpt", "DPTForDepthEstimation"),
        ("glpn", "GLPNForDepthEstimation"),
    ]
)
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
@@ -544,6 +551,7 @@ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
        ("layoutlmv2", "LayoutLMv2ForSequenceClassification"),
        ("layoutlmv3", "LayoutLMv3ForSequenceClassification"),
        ("led", "LEDForSequenceClassification"),
        ("lilt", "LiltForSequenceClassification"),
        ("longformer", "LongformerForSequenceClassification"),
        ("luke", "LukeForSequenceClassification"),
        ("markuplm", "MarkupLMForSequenceClassification"),
@@ -600,6 +608,7 @@ MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
        ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"),
        ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"),
        ("led", "LEDForQuestionAnswering"),
        ("lilt", "LiltForQuestionAnswering"),
        ("longformer", "LongformerForQuestionAnswering"),
        ("luke", "LukeForQuestionAnswering"),
        ("lxmert", "LxmertForQuestionAnswering"),
@@ -673,6 +682,7 @@ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
        ("layoutlm", "LayoutLMForTokenClassification"),
        ("layoutlmv2", "LayoutLMv2ForTokenClassification"),
        ("layoutlmv3", "LayoutLMv3ForTokenClassification"),
        ("lilt", "LiltForTokenClassification"),
        ("longformer", "LongformerForTokenClassification"),
        ("luke", "LukeForTokenClassification"),
        ("markuplm", "MarkupLMForTokenClassification"),
@@ -841,6 +851,7 @@ MODEL_FOR_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODE
MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES
)
MODEL_FOR_DEPTH_ESTIMATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES)
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
@@ -1036,6 +1047,13 @@ AutoModelForZeroShotObjectDetection = auto_class_update(
)


class AutoModelForDepthEstimation(_BaseAutoModelClass):
    _model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING


AutoModelForDepthEstimation = auto_class_update(AutoModelForDepthEstimation, head_doc="depth estimation")


class AutoModelForVideoClassification(_BaseAutoModelClass):
    _model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
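With the mapping and class above in place, depth estimation checkpoints become loadable through the auto API; a minimal sketch (not part of the diff; the checkpoint name is an example and assumed to exist on the Hub):

```python
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, AutoModelForDepthEstimation

feature_extractor = AutoFeatureExtractor.from_pretrained("Intel/dpt-large")  # example checkpoint
model = AutoModelForDepthEstimation.from_pretrained("Intel/dpt-large")

image = Image.new("RGB", (384, 384))  # stand-in for a real photo
inputs = feature_extractor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)
predicted_depth = outputs.predicted_depth  # dense depth prediction
```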
@@ -39,6 +39,7 @@ TF_MODEL_MAPPING_NAMES = OrderedDict(
        ("convbert", "TFConvBertModel"),
        ("convnext", "TFConvNextModel"),
        ("ctrl", "TFCTRLModel"),
        ("cvt", "TFCvtModel"),
        ("data2vec-vision", "TFData2VecVisionModel"),
        ("deberta", "TFDebertaModel"),
        ("deberta-v2", "TFDebertaV2Model"),
@@ -46,6 +47,7 @@ TF_MODEL_MAPPING_NAMES = OrderedDict(
        ("distilbert", "TFDistilBertModel"),
        ("dpr", "TFDPRQuestionEncoder"),
        ("electra", "TFElectraModel"),
        ("esm", "TFEsmModel"),
        ("flaubert", "TFFlaubertModel"),
        ("funnel", ("TFFunnelModel", "TFFunnelBaseModel")),
        ("gpt2", "TFGPT2Model"),
@@ -128,6 +130,7 @@ TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
        ("ctrl", "TFCTRLLMHeadModel"),
        ("distilbert", "TFDistilBertForMaskedLM"),
        ("electra", "TFElectraForMaskedLM"),
        ("esm", "TFEsmForMaskedLM"),
        ("flaubert", "TFFlaubertWithLMHeadModel"),
        ("funnel", "TFFunnelForMaskedLM"),
        ("gpt2", "TFGPT2LMHeadModel"),
@@ -184,6 +187,7 @@ TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
    [
        # Model for Image-classification
        ("convnext", "TFConvNextForImageClassification"),
        ("cvt", "TFCvtForImageClassification"),
        ("data2vec-vision", "TFData2VecVisionForImageClassification"),
        ("deit", ("TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher")),
        ("mobilevit", "TFMobileViTForImageClassification"),
@@ -221,6 +225,7 @@ TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
        ("deberta-v2", "TFDebertaV2ForMaskedLM"),
        ("distilbert", "TFDistilBertForMaskedLM"),
        ("electra", "TFElectraForMaskedLM"),
        ("esm", "TFEsmForMaskedLM"),
        ("flaubert", "TFFlaubertWithLMHeadModel"),
        ("funnel", "TFFunnelForMaskedLM"),
        ("layoutlm", "TFLayoutLMForMaskedLM"),
@@ -271,6 +276,7 @@ TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
        ("deberta-v2", "TFDebertaV2ForSequenceClassification"),
        ("distilbert", "TFDistilBertForSequenceClassification"),
        ("electra", "TFElectraForSequenceClassification"),
        ("esm", "TFEsmForSequenceClassification"),
        ("flaubert", "TFFlaubertForSequenceClassification"),
        ("funnel", "TFFunnelForSequenceClassification"),
        ("gpt2", "TFGPT2ForSequenceClassification"),
@@ -344,6 +350,7 @@ TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
        ("deberta-v2", "TFDebertaV2ForTokenClassification"),
        ("distilbert", "TFDistilBertForTokenClassification"),
        ("electra", "TFElectraForTokenClassification"),
        ("esm", "TFEsmForTokenClassification"),
        ("flaubert", "TFFlaubertForTokenClassification"),
        ("funnel", "TFFunnelForTokenClassification"),
        ("layoutlm", "TFLayoutLMForTokenClassification"),
@@ -62,7 +62,7 @@ PROCESSOR_MAPPING_NAMES = OrderedDict(
        ("wav2vec2_with_lm", "Wav2Vec2ProcessorWithLM"),
        ("wavlm", "Wav2Vec2Processor"),
        ("whisper", "WhisperProcessor"),
        ("xclip", "CLIPProcessor"),
        ("xclip", "XCLIPProcessor"),
    ]
)
@@ -122,6 +122,7 @@ else:
            ),
            ("electra", ("ElectraTokenizer", "ElectraTokenizerFast" if is_tokenizers_available() else None)),
            ("ernie", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
            ("esm", ("EsmTokenizer", None)),
            ("flaubert", ("FlaubertTokenizer", None)),
            ("fnet", ("FNetTokenizer", "FNetTokenizerFast" if is_tokenizers_available() else None)),
            ("fsmt", ("FSMTTokenizer", None)),
@@ -140,6 +141,7 @@ else:
            ("layoutlmv3", ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" if is_tokenizers_available() else None)),
            ("layoutxlm", ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast" if is_tokenizers_available() else None)),
            ("led", ("LEDTokenizer", "LEDTokenizerFast" if is_tokenizers_available() else None)),
            ("lilt", ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" if is_tokenizers_available() else None)),
            ("longformer", ("LongformerTokenizer", "LongformerTokenizerFast" if is_tokenizers_available() else None)),
            (
                "longt5",
@@ -96,12 +96,12 @@ class BartConfig(PretrainedConfig):
    Example:

    ```python
    >>> from transformers import BartModel, BartConfig
    >>> from transformers import BartConfig, BartModel

    >>> # Initializing a BART facebook/bart-large style configuration
    >>> configuration = BartConfig()

    >>> # Initializing a model from the facebook/bart-large style configuration
    >>> # Initializing a model (with random weights) from the facebook/bart-large style configuration
    >>> model = BartModel(configuration)

    >>> # Accessing the model configuration
@@ -16,7 +16,6 @@


import random
from contextlib import nullcontext
from typing import Optional, Tuple, Union

import numpy as np
@@ -41,6 +40,7 @@ from ...modeling_tf_utils import (
)
from ...tf_utils import shape_list, stable_softmax
from ...utils import (
    ContextManagers,
    add_code_sample_docstrings,
    add_end_docstrings,
    add_start_docstrings,
@@ -741,11 +741,10 @@ class TFBartEncoder(tf.keras.layers.Layer):
        # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope`
        # is used with a name ending in `/`, that name replaces the current name scope.
        # (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0)
        context = []
        if hasattr(self.embed_tokens, "load_weight_prefix"):
            context_manager = tf.name_scope(self.embed_tokens.load_weight_prefix + "/")
        else:
            context_manager = nullcontext()
        with context_manager:
            context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/"))
        with ContextManagers(context):
            # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound
            # indices on GPU, returning zeros instead. This is a dangerous silent behavior.
            tf.debugging.assert_less(
@@ -945,11 +944,10 @@ class TFBartDecoder(tf.keras.layers.Layer):
        # scope, so that its weights are registered with the desired name for loading/storing. When `tf.name_scope`
        # is used with a name ending in `/`, that name replaces the current name scope.
        # (embeddings with tf.name_scope: self.embed_tokens.load_weight_prefix/self.embed_tokens.name/embeddings:0)
        context = []
        if hasattr(self.embed_tokens, "load_weight_prefix"):
            context_manager = tf.name_scope(self.embed_tokens.load_weight_prefix + "/")
        else:
            context_manager = nullcontext()
        with context_manager:
            context.append(tf.name_scope(self.embed_tokens.load_weight_prefix + "/"))
        with ContextManagers(context):
            # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound
            # indices on GPU, returning zeros instead. This is a dangerous silent behavior.
            tf.debugging.assert_less(
@@ -1053,7 +1051,12 @@ class TFBartMainLayer(tf.keras.layers.Layer):
    def __init__(self, config: BartConfig, load_weight_prefix=None, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.shared = tf.keras.layers.Embedding(config.vocab_size, config.d_model, name="model.shared")
        self.shared = tf.keras.layers.Embedding(
            input_dim=config.vocab_size,
            output_dim=config.d_model,
            embeddings_initializer=tf.keras.initializers.TruncatedNormal(stddev=self.config.init_std),
            name="model.shared",
        )
        # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
        self.shared.load_weight_prefix = "model.shared" if load_weight_prefix is None else load_weight_prefix

@@ -1373,8 +1376,6 @@ class TFBartForConditionalGeneration(TFBartPretrainedModel, TFCausalLanguageMode
            return_dict=return_dict,
            training=training,
        )
        # TODO (joao): the line below is for models with tied embeddings. The previous TFBart had tied embeddings.
        # The PT Bart does not have tied embeddings. Untie the weights while keeping loading retrocompatibility.
        lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
        lm_logits = self.bias_layer(lm_logits)
        masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
@@ -104,12 +104,12 @@ class BeitConfig(PretrainedConfig):
    Example:

    ```python
    >>> from transformers import BeitModel, BeitConfig
    >>> from transformers import BeitConfig, BeitModel

    >>> # Initializing a BEiT beit-base-patch16-224-pt22k style configuration
    >>> configuration = BeitConfig()

    >>> # Initializing a model from the beit-base-patch16-224-pt22k style configuration
    >>> # Initializing a model (with random weights) from the beit-base-patch16-224-pt22k style configuration
    >>> model = BeitModel(configuration)

    >>> # Accessing the model configuration
Some files were not shown because too many files have changed in this diff.