Mirror of https://github.com/huggingface/transformers.git (synced 2025-10-20 17:13:56 +08:00)

Compare commits
254 Commits
v4.42.4...fix-word-i
SHA1:
3963a17762
6c1cc67a38
60f1f426d6
606aa37a4a
8deb370946
7f552e28e0
a3264332cf
6e2d04e429
026a173a64
516af4bb63
62c60a3018
1627108033
bd54ed2ed7
e68ec18ce2
2fbbcf5007
084b5094eb
20528f067c
934fe1504e
3e8106d253
f0bc49e7f6
a24a9a66f4
811a9caa21
7f5d644e69
3fbaaaa64d
7ffe25f2b9
49928892d6
6494479f1d
535fe78b9f
a2ad9d5ad5
5019aabfac
f2122cc6eb
f739687684
44f6fdd74f
8da9068730
81233c069c
27c7f971c0
5f841c74b6
f9756d9edb
b8e5cd5396
1c7ebf1d6e
c46edfb823
fad15fba78
4ab33c2d81
9d6c0641c4
3a83ec48a6
6ed0bf1e85
df6eee9201
de2318894e
9b9a54e61b
1ecedf1d9e
f53a5dec7b
5658e749ad
85a1269e19
edd68f4ed8
1c122a46dc
af0e4b7b37
1392a6867f
8d2534c4d0
e0182f3bd7
165116bc14
5f4ee98a7a
8678879f1d
01be5b4879
c85510f958
bc2adb0112
23f6a43f82
d5a99dfcee
ff0d708fe6
d2c687b3f1
9cf4f2aa9a
7d92009af6
63700628ad
a009fbdab3
3263b34354
034b477847
bab32d6fe9
9ced33ca7f
a5b226ce98
a1844a3209
2e113422b3
5a4a76edb7
1535a2c93d
34b43211d7
7405c1c77e
605f3245dc
2782aadae2
f83c6f1d02
3aefb4ec7f
251a2409c6
96a074fa7e
bd9dca3b85
817a676bd7
74d0eb3fed
7987710696
12b6880c81
d1ec36b94f
7ba028fccb
5a649ff3ec
f2a1e3ca68
0fcfc5ccc9
c38c55f4fb
aa8f86a421
b381880597
0fdea8607d
fe008d6ebe
62aa270f2a
89575b567e
46835ec6ae
4bd8f12972
566b0f1fbf
e316c5214f
22f888b3fa
cd48553fc8
56a7745704
b873234cb6
271fd8e60d
8f0d26c55e
c75969ee28
4c040aba02
c50e0551fd
c25dde1fc9
673d30b826
765732e92c
1c37e8c1a6
b31d595040
cb23d1b20b
bc36c26fa6
63be8e6f39
72fb02c47d
691586b0dc
24cfcc2114
4037a2b5b1
6f40a213eb
e391706420
c22efa6196
88e0813d8d
036d3de23d
89eec5cf20
999981daf4
693cb828ff
25e5e3fa56
e0dfd7bcaf
03a3becc48
ac946aac25
6fbea6d237
e4682de635
a1a34657d4
11efb4fc09
556a4205f0
907500423d
454bc14d90
a5c642fe7a
df1c248a6d
739a63166d
8480fda6ee
7f79a97399
d1a1bcf56a
aec1ca3a58
c1e139c2b0
574e68d554
52585019a1
6a05f68f51
e314395277
ad4ef3a290
1499a55008
23d6d0cc06
2e48b3e872
48c20700e1
f4ec7a286a
f67e0f7fb7
14d3b3f0f0
a695c18649
d625294d79
c54af4c77e
080e14b24c
ec03d97b27
8df28bb308
da79b18087
9d98706b3f
a0a3e2f469
e9eeedaf3b
97aa3e2905
ad35309a62
6176d8f5ee
b45dd5de9c
c5bc2d5fd5
6e59b30841
e3a7d9bd47
4c8149d643
d094d8d9ec
99c0e55335
4c2538b863
cffa2b9c1d
350aed7076
bd760cd13d
0abf5e8eae
952dfd4867
594c1610fa
ae9dd02ee1
4879ac2b33
ba743700f4
e5ca9b057c
f4711844a3
9f3f58c905
a177821b24
076e66e479
c1cda0ee2c
06fd7972ac
1082361a19
a8fa6fbbec
a01b033cb4
ac26260436
e786844425
1d3eaa6f7e
1556025271
eef0507f3d
9e599d1d94
d19b5a90c2
2aa2a14481
8c5c180de0
eaa5f41439
43ffb785c0
cee768d97e
87726a08ed
048f599f35
b97521614a
534cbf8a5d
65a02cd27d
ddfaf11926
c1fe12595e
0fd885b91c
dc72fd7edd
7f91f168a1
f91c16d270
cd0935dd55
82486e5995
a9701953ff
57d7594a79
93cd94b79d
cf85e86e9a
3345ae733b
e655029515
bbf1e61864
cb298978ad
82a1fc7256
5e89b335ab
0142aab7f8
1c68f2cafb
464aa74659
e44b878c02
75a6319864
727eea4ab0
0cf60f13ab
4aa17d0069
@@ -142,6 +142,7 @@ jobs:
- run: python utils/custom_init_isort.py --check_only
- run: python utils/sort_auto_mappings.py --check_only
- run: python utils/check_doc_toc.py
- run: python utils/check_docstrings.py --check_all

check_repository_consistency:
working_directory: ~/transformers

@@ -190,4 +191,4 @@ workflows:
- check_circleci_user
- check_code_quality
- check_repository_consistency
- fetch_all_tests
- fetch_all_tests

@@ -248,7 +248,7 @@ torch_job = CircleCIJob(
docker_image=[{"image": "huggingface/transformers-torch-light"}],
install_steps=["uv venv && uv pip install ."],
parallelism=6,
pytest_num_workers=16
pytest_num_workers=4
)

tokenization_job = CircleCIJob(

@@ -256,7 +256,7 @@ tokenization_job = CircleCIJob(
docker_image=[{"image": "huggingface/transformers-torch-light"}],
install_steps=["uv venv && uv pip install ."],
parallelism=6,
pytest_num_workers=16
pytest_num_workers=4
)

@@ -265,7 +265,7 @@ tf_job = CircleCIJob(
docker_image=[{"image":"huggingface/transformers-tf-light"}],
install_steps=["uv venv", "uv pip install -e."],
parallelism=6,
pytest_num_workers=16,
pytest_num_workers=4,
)

@@ -274,7 +274,7 @@ flax_job = CircleCIJob(
docker_image=[{"image":"huggingface/transformers-jax-light"}],
install_steps=["uv venv && uv pip install ."],
parallelism=6,
pytest_num_workers=16
pytest_num_workers=4
)
17 .github/ISSUE_TEMPLATE/bug-report.yml (vendored)

@@ -1,6 +1,17 @@
name: "\U0001F41B Bug Report"
description: Submit a bug report to help us improve transformers
labels: [ "bug" ]
body:
- type: markdown
attributes:
value: |
Thanks for taking the time to fill out this bug report! 🤗

Before you submit your bug report:

- If it is your first time submitting, be sure to check our [bug report guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#did-you-find-a-bug)
- Try our [docs bot](https://huggingface.co/spaces/huggingchat/hf-docs-chat) -- it might be able to help you with your issue

- type: textarea
id: system-info
attributes:

@@ -25,7 +36,7 @@ body:

Models:

- text models: @ArthurZucker
- text models: @ArthurZucker
- vision models: @amyeroberts
- speech models: @sanchit-gandhi
- graph models: @clefourrier

@@ -38,9 +49,9 @@ body:
- tensorflow: @gante and @Rocketknight1
- tokenizers: @ArthurZucker
- trainer: @muellerzr @SunMarc

Integrations:

- deepspeed: HF Trainer/Accelerate: @muellerzr
- ray/raytune: @richardliaw, @amogkam
- Big Model Inference: @SunMarc
4 .github/PULL_REQUEST_TEMPLATE.md (vendored)

@@ -58,9 +58,9 @@ Integrations:
- deepspeed: HF Trainer/Accelerate: @muellerzr
- ray/raytune: @richardliaw, @amogkam
- Big Model Inference: @SunMarc
- quantization (bitsandbytes, autogpt): @SunMarc
- quantization (bitsandbytes, autogpt): @SunMarc

Documentation: @stevhliu and @MKhalusova
Documentation: @stevhliu

HF projects:
19 .github/workflows/build-ci-docker-images.yml (vendored)

@@ -27,10 +27,10 @@ jobs:
strategy:
matrix:
file: ["quality", "consistency", "custom-tokenizers", "torch-light", "tf-light", "exotic-models", "torch-tf-light", "torch-jax-light", "jax-light", "examples-torch", "examples-tf"]
continue-on-error: true
continue-on-error: true

steps:
-
-
name: Set tag
run: |
if ${{contains(github.event.head_commit.message, '[build-ci-image]')}}; then

@@ -61,4 +61,17 @@ jobs:
REF=${{ github.sha }}
file: "./docker/${{ matrix.file }}.dockerfile"
push: ${{ contains(github.event.head_commit.message, 'ci-image]') || github.event_name == 'schedule' }}
tags: ${{ env.TAG }}
tags: ${{ env.TAG }}

notify:
runs-on: ubuntu-22.04
if: ${{ contains(github.event.head_commit.message, '[build-ci-image]') || contains(github.event.head_commit.message, '[push-ci-image]') && '!cancelled()' || github.event_name == 'schedule' }}
steps:
- name: Post to Slack
if: ${{ contains(github.event.head_commit.message, '[push-ci-image]') && github.event_name != 'schedule' }}
uses: huggingface/hf-workflows/.github/actions/post-slack@main
with:
slack_channel: "#transformers-ci-circleci-images"
title: 🤗 New docker images for CircleCI are pushed.
status: ${{ job.status }}
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
2 .github/workflows/self-pr-slow-ci.yml (vendored)

@@ -4,7 +4,7 @@ on:
pull_request:
paths:
- "src/transformers/models/*/modeling_*.py"
- "tests/models/*/test_*.py"
- "tests/**/test_*.py"

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
23 .github/workflows/trufflehog.yml (vendored)

@@ -10,20 +10,9 @@ jobs:
trufflehog:
runs-on: ubuntu-latest
steps:
- shell: bash
run: |
if [ "${{ github.event_name }}" == "push" ]; then
echo "depth=$(($(jq length <<< '${{ toJson(github.event.commits) }}') + 2))" >> $GITHUB_ENV
echo "branch=${{ github.ref_name }}" >> $GITHUB_ENV
fi
if [ "${{ github.event_name }}" == "pull_request" ]; then
echo "depth=$((${{ github.event.pull_request.commits }}+2))" >> $GITHUB_ENV
echo "branch=${{ github.event.pull_request.head.ref }}" >> $GITHUB_ENV
fi
- name: Checkout code
uses: actions/checkout@v4
with:
ref: ${{env.branch}}
fetch-depth: ${{env.depth}}
- name: Secret Scanning
uses: trufflesecurity/trufflehog@main
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Secret Scanning
uses: trufflesecurity/trufflehog@main
@@ -61,7 +61,10 @@ feedback.

The 🤗 Transformers library is robust and reliable thanks to users who report the problems they encounter.

Before you report an issue, we would really appreciate it if you could **make sure the bug was not
already reported** (use the search bar on GitHub under Issues). Your issue should also be related to bugs in the library itself, and not your code. If you're unsure whether the bug is in your code or the library, please ask in the [forum](https://discuss.huggingface.co/) first. This helps us respond quicker to fixing issues related to the library versus general questions.
already reported** (use the search bar on GitHub under Issues). Your issue should also be related to bugs in the library itself, and not your code. If you're unsure whether the bug is in your code or the library, please ask in the [forum](https://discuss.huggingface.co/) or on our [discord](https://discord.com/invite/hugging-face-879548962464493619) first. This helps us respond quicker to fixing issues related to the library versus general questions.

> [!TIP]
> We have a [docs bot](https://huggingface.co/spaces/huggingchat/hf-docs-chat), and we highly encourage you to ask all your questions there. There is always a chance your bug can be fixed with a simple flag 👾🔫

Once you've confirmed the bug hasn't already been reported, please include the following information in your issue so we can quickly resolve it:

@@ -129,7 +132,7 @@ You will need basic `git` proficiency to contribute to
manual. Type `git --help` in a shell and enjoy! If you prefer books, [Pro
Git](https://git-scm.com/book/en/v2) is a very good reference.

You'll need **[Python 3.8](https://github.com/huggingface/transformers/blob/main/setup.py#L426)** or above to contribute to 🤗 Transformers. Follow the steps below to start contributing:
You'll need **[Python 3.8](https://github.com/huggingface/transformers/blob/main/setup.py#L449)** or above to contribute to 🤗 Transformers. Follow the steps below to start contributing:

1. Fork the [repository](https://github.com/huggingface/transformers) by
clicking on the **[Fork](https://github.com/huggingface/transformers/fork)** button on the repository's page. This creates a copy of the code

@@ -160,7 +163,7 @@ You'll need **[Python 3.8](https://github.com/huggingface/transformers/blob/main
If 🤗 Transformers was already installed in the virtual environment, remove
it with `pip uninstall transformers` before reinstalling it in editable
mode with the `-e` flag.

Depending on your OS, and since the number of optional dependencies of Transformers is growing, you might get a
failure with this command. If that's the case make sure to install the Deep Learning framework you are working with
(PyTorch, TensorFlow and/or Flax) then do:

@@ -219,7 +222,7 @@ You'll need **[Python 3.8](https://github.com/huggingface/transformers/blob/main

If you're modifying documents under the `docs/source` directory, make sure the documentation can still be built. This check will also run in the CI when you open a pull request. To run a local check
make sure you install the documentation builder:

```bash
pip install ".[docs]"
```

@@ -338,12 +341,12 @@ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_ne
RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification
```

Like the slow tests, there are other environment variables available which not enabled by default during testing:
Like the slow tests, there are other environment variables available which are not enabled by default during testing:
- `RUN_CUSTOM_TOKENIZERS`: Enables tests for custom tokenizers.
- `RUN_PT_FLAX_CROSS_TESTS`: Enables tests for PyTorch + Flax integration.
- `RUN_PT_TF_CROSS_TESTS`: Enables tests for TensorFlow + PyTorch integration.

More environment variables and additional information can be found in the [testing_utils.py](src/transformers/testing_utils.py).
More environment variables and additional information can be found in the [testing_utils.py](https://github.com/huggingface/transformers/blob/main/src/transformers/testing_utils.py).

🤗 Transformers uses `pytest` as a test runner only. It doesn't use any
`pytest`-specific features in the test suite itself.
1 Makefile

@@ -56,6 +56,7 @@ quality:
python utils/custom_init_isort.py --check_only
python utils/sort_auto_mappings.py --check_only
python utils/check_doc_toc.py
python utils/check_docstrings.py --check_all

# Format source code automatically and check is there are any problems left that need manual fixing
@@ -596,7 +596,7 @@ Keywords: Data-Centric AI, Data Quality, Noisy Labels, Outlier Detection, Active

## [BentoML](https://github.com/bentoml/BentoML)

[BentoML](https://github.com/bentoml) is the unified framework for for building, shipping, and scaling production-ready AI applications incorporating traditional ML, pre-trained AI models, Generative and Large Language Models.
[BentoML](https://github.com/bentoml) is the unified framework for building, shipping, and scaling production-ready AI applications incorporating traditional ML, pre-trained AI models, Generative and Large Language Models.
All Hugging Face models and pipelines can be seamlessly integrated into BentoML applications, enabling the running of models on the most suitable hardware and independent scaling based on usage.

Keywords: BentoML, Framework, Deployment, AI Applications
@@ -6,10 +6,10 @@ RUN apt-get update && apt-get install -y time git pkg-config make git-lfs
ENV UV_PYTHON=/usr/local/bin/python
RUN pip install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools GitPython
RUN uv pip install --no-cache-dir --upgrade 'torch' --index-url https://download.pytorch.org/whl/cpu
RUN uv pip install --no-cache-dir tensorflow-cpu tf-keras
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,quality,vision,testing]"
# tensorflow pin matching setup.py
RUN uv pip install --no-cache-dir "tensorflow-cpu<2.16" "tf-keras<2.16"
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,quality,vision,testing]"
RUN git lfs install

RUN pip uninstall -y transformers
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
@@ -9,7 +9,7 @@ SHELL ["sh", "-lc"]
# The following `ARG` are mainly used to specify the versions explicitly & directly in this docker file, and not meant
# to be used as arguments for docker build (so far).

ARG PYTORCH='2.3.0'
ARG PYTORCH='2.4.0'
# (not always a valid torch version)
ARG INTEL_TORCH_EXT='2.3.0'
# Example: `cu102`, `cu113`, etc.
@@ -11,7 +11,7 @@ ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF

# If set to nothing, will install the latest version
ARG PYTORCH='2.3.0'
ARG PYTORCH='2.4.0'
ARG TORCH_VISION=''
ARG TORCH_AUDIO=''
# Example: `cu102`, `cu113`, etc.
@@ -92,6 +92,8 @@
title: Visual Question Answering
- local: tasks/text-to-speech
title: Text to speech
- local: tasks/image_text_to_text
title: Image-text-to-text
title: Multimodal
- isExpanded: false
sections:

@@ -155,6 +157,8 @@
title: EETQ
- local: quantization/hqq
title: HQQ
- local: quantization/fbgemm_fp8
title: FBGEMM_FP8
- local: quantization/optimum
title: Optimum
- local: quantization/contribute

@@ -382,6 +386,8 @@
title: Fuyu
- local: model_doc/gemma
title: Gemma
- local: model_doc/gemma2
title: Gemma2
- local: model_doc/openai-gpt
title: GPT
- local: model_doc/gpt_neo

@@ -579,6 +585,8 @@
title: DeiT
- local: model_doc/depth_anything
title: Depth Anything
- local: model_doc/depth_anything_v2
title: Depth Anything V2
- local: model_doc/deta
title: DETA
- local: model_doc/detr

@@ -599,6 +607,8 @@
title: FocalNet
- local: model_doc/glpn
title: GLPN
- local: model_doc/hiera
title: Hiera
- local: model_doc/imagegpt
title: ImageGPT
- local: model_doc/levit

@@ -663,6 +673,8 @@
title: ViTMSN
- local: model_doc/yolos
title: YOLOS
- local: model_doc/zoedepth
title: ZoeDepth
title: Vision models
- isExpanded: false
sections:

@@ -674,6 +686,8 @@
title: CLAP
- local: model_doc/encodec
title: EnCodec
- local: model_doc/hiera
title: Hiera
- local: model_doc/hubert
title: Hubert
- local: model_doc/mctct

@@ -748,6 +762,8 @@
title: BridgeTower
- local: model_doc/bros
title: BROS
- local: model_doc/chameleon
title: Chameleon
- local: model_doc/chinese_clip
title: Chinese-CLIP
- local: model_doc/clip
@@ -50,7 +50,7 @@ We implement two versions of ReactJsonAgent:



For example, here is how a ReAct agent would work its way through the following question.
For example, here is how a ReAct Code agent would work its way through the following question.

```py3
>>> agent.run(

@@ -188,7 +188,7 @@ You can still authorize additional imports by passing the authorized modules as
>>> from transformers import ReactCodeAgent

>>> agent = ReactCodeAgent(tools=[], additional_authorized_imports=['requests', 'bs4'])
>>>agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?")
>>> agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?")

(...)
'Hugging Face – Blog'

@@ -256,6 +256,13 @@ agent = ReactJsonAgent(tools=[PythonInterpreterTool()], system_prompt="{your_cus
> Please make sure to define the `<<tool_descriptions>>` string somewhere in the `template` so the agent is aware
of the available tools.

### Inspecting an agent run

Here are a few useful attributes to inspect what happened after a run:
- `agent.logs` stores the fine-grained logs of the agent. At every step of the agent's run, everything gets stored in a dictionary that then is appended to `agent.logs`.
- Running `agent.write_inner_memory_from_logs()` creates an inner memory of the agent's logs for the LLM to view, as a list of chat messages. This method goes over each step of the log and only stores what it's interested in as a message: for instance, it will save the system prompt and task in separate messages, then for each step it will store the LLM output as a message, and the tool call output as another message. Use this if you want a higher-level view of what has happened - but not every log will be transcripted by this method.
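A minimal sketch of using these attributes (it assumes an `agent` that has already been created and run, as in the examples above; the task string is only an illustration):

```py
# Run a task first, then inspect the logs of that run
agent.run("What is the result of 2 to the power of 3.7384?")

# Fine-grained view: one dictionary per step
for i, step_log in enumerate(agent.logs):
    print(f"--- step {i} ---")
    print(step_log)

# Higher-level view: the run replayed as a list of chat messages for the LLM
messages = agent.write_inner_memory_from_logs()
print(messages)
```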
## Tools

A tool is an atomic function to be used by an agent.

@@ -379,7 +386,7 @@ And the output:
`"The most downloaded model for the 'text-to-video' task is ByteDance/AnimateDiff-Lightning."`

### Manage agent toolbox
### Manage your agent's toolbox

If you have already initialized an agent, it is inconvenient to reinitialize it from scratch with a tool you want to use. With Transformers, you can manage an agent's toolbox by adding or replacing a tool.
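For instance, a hedged sketch of adding one more tool to an existing agent (it assumes the `agent` from the previous examples and that the toolbox exposes an `add_tool` method in this version of the agents API; the tool repo id is only an example):

```py
from transformers import load_tool

# Load an extra tool from the Hub and register it on the existing agent
image_generation_tool = load_tool("m-ric/text-to-image")
agent.toolbox.add_tool(image_generation_tool)

# The agent can now call the new tool in subsequent runs
agent.run("Generate an image of a lake surrounded by mountains.")
```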
@@ -502,3 +509,54 @@ agent = ReactCodeAgent(tools=[search_tool])

agent.run("How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need?")
```

## Gradio interface

You can leverage `gradio.Chatbot` to display your agent's thoughts using `stream_to_gradio`, here is an example:

```py
import gradio as gr
from transformers import (
    load_tool,
    ReactCodeAgent,
    HfEngine,
    stream_to_gradio,
)

# Import tool from Hub
image_generation_tool = load_tool("m-ric/text-to-image")

llm_engine = HfEngine("meta-llama/Meta-Llama-3-70B-Instruct")

# Initialize the agent with the image generation tool
agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)


def interact_with_agent(task):
    messages = []
    messages.append(gr.ChatMessage(role="user", content=task))
    yield messages
    for msg in stream_to_gradio(agent, task):
        messages.append(msg)
        yield messages + [
            gr.ChatMessage(role="assistant", content="⏳ Task not finished yet!")
        ]
    yield messages


with gr.Blocks() as demo:
    text_input = gr.Textbox(lines=1, label="Chat Message", value="Make me a picture of the Statue of Liberty.")
    submit = gr.Button("Run illustrator agent!")
    chatbot = gr.Chatbot(
        label="Agent",
        type="messages",
        avatar_images=(
            None,
            "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png",
        ),
    )
    submit.click(interact_with_agent, [text_input], [chatbot])

if __name__ == "__main__":
    demo.launch()
```
@@ -199,7 +199,8 @@ effect that `add_generation_prompt` has will depend on the template being used.

## Can I use chat templates in training?

Yes! We recommend that you apply the chat template as a preprocessing step for your dataset. After this, you
Yes! This is a good way to ensure that the chat template matches the tokens the model sees during training.
We recommend that you apply the chat template as a preprocessing step for your dataset. After this, you
can simply continue like any other language model training task. When training, you should usually set
`add_generation_prompt=False`, because the added tokens to prompt an assistant response will not be helpful during
training. Let's see an example:

@@ -233,6 +234,16 @@ The sun.</s>

From here, just continue training like you would with a standard language modelling task, using the `formatted_chat` column.

<Tip>
If you format text with `apply_chat_template(tokenize=False)` and then tokenize it in a separate step, you should set the argument
`add_special_tokens=False`. If you use `apply_chat_template(tokenize=True)`, you don't need to worry about this!

By default, some tokenizers add special tokens like `<bos>` and `<eos>` to text they tokenize. Chat templates should
always include all of the special tokens they need, and so adding extra special tokens with
the default `add_special_tokens=True` can result in incorrect or duplicated special tokens, which will hurt model
performance.
</Tip>
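A minimal sketch of the two-step pattern described in the tip (the Zephyr tokenizer is just an example of a chat model tokenizer):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-beta")

chat = [
    {"role": "user", "content": "Which is bigger, the moon or the sun?"},
    {"role": "assistant", "content": "The sun."},
]

# Step 1: render the chat to a plain string (no generation prompt when preparing training data)
formatted_chat = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=False)

# Step 2: tokenize separately; the template already contains every special token it needs,
# so switch off the tokenizer's own special tokens to avoid duplicates
tokens = tokenizer(formatted_chat, add_special_tokens=False)
print(tokens["input_ids"])
```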
## Advanced: Extra inputs to chat templates

The only argument that `apply_chat_template` requires is `messages`. However, you can pass any keyword

@@ -569,7 +580,7 @@ default template for that model class is used instead. Let's take a look at the
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")

>>> tokenizer.default_chat_template
>>> tokenizer.chat_template
"{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}"
```

@@ -693,23 +704,6 @@ with other names, pass the name of the template you want to the `chat_template`
We find that this can be a bit confusing for users, though - so if you're writing a template yourself, we recommend
trying to put it all in a single template where possible!

### What are "default" templates?

Before the introduction of chat templates, chat handling was hardcoded at the model class level. For backwards
compatibility, we have retained this class-specific handling as default templates, also set at the class level. If a
model does not have a chat template set, but there is a default template for its model class, the `TextGenerationPipeline`
class and methods like `apply_chat_template` will use the class template instead. You can find out what the default
template for your tokenizer is by checking the `tokenizer.default_chat_template` attribute.

This is something we do purely for backward compatibility reasons, to avoid breaking any existing workflows. Even when
the class template is appropriate for your model, we strongly recommend overriding the default template by
setting the `chat_template` attribute explicitly to make it clear to users that your model has been correctly configured
for chat.

Now that actual chat templates have been adopted more widely, default templates have been deprecated and will be
removed in a future release. We strongly recommend setting the `chat_template` attribute for any tokenizers that
still depend on them!

### What template should I use?

When setting the template for a model that's already been trained for chat, you should ensure that the template
@@ -195,7 +195,7 @@ inputs = {key: tensor.to(model.device) for key, tensor in inputs.items()}
print("Tokenized inputs:\n", inputs)

# 4: Generate text from the model
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.)
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.1)
print("Generated tokens:\n", outputs)

# 5: Decode the output back to a string
@@ -16,11 +16,11 @@ rendered properly in your Markdown viewer.

# DeepSpeed

[DeepSpeed](https://www.deepspeed.ai/) is a PyTorch optimization library that makes distributed training memory-efficient and fast. At it's core is the [Zero Redundancy Optimizer (ZeRO)](https://hf.co/papers/1910.02054) which enables training large models at scale. ZeRO works in several stages:
[DeepSpeed](https://www.deepspeed.ai/) is a PyTorch optimization library that makes distributed training memory-efficient and fast. At its core is the [Zero Redundancy Optimizer (ZeRO)](https://hf.co/papers/1910.02054) which enables training large models at scale. ZeRO works in several stages:

* ZeRO-1, optimizer state partioning across GPUs
* ZeRO-1, optimizer state partitioning across GPUs
* ZeRO-2, gradient partitioning across GPUs
* ZeRO-3, parameteter partitioning across GPUs
* ZeRO-3, parameter partitioning across GPUs

In GPU-limited environments, ZeRO also enables offloading optimizer memory and computation from the GPU to the CPU to fit and train really large models on a single GPU. DeepSpeed is integrated with the Transformers [`Trainer`] class for all ZeRO stages and offloading. All you need to do is provide a config file or you can use a provided template. For inference, Transformers support ZeRO-3 and offloading since it allows loading huge models.

@@ -159,7 +159,7 @@ There are three types of configuration parameters:

You could also modify the DeepSpeed configuration and edit [`TrainingArguments`] from it:

1. Create or load a DeepSpeed configuration to used as the main configuration
1. Create or load a DeepSpeed configuration to use as the main configuration
2. Create a [`TrainingArguments`] object based on these DeepSpeed configuration values
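A minimal sketch of those two steps (the config file name and the training hyperparameters are placeholders):

```py
import json
from transformers import TrainingArguments

# 1. Load a DeepSpeed configuration to use as the main configuration
with open("ds_config.json") as f:
    ds_config = json.load(f)

# 2. Build the TrainingArguments on top of it; values left as "auto" in the DeepSpeed
#    config are filled in from these arguments by the Trainer integration
training_args = TrainingArguments(
    output_dir="output",
    per_device_train_batch_size=8,
    deepspeed=ds_config,
)
```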
Some values, such as `scheduler.params.total_num_steps` are calculated by the [`Trainer`] during training.

@@ -191,7 +191,7 @@ ZeRO-1 shards the optimizer states across GPUs, and you can expect a tiny speed
</hfoption>
<hfoption id="ZeRO-2">

ZeRO-2 shards the optimizer and gradients across GPUs. This stage is primarily used for training since it's features are not relevant to inference. Some important parameters to configure for better performance include:
ZeRO-2 shards the optimizer and gradients across GPUs. This stage is primarily used for training since its features are not relevant to inference. Some important parameters to configure for better performance include:

* `offload_optimizer` should be enabled to reduce GPU memory usage.
* `overlap_comm` when set to `true` trades off increased GPU memory usage to lower allreduce latency. This feature uses 4.5x the `allgather_bucket_size` and `reduce_bucket_size` values. In this example, they're set to `5e8` which means it requires 9GB of GPU memory. If your GPU memory is 8GB or less, you should reduce `overlap_comm` to lower the memory requirements and prevent an out-of-memory (OOM) error.
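As a rough illustration, here is a minimal `zero_optimization` section for this stage written as a Python dict (the bucket sizes mirror the `5e8` example above; treat this as a starting point rather than a tuned configuration):

```py
zero2_config = {
    "zero_optimization": {
        "stage": 2,
        "offload_optimizer": {"device": "cpu", "pin_memory": True},
        "overlap_comm": True,
        "allgather_bucket_size": 5e8,
        "reduce_bucket_size": 5e8,
    }
}
# This dict can be passed to TrainingArguments(deepspeed=zero2_config) as shown earlier.
```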
@@ -226,7 +226,7 @@ ZeRO-3 shards the optimizer, gradient, and parameters across GPUs. Unlike ZeRO-2
* `pin_memory: true` can improve throughput, but less memory becomes available for other processes because the pinned memory is reserved for the specific process that requested it and it's typically accessed much faster than normal CPU memory.
* `stage3_max_live_parameters` is the upper limit on how many full parameters you want to keep on the GPU at any given time. Reduce this value if you encounter an OOM error.
* `stage3_max_reuse_distance` is a value for determining when a parameter is used again in the future, and it helps decide whether to throw the parameter away or to keep it. If the parameter is going to be reused (if the value is less than `stage3_max_reuse_distance`), then it is kept to reduce communication overhead. This is super helpful when activation checkpointing is enabled and you want to keep the parameter in the forward recompute until the backward pass. But reduce this value if you encounter an OOM error.
* `stage3_gather_16bit_weights_on_model_save` consolidates fp16 weights when a model is saved. For large models and multiple GPUs, this is an expensive in terms of memory and speed. You should enable it if you're planning on resuming training.
* `stage3_gather_16bit_weights_on_model_save` consolidates fp16 weights when a model is saved. For large models and multiple GPUs, this is expensive in terms of memory and speed. You should enable it if you're planning on resuming training.
* `sub_group_size` controls which parameters are updated during the optimizer step. Parameters are grouped into buckets of `sub_group_size` and each bucket is updated one at a time. When used with NVMe offload, `sub_group_size` determines when model states are moved in and out of CPU memory from during the optimization step. This prevents running out of CPU memory for extremely large models. `sub_group_size` can be left to its default value if you aren't using NVMe offload, but you may want to change it if you:

1. Run into an OOM error during the optimizer step. In this case, reduce `sub_group_size` to reduce memory usage of the temporary buffers.
@@ -178,7 +178,7 @@ An increasing sequence: one, two, three, four, five, six, seven, eight, nine, te

The `generate()` method supports caching keys and values to enhance efficiency and avoid re-computations. However the key and value
cache can occupy a large portion of memory, becoming a bottleneck for long-context generation, especially for Large Language Models.
Quantizing the cache when using `generate()` can significantly reduce memory requirements at the cost of speed.
Quantizing the cache when using `generate()` can significantly reduce memory requirements at the cost of speed.

KV Cache quantization in `transformers` is largely inspired by the paper [KIVI: A Tuning-Free Asymmetric 2bit Quantization for KV Cache]
(https://arxiv.org/abs/2402.02750) and currently supports `quanto` and `HQQ` as backends. For more information on the inner workings see the paper.
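For instance, a minimal sketch of enabling the quantized cache through `generate` (the checkpoint is only an example, and the `quanto` backend shown here must be installed separately):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", device_map="auto")

inputs = tokenizer("I like rock music because", return_tensors="pt").to(model.device)

# Quantize the key-value cache to 4 bits using the quanto backend
outputs = model.generate(
    **inputs,
    do_sample=False,
    max_new_tokens=20,
    cache_implementation="quantized",
    cache_config={"backend": "quanto", "nbits": 4},
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```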
@@ -213,11 +213,11 @@ I like rock music because it's loud and energetic. I like to listen to it when I

## Watermarking

The `generate()` supports watermarking the generated text by randomly marking a portion of tokens as "green".
The `generate()` supports watermarking the generated text by randomly marking a portion of tokens as "green".
When generating the "green" will have a small 'bias' value added to their logits, thus having a higher chance to be generated.
The watermarked text can be detected by calculating the proportion of "green" tokens in the text and estimating how likely it is
statistically to obtain that amount of "green" tokens for human-generated text. This watermarking strategy was proposed in the paper
["On the Reliability of Watermarks for Large Language Models"](https://arxiv.org/abs/2306.04634). For more information on
statistically to obtain that amount of "green" tokens for human-generated text. This watermarking strategy was proposed in the paper
["On the Reliability of Watermarks for Large Language Models"](https://arxiv.org/abs/2306.04634). For more information on
the inner functioning of watermarking, it is recommended to refer to the paper.

The watermarking can be used with any generative model in `tranformers` and does not require an extra classification model
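A minimal sketch of what this looks like in practice (hedged: the checkpoint is an example, and the exact defaults of `WatermarkingConfig` may differ between versions):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM, WatermarkingConfig

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

inputs = tokenizer("This is the beginning of a long story", return_tensors="pt")

# Bias the logits of "green" tokens during generation
watermarking_config = WatermarkingConfig(bias=2.5, seeding_scheme="selfhash")
outputs = model.generate(**inputs, watermarking_config=watermarking_config, do_sample=False, max_new_tokens=20)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))

# The generated ids can then be scored with `WatermarkDetector`, which estimates how likely
# the observed proportion of "green" tokens is for human-written text.
```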
@@ -484,3 +484,59 @@ just like in multinomial sampling. However, in assisted decoding, reducing the t

Alternativelly, you can also set the `prompt_lookup_num_tokens` to trigger n-gram based assisted decoding, as opposed
to model based assisted decoding. You can read more about it [here](https://twitter.com/joao_gante/status/1747322413006643259).
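For example, a minimal sketch of prompt-lookup (n-gram) assisted decoding (the checkpoint and prompt are just examples):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b")

inputs = tokenizer("The second law of thermodynamics states", return_tensors="pt")

# Draft tokens are looked up as n-grams in the prompt itself instead of coming from an assistant model
outputs = model.generate(**inputs, prompt_lookup_num_tokens=3, max_new_tokens=20)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```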
### DoLa Decoding

**D**ecoding by C**o**ntrasting **La**yers (DoLa) is a contrastive decoding strategy to improve the factuality and reduce the
hallucinations of LLMs, as described in this paper of ICLR 2024 [DoLa: Decoding by Contrasting Layers Improves Factuality in Large Language Models](https://arxiv.org/abs/2309.03883).

DoLa is achieved by contrasting the differences in logits obtained from final
layers versus earlier layers, thus amplify the factual knowledge localized to particular part of transformer layers.

Do the following two steps to activate DoLa decoding when calling the `model.generate` function:
1. Set the `dola_layers` argument, which can be either a string or a list of integers.
    - If set to a string, it can be one of `low`, `high`.
    - If set to a list of integers, it should be a list of layer indices between 0 and the total number of layers in the model. The 0-th layer is word embedding, and the 1st layer is the first transformer layer, and so on.
2. Set `repetition_penalty = 1.2` is suggested to reduce repetition in DoLa decoding.

See the following examples for DoLa decoding with the 32-layer LLaMA-7B model.

```python
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
>>> import torch

>>> tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
>>> model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b", torch_dtype=torch.float16)
>>> device = 'cuda' if torch.cuda.is_available() else 'cpu'
>>> model.to(device)
>>> set_seed(42)

>>> text = "On what date was the Declaration of Independence officially signed?"
>>> inputs = tokenizer(text, return_tensors="pt").to(device)

# Vanilla greddy decoding
>>> vanilla_output = model.generate(**inputs, do_sample=False, max_new_tokens=50)
>>> tokenizer.batch_decode(vanilla_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
['\nThe Declaration of Independence was signed on July 4, 1776.\nWhat was the date of the signing of the Declaration of Independence?\nThe Declaration of Independence was signed on July 4,']

# DoLa decoding with contrasting higher part of layers (layers 16,18,...,30)
>>> dola_high_output = model.generate(**inputs, do_sample=False, max_new_tokens=50, dola_layers='high')
>>> tokenizer.batch_decode(dola_high_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
['\nJuly 4, 1776, when the Continental Congress voted to separate from Great Britain. The 56 delegates to the Continental Congress signed the Declaration on August 2, 1776.']

# DoLa decoding with contrasting specific layers (layers 28 and 30)
>>> dola_custom_output = model.generate(**inputs, do_sample=False, max_new_tokens=50, dola_layers=[28,30], repetition_penalty=1.2)
>>> tokenizer.batch_decode(dola_custom_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
['\nIt was officially signed on 2 August 1776, when 56 members of the Second Continental Congress, representing the original 13 American colonies, voted unanimously for the resolution for independence. The 2']
```

#### Understanding the `dola_layers` argument

`dola_layers` stands for the candidate layers in premature layer selection, as described in the DoLa paper. The selected premature layer will be contrasted with the final layer.

Setting `dola_layers` to `'low'` or `'high'` will select the lower or higher part of the layers to contrast, respectively.
- For `N`-layer models with `N <= 40` layers, the layers of `range(0, N // 2, 2)` and `range(N // 2, N, 2)` are used for `'low'` and `'high'` layers, respectively.
- For models with `N > 40` layers, the layers of `range(0, 20, 2)` and `range(N - 20, N, 2)` are used for `'low'` and `'high'` layers, respectively.
- If the model has tied word embeddings, we skip the word embeddings (0-th) layer and start from the 2nd layer, as the early exit from word embeddings will become identity function.
- Set the `dola_layers` to a list of integers for layer indices to contrast manually specified layers. For example, setting `dola_layers=[28,30]` will contrast the final layer (32-th layer) with the 28-th and 30-th layers.
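To make the ranges above concrete, a small sketch that computes the candidate sets using the same formulas:

```python
# Candidate premature layers for DoLa, following the rules described above.
N = 32  # e.g. a 32-layer model such as LLaMA-7B
low_layers = list(range(0, N // 2, 2))    # [0, 2, ..., 14]
high_layers = list(range(N // 2, N, 2))   # [16, 18, ..., 30]

N = 60  # a model with more than 40 layers
low_layers = list(range(0, 20, 2))        # [0, 2, ..., 18]
high_layers = list(range(N - 20, N, 2))   # [40, 42, ..., 58]
```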
The paper suggested that contrasting `'high'` layers to improve short-answer tasks like TruthfulQA, and contrasting `'low'` layers to improve all the other long-answer reasoning tasks, such as GSM8K, StrategyQA, FACTOR, and VicunaQA. Applying DoLa to smaller models like GPT-2 is not recommended, as the results shown in the Appendix N of the paper.
@@ -139,7 +139,7 @@ reading the whole sentence with a mask to hide future tokens at a certain timest

### deep learning (DL)

Machine learning algorithms which uses neural networks with several layers.
Machine learning algorithms which use neural networks with several layers.

## E

@@ -519,4 +519,4 @@ A form of model training in which data provided to the model is not labeled. Uns
Parallelism technique which performs sharding of the tensors somewhat similar to [TensorParallel](#tensor-parallelism-tp),
except the whole tensor gets reconstructed in time for a forward or backward computation, therefore the model doesn't need
to be modified. This method also supports various offloading techniques to compensate for limited GPU memory.
Learn more about ZeRO [here](perf_train_gpu_many#zero-data-parallelism).
Learn more about ZeRO [here](perf_train_gpu_many#zero-data-parallelism).
@@ -88,6 +88,7 @@ Flax), PyTorch, and/or TensorFlow.
| [ByT5](model_doc/byt5) | ✅ | ✅ | ✅ |
| [CamemBERT](model_doc/camembert) | ✅ | ✅ | ❌ |
| [CANINE](model_doc/canine) | ✅ | ❌ | ❌ |
| [Chameleon](model_doc/chameleon) | ✅ | ❌ | ❌ |
| [Chinese-CLIP](model_doc/chinese_clip) | ✅ | ❌ | ❌ |
| [CLAP](model_doc/clap) | ✅ | ❌ | ❌ |
| [CLIP](model_doc/clip) | ✅ | ✅ | ✅ |

@@ -145,6 +146,7 @@ Flax), PyTorch, and/or TensorFlow.
| [Funnel Transformer](model_doc/funnel) | ✅ | ✅ | ❌ |
| [Fuyu](model_doc/fuyu) | ✅ | ❌ | ❌ |
| [Gemma](model_doc/gemma) | ✅ | ❌ | ✅ |
| [Gemma2](model_doc/gemma2) | ✅ | ❌ | ❌ |
| [GIT](model_doc/git) | ✅ | ❌ | ❌ |
| [GLPN](model_doc/glpn) | ✅ | ❌ | ❌ |
| [GPT Neo](model_doc/gpt_neo) | ✅ | ❌ | ✅ |

@@ -158,6 +160,7 @@ Flax), PyTorch, and/or TensorFlow.
| [Grounding DINO](model_doc/grounding-dino) | ✅ | ❌ | ❌ |
| [GroupViT](model_doc/groupvit) | ✅ | ✅ | ❌ |
| [HerBERT](model_doc/herbert) | ✅ | ✅ | ✅ |
| [Hiera](model_doc/hiera) | ✅ | ❌ | ❌ |
| [Hubert](model_doc/hubert) | ✅ | ✅ | ❌ |
| [I-BERT](model_doc/ibert) | ✅ | ❌ | ❌ |
| [IDEFICS](model_doc/idefics) | ✅ | ✅ | ❌ |

@@ -342,5 +345,6 @@ Flax), PyTorch, and/or TensorFlow.
| [XLSR-Wav2Vec2](model_doc/xlsr_wav2vec2) | ✅ | ✅ | ✅ |
| [YOLOS](model_doc/yolos) | ✅ | ❌ | ❌ |
| [YOSO](model_doc/yoso) | ✅ | ❌ | ❌ |
| [ZoeDepth](model_doc/zoedepth) | ✅ | ❌ | ❌ |

<!-- End table-->
@@ -391,6 +391,12 @@ A [`Constraint`] can be used to force the generation to include specific tokens
    - get_seq_length
    - reset

[[autodoc]] EncoderDecoderCache
    - get_seq_length
    - to_legacy_cache
    - from_legacy_cache
    - reset
    - reorder_cache

## Watermark Utils
@@ -18,59 +18,109 @@ Basic inference is slow because LLMs have to be called repeatedly to generate th

This guide will show you how to use the optimization techniques available in Transformers to accelerate LLM inference.

> [!TIP]
> Hugging Face also provides [Text Generation Inference (TGI)](https://hf.co/docs/text-generation-inference), a library dedicated to deploying and serving highly optimized LLMs for inference. It includes more optimization features not included in Transformers, such as continuous batching for increasing throughput and tensor parallelism for multi-GPU inference.
> Hugging Face also provides [Text Generation Inference (TGI)](https://hf.co/docs/text-generation-inference), a library dedicated to deploying and serving highly optimized LLMs for inference. It includes deployment-oriented optimization features not included in Transformers, such as continuous batching for increasing throughput and tensor parallelism for multi-GPU inference.

## Static kv-cache and torch.compile
## Static kv-cache and `torch.compile`

During decoding, a LLM computes the key-value (kv) values for each input token and since it is autoregressive, it computes the same kv values each time because the generated output becomes part of the input now. This is not very efficient because you're recomputing the same kv values each time.

To optimize this, you can use a kv-cache to store the past keys and values instead of recomputing them each time. However, since the kv-cache grows with each generation step and is dynamic, it prevents you from taking advantage of [torch.compile](./perf_torch_compile), a powerful optimization tool that fuses PyTorch code into fast and optimized kernels.
To optimize this, you can use a kv-cache to store the past keys and values instead of recomputing them each time. However, since the kv-cache grows with each generation step and is dynamic, it prevents you from taking advantage of [`torch.compile`](./perf_torch_compile), a powerful optimization tool that fuses PyTorch code into fast and optimized kernels.

The *static kv-cache* solves this issue by pre-allocating the kv-cache size to a maximum value which allows you to combine it with torch.compile for up to a 4x speed up.
The *static kv-cache* solves this issue by pre-allocating the kv-cache size to a maximum value which allows you to combine it with `torch.compile` for up to a 4x speed up. Your speed up may vary depending on the model size (larger models have a smaller speed up) and hardware.

> [!WARNING]
> Currently, only [Llama](./model_doc/llama2) and a few other models support static kv-cache and torch.compile. Check [this issue](https://github.com/huggingface/transformers/issues/28981) for a live model compatibility list.
> Currently, only [Llama](./model_doc/llama2) and a few other models support static kv-cache and `torch.compile`. Check [this issue](https://github.com/huggingface/transformers/issues/28981) for a live model compatibility list.

For this example, let's load the [Gemma](https://hf.co/google/gemma-2b) model.
There are three flavors of static kv-cache usage, depending on the complexity of your task:
1. Basic usage: simply set a flag in `generation_config` (recommended);
2. Advanced usage: handle a cache object for multi-turn generation or a custom generation loop;
3. Advanced usage: compile the entire `generate` function into a single graph, if having a single graph is relevant for you.

Select the correct tab below for further instructions on each of these flavors.

> [!TIP]
> Regardless of the strategy used with `torch.compile`, you can avoid shape-related recompilations if you left-pad your LLM inputs to a limited set of values. The [`pad_to_multiple_of` tokenizer flag](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__.pad_to_multiple_of) is your friend!
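As a rough illustration of that tip, here is a minimal, hedged sketch of left-padding prompts to a small set of shapes (the tokenizer, prompt, and the multiple of 8 are just example choices):

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
tokenizer.padding_side = "left"  # decoder-only models should be padded on the left

# Rounding the padded length up to a multiple of 8 keeps the number of distinct
# input shapes (and therefore torch.compile recompilations) small.
inputs = tokenizer(
    ["The theory of special relativity states "],
    return_tensors="pt",
    padding=True,
    pad_to_multiple_of=8,
).to("cuda")
```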
|
||||
|
||||
<hfoptions id="static-kv">
|
||||
<hfoption id="basic usage: generation_config">
|
||||
|
||||
For this example, let's use the [Gemma](https://hf.co/google/gemma-2b) model. All we need to do is to:
|
||||
1. Access the model's `generation_config` attribute and set the `cache_implementation` to "static";
|
||||
2. Call `torch.compile` on the model to compile the forward pass with the static kv-cache.
|
||||
|
||||
And that's it!
|
||||
|
||||
```py
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
import torch
|
||||
import os
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false" # To prevent long warnings :)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"google/gemma-2b", device_map="auto"
|
||||
)
|
||||
```
|
||||
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto")
|
||||
|
||||
There are two ways you can configure the model to use a static kv-cache. For a 7B model on an A100, both methods get a 4x speed up in the forward pass. Your speed up may vary depending on the model size (larger models have a smaller speed up) and hardware. If you're using the [`~GenerationMixin.generate`] method, the speed up is ~3x. The forward pass (which still gets 4x speed up) is only a part of the whole [`~GenerationMixin.generate`] code.
|
||||
|
||||
<hfoptions id="static-kv">
|
||||
<hfoption id="generation_config">
|
||||
|
||||
Access the model's `generation_config` attribute and set the `cache_implementation` to "static".
|
||||
|
||||
```py
|
||||
model.generation_config.cache_implementation = "static"
|
||||
```
|
||||
|
||||
Call torch.compile on the model to compile the forward pass with the static kv-cache.
|
||||
|
||||
```py
|
||||
compiled_model = torch.compile(model, mode="reduce-overhead", fullgraph=True)
|
||||
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
|
||||
input_text = "The theory of special relativity states "
|
||||
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
||||
|
||||
outputs = compiled_model.generate(**input_ids)
|
||||
tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
outputs = model.generate(**input_ids)
|
||||
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
||||
['The theory of special relativity states 1. The speed of light is constant in all inertial reference']
|
||||
```
|
||||
|
||||
Under the hood, `generate` will attempt to reuse the same cache object, removing the need for re-compilation at each call. However, if the batch size or the maximum output length increase between calls, the cache will have to be reinitialized, triggering a new compilation.
|
||||
Under the hood, `generate` will attempt to reuse the same cache object, removing the need for re-compilation at each call. Avoiding re-compilation is critical to get the most out of `torch.compile`, and you should be aware of the following:
|
||||
1. If the batch size changes or the maximum output length increases between calls, the cache will have to be reinitialized, triggering a new compilation;
|
||||
2. The first couple of calls of the compiled function are slower, as the function is being compiled.
|
||||
|
||||
> [!WARNING]
|
||||
> For a more advanced usage of the static cache, such as multi-turn conversations, we recommend instantiating and manipulating the cache object outside [`~GenerationMixin.generate`]. See the advanced usage tab.
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Static Cache">
|
||||
<hfoption id="advanced usage: control Static Cache">
|
||||
|
||||
A [`StaticCache`] object can be passed to the model's forward pass under the `past_key_values` argument, enabling the use of this object as a static kv-cache. Using this strategy, you can write your own function to decode the next token given the current token and position and cache position of previously generated tokens. You can also pass the [`StaticCache`] object to [`~GenerationMixin.generate`] and use it across calls, like you would do with a dynamic cache.
|
||||
A [`StaticCache`] object can be passed to the model's [`~GenerationMixin.generate`] under the `past_key_values` argument. The object will retain the cache contents, so you can pass it to a new [`~GenerationMixin.generate`] call to continue generation, like you would do with a dynamic cache.
|
||||
|
||||
```py
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM, StaticCache
|
||||
import torch
|
||||
import os
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false" # To prevent long warnings :)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
|
||||
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto")
|
||||
|
||||
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
|
||||
input_text = "The theory of special relativity states "
|
||||
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
||||
prompt_length = input_ids.input_ids.shape[1]
|
||||
model.generation_config.max_new_tokens = 16
|
||||
|
||||
past_key_values = StaticCache(
|
||||
config=model.config,
|
||||
max_batch_size=1,
|
||||
# If you plan to reuse the cache, make sure the cache length is large enough for all cases
|
||||
max_cache_len=prompt_length+(model.generation_config.max_new_tokens*2),
|
||||
device=model.device,
|
||||
dtype=model.dtype
|
||||
)
|
||||
outputs = model.generate(**input_ids, past_key_values=past_key_values)
|
||||
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
||||
['The theory of special relativity states 1. The speed of light is constant in all inertial reference frames. 2']
|
||||
|
||||
# pass in the generated text and the same cache object to continue generation from where it left off. Optionally, in a
|
||||
# multi-turn conversation, append the new user input to the generated text.
|
||||
new_input_ids = outputs
|
||||
outputs = model.generate(new_input_ids, past_key_values=past_key_values)
|
||||
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
||||
['The theory of special relativity states 1. The speed of light is constant in all inertial reference frames. 2. The speed of light is constant in all inertial reference frames. 3.']
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> If you want to reuse the same [`StaticCache`] object on a new prompt, be sure to reset its contents with the `.reset()` method between calls
|
||||
|
||||
If you want to go further down a level, the [`StaticCache`] object can also be passed to the model's forward pass under the same `past_key_values` argument. Using this strategy, you can write your own function to decode the next token given the current token and position and cache position of previously generated tokens.
|
||||
|
||||
```py
|
||||
from transformers import LlamaTokenizer, LlamaForCausalLM, StaticCache, logging
|
||||
@ -102,12 +152,9 @@ def decode_one_tokens(model, cur_token, input_pos, cache_position, past_key_valu
|
||||
return new_token
|
||||
```
There are a few important things you must do to enable static kv-cache and `torch.compile` with the `StaticCache` method:

1. Initialize the [`StaticCache`] instance before using the model for inference. There you can configure parameters like the maximum batch size and sequence length.

2. Call `torch.compile` on the model to compile the forward pass with the static kv-cache.

3. Set `enable_math=True` in the [torch.backends.cuda.sdp_kernel](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html) context manager to enable the native PyTorch C++ implementation of scaled dot product attention to speed up inference even more.

```py
@@ -142,8 +189,34 @@ text
'My favorite all time favorite condiment is ketchup. I love it on everything. I love it on my eggs, my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my p']
```
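For step 3, a minimal sketch of wrapping the decode step in that context manager (the variable and function names are carried over from the snippets above and are assumptions here):

```py
# assumes `model`, `cur_token`, `cache_position` and `past_key_values` are set up as above
with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True):
    cur_token = decode_one_tokens(model, cur_token.clone(), None, cache_position, past_key_values)
```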
> [!TIP]
> If you want to reuse the [`StaticCache`] object on a new prompt, be sure to reset its contents with the `.reset()` method.

</hfoption>
<hfoption id="advanced usage: end-to-end generate compilation">

Compiling the entire `generate` function is, in terms of code, even simpler than the basic usage: call `torch.compile` on `generate` to compile the whole function. There is no need to specify the use of the static cache: although it is compatible, the dynamic cache (the default) was faster in our benchmarks.

```py
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os

os.environ["TOKENIZERS_PARALLELISM"] = "false"  # To prevent long warnings :)

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto")

model.generate = torch.compile(model.generate, mode="reduce-overhead", fullgraph=True)
input_text = "The theory of special relativity states "
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['The theory of special relativity states 1. The speed of light is constant in all inertial reference']
```
As a result, we compile not only the model forward pass, but also all input preparation, logit processor operations, and so on. The result should be a slightly faster `generate` call, compared to the basic usage example, and the compiled graph may be better suited to more exotic hardware devices or use cases. However, there are severe drawbacks in using this approach:
1. Compilation is much slower;
2. All parameterization of `generate` must be done through `generation_config` (see the sketch after this list);
3. Many warnings and exceptions are suppressed -- we suggest testing with its uncompiled form first;
4. Although we are working on it, it is heavily feature restricted (for instance, at the time of writing, generation does not stop if an EOS token is selected).
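For instance, instead of passing `max_new_tokens=16` as a keyword argument to the compiled `generate` call, a sketch of the equivalent `generation_config` route (reusing the objects from the snippet above) would be:

```py
# with a compiled `generate`, options go through the generation config rather than through kwargs
model.generation_config.max_new_tokens = 16
outputs = model.generate(**input_ids)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```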
</hfoption>
</hfoptions>
@@ -147,7 +147,7 @@ Let's call it now for the next experiment.

```python
flush()
```

In the recent version of the accelerate library, you can also use a utility method called `release_memory()`

```python
from accelerate.utils import release_memory
```
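A minimal sketch of how it can be used (assuming `model` is the object you want to free):

```python
# a sketch: pass the objects you no longer need; the utility also clears the device cache
release_memory(model)
```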
@@ -683,7 +683,7 @@ Assistant: Germany has ca. 81 million inhabitants

In this chat, the LLM runs auto-regressive decoding twice:
1. The first time, the key-value cache is empty and the input prompt is `"User: How many people live in France?"` and the model auto-regressively generates the text `"Roughly 75 million people live in France"` while increasing the key-value cache at every decoding step.
2. The second time the input prompt is `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many in Germany?"`. Thanks to the cache, all key-value vectors for the first two sentences are already computed. Therefore the input prompt only consists of `"User: And how many in Germany?"`. While processing the shortened input prompt, its computed key-value vectors are concatenated to the key-value cache of the first decoding. The second Assistant's answer `"Germany has ca. 81 million inhabitants"` is then auto-regressively generated with the key-value cache consisting of encoded key-value vectors of `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many are in Germany?"`.

Two things should be noted here:
1. Keeping all the context is crucial for LLMs deployed in chat so that the LLM understands all the previous context of the conversation. E.g. for the example above the LLM needs to understand that the user refers to the population when asking `"And how many are in Germany"`.
@@ -72,6 +72,10 @@ We provide two types of agents, based on the main [`Agent`] class:

[[autodoc]] launch_gradio_demo

### stream_to_gradio

[[autodoc]] stream_to_gradio

### ToolCollection

[[autodoc]] ToolCollection
@@ -25,11 +25,11 @@ A backbone is a model used for feature extraction for higher level computer visi

Backbones are supported for the following models:

* [BEiT](../model_doc/beit)
* [BiT](../model_doc/bit)
* [ConvNext](../model_doc/convnext)
* [ConvNextV2](../model_doc/convnextv2)
* [DiNAT](../model_doc/dinat)
* [DINOV2](../model_doc/dinov2)
* [FocalNet](../model_doc/focalnet)
* [MaskFormer](../model_doc/maskformer)
@@ -34,7 +34,7 @@ By default, `TrainingArguments.report_to` is set to `"all"`, so a [`Trainer`] wi

- [`~integrations.TensorBoardCallback`] if tensorboard is accessible (either through PyTorch >= 1.4
  or tensorboardX).
- [`~integrations.WandbCallback`] if [wandb](https://www.wandb.com/) is installed.
- [`~integrations.CometCallback`] if [comet_ml](https://www.comet.com/site/) is installed.
- [`~integrations.MLflowCallback`] if [mlflow](https://www.mlflow.org/) is installed.
- [`~integrations.NeptuneCallback`] if [neptune](https://neptune.ai/) is installed.
- [`~integrations.AzureMLCallback`] if [azureml-sdk](https://pypi.org/project/azureml-sdk/) is
@@ -66,3 +66,8 @@ Examples of use can be found in the [example scripts](../examples) or [example n

- numpy_mask_tokens
- tf_mask_tokens
- torch_mask_tokens

## DataCollatorWithFlattening

[[autodoc]] data.data_collator.DataCollatorWithFlattening
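A rough usage sketch (the exact keys in the returned batch are an assumption, based on the collator packing examples into a single padding-free sequence):

```python
from transformers import DataCollatorWithFlattening

# pack several tokenized examples into one sequence without any padding tokens
collator = DataCollatorWithFlattening()
features = [{"input_ids": [1, 2, 3]}, {"input_ids": [4, 5]}]
batch = collator(features)
print(batch.keys())  # expected to include input_ids, labels and position_ids for the packed sequence
```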
@@ -40,6 +40,10 @@ for text generation, [`~generation.GenerationMixin`] (for the PyTorch models),

- push_to_hub
- all

Custom models should also include a `_supports_assign_param_buffer` attribute, which determines whether superfast init can be applied to the particular model. A sign that your model needs this is `test_save_and_load_from_pretrained` failing; if so, set the attribute to `False`, as in the sketch below.
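A minimal sketch of what that looks like on a hypothetical custom model:

```python
from transformers import PreTrainedModel


class MyCustomModel(PreTrainedModel):  # hypothetical custom model class
    # opt out of superfast init because test_save_and_load_from_pretrained fails for this architecture
    _supports_assign_param_buffer = False
```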
## ModuleUtilsMixin

[[autodoc]] modeling_utils.ModuleUtilsMixin
@@ -270,6 +270,11 @@ This is a simplified view, since the pipeline can handle automatically the batch
about how many forward passes your inputs are actually going to trigger, you can optimize the `batch_size`
independently of the inputs. The caveats from the previous section still apply.

## Pipeline FP16 inference

Models can be run in FP16, which can be significantly faster on GPU while saving memory. Most models will not suffer noticeable performance loss from this. The larger the model, the less likely that it will.

To enable FP16 inference, you can simply pass `torch_dtype=torch.float16` or `torch_dtype='float16'` to the pipeline constructor. Note that this only works for models with a PyTorch backend. Your inputs will be converted to FP16 internally.
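For example (a minimal sketch; the checkpoint is a placeholder, and any PyTorch model supported by the task works the same way):

```python
import torch
from transformers import pipeline

# run the model in FP16 on the first GPU; inputs are converted to FP16 internally
pipe = pipeline("text-generation", model="openai-community/gpt2", torch_dtype=torch.float16, device=0)
print(pipe("The theory of special relativity states ", max_new_tokens=10)[0]["generated_text"])
```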
## Pipeline custom code

If you want to override a specific pipeline.
@@ -56,3 +56,8 @@ Learn how to quantize models in the [Quantization](../quantization) guide.

## HqqConfig

[[autodoc]] HqqConfig

## FbgemmFp8Config

[[autodoc]] FbgemmFp8Config
192
docs/source/en/model_doc/chameleon.md
Normal file
@ -0,0 +1,192 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Chameleon
|
||||
|
||||
## Overview
|
||||
|
||||
The Chameleon model was proposed in [Chameleon: Mixed-Modal Early-Fusion Foundation Models](https://arxiv.org/abs/2405.09818v1) by the Meta AI Chameleon Team. Chameleon is a vision-language model that uses vector quantization to tokenize images, which enables the model to generate multimodal output. The model takes images and text as input, including in an interleaved format, and generates a textual response. The image generation module has not been released yet.
|
||||
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*We present Chameleon, a family of early-fusion token-based mixed-modal models capable of understanding and generating images and text in any arbitrary sequence. We outline a stable training
|
||||
approach from inception, an alignment recipe, and an architectural parameterization tailored for the
|
||||
early-fusion, token-based, mixed-modal setting. The models are evaluated on a comprehensive range
|
||||
of tasks, including visual question answering, image captioning, text generation, image generation, and
|
||||
long-form mixed modal generation. Chameleon demonstrates broad and general capabilities, including
|
||||
state-of-the-art performance in image captioning tasks, outperforms Llama-2 in text-only tasks while
|
||||
being competitive with models such as Mixtral 8x7B and Gemini-Pro, and performs non-trivial image
|
||||
generation, all in a single model. It also matches or exceeds the performance of much larger models,
|
||||
including Gemini Pro and GPT-4V, according to human judgments on a new long-form mixed-modal
|
||||
generation evaluation, where either the prompt or outputs contain mixed sequences of both images and
|
||||
text. Chameleon marks a significant step forward in unified modeling of full multimodal documents*
|
||||
|
||||
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/chameleon_arch.png"
|
||||
alt="drawing" width="600"/>
|
||||
|
||||
<small> Chameleon incorporates a vector quantizer module to transform images into discrete tokens. That also enables image generation using an auto-regressive transformer. Taken from the <a href="https://arxiv.org/abs/2405.09818v1">original paper.</a> </small>
|
||||
|
||||
This model was contributed by [joaogante](https://huggingface.co/joaogante) and [RaushanTurganbay](https://huggingface.co/RaushanTurganbay).
|
||||
The original code can be found [here](https://github.com/facebookresearch/chameleon).
|
||||
|
||||
|
||||
## Usage tips
|
||||
|
||||
- We advise users to use `padding_side="left"` when computing batched generation as it leads to more accurate results. Simply make sure to set `processor.tokenizer.padding_side = "left"` before generating.
|
||||
|
||||
- Note that Chameleon was tuned for safety alignment. If the model is refusing to answer, consider asking a more concrete question, instead of an open question.
|
||||
|
||||
- Chameleon generates in chat format, which means that the generated text is always the "assistant's turn". You can enable plain text completion by passing `return_for_text_completion=True` when calling the processor.
|
||||
|
||||
> [!NOTE]
|
||||
> The Chameleon implementation in Transformers uses a special image token to indicate where to merge image embeddings. Rather than adding a new token for this, it reuses one of the reserved tokens: `<reserved08707>`. You have to add `<image>` to your prompt in the place where the image should be embedded for correct generation.
|
||||
|
||||
## Usage example
|
||||
|
||||
### Single image inference
|
||||
|
||||
Chameleon is a gated model so make sure to have access and login to Hugging Face Hub using a token.
|
||||
Here's how to load the model and perform inference in half-precision (`torch.bfloat16`):
|
||||
|
||||
```python
|
||||
from transformers import ChameleonProcessor, ChameleonForConditionalGeneration
|
||||
import torch
|
||||
from PIL import Image
|
||||
import requests
|
||||
|
||||
processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
|
||||
model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.bfloat16, device_map="cuda")
|
||||
|
||||
# prepare image and text prompt
|
||||
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
|
||||
image = Image.open(requests.get(url, stream=True).raw)
|
||||
prompt = "What do you see in this image?<image>"
|
||||
|
||||
inputs = processor(prompt, image, return_tensors="pt").to(model.device)
|
||||
|
||||
# autoregressively complete prompt
|
||||
output = model.generate(**inputs, max_new_tokens=50)
|
||||
print(processor.decode(output[0], skip_special_tokens=True))
|
||||
```
|
||||
|
||||
### Multi image inference
|
||||
|
||||
Chameleon can perform inference with multiple images as input, where images either belong to the same prompt or different prompts (in batched inference). Here is how you can do it:
|
||||
|
||||
```python
|
||||
from transformers import ChameleonProcessor, ChameleonForConditionalGeneration
|
||||
import torch
|
||||
from PIL import Image
|
||||
import requests
|
||||
|
||||
processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
|
||||
|
||||
model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.bfloat16, device_map="cuda")
|
||||
|
||||
# Get three different images
|
||||
url = "https://www.ilankelman.org/stopsigns/australia.jpg"
|
||||
image_stop = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
image_cats = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg"
|
||||
image_snowman = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
# Prepare a batched prompt, where the first one is a multi-image prompt and the second is not
|
||||
prompts = [
|
||||
"What do these images have in common?<image><image>",
|
||||
"<image>What is shown in this image?"
|
||||
]
|
||||
|
||||
# We can simply feed images in the order they have to be used in the text prompt
|
||||
# Each "<image>" token uses one image leaving the next for the subsequent "<image>" tokens
|
||||
inputs = processor(text=prompts, images=[image_stop, image_cats, image_snowman], padding=True, return_tensors="pt").to(device="cuda", dtype=torch.bfloat16)
|
||||
|
||||
# Generate
|
||||
generate_ids = model.generate(**inputs, max_new_tokens=50)
|
||||
processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
|
||||
```
|
||||
|
||||
## Model optimization
|
||||
|
||||
### Quantization using Bitsandbytes
|
||||
|
||||
The model can be loaded in 8-bit or 4-bit precision, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes (`pip install bitsandbytes`) and to have access to a CUDA-compatible GPU device. Simply change the snippet above to:
|
||||
|
||||
```python
|
||||
import torch
from transformers import ChameleonForConditionalGeneration, BitsAndBytesConfig
|
||||
|
||||
# specify how to quantize the model
|
||||
quantization_config = BitsAndBytesConfig(
|
||||
load_in_4bit=True,
|
||||
bnb_4bit_quant_type="nf4",
|
||||
bnb_4bit_compute_dtype=torch.float16,
|
||||
)
|
||||
|
||||
model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", quantization_config=quantization_config, device_map="cuda")
|
||||
```
|
||||
|
||||
### Use Flash-Attention 2 and SDPA to further speed-up generation
|
||||
|
||||
The model supports both Flash Attention 2 and PyTorch's [`torch.nn.functional.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html) (SDPA), which can be enabled for optimization. SDPA is the default option when you load the model. If you want to switch to Flash Attention 2, first make sure to install flash-attn; refer to the [original repository](https://github.com/Dao-AILab/flash-attention) for installation instructions. Then simply change the snippet above to:
|
||||
|
||||
```python
|
||||
import torch
from transformers import ChameleonForConditionalGeneration
|
||||
|
||||
model_id = "facebook/chameleon-7b"
|
||||
model = ChameleonForConditionalGeneration.from_pretrained(
|
||||
model_id,
|
||||
torch_dtype=torch.bfloat16,
|
||||
low_cpu_mem_usage=True,
|
||||
attn_implementation="flash_attention_2"
|
||||
).to(0)
|
||||
```
|
||||
|
||||
## ChameleonConfig
|
||||
|
||||
[[autodoc]] ChameleonConfig
|
||||
|
||||
## ChameleonVQVAEConfig
|
||||
|
||||
[[autodoc]] ChameleonVQVAEConfig
|
||||
|
||||
## ChameleonProcessor
|
||||
|
||||
[[autodoc]] ChameleonProcessor
|
||||
|
||||
## ChameleonImageProcessor
|
||||
|
||||
[[autodoc]] ChameleonImageProcessor
|
||||
- preprocess
|
||||
|
||||
## ChameleonVQVAE
|
||||
|
||||
[[autodoc]] ChameleonVQVAE
|
||||
- forward
|
||||
|
||||
## ChameleonModel
|
||||
|
||||
[[autodoc]] ChameleonModel
|
||||
- forward
|
||||
|
||||
## ChameleonForConditionalGeneration
|
||||
|
||||
[[autodoc]] ChameleonForConditionalGeneration
|
||||
- forward
|
@ -79,6 +79,123 @@ encode the text and prepare the images. The following example shows how to get t
|
||||
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
|
||||
```
|
||||
|
||||
|
||||
### Combining CLIP and Flash Attention 2
|
||||
|
||||
First, make sure to install the latest version of Flash Attention 2.
|
||||
|
||||
```bash
|
||||
pip install -U flash-attn --no-build-isolation
|
||||
```
|
||||
|
||||
Also make sure that your hardware is compatible with Flash Attention 2. Read more about it in the official documentation of the flash-attn repository. Also make sure to load your model in half-precision (e.g. `torch.float16`).
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
For small batch sizes, you might notice a slowdown in your model when using flash attention. Refer to the section [Expected speedups with Flash Attention and SDPA](#Expected-speedups-with-Flash-Attention-and-SDPA) below and select an appropriate attention implementation.
|
||||
|
||||
</Tip>
|
||||
|
||||
To load and run a model using Flash Attention 2, refer to the snippet below:
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> import requests
|
||||
>>> from PIL import Image
|
||||
|
||||
>>> from transformers import CLIPProcessor, CLIPModel
|
||||
|
||||
>>> device = "cuda"
|
||||
>>> torch_dtype = torch.float16
|
||||
|
||||
>>> model = CLIPModel.from_pretrained(
|
||||
... "openai/clip-vit-base-patch32",
|
||||
... attn_implementation="flash_attention_2",
|
||||
... device_map=device,
|
||||
... torch_dtype=torch_dtype,
|
||||
... )
|
||||
>>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
|
||||
|
||||
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
>>> image = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
|
||||
>>> inputs.to(device)
|
||||
|
||||
>>> with torch.no_grad():
|
||||
... with torch.autocast(device):
|
||||
... outputs = model(**inputs)
|
||||
|
||||
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
|
||||
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
|
||||
>>> print(probs)
|
||||
tensor([[0.9946, 0.0052]], device='cuda:0', dtype=torch.float16)
|
||||
```
|
||||
|
||||
|
||||
### Using Scaled Dot Product Attention (SDPA)
|
||||
|
||||
PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function
|
||||
encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the
|
||||
[official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)
|
||||
or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)
|
||||
page for more information.
|
||||
|
||||
SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set
|
||||
`attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.
|
||||
|
||||
```python
|
||||
from transformers import CLIPModel
|
||||
|
||||
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32", torch_dtype=torch.float16, attn_implementation="sdpa")
|
||||
```
|
||||
|
||||
For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).
|
||||
|
||||
### Expected speedups with Flash Attention and SDPA
|
||||
|
||||
On a local benchmark (NVIDIA A10G, PyTorch 2.3.1+cu121) with `float16`, we saw the following speedups during inference for `"openai/clip-vit-large-patch14"` checkpoint ([code](https://gist.github.com/qubvel/ac691a54e54f9fae8144275f866a7ff8)):
|
||||
|
||||
#### CLIPTextModel
|
||||
|
||||
| Num text labels | Eager (s/iter) | FA2 (s/iter) | FA2 speedup | SDPA (s/iter) | SDPA speedup |
|
||||
|------------------:|-----------------:|---------------:|--------------:|----------------:|---------------:|
|
||||
| 4 | 0.009 | 0.012 | 0.737 | 0.007 | 1.269 |
|
||||
| 16 | 0.009 | 0.014 | 0.659 | 0.008 | 1.187 |
|
||||
| 32 | 0.018 | 0.021 | 0.862 | 0.016 | 1.142 |
|
||||
| 64 | 0.034 | 0.034 | 1.001 | 0.03 | 1.163 |
|
||||
| 128 | 0.063 | 0.058 | 1.09 | 0.054 | 1.174 |
|
||||
|
||||

|
||||
|
||||
#### CLIPVisionModel
|
||||
|
||||
| Image batch size | Eager (s/iter) | FA2 (s/iter) | FA2 speedup | SDPA (s/iter) | SDPA speedup |
|
||||
|-------------------:|-----------------:|---------------:|--------------:|----------------:|---------------:|
|
||||
| 1 | 0.016 | 0.013 | 1.247 | 0.012 | 1.318 |
|
||||
| 4 | 0.025 | 0.021 | 1.198 | 0.021 | 1.202 |
|
||||
| 16 | 0.093 | 0.075 | 1.234 | 0.075 | 1.24 |
|
||||
| 32 | 0.181 | 0.147 | 1.237 | 0.146 | 1.241 |
|
||||
|
||||

|
||||
|
||||
#### CLIPModel
|
||||
|
||||
| Image batch size | Num text labels | Eager (s/iter) | FA2 (s/iter) | FA2 speedup | SDPA (s/iter) | SDPA speedup |
|
||||
|-------------------:|------------------:|-----------------:|---------------:|--------------:|----------------:|---------------:|
|
||||
| 1 | 4 | 0.025 | 0.026 | 0.954 | 0.02 | 1.217 |
|
||||
| 1 | 16 | 0.026 | 0.028 | 0.918 | 0.02 | 1.287 |
|
||||
| 1 | 64 | 0.042 | 0.046 | 0.906 | 0.036 | 1.167 |
|
||||
| 4 | 4 | 0.028 | 0.033 | 0.849 | 0.024 | 1.189 |
|
||||
| 4 | 16 | 0.034 | 0.035 | 0.955 | 0.029 | 1.169 |
|
||||
| 4 | 64 | 0.059 | 0.055 | 1.072 | 0.05 | 1.179 |
|
||||
| 16 | 4 | 0.096 | 0.088 | 1.091 | 0.078 | 1.234 |
|
||||
| 16 | 16 | 0.102 | 0.09 | 1.129 | 0.083 | 1.224 |
|
||||
| 16 | 64 | 0.127 | 0.11 | 1.157 | 0.105 | 1.218 |
|
||||
| 32 | 4 | 0.185 | 0.159 | 1.157 | 0.149 | 1.238 |
|
||||
| 32 | 16 | 0.19 | 0.162 | 1.177 | 0.154 | 1.233 |
|
||||
| 32 | 64 | 0.216 | 0.181 | 1.19 | 0.176 | 1.228 |
|
||||
|
||||
## Resources
|
||||
|
||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with CLIP.
|
||||
|
@ -20,6 +20,12 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
The Depth Anything model was proposed in [Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data](https://arxiv.org/abs/2401.10891) by Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, Hengshuang Zhao. Depth Anything is based on the [DPT](dpt) architecture, trained on ~62 million images, obtaining state-of-the-art results for both relative and absolute depth estimation.
|
||||
|
||||
<Tip>
|
||||
|
||||
[Depth Anything V2](depth_anything_v2) was released in June 2024. It uses the same architecture as Depth Anything and is therefore compatible with all code examples and existing workflows. However, it leverages synthetic data and a larger-capacity teacher model to achieve much finer and more robust depth predictions.
|
||||
|
||||
</Tip>
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*This work presents Depth Anything, a highly practical solution for robust monocular depth estimation. Without pursuing novel technical modules, we aim to build a simple yet powerful foundation model dealing with any images under any circumstances. To this end, we scale up the dataset by designing a data engine to collect and automatically annotate large-scale unlabeled data (~62M), which significantly enlarges the data coverage and thus is able to reduce the generalization error. We investigate two simple yet effective strategies that make data scaling-up promising. First, a more challenging optimization target is created by leveraging data augmentation tools. It compels the model to actively seek extra visual knowledge and acquire robust representations. Second, an auxiliary supervision is developed to enforce the model to inherit rich semantic priors from pre-trained encoders. We evaluate its zero-shot capabilities extensively, including six public datasets and randomly captured photos. It demonstrates impressive generalization ability. Further, through fine-tuning it with metric depth information from NYUv2 and KITTI, new SOTAs are set. Our better depth model also results in a better depth-conditioned ControlNet.*
|
||||
|
115
docs/source/en/model_doc/depth_anything_v2.md
Normal file
@ -0,0 +1,115 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Depth Anything V2
|
||||
|
||||
## Overview
|
||||
|
||||
Depth Anything V2 was introduced in [the paper of the same name](https://arxiv.org/abs/2406.09414) by Lihe Yang et al. It uses the same architecture as the original [Depth Anything model](depth_anything), but uses synthetic data and a larger-capacity teacher model to achieve much finer and more robust depth predictions.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*This work presents Depth Anything V2. Without pursuing fancy techniques, we aim to reveal crucial findings to pave the way towards building a powerful monocular depth estimation model. Notably, compared with V1, this version produces much finer and more robust depth predictions through three key practices: 1) replacing all labeled real images with synthetic images, 2) scaling up the capacity of our teacher model, and 3) teaching student models via the bridge of large-scale pseudo-labeled real images. Compared with the latest models built on Stable Diffusion, our models are significantly more efficient (more than 10x faster) and more accurate. We offer models of different scales (ranging from 25M to 1.3B params) to support extensive scenarios. Benefiting from their strong generalization capability, we fine-tune them with metric depth labels to obtain our metric depth models. In addition to our models, considering the limited diversity and frequent noise in current test sets, we construct a versatile evaluation benchmark with precise annotations and diverse scenes to facilitate future research.*
|
||||
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/depth_anything_overview.jpg"
|
||||
alt="drawing" width="600"/>
|
||||
|
||||
<small> Depth Anything overview. Taken from the <a href="https://arxiv.org/abs/2401.10891">original paper</a>.</small>
|
||||
|
||||
The Depth Anything models were contributed by [nielsr](https://huggingface.co/nielsr).
|
||||
The original code can be found [here](https://github.com/DepthAnything/Depth-Anything-V2).
|
||||
|
||||
## Usage example
|
||||
|
||||
There are two main ways to use Depth Anything V2: either using the pipeline API, which abstracts away all the complexity for you, or using the `DepthAnythingForDepthEstimation` class yourself.
|
||||
|
||||
### Pipeline API
|
||||
|
||||
The pipeline allows you to use the model in a few lines of code:
|
||||
|
||||
```python
|
||||
>>> from transformers import pipeline
|
||||
>>> from PIL import Image
|
||||
>>> import requests
|
||||
|
||||
>>> # load pipe
|
||||
>>> pipe = pipeline(task="depth-estimation", model="depth-anything/Depth-Anything-V2-Small-hf")
|
||||
|
||||
>>> # load image
|
||||
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
|
||||
>>> image = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
>>> # inference
|
||||
>>> depth = pipe(image)["depth"]
|
||||
```
|
||||
|
||||
### Using the model yourself
|
||||
|
||||
If you want to do the pre- and post-processing yourself, here's how to do that:
|
||||
|
||||
```python
|
||||
>>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation
|
||||
>>> import torch
|
||||
>>> import numpy as np
|
||||
>>> from PIL import Image
|
||||
>>> import requests
|
||||
|
||||
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
>>> image = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
>>> image_processor = AutoImageProcessor.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf")
|
||||
>>> model = AutoModelForDepthEstimation.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf")
|
||||
|
||||
>>> # prepare image for the model
|
||||
>>> inputs = image_processor(images=image, return_tensors="pt")
|
||||
|
||||
>>> with torch.no_grad():
|
||||
... outputs = model(**inputs)
|
||||
... predicted_depth = outputs.predicted_depth
|
||||
|
||||
>>> # interpolate to original size
|
||||
>>> prediction = torch.nn.functional.interpolate(
|
||||
... predicted_depth.unsqueeze(1),
|
||||
... size=image.size[::-1],
|
||||
... mode="bicubic",
|
||||
... align_corners=False,
|
||||
... )
|
||||
|
||||
>>> # visualize the prediction
|
||||
>>> output = prediction.squeeze().cpu().numpy()
|
||||
>>> formatted = (output * 255 / np.max(output)).astype("uint8")
|
||||
>>> depth = Image.fromarray(formatted)
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Depth Anything.
|
||||
|
||||
- [Monocular depth estimation task guide](../tasks/depth_estimation)
|
||||
- [Depth Anything V2 demo](https://huggingface.co/spaces/depth-anything/Depth-Anything-V2).
|
||||
- A notebook showcasing inference with [`DepthAnythingForDepthEstimation`] can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/Depth%20Anything/Predicting_depth_in_an_image_with_Depth_Anything.ipynb). 🌎
|
||||
- [Core ML conversion of the `small` variant for use on Apple Silicon](https://huggingface.co/apple/coreml-depth-anything-v2-small).
|
||||
|
||||
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
|
||||
|
||||
## DepthAnythingConfig
|
||||
|
||||
[[autodoc]] DepthAnythingConfig
|
||||
|
||||
## DepthAnythingForDepthEstimation
|
||||
|
||||
[[autodoc]] DepthAnythingForDepthEstimation
|
||||
- forward
|
@ -57,7 +57,7 @@ print((last_hidden_states - traced_outputs[0]).abs().max())
|
||||
|
||||
## Resources
|
||||
|
||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DINOv2.
|
||||
|
||||
- Demo notebooks for DINOv2 can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/DINOv2). 🌎
|
||||
|
||||
|
58
docs/source/en/model_doc/gemma2.md
Normal file
@ -0,0 +1,58 @@
|
||||
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Gemma2
|
||||
|
||||
## Overview
|
||||
|
||||
The Gemma2 model was proposed in [Gemma2: Open Models Based on Gemini Technology and Research](https://blog.google/technology/developers/google-gemma-2/) by Gemma2 Team, Google.
|
||||
Two Gemma2 models were released, with parameter sizes of 9 billion (9B) and 27 billion (27B).
|
||||
|
||||
The abstract from the blog post is the following:
|
||||
|
||||
*Now we’re officially releasing Gemma 2 to researchers and developers globally. Available in both 9 billion (9B) and 27 billion (27B) parameter sizes, Gemma 2 is higher-performing and more efficient at inference than the first generation, with significant safety advancements built in. In fact, at 27B, it offers competitive alternatives to models more than twice its size, delivering the kind of performance that was only possible with proprietary models as recently as December.*
|
||||
|
||||
Tips:
|
||||
|
||||
- The original checkpoints can be converted using the conversion script `src/transformers/models/Gemma2/convert_Gemma2_weights_to_hf.py`
|
||||
|
||||
This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ), [Pedro Cuenca](https://huggingface.co/pcuenq) and [Tom Arsen]().
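A quick usage sketch with the pipeline API (the checkpoint name is an assumption; use whichever Gemma2 checkpoint you have access to, and note that the official checkpoints are gated on the Hub):

```py
import torch
from transformers import pipeline

# assumed checkpoint; authenticate with the Hub first if the repository is gated
pipe = pipeline("text-generation", model="google/gemma-2-9b", torch_dtype=torch.bfloat16, device_map="auto")
print(pipe("Hey how are you doing today?", max_new_tokens=20)[0]["generated_text"])
```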
|
||||
|
||||
|
||||
## Gemma2Config
|
||||
|
||||
[[autodoc]] Gemma2Config
|
||||
|
||||
## Gemma2Model
|
||||
|
||||
[[autodoc]] Gemma2Model
|
||||
- forward
|
||||
|
||||
## Gemma2ForCausalLM
|
||||
|
||||
[[autodoc]] Gemma2ForCausalLM
|
||||
- forward
|
||||
|
||||
## Gemma2ForSequenceClassification
|
||||
|
||||
[[autodoc]] Gemma2ForSequenceClassification
|
||||
- forward
|
||||
|
||||
## Gemma2ForTokenClassification
|
||||
|
||||
[[autodoc]] Gemma2ForTokenClassification
|
||||
- forward
|
@ -41,33 +41,40 @@ The original code can be found [here](https://github.com/IDEA-Research/Grounding
|
||||
Here's how to use the model for zero-shot object detection:
|
||||
|
||||
```python
|
||||
import requests
|
||||
>>> import requests
|
||||
|
||||
import torch
|
||||
from PIL import Image
|
||||
from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection,
|
||||
>>> import torch
|
||||
>>> from PIL import Image
|
||||
>>> from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection
|
||||
|
||||
model_id = "IDEA-Research/grounding-dino-tiny"
|
||||
>>> model_id = "IDEA-Research/grounding-dino-tiny"
|
||||
>>> device = "cuda"
|
||||
|
||||
processor = AutoProcessor.from_pretrained(model_id)
|
||||
model = AutoModelForZeroShotObjectDetection.from_pretrained(model_id).to(device)
|
||||
>>> processor = AutoProcessor.from_pretrained(model_id)
|
||||
>>> model = AutoModelForZeroShotObjectDetection.from_pretrained(model_id).to(device)
|
||||
|
||||
image_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
image = Image.open(requests.get(image_url, stream=True).raw)
|
||||
# Check for cats and remote controls
|
||||
text = "a cat. a remote control."
|
||||
>>> image_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
>>> image = Image.open(requests.get(image_url, stream=True).raw)
|
||||
>>> # Check for cats and remote controls
|
||||
>>> text = "a cat. a remote control."
|
||||
|
||||
inputs = processor(images=image, text=text, return_tensors="pt").to(device)
|
||||
with torch.no_grad():
|
||||
outputs = model(**inputs)
|
||||
>>> inputs = processor(images=image, text=text, return_tensors="pt").to(device)
|
||||
>>> with torch.no_grad():
|
||||
... outputs = model(**inputs)
|
||||
|
||||
results = processor.post_process_grounded_object_detection(
|
||||
outputs,
|
||||
inputs.input_ids,
|
||||
box_threshold=0.4,
|
||||
text_threshold=0.3,
|
||||
target_sizes=[image.size[::-1]]
|
||||
)
|
||||
>>> results = processor.post_process_grounded_object_detection(
|
||||
... outputs,
|
||||
... inputs.input_ids,
|
||||
... box_threshold=0.4,
|
||||
... text_threshold=0.3,
|
||||
... target_sizes=[image.size[::-1]]
|
||||
... )
|
||||
>>> print(results)
|
||||
[{'boxes': tensor([[344.6959, 23.1090, 637.1833, 374.2751],
|
||||
[ 12.2666, 51.9145, 316.8582, 472.4392],
|
||||
[ 38.5742, 70.0015, 176.7838, 118.1806]], device='cuda:0'),
|
||||
'labels': ['a cat', 'a cat', 'a remote control'],
|
||||
'scores': tensor([0.4785, 0.4381, 0.4776], device='cuda:0')}]
|
||||
```
|
||||
|
||||
## Grounded SAM
|
||||
|
62
docs/source/en/model_doc/hiera.md
Normal file
@ -0,0 +1,62 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Hiera
|
||||
|
||||
## Overview
|
||||
|
||||
Hiera was proposed in [Hiera: A Hierarchical Vision Transformer without the Bells-and-Whistles](https://arxiv.org/abs/2306.00989) by Chaitanya Ryali, Yuan-Ting Hu, Daniel Bolya, Chen Wei, Haoqi Fan, Po-Yao Huang, Vaibhav Aggarwal, Arkabandhu Chowdhury, Omid Poursaeed, Judy Hoffman, Jitendra Malik, Yanghao Li, Christoph Feichtenhofer
|
||||
|
||||
The paper introduces "Hiera," a hierarchical Vision Transformer that simplifies the architecture of modern hierarchical vision transformers by removing unnecessary components without compromising on accuracy or efficiency. Unlike traditional transformers that add complex vision-specific components to improve supervised classification performance, Hiera demonstrates that such additions, often termed "bells-and-whistles," are not essential for high accuracy. By leveraging a strong visual pretext task (MAE) for pretraining, Hiera retains simplicity and achieves superior accuracy and speed both in inference and training across various image and video recognition tasks. The approach suggests that spatial biases required for vision tasks can be effectively learned through proper pretraining, eliminating the need for added architectural complexity.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Modern hierarchical vision transformers have added several vision-specific components in the pursuit of supervised classification performance. While these components lead to effective accuracies and attractive FLOP counts, the added complexity actually makes these transformers slower than their vanilla ViT counterparts. In this paper, we argue that this additional bulk is unnecessary. By pretraining with a strong visual pretext task (MAE), we can strip out all the bells-and-whistles from a state-of-the-art multi-stage vision transformer without losing accuracy. In the process, we create Hiera, an extremely simple hierarchical vision transformer that is more accurate than previous models while being significantly faster both at inference and during training. We evaluate Hiera on a variety of tasks for image and video recognition. Our code and models are available at https://github.com/facebookresearch/hiera.*
|
||||
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/hiera_overview.png"
|
||||
alt="drawing" width="600"/>
|
||||
|
||||
<small> Hiera architecture. Taken from the <a href="https://arxiv.org/abs/2306.00989">original paper.</a> </small>
|
||||
|
||||
This model was a joint contribution by [EduardoPacheco](https://huggingface.co/EduardoPacheco) and [namangarg110](https://huggingface.co/namangarg110). The original code can be found [here](https://github.com/facebookresearch/hiera).
|
||||
|
||||
## Resources
|
||||
|
||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Hiera. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
|
||||
|
||||
<PipelineTag pipeline="image-classification"/>
|
||||
|
||||
- [`HieraForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb).
|
||||
- See also: [Image classification task guide](../tasks/image_classification)
|
||||
|
||||
## HieraConfig
|
||||
|
||||
[[autodoc]] HieraConfig
|
||||
|
||||
## HieraModel
|
||||
|
||||
[[autodoc]] HieraModel
|
||||
- forward
|
||||
|
||||
## HieraForPreTraining
|
||||
|
||||
[[autodoc]] HieraForPreTraining
|
||||
- forward
|
||||
|
||||
## HieraForImageClassification
|
||||
|
||||
[[autodoc]] HieraForImageClassification
|
||||
- forward
|
@ -16,6 +16,15 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# Llama3
|
||||
|
||||
```py3
|
||||
import transformers
|
||||
import torch
|
||||
|
||||
model_id = "meta-llama/Meta-Llama-3-8B"
|
||||
|
||||
pipeline = transformers.pipeline("text-generation", model=model_id, model_kwargs={"torch_dtype": torch.bfloat16}, device_map="auto")
|
||||
pipeline("Hey how are you doing today?")
|
||||
```
|
||||
|
||||
## Overview
|
||||
|
||||
@ -66,20 +75,7 @@ model = AutoModelForCausalLM.from_pretrained("/output/path")
|
||||
Note that executing the script requires enough CPU RAM to host the whole model in float16 precision (even if the biggest versions
|
||||
come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). For the 75B model, it's thus 145GB of RAM needed.
|
||||
|
||||
|
||||
- When using Flash Attention 2 via `attn_implementation="flash_attention_2"`, don't pass `torch_dtype` to the `from_pretrained` class method and use Automatic Mixed-Precision training. When using `Trainer`, it is simply specifying either `fp16` or `bf16` to `True`. Otherwise, make sure you are using `torch.autocast`. This is required because the Flash Attention only support `fp16` and `bf16` data type.
|
||||
|
||||
## Quick usage
|
||||
|
||||
```py3
|
||||
import transformers
|
||||
import torch
|
||||
|
||||
model_id = "meta-llama/Meta-Llama-3-8B"
|
||||
|
||||
pipeline = transformers.pipeline("text-generation", model=model_id, model_kwargs={"torch_dtype": torch.bfloat16}, device_map="auto")
|
||||
pipeline("Hey how are you doing today?")
|
||||
```
|
||||
|
||||
## Resources
|
||||
A ton of cool resources are already available on the documentation page of [Llama2](./llama2), inviting contributors to add new resources curated for Llama3 here! 🤗
|
||||
|
@ -43,6 +43,13 @@ The original code can be found [here](https://github.com/LLaVA-VL/LLaVA-NeXT/tre
|
||||
|
||||
- We advise users to use `padding_side="left"` when computing batched generation as it leads to more accurate results. Simply make sure to call `processor.tokenizer.padding_side = "left"` before generating.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
- Llava-Next uses a different number of patches for images and thus has to pad the inputs inside the modeling code, aside from the padding done when processing the inputs. The default setting is "left-padding" if the model is in `eval()` mode, otherwise "right-padding".
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
- Note that each checkpoint has been trained with a specific prompt format, depending on which large language model (LLM) was used. You can use tokenizer's `apply_chat_template` to format your prompts correctly. Below is an example of how to do that.
|
||||
|
||||
We will use [LLaVA-NeXT-Video-7B-hf](https://huggingface.co/llava-hf/LLaVA-NeXT-Video-7B-hf) and a conversation history of videos and images. Each content field has to be a list of dicts, as follows:
|
||||
|
@ -40,8 +40,55 @@ The original code can be found [here](https://github.com/haotian-liu/LLaVA/tree/
|
||||
|
||||
- Note that the model has not been explicitly trained to process multiple images in the same prompt; although this is technically possible, you may experience inaccurate results.
|
||||
|
||||
- For better results, we recommend using the processor's `apply_chat_template()` method to format your prompt correctly. For that you need to construct a conversation history; passing in a plain string will not format your prompt. Each message in the conversation history for chat templates is a dictionary with keys "role" and "content". The "content" should be a list of dictionaries for the "text" and "image" modalities, as follows:
|
||||
|
||||
```python
|
||||
from transformers import AutoProcessor
|
||||
|
||||
processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")
|
||||
|
||||
conversation = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "text", "text": "What’s shown in this image?"},
|
||||
],
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": [{"type": "text", "text": "This image shows a red stop sign."},]
|
||||
},
|
||||
{
|
||||
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "text", "text": "Describe the image in more details."},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
|
||||
|
||||
# Note that the template simply formats your prompt, you still have to tokenize it and obtain pixel values for your images
|
||||
print(text_prompt)
|
||||
>>> "USER: <image>\n<What’s shown in this image? ASSISTANT: This image shows a red stop sign.</s>USER: Describe the image in more details. ASSISTANT:"
|
||||
```
|
||||
|
||||
- If you want to construct a chat prompt yourself, below is a list of prompt formats accepted by each llava checkpoint:
|
||||
|
||||
[llava-interleave models](https://huggingface.co/collections/llava-hf/llava-interleave-668e19a97da0036aad4a2f19) require the following format:
|
||||
```bash
|
||||
"<|im_start|>user <image>\nWhat is shown in this image?<|im_end|><|im_start|>assistant"
|
||||
```
|
||||
|
||||
For multiple turns conversation:
|
||||
|
||||
```bash
|
||||
"<|im_start|>user <image>\n<prompt1><|im_end|><|im_start|>assistant <answer1><|im_end|><|im_start|>user <image>\n<prompt1><|im_end|><|im_start|>assistant "
|
||||
```
|
||||
|
||||
[llava-1.5 models](https://huggingface.co/collections/llava-hf/llava-15-65f762d5b6941db5c2ba07e0) requires the following format:
|
||||
```bash
|
||||
"USER: <image>\n<prompt> ASSISTANT:"
|
||||
```
|
||||
@ -52,6 +99,7 @@ For multiple turns conversation:
|
||||
"USER: <image>\n<prompt1> ASSISTANT: <answer1></s>USER: <prompt2> ASSISTANT: <answer2></s>USER: <prompt3> ASSISTANT:"
|
||||
```
|
||||
|
||||
|
||||
### Using Flash Attention 2
|
||||
|
||||
Flash Attention 2 is an even faster, optimized version of the previous optimization, please refer to the [Flash Attention 2 section of performance docs](https://huggingface.co/docs/transformers/perf_infer_gpu_one).
|
||||
|
@ -46,26 +46,79 @@ The original code can be found [here](https://github.com/haotian-liu/LLaVA/tree/
|
||||
|
||||
- We advise users to use `padding_side="left"` when computing batched generation as it leads to more accurate results. Simply make sure to call `processor.tokenizer.padding_side = "left"` before generating.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
- Llava-Next uses a different number of patches for images and thus has to pad the inputs inside the modeling code, aside from the padding done when processing the inputs. The default setting is "left-padding" if the model is in `eval()` mode, otherwise "right-padding".
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
- Note that each checkpoint has been trained with a specific prompt format, depending on which large language model (LLM) was used. You can use the processor's `apply_chat_template` to format your prompts correctly. For that you have to construct a conversation history; passing a plain string will not format your prompt. Each message in the conversation history for chat templates is a dictionary with keys "role" and "content". The "content" should be a list of dictionaries, for the "text" and "image" modalities. Below is an example of how to do that, followed by the list of formats accepted by each checkpoint.
|
||||
|
||||
We will use [llava-v1.6-mistral-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf) and a conversation history of text and image. Each content field has to be a list of dicts, as follows:
|
||||
|
||||
```python
|
||||
from transformers import LlavaNextProcessor
|
||||
|
||||
processor = LlavaNextProcessor.from_pretrained("llava-hf/llava-v1.6-mistral-7b-hf")
|
||||
|
||||
conversation = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "text", "text": "What’s shown in this image?"},
|
||||
],
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": [{"type": "text", "text": "This image shows a red stop sign."},]
|
||||
},
|
||||
{
|
||||
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "text", "text": "Describe the image in more details."},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
|
||||
|
||||
# Note that the template simply formats your prompt, you still have to tokenize it and obtain pixel values for your images
|
||||
print(text_prompt)
|
||||
>>> "[INST] <image>\nWhat's shown in this image? [/INST] This image shows a red stop sign. [INST] Describe the image in more details. [/INST]"
|
||||
```
|
||||
|
||||
- If you want to construct a chat prompt yourself, below is a list of possible formats.
|
||||
[llava-v1.6-mistral-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-mistral-7b-hf) requires the following format:
|
||||
|
||||
```bash
|
||||
"[INST] <image>\nWhat is shown in this image? [/INST]"
|
||||
```
|
||||
|
||||
[llava-v1.6-vicuna-7b-hf](https://huggingface.co/llava-hf/llava-v1.6-vicuna-7b-hf) and [llava-v1.6-vicuna-13b-hf](https://huggingface.co/llava-hf/llava-v1.6-vicuna-13b-hf) require the following format:
|
||||
|
||||
```bash
|
||||
"A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions. USER: <image>\nWhat is shown in this image? ASSISTANT:"
|
||||
```
|
||||
|
||||
[llava-v1.6-34b-hf](https://huggingface.co/llava-hf/llava-v1.6-34b-hf) requires the following format:
|
||||
|
||||
```bash
|
||||
"<|im_start|>system\nAnswer the questions.<|im_end|><|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|><|im_start|>assistant\n"
|
||||
```
|
||||
|
||||
[llama3-llava-next-8b-hf](https://huggingface.co/llava-hf/llama3-llava-next-8b-hf) requires the following format:
|
||||
|
||||
```bash
|
||||
"<|start_header_id|>system<|end_header_id|>\n\nYou are a helpful language and vision assistant. You are able to understand the visual content that the user provides, and assist the user with a variety of tasks using natural language.<|eot_id|><|start_header_id|><|start_header_id|>user<|end_header_id|>\n\n<image>\nWhat is shown in this image?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
|
||||
```
|
||||
|
||||
[llava-next-72b-hf](https://huggingface.co/llava-hf/llava-next-72b-hf) and [llava-next-110b-hf](https://huggingface.co/llava-hf/llava-next-110b-hf) require the following format:
|
||||
|
||||
```bash
|
||||
"<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<image>\nWhat is shown in this image?<|im_end|>\n<|im_start|>assistant\n"
|
||||
```
|
||||
|
||||
## Usage example
|
||||
|
||||
### Single image inference
|
||||
@ -86,8 +139,17 @@ model.to("cuda:0")
|
||||
# prepare image and text prompt, using the appropriate prompt template
|
||||
url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true"
|
||||
image = Image.open(requests.get(url, stream=True).raw)
|
||||
prompt = "[INST] <image>\nWhat is shown in this image? [/INST]"
|
||||
|
||||
conversation = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "text", "text": "What is shown in this image?"},
|
||||
],
|
||||
},
|
||||
]
|
||||
prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
|
||||
inputs = processor(prompt, image, return_tensors="pt").to("cuda:0")
|
||||
|
||||
# autoregressively complete prompt
|
||||
@ -120,15 +182,47 @@ image_cats = Image.open(requests.get(url, stream=True).raw)
|
||||
url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg"
|
||||
image_snowman = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
# Prepare a batched prompt, where the first one is a multi-turn conversation and the second is not
|
||||
prompt = [
|
||||
"[INST] <image>\nWhat is shown in this image? [/INST] There is a red stop sign in the image. [INST] <image>\nWhat about this image? How many cats do you see [/INST]",
|
||||
"[INST] <image>\nWhat is shown in this image? [/INST]"
|
||||
# Prepare a batch of two prompts, where the first one is a multi-turn conversation and the second is not
|
||||
conversation_1 = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "text", "text": "What is shown in this image?"},
|
||||
],
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": [
|
||||
{"type": "text", "text": "There is a red stop sign in the image."},
|
||||
],
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "text", "text": "What about this image? How many cats do you see?"},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
conversation_2 = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "text", "text": "What is shown in this image?"},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
prompt_1 = processor.apply_chat_template(conversation_1, add_generation_prompt=True)
|
||||
prompt_2 = processor.apply_chat_template(conversation_2, add_generation_prompt=True)
|
||||
prompts = [prompt_1, prompt_2]
|
||||
|
||||
# We can simply feed images in the order they have to be used in the text prompt
|
||||
# Each "<image>" token uses one image leaving the next for the subsequent "<image>" tokens
|
||||
inputs = processor(text=prompt, images=[image_stop, image_cats, image_snowman], padding=True, return_tensors="pt").to(model.device)
|
||||
inputs = processor(text=prompts, images=[image_stop, image_cats, image_snowman], padding=True, return_tensors="pt").to(model.device)
|
||||
|
||||
# Generate
|
||||
generate_ids = model.generate(**inputs, max_new_tokens=30)
|
||||
|
@ -105,7 +105,7 @@ from huggingface_hub import list_models
|
||||
|
||||
model_list = list_models()
|
||||
org = "Helsinki-NLP"
|
||||
model_ids = [x.modelId for x in model_list if x.modelId.startswith(org)]
|
||||
model_ids = [x.id for x in model_list if x.id.startswith(org)]
|
||||
suffix = [x.split("/")[1] for x in model_ids]
|
||||
old_style_multi_models = [f"{org}/{s}" for s in suffix if s != s.lower()]
|
||||
```
|
||||
|
@ -51,7 +51,7 @@ The Authors' code can be found [here](https://github.com/microsoft/ProphetNet).
|
||||
|
||||
- ProphetNet is a model with absolute position embeddings, so it's usually advised to pad the inputs on the right rather than the left (see the sketch below).
|
||||
- The model architecture is based on the original Transformer, but replaces the “standard” self-attention mechanism in the decoder by a a main self-attention mechanism and a self and n-stream (predict) self-attention mechanism.
|
||||
- The model architecture is based on the original Transformer, but replaces the “standard” self-attention mechanism in the decoder by a main self-attention mechanism and a self and n-stream (predict) self-attention mechanism.
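A minimal sketch of the padding tip above, assuming the `microsoft/prophetnet-large-uncased` checkpoint and illustrative input sentences:

```python
from transformers import AutoTokenizer

# pad on the right, as advised for models with absolute position embeddings
tokenizer = AutoTokenizer.from_pretrained("microsoft/prophetnet-large-uncased", padding_side="right")
batch = tokenizer(
    ["Hello world.", "A somewhat longer input sentence that forces the first one to be padded."],
    padding=True,
    return_tensors="pt",
)
print(batch["attention_mask"])  # zeros appear at the end (right side) of the shorter sequence
```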
|
||||
|
||||
## Resources
|
||||
|
||||
|
@ -18,7 +18,7 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
## Overview
|
||||
|
||||
Qwen2 is the new model series of large language models from the Qwen team. Previously, we released the Qwen series, including Qwen-72B, Qwen-1.8B, Qwen-VL, Qwen-Audio, etc.
|
||||
Qwen2 is the new model series of large language models from the Qwen team. Previously, we released the Qwen series, including Qwen2-0.5B, Qwen2-1.5B, Qwen2-7B, Qwen2-57B-A14B, Qwen2-72B, Qwen2-Audio, etc.
|
||||
|
||||
### Model Details
|
||||
|
||||
@ -27,16 +27,16 @@ Qwen2 is a language model series including decoder language models of different
|
||||
|
||||
## Usage tips
|
||||
|
||||
`Qwen2-7B-beta` and `Qwen2-7B-Chat-beta` can be found on the [Huggingface Hub](https://huggingface.co/Qwen)
|
||||
`Qwen2-7B` and `Qwen2-7B-Instruct` can be found on the [Huggingface Hub](https://huggingface.co/Qwen)
|
||||
|
||||
In the following, we demonstrate how to use `Qwen2-7B-Chat-beta` for the inference. Note that we have used the ChatML format for dialog, in this demo we show how to leverage `apply_chat_template` for this purpose.
|
||||
In the following, we demonstrate how to use `Qwen2-7B-Instruct` for inference. Note that we use the ChatML format for dialog; in this demo we show how to leverage `apply_chat_template` for this purpose.
|
||||
|
||||
```python
|
||||
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
>>> device = "cuda" # the device to load the model onto
|
||||
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-7B-Chat", device_map="auto")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-7B-Chat")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen2-7B-Instruct", device_map="auto")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-7B-Instruct")
|
||||
|
||||
>>> prompt = "Give me a short introduction to large language model."
|
||||
|
||||
|
@ -51,19 +51,19 @@ This model was contributed by [julien-c](https://huggingface.co/julien-c). The o
|
||||
|
||||
## Usage tips
|
||||
|
||||
- This implementation is the same as [`BertModel`] with a tiny embeddings tweak as well as a setup
|
||||
for Roberta pretrained models.
|
||||
- RoBERTa has the same architecture as BERT, but uses a byte-level BPE as a tokenizer (same as GPT-2) and uses a
|
||||
- This implementation is the same as [`BertModel`] with a minor tweak to the embeddings, as well as a setup
|
||||
for RoBERTa pretrained models.
|
||||
- RoBERTa has the same architecture as BERT but uses a byte-level BPE as a tokenizer (same as GPT-2) and uses a
|
||||
different pretraining scheme.
|
||||
- RoBERTa doesn't have `token_type_ids`, you don't need to indicate which token belongs to which segment. Just
|
||||
separate your segments with the separation token `tokenizer.sep_token` (or `</s>`)
|
||||
- Same as BERT with better pretraining tricks:
|
||||
- RoBERTa doesn't have `token_type_ids`, so you don't need to indicate which token belongs to which segment. Just
|
||||
separate your segments with the separation token `tokenizer.sep_token` (or `</s>`), as shown in the sketch after this list.
|
||||
- RoBERTa is similar to BERT but with better pretraining techniques:
|
||||
|
||||
* dynamic masking: tokens are masked differently at each epoch, whereas BERT does it once and for all
|
||||
* together to reach 512 tokens (so the sentences are in an order than may span several documents)
|
||||
* train with larger batches
|
||||
* use BPE with bytes as a subunit and not characters (because of unicode characters)
|
||||
- [CamemBERT](camembert) is a wrapper around RoBERTa. Refer to this page for usage examples.
|
||||
* Dynamic masking: tokens are masked differently at each epoch, whereas BERT does it once and for all.
|
||||
* Sentence packing: Sentences are packed together to reach 512 tokens (so the sentences are in an order that may span several documents).
|
||||
* Larger batches: Training uses larger batches.
|
||||
* Byte-level BPE vocabulary: Uses BPE with bytes as a subunit instead of characters, accommodating Unicode characters.
|
||||
- [CamemBERT](camembert) is a wrapper around RoBERTa. Refer to its model page for usage examples.
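A minimal sketch of the segment tip above (the sentence pair is only illustrative):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")

# two segments are joined with the separation token; no token_type_ids are required
enc = tokenizer("How are you?", "Fine, thanks!", return_tensors="pt")
print(tokenizer.decode(enc["input_ids"][0]))
# roughly: <s>How are you?</s></s>Fine, thanks!</s>
```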
|
||||
|
||||
## Resources
|
||||
|
||||
|
@ -27,6 +27,11 @@ The abstract from the paper is the following:
|
||||
|
||||
*Recently, end-to-end transformer-based detectors (DETRs) have achieved remarkable performance. However, the issue of the high computational cost of DETRs has not been effectively addressed, limiting their practical application and preventing them from fully exploiting the benefits of no post-processing, such as non-maximum suppression (NMS). In this paper, we first analyze the influence of NMS in modern real-time object detectors on inference speed, and establish an end-to-end speed benchmark. To avoid the inference delay caused by NMS, we propose a Real-Time DEtection TRansformer (RT-DETR), the first real-time end-to-end object detector to our best knowledge. Specifically, we design an efficient hybrid encoder to efficiently process multi-scale features by decoupling the intra-scale interaction and cross-scale fusion, and propose IoU-aware query selection to improve the initialization of object queries. In addition, our proposed detector supports flexibly adjustment of the inference speed by using different decoder layers without the need for retraining, which facilitates the practical application of real-time object detectors. Our RT-DETR-L achieves 53.0% AP on COCO val2017 and 114 FPS on T4 GPU, while RT-DETR-X achieves 54.8% AP and 74 FPS, outperforming all YOLO detectors of the same scale in both speed and accuracy. Furthermore, our RT-DETR-R50 achieves 53.1% AP and 108 FPS, outperforming DINO-Deformable-DETR-R50 by 2.2% AP in accuracy and by about 21 times in FPS.*
|
||||
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/rt_detr_overview.png"
|
||||
alt="drawing" width="600"/>
|
||||
|
||||
<small> RT-DETR performance relative to YOLO models. Taken from the <a href="https://arxiv.org/abs/2304.08069">original paper.</a> </small>
|
||||
|
||||
The model version was contributed by [rafaelpadilla](https://huggingface.co/rafaelpadilla) and [sangbumchoi](https://github.com/SangbumChoi). The original code can be found [here](https://github.com/lyuwenyu/RT-DETR/).
|
||||
|
||||
|
||||
@ -66,6 +71,16 @@ remote: 0.95 [40.11, 73.44, 175.96, 118.48]
|
||||
remote: 0.92 [333.73, 76.58, 369.97, 186.99]
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with RT-DETR.
|
||||
|
||||
<PipelineTag pipeline="object-detection"/>
|
||||
|
||||
- Scripts for finetuning [`RTDetrForObjectDetection`] with [`Trainer`] or [Accelerate](https://huggingface.co/docs/accelerate/index) can be found [here](https://github.com/huggingface/transformers/tree/main/examples/pytorch/object-detection).
|
||||
- See also: [Object detection task guide](../tasks/object_detection).
|
||||
- Notebooks regarding inference and fine-tuning RT-DETR on a custom dataset can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/RT-DETR). 🌎
|
||||
|
||||
## RTDetrConfig
|
||||
|
||||
[[autodoc]] RTDetrConfig
|
||||
|
@ -27,7 +27,7 @@ The abstract from the paper is the following:
|
||||
## Usage tips
|
||||
|
||||
- Usage of SigLIP is similar to [CLIP](clip). The main difference is the training loss, which does not require a global view of all the pairwise similarities of images and texts within a batch. One needs to apply the sigmoid activation function to the logits, rather than the softmax.
|
||||
- Training is not yet supported. If you want to fine-tune SigLIP or train from scratch, refer to the loss function from [OpenCLIP](https://github.com/mlfoundations/open_clip/blob/73ad04ae7fb93ede1c02dc9040a828634cb1edf1/src/open_clip/loss.py#L307), which leverages various `torch.distributed` utilities.
|
||||
- Training is supported but does not use `torch.distributed` utilities, which may limit the scalability of the batch size. However, DDP and FSDP work on a single-node multi-GPU setup.
|
||||
- When using the standalone [`SiglipTokenizer`] or [`SiglipProcessor`], make sure to pass `padding="max_length"` as that's how the model was trained.
|
||||
- To get the same results as the pipeline, a prompt template of "This is a photo of {label}." should be used (see the pipeline sketch below).
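A minimal pipeline sketch of the tips above (the image URL and labels are only illustrative); the zero-shot image classification pipeline applies the sigmoid and a "This is a photo of {label}."-style template for you:

```python
from transformers import pipeline

classifier = pipeline(task="zero-shot-image-classification", model="google/siglip-so400m-patch14-384")
outputs = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["2 cats", "2 dogs"],
)
print(outputs)  # a list of {"score": ..., "label": ...} entries
```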
|
||||
|
||||
@ -107,6 +107,88 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h
|
||||
|
||||
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
|
||||
|
||||
|
||||
## Combining SigLIP and Flash Attention 2
|
||||
|
||||
First, make sure to install the latest version of Flash Attention 2.
|
||||
|
||||
```bash
|
||||
pip install -U flash-attn --no-build-isolation
|
||||
```
|
||||
|
||||
Also make sure that your hardware is compatible with Flash Attention 2. Read more about it in the official documentation of the [flash-attn](https://github.com/Dao-AILab/flash-attention) repository. Also make sure to load your model in half-precision (e.g. `torch.float16`).
|
||||
|
||||
To load and run a model using Flash Attention 2, refer to the snippet below:
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> import requests
|
||||
>>> from PIL import Image
|
||||
>>> from transformers import SiglipProcessor, SiglipModel
|
||||
>>> device = "cuda" # the device to load the model onto
|
||||
|
||||
>>> model = SiglipModel.from_pretrained(
|
||||
... "google/siglip-so400m-patch14-384",
|
||||
... attn_implementation="flash_attention_2",
|
||||
... torch_dtype=torch.float16,
|
||||
... device_map=device,
|
||||
... )
|
||||
>>> processor = SiglipProcessor.from_pretrained("google/siglip-so400m-patch14-384")
|
||||
|
||||
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
>>> image = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
>>> candidate_labels = ["2 cats", "2 dogs"]
|
||||
# follows the pipeline prompt template to get same results
|
||||
>>> candidate_labels = [f'This is a photo of {label}.' for label in candidate_labels]
|
||||
# important: we pass `padding=max_length` since the model was trained with this
|
||||
>>> inputs = processor(text=candidate_labels, images=image, padding="max_length", return_tensors="pt")
|
||||
>>> inputs.to(device)
|
||||
|
||||
>>> with torch.no_grad():
|
||||
... with torch.autocast(device):
|
||||
... outputs = model(**inputs)
|
||||
|
||||
>>> logits_per_image = outputs.logits_per_image
|
||||
>>> probs = torch.sigmoid(logits_per_image) # these are the probabilities
|
||||
>>> print(f"{probs[0][0]:.1%} that image 0 is '{candidate_labels[0]}'")
|
||||
51.3% that image 0 is 'This is a photo of 2 cats.'
|
||||
```
|
||||
|
||||
|
||||
## Using Scaled Dot Product Attention (SDPA)
|
||||
|
||||
PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function
|
||||
encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the
|
||||
[official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)
|
||||
or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)
|
||||
page for more information.
|
||||
|
||||
You may set `attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used. Make sure you have `torch>=2.1.1`.
|
||||
|
||||
```python
|
||||
>>> from transformers import SiglipModel
|
||||
|
||||
>>> model = SiglipModel.from_pretrained(
|
||||
... "google/siglip-so400m-patch14-384",
|
||||
... attn_implementation="sdpa",
|
||||
... torch_dtype=torch.float16,
|
||||
... device_map=device,
|
||||
... )
|
||||
```
|
||||
|
||||
For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).
|
||||
|
||||
|
||||
## Expected speedups
|
||||
|
||||
Below is an expected speedup diagram comparing inference time between the native implementation in Transformers using the `google/siglip-so400m-patch14-384` checkpoint in `float16` precision and the Flash Attention 2 / SDPA versions of the model at different batch sizes.
|
||||
|
||||
<div style="text-align: center">
|
||||
<img src="https://i.imgur.com/cWm4rsn.png">
|
||||
</div>
|
||||
|
||||
|
||||
## SiglipConfig
|
||||
|
||||
[[autodoc]] SiglipConfig
|
||||
|
@ -98,7 +98,7 @@ indices = np.arange(0, total_frames, total_frames / 8).astype(int)
|
||||
video = read_video_pyav(container, indices)
|
||||
|
||||
# For better results, we recommend prompting the model in the following format
|
||||
prompt = "USER: <video>Why is this funny? ASSISTANT:"
|
||||
prompt = "USER: <video>\nWhy is this funny? ASSISTANT:"
|
||||
inputs = processor(text=prompt, videos=video, return_tensors="pt")
|
||||
|
||||
out = model.generate(**inputs, max_new_tokens=60)
|
||||
@ -108,7 +108,7 @@ processor.batch_decode(out, skip_special_tokens=True, clean_up_tokenization_spac
|
||||
For multiple turns conversation change the prompt format to:
|
||||
|
||||
```bash
|
||||
"USER: <video>What do you see in this video? ASSISTANT: A baby reading a book. USER: Why is the it funny? ASSISTANT:"
|
||||
"USER: <video>\nWhat do you see in this video? ASSISTANT: A baby reading a book. USER: Why is the it funny? ASSISTANT:"
|
||||
```
|
||||
|
||||
### Mixed Media Mode
|
||||
@ -123,7 +123,7 @@ import requests
|
||||
# Load an image and write a new prompt
|
||||
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
image = Image.open(requests.get(url, stream=True).raw)
|
||||
prompt = "USER: <image> How many cats are there in the image? ASSISTANT: There are two cats. USER: <video>Why is this video funny? ASSISTANT:"
|
||||
prompt = "USER: <image>\nHow many cats are there in the image? ASSISTANT: There are two cats. USER: <video>\nWhy is this video funny? ASSISTANT:"
|
||||
|
||||
inputs = processor(text=prompt, images=image, videos=clip, padding=True, return_tensors="pt")
|
||||
|
||||
|
@ -26,7 +26,12 @@ The abstract from the paper is the following:
|
||||
|
||||
*While existing large vision-language multimodal models focus on whole image understanding, there is a prominent gap in achieving region-specific comprehension. Current approaches that use textual coordinates or spatial encodings often fail to provide a user-friendly interface for visual prompting. To address this challenge, we introduce a novel multimodal model capable of decoding arbitrary visual prompts. This allows users to intuitively mark images and interact with the model using natural cues like a "red bounding box" or "pointed arrow". Our simple design directly overlays visual markers onto the RGB image, eliminating the need for complex region encodings, yet achieves state-of-the-art performance on region-understanding tasks like Visual7W, PointQA, and Visual Commonsense Reasoning benchmark. Furthermore, we present ViP-Bench, a comprehensive benchmark to assess the capability of models in understanding visual prompts across multiple dimensions, enabling future research in this domain. Code, data, and model are publicly available.*
|
||||
|
||||
Tips:
|
||||
The original code can be found [here](https://github.com/mu-cai/ViP-LLaVA).
|
||||
|
||||
This model was contributed by [Younes Belkada](https://huggingface.co/ybelkada)
|
||||
|
||||
|
||||
## Usage tips:
|
||||
|
||||
- The architecture is similar to the LLaVA architecture, except that the multi-modal projector takes a set of concatenated vision hidden states and has an additional layernorm layer on that module.
|
||||
|
||||
@ -34,22 +39,51 @@ Tips:
|
||||
|
||||
- Note that the model has not been explicitly trained to process multiple images in the same prompt; although this is technically possible, you may experience inaccurate results.
|
||||
|
||||
- For better results, we recommend users to prompt the model with the correct prompt format:
|
||||
- For better results, we recommend using the processor's `apply_chat_template()` method to format your prompt correctly. For that you need to construct a conversation history; passing in a plain string will not format your prompt. Each message in the conversation history for chat templates is a dictionary with keys "role" and "content". The "content" should be a list of dictionaries, for "text" and "image" modalities, as follows:
|
||||
|
||||
```python
|
||||
from transformers import AutoProcessor
|
||||
|
||||
processor = AutoProcessor.from_pretrained("llava-hf/vip-llava-7b-hf")
|
||||
|
||||
conversation = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "text", "text": "What’s shown in this image?"},
|
||||
],
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": [{"type": "text", "text": "This image shows a red stop sign."},]
|
||||
},
|
||||
{
|
||||
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "text", "text": "Describe the image in more details."},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
|
||||
|
||||
# Note that the template simply formats your prompt, you still have to tokenize it and obtain pixel values for your images
|
||||
print(text_prompt)
|
||||
>>> "###Human: <image>\nWhat’s shown in this image?###Assistant: This image shows a red stop sign.###Human: Describe the image in more details.###Assistant:"
|
||||
```
|
||||
|
||||
- If you want to construct a chat prompt yourself, below is a list of prompt formats accepted by VipLLaVa checkpoints:
|
||||
```bash
|
||||
A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.###Human: <image>\n<prompt>###Assistant:
|
||||
```
|
||||
|
||||
For multiple turns conversation:
|
||||
|
||||
```bash
|
||||
A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.###Human: <image>\n<prompt1>###Assistant: <answer1>###Human: <prompt2>###Assistant:
|
||||
```
|
||||
|
||||
The original code can be found [here](https://github.com/mu-cai/ViP-LLaVA).
|
||||
|
||||
This model was contributed by [Younes Belkada](https://huggingface.co/ybelkada)
|
||||
|
||||
|
||||
## VipLlavaConfig
|
||||
|
||||
|
@ -52,8 +52,6 @@ Here is a step-by-step guide to transcribing an audio sample using a pre-trained
|
||||
>>> # Select an audio file and read it:
|
||||
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
|
||||
>>> audio_sample = ds[0]["audio"]
|
||||
>>> waveform = audio_sample["array"]
|
||||
>>> sampling_rate = audio_sample["sampling_rate"]
|
||||
|
||||
>>> # Load the Whisper model in Hugging Face format:
|
||||
>>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
|
||||
@ -61,7 +59,7 @@ Here is a step-by-step guide to transcribing an audio sample using a pre-trained
|
||||
|
||||
>>> # Use the model and processor to transcribe the audio:
|
||||
>>> input_features = processor(
|
||||
... waveform, sampling_rate=sampling_rate, return_tensors="pt"
|
||||
... audio_sample["array"], sampling_rate=audio_sample["sampling_rate"], return_tensors="pt"
|
||||
... ).input_features
|
||||
|
||||
>>> # Generate token ids
|
||||
@ -74,6 +72,49 @@ Here is a step-by-step guide to transcribing an audio sample using a pre-trained
|
||||
' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
|
||||
```
|
||||
|
||||
Whisper is compatible with the following optimisations:
|
||||
- [PyTorch Scaled Dot Product Attention (SDPA)](../perf_infer_gpu_one#pytorch-scaled-dot-product-attention): flash attention and memory-efficient attention kernels. Enabled by default for `torch>=2.1.1`.
|
||||
- [Flash Attention 2](../perf_infer_gpu_one#flashattention-2): improved implementation of flash attention through better parallelism and work partitioning.
|
||||
- [torch.compile](../llm_optims#static-kv-cache-and-torchcompile): JIT-compile the forward pass to dispatch to efficient fused kernels.
|
||||
|
||||
As an example, the following code snippet enables SDPA and `torch.compile` for up to 5x faster inference:
|
||||
|
||||
```python
|
||||
>>> from datasets import load_dataset
|
||||
>>> import torch
>>> from transformers import WhisperProcessor, WhisperForConditionalGeneration
|
||||
|
||||
>>> # Select an audio file and read it:
|
||||
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
|
||||
>>> audio_sample = ds[0]["audio"]
|
||||
|
||||
>>> # Load the Whisper model with SDPA attention
|
||||
>>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
|
||||
>>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", attn_implementation="sdpa")
|
||||
|
||||
>>> # Enable static cache and compile the forward pass
|
||||
>>> model.generation_config.cache_implementation = "static"
|
||||
>>> model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
|
||||
|
||||
>>> # Use the model and processor to transcribe the audio:
|
||||
>>> input_features = processor(
|
||||
... audio_sample["array"], sampling_rate=audio_sample["sampling_rate"], return_tensors="pt"
|
||||
... ).input_features
|
||||
|
||||
>>> # Compile the forward pass
|
||||
>>> _ = model.generate(input_features)
|
||||
|
||||
>>> # Generate token ids using compiled graph (fast!)
|
||||
>>> predicted_ids = model.generate(input_features)
|
||||
|
||||
>>> # Decode token ids to text
|
||||
>>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
|
||||
|
||||
>>> transcription[0]
|
||||
' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
|
||||
```
|
||||
|
||||
For more details on each optimisation, refer to the documentation linked above.
|
||||
|
||||
## Resources
|
||||
|
||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Whisper. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
|
||||
|
108
docs/source/en/model_doc/zoedepth.md
Normal file
@ -0,0 +1,108 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# ZoeDepth
|
||||
|
||||
## Overview
|
||||
|
||||
The ZoeDepth model was proposed in [ZoeDepth: Zero-shot Transfer by Combining Relative and Metric Depth](https://arxiv.org/abs/2302.12288) by Shariq Farooq Bhat, Reiner Birkl, Diana Wofk, Peter Wonka, Matthias Müller. ZoeDepth extends the [DPT](dpt) framework for metric (also called absolute) depth estimation. ZoeDepth is pre-trained on 12 datasets using relative depth and fine-tuned on two domains (NYU and KITTI) using metric depth. A lightweight head is used with a novel bin adjustment design called metric bins module for each domain. During inference, each input image is automatically routed to the appropriate head using a latent classifier.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*This paper tackles the problem of depth estimation from a single image. Existing work either focuses on generalization performance disregarding metric scale, i.e. relative depth estimation, or state-of-the-art results on specific datasets, i.e. metric depth estimation. We propose the first approach that combines both worlds, leading to a model with excellent generalization performance while maintaining metric scale. Our flagship model, ZoeD-M12-NK, is pre-trained on 12 datasets using relative depth and fine-tuned on two datasets using metric depth. We use a lightweight head with a novel bin adjustment design called metric bins module for each domain. During inference, each input image is automatically routed to the appropriate head using a latent classifier. Our framework admits multiple configurations depending on the datasets used for relative depth pre-training and metric fine-tuning. Without pre-training, we can already significantly improve the state of the art (SOTA) on the NYU Depth v2 indoor dataset. Pre-training on twelve datasets and fine-tuning on the NYU Depth v2 indoor dataset, we can further improve SOTA for a total of 21% in terms of relative absolute error (REL). Finally, ZoeD-M12-NK is the first model that can jointly train on multiple datasets (NYU Depth v2 and KITTI) without a significant drop in performance and achieve unprecedented zero-shot generalization performance to eight unseen datasets from both indoor and outdoor domains.*
|
||||
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/zoedepth_architecture_bis.png"
|
||||
alt="drawing" width="600"/>
|
||||
|
||||
<small> ZoeDepth architecture. Taken from the <a href="https://arxiv.org/abs/2302.12288">original paper.</a> </small>
|
||||
|
||||
This model was contributed by [nielsr](https://huggingface.co/nielsr).
|
||||
The original code can be found [here](https://github.com/isl-org/ZoeDepth).
|
||||
|
||||
## Usage tips
|
||||
|
||||
- ZoeDepth is an absolute (also called metric) depth estimation model, unlike DPT which is a relative depth estimation model. This means that ZoeDepth is able to estimate depth in metric units like meters.
|
||||
|
||||
The easiest way to perform inference with ZoeDepth is by leveraging the [pipeline API](../main_classes/pipelines.md):
|
||||
|
||||
```python
|
||||
from transformers import pipeline
|
||||
from PIL import Image
|
||||
import requests
|
||||
|
||||
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
image = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
pipe = pipeline(task="depth-estimation", model="Intel/zoedepth-nyu-kitti")
|
||||
result = pipe(image)
|
||||
depth = result["depth"]
|
||||
```
|
||||
|
||||
Alternatively, one can also perform inference using the classes:
|
||||
|
||||
```python
|
||||
from transformers import AutoImageProcessor, ZoeDepthForDepthEstimation
|
||||
import torch
|
||||
import numpy as np
|
||||
from PIL import Image
|
||||
import requests
|
||||
|
||||
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
image = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
image_processor = AutoImageProcessor.from_pretrained("Intel/zoedepth-nyu-kitti")
|
||||
model = ZoeDepthForDepthEstimation.from_pretrained("Intel/zoedepth-nyu-kitti")
|
||||
|
||||
# prepare image for the model
|
||||
inputs = image_processor(images=image, return_tensors="pt")
|
||||
|
||||
with torch.no_grad():
|
||||
outputs = model(**inputs)
|
||||
predicted_depth = outputs.predicted_depth
|
||||
|
||||
# interpolate to original size
|
||||
prediction = torch.nn.functional.interpolate(
|
||||
predicted_depth.unsqueeze(1),
|
||||
size=image.size[::-1],
|
||||
mode="bicubic",
|
||||
align_corners=False,
|
||||
)
|
||||
|
||||
# visualize the prediction
|
||||
output = prediction.squeeze().cpu().numpy()
|
||||
formatted = (output * 255 / np.max(output)).astype("uint8")
|
||||
depth = Image.fromarray(formatted)
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ZoeDepth.
|
||||
|
||||
- A demo notebook regarding inference with ZoeDepth models can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/ZoeDepth). 🌎
|
||||
|
||||
## ZoeDepthConfig
|
||||
|
||||
[[autodoc]] ZoeDepthConfig
|
||||
|
||||
## ZoeDepthImageProcessor
|
||||
|
||||
[[autodoc]] ZoeDepthImageProcessor
|
||||
- preprocess
|
||||
|
||||
## ZoeDepthForDepthEstimation
|
||||
|
||||
[[autodoc]] ZoeDepthForDepthEstimation
|
||||
- forward
|
@ -77,7 +77,7 @@ Then use `notebook_login` to sign-in to the Hub, and follow the link [here](http
|
||||
|
||||
To ensure your model can be used by someone working with a different framework, we recommend you convert and upload your model with both PyTorch and TensorFlow checkpoints. While users are still able to load your model from a different framework if you skip this step, it will be slower because 🤗 Transformers will need to convert the checkpoint on-the-fly.
|
||||
|
||||
Converting a checkpoint for another framework is easy. Make sure you have PyTorch and TensorFlow installed (see [here](installation) for installation instructions), and then find the specific model for your task in the other framework.
|
||||
Converting a checkpoint for another framework is easy. Make sure you have PyTorch and TensorFlow installed (see [here](installation) for installation instructions), and then find the specific model for your task in the other framework.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
@ -116,7 +116,7 @@ Each new generation provides a faster bandwidth, e.g. here is a quote from [Nvid
|
||||
|
||||
So the higher `X` you get in the report of `NVX` in the output of `nvidia-smi topo -m` the better. The generation will depend on your GPU architecture.
|
||||
|
||||
Let's compare the execution of a openai-community/gpt2 language model training over a small sample of wikitext.
|
||||
Let's compare the execution of an openai-community/gpt2 language model training over a small sample of wikitext.
|
||||
|
||||
The results are:
|
||||
|
||||
|
@ -39,10 +39,13 @@ FlashAttention-2 is experimental and may change considerably in future versions.
|
||||
FlashAttention-2 is currently supported for the following architectures:
|
||||
* [Bark](https://huggingface.co/docs/transformers/model_doc/bark#transformers.BarkModel)
|
||||
* [Bart](https://huggingface.co/docs/transformers/model_doc/bart#transformers.BartModel)
|
||||
* [Chameleon](https://huggingface.co/docs/transformers/model_doc/chameleon#transformers.Chameleon)
|
||||
* [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPModel)
|
||||
* [Cohere](https://huggingface.co/docs/transformers/model_doc/cohere#transformers.CohereModel)
|
||||
* [Dbrx](https://huggingface.co/docs/transformers/model_doc/dbrx#transformers.DbrxModel)
|
||||
* [DistilBert](https://huggingface.co/docs/transformers/model_doc/distilbert#transformers.DistilBertModel)
|
||||
* [Gemma](https://huggingface.co/docs/transformers/model_doc/gemma#transformers.GemmaModel)
|
||||
* [Gemma2](https://huggingface.co/docs/transformers/model_doc/gemma2#transformers.Gemma2Model)
|
||||
* [GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)
|
||||
* [GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode#transformers.GPTBigCodeModel)
|
||||
* [GPTNeo](https://huggingface.co/docs/transformers/model_doc/gpt_neo#transformers.GPTNeoModel)
|
||||
@ -69,6 +72,7 @@ FlashAttention-2 is currently supported for the following architectures:
|
||||
* [OPT](https://huggingface.co/docs/transformers/model_doc/opt#transformers.OPTModel)
|
||||
* [Phi](https://huggingface.co/docs/transformers/model_doc/phi#transformers.PhiModel)
|
||||
* [Phi3](https://huggingface.co/docs/transformers/model_doc/phi3#transformers.Phi3Model)
|
||||
* [SigLIP](https://huggingface.co/docs/transformers/model_doc/siglip)
|
||||
* [StableLm](https://huggingface.co/docs/transformers/model_doc/stablelm#transformers.StableLmModel)
|
||||
* [Starcoder2](https://huggingface.co/docs/transformers/model_doc/starcoder2#transformers.Starcoder2Model)
|
||||
* [Qwen2](https://huggingface.co/docs/transformers/model_doc/qwen2#transformers.Qwen2Model)
|
||||
@ -196,12 +200,15 @@ For now, Transformers supports SDPA inference and training for the following arc
|
||||
* [Audio Spectrogram Transformer](https://huggingface.co/docs/transformers/model_doc/audio-spectrogram-transformer#transformers.ASTModel)
|
||||
* [Bart](https://huggingface.co/docs/transformers/model_doc/bart#transformers.BartModel)
|
||||
* [Bert](https://huggingface.co/docs/transformers/model_doc/bert#transformers.BertModel)
|
||||
* [Chameleon](https://huggingface.co/docs/transformers/model_doc/chameleon#transformers.Chameleon)
|
||||
* [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPModel)
|
||||
* [Cohere](https://huggingface.co/docs/transformers/model_doc/cohere#transformers.CohereModel)
|
||||
* [Dbrx](https://huggingface.co/docs/transformers/model_doc/dbrx#transformers.DbrxModel)
|
||||
* [DeiT](https://huggingface.co/docs/transformers/model_doc/deit#transformers.DeiTModel)
|
||||
* [Dpr](https://huggingface.co/docs/transformers/model_doc/dpr#transformers.DprReader)
|
||||
* [Falcon](https://huggingface.co/docs/transformers/model_doc/falcon#transformers.FalconModel)
|
||||
* [Gemma](https://huggingface.co/docs/transformers/model_doc/gemma#transformers.GemmaModel)
|
||||
* [Gemma2](https://huggingface.co/docs/transformers/model_doc/gemma2#transformers.Gemma2Model)
|
||||
* [GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)
|
||||
* [GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode#transformers.GPTBigCodeModel)
|
||||
* [GPTNeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox#transformers.GPTNeoXModel)
|
||||
@ -229,6 +236,7 @@ For now, Transformers supports SDPA inference and training for the following arc
|
||||
* [wav2vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2Model)
|
||||
* [Hubert](https://huggingface.co/docs/transformers/model_doc/hubert#transformers.HubertModel)
|
||||
* [data2vec_audio](https://huggingface.co/docs/transformers/main/en/model_doc/data2vec#transformers.Data2VecAudioModel)
|
||||
* [SigLIP](https://huggingface.co/docs/transformers/model_doc/siglip)
|
||||
* [Sew](https://huggingface.co/docs/transformers/main/en/model_doc/sew#transformers.SEWModel)
|
||||
* [UniSpeech](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/unispeech#transformers.UniSpeechModel)
|
||||
* [unispeech_sat](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/unispeech-sat#transformers.UniSpeechSatModel)
|
||||
|
@ -98,7 +98,7 @@ Below you can find the list of the models we benchmarked.
|
||||
- [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224)
|
||||
- [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k)
|
||||
- [facebook/convnext-large-224](https://huggingface.co/facebook/convnext-large-224)
|
||||
- [microsoft/resnet-50](https://huggingface.co/)
|
||||
- [microsoft/resnet-50](https://huggingface.co/microsoft/resnet-50)
|
||||
|
||||
**Image Segmentation**
|
||||
- [nvidia/segformer-b0-finetuned-ade-512-512](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512)
|
||||
|
@ -41,21 +41,22 @@ hyperparameter tuning, you should determine which batch size yields the best res
|
||||
|
||||
The methods and tools covered in this guide can be classified based on the effect they have on the training process:
|
||||
|
||||
| Method/tool | Improves training speed | Optimizes memory utilization |
|
||||
|:-----------------------------------------------------------|:------------------------|:-----------------------------|
|
||||
| [Batch size choice](#batch-size-choice) | Yes | Yes |
|
||||
| [Gradient accumulation](#gradient-accumulation) | No | Yes |
|
||||
| [Gradient checkpointing](#gradient-checkpointing) | No | Yes |
|
||||
| [Mixed precision training](#mixed-precision-training) | Yes | (No) |
|
||||
| [Optimizer choice](#optimizer-choice) | Yes | Yes |
|
||||
| [Data preloading](#data-preloading) | Yes | No |
|
||||
| [DeepSpeed Zero](#deepspeed-zero) | No | Yes |
|
||||
| [torch.compile](#using-torchcompile) | Yes | No |
|
||||
| [Parameter-Efficient Fine Tuning (PEFT)](#using--peft) | No | Yes |
|
||||
| Method/tool | Improves training speed | Optimizes memory utilization |
|
||||
|:--------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------|:-----------------------------|
|
||||
| [Batch size choice](#batch-size-choice) | Yes | Yes |
|
||||
| [Gradient accumulation](#gradient-accumulation) | No | Yes |
|
||||
| [Gradient checkpointing](#gradient-checkpointing) | No | Yes |
|
||||
| [Mixed precision training](#mixed-precision-training) | Yes | Maybe* |
|
||||
| [torch_empty_cache_steps](https://huggingface.co/docs/transformers/main/en/main_classes/trainer#transformers.TrainingArguments.torch_empty_cache_steps) | No | Yes |
|
||||
| [Optimizer choice](#optimizer-choice) | Yes | Yes |
|
||||
| [Data preloading](#data-preloading) | Yes | No |
|
||||
| [DeepSpeed Zero](#deepspeed-zero) | No | Yes |
|
||||
| [torch.compile](#using-torchcompile) | Yes | No |
|
||||
| [Parameter-Efficient Fine Tuning (PEFT)](#using--peft) | No | Yes |
|
||||
|
||||
<Tip>
|
||||
|
||||
Note: when using mixed precision with a small model and a large batch size, there will be some memory savings but with a
|
||||
*Note: when using mixed precision with a small model and a large batch size, there will be some memory savings but with a
|
||||
large model and a small batch size, the memory use will be larger.
|
||||
|
||||
</Tip>
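To make the table concrete, below is a minimal sketch of how several of these switches map onto [`TrainingArguments`]; the values are hypothetical and should be tuned for your own hardware:

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="out",
    per_device_train_batch_size=4,   # batch size choice
    gradient_accumulation_steps=4,   # gradient accumulation
    gradient_checkpointing=True,     # gradient checkpointing
    fp16=True,                       # mixed precision training
    torch_empty_cache_steps=4,       # periodically empty the CUDA cache
)
```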
|
||||
|
@ -113,7 +113,9 @@ This will work regardless of whether you are using PyTorch or Tensorflow.
|
||||
transcriber = pipeline(model="openai/whisper-large-v2", device=0)
|
||||
```
|
||||
|
||||
If the model is too large for a single GPU and you are using PyTorch, you can set `device_map="auto"` to automatically
|
||||
If the model is too large for a single GPU and you are using PyTorch, you can set `torch_dtype='float16'` to enable FP16 precision inference. Usually this does not cause significant accuracy degradation, but make sure you evaluate it on your own models!
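For example, a minimal sketch of the half-precision option (reusing the checkpoint from above for illustration):

```python
import torch
from transformers import pipeline

# FP16 inference on a single GPU
transcriber = pipeline(model="openai/whisper-large-v2", device=0, torch_dtype=torch.float16)
```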
|
||||
|
||||
Alternatively, you can set `device_map="auto"` to automatically
|
||||
determine how to load and store the model weights. Using the `device_map` argument requires the 🤗 [Accelerate](https://huggingface.co/docs/accelerate)
|
||||
package:
|
||||
|
||||
@ -342,4 +344,3 @@ gr.Interface.from_pipeline(pipe).launch()
|
||||
|
||||
By default, the web demo runs on a local server. If you'd like to share it with others, you can generate a temporary public
|
||||
link by setting `share=True` in `launch()`. You can also host your demo on [Hugging Face Spaces](https://huggingface.co/spaces) for a permanent link.
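For example, a one-line variant of the `launch()` call shown above:

```python
gr.Interface.from_pipeline(pipe).launch(share=True)  # generates a temporary public link
```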
|
||||
|
||||
|
@ -471,7 +471,7 @@ from [`DetrImageProcessor`] and define a custom `collate_fn` to batch images tog
|
||||
|
||||
## Multimodal
|
||||
|
||||
For tasks involving multimodal inputs, you'll need a [processor](main_classes/processors) to prepare your dataset for the model. A processor couples together two processing objects such as as tokenizer and feature extractor.
|
||||
For tasks involving multimodal inputs, you'll need a [processor](main_classes/processors) to prepare your dataset for the model. A processor couples together two processing objects such as tokenizer and feature extractor.
|
||||
|
||||
Load the [LJ Speech](https://huggingface.co/datasets/lj_speech) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset) to see how you can use a processor for automatic speech recognition (ASR):
|
||||
|
||||
|
58
docs/source/en/quantization/fbgemm_fp8.md
Normal file
@ -0,0 +1,58 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# FBGEMM FP8
|
||||
|
||||
With the FBGEMM FP8 quantization method, you can quantize your model to FP8 (W8A8):
|
||||
- the weights will be quantized in 8bit (FP8) per channel
|
||||
- the activation will be quantized in 8bit (FP8) per token
|
||||
|
||||
It relies on the [FBGEMM](https://github.com/pytorch/FBGEMM) library which provides efficient low-precision general matrix multiplication for small batch sizes and support for accuracy-loss minimizing techniques such as row-wise quantization and outlier-aware quantization.
|
||||
|
||||
> [!TIP]
|
||||
> You need a GPU with compute capability>=9 (e.g. H100)
|
||||
|
||||
Before you begin, make sure the following libraries are installed with their latest version:
|
||||
|
||||
```bash
|
||||
pip install --upgrade accelerate fbgemm-gpu torch
|
||||
```
|
||||
|
||||
If you are having issues with the fbgemm-gpu and torch libraries, you might need to install the nightly release. You can follow the instructions [here](https://pytorch.org/FBGEMM/fbgemm_gpu-development/InstallationInstructions.html#fbgemm-gpu-install-libraries:~:text=found%20here.-,Install%20the%20FBGEMM_GPU%20Package,-Install%20through%20PyTorch)
|
||||
|
||||
|
||||
```py
|
||||
from transformers import FbgemmFp8Config, AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
model_name = "meta-llama/Meta-Llama-3-8B"
|
||||
quantization_config = FbgemmFp8Config()
|
||||
quantized_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", quantization_config=quantization_config)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||
input_text = "What are we having for dinner?"
|
||||
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
||||
|
||||
output = quantized_model.generate(**input_ids, max_new_tokens=10)
|
||||
print(tokenizer.decode(output[0], skip_special_tokens=True))
|
||||
```
|
||||
|
||||
A quantized model can be saved via `save_pretrained` and reloaded via `from_pretrained`.
|
||||
|
||||
```py
|
||||
quant_path = "/path/to/save/quantized/model"
|
||||
model.save_pretrained(quant_path)
|
||||
model = AutoModelForCausalLM.from_pretrained(quant_path, device_map="auto")
|
||||
```
|
@ -55,4 +55,5 @@ Use the table below to help you decide which quantization method to use.
|
||||
| [GPTQ](./gptq) | 🔴 | 🔴 | 🟢 | 🟢 | 🔴 | 🔴 | 2 - 3 - 4 - 8 | 🟢 | 🟢 | 🟢 | https://github.com/AutoGPTQ/AutoGPTQ |
|
||||
| [HQQ](./hqq) | 🟢 | 🟢 | 🟢 | 🔴 | 🔴 | 🟢 | 1 - 8 | 🟢 | 🔴 | 🟢 | https://github.com/mobiusml/hqq/ |
|
||||
| [Quanto](./quanto) | 🟢 | 🟢 | 🟢 | 🔴 | 🟢 | 🟢 | 2 / 4 / 8 | 🔴 | 🔴 | 🟢 | https://github.com/huggingface/quanto |
|
||||
| [FBGEMM_FP8](./fbgemm_fp8.md) | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 🔴 | 8 | 🔴 | 🟢 | 🟢 | https://github.com/pytorch/FBGEMM |
|
||||
|
||||
|
232
docs/source/en/tasks/image_text_to_text.md
Normal file
@ -0,0 +1,232 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Image-text-to-text
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
Image-text-to-text models, also known as vision language models (VLMs), are language models that take an image input. These models can tackle various tasks, from visual question answering to image segmentation. This task shares many similarities with image-to-text, with some overlapping use cases like image captioning. Image-to-text models only take image inputs and often accomplish a specific task, whereas VLMs take open-ended text and image inputs and are more generalist models.
|
||||
|
||||
In this guide, we provide a brief overview of VLMs and show how to use them with Transformers for inference.
|
||||
|
||||
To begin with, there are multiple types of VLMs:
|
||||
- base models used for fine-tuning
|
||||
- chat fine-tuned models for conversation
|
||||
- instruction fine-tuned models
|
||||
|
||||
This guide focuses on inference with an instruction-tuned model.
|
||||
|
||||
Let's begin installing the dependencies.
|
||||
|
||||
```bash
|
||||
pip install -q transformers accelerate flash_attn
|
||||
```
|
||||
|
||||
Let's initialize the model and the processor.
|
||||
|
||||
```python
|
||||
from transformers import AutoProcessor, Idefics2ForConditionalGeneration
|
||||
import torch
|
||||
|
||||
device = torch.device("cuda")
|
||||
model = Idefics2ForConditionalGeneration.from_pretrained(
|
||||
"HuggingFaceM4/idefics2-8b",
|
||||
torch_dtype=torch.bfloat16,
|
||||
attn_implementation="flash_attention_2",
|
||||
).to(device)
|
||||
|
||||
processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
|
||||
```
|
||||
|
||||
This model has a [chat template](./chat_templating) that helps users parse chat outputs. Moreover, the model can also accept multiple images as input in a single conversation or message. We will now prepare the inputs.
|
||||
|
||||
The image inputs look like the following.
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/cats.png" alt="Two cats sitting on a net"/>
|
||||
</div>
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg" alt="A bee on a pink flower"/>
|
||||
</div>
|
||||
|
||||
|
||||
```python
|
||||
from PIL import Image
|
||||
import requests
|
||||
|
||||
img_urls =["https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/cats.png",
|
||||
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"]
|
||||
images = [Image.open(requests.get(img_urls[0], stream=True).raw),
|
||||
Image.open(requests.get(img_urls[1], stream=True).raw)]
|
||||
```
|
||||
|
||||
Below is an example of the chat template. We can feed the conversation turns and the latest user message as input by appending them to the end of the template.
|
||||
|
||||
|
||||
```python
|
||||
messages = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "text", "text": "What do we see in this image?"},
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": [
|
||||
{"type": "text", "text": "In this image we can see two cats on the nets."},
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "text", "text": "And how about this image?"},
|
||||
]
|
||||
},
|
||||
]
|
||||
```
|
||||
|
||||
We will now call the processor's [`~ProcessorMixin.apply_chat_template`] method to preprocess the conversation together with the image inputs.
|
||||
|
||||
```python
|
||||
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
|
||||
inputs = processor(text=prompt, images=[images[0], images[1]], return_tensors="pt").to(device)
|
||||
```
|
||||
|
||||
We can now pass the preprocessed inputs to the model.
|
||||
|
||||
```python
|
||||
with torch.no_grad():
|
||||
generated_ids = model.generate(**inputs, max_new_tokens=500)
|
||||
generated_texts = processor.batch_decode(generated_ids, skip_special_tokens=True)
|
||||
|
||||
print(generated_texts)
|
||||
## ['User: What do we see in this image? \nAssistant: In this image we can see two cats on the nets. \nUser: And how about this image? \nAssistant: In this image we can see flowers, plants and insect.']
|
||||
```
|
||||
|
||||
## Streaming
|
||||
|
||||
We can use [text streaming](./generation_strategies#streaming) for a better generation experience. Transformers supports streaming with the [`TextStreamer`] or [`TextIteratorStreamer`] classes. We will use the [`TextIteratorStreamer`] with IDEFICS-8B.
|
||||
|
||||
Assume we have an application that keeps chat history and takes in the new user input. We will preprocess the inputs as usual and initialize [`TextIteratorStreamer`] to handle the generation in a separate thread. This allows you to stream the generated text tokens in real-time. Any generation arguments can be passed to [`TextIteratorStreamer`].
|
||||
|
||||
|
||||
```python
|
||||
import time
|
||||
from transformers import TextIteratorStreamer
|
||||
from threading import Thread
|
||||
|
||||
def model_inference(
|
||||
user_prompt,
|
||||
chat_history,
|
||||
max_new_tokens,
|
||||
images
|
||||
):
|
||||
user_prompt = {
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "text", "text": user_prompt},
|
||||
]
|
||||
}
|
||||
chat_history.append(user_prompt)
|
||||
streamer = TextIteratorStreamer(
|
||||
processor.tokenizer,
|
||||
skip_prompt=True,
|
||||
timeout=5.0,
|
||||
)
|
||||
|
||||
generation_args = {
|
||||
"max_new_tokens": max_new_tokens,
|
||||
"streamer": streamer,
|
||||
"do_sample": False
|
||||
}
|
||||
|
||||
# add_generation_prompt=True makes model generate bot response
|
||||
prompt = processor.apply_chat_template(chat_history, add_generation_prompt=True)
|
||||
inputs = processor(
|
||||
text=prompt,
|
||||
images=images,
|
||||
return_tensors="pt",
|
||||
).to(device)
|
||||
generation_args.update(inputs)
|
||||
|
||||
thread = Thread(
|
||||
target=model.generate,
|
||||
kwargs=generation_args,
|
||||
)
|
||||
thread.start()
|
||||
|
||||
acc_text = ""
|
||||
for text_token in streamer:
|
||||
time.sleep(0.04)
|
||||
acc_text += text_token
|
||||
if acc_text.endswith("<end_of_utterance>"):
|
||||
acc_text = acc_text[:-18]
|
||||
yield acc_text
|
||||
|
||||
thread.join()
|
||||
```
|
||||
|
||||
Now let's call the `model_inference` function we created and stream the values.
|
||||
|
||||
```python
|
||||
generator = model_inference(
|
||||
user_prompt="And what is in this image?",
|
||||
chat_history=messages,
|
||||
max_new_tokens=100,
|
||||
images=images
|
||||
)
|
||||
|
||||
for value in generator:
|
||||
print(value)
|
||||
|
||||
# In
|
||||
# In this
|
||||
# In this image ...
|
||||
```
|
||||
|
||||
## Fit models in smaller hardware
|
||||
|
||||
VLMs are often large and need to be optimized to fit in smaller hardware. Transformers supports many model quantization libraries, and here we will only show int8 quantization with [Quanto](./quantization/quanto#quanto). int8 quantization offers memory improvements of up to 75 percent (if all weights are quantized). However, it is not a free lunch: since 8-bit is not a CUDA-native precision, the weights are quantized and dequantized on the fly, which adds latency.
|
||||
|
||||
First, install dependencies.
|
||||
|
||||
```bash
|
||||
pip install -U quanto bitsandbytes
|
||||
```
|
||||
|
||||
To quantize a model while loading it, we first need to create a [`QuantoConfig`]. Then load the model as usual, passing the `quantization_config` during model initialization.
|
||||
|
||||
```python
|
||||
from transformers import Idefics2ForConditionalGeneration, AutoTokenizer, QuantoConfig
|
||||
|
||||
model_id = "HuggingFaceM4/idefics2-8b"
|
||||
quantization_config = QuantoConfig(weights="int8")
|
||||
quantized_model = Idefics2ForConditionalGeneration.from_pretrained(model_id, device_map="cuda", quantization_config=quantization_config)
|
||||
```
|
||||
|
||||
And that's it: we can use the quantized model exactly as before, with no further changes.
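For example, a minimal sketch (assuming the `processor`, the chat-formatted `messages`, and the `images` list from the earlier sections are still in scope, and that `torch` is imported) could look like this:

```python
# the quantized model is a drop-in replacement for the original one
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
inputs = processor(text=prompt, images=images, return_tensors="pt").to("cuda")

with torch.no_grad():
    generated_ids = quantized_model.generate(**inputs, max_new_tokens=100)

print(processor.batch_decode(generated_ids, skip_special_tokens=True))
```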
|
||||
|
||||
## Further Reading
|
||||
|
||||
Here are some more resources for the image-text-to-text task.
|
||||
|
||||
- [Image-text-to-text task page](https://huggingface.co/tasks/image-text-to-text) covers model types, use cases, datasets, and more.
|
||||
- [Vision Language Models Explained](https://huggingface.co/blog/vlms) is a blog post that covers everything about vision language models and supervised fine-tuning using [TRL](https://huggingface.co/docs/trl/en/index).
|
@ -23,23 +23,26 @@ a single camera viewpoint.
|
||||
Monocular depth estimation has various applications, including 3D reconstruction, augmented reality, autonomous driving,
|
||||
and robotics. It is a challenging task as it requires the model to understand the complex relationships between objects
|
||||
in the scene and the corresponding depth information, which can be affected by factors such as lighting conditions,
|
||||
occlusion, and texture.
|
||||
occlusion, and texture.
|
||||
|
||||
There are two main depth estimation categories:
|
||||
|
||||
- **Absolute depth estimation**: This task variant aims to provide exact depth measurements from the camera. The term is used interchangeably with metric depth estimation, where depth is provided in precise measurements in meters or feet. Absolute depth estimation models output depth maps with numerical values that represent real-world distances.
|
||||
|
||||
- **Relative depth estimation**: Relative depth estimation aims to predict the depth order of objects or points in a scene without providing precise measurements. These models output a depth map that indicates which parts of the scene are closer or farther relative to each other, without giving the actual distances to those points (see the short sketch after this list).
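To make the distinction concrete, here is a minimal NumPy sketch; the `relative_depth` and `metric_depth` arrays are hypothetical stand-ins for model outputs, not values produced by the models discussed below.

```python
import numpy as np

# hypothetical model outputs with the same spatial shape
relative_depth = np.random.rand(480, 640).astype("float32")       # unitless, only the ordering matters
metric_depth = 10.0 * np.random.rand(480, 640).astype("float32")  # pretend these values are meters

# relative depth: the scale is arbitrary, so normalize to [0, 1] before visualizing or comparing
relative_vis = (relative_depth - relative_depth.min()) / (relative_depth.max() - relative_depth.min() + 1e-8)

# absolute (metric) depth: values can be read directly, e.g. the distance at the image center
center_distance_in_meters = metric_depth[240, 320]
```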
|
||||
|
||||
In this guide, we will see how to infer with [Depth Anything V2](https://huggingface.co/depth-anything/Depth-Anything-V2-Large), a state-of-the-art zero-shot relative depth estimation model, and [ZoeDepth](https://huggingface.co/docs/transformers/main/en/model_doc/zoedepth), an absolute depth estimation model.
|
||||
|
||||
<Tip>
|
||||
|
||||
To see all architectures and checkpoints compatible with this task, we recommend checking the [task-page](https://huggingface.co/tasks/depth-anything)
|
||||
Check the [Depth Estimation](https://huggingface.co/tasks/depth-estimation) task page to view all compatible architectures and checkpoints.
|
||||
|
||||
</Tip>
|
||||
|
||||
In this guide you'll learn how to:
|
||||
|
||||
* create a depth estimation pipeline
|
||||
* run depth estimation inference by hand
|
||||
|
||||
Before you begin, make sure you have all the necessary libraries installed:
|
||||
Before we begin, we need to install the latest version of Transformers:
|
||||
|
||||
```bash
|
||||
pip install -q transformers
|
||||
pip install -q -U transformers
|
||||
```
|
||||
|
||||
## Depth estimation pipeline
|
||||
@ -49,9 +52,11 @@ Instantiate a pipeline from a [checkpoint on the Hugging Face Hub](https://huggi
|
||||
|
||||
```py
|
||||
>>> from transformers import pipeline
|
||||
>>> import torch
|
||||
|
||||
>>> checkpoint = "vinvino02/glpn-nyu"
|
||||
>>> depth_estimator = pipeline("depth-estimation", model=checkpoint)
|
||||
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
>>> checkpoint = "depth-anything/Depth-Anything-V2-base-hf"
|
||||
>>> pipe = pipeline("depth-estimation", model=checkpoint, device=device)
|
||||
```
|
||||
|
||||
Next, choose an image to analyze:
|
||||
@ -60,19 +65,19 @@ Next, choose an image to analyze:
|
||||
>>> from PIL import Image
|
||||
>>> import requests
|
||||
|
||||
>>> url = "https://unsplash.com/photos/HwBAsSbPBDU/download?ixid=MnwxMjA3fDB8MXxzZWFyY2h8MzR8fGNhciUyMGluJTIwdGhlJTIwc3RyZWV0fGVufDB8MHx8fDE2Nzg5MDEwODg&force=true&w=640"
|
||||
>>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"
|
||||
>>> image = Image.open(requests.get(url, stream=True).raw)
|
||||
>>> image
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/depth-estimation-example.jpg" alt="Photo of a busy street"/>
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg" alt="Photo of a bee"/>
|
||||
</div>
|
||||
|
||||
Pass the image to the pipeline.
|
||||
|
||||
```py
|
||||
>>> predictions = depth_estimator(image)
|
||||
>>> predictions = pipe(image)
|
||||
```
|
||||
|
||||
The pipeline returns a dictionary with two entries. The first one, called `predicted_depth`, is a tensor with the values
|
||||
@ -99,17 +104,17 @@ Here we'll use the same checkpoint as before:
|
||||
```py
|
||||
>>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation
|
||||
|
||||
>>> checkpoint = "vinvino02/glpn-nyu"
|
||||
>>> checkpoint = "Intel/zoedepth-nyu-kitti"
|
||||
|
||||
>>> image_processor = AutoImageProcessor.from_pretrained(checkpoint)
|
||||
>>> model = AutoModelForDepthEstimation.from_pretrained(checkpoint)
|
||||
>>> model = AutoModelForDepthEstimation.from_pretrained(checkpoint).to(device)
|
||||
```
|
||||
|
||||
Prepare the image input for the model using the `image_processor` that will take care of the necessary image transformations
|
||||
such as resizing and normalization:
|
||||
|
||||
```py
|
||||
>>> pixel_values = image_processor(image, return_tensors="pt").pixel_values
|
||||
>>> pixel_values = image_processor(image, return_tensors="pt").pixel_values.to(device)
|
||||
```
|
||||
|
||||
Pass the prepared inputs through the model:
|
||||
@ -119,28 +124,100 @@ Pass the prepared inputs through the model:
|
||||
|
||||
>>> with torch.no_grad():
|
||||
... outputs = model(pixel_values)
|
||||
... predicted_depth = outputs.predicted_depth
|
||||
```
|
||||
|
||||
Visualize the results:
|
||||
Let's post-process and visualize the results.
|
||||
|
||||
We need to post-process the output so that the predicted depth map lines up with the original image: first resize it to the size of the padded model input, then remove the padded regions from the depth map.
|
||||
|
||||
```py
|
||||
>>> import numpy as np
|
||||
>>> import torch.nn.functional as F
|
||||
|
||||
>>> # interpolate to original size
|
||||
>>> prediction = torch.nn.functional.interpolate(
|
||||
... predicted_depth.unsqueeze(1),
|
||||
... size=image.size[::-1],
|
||||
... mode="bicubic",
|
||||
... align_corners=False,
|
||||
... ).squeeze()
|
||||
>>> output = prediction.numpy()
|
||||
>>> predicted_depth = outputs.predicted_depth.unsqueeze(dim=1)
|
||||
>>> height, width = pixel_values.shape[2:]
|
||||
|
||||
>>> formatted = (output * 255 / np.max(output)).astype("uint8")
|
||||
>>> depth = Image.fromarray(formatted)
|
||||
>>> depth
|
||||
>>> height_padding_factor = width_padding_factor = 3
|
||||
>>> pad_h = int(np.sqrt(height/2) * height_padding_factor)
|
||||
>>> pad_w = int(np.sqrt(width/2) * width_padding_factor)
|
||||
|
||||
>>> if predicted_depth.shape[-2:] != pixel_values.shape[-2:]:
|
||||
...     predicted_depth = F.interpolate(predicted_depth, size=(height, width), mode="bicubic", align_corners=False)
|
||||
|
||||
>>> if pad_h > 0:
|
||||
...     predicted_depth = predicted_depth[:, :, pad_h:-pad_h, :]
|
||||
>>> if pad_w > 0:
|
||||
...     predicted_depth = predicted_depth[:, :, :, pad_w:-pad_w]
|
||||
```
|
||||
|
||||
We can now visualize the results (the function below is taken from the [GaussianObject](https://github.com/GaussianObject/GaussianObject/blob/ad6629efadb57902d5f8bc0fa562258029a4bdf1/pred_monodepth.py#L11) framework).
|
||||
|
||||
```py
|
||||
import matplotlib
|
||||
|
||||
def colorize(value, vmin=None, vmax=None, cmap='gray_r', invalid_val=-99, invalid_mask=None, background_color=(128, 128, 128, 255), gamma_corrected=False, value_transform=None):
|
||||
"""Converts a depth map to a color image.
|
||||
|
||||
Args:
|
||||
        value (torch.Tensor, numpy.ndarray): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed.
|
||||
vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, value.min() is used. Defaults to None.
|
||||
vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, value.max() is used. Defaults to None.
|
||||
        cmap (str, optional): matplotlib colormap to use. Defaults to 'gray_r'.
|
||||
invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99.
|
||||
invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
|
||||
        background_color (tuple[int], optional): 4-tuple RGBA color to give to invalid pixels. Defaults to (128, 128, 128, 255).
|
||||
gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
|
||||
value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None.
|
||||
|
||||
Returns:
|
||||
numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4)
|
||||
"""
|
||||
if isinstance(value, torch.Tensor):
|
||||
value = value.detach().cpu().numpy()
|
||||
|
||||
value = value.squeeze()
|
||||
if invalid_mask is None:
|
||||
invalid_mask = value == invalid_val
|
||||
mask = np.logical_not(invalid_mask)
|
||||
|
||||
# normalize
|
||||
vmin = np.percentile(value[mask],2) if vmin is None else vmin
|
||||
vmax = np.percentile(value[mask],85) if vmax is None else vmax
|
||||
if vmin != vmax:
|
||||
value = (value - vmin) / (vmax - vmin) # vmin..vmax
|
||||
else:
|
||||
# Avoid 0-division
|
||||
value = value * 0.
|
||||
|
||||
# squeeze last dim if it exists
|
||||
# grey out the invalid values
|
||||
|
||||
value[invalid_mask] = np.nan
|
||||
cmapper = matplotlib.colormaps.get_cmap(cmap)
|
||||
if value_transform:
|
||||
value = value_transform(value)
|
||||
# value = value / value.max()
|
||||
value = cmapper(value, bytes=True) # (nxmx4)
|
||||
|
||||
# img = value[:, :, :]
|
||||
img = value[...]
|
||||
img[invalid_mask] = background_color
|
||||
|
||||
# return img.transpose((2, 0, 1))
|
||||
if gamma_corrected:
|
||||
# gamma correction
|
||||
img = img / 255
|
||||
img = np.power(img, 2.2)
|
||||
img = img * 255
|
||||
img = img.astype(np.uint8)
|
||||
return img
|
||||
|
||||
>>> result = colorize(predicted_depth.cpu().squeeze().numpy())
|
||||
>>> Image.fromarray(result)
|
||||
```
|
||||
|
||||
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/depth-visualization.png" alt="Depth estimation visualization"/>
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/depth-visualization-zoe.png" alt="Depth estimation visualization"/>
|
||||
</div>
|
||||
|
@ -1011,7 +1011,7 @@ slow models to do qualitative testing. To see the use of these simply look for *
|
||||
grep tiny tests examples
|
||||
```
|
||||
|
||||
Here is a an example of a [script](https://github.com/huggingface/transformers/tree/main/scripts/fsmt/fsmt-make-tiny-model.py) that created the tiny model
|
||||
Here is an example of a [script](https://github.com/huggingface/transformers/tree/main/scripts/fsmt/fsmt-make-tiny-model.py) that created the tiny model
|
||||
[stas/tiny-wmt19-en-de](https://huggingface.co/stas/tiny-wmt19-en-de). You can easily adjust it to your specific
|
||||
model's architecture.
|
||||
|
||||
|
@ -157,7 +157,7 @@ Execution time -- 79.0 ms
|
||||
|
||||
Execution time -- 78.9 ms
|
||||
```
|
||||
The first call to `xla_generate()` is time-consuming because of tracing, but the successive calls are orders of magnitude faster. Keep in mind that any change in the generation options at any point with trigger re-tracing and thus leading to slow-downs in the generation time.
|
||||
The first call to `xla_generate()` is time-consuming because of tracing, but successive calls are orders of magnitude faster. Keep in mind that any change in the generation options at any point will trigger re-tracing, leading to slow-downs in generation time.
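As a rough illustration, here is a sketch assuming a small TensorFlow checkpoint such as `openai-community/gpt2` (the variable names are ours, not from this guide); it times repeated calls to show the cost of tracing:

```python
import time

import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2")

# wrap generate in a jit-compiled tf.function, as done for xla_generate() above
xla_generate = tf.function(model.generate, jit_compile=True)
tokenized_input = tokenizer(["TensorFlow is"], return_tensors="tf")

for max_new_tokens in (8, 8, 16):
    start = time.time_ns()
    xla_generate(**tokenized_input, max_new_tokens=max_new_tokens)
    # the first call and the call with a changed option are slow (tracing);
    # the repeated call with identical options reuses the compiled graph
    print(f"max_new_tokens={max_new_tokens}: {(time.time_ns() - start) / 1e6:.1f} ms")
```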
|
||||
|
||||
We didn’t cover all the text generation options 🤗 Transformers provides in this document. We encourage you to read the documentation for advanced use cases.
|
||||
|
||||
@ -171,4 +171,4 @@ Here, we leave you with some additional resources if you want to delve deeper in
|
||||
* Recommended posts for learning more about XLA and TensorFlow graphs in general:
|
||||
* [XLA: Optimizing Compiler for Machine Learning](https://www.tensorflow.org/xla)
|
||||
* [Introduction to graphs and tf.function](https://www.tensorflow.org/guide/intro_to_graphs)
|
||||
* [Better performance with tf.function](https://www.tensorflow.org/guide/function)
|
||||
* [Better performance with tf.function](https://www.tensorflow.org/guide/function)
|
||||
|
@ -278,7 +278,7 @@ args = TrainingArguments(
|
||||
max_steps=100,
|
||||
per_device_train_batch_size=2,
|
||||
optim="galore_adamw",
|
||||
optim_target_modules=["attn", "mlp"]
|
||||
optim_target_modules=[r".*.attn.*", r".*.mlp.*"]
|
||||
)
|
||||
|
||||
model_id = "google/gemma-2b"
|
||||
@ -315,7 +315,7 @@ args = TrainingArguments(
|
||||
max_steps=100,
|
||||
per_device_train_batch_size=2,
|
||||
optim="galore_adamw",
|
||||
optim_target_modules=["attn", "mlp"],
|
||||
optim_target_modules=[r".*.attn.*", r".*.mlp.*"],
|
||||
optim_args="rank=64, update_proj_gap=100, scale=0.10",
|
||||
)
|
||||
|
||||
@ -359,7 +359,7 @@ args = TrainingArguments(
|
||||
max_steps=100,
|
||||
per_device_train_batch_size=2,
|
||||
optim="galore_adamw_layerwise",
|
||||
optim_target_modules=["attn", "mlp"]
|
||||
optim_target_modules=[r".*.attn.*", r".*.mlp.*"]
|
||||
)
|
||||
|
||||
model_id = "google/gemma-2b"
|
||||
|
@ -220,7 +220,7 @@ La plantilla de chat para un modelo se almacena en el atributo `tokenizer.chat_t
|
||||
>>> from transformers import AutoTokenizer
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
|
||||
|
||||
>>> tokenizer.default_chat_template
|
||||
>>> tokenizer.chat_template
|
||||
"{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}"
|
||||
```
|
||||
|
||||
@ -307,12 +307,6 @@ Si estás ajustando finamente un modelo para chat, además de establecer una pla
|
||||
|
||||
</Tip>
|
||||
|
||||
### ¿Qué son las plantillas "default"?
|
||||
|
||||
Antes de la introducción de las plantillas de chat, el manejo del chat estaba codificado en el nivel de la clase del modelo. Por razones de compatibilidad con versiones anteriores, hemos conservado este manejo específico de la clase como plantillas predeterminadas, también establecidas a nivel de clase. Si un modelo no tiene una plantilla de chat establecida, pero hay una plantilla predeterminada para su clase de modelo, la clase `TextGenerationPipeline` y métodos como `apply_chat_template` usarán la plantilla de clase en su lugar. Puedes averiguar cuál es la plantilla predeterminada para tu tokenizador comprobando el atributo `tokenizer.default_chat_template`.
|
||||
|
||||
Esto es algo que hacemos puramente por razones de compatibilidad con versiones anteriores, para evitar romper cualquier flujo de trabajo existente. Incluso cuando la plantilla de clase es apropiada para tu modelo, recomendamos encarecidamente anular la plantilla predeterminada estableciendo explícitamente el atributo `chat_template` para dejar claro a los usuarios que tu modelo ha sido configurado correctamente para el chat, y para estar preparados para el futuro en caso de que las plantillas predeterminadas alguna vez se alteren o se eliminen.
|
||||
|
||||
### ¿Qué plantilla debería usar?
|
||||
|
||||
Cuando establezcas la plantilla para un modelo que ya ha sido entrenado para chat, debes asegurarte de que la plantilla coincida exactamente con el formato de mensajes que el modelo vio durante el entrenamiento, o de lo contrario es probable que experimentes degradación del rendimiento. Esto es cierto incluso si estás entrenando aún más el modelo; probablemente obtendrás el mejor rendimiento si mantienes constantes los tokens de chat. Esto es muy análogo a la tokenización: generalmente obtienes el mejor rendimiento para la inferencia o el ajuste fino cuando coincides precisamente con la tokenización utilizada durante el entrenamiento.
|
||||
|
@ -15,7 +15,7 @@
|
||||
title: Préparation des données
|
||||
- local: in_translation
|
||||
title: Fine-tune un modèle pré-entraîné
|
||||
- local: in_translation
|
||||
- local: run_scripts_fr
|
||||
title: Entraînement avec un script
|
||||
- local: in_translation
|
||||
title: Entraînement distribué avec 🤗 Accelerate
|
||||
|
355
docs/source/fr/run_scripts_fr.md
Normal file
@ -0,0 +1,355 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Entraîner avec un script
|
||||
|
||||
En plus des [notebooks](./notebooks) de 🤗 Transformers, il existe également des exemples de scripts démontrant comment entraîner un modèle pour une tâche avec [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow) ou [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax).
|
||||
|
||||
|
||||
Vous trouverez également des scripts que nous avons utilisé dans nos [projets de recherche](https://github.com/huggingface/transformers/tree/main/examples/research_projects) et des [exemples "legacy"](https://github.com/huggingface/transformers/tree/main/examples/legacy) qui sont des contributions de la communauté. Ces scripts ne sont pas activement maintenus et nécessitent une version spécifique de 🤗 Transformers qui sera probablement incompatible avec la dernière version de la librairie.
|
||||
|
||||
Les exemples de scripts ne sont pas censés fonctionner immédiatement pour chaque problème, et il se peut que vous ayez besoin d'adapter le script au problème que vous essayez de résoudre. Pour vous aider dans cette tâche, la plupart des scripts exposent entièrement la manière dont les données sont prétraitées, vous permettant de les modifier selon vos besoins.
|
||||
|
||||
Pour toute fonctionnalité que vous souhaitez implémenter dans un script d'exemple, veuillez en discuter sur le [forum](https://discuss.huggingface.co/) ou dans une [issue](https://github.com/huggingface/transformers/issues) avant de soumettre une Pull Request. Bien que nous acceptions les corrections de bugs, il est peu probable que nous fusionnions une Pull Request (opération "merge" dans Git) ajoutant plus de fonctionnalités au détriment de la lisibilité.
|
||||
|
||||
Ce guide vous montrera comment exécuter un script d'entraînement de résumé en exemple avec [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization) et [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization). Tous les exemples sont censés fonctionner avec les deux frameworks, sauf indication contraire.
|
||||
|
||||
## Configuration
|
||||
|
||||
Pour exécuter avec succès la dernière version des scripts d'exemple, vous devez **installer 🤗 Transformers à partir du code source** dans un nouvel environnement virtuel :
|
||||
|
||||
```bash
|
||||
git clone https://github.com/huggingface/transformers
|
||||
cd transformers
|
||||
pip install .
|
||||
```
|
||||
|
||||
Pour les versions plus anciennes des exemples de scripts, cliquez sur le bouton ci-dessous :
|
||||
|
||||
<details>
|
||||
<summary>Exemples pour les anciennes versions de Transformers 🤗</summary>
|
||||
<ul>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.2.2/examples">v4.2.2</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.1.1/examples">v4.1.1</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v4.0.1/examples">v4.0.1</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v3.5.1/examples">v3.5.1</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li>
|
||||
<li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li>
|
||||
</ul>
|
||||
</details>
|
||||
|
||||
Ensuite, changez votre clone actuel de 🤗 Transformers pour une version spécifique, comme par exemple v3.5.1 :
|
||||
|
||||
```bash
|
||||
git checkout tags/v3.5.1
|
||||
```
|
||||
|
||||
Après avoir configuré la bonne version de la librairie, accédez au dossier d'exemple de votre choix et installez les prérequis spécifiques à l'exemple.
|
||||
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
## Exécuter un script
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
Le script d'exemple télécharge et prétraite un jeu de données à partir de la bibliothèque 🤗 [Datasets](https://huggingface.co/docs/datasets/). Ensuite, le script affine un modèle à l'aide de [Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) sur une architecture qui prend en charge la tâche de résumé. L'exemple suivant montre comment ajuster le modèle [T5-small](https://huggingface.co/google-t5/t5-small) sur les données [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail). Le modèle T5 nécessite un argument supplémentaire `source_prefix` en raison de la façon dont il a été entraîné. Cette invite permet à T5 de savoir qu'il s'agit d'une tâche de résumé.
|
||||
|
||||
```bash
|
||||
python examples/pytorch/summarization/run_summarization.py \
|
||||
--model_name_or_path google-t5/t5-small \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--dataset_name cnn_dailymail \
|
||||
--dataset_config "3.0.0" \
|
||||
--source_prefix "summarize: " \
|
||||
--output_dir /tmp/tst-summarization \
|
||||
--per_device_train_batch_size=4 \
|
||||
--per_device_eval_batch_size=4 \
|
||||
--overwrite_output_dir \
|
||||
--predict_with_generate
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
|
||||
Le script d'exemple télécharge et prétraite un jeu de données à partir de la bibliothèque 🤗 [Datasets](https://huggingface.co/docs/datasets/). Ensuite, le script ajuste un modèle à l'aide de Keras sur une architecture qui prend en charge la tâche de résumé. L'exemple suivant montre comment ajuster le modèle [T5-small](https://huggingface.co/google-t5/t5-small) sur le jeu de données [CNN/DailyMail](https://huggingface.co/datasets/cnn_dailymail). Le modèle T5 nécessite un argument supplémentaire source_prefix en raison de la façon dont il a été entraîné. Cette invite permet à T5 de savoir qu'il s'agit d'une tâche de résumé.
|
||||
|
||||
```bash
|
||||
python examples/tensorflow/summarization/run_summarization.py \
|
||||
--model_name_or_path google-t5/t5-small \
|
||||
--dataset_name cnn_dailymail \
|
||||
--dataset_config "3.0.0" \
|
||||
--output_dir /tmp/tst-summarization \
|
||||
--per_device_train_batch_size 8 \
|
||||
--per_device_eval_batch_size 16 \
|
||||
--num_train_epochs 3 \
|
||||
--do_train \
|
||||
--do_eval
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## Entraînement distribué et précision mixte
|
||||
|
||||
[Trainer](https://huggingface.co/docs/transformers/main_classes/trainer) prend en charge l'entraînement distribué et la précision mixte, ce qui signifie que vous pouvez également les utiliser dans un script. Pour activer ces deux fonctionnalités :
|
||||
|
||||
- Ajoutez l'argument fp16 pour activer la précision mixte.
|
||||
- Définissez le nombre de GPU à utiliser avec l'argument `nproc_per_node`.
|
||||
|
||||
```bash
|
||||
torchrun \
|
||||
--nproc_per_node 8 pytorch/summarization/run_summarization.py \
|
||||
--fp16 \
|
||||
--model_name_or_path google-t5/t5-small \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--dataset_name cnn_dailymail \
|
||||
--dataset_config "3.0.0" \
|
||||
--source_prefix "summarize: " \
|
||||
--output_dir /tmp/tst-summarization \
|
||||
--per_device_train_batch_size=4 \
|
||||
--per_device_eval_batch_size=4 \
|
||||
--overwrite_output_dir \
|
||||
--predict_with_generate
|
||||
```
|
||||
|
||||
Les scripts TensorFlow utilisent une Strategie en Miroir [`MirroredStrategy`](https://www.tensorflow.org/guide/distributed_training#mirroredstrategy) pour l'entraînement distribué, et vous n'avez pas besoin d'ajouter d'arguments supplémentaires au script d'entraînement. Le script TensorFlow utilisera plusieurs GPU par défaut s'ils sont disponibles.
|
||||
|
||||
## Exécuter un script sur un TPU
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
Les unités de traitement de tenseurs (TPU) sont spécialement conçues pour accélérer les performances. PyTorch prend en charge les TPU avec le compilateur de deep learning [XLA](https://www.tensorflow.org/xla). Pour utiliser un TPU, lancez le script `xla_spawn.py` et utilisez l'argument `num_cores` pour définir le nombre de cœurs TPU que vous souhaitez utiliser.
|
||||
|
||||
```bash
|
||||
python xla_spawn.py --num_cores 8 \
|
||||
summarization/run_summarization.py \
|
||||
--model_name_or_path google-t5/t5-small \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--dataset_name cnn_dailymail \
|
||||
--dataset_config "3.0.0" \
|
||||
--source_prefix "summarize: " \
|
||||
--output_dir /tmp/tst-summarization \
|
||||
--per_device_train_batch_size=4 \
|
||||
--per_device_eval_batch_size=4 \
|
||||
--overwrite_output_dir \
|
||||
--predict_with_generate
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
Les scripts TensorFlow utilisent une [`TPUStrategy`](https://www.tensorflow.org/guide/distributed_training#tpustrategy) pour l'entraînement sur TPU. Pour utiliser un TPU, passez le nom de la ressource TPU à l'argument tpu.
|
||||
|
||||
```bash
|
||||
python run_summarization.py \
|
||||
--tpu name_of_tpu_resource \
|
||||
--model_name_or_path google-t5/t5-small \
|
||||
--dataset_name cnn_dailymail \
|
||||
--dataset_config "3.0.0" \
|
||||
--output_dir /tmp/tst-summarization \
|
||||
--per_device_train_batch_size 8 \
|
||||
--per_device_eval_batch_size 16 \
|
||||
--num_train_epochs 3 \
|
||||
--do_train \
|
||||
--do_eval
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## Exécuter un script avec 🤗 Accelerate
|
||||
|
||||
🤗 [Accelerate](https://huggingface.co/docs/accelerate) est une bibliothèque uniquement pour PyTorch qui offre une méthode unifiée pour entraîner un modèle sur plusieurs types de configurations (CPU uniquement, plusieurs GPU, TPU) tout en maintenant une visibilité complète sur la boucle d'entraînement PyTorch. Assurez-vous que vous avez installé 🤗 Accelerate si ce n'est pas déjà le cas.
|
||||
|
||||
> Note : Comme Accelerate est en développement rapide, la version git d'accelerate doit être installée pour exécuter les scripts.
|
||||
```bash
|
||||
pip install git+https://github.com/huggingface/accelerate
|
||||
```
|
||||
|
||||
Au lieu du script `run_summarization.py`, vous devez utiliser le script `run_summarization_no_trainer.py`. Les scripts compatibles avec 🤗 Accelerate auront un fichier `task_no_trainer.py` dans le dossier. Commencez par exécuter la commande suivante pour créer et enregistrer un fichier de configuration.
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
Testez votre configuration pour vous assurer qu'elle est correctement configurée :
|
||||
|
||||
```bash
|
||||
accelerate test
|
||||
```
|
||||
|
||||
Maintenant, vous êtes prêt à lancer l'entraînement :
|
||||
|
||||
```bash
|
||||
accelerate launch run_summarization_no_trainer.py \
|
||||
--model_name_or_path google-t5/t5-small \
|
||||
--dataset_name cnn_dailymail \
|
||||
--dataset_config "3.0.0" \
|
||||
--source_prefix "summarize: " \
|
||||
--output_dir ~/tmp/tst-summarization
|
||||
```
|
||||
|
||||
## Utiliser un jeu de données personnalisé
|
||||
|
||||
Le script de résumé prend en charge les jeux de données personnalisés tant qu'ils sont au format CSV ou JSON Line. Lorsque vous utilisez votre propre jeu de données, vous devez spécifier plusieurs arguments supplémentaires :
|
||||
|
||||
- `train_file` et `validation_file` spécifient le chemin vers vos fichiers d'entraînement et de validation.
|
||||
- `text_column` est le texte d'entrée à résumer.
|
||||
- `summary_column` est le texte cible à produire.
|
||||
|
||||
Un exemple de script de résumé utilisant un ensemble de données personnalisé ressemblerait à ceci :
|
||||
|
||||
```bash
|
||||
python examples/pytorch/summarization/run_summarization.py \
|
||||
--model_name_or_path google-t5/t5-small \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--train_file path_to_csv_or_jsonlines_file \
|
||||
--validation_file path_to_csv_or_jsonlines_file \
|
||||
--text_column text_column_name \
|
||||
--summary_column summary_column_name \
|
||||
--source_prefix "summarize: " \
|
||||
--output_dir /tmp/tst-summarization \
|
||||
--overwrite_output_dir \
|
||||
--per_device_train_batch_size=4 \
|
||||
--per_device_eval_batch_size=4 \
|
||||
--predict_with_generate
|
||||
```
|
||||
|
||||
## Tester un script
|
||||
Il est souvent judicieux d'exécuter votre script sur un plus petit nombre d'exemples de jeu de données pour s'assurer que tout fonctionne comme prévu avant de s'engager sur un jeu de données complet qui pourrait prendre des heures à traiter. Utilisez les arguments suivants pour tronquer le jeu de données à un nombre maximal d'échantillons :
|
||||
|
||||
- `max_train_samples`
|
||||
- `max_eval_samples`
|
||||
- `max_predict_samples`
|
||||
|
||||
```bash
|
||||
python examples/pytorch/summarization/run_summarization.py \
|
||||
--model_name_or_path google-t5/t5-small \
|
||||
--max_train_samples 50 \
|
||||
--max_eval_samples 50 \
|
||||
--max_predict_samples 50 \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--dataset_name cnn_dailymail \
|
||||
--dataset_config "3.0.0" \
|
||||
--source_prefix "summarize: " \
|
||||
--output_dir /tmp/tst-summarization \
|
||||
--per_device_train_batch_size=4 \
|
||||
--per_device_eval_batch_size=4 \
|
||||
--overwrite_output_dir \
|
||||
--predict_with_generate
|
||||
```
|
||||
|
||||
Tous les scripts d'exemple ne prennent pas en charge l'argument `max_predict_samples`. Si vous n'êtes pas sûr que votre script prenne en charge cet argument, ajoutez l'argument `-h` pour vérifier.
|
||||
|
||||
```bash
|
||||
examples/pytorch/summarization/run_summarization.py -h
|
||||
```
|
||||
|
||||
## Reprendre l'entraînement à partir d'un point de contrôle
|
||||
|
||||
Une autre option utile est de reprendre l'entraînement à partir d'un point de contrôle précédent. Cela vous permettra de reprendre là où vous vous étiez arrêté sans recommencer si votre entraînement est interrompu. Il existe deux méthodes pour reprendre l'entraînement à partir d'un point de contrôle.
|
||||
|
||||
La première méthode utilise l'argument `output_dir previous_output_dir` pour reprendre l'entraînement à partir du dernier point de contrôle stocké dans `output_dir`. Dans ce cas, vous devez supprimer l'argument `overwrite_output_dir`.
|
||||
|
||||
```bash
|
||||
python examples/pytorch/summarization/run_summarization.py
|
||||
--model_name_or_path google-t5/t5-small \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--dataset_name cnn_dailymail \
|
||||
--dataset_config "3.0.0" \
|
||||
--source_prefix "summarize: " \
|
||||
--output_dir /tmp/tst-summarization \
|
||||
--per_device_train_batch_size=4 \
|
||||
--per_device_eval_batch_size=4 \
|
||||
--output_dir previous_output_dir \
|
||||
--predict_with_generate
|
||||
```
|
||||
|
||||
La seconde méthode utilise l'argument `resume_from_checkpoint path_to_specific_checkpoint` pour reprendre l'entraînement à partir d'un dossier de point de contrôle spécifique.
|
||||
|
||||
```bash
|
||||
python examples/pytorch/summarization/run_summarization.py
|
||||
--model_name_or_path google-t5/t5-small \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--dataset_name cnn_dailymail \
|
||||
--dataset_config "3.0.0" \
|
||||
--source_prefix "summarize: " \
|
||||
--output_dir /tmp/tst-summarization \
|
||||
--per_device_train_batch_size=4 \
|
||||
--per_device_eval_batch_size=4 \
|
||||
--overwrite_output_dir \
|
||||
--resume_from_checkpoint path_to_specific_checkpoint \
|
||||
--predict_with_generate
|
||||
```
|
||||
|
||||
## Partagez votre modèle
|
||||
|
||||
Tous les scripts peuvent télécharger votre modèle final sur le Model Hub. Assurez-vous que vous êtes connecté à Hugging Face avant de commencer :
|
||||
|
||||
```bash
|
||||
huggingface-cli login
|
||||
```
|
||||
|
||||
Ensuite, ajoutez l'argument `push_to_hub` au script. Cet argument créera un dépôt avec votre nom d'utilisateur Hugging Face et le nom du dossier spécifié dans `output_dir`.
|
||||
|
||||
|
||||
Pour donner un nom spécifique à votre dépôt, utilisez l'argument `push_to_hub_model_id` pour l'ajouter. Le dépôt sera automatiquement listé sous votre namespace.
|
||||
|
||||
L'exemple suivant montre comment télécharger un modèle avec un nom de dépôt spécifique :
|
||||
|
||||
```bash
|
||||
python examples/pytorch/summarization/run_summarization.py
|
||||
--model_name_or_path google-t5/t5-small \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--dataset_name cnn_dailymail \
|
||||
--dataset_config "3.0.0" \
|
||||
--source_prefix "summarize: " \
|
||||
--push_to_hub \
|
||||
--push_to_hub_model_id finetuned-t5-cnn_dailymail \
|
||||
--output_dir /tmp/tst-summarization \
|
||||
--per_device_train_batch_size=4 \
|
||||
--per_device_eval_batch_size=4 \
|
||||
--overwrite_output_dir \
|
||||
--predict_with_generate
|
||||
```
|
@ -85,7 +85,7 @@ LLM(Language Model)のますます一般的な使用事例の1つは「チ
|
||||
>>> from transformers import AutoTokenizer
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
|
||||
|
||||
>>> tokenizer.default_chat_template
|
||||
>>> tokenizer.chat_template
|
||||
"{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}"
|
||||
```
|
||||
|
||||
|
@ -35,7 +35,7 @@ rendered properly in your Markdown viewer.
|
||||
- [`~integrations.TensorBoardCallback`] (PyTorch >= 1.4 を介して) tensorboard にアクセスできる場合
|
||||
またはテンソルボードX)。
|
||||
- [`~integrations.WandbCallback`] [wandb](https://www.wandb.com/) がインストールされている場合。
|
||||
- [`~integrations.CometCallback`] [comet_ml](https://www.comet.ml/site/) がインストールされている場合。
|
||||
- [`~integrations.CometCallback`] [comet_ml](https://www.comet.com/site/) がインストールされている場合。
|
||||
- [mlflow](https://www.mlflow.org/) がインストールされている場合は [`~integrations.MLflowCallback`]。
|
||||
- [`~integrations.NeptuneCallback`] [neptune](https://neptune.ai/) がインストールされている場合。
|
||||
- [`~integrations.AzureMLCallback`] [azureml-sdk](https://pypi.org/project/azureml-sdk/) の場合
|
||||
|
@ -27,6 +27,8 @@
|
||||
title: 에이전트
|
||||
- local: llm_tutorial
|
||||
title: 대규모 언어 모델로 생성하기
|
||||
- local: in_translation
|
||||
title: (번역중)Chatting with Transformers
|
||||
title: 튜토리얼
|
||||
- sections:
|
||||
- isExpanded: false
|
||||
@ -131,21 +133,41 @@
|
||||
title: (번역중) Notebooks with examples
|
||||
- local: community
|
||||
title: 커뮤니티 리소스
|
||||
- local: custom_tools
|
||||
title: 사용자 정의 도구와 프롬프트
|
||||
- local: troubleshooting
|
||||
title: 문제 해결
|
||||
- local: in_translation
|
||||
title: (번역중) Contribute new quantization method
|
||||
title: (번역중) Interoperability with GGUF files
|
||||
title: (번역중) 개발자 가이드
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: (번역중) Getting started
|
||||
- local: in_translation
|
||||
title: (번역중) bitsandbytes
|
||||
- local: in_translation
|
||||
title: (번역중) GPTQ
|
||||
- local: in_translation
|
||||
title: (번역중) AWQ
|
||||
- local: in_translation
|
||||
title: (번역중) AQLM
|
||||
- local: in_translation
|
||||
title: (번역중) Quanto
|
||||
- local: in_translation
|
||||
title: (번역중) EETQ
|
||||
- local: in_translation
|
||||
title: (번역중) HQQ
|
||||
- local: in_translation
|
||||
title: (번역중) Optimum
|
||||
- local: in_translation
|
||||
title: (번역중) Contribute new quantization method
|
||||
title: (번역중) 경량화 메소드
|
||||
- sections:
|
||||
- local: performance
|
||||
title: 성능 및 확장성
|
||||
- local: in_translation
|
||||
title: (번역중) Quantization
|
||||
title: (번역중) LLM inference optimization
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: (번역중) Training on one GPU
|
||||
title: (번역중) Methods and tools for efficient training on a single GPU
|
||||
- local: perf_train_gpu_many
|
||||
title: 다중 GPU에서 훈련 진행하기
|
||||
- local: in_translation
|
||||
@ -191,7 +213,7 @@
|
||||
title: 테스트
|
||||
- local: pr_checks
|
||||
title: Pull Request에 대한 검사
|
||||
title: (번역중) 기여하기
|
||||
title: 기여하기
|
||||
- sections:
|
||||
- local: philosophy
|
||||
title: 이념과 목표
|
||||
|
@ -1,22 +0,0 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# 사용자 정의 도구와 프롬프트[[custom-tools-and-prompts]]
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
The Agents framework has significantly changed in version v4.41.0.
|
||||
This document has been removed as it was referencing an older API.
|
||||
|
||||
We eagerly welcome new contributions for the updated API.
|
||||
|
||||
</Tip>
|
@ -78,6 +78,8 @@
|
||||
title: 如何将流水线添加到 🤗 Transformers?
|
||||
title: 贡献
|
||||
- sections:
|
||||
- local: philosophy
|
||||
title: Transformers的设计理念
|
||||
- local: task_summary
|
||||
title: 🤗Transformers能做什么
|
||||
- local: tokenizer_summary
|
||||
|
@ -228,7 +228,7 @@ The sun.</s>
|
||||
>>> from transformers import AutoTokenizer
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
|
||||
|
||||
>>> tokenizer.default_chat_template
|
||||
>>> tokenizer.chat_template
|
||||
"{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}"
|
||||
```
|
||||
|
||||
|
@ -28,7 +28,7 @@ Callbacks是“只读”的代码片段,除了它们返回的[TrainerControl]
|
||||
- [`PrinterCallback`] 或 [`ProgressCallback`],用于显示进度和打印日志(如果通过[`TrainingArguments`]停用tqdm,则使用第一个函数;否则使用第二个)。
|
||||
- [`~integrations.TensorBoardCallback`],如果TensorBoard可访问(通过PyTorch版本 >= 1.4 或者 tensorboardX)。
|
||||
- [`~integrations.WandbCallback`],如果安装了[wandb](https://www.wandb.com/)。
|
||||
- [`~integrations.CometCallback`],如果安装了[comet_ml](https://www.comet.ml/site/)。
|
||||
- [`~integrations.CometCallback`],如果安装了[comet_ml](https://www.comet.com/site/)。
|
||||
- [`~integrations.MLflowCallback`],如果安装了[mlflow](https://www.mlflow.org/)。
|
||||
- [`~integrations.NeptuneCallback`],如果安装了[neptune](https://neptune.ai/)。
|
||||
- [`~integrations.AzureMLCallback`],如果安装了[azureml-sdk](https://pypi.org/project/azureml-sdk/)。
|
||||
|
67
docs/source/zh/philosophy.md
Normal file
@ -0,0 +1,67 @@
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
|
||||
|
||||
# Transformers 的设计理念
|
||||
|
||||
🤗 Transformers 是一个专为以下用户群体构建的库:
|
||||
|
||||
- 寻求使用、研究或扩展大规模 Transformers 模型的机器学习研究人员和教育者。
|
||||
- 希望微调这些模型或在生产环境中使用它们(或两者兼而有之)的实际操作者。
|
||||
- 只想下载预训练模型并将其用于解决给定机器学习任务的工程师。
|
||||
|
||||
Transformers 设计时有两个主要目标:
|
||||
|
||||
1. 尽可能简单快速地使用:
|
||||
|
||||
- 我们尽可能地限制用户能接触的抽象层,实际上几乎没有抽象。用户只需学习三个标准类即可使用每个模型:[configuration](main_classes/configuration)、[models](main_classes/model) 和一个预处理类(用于 NLP 的 [tokenizer](main_classes/tokenizer),用于视觉的 [image processor](main_classes/image_processor),用于音频的 [feature extractor](main_classes/feature_extractor),以及用于多模态输入的 [processor](main_classes/processors))。
|
||||
- 所有这些类都可以通过一个通用的 `from_pretrained()` 方法从预训练实例中简单统一地初始化,该方法会从提供在 [Hugging Face Hub](https://huggingface.co/models) 上的预训练检查点(如果需要的话)下载、缓存和加载相关类实例及相关数据(配置的超参数、分词器的词汇表和模型的权重)。
|
||||
- 在这三个基本类之上,该库提供了两种 API:[`pipeline`] 用于快速在给定任务上使用模型进行推断,以及 [`Trainer`] 用于快速训练或微调 PyTorch 模型(所有 TensorFlow 模型与 `Keras.fit` 兼容)。
|
||||
- 因此,Transformers 不是神经网络的模块化工具箱。如果要基于 Transformers 扩展或搭建新项目,请使用常规的 Python、PyTorch、TensorFlow、Keras 模块,并从 Transformers 的基类继承以重用模型加载和保存等功能。如果想了解更多有关我们的模型代码的设计理念,请查看我们的[重复自己](https://huggingface.co/blog/transformers-design-philosophy)博文。
|
||||
|
||||
2. 提供与原始模型性能尽可能接近的最新模型:
|
||||
|
||||
- 我们为每种架构提供至少一个示例,复现了该架构官方作者提供的结果。
|
||||
- 代码通常尽可能接近原始代码库,这意味着某些 PyTorch 代码可能不够*pytorchic*,因为它是转换后的 TensorFlow 代码,反之亦然。
|
||||
|
||||
其他几个目标:
|
||||
|
||||
- 尽可能一致地公开模型的内部:
|
||||
|
||||
- 我们使用单一 API 提供对完整隐藏状态和注意力权重的访问。
|
||||
- 预处理类和基本模型 API 标准化,便于在不同模型之间轻松切换。
|
||||
|
||||
- 结合主观选择的有前途的工具进行模型微调和调查:
|
||||
|
||||
- 简单一致的方法来向词汇表和嵌入中添加新标记以进行微调。
|
||||
- 简单的方法来屏蔽和修剪 Transformer 头部。
|
||||
|
||||
- 轻松在 PyTorch、TensorFlow 2.0 和 Flax 之间切换,允许使用一个框架进行训练并使用另一个进行推断。
|
||||
|
||||
## 主要概念
|
||||
|
||||
该库围绕每个模型的三类类构建:
|
||||
|
||||
- **模型类** 可以是 PyTorch 模型([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module))、Keras 模型([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model))或 JAX/Flax 模型([flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html)),这些模型可以使用库中提供的预训练权重。
|
||||
- **配置类** 存储构建模型所需的超参数(如层数和隐藏大小)。通常情况下,如果您使用不进行任何修改的预训练模型,则创建模型将自动处理配置的实例化(配置是模型的一部分)。
|
||||
- **预处理类** 将原始数据转换为模型可接受的格式。一个 [tokenizer](main_classes/tokenizer) 存储每个模型的词汇表,并提供编码和解码字符串为要馈送到模型的令牌嵌入索引列表的方法。[Image processors](main_classes/image_processor) 预处理视觉输入,[feature extractors](main_classes/feature_extractor) 预处理音频输入,而 [processor](main_classes/processors) 则处理多模态输入。
|
||||
|
||||
所有这些类都可以从预训练实例中实例化、本地保存,并通过以下三种方法与 Hub 共享:
|
||||
|
||||
- `from_pretrained()` 允许您从库自身提供的预训练版本(支持的模型可在 [Model Hub](https://huggingface.co/models) 上找到)或用户本地(或服务器上)存储的版本实例化模型、配置和预处理类。
|
||||
- `save_pretrained()` 允许您本地保存模型、配置和预处理类,以便可以使用 `from_pretrained()` 重新加载。
|
||||
- `push_to_hub()` 允许您将模型、配置和预处理类共享到 Hub,以便所有人都可以轻松访问。
|
@ -290,7 +290,7 @@ class FlaxDataCollatorForBartDenoisingLM:
|
||||
def __post_init__(self):
|
||||
if self.tokenizer.mask_token is None or self.tokenizer.eos_token is None:
|
||||
raise ValueError(
|
||||
"This tokenizer does not have a mask token or eos token token which is necessary for denoising"
|
||||
"This tokenizer does not have a mask token or eos token which is necessary for denoising"
|
||||
" language modeling. "
|
||||
)
|
||||
|
||||
|
@ -225,9 +225,6 @@ class DataTrainingArguments:
|
||||
)
|
||||
},
|
||||
)
|
||||
overwrite_cache: bool = field(
|
||||
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
|
||||
)
|
||||
validation_split_percentage: Optional[int] = field(
|
||||
default=5,
|
||||
metadata={
|
||||
|
@ -61,7 +61,7 @@ from transformers.utils import check_min_version, send_example_telemetry
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
Array = Any
|
||||
Dataset = datasets.arrow_dataset.Dataset
|
||||
|
@ -60,7 +60,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
require_version("datasets>=2.14.0", "To fix: pip install -r examples/flax/speech-recognition/requirements.txt")
|
||||
|
||||
|
@ -56,7 +56,7 @@ from transformers.utils import check_min_version, send_example_telemetry
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
Array = Any
|
||||
Dataset = datasets.arrow_dataset.Dataset
|
||||
@ -484,7 +484,7 @@ def main():
|
||||
label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
|
||||
else:
|
||||
logger.warning(
|
||||
"Your model seems to have been trained with labels, but they don't match the dataset: ",
|
||||
"Your model seems to have been trained with labels, but they don't match the dataset: "
|
||||
f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}."
|
||||
"\nIgnoring the model labels as a result.",
|
||||
)
|
||||
|
@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")
|
||||
|
||||
|
@ -200,7 +200,7 @@ You can easily log and monitor your runs code. The following are currently suppo
|
||||
|
||||
* [TensorBoard](https://www.tensorflow.org/tensorboard)
|
||||
* [Weights & Biases](https://docs.wandb.ai/integrations/huggingface)
|
||||
* [Comet ML](https://www.comet.ml/docs/python-sdk/huggingface/)
|
||||
* [Comet ML](https://www.comet.com/docs/v2/integrations/ml-frameworks/transformers/)
|
||||
* [Neptune](https://docs.neptune.ai/integrations-and-supported-tools/model-training/hugging-face)
|
||||
* [ClearML](https://clear.ml/docs/latest/docs/getting_started/ds/ds_first_steps)
|
||||
* [DVCLive](https://dvc.org/doc/dvclive/ml-frameworks/huggingface)
|
||||
@ -244,7 +244,7 @@ Additional configuration options are available through generic [wandb environmen
|
||||
|
||||
Refer to related [documentation & examples](https://docs.wandb.ai/integrations/huggingface).
|
||||
|
||||
### Comet.ml
|
||||
### Comet
|
||||
|
||||
To use `comet_ml`, install the Python package with:
|
||||
|
||||
|
@ -45,7 +45,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
|
||||
|
||||
|
@ -54,7 +54,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt")
|
||||
|
||||
@ -190,9 +190,9 @@ class DataTrainingArguments:
|
||||
if self.validation_file is not None:
|
||||
extension = self.validation_file.split(".")[-1]
|
||||
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
|
||||
if self.validation_file is not None:
|
||||
extension = self.validation_file.split(".")[-1]
|
||||
assert extension == "json", "`validation_file` should be a json file."
|
||||
if self.test_file is not None:
|
||||
extension = self.test_file.split(".")[-1]
|
||||
assert extension in ["csv", "json"], "`test_file` should be a csv or a json file."
|
||||
|
||||
|
||||
dataset_name_mapping = {
|
||||
|
@ -56,7 +56,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
|
||||
|
||||
|
@ -49,7 +49,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
@ -43,7 +43,7 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
|
||||
|
||||
|
@ -48,7 +48,7 @@ Any model supported by the AutoModelForMaskedImageModeling API can be used.
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
|
||||
|
||||
|
@ -53,7 +53,7 @@ Any model supported by the AutoModelForMaskedImageModeling API can be used.
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
|
||||
|
||||
|
@ -46,7 +46,8 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/instance-segmentation/requirements.txt")
|
||||
|
||||
|
||||
|
@ -52,7 +52,8 @@ from transformers.utils.versions import require_version
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/instance-segmentation/requirements.txt")
|
||||
|
||||
|
||||
|
@ -55,7 +55,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
|
@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
@ -58,7 +58,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
|
@ -60,7 +60,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
@ -54,7 +54,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
|
@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
|
||||
check_min_version("4.42.0.dev0")
|
||||
check_min_version("4.44.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
Some files were not shown because too many files have changed in this diff.