Mirror of https://github.com/huggingface/transformers.git (synced 2025-10-23 10:54:36 +08:00)

Compare commits: base-model...fix-quanti (255 commits)
Author | SHA1 | Date | |
---|---|---|---|
f3d6d7e93f | |||
85b20d4208 | |||
3b07ca78bb | |||
00c6e5fea3 | |||
475664e2c6 | |||
0710e9b1e8 | |||
f99c279d20 | |||
d1efaf0318 | |||
19919689b2 | |||
d0b65bb479 | |||
ad63d20dff | |||
286393fbb1 | |||
4705b04c74 | |||
2b4734bd49 | |||
bd41b9c1ac | |||
6acd5aecb3 | |||
0d6a60fe55 | |||
b7fc2daf8b | |||
bab605dd04 | |||
9fd9476005 | |||
257bc670fb | |||
2bea6bf24e | |||
a86dad56bc | |||
d6064754ea | |||
581cf96e0c | |||
eca74d1367 | |||
52cc204dd7 | |||
aa3778afc2 | |||
c90e6e9625 | |||
1fcaad6df9 | |||
3af425d4c6 | |||
064cd7cdac | |||
348f3285c5 | |||
d6b3c7486b | |||
6cc9c8d7d1 | |||
4cc65e990f | |||
41a0e58e5b | |||
de77f5b1ec | |||
8c5e29bad5 | |||
471cf1de63 | |||
29f322d04d | |||
fb8e6c50e4 | |||
e97c760006 | |||
c7bc79bd2a | |||
d1eafe8d4e | |||
0e56fb69a2 | |||
7e813f9cf0 | |||
92429057d9 | |||
279c2e302a | |||
d13c390d01 | |||
d6d930a64b | |||
927ce1d39f | |||
49b5ab6a27 | |||
5b08db8844 | |||
3a8ec8c467 | |||
2b550c47b2 | |||
44715225e3 | |||
79d6f9fd70 | |||
13d36e89fe | |||
021006e1b0 | |||
788e1092e9 | |||
ad5d40de9c | |||
8084b26294 | |||
b56d8f07e4 | |||
78afa1c537 | |||
181d453069 | |||
e7139d06f5 | |||
be37d34f44 | |||
ab4656f6b7 | |||
ba531278ca | |||
a844297088 | |||
d68a91aebf | |||
121830ab47 | |||
a41677a68b | |||
3dce98a437 | |||
ebd2029483 | |||
69632aadb7 | |||
c6814b4ee8 | |||
bc1c90a755 | |||
80b4c5dcc9 | |||
0f733110a6 | |||
19085c28da | |||
69bcb86c58 | |||
be2c0e7bff | |||
4303d88c09 | |||
47e5432805 | |||
2b8a15cc3f | |||
91455c1825 | |||
48385aa4f4 | |||
5932606d8e | |||
2be2984462 | |||
00d077267a | |||
a6ecb54159 | |||
cbf924b76c | |||
340500b1a9 | |||
9e125d9a2e | |||
57f551c78d | |||
a41e08aa19 | |||
e28be7a692 | |||
48da44be24 | |||
fe4ca2f4a7 | |||
c9d1e5238a | |||
d253de6d58 | |||
beb9b5b022 | |||
dd3933dd65 | |||
90e2df5d55 | |||
4542b8fb27 | |||
523f6e743c | |||
3f9ff19b4e | |||
f94b0c59f2 | |||
2638d54e78 | |||
b8aadc31d5 | |||
6321876b5b | |||
94f487626a | |||
f19d018bff | |||
62116c967f | |||
26c83490d2 | |||
0adbc873d0 | |||
6bb8565f0c | |||
949cca4061 | |||
97d2f9d8ae | |||
6a2627918d | |||
9e771bf402 | |||
ecd60d01c3 | |||
42c489f2ae | |||
068b663f90 | |||
1d3f35f30a | |||
6515c25953 | |||
66291778dd | |||
730d2a52e7 | |||
1a374799ce | |||
ce091b1bda | |||
3e8f0fbf44 | |||
055afdb6bb | |||
487dab1b2b | |||
a63e92e2f0 | |||
8124a234ca | |||
cf8091c017 | |||
388e6659bf | |||
b47d9b2f8a | |||
8e97b44087 | |||
63380b77d4 | |||
957b05b413 | |||
f0d5b2ff04 | |||
1ddb64937c | |||
e7337ee7be | |||
8b479e39bb | |||
3f03c379d2 | |||
8f64b177f6 | |||
94555437e2 | |||
8733297b41 | |||
b815fae359 | |||
9be4728af8 | |||
51bd0ceb9e | |||
107fedc1e2 | |||
258dd9cc69 | |||
f39f4960f3 | |||
63c3116530 | |||
7c233980f4 | |||
b11050d6a2 | |||
e8d960329e | |||
fef8b7f8e9 | |||
0fe0bae0a8 | |||
a861db01e5 | |||
b9374a0763 | |||
4fa91b1be5 | |||
706703bba6 | |||
179d02ffb8 | |||
12f2ebef63 | |||
00915d3041 | |||
14b597f518 | |||
30580f035b | |||
db1d4c5a0b | |||
7baf00089a | |||
3017536ebf | |||
e959530b8f | |||
bd92073692 | |||
7426d02ea8 | |||
19b9d8ae13 | |||
7f5077e536 | |||
cbfb8d7b27 | |||
ac1a1b66b9 | |||
cff4caa0c1 | |||
e3af4fec91 | |||
c8a2b25f91 | |||
8e67230860 | |||
27361bd218 | |||
da7d64f4ff | |||
2256875a77 | |||
9e94801146 | |||
c53d53da89 | |||
fc8764c9a6 | |||
f263e88dcf | |||
6f3e0b68e0 | |||
2c2495cc7b | |||
25992b493c | |||
42ebb6c23e | |||
9215cc62d4 | |||
691d1b52c3 | |||
3bd1a0ddf1 | |||
8cb522b419 | |||
72861e11eb | |||
53742b11f5 | |||
69bc848480 | |||
48ef468c74 | |||
b070025aa6 | |||
4a60bae8e2 | |||
09a309d273 | |||
2a004f9ff1 | |||
a3201cea14 | |||
d84569387f | |||
32c95bd847 | |||
bb965d8e87 | |||
1c287aecfc | |||
65b8e38aac | |||
87b30c3589 | |||
47cc4da351 | |||
bc3d5781e7 | |||
fbb18ce68b | |||
c4161238bd | |||
79254c9b61 | |||
48292a9848 | |||
ea219ed164 | |||
cc3a361b46 | |||
bc3253f076 | |||
0013ba61e5 | |||
c7eb95581a | |||
071a161d3e | |||
7652804d23 | |||
994cad2790 | |||
2829013d2d | |||
89f6956015 | |||
50d3530aa0 | |||
81aa9b2e07 | |||
cb384dcd7a | |||
1e4286fd59 | |||
ed1807bab3 | |||
b80b3ec529 | |||
556d2c23c6 | |||
b1a51ea464 | |||
d126f35427 | |||
d8663cb8c5 | |||
1c4b62b219 | |||
e9756cdbc7 | |||
af9b2eaa54 | |||
a929c466d0 | |||
858545047c | |||
94ae1ba5b5 | |||
a1cf9f3390 | |||
4fce7a0f0f | |||
f2fb41948e | |||
1b9978c360 | |||
f2e197c30a | |||
8a16edce67 | |||
6f775970c7 |
@ -154,7 +154,7 @@ jobs:
|
|||||||
path: ~/transformers/installed.txt
|
path: ~/transformers/installed.txt
|
||||||
- run: python -c "from transformers import *" || (echo '🚨 import failed, this means you introduced unprotected imports! 🚨'; exit 1)
|
- run: python -c "from transformers import *" || (echo '🚨 import failed, this means you introduced unprotected imports! 🚨'; exit 1)
|
||||||
- run: ruff check examples tests src utils
|
- run: ruff check examples tests src utils
|
||||||
- run: ruff format tests src utils --check
|
- run: ruff format examples tests src utils --check
|
||||||
- run: python utils/custom_init_isort.py --check_only
|
- run: python utils/custom_init_isort.py --check_only
|
||||||
- run: python utils/sort_auto_mappings.py --check_only
|
- run: python utils/sort_auto_mappings.py --check_only
|
||||||
- run: python utils/check_doc_toc.py
|
- run: python utils/check_doc_toc.py
|
||||||
|
@ -30,9 +30,28 @@ COMMON_ENV_VARIABLES = {
|
|||||||
"RUN_PIPELINE_TESTS": False,
|
"RUN_PIPELINE_TESTS": False,
|
||||||
}
|
}
|
||||||
# Disable the use of {"s": None} as the output is way too long, causing the navigation on CircleCI impractical
|
# Disable the use of {"s": None} as the output is way too long, causing the navigation on CircleCI impractical
|
||||||
COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "dist": "loadfile", "vvv": None, "rsfE":None}
|
COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "vvv": None, "rsfE":None}
|
||||||
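For readers unfamiliar with how these option dictionaries end up on the pytest command line, the sketch below shows one plausible rendering; the helper that actually formats them is not part of this diff, so the convention used here (bare `-flag` for `None` values, `--key=value` otherwise, as hinted by the `{"s": None}` comment above) is an assumption.

```python
# Hypothetical rendering of the pytest option dictionary into CLI flags; the helper
# that actually does this is not shown in this diff, so treat the convention below
# (bare -flag for None values, --key=value otherwise) as an assumption.
COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "vvv": None, "rsfE": None}

flags = " ".join(
    f"-{name}" if value is None else f"--{name}={value}"
    for name, value in COMMON_PYTEST_OPTIONS.items()
)
print(flags)  # --max-worker-restart=0 -vvv -rsfE
```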
DEFAULT_DOCKER_IMAGE = [{"image": "cimg/python:3.8.12"}]
|
DEFAULT_DOCKER_IMAGE = [{"image": "cimg/python:3.8.12"}]
|
||||||
|
|
||||||
|
# Strings that commonly appear in the output of flaky tests when they fail. These are used with `pytest-rerunfailures`
|
||||||
|
# to rerun the tests that match these patterns.
|
||||||
|
FLAKY_TEST_FAILURE_PATTERNS = [
|
||||||
|
"OSError", # Machine/connection transient error
|
||||||
|
"Timeout", # Machine/connection transient error
|
||||||
|
"ConnectionError", # Connection transient error
|
||||||
|
"FileNotFoundError", # Raised by `datasets` on Hub failures
|
||||||
|
"PIL.UnidentifiedImageError", # Raised by `PIL.Image.open` on connection issues
|
||||||
|
"HTTPError", # Also catches HfHubHTTPError
|
||||||
|
"AssertionError: Tensor-likes are not close!", # `torch.testing.assert_close`, we might have unlucky random values
|
||||||
|
# TODO: error downloading tokenizer's `merged.txt` from hub can cause all the exceptions below. Throw and handle
|
||||||
|
# them under a single message.
|
||||||
|
"TypeError: expected str, bytes or os.PathLike object, not NoneType",
|
||||||
|
"TypeError: stat: path should be string, bytes, os.PathLike or integer, not NoneType",
|
||||||
|
"Converting from Tiktoken failed",
|
||||||
|
"KeyError: <class ",
|
||||||
|
"TypeError: not a string",
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
class EmptyJob:
|
class EmptyJob:
|
||||||
job_name = "empty"
|
job_name = "empty"
|
||||||
@ -124,7 +143,9 @@ class CircleCIJob:
|
|||||||
# Examples special case: we need to download NLTK files in advance to avoid cuncurrency issues
|
# Examples special case: we need to download NLTK files in advance to avoid cuncurrency issues
|
||||||
timeout_cmd = f"timeout {self.command_timeout} " if self.command_timeout else ""
|
timeout_cmd = f"timeout {self.command_timeout} " if self.command_timeout else ""
|
||||||
marker_cmd = f"-m '{self.marker}'" if self.marker is not None else ""
|
marker_cmd = f"-m '{self.marker}'" if self.marker is not None else ""
|
||||||
additional_flags = f" -p no:warning -o junit_family=xunit1 --junitxml=test-results/junit.xml"
|
junit_flags = f" -p no:warning -o junit_family=xunit1 --junitxml=test-results/junit.xml"
|
||||||
|
joined_flaky_patterns = "|".join(FLAKY_TEST_FAILURE_PATTERNS)
|
||||||
|
repeat_on_failure_flags = f"--reruns 5 --reruns-delay 2 --only-rerun '({joined_flaky_patterns})'"
|
||||||
parallel = f' << pipeline.parameters.{self.job_name}_parallelism >> '
|
parallel = f' << pipeline.parameters.{self.job_name}_parallelism >> '
|
||||||
steps = [
|
steps = [
|
||||||
"checkout",
|
"checkout",
|
||||||
@ -152,7 +173,7 @@ class CircleCIJob:
|
|||||||
},
|
},
|
||||||
{"run": {
|
{"run": {
|
||||||
"name": "Run tests",
|
"name": "Run tests",
|
||||||
"command": f"({timeout_cmd} python3 -m pytest {marker_cmd} -n {self.pytest_num_workers} {additional_flags} {' '.join(pytest_flags)} $(cat splitted_tests.txt) | tee tests_output.txt)"}
|
"command": f"({timeout_cmd} python3 -m pytest {marker_cmd} -n {self.pytest_num_workers} {junit_flags} {repeat_on_failure_flags} {' '.join(pytest_flags)} $(cat splitted_tests.txt) | tee tests_output.txt)"}
|
||||||
},
|
},
|
||||||
{"run": {"name": "Expand to show skipped tests", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --skip"}},
|
{"run": {"name": "Expand to show skipped tests", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --skip"}},
|
||||||
{"run": {"name": "Failed tests: show reasons", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --fail"}},
|
{"run": {"name": "Failed tests: show reasons", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --fail"}},
|
||||||
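As a sanity check on the rerun logic added above, here is a small self-contained sketch (not the repository's code verbatim, and with the pattern list shortened for illustration) of how the flaky-failure patterns are joined into a single `--only-rerun` regex for `pytest-rerunfailures`.

```python
# Minimal sketch: assemble the pytest-rerunfailures flags from a shortened,
# illustrative subset of the FLAKY_TEST_FAILURE_PATTERNS defined in the diff above.
FLAKY_TEST_FAILURE_PATTERNS = [
    "OSError",          # machine/connection transient error
    "Timeout",          # machine/connection transient error
    "ConnectionError",  # connection transient error
]

joined_flaky_patterns = "|".join(FLAKY_TEST_FAILURE_PATTERNS)
repeat_on_failure_flags = f"--reruns 5 --reruns-delay 2 --only-rerun '({joined_flaky_patterns})'"

print(repeat_on_failure_flags)
# --reruns 5 --reruns-delay 2 --only-rerun '(OSError|Timeout|ConnectionError)'
```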
@ -185,6 +206,9 @@ torch_job = CircleCIJob(
|
|||||||
generate_job = CircleCIJob(
|
generate_job = CircleCIJob(
|
||||||
"generate",
|
"generate",
|
||||||
docker_image=[{"image": "huggingface/transformers-torch-light"}],
|
docker_image=[{"image": "huggingface/transformers-torch-light"}],
|
||||||
|
# networkx==3.3 (after #36957) cause some issues
|
||||||
|
# TODO: remove this once it works directly
|
||||||
|
install_steps=["uv venv && uv pip install . && uv pip install networkx==3.2.1"],
|
||||||
marker="generate",
|
marker="generate",
|
||||||
parallelism=6,
|
parallelism=6,
|
||||||
)
|
)
|
||||||
@ -248,6 +272,7 @@ examples_torch_job = CircleCIJob(
|
|||||||
docker_image=[{"image":"huggingface/transformers-examples-torch"}],
|
docker_image=[{"image":"huggingface/transformers-examples-torch"}],
|
||||||
# TODO @ArthurZucker remove this once docker is easier to build
|
# TODO @ArthurZucker remove this once docker is easier to build
|
||||||
install_steps=["uv venv && uv pip install . && uv pip install -r examples/pytorch/_tests_requirements.txt"],
|
install_steps=["uv venv && uv pip install . && uv pip install -r examples/pytorch/_tests_requirements.txt"],
|
||||||
|
pytest_num_workers=4,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -255,6 +280,7 @@ examples_tensorflow_job = CircleCIJob(
|
|||||||
"examples_tensorflow",
|
"examples_tensorflow",
|
||||||
additional_env={"OMP_NUM_THREADS": 8},
|
additional_env={"OMP_NUM_THREADS": 8},
|
||||||
docker_image=[{"image":"huggingface/transformers-examples-tf"}],
|
docker_image=[{"image":"huggingface/transformers-examples-tf"}],
|
||||||
|
pytest_num_workers=2,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
@ -305,6 +331,9 @@ repo_utils_job = CircleCIJob(
|
|||||||
non_model_job = CircleCIJob(
|
non_model_job = CircleCIJob(
|
||||||
"non_model",
|
"non_model",
|
||||||
docker_image=[{"image": "huggingface/transformers-torch-light"}],
|
docker_image=[{"image": "huggingface/transformers-torch-light"}],
|
||||||
|
# networkx==3.3 (after #36957) cause some issues
|
||||||
|
# TODO: remove this once it works directly
|
||||||
|
install_steps=["uv venv && uv pip install . && uv pip install networkx==3.2.1"],
|
||||||
marker="not generate",
|
marker="not generate",
|
||||||
parallelism=6,
|
parallelism=6,
|
||||||
)
|
)
|
||||||
@ -334,9 +363,9 @@ doc_test_job = CircleCIJob(
|
|||||||
pytest_num_workers=1,
|
pytest_num_workers=1,
|
||||||
)
|
)
|
||||||
|
|
||||||
REGULAR_TESTS = [torch_job, tf_job, flax_job, hub_job, onnx_job, tokenization_job, processor_job, generate_job, non_model_job] # fmt: skip
|
REGULAR_TESTS = [torch_job, flax_job, hub_job, onnx_job, tokenization_job, processor_job, generate_job, non_model_job] # fmt: skip
|
||||||
EXAMPLES_TESTS = [examples_torch_job, examples_tensorflow_job]
|
EXAMPLES_TESTS = [examples_torch_job]
|
||||||
PIPELINE_TESTS = [pipelines_torch_job, pipelines_tf_job]
|
PIPELINE_TESTS = [pipelines_torch_job]
|
||||||
REPO_UTIL_TESTS = [repo_utils_job]
|
REPO_UTIL_TESTS = [repo_utils_job]
|
||||||
DOC_TESTS = [doc_test_job]
|
DOC_TESTS = [doc_test_job]
|
||||||
ALL_TESTS = REGULAR_TESTS + EXAMPLES_TESTS + PIPELINE_TESTS + REPO_UTIL_TESTS + DOC_TESTS + [custom_tokenizers_job] + [exotic_models_job] # fmt: skip
|
ALL_TESTS = REGULAR_TESTS + EXAMPLES_TESTS + PIPELINE_TESTS + REPO_UTIL_TESTS + DOC_TESTS + [custom_tokenizers_job] + [exotic_models_job] # fmt: skip
|
||||||
|
10 .github/ISSUE_TEMPLATE/bug-report.yml (vendored)
@ -38,21 +38,21 @@ body:
|
|||||||
|
|
||||||
- text models: @ArthurZucker
|
- text models: @ArthurZucker
|
||||||
- vision models: @amyeroberts, @qubvel
|
- vision models: @amyeroberts, @qubvel
|
||||||
- speech models: @ylacombe, @eustlb
|
- speech models: @eustlb
|
||||||
- graph models: @clefourrier
|
- graph models: @clefourrier
|
||||||
|
|
||||||
Library:
|
Library:
|
||||||
|
|
||||||
- flax: @sanchit-gandhi
|
- flax: @gante and @Rocketknight1
|
||||||
- generate: @zucchini-nlp (visual-language models) or @gante (all others)
|
- generate: @zucchini-nlp (visual-language models) or @gante (all others)
|
||||||
- pipelines: @Rocketknight1
|
- pipelines: @Rocketknight1
|
||||||
- tensorflow: @gante and @Rocketknight1
|
- tensorflow: @gante and @Rocketknight1
|
||||||
- tokenizers: @ArthurZucker and @itazap
|
- tokenizers: @ArthurZucker and @itazap
|
||||||
- trainer: @muellerzr @SunMarc
|
- trainer: @zach-huggingface @SunMarc
|
||||||
|
|
||||||
Integrations:
|
Integrations:
|
||||||
|
|
||||||
- deepspeed: HF Trainer/Accelerate: @muellerzr
|
- deepspeed: HF Trainer/Accelerate: @SunMarc @zach-huggingface
|
||||||
- ray/raytune: @richardliaw, @amogkam
|
- ray/raytune: @richardliaw, @amogkam
|
||||||
- Big Model Inference: @SunMarc
|
- Big Model Inference: @SunMarc
|
||||||
- quantization (bitsandbytes, autogpt): @SunMarc @MekkCyber
|
- quantization (bitsandbytes, autogpt): @SunMarc @MekkCyber
|
||||||
@ -72,7 +72,7 @@ body:
|
|||||||
|
|
||||||
Maintained examples (not research project or legacy):
|
Maintained examples (not research project or legacy):
|
||||||
|
|
||||||
- Flax: @sanchit-gandhi
|
- Flax: @Rocketknight1
|
||||||
- PyTorch: See Models above and tag the person corresponding to the modality of the example.
|
- PyTorch: See Models above and tag the person corresponding to the modality of the example.
|
||||||
- TensorFlow: @Rocketknight1
|
- TensorFlow: @Rocketknight1
|
||||||
|
|
||||||
|
10 .github/PULL_REQUEST_TEMPLATE.md (vendored)
@ -41,22 +41,22 @@ Models:
|
|||||||
|
|
||||||
- text models: @ArthurZucker
|
- text models: @ArthurZucker
|
||||||
- vision models: @amyeroberts, @qubvel
|
- vision models: @amyeroberts, @qubvel
|
||||||
- speech models: @ylacombe, @eustlb
|
- speech models: @eustlb
|
||||||
- graph models: @clefourrier
|
- graph models: @clefourrier
|
||||||
|
|
||||||
Library:
|
Library:
|
||||||
|
|
||||||
- flax: @sanchit-gandhi
|
- flax: @gante and @Rocketknight1
|
||||||
- generate: @zucchini-nlp (visual-language models) or @gante (all others)
|
- generate: @zucchini-nlp (visual-language models) or @gante (all others)
|
||||||
- pipelines: @Rocketknight1
|
- pipelines: @Rocketknight1
|
||||||
- tensorflow: @gante and @Rocketknight1
|
- tensorflow: @gante and @Rocketknight1
|
||||||
- tokenizers: @ArthurZucker
|
- tokenizers: @ArthurZucker
|
||||||
- trainer: @muellerzr and @SunMarc
|
- trainer: @zach-huggingface and @SunMarc
|
||||||
- chat templates: @Rocketknight1
|
- chat templates: @Rocketknight1
|
||||||
|
|
||||||
Integrations:
|
Integrations:
|
||||||
|
|
||||||
- deepspeed: HF Trainer/Accelerate: @muellerzr
|
- deepspeed: HF Trainer/Accelerate: @SunMarc @zach-huggingface
|
||||||
- ray/raytune: @richardliaw, @amogkam
|
- ray/raytune: @richardliaw, @amogkam
|
||||||
- Big Model Inference: @SunMarc
|
- Big Model Inference: @SunMarc
|
||||||
- quantization (bitsandbytes, autogpt): @SunMarc @MekkCyber
|
- quantization (bitsandbytes, autogpt): @SunMarc @MekkCyber
|
||||||
@ -72,7 +72,7 @@ HF projects:
|
|||||||
|
|
||||||
Maintained examples (not research project or legacy):
|
Maintained examples (not research project or legacy):
|
||||||
|
|
||||||
- Flax: @sanchit-gandhi
|
- Flax: @Rocketknight1
|
||||||
- PyTorch: See Models above and tag the person corresponding to the modality of the example.
|
- PyTorch: See Models above and tag the person corresponding to the modality of the example.
|
||||||
- TensorFlow: @Rocketknight1
|
- TensorFlow: @Rocketknight1
|
||||||
|
|
||||||
|
102 .github/scripts/assign_reviewers.py (vendored, new file)
@ -0,0 +1,102 @@
|
|||||||
|
# coding=utf-8
|
||||||
|
# Copyright 2025 the HuggingFace Inc. team. All rights reserved.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import github
|
||||||
|
import json
|
||||||
|
from github import Github
|
||||||
|
import re
|
||||||
|
from collections import Counter
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
def pattern_to_regex(pattern):
|
||||||
|
if pattern.startswith("/"):
|
||||||
|
start_anchor = True
|
||||||
|
pattern = re.escape(pattern[1:])
|
||||||
|
else:
|
||||||
|
start_anchor = False
|
||||||
|
pattern = re.escape(pattern)
|
||||||
|
# Replace `*` with "any number of non-slash characters"
|
||||||
|
pattern = pattern.replace(r"\*", "[^/]*")
|
||||||
|
if start_anchor:
|
||||||
|
pattern = r"^\/?" + pattern # Allow an optional leading slash after the start of the string
|
||||||
|
return pattern
|
||||||
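To make the pattern translation concrete, here is a short runnable check of `pattern_to_regex` (the function body is reproduced from the script above); the example patterns come from the `codeowners_for_review_action` file added later in this diff.

```python
import re

# Reproduced from assign_reviewers.py above so this demo runs on its own.
def pattern_to_regex(pattern):
    if pattern.startswith("/"):
        start_anchor = True
        pattern = re.escape(pattern[1:])
    else:
        start_anchor = False
        pattern = re.escape(pattern)
    # Replace `*` with "any number of non-slash characters"
    pattern = pattern.replace(r"\*", "[^/]*")
    if start_anchor:
        pattern = r"^\/?" + pattern  # Allow an optional leading slash after the start of the string
    return pattern

print(pattern_to_regex("*.md"))
# [^/]*\.md  -> matches any Markdown file, per the `*.md @stevhliu` rule
print(pattern_to_regex("/src/transformers/models/llama/mod*_llama*"))
# ^\/?src/transformers/models/llama/mod[^/]*_llama[^/]*

# A single "mod*" rule therefore covers both the modeling and the modular file:
regex = pattern_to_regex("/src/transformers/models/llama/mod*_llama*")
print(bool(re.search(regex, "src/transformers/models/llama/modeling_llama.py")))  # True
print(bool(re.search(regex, "src/transformers/models/llama/modular_llama.py")))   # True
```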
|
|
||||||
|
def get_file_owners(file_path, codeowners_lines):
|
||||||
|
# Process lines in reverse (last matching pattern takes precedence)
|
||||||
|
for line in reversed(codeowners_lines):
|
||||||
|
# Skip comments and empty lines, strip inline comments
|
||||||
|
line = line.split('#')[0].strip()
|
||||||
|
if not line:
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Split into pattern and owners
|
||||||
|
parts = line.split()
|
||||||
|
pattern = parts[0]
|
||||||
|
# Can be empty, e.g. for dummy files with explicitly no owner!
|
||||||
|
owners = [owner.removeprefix("@") for owner in parts[1:]]
|
||||||
|
|
||||||
|
# Check if file matches pattern
|
||||||
|
file_regex = pattern_to_regex(pattern)
|
||||||
|
if re.search(file_regex, file_path) is not None:
|
||||||
|
return owners # Remember, can still be empty!
|
||||||
|
return [] # Should never happen, but just in case
|
||||||
|
|
||||||
|
def main():
|
||||||
|
script_dir = Path(__file__).parent.absolute()
|
||||||
|
with open(script_dir / "codeowners_for_review_action") as f:
|
||||||
|
codeowners_lines = f.readlines()
|
||||||
|
|
||||||
|
g = Github(os.environ['GITHUB_TOKEN'])
|
||||||
|
repo = g.get_repo("huggingface/transformers")
|
||||||
|
with open(os.environ['GITHUB_EVENT_PATH']) as f:
|
||||||
|
event = json.load(f)
|
||||||
|
|
||||||
|
# The PR number is available in the event payload
|
||||||
|
pr_number = event['pull_request']['number']
|
||||||
|
pr = repo.get_pull(pr_number)
|
||||||
|
pr_author = pr.user.login
|
||||||
|
|
||||||
|
existing_reviews = list(pr.get_reviews())
|
||||||
|
if existing_reviews:
|
||||||
|
print(f"Already has reviews: {[r.user.login for r in existing_reviews]}")
|
||||||
|
return
|
||||||
|
|
||||||
|
users_requested, teams_requested = pr.get_review_requests()
|
||||||
|
users_requested = list(users_requested)
|
||||||
|
if users_requested:
|
||||||
|
print(f"Reviewers already requested: {users_requested}")
|
||||||
|
return
|
||||||
|
|
||||||
|
locs_per_owner = Counter()
|
||||||
|
for file in pr.get_files():
|
||||||
|
owners = get_file_owners(file.filename, codeowners_lines)
|
||||||
|
for owner in owners:
|
||||||
|
locs_per_owner[owner] += file.changes
|
||||||
|
|
||||||
|
# Assign the top 2 based on locs changed as reviewers, but skip the owner if present
|
||||||
|
locs_per_owner.pop(pr_author, None)
|
||||||
|
top_owners = locs_per_owner.most_common(2)
|
||||||
|
print("Top owners", top_owners)
|
||||||
|
top_owners = [owner[0] for owner in top_owners]
|
||||||
|
try:
|
||||||
|
pr.create_review_request(top_owners)
|
||||||
|
except github.GithubException as e:
|
||||||
|
print(f"Failed to request review for {top_owners}: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
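Below is a hedged local smoke test of the two stages of this script: CODEOWNERS-style matching, then picking the top two owners by changed lines. It assumes the script is importable as `assign_reviewers` (for example when run from `.github/scripts/` with PyGithub installed, since the module imports it at the top), and every path, owner, and line count is made up for illustration.

```python
from collections import Counter

# Assumes the new script is importable as `assign_reviewers` (e.g. run from
# .github/scripts/ with PyGithub installed, since the module imports it at the top).
from assign_reviewers import get_file_owners

# Made-up CODEOWNERS-style lines; the real ones live in codeowners_for_review_action.
codeowners_lines = [
    "* @Rocketknight1 @ArthurZucker  # fallback rule\n",
    "*.md @stevhliu\n",
    "/src/transformers/models/llama/mod*_llama* @ArthurZucker @Cyrilvallez\n",
]

# Later (more specific) rules win because get_file_owners scans the lines in reverse.
print(get_file_owners("src/transformers/models/llama/modeling_llama.py", codeowners_lines))
# ['ArthurZucker', 'Cyrilvallez']
print(get_file_owners("README.md", codeowners_lines))
# ['stevhliu']

# Reviewer selection, mirroring main(): tally changed lines per owner,
# drop the PR author, then request the top two remaining owners.
changed_files = {  # hypothetical PR: path -> number of changed lines
    "src/transformers/models/llama/modeling_llama.py": 120,
    "docs/source/en/index.md": 40,
}
locs_per_owner = Counter()
for path, n_changed in changed_files.items():
    for owner in get_file_owners(path, codeowners_lines):
        locs_per_owner[owner] += n_changed

locs_per_owner.pop("ArthurZucker", None)  # pretend the PR author is ArthurZucker
top_owners = [owner for owner, _ in locs_per_owner.most_common(2)]
print(top_owners)  # ['Cyrilvallez', 'stevhliu'] with these made-up numbers
```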
370 .github/scripts/codeowners_for_review_action (vendored, new file)
@ -0,0 +1,370 @@
|
|||||||
|
# Top-level rules are matched only if nothing else matches
|
||||||
|
* @Rocketknight1 @ArthurZucker # if no one is pinged based on the other rules, he will do the dispatch
|
||||||
|
*.md @stevhliu
|
||||||
|
*tokenization* @ArthurZucker
|
||||||
|
docs/ @stevhliu
|
||||||
|
/benchmark/ @McPatate
|
||||||
|
/docker/ @ydshieh @ArthurZucker
|
||||||
|
|
||||||
|
# More high-level globs catch cases when specific rules later don't apply
|
||||||
|
/src/transformers/models/*/processing* @molbap @yonigozlan @qubvel
|
||||||
|
/src/transformers/models/*/image_processing* @qubvel
|
||||||
|
/src/transformers/models/*/image_processing_*_fast* @yonigozlan
|
||||||
|
|
||||||
|
# Owners of subsections of the library
|
||||||
|
/src/transformers/generation/ @gante
|
||||||
|
/src/transformers/pipeline/ @Rocketknight1 @yonigozlan
|
||||||
|
/src/transformers/integrations/ @SunMarc @MekkCyber @zach-huggingface
|
||||||
|
/src/transformers/quantizers/ @SunMarc @MekkCyber
|
||||||
|
tests/ @ydshieh
|
||||||
|
tests/generation/ @gante
|
||||||
|
|
||||||
|
/src/transformers/models/auto/ @ArthurZucker
|
||||||
|
/src/transformers/utils/ @ArthurZucker @Rocketknight1
|
||||||
|
/src/transformers/loss/ @ArthurZucker
|
||||||
|
/src/transformers/onnx/ @michaelbenayoun
|
||||||
|
|
||||||
|
# Specific files come after the sections/globs, so they take priority
|
||||||
|
/.circleci/config.yml @ArthurZucker @ydshieh
|
||||||
|
/utils/tests_fetcher.py @ydshieh
|
||||||
|
trainer.py @zach-huggingface @SunMarc
|
||||||
|
trainer_utils.py @zach-huggingface @SunMarc
|
||||||
|
/utils/modular_model_converter.py @Cyrilvallez @ArthurZucker
|
||||||
|
|
||||||
|
# Owners of individual models are specific / high priority, and so they come last
|
||||||
|
# mod* captures modeling and modular files
|
||||||
|
|
||||||
|
# Text models
|
||||||
|
/src/transformers/models/albert/mod*_albert* @ArthurZucker
|
||||||
|
/src/transformers/models/bamba/mod*_bamba* @ArthurZucker
|
||||||
|
/src/transformers/models/bart/mod*_bart* @ArthurZucker
|
||||||
|
/src/transformers/models/barthez/mod*_barthez* @ArthurZucker
|
||||||
|
/src/transformers/models/bartpho/mod*_bartpho* @ArthurZucker
|
||||||
|
/src/transformers/models/bert/mod*_bert* @ArthurZucker
|
||||||
|
/src/transformers/models/bert_generation/mod*_bert_generation* @ArthurZucker
|
||||||
|
/src/transformers/models/bert_japanese/mod*_bert_japanese* @ArthurZucker
|
||||||
|
/src/transformers/models/bertweet/mod*_bertweet* @ArthurZucker
|
||||||
|
/src/transformers/models/big_bird/mod*_big_bird* @ArthurZucker
|
||||||
|
/src/transformers/models/bigbird_pegasus/mod*_bigbird_pegasus* @ArthurZucker
|
||||||
|
/src/transformers/models/biogpt/mod*_biogpt* @ArthurZucker
|
||||||
|
/src/transformers/models/blenderbot/mod*_blenderbot* @ArthurZucker
|
||||||
|
/src/transformers/models/blenderbot_small/mod*_blenderbot_small* @ArthurZucker
|
||||||
|
/src/transformers/models/bloom/mod*_bloom* @ArthurZucker
|
||||||
|
/src/transformers/models/bort/mod*_bort* @ArthurZucker
|
||||||
|
/src/transformers/models/byt5/mod*_byt5* @ArthurZucker
|
||||||
|
/src/transformers/models/camembert/mod*_camembert* @ArthurZucker
|
||||||
|
/src/transformers/models/canine/mod*_canine* @ArthurZucker
|
||||||
|
/src/transformers/models/codegen/mod*_codegen* @ArthurZucker
|
||||||
|
/src/transformers/models/code_llama/mod*_code_llama* @ArthurZucker
|
||||||
|
/src/transformers/models/cohere/mod*_cohere* @ArthurZucker
|
||||||
|
/src/transformers/models/cohere2/mod*_cohere2* @ArthurZucker
|
||||||
|
/src/transformers/models/convbert/mod*_convbert* @ArthurZucker
|
||||||
|
/src/transformers/models/cpm/mod*_cpm* @ArthurZucker
|
||||||
|
/src/transformers/models/cpmant/mod*_cpmant* @ArthurZucker
|
||||||
|
/src/transformers/models/ctrl/mod*_ctrl* @ArthurZucker
|
||||||
|
/src/transformers/models/dbrx/mod*_dbrx* @ArthurZucker
|
||||||
|
/src/transformers/models/deberta/mod*_deberta* @ArthurZucker
|
||||||
|
/src/transformers/models/deberta_v2/mod*_deberta_v2* @ArthurZucker
|
||||||
|
/src/transformers/models/dialogpt/mod*_dialogpt* @ArthurZucker
|
||||||
|
/src/transformers/models/diffllama/mod*_diffllama* @ArthurZucker
|
||||||
|
/src/transformers/models/distilbert/mod*_distilbert* @ArthurZucker
|
||||||
|
/src/transformers/models/dpr/mod*_dpr* @ArthurZucker
|
||||||
|
/src/transformers/models/electra/mod*_electra* @ArthurZucker
|
||||||
|
/src/transformers/models/encoder_decoder/mod*_encoder_decoder* @ArthurZucker
|
||||||
|
/src/transformers/models/ernie/mod*_ernie* @ArthurZucker
|
||||||
|
/src/transformers/models/ernie_m/mod*_ernie_m* @ArthurZucker
|
||||||
|
/src/transformers/models/esm/mod*_esm* @ArthurZucker
|
||||||
|
/src/transformers/models/falcon/mod*_falcon* @ArthurZucker
|
||||||
|
/src/transformers/models/falcon3/mod*_falcon3* @ArthurZucker
|
||||||
|
/src/transformers/models/falcon_mamba/mod*_falcon_mamba* @ArthurZucker
|
||||||
|
/src/transformers/models/fastspeech2_conformer/mod*_fastspeech2_conformer* @ArthurZucker
|
||||||
|
/src/transformers/models/flan_t5/mod*_flan_t5* @ArthurZucker
|
||||||
|
/src/transformers/models/flan_ul2/mod*_flan_ul2* @ArthurZucker
|
||||||
|
/src/transformers/models/flaubert/mod*_flaubert* @ArthurZucker
|
||||||
|
/src/transformers/models/fnet/mod*_fnet* @ArthurZucker
|
||||||
|
/src/transformers/models/fsmt/mod*_fsmt* @ArthurZucker
|
||||||
|
/src/transformers/models/funnel/mod*_funnel* @ArthurZucker
|
||||||
|
/src/transformers/models/fuyu/mod*_fuyu* @ArthurZucker
|
||||||
|
/src/transformers/models/gemma/mod*_gemma* @ArthurZucker
|
||||||
|
/src/transformers/models/gemma2/mod*_gemma2* @ArthurZucker
|
||||||
|
/src/transformers/models/glm/mod*_glm* @ArthurZucker
|
||||||
|
/src/transformers/models/openai_gpt/mod*_openai_gpt* @ArthurZucker
|
||||||
|
/src/transformers/models/gpt_neo/mod*_gpt_neo* @ArthurZucker
|
||||||
|
/src/transformers/models/gpt_neox/mod*_gpt_neox* @ArthurZucker
|
||||||
|
/src/transformers/models/gpt_neox_japanese/mod*_gpt_neox_japanese* @ArthurZucker
|
||||||
|
/src/transformers/models/gptj/mod*_gptj* @ArthurZucker
|
||||||
|
/src/transformers/models/gpt2/mod*_gpt2* @ArthurZucker
|
||||||
|
/src/transformers/models/gpt_bigcode/mod*_gpt_bigcode* @ArthurZucker
|
||||||
|
/src/transformers/models/gptsan_japanese/mod*_gptsan_japanese* @ArthurZucker
|
||||||
|
/src/transformers/models/gpt_sw3/mod*_gpt_sw3* @ArthurZucker
|
||||||
|
/src/transformers/models/granite/mod*_granite* @ArthurZucker
|
||||||
|
/src/transformers/models/granitemoe/mod*_granitemoe* @ArthurZucker
|
||||||
|
/src/transformers/models/herbert/mod*_herbert* @ArthurZucker
|
||||||
|
/src/transformers/models/ibert/mod*_ibert* @ArthurZucker
|
||||||
|
/src/transformers/models/jamba/mod*_jamba* @ArthurZucker
|
||||||
|
/src/transformers/models/jetmoe/mod*_jetmoe* @ArthurZucker
|
||||||
|
/src/transformers/models/jukebox/mod*_jukebox* @ArthurZucker
|
||||||
|
/src/transformers/models/led/mod*_led* @ArthurZucker
|
||||||
|
/src/transformers/models/llama/mod*_llama* @ArthurZucker @Cyrilvallez
|
||||||
|
/src/transformers/models/longformer/mod*_longformer* @ArthurZucker
|
||||||
|
/src/transformers/models/longt5/mod*_longt5* @ArthurZucker
|
||||||
|
/src/transformers/models/luke/mod*_luke* @ArthurZucker
|
||||||
|
/src/transformers/models/m2m_100/mod*_m2m_100* @ArthurZucker
|
||||||
|
/src/transformers/models/madlad_400/mod*_madlad_400* @ArthurZucker
|
||||||
|
/src/transformers/models/mamba/mod*_mamba* @ArthurZucker
|
||||||
|
/src/transformers/models/mamba2/mod*_mamba2* @ArthurZucker
|
||||||
|
/src/transformers/models/marian/mod*_marian* @ArthurZucker
|
||||||
|
/src/transformers/models/markuplm/mod*_markuplm* @ArthurZucker
|
||||||
|
/src/transformers/models/mbart/mod*_mbart* @ArthurZucker
|
||||||
|
/src/transformers/models/mega/mod*_mega* @ArthurZucker
|
||||||
|
/src/transformers/models/megatron_bert/mod*_megatron_bert* @ArthurZucker
|
||||||
|
/src/transformers/models/megatron_gpt2/mod*_megatron_gpt2* @ArthurZucker
|
||||||
|
/src/transformers/models/mistral/mod*_mistral* @ArthurZucker
|
||||||
|
/src/transformers/models/mixtral/mod*_mixtral* @ArthurZucker
|
||||||
|
/src/transformers/models/mluke/mod*_mluke* @ArthurZucker
|
||||||
|
/src/transformers/models/mobilebert/mod*_mobilebert* @ArthurZucker
|
||||||
|
/src/transformers/models/modernbert/mod*_modernbert* @ArthurZucker
|
||||||
|
/src/transformers/models/mpnet/mod*_mpnet* @ArthurZucker
|
||||||
|
/src/transformers/models/mpt/mod*_mpt* @ArthurZucker
|
||||||
|
/src/transformers/models/mra/mod*_mra* @ArthurZucker
|
||||||
|
/src/transformers/models/mt5/mod*_mt5* @ArthurZucker
|
||||||
|
/src/transformers/models/mvp/mod*_mvp* @ArthurZucker
|
||||||
|
/src/transformers/models/myt5/mod*_myt5* @ArthurZucker
|
||||||
|
/src/transformers/models/nemotron/mod*_nemotron* @ArthurZucker
|
||||||
|
/src/transformers/models/nezha/mod*_nezha* @ArthurZucker
|
||||||
|
/src/transformers/models/nllb/mod*_nllb* @ArthurZucker
|
||||||
|
/src/transformers/models/nllb_moe/mod*_nllb_moe* @ArthurZucker
|
||||||
|
/src/transformers/models/nystromformer/mod*_nystromformer* @ArthurZucker
|
||||||
|
/src/transformers/models/olmo/mod*_olmo* @ArthurZucker
|
||||||
|
/src/transformers/models/olmo2/mod*_olmo2* @ArthurZucker
|
||||||
|
/src/transformers/models/olmoe/mod*_olmoe* @ArthurZucker
|
||||||
|
/src/transformers/models/open_llama/mod*_open_llama* @ArthurZucker
|
||||||
|
/src/transformers/models/opt/mod*_opt* @ArthurZucker
|
||||||
|
/src/transformers/models/pegasus/mod*_pegasus* @ArthurZucker
|
||||||
|
/src/transformers/models/pegasus_x/mod*_pegasus_x* @ArthurZucker
|
||||||
|
/src/transformers/models/persimmon/mod*_persimmon* @ArthurZucker
|
||||||
|
/src/transformers/models/phi/mod*_phi* @ArthurZucker
|
||||||
|
/src/transformers/models/phi3/mod*_phi3* @ArthurZucker
|
||||||
|
/src/transformers/models/phimoe/mod*_phimoe* @ArthurZucker
|
||||||
|
/src/transformers/models/phobert/mod*_phobert* @ArthurZucker
|
||||||
|
/src/transformers/models/plbart/mod*_plbart* @ArthurZucker
|
||||||
|
/src/transformers/models/prophetnet/mod*_prophetnet* @ArthurZucker
|
||||||
|
/src/transformers/models/qdqbert/mod*_qdqbert* @ArthurZucker
|
||||||
|
/src/transformers/models/qwen2/mod*_qwen2* @ArthurZucker
|
||||||
|
/src/transformers/models/qwen2_moe/mod*_qwen2_moe* @ArthurZucker
|
||||||
|
/src/transformers/models/rag/mod*_rag* @ArthurZucker
|
||||||
|
/src/transformers/models/realm/mod*_realm* @ArthurZucker
|
||||||
|
/src/transformers/models/recurrent_gemma/mod*_recurrent_gemma* @ArthurZucker
|
||||||
|
/src/transformers/models/reformer/mod*_reformer* @ArthurZucker
|
||||||
|
/src/transformers/models/rembert/mod*_rembert* @ArthurZucker
|
||||||
|
/src/transformers/models/retribert/mod*_retribert* @ArthurZucker
|
||||||
|
/src/transformers/models/roberta/mod*_roberta* @ArthurZucker
|
||||||
|
/src/transformers/models/roberta_prelayernorm/mod*_roberta_prelayernorm* @ArthurZucker
|
||||||
|
/src/transformers/models/roc_bert/mod*_roc_bert* @ArthurZucker
|
||||||
|
/src/transformers/models/roformer/mod*_roformer* @ArthurZucker
|
||||||
|
/src/transformers/models/rwkv/mod*_rwkv* @ArthurZucker
|
||||||
|
/src/transformers/models/splinter/mod*_splinter* @ArthurZucker
|
||||||
|
/src/transformers/models/squeezebert/mod*_squeezebert* @ArthurZucker
|
||||||
|
/src/transformers/models/stablelm/mod*_stablelm* @ArthurZucker
|
||||||
|
/src/transformers/models/starcoder2/mod*_starcoder2* @ArthurZucker
|
||||||
|
/src/transformers/models/switch_transformers/mod*_switch_transformers* @ArthurZucker
|
||||||
|
/src/transformers/models/t5/mod*_t5* @ArthurZucker
|
||||||
|
/src/transformers/models/t5v1.1/mod*_t5v1.1* @ArthurZucker
|
||||||
|
/src/transformers/models/tapex/mod*_tapex* @ArthurZucker
|
||||||
|
/src/transformers/models/transfo_xl/mod*_transfo_xl* @ArthurZucker
|
||||||
|
/src/transformers/models/ul2/mod*_ul2* @ArthurZucker
|
||||||
|
/src/transformers/models/umt5/mod*_umt5* @ArthurZucker
|
||||||
|
/src/transformers/models/xmod/mod*_xmod* @ArthurZucker
|
||||||
|
/src/transformers/models/xglm/mod*_xglm* @ArthurZucker
|
||||||
|
/src/transformers/models/xlm/mod*_xlm* @ArthurZucker
|
||||||
|
/src/transformers/models/xlm_prophetnet/mod*_xlm_prophetnet* @ArthurZucker
|
||||||
|
/src/transformers/models/xlm_roberta/mod*_xlm_roberta* @ArthurZucker
|
||||||
|
/src/transformers/models/xlm_roberta_xl/mod*_xlm_roberta_xl* @ArthurZucker
|
||||||
|
/src/transformers/models/xlm_v/mod*_xlm_v* @ArthurZucker
|
||||||
|
/src/transformers/models/xlnet/mod*_xlnet* @ArthurZucker
|
||||||
|
/src/transformers/models/yoso/mod*_yoso* @ArthurZucker
|
||||||
|
/src/transformers/models/zamba/mod*_zamba* @ArthurZucker
|
||||||
|
|
||||||
|
# Vision models
|
||||||
|
/src/transformers/models/beit/mod*_beit* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/bit/mod*_bit* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/conditional_detr/mod*_conditional_detr* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/convnext/mod*_convnext* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/convnextv2/mod*_convnextv2* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/cvt/mod*_cvt* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/deformable_detr/mod*_deformable_detr* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/deit/mod*_deit* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/depth_anything/mod*_depth_anything* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/depth_anything_v2/mod*_depth_anything_v2* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/deta/mod*_deta* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/detr/mod*_detr* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/dinat/mod*_dinat* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/dinov2/mod*_dinov2* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/dinov2_with_registers/mod*_dinov2_with_registers* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/dit/mod*_dit* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/dpt/mod*_dpt* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/efficientformer/mod*_efficientformer* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/efficientnet/mod*_efficientnet* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/focalnet/mod*_focalnet* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/glpn/mod*_glpn* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/hiera/mod*_hiera* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/ijepa/mod*_ijepa* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/imagegpt/mod*_imagegpt* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/levit/mod*_levit* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/mask2former/mod*_mask2former* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/maskformer/mod*_maskformer* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/mobilenet_v1/mod*_mobilenet_v1* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/mobilenet_v2/mod*_mobilenet_v2* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/mobilevit/mod*_mobilevit* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/mobilevitv2/mod*_mobilevitv2* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/nat/mod*_nat* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/poolformer/mod*_poolformer* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/pvt/mod*_pvt* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/pvt_v2/mod*_pvt_v2* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/regnet/mod*_regnet* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/resnet/mod*_resnet* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/rt_detr/mod*_rt_detr* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/segformer/mod*_segformer* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/seggpt/mod*_seggpt* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/superpoint/mod*_superpoint* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/swiftformer/mod*_swiftformer* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/swin/mod*_swin* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/swinv2/mod*_swinv2* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/swin2sr/mod*_swin2sr* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/table_transformer/mod*_table_transformer* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/textnet/mod*_textnet* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/timm_wrapper/mod*_timm_wrapper* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/upernet/mod*_upernet* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/van/mod*_van* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/vit/mod*_vit* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/vit_hybrid/mod*_vit_hybrid* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/vitdet/mod*_vitdet* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/vit_mae/mod*_vit_mae* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/vitmatte/mod*_vitmatte* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/vit_msn/mod*_vit_msn* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/vitpose/mod*_vitpose* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/yolos/mod*_yolos* @amyeroberts @qubvel
|
||||||
|
/src/transformers/models/zoedepth/mod*_zoedepth* @amyeroberts @qubvel
|
||||||
|
|
||||||
|
# Audio models
|
||||||
|
/src/transformers/models/audio_spectrogram_transformer/mod*_audio_spectrogram_transformer* @eustlb
|
||||||
|
/src/transformers/models/bark/mod*_bark* @eustlb
|
||||||
|
/src/transformers/models/clap/mod*_clap* @eustlb
|
||||||
|
/src/transformers/models/dac/mod*_dac* @eustlb
|
||||||
|
/src/transformers/models/encodec/mod*_encodec* @eustlb
|
||||||
|
/src/transformers/models/hubert/mod*_hubert* @eustlb
|
||||||
|
/src/transformers/models/mctct/mod*_mctct* @eustlb
|
||||||
|
/src/transformers/models/mimi/mod*_mimi* @eustlb
|
||||||
|
/src/transformers/models/mms/mod*_mms* @eustlb
|
||||||
|
/src/transformers/models/moshi/mod*_moshi* @eustlb
|
||||||
|
/src/transformers/models/musicgen/mod*_musicgen* @eustlb
|
||||||
|
/src/transformers/models/musicgen_melody/mod*_musicgen_melody* @eustlb
|
||||||
|
/src/transformers/models/pop2piano/mod*_pop2piano* @eustlb
|
||||||
|
/src/transformers/models/seamless_m4t/mod*_seamless_m4t* @eustlb
|
||||||
|
/src/transformers/models/seamless_m4t_v2/mod*_seamless_m4t_v2* @eustlb
|
||||||
|
/src/transformers/models/sew/mod*_sew* @eustlb
|
||||||
|
/src/transformers/models/sew_d/mod*_sew_d* @eustlb
|
||||||
|
/src/transformers/models/speech_to_text/mod*_speech_to_text* @eustlb
|
||||||
|
/src/transformers/models/speech_to_text_2/mod*_speech_to_text_2* @eustlb
|
||||||
|
/src/transformers/models/speecht5/mod*_speecht5* @eustlb
|
||||||
|
/src/transformers/models/unispeech/mod*_unispeech* @eustlb
|
||||||
|
/src/transformers/models/unispeech_sat/mod*_unispeech_sat* @eustlb
|
||||||
|
/src/transformers/models/univnet/mod*_univnet* @eustlb
|
||||||
|
/src/transformers/models/vits/mod*_vits* @eustlb
|
||||||
|
/src/transformers/models/wav2vec2/mod*_wav2vec2* @eustlb
|
||||||
|
/src/transformers/models/wav2vec2_bert/mod*_wav2vec2_bert* @eustlb
|
||||||
|
/src/transformers/models/wav2vec2_conformer/mod*_wav2vec2_conformer* @eustlb
|
||||||
|
/src/transformers/models/wav2vec2_phoneme/mod*_wav2vec2_phoneme* @eustlb
|
||||||
|
/src/transformers/models/wavlm/mod*_wavlm* @eustlb
|
||||||
|
/src/transformers/models/whisper/mod*_whisper* @eustlb
|
||||||
|
/src/transformers/models/xls_r/mod*_xls_r* @eustlb
|
||||||
|
/src/transformers/models/xlsr_wav2vec2/mod*_xlsr_wav2vec2* @eustlb
|
||||||
|
|
||||||
|
# Video models
|
||||||
|
/src/transformers/models/timesformer/mod*_timesformer* @Rocketknight1
|
||||||
|
/src/transformers/models/videomae/mod*_videomae* @Rocketknight1
|
||||||
|
/src/transformers/models/vivit/mod*_vivit* @Rocketknight1
|
||||||
|
|
||||||
|
# Multimodal models
|
||||||
|
/src/transformers/models/align/mod*_align* @zucchini-nlp
|
||||||
|
/src/transformers/models/altclip/mod*_altclip* @zucchini-nlp
|
||||||
|
/src/transformers/models/aria/mod*_aria* @zucchini-nlp
|
||||||
|
/src/transformers/models/blip/mod*_blip* @zucchini-nlp
|
||||||
|
/src/transformers/models/blip_2/mod*_blip_2* @zucchini-nlp
|
||||||
|
/src/transformers/models/bridgetower/mod*_bridgetower* @zucchini-nlp
|
||||||
|
/src/transformers/models/bros/mod*_bros* @zucchini-nlp
|
||||||
|
/src/transformers/models/chameleon/mod*_chameleon* @zucchini-nlp
|
||||||
|
/src/transformers/models/chinese_clip/mod*_chinese_clip* @zucchini-nlp
|
||||||
|
/src/transformers/models/clip/mod*_clip* @zucchini-nlp
|
||||||
|
/src/transformers/models/clipseg/mod*_clipseg* @zucchini-nlp
|
||||||
|
/src/transformers/models/clvp/mod*_clvp* @zucchini-nlp
|
||||||
|
/src/transformers/models/colpali/mod*_colpali* @zucchini-nlp @yonigozlan
|
||||||
|
/src/transformers/models/data2vec/mod*_data2vec* @zucchini-nlp
|
||||||
|
/src/transformers/models/deplot/mod*_deplot* @zucchini-nlp
|
||||||
|
/src/transformers/models/donut/mod*_donut* @zucchini-nlp
|
||||||
|
/src/transformers/models/flava/mod*_flava* @zucchini-nlp
|
||||||
|
/src/transformers/models/git/mod*_git* @zucchini-nlp
|
||||||
|
/src/transformers/models/grounding_dino/mod*_grounding_dino* @qubvel
|
||||||
|
/src/transformers/models/groupvit/mod*_groupvit* @zucchini-nlp
|
||||||
|
/src/transformers/models/idefics/mod*_idefics* @zucchini-nlp
|
||||||
|
/src/transformers/models/idefics2/mod*_idefics2* @zucchini-nlp
|
||||||
|
/src/transformers/models/idefics3/mod*_idefics3* @zucchini-nlp
|
||||||
|
/src/transformers/models/instructblip/mod*_instructblip* @zucchini-nlp
|
||||||
|
/src/transformers/models/instructblipvideo/mod*_instructblipvideo* @zucchini-nlp
|
||||||
|
/src/transformers/models/kosmos_2/mod*_kosmos_2* @zucchini-nlp
|
||||||
|
/src/transformers/models/layoutlm/mod*_layoutlm* @NielsRogge
|
||||||
|
/src/transformers/models/layoutlmv2/mod*_layoutlmv2* @NielsRogge
|
||||||
|
/src/transformers/models/layoutlmv3/mod*_layoutlmv3* @NielsRogge
|
||||||
|
/src/transformers/models/layoutxlm/mod*_layoutxlm* @NielsRogge
|
||||||
|
/src/transformers/models/lilt/mod*_lilt* @zucchini-nlp
|
||||||
|
/src/transformers/models/llava/mod*_llava* @zucchini-nlp @arthurzucker
|
||||||
|
/src/transformers/models/llava_next/mod*_llava_next* @zucchini-nlp
|
||||||
|
/src/transformers/models/llava_next_video/mod*_llava_next_video* @zucchini-nlp
|
||||||
|
/src/transformers/models/llava_onevision/mod*_llava_onevision* @zucchini-nlp
|
||||||
|
/src/transformers/models/lxmert/mod*_lxmert* @zucchini-nlp
|
||||||
|
/src/transformers/models/matcha/mod*_matcha* @zucchini-nlp
|
||||||
|
/src/transformers/models/mgp_str/mod*_mgp_str* @zucchini-nlp
|
||||||
|
/src/transformers/models/mllama/mod*_mllama* @zucchini-nlp
|
||||||
|
/src/transformers/models/nougat/mod*_nougat* @NielsRogge
|
||||||
|
/src/transformers/models/omdet_turbo/mod*_omdet_turbo* @qubvel @yonigozlan
|
||||||
|
/src/transformers/models/oneformer/mod*_oneformer* @zucchini-nlp
|
||||||
|
/src/transformers/models/owlvit/mod*_owlvit* @qubvel
|
||||||
|
/src/transformers/models/owlv2/mod*_owlv2* @qubvel
|
||||||
|
/src/transformers/models/paligemma/mod*_paligemma* @zucchini-nlp @molbap
|
||||||
|
/src/transformers/models/perceiver/mod*_perceiver* @zucchini-nlp
|
||||||
|
/src/transformers/models/pix2struct/mod*_pix2struct* @zucchini-nlp
|
||||||
|
/src/transformers/models/pixtral/mod*_pixtral* @zucchini-nlp @ArthurZucker
|
||||||
|
/src/transformers/models/qwen2_audio/mod*_qwen2_audio* @zucchini-nlp @ArthurZucker
|
||||||
|
/src/transformers/models/qwen2_vl/mod*_qwen2_vl* @zucchini-nlp @ArthurZucker
|
||||||
|
/src/transformers/models/sam/mod*_sam* @zucchini-nlp @ArthurZucker
|
||||||
|
/src/transformers/models/siglip/mod*_siglip* @zucchini-nlp
|
||||||
|
/src/transformers/models/speech_encoder_decoder/mod*_speech_encoder_decoder* @zucchini-nlp
|
||||||
|
/src/transformers/models/tapas/mod*_tapas* @NielsRogge
|
||||||
|
/src/transformers/models/trocr/mod*_trocr* @zucchini-nlp
|
||||||
|
/src/transformers/models/tvlt/mod*_tvlt* @zucchini-nlp
|
||||||
|
/src/transformers/models/tvp/mod*_tvp* @zucchini-nlp
|
||||||
|
/src/transformers/models/udop/mod*_udop* @zucchini-nlp
|
||||||
|
/src/transformers/models/video_llava/mod*_video_llava* @zucchini-nlp
|
||||||
|
/src/transformers/models/vilt/mod*_vilt* @zucchini-nlp
|
||||||
|
/src/transformers/models/vipllava/mod*_vipllava* @zucchini-nlp
|
||||||
|
/src/transformers/models/vision_encoder_decoder/mod*_vision_encoder_decoder* @Rocketknight1
|
||||||
|
/src/transformers/models/vision_text_dual_encoder/mod*_vision_text_dual_encoder* @Rocketknight1
|
||||||
|
/src/transformers/models/visual_bert/mod*_visual_bert* @zucchini-nlp
|
||||||
|
/src/transformers/models/xclip/mod*_xclip* @zucchini-nlp
|
||||||
|
|
||||||
|
# Reinforcement learning models
|
||||||
|
/src/transformers/models/decision_transformer/mod*_decision_transformer* @Rocketknight1
|
||||||
|
/src/transformers/models/trajectory_transformer/mod*_trajectory_transformer* @Rocketknight1
|
||||||
|
|
||||||
|
# Time series models
|
||||||
|
/src/transformers/models/autoformer/mod*_autoformer* @Rocketknight1
|
||||||
|
/src/transformers/models/informer/mod*_informer* @Rocketknight1
|
||||||
|
/src/transformers/models/patchtsmixer/mod*_patchtsmixer* @Rocketknight1
|
||||||
|
/src/transformers/models/patchtst/mod*_patchtst* @Rocketknight1
|
||||||
|
/src/transformers/models/time_series_transformer/mod*_time_series_transformer* @Rocketknight1
|
||||||
|
|
||||||
|
# Graph models
|
||||||
|
/src/transformers/models/graphormer/mod*_graphormer* @clefourrier
|
||||||
|
|
||||||
|
# Finally, files with no owners that shouldn't generate pings, usually automatically generated and checked in the CI
|
||||||
|
utils/dummy*
|
26 .github/workflows/assign-reviewers.yml (vendored, new file)
@ -0,0 +1,26 @@
|
|||||||
|
name: Assign PR Reviewers
|
||||||
|
on:
|
||||||
|
pull_request_target:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
types: [ready_for_review]
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
assign_reviewers:
|
||||||
|
permissions:
|
||||||
|
pull-requests: write
|
||||||
|
runs-on: ubuntu-22.04
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
- name: Set up Python
|
||||||
|
uses: actions/setup-python@v5
|
||||||
|
with:
|
||||||
|
python-version: '3.13'
|
||||||
|
- name: Install dependencies
|
||||||
|
run: |
|
||||||
|
python -m pip install --upgrade pip
|
||||||
|
pip install PyGithub
|
||||||
|
- name: Run assignment script
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
run: python .github/scripts/assign_reviewers.py
|
1 .github/workflows/build_pr_documentation.yml (vendored)
@ -15,4 +15,3 @@ jobs:
|
|||||||
pr_number: ${{ github.event.number }}
|
pr_number: ${{ github.event.number }}
|
||||||
package: transformers
|
package: transformers
|
||||||
languages: ar de en es fr hi it ko pt tr zh ja te
|
languages: ar de en es fr hi it ko pt tr zh ja te
|
||||||
custom_container: huggingface/transformers-doc-builder
|
|
||||||
|
2 .github/workflows/change_pr_to_draft.yml (vendored)
@ -22,4 +22,4 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
echo $PR_NUMBER
|
echo $PR_NUMBER
|
||||||
gh pr ready $PR_NUMBER --repo $REPO --undo
|
gh pr ready $PR_NUMBER --repo $REPO --undo
|
||||||
gh pr comment $PR_NUMBER --repo $REPO --body "Hi 👋, thank you for opening this pull request! The pull request is converted to draft by default. When it is ready for review, please click the \`Ready for review\` button (at the bottom of the PR page)."
|
gh pr comment $PR_NUMBER --repo $REPO --body "Hi 👋, thank you for opening this pull request! The pull request is converted to draft by default. The CI will be paused while the PR is in draft mode. When it is ready for review, please click the \`Ready for review\` button (at the bottom of the PR page). This will assign reviewers and trigger CI."
|
||||||
|
2 .github/workflows/push-important-models.yml (vendored)
@ -27,7 +27,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Get changed files
|
- name: Get changed files
|
||||||
id: changed-files
|
id: changed-files
|
||||||
uses: tj-actions/changed-files@3f54ebb830831fc121d3263c1857cfbdc310cdb9 #v42
|
uses: tj-actions/changed-files@1c8e6069583811afb28f97afeaf8e7da80c6be5c
|
||||||
with:
|
with:
|
||||||
files: src/transformers/models/**
|
files: src/transformers/models/**
|
||||||
|
|
||||||
|
2 .github/workflows/self-comment-ci.yml (vendored)
@ -29,7 +29,7 @@ jobs:
|
|||||||
runs-on: ubuntu-22.04
|
runs-on: ubuntu-22.04
|
||||||
name: Get PR number
|
name: Get PR number
|
||||||
# For security: only allow team members to run
|
# For security: only allow team members to run
|
||||||
if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "qubvel", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1", "SunMarc", "muellerzr"]'), github.actor) && (startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow')) }}
|
if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "qubvel", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1", "SunMarc", "muellerzr", "eustlb"]'), github.actor) && (startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow')) }}
|
||||||
outputs:
|
outputs:
|
||||||
PR_NUMBER: ${{ steps.set_pr_number.outputs.PR_NUMBER }}
|
PR_NUMBER: ${{ steps.set_pr_number.outputs.PR_NUMBER }}
|
||||||
steps:
|
steps:
|
||||||
|
4 .github/workflows/self-push-caller.yml (vendored)
@ -25,7 +25,7 @@ jobs:
|
|||||||
|
|
||||||
- name: Get changed files
|
- name: Get changed files
|
||||||
id: changed-files
|
id: changed-files
|
||||||
uses: tj-actions/changed-files@v41
|
uses: tj-actions/changed-files@1c8e6069583811afb28f97afeaf8e7da80c6be5c
|
||||||
|
|
||||||
- name: Was setup changed
|
- name: Was setup changed
|
||||||
id: was_changed
|
id: was_changed
|
||||||
@ -51,4 +51,4 @@ jobs:
|
|||||||
needs: build-docker-containers
|
needs: build-docker-containers
|
||||||
steps:
|
steps:
|
||||||
- name: Trigger push CI via workflow_run
|
- name: Trigger push CI via workflow_run
|
||||||
run: echo "Trigger push CI via workflow_run"
|
run: echo "Trigger push CI via workflow_run"
|
||||||
|
2 .github/workflows/update_metdata.yml (vendored)
@ -19,7 +19,7 @@ jobs:
|
|||||||
- name: Setup environment
|
- name: Setup environment
|
||||||
run: |
|
run: |
|
||||||
pip install --upgrade pip
|
pip install --upgrade pip
|
||||||
pip install datasets pandas==2.0.3
|
pip install datasets pandas
|
||||||
pip install .[torch,tf,flax]
|
pip install .[torch,tf,flax]
|
||||||
|
|
||||||
- name: Update metadata
|
- name: Update metadata
|
||||||
|
@ -221,10 +221,10 @@ You'll need **[Python 3.9](https://github.com/huggingface/transformers/blob/main
|
|||||||
[Checks on a Pull Request](https://huggingface.co/docs/transformers/pr_checks) guide.
|
[Checks on a Pull Request](https://huggingface.co/docs/transformers/pr_checks) guide.
|
||||||
|
|
||||||
If you're modifying documents under the `docs/source` directory, make sure the documentation can still be built. This check will also run in the CI when you open a pull request. To run a local check
|
If you're modifying documents under the `docs/source` directory, make sure the documentation can still be built. This check will also run in the CI when you open a pull request. To run a local check
|
||||||
make sure you install the documentation builder:
|
make sure you install the [documentation builder](https://github.com/huggingface/doc-builder).
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
pip install ".[docs]"
|
pip install hf-doc-builder
|
||||||
```
|
```
|
||||||
|
|
||||||
Run the following command from the root of the repository:
|
Run the following command from the root of the repository:
|
||||||
|
@ -263,9 +263,9 @@ You are not required to read the following guidelines before opening an issue. H
|
|||||||
But if you're replying to a comment that happened some comments back it's always a good practice to quote just the relevant lines you're replying it. The `>` is used for quoting, or you can always use the menu to do so. For example your editor box will look like:
|
But if you're replying to a comment that happened some comments back it's always a good practice to quote just the relevant lines you're replying it. The `>` is used for quoting, or you can always use the menu to do so. For example your editor box will look like:
|
||||||
|
|
||||||
```
|
```
|
||||||
> How big is your gpu cluster?
|
> How big is your GPU cluster?
|
||||||
|
|
||||||
Our cluster is made of 256 gpus.
|
Our cluster is made of 256 GPUs.
|
||||||
```
|
```
|
||||||
|
|
||||||
If you are addressing multiple comments, quote the relevant parts of each before your answer. Some people use the same comment to do multiple replies, others separate them into separate comments. Either way works. The latter approach helps for linking to a specific comment.
|
If you are addressing multiple comments, quote the relevant parts of each before your answer. Some people use the same comment to do multiple replies, others separate them into separate comments. Either way works. The latter approach helps for linking to a specific comment.
|
||||||
|
386 README.md
@ -25,6 +25,7 @@ limitations under the License.
|
|||||||
</p>
|
</p>
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
|
<a href="https://huggingface.com/models"><img alt="Checkpoints on Hub" src="https://img.shields.io/endpoint?url=https://huggingface.co/api/shields/models&color=brightgreen"></a>
|
||||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||||
@@ -54,275 +55,254 @@ limitations under the License.
|
|||||||
</h4>
|
</h4>
|
||||||
|
|
||||||
<h3 align="center">
|
<h3 align="center">
|
||||||
<p>State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow</p>
|
<p>State-of-the-art pretrained models for inference and training</p>
|
||||||
</h3>
|
</h3>
|
||||||
|
|
||||||
<h3 align="center">
|
<h3 align="center">
|
||||||
<a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a>
|
<a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a>
|
||||||
</h3>
|
</h3>
|
||||||
|
|
||||||
🤗 Transformers provides thousands of pretrained models to perform tasks on different modalities such as text, vision, and audio.
|
Transformers is a library of pretrained text, computer vision, audio, video, and multimodal models for inference and training. Use Transformers to fine-tune models on your data, build inference applications, and for generative AI use cases across multiple modalities.
|
||||||
|
|
||||||
These models can be applied on:
|
There are over 500K Transformers [model checkpoints](https://huggingface.co/models?library=transformers&sort=trending) on the [Hugging Face Hub](https://huggingface.com/models) that you can use.
|
||||||
|
|
||||||
* 📝 Text, for tasks like text classification, information extraction, question answering, summarization, translation, and text generation, in over 100 languages.
|
Explore the [Hub](https://huggingface.com/) today to find a model and use Transformers to help you get started right away.
|
||||||
* 🖼️ Images, for tasks like image classification, object detection, and segmentation.
|
|
||||||
* 🗣️ Audio, for tasks like speech recognition and audio classification.
|
|
||||||
|
|
||||||
Transformer models can also perform tasks on **several modalities combined**, such as table question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering.
|
## Installation
|
||||||
|
|
||||||
🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and then share them with the community on our [model hub](https://huggingface.co/models). At the same time, each python module defining an architecture is fully standalone and can be modified to enable quick research experiments.
|
Transformers works with Python 3.9+, [PyTorch](https://pytorch.org/get-started/locally/) 2.0+, [TensorFlow](https://www.tensorflow.org/install/pip) 2.6+, and [Flax](https://flax.readthedocs.io/en/latest/) 0.4.1+.
|
||||||
|
|
||||||
🤗 Transformers is backed by the three most popular deep learning libraries — [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/) — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other.
|
Create and activate a virtual environment with [venv](https://docs.python.org/3/library/venv.html) or [uv](https://docs.astral.sh/uv/), a fast Rust-based Python package and project manager.
|
||||||
|
|
||||||
## Online demos
|
```py
|
||||||
|
# venv
|
||||||
|
python -m venv .my-env
|
||||||
|
source .my-env/bin/activate
|
||||||
|
|
||||||
You can test most of our models directly on their pages from the [model hub](https://huggingface.co/models). We also offer [private model hosting, versioning, & an inference API](https://huggingface.co/pricing) for public and private models.
|
# uv
|
||||||
|
uv venv .my-env
|
||||||
Here are a few examples:
|
source .my-env/bin/activate
|
||||||
|
|
||||||
In Natural Language Processing:
|
|
||||||
- [Masked word completion with BERT](https://huggingface.co/google-bert/bert-base-uncased?text=Paris+is+the+%5BMASK%5D+of+France)
|
|
||||||
- [Named Entity Recognition with Electra](https://huggingface.co/dbmdz/electra-large-discriminator-finetuned-conll03-english?text=My+name+is+Sarah+and+I+live+in+London+city)
|
|
||||||
- [Text generation with Mistral](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2)
|
|
||||||
- [Natural Language Inference with RoBERTa](https://huggingface.co/FacebookAI/roberta-large-mnli?text=The+dog+was+lost.+Nobody+lost+any+animal)
|
|
||||||
- [Summarization with BART](https://huggingface.co/facebook/bart-large-cnn?text=The+tower+is+324+metres+%281%2C063+ft%29+tall%2C+about+the+same+height+as+an+81-storey+building%2C+and+the+tallest+structure+in+Paris.+Its+base+is+square%2C+measuring+125+metres+%28410+ft%29+on+each+side.+During+its+construction%2C+the+Eiffel+Tower+surpassed+the+Washington+Monument+to+become+the+tallest+man-made+structure+in+the+world%2C+a+title+it+held+for+41+years+until+the+Chrysler+Building+in+New+York+City+was+finished+in+1930.+It+was+the+first+structure+to+reach+a+height+of+300+metres.+Due+to+the+addition+of+a+broadcasting+aerial+at+the+top+of+the+tower+in+1957%2C+it+is+now+taller+than+the+Chrysler+Building+by+5.2+metres+%2817+ft%29.+Excluding+transmitters%2C+the+Eiffel+Tower+is+the+second+tallest+free-standing+structure+in+France+after+the+Millau+Viaduct)
|
|
||||||
- [Question answering with DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased-distilled-squad?text=Which+name+is+also+used+to+describe+the+Amazon+rainforest+in+English%3F&context=The+Amazon+rainforest+%28Portuguese%3A+Floresta+Amaz%C3%B4nica+or+Amaz%C3%B4nia%3B+Spanish%3A+Selva+Amaz%C3%B3nica%2C+Amazon%C3%ADa+or+usually+Amazonia%3B+French%3A+For%C3%AAt+amazonienne%3B+Dutch%3A+Amazoneregenwoud%29%2C+also+known+in+English+as+Amazonia+or+the+Amazon+Jungle%2C+is+a+moist+broadleaf+forest+that+covers+most+of+the+Amazon+basin+of+South+America.+This+basin+encompasses+7%2C000%2C000+square+kilometres+%282%2C700%2C000+sq+mi%29%2C+of+which+5%2C500%2C000+square+kilometres+%282%2C100%2C000+sq+mi%29+are+covered+by+the+rainforest.+This+region+includes+territory+belonging+to+nine+nations.+The+majority+of+the+forest+is+contained+within+Brazil%2C+with+60%25+of+the+rainforest%2C+followed+by+Peru+with+13%25%2C+Colombia+with+10%25%2C+and+with+minor+amounts+in+Venezuela%2C+Ecuador%2C+Bolivia%2C+Guyana%2C+Suriname+and+French+Guiana.+States+or+departments+in+four+nations+contain+%22Amazonas%22+in+their+names.+The+Amazon+represents+over+half+of+the+planet%27s+remaining+rainforests%2C+and+comprises+the+largest+and+most+biodiverse+tract+of+tropical+rainforest+in+the+world%2C+with+an+estimated+390+billion+individual+trees+divided+into+16%2C000+species)
|
|
||||||
- [Translation with T5](https://huggingface.co/google-t5/t5-base?text=My+name+is+Wolfgang+and+I+live+in+Berlin)
|
|
||||||
|
|
||||||
In Computer Vision:
|
|
||||||
- [Image classification with ViT](https://huggingface.co/google/vit-base-patch16-224)
|
|
||||||
- [Object Detection with DETR](https://huggingface.co/facebook/detr-resnet-50)
|
|
||||||
- [Semantic Segmentation with SegFormer](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512)
|
|
||||||
- [Panoptic Segmentation with Mask2Former](https://huggingface.co/facebook/mask2former-swin-large-coco-panoptic)
|
|
||||||
- [Depth Estimation with Depth Anything](https://huggingface.co/docs/transformers/main/model_doc/depth_anything)
|
|
||||||
- [Video Classification with VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)
|
|
||||||
- [Universal Segmentation with OneFormer](https://huggingface.co/shi-labs/oneformer_ade20k_dinat_large)
|
|
||||||
|
|
||||||
In Audio:
|
|
||||||
- [Automatic Speech Recognition with Whisper](https://huggingface.co/openai/whisper-large-v3)
|
|
||||||
- [Keyword Spotting with Wav2Vec2](https://huggingface.co/superb/wav2vec2-base-superb-ks)
|
|
||||||
- [Audio Classification with Audio Spectrogram Transformer](https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593)
|
|
||||||
|
|
||||||
In Multimodal tasks:
|
|
||||||
- [Table Question Answering with TAPAS](https://huggingface.co/google/tapas-base-finetuned-wtq)
|
|
||||||
- [Visual Question Answering with ViLT](https://huggingface.co/dandelin/vilt-b32-finetuned-vqa)
|
|
||||||
- [Image captioning with LLaVa](https://huggingface.co/llava-hf/llava-1.5-7b-hf)
|
|
||||||
- [Zero-shot Image Classification with SigLIP](https://huggingface.co/google/siglip-so400m-patch14-384)
|
|
||||||
- [Document Question Answering with LayoutLM](https://huggingface.co/impira/layoutlm-document-qa)
|
|
||||||
- [Zero-shot Video Classification with X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)
|
|
||||||
- [Zero-shot Object Detection with OWLv2](https://huggingface.co/docs/transformers/en/model_doc/owlv2)
|
|
||||||
- [Zero-shot Image Segmentation with CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)
|
|
||||||
- [Automatic Mask Generation with SAM](https://huggingface.co/docs/transformers/model_doc/sam)
|
|
||||||
|
|
||||||
|
|
||||||
## 100 projects using Transformers
|
|
||||||
|
|
||||||
Transformers is more than a toolkit to use pretrained models: it's a community of projects built around it and the
|
|
||||||
Hugging Face Hub. We want Transformers to enable developers, researchers, students, professors, engineers, and anyone
|
|
||||||
else to build their dream projects.
|
|
||||||
|
|
||||||
In order to celebrate the 100,000 stars of transformers, we have decided to put the spotlight on the
|
|
||||||
community, and we have created the [awesome-transformers](./awesome-transformers.md) page which lists 100
|
|
||||||
incredible projects built in the vicinity of transformers.
|
|
||||||
|
|
||||||
If you own or use a project that you believe should be part of the list, please open a PR to add it!
|
|
||||||
|
|
||||||
## Serious about AI in your organisation? Build faster with the Hugging Face Enterprise Hub.
|
|
||||||
|
|
||||||
<a target="_blank" href="https://huggingface.co/enterprise">
|
|
||||||
<img alt="Hugging Face Enterprise Hub" src="https://github.com/user-attachments/assets/247fb16d-d251-4583-96c4-d3d76dda4925">
|
|
||||||
</a><br>
|
|
||||||
|
|
||||||
## Quick tour
|
|
||||||
|
|
||||||
To immediately use a model on a given input (text, image, audio, ...), we provide the `pipeline` API. Pipelines group together a pretrained model with the preprocessing that was used during that model's training. Here is how to quickly use a pipeline to classify positive versus negative texts:
|
|
||||||
|
|
||||||
```python
|
|
||||||
>>> from transformers import pipeline
|
|
||||||
|
|
||||||
# Allocate a pipeline for sentiment-analysis
|
|
||||||
>>> classifier = pipeline('sentiment-analysis')
|
|
||||||
>>> classifier('We are very happy to introduce pipeline to the transformers repository.')
|
|
||||||
[{'label': 'POSITIVE', 'score': 0.9996980428695679}]
|
|
||||||
```
|
```
|
||||||
|
|
||||||
The second line of code downloads and caches the pretrained model used by the pipeline, while the third evaluates it on the given text. Here, the answer is "positive" with a confidence of 99.97%.
|
Install Transformers in your virtual environment.
|
||||||
|
|
||||||
Many tasks have a pre-trained `pipeline` ready to go, in NLP but also in computer vision and speech. For example, we can easily extract detected objects in an image:
|
```py
|
||||||
|
# pip
|
||||||
|
pip install transformers
|
||||||
|
|
||||||
``` python
|
# uv
|
||||||
>>> import requests
|
uv pip install transformers
|
||||||
>>> from PIL import Image
|
|
||||||
>>> from transformers import pipeline
|
|
||||||
|
|
||||||
# Download an image with cute cats
|
|
||||||
>>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png"
|
|
||||||
>>> image_data = requests.get(url, stream=True).raw
|
|
||||||
>>> image = Image.open(image_data)
|
|
||||||
|
|
||||||
# Allocate a pipeline for object detection
|
|
||||||
>>> object_detector = pipeline('object-detection')
|
|
||||||
>>> object_detector(image)
|
|
||||||
[{'score': 0.9982201457023621,
|
|
||||||
'label': 'remote',
|
|
||||||
'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
|
|
||||||
{'score': 0.9960021376609802,
|
|
||||||
'label': 'remote',
|
|
||||||
'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
|
|
||||||
{'score': 0.9954745173454285,
|
|
||||||
'label': 'couch',
|
|
||||||
'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
|
|
||||||
{'score': 0.9988006353378296,
|
|
||||||
'label': 'cat',
|
|
||||||
'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
|
|
||||||
{'score': 0.9986783862113953,
|
|
||||||
'label': 'cat',
|
|
||||||
'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}]
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Here, we get a list of objects detected in the image, with a box surrounding the object and a confidence score. Here is the original image on the left, with the predictions displayed on the right:
|
Install Transformers from source if you want the latest changes in the library or are interested in contributing. However, the *latest* version may not be stable. Feel free to open an [issue](https://github.com/huggingface/transformers/issues) if you encounter an error.
|
||||||
|
|
||||||
|
```shell
|
||||||
|
git clone https://github.com/huggingface/transformers.git
|
||||||
|
cd transformers
|
||||||
|
pip install .
|
||||||
|
```
|
||||||
|
|
||||||
|
## Quickstart
|
||||||
|
|
||||||
|
Get started with Transformers right away with the [Pipeline](https://huggingface.co/docs/transformers/pipeline_tutorial) API. The `Pipeline` is a high-level inference class that supports text, audio, vision, and multimodal tasks. It handles preprocessing the input and returns the appropriate output.
|
||||||
|
|
||||||
|
Instantiate a pipeline and specify the model to use for text generation. The model is downloaded and cached so you can reuse it later. Finally, pass some text to prompt the model.
|
||||||
|
|
||||||
|
```py
|
||||||
|
from transformers import pipeline
|
||||||
|
|
||||||
|
pipeline = pipeline(task="text-generation", model="Qwen/Qwen2.5-1.5B")
|
||||||
|
pipeline("the secret to baking a really good cake is ")
|
||||||
|
[{'generated_text': 'the secret to baking a really good cake is 1) to use the right ingredients and 2) to follow the recipe exactly. the recipe for the cake is as follows: 1 cup of sugar, 1 cup of flour, 1 cup of milk, 1 cup of butter, 1 cup of eggs, 1 cup of chocolate chips. if you want to make 2 cakes, how much sugar do you need? To make 2 cakes, you will need 2 cups of sugar.'}]
|
||||||
|
```
|
||||||
|
|
||||||
|
To chat with a model, the usage pattern is the same. The only difference is you need to construct a chat history (the input to `Pipeline`) between you and the system.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> You can also chat with a model directly from the command line.
|
||||||
|
> ```shell
|
||||||
|
> transformers-cli chat --model_name_or_path Qwen/Qwen2.5-0.5B-Instruct
|
||||||
|
> ```
|
||||||
|
|
||||||
|
```py
|
||||||
|
import torch
|
||||||
|
from transformers import pipeline
|
||||||
|
|
||||||
|
chat = [
|
||||||
|
{"role": "system", "content": "You are a sassy, wise-cracking robot as imagined by Hollywood circa 1986."},
|
||||||
|
{"role": "user", "content": "Hey, can you tell me any fun things to do in New York?"}
|
||||||
|
]
|
||||||
|
|
||||||
|
pipeline = pipeline(task="text-generation", model="meta-llama/Meta-Llama-3-8B-Instruct", torch_dtype=torch.bfloat16, device_map="auto")
|
||||||
|
response = pipeline(chat, max_new_tokens=512)
|
||||||
|
print(response[0]["generated_text"][-1]["content"])
|
||||||
|
```
|
||||||
|
|
||||||
|
Expand the examples below to see how `Pipeline` works for different modalities and tasks.
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Automatic speech recognition</summary>
|
||||||
|
|
||||||
|
```py
|
||||||
|
from transformers import pipeline
|
||||||
|
|
||||||
|
pipeline = pipeline(task="automatic-speech-recognition", model="openai/whisper-large-v3")
|
||||||
|
pipeline("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
|
||||||
|
{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'}
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
<details>
|
||||||
|
<summary>Image classification</summary>
|
||||||
|
|
||||||
<h3 align="center">
|
<h3 align="center">
|
||||||
<a><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png" width="400"></a>
|
<a><img src="https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"></a>
|
||||||
<a><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample_post_processed.png" width="400"></a>
|
|
||||||
</h3>
|
</h3>
|
||||||
|
|
||||||
You can learn more about the tasks supported by the `pipeline` API in [this tutorial](https://huggingface.co/docs/transformers/task_summary).
|
```py
|
||||||
|
from transformers import pipeline
|
||||||
|
|
||||||
In addition to `pipeline`, to download and use any of the pretrained models on your given task, all it takes is three lines of code. Here is the PyTorch version:
|
pipeline = pipeline(task="image-classification", model="facebook/dinov2-small-imagenet1k-1-layer")
|
||||||
```python
|
pipeline("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
|
||||||
>>> from transformers import AutoTokenizer, AutoModel
|
[{'label': 'macaw', 'score': 0.997848391532898},
|
||||||
|
{'label': 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
|
||||||
>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
|
'score': 0.0016551691805943847},
|
||||||
>>> model = AutoModel.from_pretrained("google-bert/bert-base-uncased")
|
{'label': 'lorikeet', 'score': 0.00018523589824326336},
|
||||||
|
{'label': 'African grey, African gray, Psittacus erithacus',
|
||||||
>>> inputs = tokenizer("Hello world!", return_tensors="pt")
|
'score': 7.85409429227002e-05},
|
||||||
>>> outputs = model(**inputs)
|
{'label': 'quail', 'score': 5.502637941390276e-05}]
|
||||||
```
|
```
|
||||||
|
|
||||||
And here is the equivalent code for TensorFlow:
|
</details>
|
||||||
```python
|
|
||||||
>>> from transformers import AutoTokenizer, TFAutoModel
|
|
||||||
|
|
||||||
>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
|
<details>
|
||||||
>>> model = TFAutoModel.from_pretrained("google-bert/bert-base-uncased")
|
<summary>Visual question answering</summary>
|
||||||
|
|
||||||
>>> inputs = tokenizer("Hello world!", return_tensors="tf")
|
|
||||||
>>> outputs = model(**inputs)
|
<h3 align="center">
|
||||||
|
<a><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/idefics-few-shot.jpg"></a>
|
||||||
|
</h3>
|
||||||
|
|
||||||
|
```py
|
||||||
|
from transformers import pipeline
|
||||||
|
|
||||||
|
pipeline = pipeline(task="visual-question-answering", model="Salesforce/blip-vqa-base")
|
||||||
|
pipeline(
|
||||||
|
image="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/idefics-few-shot.jpg",
|
||||||
|
question="What is in the image?",
|
||||||
|
)
|
||||||
|
[{'answer': 'statue of liberty'}]
|
||||||
```
|
```
|
||||||
|
|
||||||
The tokenizer is responsible for all the preprocessing the pretrained model expects, and can be called directly on a single string (as in the above examples) or a list. It outputs a dictionary that you can use in downstream code or pass directly to your model using the `**` argument unpacking operator.
|
</details>
|
||||||
|
|
||||||
The model itself is a regular [PyTorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) (depending on your backend) which you can use as usual. [This tutorial](https://huggingface.co/docs/transformers/training) explains how to integrate such a model into a classic PyTorch or TensorFlow training loop, or how to use our `Trainer` API to quickly fine-tune on a new dataset.
|
## Why should I use Transformers?
|
||||||
|
|
||||||
## Why should I use transformers?
|
|
||||||
|
|
||||||
1. Easy-to-use state-of-the-art models:
|
1. Easy-to-use state-of-the-art models:
|
||||||
- High performance on natural language understanding & generation, computer vision, and audio tasks.
|
- High performance on natural language understanding & generation, computer vision, audio, video, and multimodal tasks.
|
||||||
- Low barrier to entry for educators and practitioners.
|
- Low barrier to entry for researchers, engineers, and developers.
|
||||||
- Few user-facing abstractions with just three classes to learn.
|
- Few user-facing abstractions with just three classes to learn.
|
||||||
- A unified API for using all our pretrained models.
|
- A unified API for using all our pretrained models.
|
||||||
|
|
||||||
1. Lower compute costs, smaller carbon footprint:
|
1. Lower compute costs, smaller carbon footprint:
|
||||||
- Researchers can share trained models instead of always retraining.
|
- Share trained models instead of training from scratch.
|
||||||
- Practitioners can reduce compute time and production costs.
|
- Reduce compute time and production costs.
|
||||||
- Dozens of architectures with over 400,000 pretrained models across all modalities.
|
- Dozens of model architectures with 1M+ pretrained checkpoints across all modalities.
|
||||||
|
|
||||||
1. Choose the right framework for every part of a model's lifetime:
|
1. Choose the right framework for every part of a model's lifetime:
|
||||||
- Train state-of-the-art models in 3 lines of code.
|
- Train state-of-the-art models in 3 lines of code.
|
||||||
- Move a single model between TF2.0/PyTorch/JAX frameworks at will.
|
- Move a single model between PyTorch/JAX/TF2.0 frameworks at will.
|
||||||
- Seamlessly pick the right framework for training, evaluation, and production.
|
- Pick the right framework for training, evaluation, and production.
|
||||||
|
|
||||||
1. Easily customize a model or an example to your needs:
|
1. Easily customize a model or an example to your needs:
|
||||||
- We provide examples for each architecture to reproduce the results published by its original authors.
|
- We provide examples for each architecture to reproduce the results published by its original authors.
|
||||||
- Model internals are exposed as consistently as possible.
|
- Model internals are exposed as consistently as possible.
|
||||||
- Model files can be used independently of the library for quick experiments.
|
- Model files can be used independently of the library for quick experiments.
|
||||||
|
|
||||||
## Why shouldn't I use transformers?
|
<a target="_blank" href="https://huggingface.co/enterprise">
|
||||||
|
<img alt="Hugging Face Enterprise Hub" src="https://github.com/user-attachments/assets/247fb16d-d251-4583-96c4-d3d76dda4925">
|
||||||
|
</a><br>
|
||||||
|
|
||||||
|
## Why shouldn't I use Transformers?
|
||||||
|
|
||||||
- This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving into additional abstractions/files.
|
- This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving into additional abstractions/files.
|
||||||
- The training API is not intended to work on any model but is optimized to work with the models provided by the library. For generic machine learning loops, you should use another library (possibly, [Accelerate](https://huggingface.co/docs/accelerate)).
|
- The training API is optimized to work with PyTorch models provided by Transformers. For generic machine learning loops, you should use another library like [Accelerate](https://huggingface.co/docs/accelerate).
|
||||||
- While we strive to present as many use cases as possible, the scripts in our [examples folder](https://github.com/huggingface/transformers/tree/main/examples) are just that: examples. It is expected that they won't work out-of-the-box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs.
|
- The [example scripts](https://github.com/huggingface/transformers/tree/main/examples) are only *examples*. They may not work out-of-the-box on your specific use case, and you'll need to adapt the code for it to work.
|
||||||
|
|
||||||
## Installation
|
## 100 projects using Transformers
|
||||||
|
|
||||||
### With pip
|
Transformers is more than a toolkit to use pretrained models; it's a community of projects built around it and the
|
||||||
|
Hugging Face Hub. We want Transformers to enable developers, researchers, students, professors, engineers, and anyone
|
||||||
|
else to build their dream projects.
|
||||||
|
|
||||||
This repository is tested on Python 3.9+, Flax 0.4.1+, PyTorch 2.0+, and TensorFlow 2.6+.
|
In order to celebrate Transformers' 100,000 stars, we wanted to put the spotlight on the
community with the [awesome-transformers](./awesome-transformers.md) page, which lists 100
incredible projects built with Transformers.
|
||||||
|
|
||||||
You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
|
If you own or use a project that you believe should be part of the list, please open a PR to add it!
|
||||||
|
|
||||||
First, create a virtual environment with the version of Python you're going to use and activate it.
|
## Example models
|
||||||
|
|
||||||
**macOS/Linux**
|
You can test most of our models directly on their [Hub model pages](https://huggingface.co/models).
|
||||||
|
|
||||||
```bash
python -m venv env
|
Expand each modality below to see a few example models for various use cases.
|
||||||
source env/bin/activate
|
|
||||||
```
|
|
||||||
|
|
||||||
**Windows**
|
<details>
|
||||||
|
<summary>Audio</summary>
|
||||||
|
|
||||||
```
python -m venv env
|
- Audio classification with [Whisper](https://huggingface.co/openai/whisper-large-v3-turbo)
|
||||||
env\Scripts\activate
|
- Automatic speech recognition with [Moonshine](https://huggingface.co/UsefulSensors/moonshine)
|
||||||
```
|
- Keyword spotting with [Wav2Vec2](https://huggingface.co/superb/wav2vec2-base-superb-ks)
|
||||||
|
- Speech to speech generation with [Moshi](https://huggingface.co/kyutai/moshiko-pytorch-bf16)
|
||||||
|
- Text to audio with [MusicGen](https://huggingface.co/facebook/musicgen-large)
|
||||||
|
- Text to speech with [Bark](https://huggingface.co/suno/bark)
|
||||||
|
|
||||||
To use 🤗 Transformers, you must install at least one of Flax, PyTorch, or TensorFlow. Refer to the official installation guides for platform-specific commands:
|
</details>
|
||||||
|
|
||||||
[TensorFlow installation page](https://www.tensorflow.org/install/),
|
<details>
|
||||||
[PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or [Flax](https://github.com/google/flax#quick-install) and [Jax](https://github.com/google/jax#installation)
|
<summary>Computer vision</summary>
|
||||||
|
|
||||||
When one of those backends has been installed, 🤗 Transformers can be installed using pip as follows:
|
- Automatic mask generation with [SAM](https://huggingface.co/facebook/sam-vit-base)
|
||||||
|
- Depth estimation with [DepthPro](https://huggingface.co/apple/DepthPro-hf)
|
||||||
|
- Image classification with [DINO v2](https://huggingface.co/facebook/dinov2-base)
|
||||||
|
- Keypoint detection with [SuperGlue](https://huggingface.co/magic-leap-community/superglue_outdoor)
|
||||||
|
- Keypoint matching with [SuperGlue](https://huggingface.co/magic-leap-community/superglue)
|
||||||
|
- Object detection with [RT-DETRv2](https://huggingface.co/PekingU/rtdetr_v2_r50vd)
|
||||||
|
- Pose Estimation with [VitPose](https://huggingface.co/usyd-community/vitpose-base-simple)
|
||||||
|
- Universal segmentation with [OneFormer](https://huggingface.co/shi-labs/oneformer_ade20k_swin_large)
|
||||||
|
- Video classification with [VideoMAE](https://huggingface.co/MCG-NJU/videomae-large)
|
||||||
|
|
||||||
```
|
</details>
|
||||||
pip install transformers
|
|
||||||
```
|
|
||||||
|
|
||||||
If you'd like to play with the examples or need the bleeding edge of the code and can't wait for a new release, you must [install the library from source](https://huggingface.co/docs/transformers/installation#installing-from-source).
|
<details>
|
||||||
|
<summary>Multimodal</summary>
|
||||||
|
|
||||||
```
|
- Audio or text to text with [Qwen2-Audio](https://huggingface.co/Qwen/Qwen2-Audio-7B)
|
||||||
git clone https://github.com/huggingface/transformers.git
|
- Document question answering with [LayoutLMv3](https://huggingface.co/microsoft/layoutlmv3-base)
|
||||||
cd transformers
|
- Image or text to text with [Qwen-VL](https://huggingface.co/Qwen/Qwen2.5-VL-3B-Instruct)
|
||||||
pip install .
|
- Image captioning [BLIP-2](https://huggingface.co/Salesforce/blip2-opt-2.7b)
|
||||||
```
|
- OCR-based document understanding with [GOT-OCR2](https://huggingface.co/stepfun-ai/GOT-OCR-2.0-hf)
|
||||||
|
- Table question answering with [TAPAS](https://huggingface.co/google/tapas-base)
|
||||||
|
- Unified multimodal understanding and generation with [Emu3](https://huggingface.co/BAAI/Emu3-Gen)
|
||||||
|
- Vision to text with [Llava-OneVision](https://huggingface.co/llava-hf/llava-onevision-qwen2-0.5b-ov-hf)
|
||||||
|
- Visual question answering with [Llava](https://huggingface.co/llava-hf/llava-1.5-7b-hf)
|
||||||
|
- Visual referring expression segmentation with [Kosmos-2](https://huggingface.co/microsoft/kosmos-2-patch14-224)
|
||||||
|
|
||||||
### With conda
|
</details>
|
||||||
|
|
||||||
🤗 Transformers can be installed using conda as follows:
|
<details>
|
||||||
|
<summary>NLP</summary>
|
||||||
|
|
||||||
```shell script
|
- Masked word completion with [ModernBERT](https://huggingface.co/answerdotai/ModernBERT-base)
|
||||||
conda install conda-forge::transformers
|
- Named entity recognition with [Gemma](https://huggingface.co/google/gemma-2-2b)
|
||||||
```
|
- Question answering with [Mixtral](https://huggingface.co/mistralai/Mixtral-8x7B-v0.1)
|
||||||
|
- Summarization with [BART](https://huggingface.co/facebook/bart-large-cnn)
|
||||||
|
- Translation with [T5](https://huggingface.co/google-t5/t5-base)
|
||||||
|
- Text generation with [Llama](https://huggingface.co/meta-llama/Llama-3.2-1B)
|
||||||
|
- Text classification with [Qwen](https://huggingface.co/Qwen/Qwen2.5-0.5B)
|
||||||
|
|
||||||
> **_NOTE:_** Installing `transformers` from the `huggingface` channel is deprecated.
|
</details>
|
||||||
|
|
||||||
Follow the installation pages of Flax, PyTorch or TensorFlow to see how to install them with conda.
|
|
||||||
|
|
||||||
> **_NOTE:_** On Windows, you may be prompted to activate Developer Mode in order to benefit from caching. If this is not an option for you, please let us know in [this issue](https://github.com/huggingface/huggingface_hub/issues/1062).
|
|
||||||
|
|
||||||
## Model architectures
|
|
||||||
|
|
||||||
**[All the model checkpoints](https://huggingface.co/models)** provided by 🤗 Transformers are seamlessly integrated from the huggingface.co [model hub](https://huggingface.co/models), where they are uploaded directly by [users](https://huggingface.co/users) and [organizations](https://huggingface.co/organizations).
|
|
||||||
|
|
||||||
Current number of checkpoints: 
|
|
||||||
|
|
||||||
🤗 Transformers currently provides the following architectures: see [here](https://huggingface.co/docs/transformers/model_summary) for a high-level summary of each of them.
|
|
||||||
|
|
||||||
To check if each model has an implementation in Flax, PyTorch or TensorFlow, or has an associated tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/docs/transformers/index#supported-frameworks).
|
|
||||||
|
|
||||||
These implementations have been tested on several datasets (see the example scripts) and should match the performance of the original implementations. You can find more details on performance in the Examples section of the [documentation](https://github.com/huggingface/transformers/tree/main/examples).
|
|
||||||
|
|
||||||
|
|
||||||
## Learn more
|
|
||||||
|
|
||||||
| Section | Description |
|
|
||||||
|-|-|
|
|
||||||
| [Documentation](https://huggingface.co/docs/transformers/) | Full API documentation and tutorials |
|
|
||||||
| [Task summary](https://huggingface.co/docs/transformers/task_summary) | Tasks supported by 🤗 Transformers |
|
|
||||||
| [Preprocessing tutorial](https://huggingface.co/docs/transformers/preprocessing) | Using the `Tokenizer` class to prepare data for the models |
|
|
||||||
| [Training and fine-tuning](https://huggingface.co/docs/transformers/training) | Using the models provided by 🤗 Transformers in a PyTorch/TensorFlow training loop and the `Trainer` API |
|
|
||||||
| [Quick tour: Fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/main/examples) | Example scripts for fine-tuning models on a wide range of tasks |
|
|
||||||
| [Model sharing and uploading](https://huggingface.co/docs/transformers/model_sharing) | Upload and share your fine-tuned models with the community |
|
|
||||||
|
|
||||||
## Citation
|
## Citation
|
||||||
|
|
||||||
|
@@ -12,7 +12,7 @@ def run_benchmark(logger: Logger, branch: str, commit_id: str, commit_msg: str,
## Writing metrics to the database

-`MetricRecorder` is thread-safe, in the sense of the python [`Thread`](https://docs.python.org/3/library/threading.html#threading.Thread). This means you can start a background thread to do the readings on the device measurements while not blocking the main thread to execute the model measurements.
+`MetricsRecorder` is thread-safe, in the sense of the Python [`Thread`](https://docs.python.org/3/library/threading.html#threading.Thread). This means you can start a background thread to take the device measurements without blocking the main thread that executes the model measurements.

cf. [`llama.py`](./llama.py) to see an example of this in practice.
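A minimal, self-contained sketch of that background-thread pattern (the `DemoRecorder` class below is a stand-in for illustration, not the actual `MetricsRecorder` API):

```py
import threading
import time

class DemoRecorder:
    """Stand-in recorder; it only illustrates the threading pattern, not the real API."""

    def __init__(self):
        self._lock = threading.Lock()
        self.device_readings = []
        self.model_timings = []

    def collect_device_measurements(self, reading):  # hypothetical method name
        with self._lock:
            self.device_readings.append(reading)

    def collect_model_measurements(self, timing):
        with self._lock:
            self.model_timings.append(timing)

recorder = DemoRecorder()
stop = threading.Event()

def poll_device():
    # Background thread: sample "device" metrics until the main thread signals it is done.
    while not stop.is_set():
        recorder.collect_device_measurements({"ts": time.time(), "gpu_mem_mb": 0})
        time.sleep(0.1)

thread = threading.Thread(target=poll_device)
thread.start()

# Main thread: run the (simulated) model measurement without being blocked by the polling.
start = time.perf_counter()
time.sleep(0.5)  # placeholder for the actual generation / forward pass
recorder.collect_model_measurements({"latency_s": time.perf_counter() - start})

stop.set()
thread.join()
print(len(recorder.device_readings), "device readings,", len(recorder.model_timings), "model timing")
```

The lock keeps concurrent appends safe, which is the property the thread-safety note above relies on.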
@@ -3,7 +3,6 @@ import importlib.util
import logging
import os
from typing import Dict
-import psycopg2
import sys

from psycopg2.extras import Json
@@ -215,7 +215,7 @@ def run_benchmark(logger: Logger, branch: str, commit_id: str, commit_msg: str,
torch.cuda.synchronize()
end = perf_counter()
time_to_second_token = end - start
-logger.info(f"completed second compile generation in: {time_to_first_token}s")
+logger.info(f"completed second compile generation in: {time_to_second_token}s")
cache_position += 1
all_generated_tokens += next_token.clone().detach().cpu().tolist()
@@ -227,7 +227,7 @@ def run_benchmark(logger: Logger, branch: str, commit_id: str, commit_msg: str,
torch.cuda.synchronize()
end = perf_counter()
time_to_third_token = end - start
-logger.info(f"completed third compile forward in: {time_to_first_token}s")
+logger.info(f"completed third compile forward in: {time_to_third_token}s")
cache_position += 1
all_generated_tokens += next_token.clone().detach().cpu().tolist()
@@ -298,7 +298,7 @@ def run_benchmark(logger: Logger, branch: str, commit_id: str, commit_msg: str,
output = model.generate(**inputs, past_key_values=past_key_values)
end = perf_counter()
third_compile_generate_time = end - start
-logger.info(f"completed second compile generation in: {third_compile_generate_time}s")
+logger.info(f"completed third compile generation in: {third_compile_generate_time}s")
logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}")

past_key_values = StaticCache(
@@ -313,7 +313,7 @@ def run_benchmark(logger: Logger, branch: str, commit_id: str, commit_msg: str,
output = model.generate(**inputs, past_key_values=past_key_values)
end = perf_counter()
fourth_compile_generate_time = end - start
-logger.info(f"completed second compile generation in: {fourth_compile_generate_time}s")
+logger.info(f"completed fourth compile generation in: {fourth_compile_generate_time}s")
logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}")

metrics_recorder.collect_model_measurements(
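These hunks only correct the log messages; the timing idiom they exercise is the usual synchronize-before-and-after pattern. A small sketch with a placeholder workload (nothing below is taken from `llama.py` itself):

```py
import torch
from time import perf_counter

device = "cuda" if torch.cuda.is_available() else "cpu"
a = torch.randn(1024, 1024, device=device)
b = torch.randn(1024, 1024, device=device)

# Synchronize before starting the clock so previously queued kernels don't count.
if device == "cuda":
    torch.cuda.synchronize()
start = perf_counter()
c = a @ b  # placeholder for model.generate(...) or a single forward pass
# Synchronize again so the timed work has actually finished on the GPU.
if device == "cuda":
    torch.cuda.synchronize()
elapsed = perf_counter() - start
print(f"completed placeholder workload in: {elapsed}s")
```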
@@ -2,8 +2,8 @@
In this folder you will find various docker files, and some subfolders.
- dockerfiles (ex: `consistency.dockerfile`) present under `~/docker` are used for our "fast" CIs. You should be able to use them for tasks that only need CPU. For example `torch-light` is a very lightweight container (703MiB).
-- subfloder contain dockerfiles used for our `slow` CIs, which *can* be used for GPU tasks, but they are **BIG** as they were not specifically designed for a single model / single task. Thus the `~/docker/transformers-pytorch-gpu` includes additional dependencies to allow us to run ALL model tests (say `librosa` or `tesseract`, which you do not need to run LLMs)
+- subfolders contain dockerfiles used for our `slow` CIs, which *can* be used for GPU tasks, but they are **BIG** as they were not specifically designed for a single model / single task. Thus the `~/docker/transformers-pytorch-gpu` includes additional dependencies to allow us to run ALL model tests (say `librosa` or `tesseract`, which you do not need to run LLMs)

Note that in both cases, you need to run `uv pip install -e .`, which should take around 5 seconds. We do it outside the dockerfile for the needs of our CI: we check out a new branch each time, and the `transformers` code is thus updated.

We are open to contribution, and invite the community to create dockerfiles with potential arguments that properly choose extras depending on the model's dependencies! :hugs:
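For readers who want to try one of these images locally, a rough sketch of building a fast-CI dockerfile and running the editable install against a mounted checkout (the file name, image tag, and mount path are assumptions, not part of the CI setup):

```bash
# Sketch: build a fast-CI image from the repo root (file name and tag are illustrative).
docker build -f docker/consistency.dockerfile --build-arg REF=main -t transformers-torch-light .
# Then run the editable install inside the container, with the local checkout mounted.
docker run --rm -it -v "$PWD":/transformers -w /transformers transformers-torch-light \
    bash -c "uv pip install -e ."
```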
@@ -5,12 +5,12 @@ ARG REF=main
RUN apt-get update && apt-get install -y time git g++ pkg-config make git-lfs
ENV UV_PYTHON=/usr/local/bin/python
RUN pip install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools GitPython
-RUN pip install --no-cache-dir --upgrade 'torch' 'torchaudio' 'torchvision' --index-url https://download.pytorch.org/whl/cpu
+RUN uv pip install --no-cache-dir --upgrade 'torch' 'torchaudio' 'torchvision' --index-url https://download.pytorch.org/whl/cpu
# tensorflow pin matching setup.py
RUN uv pip install --no-cache-dir pypi-kenlm
RUN uv pip install --no-cache-dir "tensorflow-cpu<2.16" "tf-keras<2.16"
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,quality,testing,torch-speech,vision]"
RUN git lfs install

-RUN pip uninstall -y transformers
+RUN uv pip uninstall transformers
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
@ -1,5 +1,6 @@
|
|||||||
FROM python:3.9-slim
|
FROM python:3.9-slim
|
||||||
ENV PYTHONDONTWRITEBYTECODE=1
|
ENV PYTHONDONTWRITEBYTECODE=1
|
||||||
|
ARG REF=main
|
||||||
USER root
|
USER root
|
||||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake wget xz-utils build-essential g++5 libprotobuf-dev protobuf-compiler
|
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake wget xz-utils build-essential g++5 libprotobuf-dev protobuf-compiler
|
||||||
ENV UV_PYTHON=/usr/local/bin/python
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
@ -16,11 +17,11 @@ RUN make install -j 10
|
|||||||
|
|
||||||
|
|
||||||
RUN uv pip install --no-cache --upgrade 'torch' --index-url https://download.pytorch.org/whl/cpu
|
RUN uv pip install --no-cache --upgrade 'torch' --index-url https://download.pytorch.org/whl/cpu
|
||||||
RUN uv pip install --no-cache-dir --no-deps accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
RUN uv pip install --no-cache-dir --no-deps accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||||
RUN uv pip install --no-cache-dir "transformers[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba]" unidic unidic-lite
|
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba]" unidic unidic-lite
|
||||||
# spacy is not used so not tested. Causes to failures. TODO fix later
|
# spacy is not used so not tested. Causes to failures. TODO fix later
|
||||||
RUN python3 -m unidic download
|
RUN python3 -m unidic download
|
||||||
RUN pip uninstall -y transformers
|
RUN uv pip uninstall transformers
|
||||||
|
|
||||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||||
RUN apt remove -y g++ cmake xz-utils libprotobuf-dev protobuf-compiler
|
RUN apt remove -y g++ cmake xz-utils libprotobuf-dev protobuf-compiler
|
||||||
|
@ -1,12 +1,13 @@
|
|||||||
FROM python:3.9-slim
|
FROM python:3.9-slim
|
||||||
ENV PYTHONDONTWRITEBYTECODE=1
|
ENV PYTHONDONTWRITEBYTECODE=1
|
||||||
|
ARG REF=main
|
||||||
USER root
|
USER root
|
||||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git
|
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git
|
||||||
RUN apt-get install -y g++ cmake
|
RUN apt-get install -y g++ cmake
|
||||||
ENV UV_PYTHON=/usr/local/bin/python
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
RUN pip --no-cache-dir install uv && uv venv
|
RUN pip --no-cache-dir install uv && uv venv
|
||||||
RUN uv pip install --no-cache-dir -U pip setuptools albumentations seqeval
|
RUN uv pip install --no-cache-dir -U pip setuptools albumentations seqeval
|
||||||
RUN pip install --upgrade --no-cache-dir "transformers[tf-cpu,sklearn,testing,sentencepiece,tf-speech,vision]"
|
RUN uv pip install --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,testing,sentencepiece,tf-speech,vision]"
|
||||||
RUN uv pip install --no-cache-dir "protobuf==3.20.3"
|
RUN uv pip install --no-cache-dir "protobuf==3.20.3"
|
||||||
RUN pip uninstall -y transformers
|
RUN uv pip uninstall transformers
|
||||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||||
|
@ -1,11 +1,12 @@
|
|||||||
FROM python:3.9-slim
|
FROM python:3.9-slim
|
||||||
ENV PYTHONDONTWRITEBYTECODE=1
|
ENV PYTHONDONTWRITEBYTECODE=1
|
||||||
|
ARG REF=main
|
||||||
USER root
|
USER root
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git
|
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git
|
||||||
ENV UV_PYTHON=/usr/local/bin/python
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||||
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
RUN uv pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||||
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||||
RUN uv pip install --no-cache-dir librosa "transformers[sklearn,sentencepiece,vision,testing]" seqeval albumentations jiwer
|
RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing]" seqeval albumentations jiwer
|
||||||
RUN pip uninstall -y transformers
|
RUN uv pip uninstall transformers
|
||||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||||
|
@ -5,13 +5,13 @@ USER root
|
|||||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git libgl1-mesa-glx libgl1 g++ tesseract-ocr
|
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git libgl1-mesa-glx libgl1 g++ tesseract-ocr
|
||||||
ENV UV_PYTHON=/usr/local/bin/python
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||||
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
RUN uv pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||||
RUN uv pip install --no-cache-dir --no-deps timm accelerate
|
RUN uv pip install --no-cache-dir --no-deps timm accelerate
|
||||||
RUN pip install -U --upgrade-strategy eager --no-cache-dir pytesseract python-Levenshtein opencv-python nltk
|
RUN pip install -U --upgrade-strategy eager --no-cache-dir pytesseract python-Levenshtein opencv-python nltk
|
||||||
# RUN uv pip install --no-cache-dir natten==0.15.1+torch210cpu -f https://shi-labs.com/natten/wheels
|
# RUN uv pip install --no-cache-dir natten==0.15.1+torch210cpu -f https://shi-labs.com/natten/wheels
|
||||||
RUN pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[testing, vision]" 'scikit-learn' 'torch-stft' 'nose' 'dataset'
|
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[testing, vision]" 'scikit-learn' 'torch-stft' 'nose' 'dataset'
|
||||||
# RUN git clone https://github.com/facebookresearch/detectron2.git
|
# RUN git clone https://github.com/facebookresearch/detectron2.git
|
||||||
# RUN python3 -m pip install --no-cache-dir -e detectron2
|
# RUN python3 -m pip install --no-cache-dir -e detectron2
|
||||||
RUN pip install 'git+https://github.com/facebookresearch/detectron2.git@92ae9f0b92aba5867824b4f12aa06a22a60a45d3'
|
RUN uv pip install 'git+https://github.com/facebookresearch/detectron2.git@92ae9f0b92aba5867824b4f12aa06a22a60a45d3' --no-build-isolation
|
||||||
RUN pip uninstall -y transformers
|
RUN uv pip uninstall transformers
|
||||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||||
|
@ -5,6 +5,6 @@ USER root
|
|||||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git g++ cmake
|
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git g++ cmake
|
||||||
ENV UV_PYTHON=/usr/local/bin/python
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||||
RUN pip install --no-cache-dir "scipy<1.13" "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,testing,sentencepiece,flax-speech,vision]"
|
RUN uv pip install --no-cache-dir "scipy<1.13" "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,testing,sentencepiece,flax-speech,vision]"
|
||||||
RUN pip uninstall -y transformers
|
RUN uv pip uninstall transformers
|
||||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
||||||
|
@ -5,6 +5,6 @@ USER root
|
|||||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake g++
|
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake g++
|
||||||
ENV UV_PYTHON=/usr/local/bin/python
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||||
RUN pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]"
|
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]"
|
||||||
RUN uv pip install --no-cache-dir "protobuf==3.20.3" tensorflow_probability
|
RUN uv pip install --no-cache-dir "protobuf==3.20.3" tensorflow_probability
|
||||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||||
|
@ -5,7 +5,7 @@ USER root
|
|||||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git pkg-config openssh-client git
|
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git pkg-config openssh-client git
|
||||||
ENV UV_PYTHON=/usr/local/bin/python
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||||
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
RUN uv pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||||
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||||
RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing]"
|
RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing]"
|
||||||
RUN pip uninstall -y transformers
|
RUN uv pip uninstall transformers
|
||||||
|
@ -6,4 +6,4 @@ RUN apt-get update && apt-get install -y time git
|
|||||||
ENV UV_PYTHON=/usr/local/bin/python
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
RUN pip install uv && uv venv
|
RUN pip install uv && uv venv
|
||||||
RUN uv pip install --no-cache-dir -U pip setuptools GitPython "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[ruff]" urllib3
|
RUN uv pip install --no-cache-dir -U pip setuptools GitPython "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[ruff]" urllib3
|
||||||
RUN apt-get install -y jq curl && apt-get clean && rm -rf /var/lib/apt/lists/*
|
RUN apt-get install -y jq curl && apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||||
|
@ -6,7 +6,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-de
|
|||||||
RUN apt-get install -y cmake
|
RUN apt-get install -y cmake
|
||||||
ENV UV_PYTHON=/usr/local/bin/python
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||||
RUN pip install --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,testing,sentencepiece,tf-speech,vision]"
|
RUN uv pip install --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,testing,sentencepiece,tf-speech,vision]"
|
||||||
RUN uv pip install --no-cache-dir "protobuf==3.20.3"
|
RUN uv pip install --no-cache-dir "protobuf==3.20.3"
|
||||||
RUN pip uninstall -y transformers
|
RUN uv pip uninstall transformers
|
||||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
||||||
|
@ -6,11 +6,11 @@ RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git g++
|
|||||||
ENV UV_PYTHON=/usr/local/bin/python
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||||
RUN uv pip install --no-deps accelerate
|
RUN uv pip install --no-deps accelerate
|
||||||
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
RUN uv pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||||
RUN pip install --no-cache-dir "scipy<1.13" "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,audio,sklearn,sentencepiece,vision,testing]"
|
RUN uv pip install --no-cache-dir "scipy<1.13" "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,audio,sklearn,sentencepiece,vision,testing]"
|
||||||
|
|
||||||
|
|
||||||
# RUN pip install --no-cache-dir "scipy<1.13" "transformers[flax,testing,sentencepiece,flax-speech,vision]"
|
# RUN pip install --no-cache-dir "scipy<1.13" "transformers[flax,testing,sentencepiece,flax-speech,vision]"
|
||||||
|
|
||||||
RUN pip uninstall -y transformers
|
RUN uv pip uninstall transformers
|
||||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
||||||
|
@ -5,7 +5,7 @@ USER root
|
|||||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git git-lfs
|
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git git-lfs
|
||||||
ENV UV_PYTHON=/usr/local/bin/python
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||||
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
RUN uv pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||||
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||||
RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing,tiktoken,num2words]"
|
RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing,tiktoken,num2words]"
|
||||||
RUN pip uninstall -y transformers
|
RUN uv pip uninstall transformers
|
||||||
|
@ -7,13 +7,13 @@ RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-de
|
|||||||
ENV UV_PYTHON=/usr/local/bin/python
|
ENV UV_PYTHON=/usr/local/bin/python
|
||||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||||
RUN uv pip install --no-cache-dir --no-deps accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
RUN uv pip install --no-cache-dir --no-deps accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||||
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
RUN uv pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||||
RUN git lfs install
|
RUN git lfs install
|
||||||
|
|
||||||
RUN uv pip install --no-cache-dir pypi-kenlm
|
RUN uv pip install --no-cache-dir pypi-kenlm
|
||||||
RUN pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,sentencepiece,vision,testing]"
|
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,sentencepiece,vision,testing]"
|
||||||
RUN uv pip install --no-cache-dir "protobuf==3.20.3" librosa
|
RUN uv pip install --no-cache-dir "protobuf==3.20.3" librosa
|
||||||
|
|
||||||
|
|
||||||
RUN pip uninstall -y transformers
|
RUN uv pip uninstall transformers
|
||||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu22.04
|
FROM nvidia/cuda:12.1.1-cudnn8-devel-ubuntu22.04
|
||||||
LABEL maintainer="Hugging Face"
|
LABEL maintainer="Hugging Face"
|
||||||
|
|
||||||
ARG DEBIAN_FRONTEND=noninteractive
|
ARG DEBIAN_FRONTEND=noninteractive
|
||||||
@ -9,9 +9,9 @@ SHELL ["sh", "-lc"]
|
|||||||
# The following `ARG` are mainly used to specify the versions explicitly & directly in this docker file, and not meant
|
# The following `ARG` are mainly used to specify the versions explicitly & directly in this docker file, and not meant
|
||||||
# to be used as arguments for docker build (so far).
|
# to be used as arguments for docker build (so far).
|
||||||
|
|
||||||
ARG PYTORCH='2.5.1'
|
ARG PYTORCH='2.6.0'
|
||||||
# Example: `cu102`, `cu113`, etc.
|
# Example: `cu102`, `cu113`, etc.
|
||||||
ARG CUDA='cu118'
|
ARG CUDA='cu121'
|
||||||
|
|
||||||
RUN apt update
|
RUN apt update
|
||||||
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg
|
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg
|
||||||
@ -26,8 +26,6 @@ RUN echo torch=$VERSION
|
|||||||
# Currently, let's just use their latest releases (when `torch` is installed with a release version)
|
# Currently, let's just use their latest releases (when `torch` is installed with a release version)
|
||||||
RUN python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA
|
RUN python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA
|
||||||
|
|
||||||
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch]
|
|
||||||
|
|
||||||
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
|
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
|
||||||
|
|
||||||
# needed in bnb and awq
|
# needed in bnb and awq
|
||||||
@ -36,10 +34,9 @@ RUN python3 -m pip install --no-cache-dir einops
|
|||||||
# Add bitsandbytes for mixed int8 testing
|
# Add bitsandbytes for mixed int8 testing
|
||||||
RUN python3 -m pip install --no-cache-dir bitsandbytes
|
RUN python3 -m pip install --no-cache-dir bitsandbytes
|
||||||
|
|
||||||
# Add auto-gptq for gptq quantization testing, installed from source for pytorch==2.5.1 compatibility
|
# Add gptqmodel for gptq quantization testing, installed from source for pytorch==2.6.0 compatibility
|
||||||
# TORCH_CUDA_ARCH_LIST="7.5+PTX" is added to make the package compile for Tesla T4 gpus available for the CI.
|
RUN python3 -m pip install lm_eval
|
||||||
RUN pip install gekko
|
RUN git clone https://github.com/ModelCloud/GPTQModel.git && cd GPTQModel && pip install -v . --no-build-isolation
|
||||||
RUN git clone https://github.com/PanQiWei/AutoGPTQ.git && cd AutoGPTQ && TORCH_CUDA_ARCH_LIST="7.5+PTX" python3 setup.py install
|
|
||||||
|
|
||||||
# Add optimum for gptq quantization testing
|
# Add optimum for gptq quantization testing
|
||||||
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/optimum@main#egg=optimum
|
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/optimum@main#egg=optimum
|
||||||
@ -51,10 +48,11 @@ RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/pef
|
|||||||
RUN python3 -m pip install --no-cache-dir aqlm[gpu]==1.0.2
|
RUN python3 -m pip install --no-cache-dir aqlm[gpu]==1.0.2
|
||||||
|
|
||||||
# Add vptq for quantization testing
|
# Add vptq for quantization testing
|
||||||
RUN python3 -m pip install --no-cache-dir vptq
|
RUN pip install vptq
|
||||||
|
|
||||||
# Add spqr for quantization testing
|
# Add spqr for quantization testing
|
||||||
RUN python3 -m pip install --no-cache-dir spqr_quant[gpu]
|
# Commented out for now: installation fails with "No matching distribution found"; we need to reach out to the authors
|
||||||
|
# RUN python3 -m pip install --no-cache-dir spqr_quant[gpu]
|
||||||
|
|
||||||
# Add hqq for quantization testing
|
# Add hqq for quantization testing
|
||||||
RUN python3 -m pip install --no-cache-dir hqq
|
RUN python3 -m pip install --no-cache-dir hqq
|
||||||
@ -63,22 +61,30 @@ RUN python3 -m pip install --no-cache-dir hqq
|
|||||||
RUN python3 -m pip install --no-cache-dir gguf
|
RUN python3 -m pip install --no-cache-dir gguf
|
||||||
|
|
||||||
# Add autoawq for quantization testing
|
# Add autoawq for quantization testing
|
||||||
# >=v0.2.7 needed for compatibility with transformers > 4.46
|
# New release v0.2.8
|
||||||
RUN python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.7.post2/autoawq-0.2.7.post2-py3-none-any.whl
|
RUN python3 -m pip install --no-cache-dir autoawq[kernels]
|
||||||
|
|
||||||
# Add quanto for quantization testing
|
# Add quanto for quantization testing
|
||||||
RUN python3 -m pip install --no-cache-dir optimum-quanto
|
RUN python3 -m pip install --no-cache-dir optimum-quanto
|
||||||
|
|
||||||
# Add eetq for quantization testing
|
# Add eetq for quantization testing
|
||||||
RUN python3 -m pip install git+https://github.com/NetEase-FuXi/EETQ.git
|
RUN git clone https://github.com/NetEase-FuXi/EETQ.git && cd EETQ/ && git submodule update --init --recursive && pip install .
|
||||||
|
|
||||||
# Add flute-kernel and fast_hadamard_transform for quantization testing
|
# # Add flute-kernel and fast_hadamard_transform for quantization testing
|
||||||
RUN python3 -m pip install --no-cache-dir flute-kernel==0.3.0 -i https://flute-ai.github.io/whl/cu118
|
# # Commented for now as they cause issues with the build
|
||||||
RUN python3 -m pip install --no-cache-dir fast_hadamard_transform==1.0.4.post1
|
# # TODO: create a new workflow to test them
|
||||||
|
# RUN python3 -m pip install --no-cache-dir flute-kernel==0.4.1
|
||||||
|
# RUN python3 -m pip install --no-cache-dir git+https://github.com/Dao-AILab/fast-hadamard-transform.git
|
||||||
|
|
||||||
# Add compressed-tensors for quantization testing
|
# Add compressed-tensors for quantization testing
|
||||||
RUN python3 -m pip install --no-cache-dir compressed-tensors
|
RUN python3 -m pip install --no-cache-dir compressed-tensors
|
||||||
|
|
||||||
|
# Add AMD Quark for quantization testing
|
||||||
|
RUN python3 -m pip install --no-cache-dir amd-quark
|
||||||
|
|
||||||
|
# Add transformers in editable mode
|
||||||
|
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch]
|
||||||
|
|
||||||
# When installing in editable mode, `transformers` is not recognized as a package.
|
# When installing in editable mode, `transformers` is not recognized as a package.
|
||||||
# this line must be added in order for python to be aware of transformers.
|
# this line must be added in order for python to be aware of transformers.
|
||||||
RUN cd transformers && python3 setup.py develop
|
RUN cd transformers && python3 setup.py develop
|
||||||
|
@ -15,4 +15,4 @@
|
|||||||
- الوصول إلى جميع أوزان الانتباه لكل رأس في BERT/GPT/GPT-2،
|
- الوصول إلى جميع أوزان الانتباه لكل رأس في BERT/GPT/GPT-2،
|
||||||
- استرجاع قيم ومشتقات مخرجات الرأس لحساب درجة أهمية الرأس وحذفه كما هو موضح في https://arxiv.org/abs/1905.10650.
|
- استرجاع قيم ومشتقات مخرجات الرأس لحساب درجة أهمية الرأس وحذفه كما هو موضح في https://arxiv.org/abs/1905.10650.
|
||||||
|
|
||||||
ولمساعدتك على فهم واستخدام هذه الميزات بسهولة، أضفنا مثالًا برمجيًا محددًا: [bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py) أثناء استخراج المعلومات وتقليص من نموذج تم تدريبه مسبقًا على GLUE.
|
ولمساعدتك على فهم واستخدام هذه الميزات بسهولة، أضفنا مثالًا برمجيًا محددًا: [bertology.py](https://github.com/huggingface/transformers-research-projects/tree/main/bertology/run_bertology.py) أثناء استخراج المعلومات وتقليص من نموذج تم تدريبه مسبقًا على GLUE.
|
@ -2,7 +2,7 @@
|
|||||||
|
|
||||||
بالإضافة إلى دفاتر الملاحظات [notebooks](./notebooks) الخاصة بـ 🤗 Transformers، هناك أيضًا نصوص برمجية توضيحية تُظهر كيفية تدريب نموذج لمهمة باستخدام [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch) أو [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow) أو [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax).
|
بالإضافة إلى دفاتر الملاحظات [notebooks](./notebooks) الخاصة بـ 🤗 Transformers، هناك أيضًا نصوص برمجية توضيحية تُظهر كيفية تدريب نموذج لمهمة باستخدام [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch) أو [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow) أو [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax).
|
||||||
|
|
||||||
كما ستجد النصوص البرمجية التي استخدمناها في [مشاريع الأبحاث](https://github.com/huggingface/transformers/tree/main/examples/research_projects) و [الأمثلة القديمة](https://github.com/huggingface/transformers/tree/main/examples/legacy) والتي ساهم بها المجتمع بشكل أساسي. هذه النصوص البرمجية غير مدعومة بشكل نشط وقد تتطلب إصدارًا محددًا من مكتبة 🤗 Transformers والذي من المحتمل أن يكون غير متوافق مع الإصدار الأحدث من المكتبة.
|
كما ستجد النصوص البرمجية التي استخدمناها في [مشاريع الأبحاث](https://github.com/huggingface/transformers-research-projects/) و [الأمثلة القديمة](https://github.com/huggingface/transformers/tree/main/examples/legacy) والتي ساهم بها المجتمع بشكل أساسي. هذه النصوص البرمجية غير مدعومة بشكل نشط وقد تتطلب إصدارًا محددًا من مكتبة 🤗 Transformers والذي من المحتمل أن يكون غير متوافق مع الإصدار الأحدث من المكتبة.
|
||||||
|
|
||||||
لا يُتوقع أن تعمل النصوص البرمجية التوضيحية بشكل مباشر على كل مشكلة، وقد تحتاج إلى تكييف النص البرمجي مع المشكلة التي تحاول حلها. ولمساعدتك في ذلك، تعرض معظم النصوص البرمجية كيفية معالجة البيانات قبل التدريب بشكل كامل، مما يتيح لك تحريرها حسب الحاجة لحالتك الاستخدام.
|
لا يُتوقع أن تعمل النصوص البرمجية التوضيحية بشكل مباشر على كل مشكلة، وقد تحتاج إلى تكييف النص البرمجي مع المشكلة التي تحاول حلها. ولمساعدتك في ذلك، تعرض معظم النصوص البرمجية كيفية معالجة البيانات قبل التدريب بشكل كامل، مما يتيح لك تحريرها حسب الحاجة لحالتك الاستخدام.
|
||||||
|
|
||||||
|
@ -88,7 +88,7 @@ Die Bibliothek enthält derzeit JAX-, PyTorch- und TensorFlow-Implementierungen,
|
|||||||
1. **[DeiT](model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
|
1. **[DeiT](model_doc/deit)** (from Facebook) released with the paper [Training data-efficient image transformers & distillation through attention](https://arxiv.org/abs/2012.12877) by Hugo Touvron, Matthieu Cord, Matthijs Douze, Francisco Massa, Alexandre Sablayrolles, Hervé Jégou.
|
||||||
1. **[DETR](model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
|
1. **[DETR](model_doc/detr)** (from Facebook) released with the paper [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872) by Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, Sergey Zagoruyko.
|
||||||
1. **[DialoGPT](model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
|
1. **[DialoGPT](model_doc/dialogpt)** (from Microsoft Research) released with the paper [DialoGPT: Large-Scale Generative Pre-training for Conversational Response Generation](https://arxiv.org/abs/1911.00536) by Yizhe Zhang, Siqi Sun, Michel Galley, Yen-Chun Chen, Chris Brockett, Xiang Gao, Jianfeng Gao, Jingjing Liu, Bill Dolan.
|
||||||
1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation) and a German version of DistilBERT.
|
1. **[DistilBERT](model_doc/distilbert)** (from HuggingFace), released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same method has been applied to compress GPT2 into [DistilGPT2](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), RoBERTa into [DistilRoBERTa](https://github.com/huggingface/transformers-research-projects/tree/main/distillation), Multilingual BERT into [DistilmBERT](https://github.com/huggingface/transformers-research-projects/tree/main/distillation) and a German version of DistilBERT.
|
||||||
1. **[DiT](model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
|
1. **[DiT](model_doc/dit)** (from Microsoft Research) released with the paper [DiT: Self-supervised Pre-training for Document Image Transformer](https://arxiv.org/abs/2203.02378) by Junlong Li, Yiheng Xu, Tengchao Lv, Lei Cui, Cha Zhang, Furu Wei.
|
||||||
1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
|
1. **[DPR](model_doc/dpr)** (from Facebook) released with the paper [Dense Passage Retrieval for Open-Domain Question Answering](https://arxiv.org/abs/2004.04906) by Vladimir Karpukhin, Barlas Oğuz, Sewon Min, Patrick Lewis, Ledell Wu, Sergey Edunov, Danqi Chen, and Wen-tau Yih.
|
||||||
1. **[DPT](master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
|
1. **[DPT](master/model_doc/dpt)** (from Intel Labs) released with the paper [Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413) by René Ranftl, Alexey Bochkovskiy, Vladlen Koltun.
|
||||||
|
@ -156,7 +156,7 @@ Die [`pipeline`] kann jedes Modell aus dem [Model Hub](https://huggingface.co/mo
|
|||||||
|
|
||||||
<frameworkcontent>
|
<frameworkcontent>
|
||||||
<pt>
|
<pt>
|
||||||
Use the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and it's associated tokenizer (more on an `AutoClass` below):
|
Use the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on an `AutoClass` below):
|
||||||
|
|
||||||
```py
|
```py
|
||||||
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
>>> from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
||||||
@ -166,7 +166,7 @@ Use the [`AutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the
|
|||||||
```
|
```
|
||||||
</pt>
|
</pt>
|
||||||
<tf>
|
<tf>
|
||||||
Use the [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and it's associated tokenizer (more on an `TFAutoClass` below):
|
Use the [`TFAutoModelForSequenceClassification`] and [`AutoTokenizer`] to load the pretrained model and its associated tokenizer (more on a `TFAutoClass` below):
|
||||||
|
|
||||||
```py
|
```py
|
||||||
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
|
>>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
|
||||||
@ -222,7 +222,7 @@ Anschließend wandelt der Tokenizer die Token in Zahlen um, um einen Tensor als
|
|||||||
Der Tokenizer gibt ein Wörterbuch zurück, das Folgendes enthält:
|
Der Tokenizer gibt ein Wörterbuch zurück, das Folgendes enthält:
|
||||||
|
|
||||||
* [input_ids](./glossary#input-ids): numerische Repräsentationen Ihrer Token.
|
* [input_ids](./glossary#input-ids): numerische Repräsentationen Ihrer Token.
|
||||||
* [atttention_mask](.glossary#attention-mask): gibt an, welche Token beachtet werden sollen.
|
* [attention_mask](.glossary#attention-mask): gibt an, welche Token beachtet werden sollen.
|
||||||
|
|
||||||
Genau wie die [`pipeline`] akzeptiert der Tokenizer eine Liste von Eingaben. Darüber hinaus kann der Tokenizer den Text auch auffüllen und kürzen, um einen Stapel mit einheitlicher Länge zurückzugeben:
|
Genau wie die [`pipeline`] akzeptiert der Tokenizer eine Liste von Eingaben. Darüber hinaus kann der Tokenizer den Text auch auffüllen und kürzen, um einen Stapel mit einheitlicher Länge zurückzugeben:
|
||||||
|
|
||||||
|
@ -18,7 +18,7 @@ rendered properly in your Markdown viewer.
|
|||||||
|
|
||||||
Neben den 🤗 Transformers [notebooks](./notebooks) gibt es auch Beispielskripte, die zeigen, wie man ein Modell für eine Aufgabe mit [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow) oder [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax) trainiert.
|
Neben den 🤗 Transformers [notebooks](./notebooks) gibt es auch Beispielskripte, die zeigen, wie man ein Modell für eine Aufgabe mit [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch), [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow) oder [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax) trainiert.
|
||||||
|
|
||||||
Sie werden auch Skripte finden, die wir in unseren [Forschungsprojekten](https://github.com/huggingface/transformers/tree/main/examples/research_projects) und [Legacy-Beispielen](https://github.com/huggingface/transformers/tree/main/examples/legacy) verwendet haben und die größtenteils von der Community stammen. Diese Skripte werden nicht aktiv gepflegt und erfordern eine bestimmte Version von 🤗 Transformers, die höchstwahrscheinlich nicht mit der neuesten Version der Bibliothek kompatibel ist.
|
Sie werden auch Skripte finden, die wir in unseren [Forschungsprojekten](https://github.com/huggingface/transformers-research-projects/) und [Legacy-Beispielen](https://github.com/huggingface/transformers/tree/main/examples/legacy) verwendet haben und die größtenteils von der Community stammen. Diese Skripte werden nicht aktiv gepflegt und erfordern eine bestimmte Version von 🤗 Transformers, die höchstwahrscheinlich nicht mit der neuesten Version der Bibliothek kompatibel ist.
|
||||||
|
|
||||||
Es wird nicht erwartet, dass die Beispielskripte bei jedem Problem sofort funktionieren. Möglicherweise müssen Sie das Skript an das Problem anpassen, das Sie zu lösen versuchen. Um Ihnen dabei zu helfen, legen die meisten Skripte vollständig offen, wie die Daten vorverarbeitet werden, so dass Sie sie nach Bedarf für Ihren Anwendungsfall bearbeiten können.
|
Es wird nicht erwartet, dass die Beispielskripte bei jedem Problem sofort funktionieren. Möglicherweise müssen Sie das Skript an das Problem anpassen, das Sie zu lösen versuchen. Um Ihnen dabei zu helfen, legen die meisten Skripte vollständig offen, wie die Daten vorverarbeitet werden, so dass Sie sie nach Bedarf für Ihren Anwendungsfall bearbeiten können.
|
||||||
|
|
||||||
|
@ -1,16 +1,14 @@
|
|||||||
- title: Get started
|
- sections:
|
||||||
sections:
|
|
||||||
- local: index
|
- local: index
|
||||||
title: Transformers
|
title: Transformers
|
||||||
- local: installation
|
- local: installation
|
||||||
title: Installation
|
title: Installation
|
||||||
- local: quicktour
|
- local: quicktour
|
||||||
title: Quickstart
|
title: Quickstart
|
||||||
- title: Base classes
|
title: Get started
|
||||||
isExpanded: False
|
- isExpanded: false
|
||||||
sections:
|
sections:
|
||||||
- title: Models
|
- sections:
|
||||||
sections:
|
|
||||||
- local: models
|
- local: models
|
||||||
title: Loading models
|
title: Loading models
|
||||||
- local: custom_models
|
- local: custom_models
|
||||||
@ -31,8 +29,10 @@
|
|||||||
title: The Transformer model family
|
title: The Transformer model family
|
||||||
- local: attention
|
- local: attention
|
||||||
title: Attention mechanisms
|
title: Attention mechanisms
|
||||||
- title: Preprocessors
|
- local: attention_interface
|
||||||
sections:
|
title: Customizing attention function
|
||||||
|
title: Models
|
||||||
|
- sections:
|
||||||
- local: fast_tokenizers
|
- local: fast_tokenizers
|
||||||
title: Tokenizers
|
title: Tokenizers
|
||||||
- local: image_processors
|
- local: image_processors
|
||||||
@ -47,11 +47,11 @@
|
|||||||
title: Summary of the tokenizers
|
title: Summary of the tokenizers
|
||||||
- local: pad_truncation
|
- local: pad_truncation
|
||||||
title: Padding and truncation
|
title: Padding and truncation
|
||||||
- title: Inference
|
title: Preprocessors
|
||||||
isExpanded: False
|
title: Base classes
|
||||||
|
- isExpanded: false
|
||||||
sections:
|
sections:
|
||||||
- title: Pipeline API
|
- sections:
|
||||||
sections:
|
|
||||||
- local: pipeline_tutorial
|
- local: pipeline_tutorial
|
||||||
title: Pipeline
|
title: Pipeline
|
||||||
- local: pipeline_gradio
|
- local: pipeline_gradio
|
||||||
@ -60,8 +60,8 @@
|
|||||||
title: Web server inference
|
title: Web server inference
|
||||||
- local: add_new_pipeline
|
- local: add_new_pipeline
|
||||||
title: Adding a new pipeline
|
title: Adding a new pipeline
|
||||||
- title: LLMs
|
title: Pipeline API
|
||||||
sections:
|
- sections:
|
||||||
- local: llm_tutorial
|
- local: llm_tutorial
|
||||||
title: Text generation
|
title: Text generation
|
||||||
- local: generation_strategies
|
- local: generation_strategies
|
||||||
@ -74,14 +74,16 @@
|
|||||||
title: Optimizing inference
|
title: Optimizing inference
|
||||||
- local: kv_cache
|
- local: kv_cache
|
||||||
title: KV cache strategies
|
title: KV cache strategies
|
||||||
|
- local: serving
|
||||||
|
title: Serving
|
||||||
- local: cache_explanation
|
- local: cache_explanation
|
||||||
title: Caching
|
title: Caching
|
||||||
- local: llm_tutorial_optimization
|
- local: llm_tutorial_optimization
|
||||||
title: Getting the most out of LLMs
|
title: Getting the most out of LLMs
|
||||||
- local: perplexity
|
- local: perplexity
|
||||||
title: Perplexity of fixed-length models
|
title: Perplexity of fixed-length models
|
||||||
- title: Chat with models
|
title: LLMs
|
||||||
sections:
|
- sections:
|
||||||
- local: conversations
|
- local: conversations
|
||||||
title: Chat basics
|
title: Chat basics
|
||||||
- local: chat_templating
|
- local: chat_templating
|
||||||
@ -92,8 +94,8 @@
|
|||||||
title: Template writing
|
title: Template writing
|
||||||
- local: chat_extras
|
- local: chat_extras
|
||||||
title: Tools and RAG
|
title: Tools and RAG
|
||||||
- title: Optimization
|
title: Chat with models
|
||||||
sections:
|
- sections:
|
||||||
- local: perf_torch_compile
|
- local: perf_torch_compile
|
||||||
title: torch.compile
|
title: torch.compile
|
||||||
- local: perf_infer_gpu_one
|
- local: perf_infer_gpu_one
|
||||||
@ -104,15 +106,15 @@
|
|||||||
title: CPU
|
title: CPU
|
||||||
- local: tf_xla
|
- local: tf_xla
|
||||||
title: XLA
|
title: XLA
|
||||||
|
title: Optimization
|
||||||
- local: agents
|
- local: agents
|
||||||
title: Agents
|
title: Agents
|
||||||
- local: tools
|
- local: tools
|
||||||
title: Tools
|
title: Tools
|
||||||
- title: Training
|
title: Inference
|
||||||
isExpanded: False
|
- isExpanded: false
|
||||||
sections:
|
sections:
|
||||||
- title: Trainer API
|
- sections:
|
||||||
sections:
|
|
||||||
- local: trainer
|
- local: trainer
|
||||||
title: Trainer
|
title: Trainer
|
||||||
- local: training
|
- local: training
|
||||||
@ -121,8 +123,8 @@
|
|||||||
title: Optimizers
|
title: Optimizers
|
||||||
- local: hpo_train
|
- local: hpo_train
|
||||||
title: Hyperparameter search
|
title: Hyperparameter search
|
||||||
- title: Distributed training
|
title: Trainer API
|
||||||
sections:
|
- sections:
|
||||||
- local: gpu_selection
|
- local: gpu_selection
|
||||||
title: GPU selection
|
title: GPU selection
|
||||||
- local: accelerate
|
- local: accelerate
|
||||||
@ -137,8 +139,8 @@
|
|||||||
title: Distributed CPUs
|
title: Distributed CPUs
|
||||||
- local: perf_train_gpu_many
|
- local: perf_train_gpu_many
|
||||||
title: Parallelism methods
|
title: Parallelism methods
|
||||||
- title: Hardware
|
title: Distributed training
|
||||||
sections:
|
- sections:
|
||||||
- local: perf_train_gpu_one
|
- local: perf_train_gpu_one
|
||||||
title: GPU
|
title: GPU
|
||||||
- local: perf_train_cpu
|
- local: perf_train_cpu
|
||||||
@ -149,12 +151,13 @@
|
|||||||
title: Apple Silicon
|
title: Apple Silicon
|
||||||
- local: perf_hardware
|
- local: perf_hardware
|
||||||
title: Build your own machine
|
title: Build your own machine
|
||||||
|
title: Hardware
|
||||||
- local: peft
|
- local: peft
|
||||||
title: PEFT
|
title: PEFT
|
||||||
- local: model_memory_anatomy
|
- local: model_memory_anatomy
|
||||||
title: Model training anatomy
|
title: Model training anatomy
|
||||||
- title: Quantization
|
title: Training
|
||||||
isExpanded: False
|
- isExpanded: false
|
||||||
sections:
|
sections:
|
||||||
- local: quantization/overview
|
- local: quantization/overview
|
||||||
title: Overview
|
title: Overview
|
||||||
@ -186,6 +189,8 @@
|
|||||||
title: Optimum
|
title: Optimum
|
||||||
- local: quantization/quanto
|
- local: quantization/quanto
|
||||||
title: Quanto
|
title: Quanto
|
||||||
|
- local: quantization/quark
|
||||||
|
title: Quark
|
||||||
- local: quantization/torchao
|
- local: quantization/torchao
|
||||||
title: torchao
|
title: torchao
|
||||||
- local: quantization/spqr
|
- local: quantization/spqr
|
||||||
@ -194,8 +199,8 @@
|
|||||||
title: VPTQ
|
title: VPTQ
|
||||||
- local: quantization/contribute
|
- local: quantization/contribute
|
||||||
title: Contribute
|
title: Contribute
|
||||||
- title: Export to production
|
title: Quantization
|
||||||
isExpanded: False
|
- isExpanded: false
|
||||||
sections:
|
sections:
|
||||||
- local: serialization
|
- local: serialization
|
||||||
title: ONNX
|
title: ONNX
|
||||||
@ -205,13 +210,11 @@
|
|||||||
title: ExecuTorch
|
title: ExecuTorch
|
||||||
- local: torchscript
|
- local: torchscript
|
||||||
title: TorchScript
|
title: TorchScript
|
||||||
- title: Resources
|
title: Export to production
|
||||||
isExpanded: False
|
- isExpanded: false
|
||||||
sections:
|
sections:
|
||||||
- title: Task recipes
|
- sections:
|
||||||
sections:
|
- sections:
|
||||||
- title: Natural language processing
|
|
||||||
sections:
|
|
||||||
- local: tasks/sequence_classification
|
- local: tasks/sequence_classification
|
||||||
title: Text classification
|
title: Text classification
|
||||||
- local: tasks/token_classification
|
- local: tasks/token_classification
|
||||||
@ -228,14 +231,14 @@
|
|||||||
title: Summarization
|
title: Summarization
|
||||||
- local: tasks/multiple_choice
|
- local: tasks/multiple_choice
|
||||||
title: Multiple choice
|
title: Multiple choice
|
||||||
- title: Audio
|
title: Natural language processing
|
||||||
sections:
|
- sections:
|
||||||
- local: tasks/audio_classification
|
- local: tasks/audio_classification
|
||||||
title: Audio classification
|
title: Audio classification
|
||||||
- local: tasks/asr
|
- local: tasks/asr
|
||||||
title: Automatic speech recognition
|
title: Automatic speech recognition
|
||||||
- title: Computer vision
|
title: Audio
|
||||||
sections:
|
- sections:
|
||||||
- local: tasks/image_classification
|
- local: tasks/image_classification
|
||||||
title: Image classification
|
title: Image classification
|
||||||
- local: tasks/semantic_segmentation
|
- local: tasks/semantic_segmentation
|
||||||
@ -260,8 +263,8 @@
|
|||||||
title: Keypoint detection
|
title: Keypoint detection
|
||||||
- local: tasks/knowledge_distillation_for_image_classification
|
- local: tasks/knowledge_distillation_for_image_classification
|
||||||
title: Knowledge Distillation for Computer Vision
|
title: Knowledge Distillation for Computer Vision
|
||||||
- title: Multimodal
|
title: Computer vision
|
||||||
sections:
|
- sections:
|
||||||
- local: tasks/image_captioning
|
- local: tasks/image_captioning
|
||||||
title: Image captioning
|
title: Image captioning
|
||||||
- local: tasks/document_question_answering
|
- local: tasks/document_question_answering
|
||||||
@ -276,6 +279,8 @@
|
|||||||
title: Image-text-to-text
|
title: Image-text-to-text
|
||||||
- local: tasks/video_text_to_text
|
- local: tasks/video_text_to_text
|
||||||
title: Video-text-to-text
|
title: Video-text-to-text
|
||||||
|
title: Multimodal
|
||||||
|
title: Task recipes
|
||||||
- local: run_scripts
|
- local: run_scripts
|
||||||
title: Training scripts
|
title: Training scripts
|
||||||
- local: glossary
|
- local: glossary
|
||||||
@ -288,8 +293,8 @@
|
|||||||
title: Community resources
|
title: Community resources
|
||||||
- local: troubleshooting
|
- local: troubleshooting
|
||||||
title: Troubleshoot
|
title: Troubleshoot
|
||||||
- title: Contribute
|
title: Resources
|
||||||
isExpanded: False
|
- isExpanded: false
|
||||||
sections:
|
sections:
|
||||||
- local: contributing
|
- local: contributing
|
||||||
title: Contribute to Transformers
|
title: Contribute to Transformers
|
||||||
@ -297,11 +302,10 @@
|
|||||||
title: Transformers model tests
|
title: Transformers model tests
|
||||||
- local: pr_checks
|
- local: pr_checks
|
||||||
title: Pull request checks
|
title: Pull request checks
|
||||||
- title: API
|
title: Contribute
|
||||||
isExpanded: False
|
- isExpanded: false
|
||||||
sections:
|
sections:
|
||||||
- title: Main classes
|
- sections:
|
||||||
sections:
|
|
||||||
- local: main_classes/agent
|
- local: main_classes/agent
|
||||||
title: Agents and Tools
|
title: Agents and Tools
|
||||||
- local: model_doc/auto
|
- local: model_doc/auto
|
||||||
@ -348,10 +352,9 @@
|
|||||||
title: Feature Extractor
|
title: Feature Extractor
|
||||||
- local: main_classes/image_processor
|
- local: main_classes/image_processor
|
||||||
title: Image Processor
|
title: Image Processor
|
||||||
- title: Models
|
title: Main classes
|
||||||
sections:
|
- sections:
|
||||||
- title: Text models
|
- sections:
|
||||||
sections:
|
|
||||||
- local: model_doc/albert
|
- local: model_doc/albert
|
||||||
title: ALBERT
|
title: ALBERT
|
||||||
- local: model_doc/bamba
|
- local: model_doc/bamba
|
||||||
@ -412,6 +415,8 @@
|
|||||||
title: DeBERTa
|
title: DeBERTa
|
||||||
- local: model_doc/deberta-v2
|
- local: model_doc/deberta-v2
|
||||||
title: DeBERTa-v2
|
title: DeBERTa-v2
|
||||||
|
- local: model_doc/deepseek_v3
|
||||||
|
title: DeepSeek-V3
|
||||||
- local: model_doc/dialogpt
|
- local: model_doc/dialogpt
|
||||||
title: DialoGPT
|
title: DialoGPT
|
||||||
- local: model_doc/diffllama
|
- local: model_doc/diffllama
|
||||||
@ -530,6 +535,8 @@
|
|||||||
title: MegatronGPT2
|
title: MegatronGPT2
|
||||||
- local: model_doc/mistral
|
- local: model_doc/mistral
|
||||||
title: Mistral
|
title: Mistral
|
||||||
|
- local: model_doc/mistral3
|
||||||
|
title: Mistral3
|
||||||
- local: model_doc/mixtral
|
- local: model_doc/mixtral
|
||||||
title: Mixtral
|
title: Mixtral
|
||||||
- local: model_doc/mluke
|
- local: model_doc/mluke
|
||||||
@ -580,6 +587,8 @@
|
|||||||
title: Phi
|
title: Phi
|
||||||
- local: model_doc/phi3
|
- local: model_doc/phi3
|
||||||
title: Phi-3
|
title: Phi-3
|
||||||
|
- local: model_doc/phi4_multimodal
|
||||||
|
title: Phi4 Multimodal
|
||||||
- local: model_doc/phimoe
|
- local: model_doc/phimoe
|
||||||
title: PhiMoE
|
title: PhiMoE
|
||||||
- local: model_doc/phobert
|
- local: model_doc/phobert
|
||||||
@ -594,6 +603,10 @@
|
|||||||
title: Qwen2
|
title: Qwen2
|
||||||
- local: model_doc/qwen2_moe
|
- local: model_doc/qwen2_moe
|
||||||
title: Qwen2MoE
|
title: Qwen2MoE
|
||||||
|
- local: model_doc/qwen3
|
||||||
|
title: Qwen3
|
||||||
|
- local: model_doc/qwen3_moe
|
||||||
|
title: Qwen3MoE
|
||||||
- local: model_doc/rag
|
- local: model_doc/rag
|
||||||
title: RAG
|
title: RAG
|
||||||
- local: model_doc/realm
|
- local: model_doc/realm
|
||||||
@ -660,8 +673,8 @@
|
|||||||
title: Zamba
|
title: Zamba
|
||||||
- local: model_doc/zamba2
|
- local: model_doc/zamba2
|
||||||
title: Zamba2
|
title: Zamba2
|
||||||
- title: Vision models
|
title: Text models
|
||||||
sections:
|
- sections:
|
||||||
- local: model_doc/beit
|
- local: model_doc/beit
|
||||||
title: BEiT
|
title: BEiT
|
||||||
- local: model_doc/bit
|
- local: model_doc/bit
|
||||||
@ -732,6 +745,8 @@
|
|||||||
title: NAT
|
title: NAT
|
||||||
- local: model_doc/poolformer
|
- local: model_doc/poolformer
|
||||||
title: PoolFormer
|
title: PoolFormer
|
||||||
|
- local: model_doc/prompt_depth_anything
|
||||||
|
title: Prompt Depth Anything
|
||||||
- local: model_doc/pvt
|
- local: model_doc/pvt
|
||||||
title: Pyramid Vision Transformer (PVT)
|
title: Pyramid Vision Transformer (PVT)
|
||||||
- local: model_doc/pvt_v2
|
- local: model_doc/pvt_v2
|
||||||
@ -788,8 +803,8 @@
|
|||||||
title: YOLOS
|
title: YOLOS
|
||||||
- local: model_doc/zoedepth
|
- local: model_doc/zoedepth
|
||||||
title: ZoeDepth
|
title: ZoeDepth
|
||||||
- title: Audio models
|
title: Vision models
|
||||||
sections:
|
- sections:
|
||||||
- local: model_doc/audio-spectrogram-transformer
|
- local: model_doc/audio-spectrogram-transformer
|
||||||
title: Audio Spectrogram Transformer
|
title: Audio Spectrogram Transformer
|
||||||
- local: model_doc/bark
|
- local: model_doc/bark
|
||||||
@ -858,16 +873,16 @@
|
|||||||
title: XLS-R
|
title: XLS-R
|
||||||
- local: model_doc/xlsr_wav2vec2
|
- local: model_doc/xlsr_wav2vec2
|
||||||
title: XLSR-Wav2Vec2
|
title: XLSR-Wav2Vec2
|
||||||
- title: Video models
|
title: Audio models
|
||||||
sections:
|
- sections:
|
||||||
- local: model_doc/timesformer
|
- local: model_doc/timesformer
|
||||||
title: TimeSformer
|
title: TimeSformer
|
||||||
- local: model_doc/videomae
|
- local: model_doc/videomae
|
||||||
title: VideoMAE
|
title: VideoMAE
|
||||||
- local: model_doc/vivit
|
- local: model_doc/vivit
|
||||||
title: ViViT
|
title: ViViT
|
||||||
- title: Multimodal models
|
title: Video models
|
||||||
sections:
|
- sections:
|
||||||
- local: model_doc/align
|
- local: model_doc/align
|
||||||
title: ALIGN
|
title: ALIGN
|
||||||
- local: model_doc/altclip
|
- local: model_doc/altclip
|
||||||
@ -906,6 +921,8 @@
|
|||||||
title: Emu3
|
title: Emu3
|
||||||
- local: model_doc/flava
|
- local: model_doc/flava
|
||||||
title: FLAVA
|
title: FLAVA
|
||||||
|
- local: model_doc/gemma3
|
||||||
|
title: Gemma3
|
||||||
- local: model_doc/git
|
- local: model_doc/git
|
||||||
title: GIT
|
title: GIT
|
||||||
- local: model_doc/got_ocr2
|
- local: model_doc/got_ocr2
|
||||||
@ -978,6 +995,8 @@
|
|||||||
title: Qwen2VL
|
title: Qwen2VL
|
||||||
- local: model_doc/sam
|
- local: model_doc/sam
|
||||||
title: Segment Anything
|
title: Segment Anything
|
||||||
|
- local: model_doc/shieldgemma2
|
||||||
|
title: ShieldGemma2
|
||||||
- local: model_doc/siglip
|
- local: model_doc/siglip
|
||||||
title: SigLIP
|
title: SigLIP
|
||||||
- local: model_doc/siglip2
|
- local: model_doc/siglip2
|
||||||
@ -1010,14 +1029,14 @@
|
|||||||
title: VisualBERT
|
title: VisualBERT
|
||||||
- local: model_doc/xclip
|
- local: model_doc/xclip
|
||||||
title: X-CLIP
|
title: X-CLIP
|
||||||
- title: Reinforcement learning models
|
title: Multimodal models
|
||||||
sections:
|
- sections:
|
||||||
- local: model_doc/decision_transformer
|
- local: model_doc/decision_transformer
|
||||||
title: Decision Transformer
|
title: Decision Transformer
|
||||||
- local: model_doc/trajectory_transformer
|
- local: model_doc/trajectory_transformer
|
||||||
title: Trajectory Transformer
|
title: Trajectory Transformer
|
||||||
- title: Time series models
|
title: Reinforcement learning models
|
||||||
sections:
|
- sections:
|
||||||
- local: model_doc/autoformer
|
- local: model_doc/autoformer
|
||||||
title: Autoformer
|
title: Autoformer
|
||||||
- local: model_doc/informer
|
- local: model_doc/informer
|
||||||
@ -1028,14 +1047,17 @@
|
|||||||
title: PatchTST
|
title: PatchTST
|
||||||
- local: model_doc/time_series_transformer
|
- local: model_doc/time_series_transformer
|
||||||
title: Time Series Transformer
|
title: Time Series Transformer
|
||||||
- title: Graph models
|
title: Time series models
|
||||||
sections:
|
- sections:
|
||||||
- local: model_doc/graphormer
|
- local: model_doc/graphormer
|
||||||
title: Graphormer
|
title: Graphormer
|
||||||
- title: Internal helpers
|
title: Graph models
|
||||||
sections:
|
title: Models
|
||||||
|
- sections:
|
||||||
- local: internal/modeling_utils
|
- local: internal/modeling_utils
|
||||||
title: Custom Layers and Utilities
|
title: Custom Layers and Utilities
|
||||||
|
- local: internal/model_debugging_utils
|
||||||
|
title: Utilities for Model Debugging
|
||||||
- local: internal/pipelines_utils
|
- local: internal/pipelines_utils
|
||||||
title: Utilities for pipelines
|
title: Utilities for pipelines
|
||||||
- local: internal/tokenization_utils
|
- local: internal/tokenization_utils
|
||||||
@ -1052,4 +1074,5 @@
|
|||||||
title: General Utilities
|
title: General Utilities
|
||||||
- local: internal/time_series_utils
|
- local: internal/time_series_utils
|
||||||
title: Utilities for Time Series
|
title: Utilities for Time Series
|
||||||
|
title: Internal helpers
|
||||||
|
title: API
|
||||||
|
@ -476,7 +476,7 @@ When both implementations produce the same output, verify the outputs are within
|
|||||||
torch.allclose(original_output, output, atol=1e-3)
|
torch.allclose(original_output, output, atol=1e-3)
|
||||||
```
|
```
|
||||||
|
|
||||||
This is typically the most difficult part of the process. Congratulations if you've made it this far!
|
This is typically the most difficult part of the process. Congratulations if you've made it this far!
|
||||||
|
|
||||||
And if you're stuck or struggling with this step, don't hesitate to ask for help on your pull request.
|
And if you're stuck or struggling with this step, don't hesitate to ask for help on your pull request.
|
||||||
|
|
||||||
@ -541,6 +541,48 @@ input_ids = tokenizer(input_str).input_ids
|
|||||||
|
|
||||||
When both implementations have the same `input_ids`, add a tokenizer test file. This file is analogous to the modeling test files. The tokenizer test files should contain a couple of hardcoded integration tests.
|
When both implementations have the same `input_ids`, add a tokenizer test file. This file is analogous to the modeling test files. The tokenizer test files should contain a couple of hardcoded integration tests.
|
||||||
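A minimal sketch of such a hardcoded test is shown below; the checkpoint name and the expected ids are placeholders that you would replace with values produced by the original tokenizer.

```python
from transformers import AutoTokenizer


def test_tokenizer_integration():
    # Placeholder checkpoint; point this at your converted tokenizer.
    tokenizer = AutoTokenizer.from_pretrained("your-org/brand_new_llama")

    input_str = "This is a test string for the new tokenizer."
    # Placeholder ids; copy the values produced by the original implementation.
    expected_ids = [1, 910, 338, 263, 1243, 1347, 363, 278, 716, 5993, 3950, 29889]

    assert tokenizer(input_str).input_ids == expected_ids
```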
|
|
||||||
|
## Implement image processor
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> Fast image processors use the [torchvision](https://pytorch.org/vision/stable/index.html) library and can perform image processing on the GPU, significantly improving processing speed.
|
||||||
|
> We recommend adding a fast image processor ([`BaseImageProcessorFast`]) in addition to the "slow" image processor ([`BaseImageProcessor`]) to provide users with the best performance. Feel free to tag [@yonigozlan](https://github.com/yonigozlan) for help adding a [`BaseImageProcessorFast`].
|
||||||
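As a quick illustration of the difference for users, the sketch below loads the fast variant through `use_fast=True`; the checkpoint name is a placeholder and assumes the model ships both processor variants.

```python
from PIL import Image
from transformers import AutoImageProcessor

# Placeholder checkpoint; any model that ships a fast image processor works here.
processor = AutoImageProcessor.from_pretrained("your-org/your-model", use_fast=True)

# Fast processors run torchvision transforms and return PyTorch tensors.
image = Image.new("RGB", (512, 512))
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)
```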
|
|
||||||
|
While this example doesn't include an image processor, you may need to implement one if your model requires image inputs. The image processor is responsible for converting images into a format suitable for your model. Before implementing a new one, check whether an existing image processor in the Transformers library can be reused, as many models share similar image processing techniques. Note that you can also use [modular](./modular_transformers) for image processors to reuse existing components.
|
||||||
|
|
||||||
|
If you do need to implement a new image processor, refer to an existing image processor to understand the expected structure. Slow image processors ([`BaseImageProcessor`]) and fast image processors ([`BaseImageProcessorFast`]) are designed differently, so make sure you follow the correct structure based on the processor type you're implementing.
|
||||||
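As a rough sketch of the expected structure (the attribute names follow existing fast image processors; the defaults below are placeholders your model will likely override), a fast image processor is usually a thin subclass that mostly declares default preprocessing settings:

```python
from transformers.image_processing_utils_fast import BaseImageProcessorFast
from transformers.image_utils import PILImageResampling


class YourModelImageProcessorFast(BaseImageProcessorFast):
    # Placeholder defaults; replace them with the values used by the original implementation.
    resample = PILImageResampling.BILINEAR
    image_mean = [0.5, 0.5, 0.5]
    image_std = [0.5, 0.5, 0.5]
    size = {"height": 224, "width": 224}
    do_resize = True
    do_rescale = True
    do_normalize = True
```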
|
|
||||||
|
Run the following command (only if you haven't already created the fast image processor with the `transformers-cli add-new-model-like` command) to generate the necessary imports and create a prefilled template for the fast image processor:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
transformers-cli add-fast-image-processor --model-name your_model_name
|
||||||
|
```
|
||||||
|
|
||||||
|
Modify the generated template to fit your model's needs.
|
||||||
|
|
||||||
|
Add tests for the image processor in `tests/models/your_model_name/test_image_processing_your_model_name.py`. These tests should be similar to those for other image processors and should verify that the image processor correctly handles image inputs. If your image processor includes unique features or processing methods, ensure you add specific tests for those as well.
|
||||||
|
|
||||||
|
## Implement processor
|
||||||
|
|
||||||
|
If your model accepts multiple modalities, like text and images, you need to add a processor. The processor centralizes the preprocessing of different modalities before passing them to the model.
|
||||||
|
|
||||||
|
The processor should call the appropriate modality-specific processors within its `__call__` function to handle each type of input correctly. Be sure to check existing processors in the library to understand their expected structure. Transformers uses the following convention in the `__call__` function signature.
|
||||||
|
|
||||||
|
```python
|
||||||
|
def __call__(
|
||||||
|
self,
|
||||||
|
images: ImageInput = None,
|
||||||
|
text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
|
||||||
|
audio=None,
|
||||||
|
videos=None,
|
||||||
|
**kwargs: Unpack[YourModelProcessorKwargs],
|
||||||
|
) -> BatchFeature:
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
`YourModelProcessorKwargs` is a `TypedDict` that includes all the typical processing arguments and any extra arguments a specific processor may require.
|
||||||
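As a minimal sketch of that convention (the class name and default values are placeholders, assuming the common pattern of subclassing `ProcessingKwargs` from `transformers.processing_utils`):

```python
from transformers.processing_utils import ProcessingKwargs


class YourModelProcessorKwargs(ProcessingKwargs, total=False):
    # `ProcessingKwargs` already bundles the common text/images/audio/videos kwargs;
    # `_defaults` only overrides the values your processor wants by default.
    _defaults = {
        "text_kwargs": {"padding": False},
        "images_kwargs": {"do_resize": True},
    }
```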
|
|
||||||
|
Add tests for the processor in `tests/models/your_model_name/test_processor_your_model_name.py`. These tests should be similar to those for other processors and should verify that the processor correctly handles the different modalities.
|
||||||
|
|
||||||
## Integration tests
|
## Integration tests
|
||||||
|
|
||||||
Now that you have a model and tokenizer, add end-to-end integration tests for the model and tokenizer to `tests/models/brand_new_llama/test_modeling_brand_new_llama.py`.
|
Now that you have a model and tokenizer, add end-to-end integration tests for the model and tokenizer to `tests/models/brand_new_llama/test_modeling_brand_new_llama.py`.
|
||||||
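A typical end-to-end check runs the converted checkpoint on a fixed prompt and compares a small slice of the logits against hardcoded values taken from the original implementation. The sketch below uses placeholder names and values:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


def test_model_integration():
    # Placeholder checkpoint; use the checkpoint you converted.
    checkpoint = "your-org/brand_new_llama"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.float32)

    inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits

    # Placeholder values; copy them from the original implementation's output.
    expected_slice = torch.tensor([[-0.1234, 0.5678, -0.9012]])
    torch.testing.assert_close(logits[0, :1, :3], expected_slice, rtol=1e-4, atol=1e-4)
```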
@ -620,4 +662,4 @@ There are four timelines for model additions depending on the model contributor
|
|||||||
|
|
||||||
- **Hub-first release**: Transformers [remote-code](./models#custom-models) feature allows Transformers-based projects to be shared directly on the Hub. This is a good option if you don't have the bandwidth to add a model directly to Transformers.
|
- **Hub-first release**: Transformers [remote-code](./models#custom-models) feature allows Transformers-based projects to be shared directly on the Hub. This is a good option if you don't have the bandwidth to add a model directly to Transformers.
|
||||||
|
|
||||||
If a model ends up being very popular, then it's very likely that we'll integrate it in Transformers ourselves to enable better support (documentation, maintenance, optimization, etc.) for it. A Hub-first release is the most frictionless way to add a model.
|
If a model ends up being very popular, then it's very likely that we'll integrate it in Transformers ourselves to enable better support (documentation, maintenance, optimization, etc.) for it. A Hub-first release is the most frictionless way to add a model.
|
||||||
|
128
docs/source/en/attention_interface.md
Normal file
128
docs/source/en/attention_interface.md
Normal file
@ -0,0 +1,128 @@
|
|||||||
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
|
||||||
|
|
||||||
|
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||||
|
rendered properly in your Markdown viewer.
|
||||||
|
|
||||||
|
-->
|
||||||
|
|
||||||
|
# Attention Interface
|
||||||
|
|
||||||
|
This page describes how to use the `AttentionInterface` in order to register custom attention functions to use with
|
||||||
|
supported models.
|
||||||
|
|
||||||
|
## Customizing attention function
|
||||||
|
|
||||||
|
Most recent models can now switch from one attention function used in the Attention layer to another, thanks to a simple mapping.
|
||||||
|
By default, we provide the implementation for [`sdpa`](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html),
|
||||||
|
[`flash_attention_2`](https://github.com/Dao-AILab/flash-attention) and [`flex_attention`](https://pytorch.org/docs/stable/nn.attention.flex_attention.html#module-torch.nn.attention.flex_attention)
|
||||||
|
as well as `eager`, which is a plain PyTorch implementation of attention (matrix multiplications and a softmax) without any optimization on top.
|
||||||
|
This is the setting you can usually choose when instantiating a model:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from transformers import AutoModelForCausalLM
|
||||||
|
|
||||||
|
model_id = "meta-llama/Llama-3.2-1B"
|
||||||
|
|
||||||
|
# Here, using flash attention as an example
|
||||||
|
model = AutoModelForCausalLM.from_pretrained(model_id, attn_implementation="flash_attention_2")
|
||||||
|
```
|
||||||
|
|
||||||
|
But what if you wanted to create your own attention function? Or simply play around with existing ones, adding
|
||||||
|
a few statements here and there? You can now do so with the `AttentionInterface`! Here is an example:
|
||||||
|
|
||||||
|
```python
|
||||||
|
from transformers import AutoModelForCausalLM, AttentionInterface
|
||||||
|
from transformers.integrations.sdpa_attention import sdpa_attention_forward
|
||||||
|
import torch
|
||||||
|
|
||||||
|
model_id = "meta-llama/Llama-3.2-1B"
|
||||||
|
|
||||||
|
def my_new_sdpa(*args, **kwargs):
|
||||||
|
print("I just entered the attention computation")
|
||||||
|
return sdpa_attention_forward(*args, **kwargs)
|
||||||
|
|
||||||
|
AttentionInterface.register("my_new_sdpa", my_new_sdpa)
|
||||||
|
|
||||||
|
model = AutoModelForCausalLM.from_pretrained(model_id, attn_implementation="my_new_sdpa")
|
||||||
|
# Try running the forward with the new attention function
|
||||||
|
model(torch.ones(1, 5, dtype=int))
|
||||||
|
```
|
||||||
|
|
||||||
|
You will see it prints "I just entered the attention computation" as many times as there are layers in the model (with this example, 16 times).
|
||||||
|
|
||||||
|
## Dynamically switching attention function
|
||||||
|
|
||||||
|
You could dynamically change the model's attention function as well, by overriding the `config._attn_implementation` field:
|
||||||
|
|
||||||
|
```python
|
||||||
|
# Back to use original sdpa implementation
|
||||||
|
model.config._attn_implementation = "sdpa"
|
||||||
|
|
||||||
|
model(torch.ones(1, 5, dtype=int))
|
||||||
|
```
|
||||||
|
|
||||||
|
and it will stop printing the statements, as it now uses the `sdpa` attention.
|
||||||
|
This allows you to quickly change an attention function, without needing to reload the model!
|
||||||
|
|
||||||
|
## What about new args needed in my custom attention function?
|
||||||
|
|
||||||
|
But indeed, what if the new function requires a new arg to be properly used? It's no issue! Models supporting the
|
||||||
|
`AttentionInterface` propagate kwargs all the way to the Attention layers, and to the used attention function. That way,
|
||||||
|
you can simply pass the arg (as a kwarg, i.e. you need to qualify the name of the arg) in the model's forward, and it will be correctly used in the attention. However, custom attention functions have some limitations. In particular, they must follow the signature and return format of the other attention functions, i.e.
|
||||||
|
|
||||||
|
```python
|
||||||
|
from transformers import AutoModelForCausalLM, AttentionInterface
|
||||||
|
from transformers.integrations.sdpa_attention import sdpa_attention_forward
|
||||||
|
from typing import Optional, Tuple
import torch
|
||||||
|
|
||||||
|
def custom_attention(
|
||||||
|
module: torch.nn.Module, # required arg
|
||||||
|
query: torch.Tensor, # required arg
|
||||||
|
key: torch.Tensor, # required arg
|
||||||
|
value: torch.Tensor, # required arg
|
||||||
|
attention_mask: Optional[torch.Tensor], # required arg
|
||||||
|
a_new_kwargs=None,  # You can now add as many kwargs as you need
|
||||||
|
another_new_kwargs=None,  # You can now add as many kwargs as you need
|
||||||
|
**kwargs, # You need to accept **kwargs as models will pass other args
|
||||||
|
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
|
||||||
|
... # do your magic!
|
||||||
|
return attn_output, attn_weights # attn_weights are optional here
|
||||||
|
|
||||||
|
AttentionInterface.register("custom", custom_attention)
|
||||||
|
|
||||||
|
model = AutoModelForCausalLM.from_pretrained(model_id, attn_implementation="custom")
|
||||||
|
# Forward pass with the new kwargs
|
||||||
|
model(torch.ones(1, 5, dtype=int), a_new_kwargs=..., another_new_kwargs=...)
|
||||||
|
```
|
||||||
|
|
||||||
|
If in doubt about what args/kwargs a given model sends to the attention function, simply check that model's modeling code on [GitHub](https://github.com/huggingface/transformers/tree/main/src/transformers/models)!
|
||||||
|
|
||||||
|
## Accessing current available implementations
|
||||||
|
|
||||||
|
Most of the time, you will simply need to `register` a new function. If, however, you need to access an existing one,
|
||||||
|
and/or perform a few checks, the preferred way is to use the global `ALL_ATTENTION_FUNCTIONS`. It behaves the same way you
|
||||||
|
would expect from a usual Python dictionary:
|
||||||
|
|
||||||
|
```python
|
||||||
|
>>> from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS
|
||||||
|
|
||||||
|
>>> list(ALL_ATTENTION_FUNCTIONS.keys())
|
||||||
|
>>> ['flash_attention_2', 'flex_attention', 'sdpa']
|
||||||
|
|
||||||
|
>>> ALL_ATTENTION_FUNCTIONS["sdpa"]
|
||||||
|
>>> <function transformers.integrations.sdpa_attention.sdpa_attention_forward>
|
||||||
|
|
||||||
|
>>> ALL_ATTENTION_FUNCTIONS.get("sdpa", None)
|
||||||
|
>>> <function transformers.integrations.sdpa_attention.sdpa_attention_forward>
|
||||||
|
|
||||||
|
# You can also globally `register` a new function directly on it
|
||||||
|
>>> ALL_ATTENTION_FUNCTIONS.register("new_func", new_func)
|
||||||
|
```
|
@ -9,7 +9,7 @@ Unless required by applicable law or agreed to in writing, software distributed
|
|||||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
specific language governing permissions and limitations under the License.
|
specific language governing permissions and limitations under the License.
|
||||||
|
|
||||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||||
rendered properly in your Markdown viewer.
|
rendered properly in your Markdown viewer.
|
||||||
|
|
||||||
-->
|
-->
|
||||||
@ -62,7 +62,7 @@ for _ in range(max_new_tokens):
|
|||||||
# Greedily sample one next token
|
# Greedily sample one next token
|
||||||
next_token_ids = outputs.logits[:, -1:].argmax(-1)
|
next_token_ids = outputs.logits[:, -1:].argmax(-1)
|
||||||
generated_ids = torch.cat([generated_ids, next_token_ids], dim=-1)
|
generated_ids = torch.cat([generated_ids, next_token_ids], dim=-1)
|
||||||
# Prepare inputs for the next generation step by leaaving unprocessed tokens, in our case we have only one new token
|
# Prepare inputs for the next generation step by leaving unprocessed tokens, in our case we have only one new token
|
||||||
# and expanding attn mask for the new token, as explained above
|
# and expanding attn mask for the new token, as explained above
|
||||||
attention_mask = inputs["attention_mask"]
|
attention_mask = inputs["attention_mask"]
|
||||||
attention_mask = torch.cat([attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1)
|
attention_mask = torch.cat([attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1)
|
||||||
@ -88,7 +88,7 @@ model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", to
|
|||||||
inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
|
inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
|
||||||
|
|
||||||
# `return_dict_in_generate=True` is required to return the cache and `return_legacy_cache` forces the returned cache
|
# `return_dict_in_generate=True` is required to return the cache and `return_legacy_cache` forces the returned cache
|
||||||
# in the the legacy format
|
# in the legacy format
|
||||||
generation_outputs = model.generate(**inputs, return_dict_in_generate=True, return_legacy_cache=True, max_new_tokens=5)
|
generation_outputs = model.generate(**inputs, return_dict_in_generate=True, return_legacy_cache=True, max_new_tokens=5)
|
||||||
|
|
||||||
cache = DynamicCache.from_legacy_cache(generation_outputs.past_key_values)
|
cache = DynamicCache.from_legacy_cache(generation_outputs.past_key_values)
|
||||||
|
@ -146,7 +146,7 @@ print(tokenizer.decode(out[0][len(inputs["input_ids"][0]):]))
|
|||||||
|
|
||||||
## Schema
|
## Schema
|
||||||
|
|
||||||
[`~PreTrainedTokenizerBase.apply_chat_template`] converts functions into a [JSON schema](https://json-schema.org/learn/getting-started-step-by-step) which is passed to the chat template. A LLM never sees the code inside the function. In other words, a LLM doesn't care how the model works technically, it only cares about function **definition** and **arguments**.
|
[`~PreTrainedTokenizerBase.apply_chat_template`] converts functions into a [JSON schema](https://json-schema.org/learn/getting-started-step-by-step) which is passed to the chat template. A LLM never sees the code inside the function. In other words, a LLM doesn't care how the function works technically, it only cares about function **definition** and **arguments**.
|
||||||
|
|
||||||
The JSON schema is automatically generated behind the scenes as long as your function follows the [rules](#tools) listed earlier above. But you can use [get_json_schema](https://github.com/huggingface/transformers/blob/14561209291255e51c55260306c7d00c159381a5/src/transformers/utils/chat_template_utils.py#L205) to manually convert a schema for more visibility or debugging.
|
The JSON schema is automatically generated behind the scenes as long as your function follows the [rules](#tools) listed earlier above. But you can use [get_json_schema](https://github.com/huggingface/transformers/blob/14561209291255e51c55260306c7d00c159381a5/src/transformers/utils/chat_template_utils.py#L205) to manually generate a function's schema for more visibility or debugging, as in the sketch below.
|
||||||
|
|
||||||
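For reference, here is a minimal sketch of generating such a schema by hand. It assumes `get_json_schema` is importable from `transformers.utils` (it lives in the `chat_template_utils.py` file linked above), and the tool function itself is a made-up example:

```python
from transformers.utils import get_json_schema  # assumed import path; see the file linked above


def get_current_temperature(location: str, unit: str) -> float:
    """
    Get the current temperature at a location.

    Args:
        location: The location to get the temperature for, in the format "City, Country".
        unit: The unit to return the temperature in. (choices: ["celsius", "fahrenheit"])
    """
    return 22.0  # dummy value; the LLM never sees the function body anyway


# Inspect the JSON schema that `apply_chat_template` would build behind the scenes
print(get_json_schema(get_current_temperature))
```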
|
@ -9,7 +9,7 @@ Unless required by applicable law or agreed to in writing, software distributed
|
|||||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
specific language governing permissions and limitations under the License.
|
specific language governing permissions and limitations under the License.
|
||||||
|
|
||||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||||
rendered properly in your Markdown viewer.
|
rendered properly in your Markdown viewer.
|
||||||
|
|
||||||
-->
|
-->
|
||||||
@ -18,7 +18,7 @@ rendered properly in your Markdown viewer.
|
|||||||
|
|
||||||
Multimodal model chat templates expect a [template](./chat_templating) similar to text-only models: a `messages` list whose entries are dictionaries with `role` and `content` keys.
|
Multimodal model chat templates expect a [template](./chat_templating) similar to text-only models: a `messages` list whose entries are dictionaries with `role` and `content` keys.
|
||||||
|
|
||||||
Multimodal templates are included in the [Processor](./processors) class and requires an additional `type` key for specifying whether the included content is an image, video, or text.
|
Multimodal templates are included in the [Processor](./processors) class and require an additional `type` key for specifying whether the included content is an image, video, or text.
|
||||||
|
|
||||||
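As a quick illustration, here is a minimal sketch of what such a `messages` list can look like (the URL is a placeholder, and the exact content keys accepted vary by processor):

```python
# A hypothetical user turn mixing an image and text; note the extra "type" key on each content entry.
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": "https://example.com/cat.png"},
            {"type": "text", "text": "What is shown in this image?"},
        ],
    },
]
```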
This guide will show you how to format chat templates for multimodal models as well as some best practices for configuring the template.
|
This guide will show you how to format chat templates for multimodal models as well as some best practices for configuring the template.
|
||||||
|
|
||||||
@ -109,7 +109,7 @@ These inputs are now ready to be used in [`~GenerationMixin.generate`].
|
|||||||
|
|
||||||
Some vision models also support video inputs. The message format is very similar to the format for [image inputs](#image-inputs).
|
Some vision models also support video inputs. The message format is very similar to the format for [image inputs](#image-inputs).
|
||||||
|
|
||||||
- The content `"type"` should be `"video"` to indicate the the content is a video.
|
- The content `"type"` should be `"video"` to indicate the content is a video.
|
||||||
- For videos, it can be a link to the video (`"url"`) or a file path (`"path"`). Videos loaded from a URL can only be decoded with [PyAV](https://pyav.basswood-io.com/docs/stable/) or [Decord](https://github.com/dmlc/decord).
|
- For videos, it can be a link to the video (`"url"`) or a file path (`"path"`). Videos loaded from a URL can only be decoded with [PyAV](https://pyav.basswood-io.com/docs/stable/) or [Decord](https://github.com/dmlc/decord).
|
||||||
|
|
||||||
> [!WARNING]
|
> [!WARNING]
|
||||||
@ -141,7 +141,7 @@ Pass `messages` to [`~ProcessorMixin.apply_chat_template`] to tokenize the input
|
|||||||
|
|
||||||
The `video_load_backend` parameter refers to a specific framework to load a video. It supports [PyAV](https://pyav.basswood-io.com/docs/stable/), [Decord](https://github.com/dmlc/decord), [OpenCV](https://github.com/opencv/opencv), and [torchvision](https://pytorch.org/vision/stable/index.html).
|
The `video_load_backend` parameter refers to a specific framework to load a video. It supports [PyAV](https://pyav.basswood-io.com/docs/stable/), [Decord](https://github.com/dmlc/decord), [OpenCV](https://github.com/opencv/opencv), and [torchvision](https://pytorch.org/vision/stable/index.html).
|
||||||
|
|
||||||
The examples below uses Decord as the backend because it is a bit faster than PyAV.
|
The examples below use Decord as the backend because it is a bit faster than PyAV.
|
||||||
|
|
||||||
<hfoptions id="sampling">
|
<hfoptions id="sampling">
|
||||||
<hfoption id="fixed number of frames">
|
<hfoption id="fixed number of frames">
|
||||||
|
@ -131,7 +131,7 @@ class ResnetModel(PreTrainedModel):
|
|||||||
</hfoption>
|
</hfoption>
|
||||||
<hfoption id="ResnetModelForImageClassification">
|
<hfoption id="ResnetModelForImageClassification">
|
||||||
|
|
||||||
The `forward` method needs to be rewrittten to calculate the loss for each logit if labels are available. Otherwise, the ResNet model class is the same.
|
The `forward` method needs to be rewritten to calculate the loss for each logit if labels are available. Otherwise, the ResNet model class is the same.
|
||||||
|
|
||||||
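A minimal sketch of what the rewritten `forward` can look like is shown below; the `ResnetConfig` class and the `self.model` attribute are assumptions carried over from the code defined earlier in this guide, not the guide's exact implementation.

```python
import torch
from transformers import PreTrainedModel


# `ResnetConfig` and `self.model` come from the ResNet classes built earlier in this guide;
# they are referenced here as assumptions, not redefined.
class ResnetModelForImageClassification(PreTrainedModel):
    config_class = ResnetConfig

    def forward(self, tensor, labels=None):
        logits = self.model(tensor)  # the wrapped ResNet built in `__init__`
        if labels is not None:
            # compute a standard classification loss whenever labels are passed
            loss = torch.nn.functional.cross_entropy(logits, labels)
            return {"loss": loss, "logits": logits}
        return {"logits": logits}
```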
> [!TIP]
|
> [!TIP]
|
||||||
> Add `config_class` to the model class to enable [AutoClass](#autoclass-support) support.
|
> Add `config_class` to the model class to enable [AutoClass](#autoclass-support) support.
|
||||||
|
@ -271,7 +271,7 @@ tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
|||||||
|
|
||||||
## DoLa
|
## DoLa
|
||||||
|
|
||||||
[Decoding by Contrasting Layers (DoLa)](https://hf.co/papers/2309.03883) is a contrastive decoding strategy for improving factuality and reducing hallucination. This strategy works by contrasting the logit diffferences between the final and early layers. As a result, factual knowledge localized to particular layers are amplified. DoLa is not recommended for smaller models like GPT-2.
|
[Decoding by Contrasting Layers (DoLa)](https://hf.co/papers/2309.03883) is a contrastive decoding strategy for improving factuality and reducing hallucination. This strategy works by contrasting the logit differences between the final and early layers. As a result, factual knowledge localized to particular layers is amplified. DoLa is not recommended for smaller models like GPT-2.
|
||||||
|
|
||||||
Enable DoLa with the following parameters.
|
Enable DoLa with the following parameters.
|
||||||
|
|
||||||
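As a rough sketch of such a call (assuming `model`, `tokenizer`, and `inputs` are loaded as in the surrounding guide; the concrete values here are illustrative, not the guide's exact settings):

```python
# Contrast against the higher layers ("high"); `dola_layers` also accepts "low" or an explicit list of layer indices.
out = model.generate(
    **inputs,
    do_sample=False,
    max_new_tokens=50,
    dola_layers="high",
    repetition_penalty=1.2,  # helps reduce repetition when using DoLa
)
print(tokenizer.batch_decode(out[:, inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```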
|
@ -24,21 +24,23 @@ rendered properly in your Markdown viewer.
|
|||||||
|
|
||||||
The GGUF format also supports many quantized data types (refer to [quantization type table](https://hf.co/docs/hub/en/gguf#quantization-types) for a complete list of supported quantization types) which saves a significant amount of memory, making inference with large models like Whisper and Llama feasible on local and edge devices.
|
The GGUF format also supports many quantized data types (refer to [quantization type table](https://hf.co/docs/hub/en/gguf#quantization-types) for a complete list of supported quantization types) which saves a significant amount of memory, making inference with large models like Whisper and Llama feasible on local and edge devices.
|
||||||
|
|
||||||
Transformers supports loading models stored in the GGUF format for further training or finetuning. The GGUF format is dequantized to fp32 where the full model weights are available and compatible with PyTorch.
|
Transformers supports loading models stored in the GGUF format for further training or finetuning. The GGUF checkpoint is **dequantized to fp32** where the full model weights are available and compatible with PyTorch.
|
||||||
|
|
||||||
> [!TIP]
|
> [!TIP]
|
||||||
> Models that support GGUF include Llama, Mistral, Qwen2, Qwen2Moe, Phi3, Bloom, Falcon, StableLM, GPT2, and Starcoder2.
|
> Models that support GGUF include Llama, Mistral, Qwen2, Qwen2Moe, Phi3, Bloom, Falcon, StableLM, GPT2, Starcoder2, and [more](https://github.com/huggingface/transformers/blob/main/src/transformers/integrations/ggml.py)
|
||||||
|
|
||||||
Add the `gguf_file` parameter to [`~PreTrainedModel.from_pretrained`] to specify the GGUF file to load.
|
Add the `gguf_file` parameter to [`~PreTrainedModel.from_pretrained`] to specify the GGUF file to load.
|
||||||
|
|
||||||
```py
|
```py
|
||||||
|
# pip install gguf
|
||||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
import torch  # needed for the `torch_dtype` below
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||||
|
|
||||||
model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
|
model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
|
||||||
filename = "tinyllama-1.1b-chat-v1.0.Q6_K.gguf"
|
filename = "tinyllama-1.1b-chat-v1.0.Q6_K.gguf"
|
||||||
|
|
||||||
|
torch_dtype = torch.float32 # could be torch.float16 or torch.bfloat16 too
|
||||||
tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=filename)
|
tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=filename)
|
||||||
model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=filename)
|
model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=filename, torch_dtype=torch_dtype)
|
||||||
```
|
```
|
||||||
|
|
||||||
Once you're done tinkering with the model, save and convert it back to the GGUF format with the [convert-hf-to-gguf.py](https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py) script.
|
Once you're done tinkering with the model, save and convert it back to the GGUF format with the [convert-hf-to-gguf.py](https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py) script.
|
||||||
|
@ -9,7 +9,7 @@ Unless required by applicable law or agreed to in writing, software distributed
|
|||||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
specific language governing permissions and limitations under the License.
|
specific language governing permissions and limitations under the License.
|
||||||
|
|
||||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||||
rendered properly in your Markdown viewer.
|
rendered properly in your Markdown viewer.
|
||||||
|
|
||||||
-->
|
-->
|
||||||
@ -56,7 +56,7 @@ deepspeed --num_gpus 2 trainer-program.py ...
|
|||||||
|
|
||||||
### Order of GPUs
|
### Order of GPUs
|
||||||
|
|
||||||
To select specific GPUs to use and their order, configure the the `CUDA_VISIBLE_DEVICES` environment variable. It is easiest to set the environment variable in `~/bashrc` or another startup config file. `CUDA_VISIBLE_DEVICES` is used to map which GPUs are used. For example, if there are 4 GPUs (0, 1, 2, 3) and you only want to run GPUs 0 and 2:
|
To select specific GPUs to use and their order, configure the `CUDA_VISIBLE_DEVICES` environment variable. It is easiest to set the environment variable in `~/.bashrc` or another startup config file. `CUDA_VISIBLE_DEVICES` is used to map which GPUs are used. For example, if there are 4 GPUs (0, 1, 2, 3) and you only want to run GPUs 0 and 2:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
CUDA_VISIBLE_DEVICES=0,2 torchrun trainer-program.py ...
|
CUDA_VISIBLE_DEVICES=0,2 torchrun trainer-program.py ...
|
||||||
|
@ -36,7 +36,7 @@ This guide will show you how to customize a models attention mechanism in order
|
|||||||
|
|
||||||
## Attention class
|
## Attention class
|
||||||
|
|
||||||
[Segment Anything](./model_doc/sam) is an image segmentation model, and it combines the query-key-value (`qkv`) projection in its attention mechanims. To reduce the number of trainable parameters and computational overhead, you can apply LoRA to the `qkv` projection. This requires splitting the `qkv` projection so that you can separately target the `q` and `v` with LoRA.
|
[Segment Anything](./model_doc/sam) is an image segmentation model, and it combines the query-key-value (`qkv`) projection in its attention mechanisms. To reduce the number of trainable parameters and computational overhead, you can apply LoRA to the `qkv` projection. This requires splitting the `qkv` projection so that you can separately target the `q` and `v` with LoRA.
|
||||||
|
|
||||||
1. Create a custom attention class, `SamVisionAttentionSplit`, by subclassing the original `SamVisionAttention` class. In the `__init__`, delete the combined `qkv` and create a separate linear layer for `q`, `k` and `v`.
|
1. Create a custom attention class, `SamVisionAttentionSplit`, by subclassing the original `SamVisionAttention` class. In the `__init__`, delete the combined `qkv` and create a separate linear layer for `q`, `k` and `v` (see the sketch after this step).
|
||||||
|
|
||||||
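A minimal sketch of this step is shown below; the parent `__init__` signature and the `hidden_size`/`qkv_bias` config attributes are assumptions, so check `modeling_sam.py` for the exact definitions.

```python
import torch.nn as nn
from transformers.models.sam.modeling_sam import SamVisionAttention


class SamVisionAttentionSplit(SamVisionAttention):
    def __init__(self, config, window_size):
        super().__init__(config, window_size)
        # drop the fused qkv projection created by the parent class
        del self.qkv
        # recreate it as three separate projections so LoRA can target `q` and `v` individually
        self.q = nn.Linear(config.hidden_size, config.hidden_size, bias=config.qkv_bias)
        self.k = nn.Linear(config.hidden_size, config.hidden_size, bias=config.qkv_bias)
        self.v = nn.Linear(config.hidden_size, config.hidden_size, bias=config.qkv_bias)
```

You will also need a matching `forward`, and a way to map the pretrained fused `qkv` weights onto the new layers if you want to keep the original checkpoint weights.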
|
@ -43,4 +43,3 @@ Transformers is designed for developers and machine learning engineers and resea
|
|||||||
</a>
|
</a>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
Join us on the Hugging Face [Hub](https://huggingface.co/), [Discord](https://discord.com/invite/JfAtkvEtRb), or [forum](https://discuss.huggingface.co/) to collaborate and build models, datasets, and applications together.
|
|
||||||
|
@ -20,7 +20,7 @@ rendered properly in your Markdown viewer.
|
|||||||
|
|
||||||
# Installation
|
# Installation
|
||||||
|
|
||||||
Transformers works with [PyTorch](https://pytorch.org/get-started/locally/), [TensorFlow 2.0](https://www.tensorflow.org/install/pip), and [Flax](https://flax.readthedocs.io/en/latest/). It has been tested on Python 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, and Flax.
|
Transformers works with [PyTorch](https://pytorch.org/get-started/locally/), [TensorFlow 2.0](https://www.tensorflow.org/install/pip), and [Flax](https://flax.readthedocs.io/en/latest/). It has been tested on Python 3.9+, PyTorch 2.0+, TensorFlow 2.6+, and Flax 0.4.1+.
|
||||||
|
|
||||||
## Virtual environment
|
## Virtual environment
|
||||||
|
|
||||||
@ -33,7 +33,7 @@ Create and activate a virtual environment in your project directory with [venv](
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
python -m venv .env
|
python -m venv .env
|
||||||
source ./env/bin/activate
|
source .env/bin/activate
|
||||||
```
|
```
|
||||||
|
|
||||||
</hfoption>
|
</hfoption>
|
||||||
@ -43,7 +43,7 @@ source ./env/bin/activate
|
|||||||
|
|
||||||
```bash
|
```bash
|
||||||
uv venv .env
|
uv venv .env
|
||||||
source ./env/bin/activate
|
source .env/bin/activate
|
||||||
```
|
```
|
||||||
|
|
||||||
</hfoption>
|
</hfoption>
|
||||||
|
71
docs/source/en/internal/model_debugging_utils.md
Normal file
71
docs/source/en/internal/model_debugging_utils.md
Normal file
@ -0,0 +1,71 @@
|
|||||||
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License.
|
||||||
|
|
||||||
|
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||||
|
rendered properly in your Markdown viewer.
|
||||||
|
|
||||||
|
-->
|
||||||
|
|
||||||
|
# Model debugging toolboxes
|
||||||
|
|
||||||
|
This page lists all the debugging and model-adding tools used by the library, as well as the utility functions it provides for them.
|
||||||
|
|
||||||
|
Most of those are only useful if you are adding new models to the library.
|
||||||
|
|
||||||
|
|
||||||
|
## Model addition debuggers
|
||||||
|
|
||||||
|
|
||||||
|
### Model addition debugger - context manager for model adders
|
||||||
|
|
||||||
|
This context manager is a power user tool intended for model adders.
|
||||||
|
It tracks all forward calls within a model forward and logs a slice of each input and output to a nested JSON file.
Note that this context manager enforces `torch.inference_mode()`.
|
||||||
|
|
||||||
|
### Rationale
|
||||||
|
|
||||||
|
When porting models to transformers, even from Python to Python, model adders often have to do a lot of manual operations involving saving and loading tensors, comparing dtypes, etc. This small tool can hopefully shave off some of that time.
|
||||||
|
|
||||||
|
### Usage
|
||||||
|
|
||||||
|
Add this context manager as follows to debug a model:
|
||||||
|
|
||||||
|
```python
|
||||||
|
import torch
|
||||||
|
from PIL import Image
|
||||||
|
import requests
|
||||||
|
from transformers import LlavaProcessor, LlavaForConditionalGeneration
from transformers.model_debugging_utils import model_addition_debugger_context  # assumed import path for the context manager used below
|
||||||
|
torch.random.manual_seed(673)
|
||||||
|
|
||||||
|
# load pretrained model and processor
|
||||||
|
model_id = "llava-hf/llava-1.5-7b-hf"
|
||||||
|
processor = LlavaProcessor.from_pretrained(model_id)
|
||||||
|
model = LlavaForConditionalGeneration.from_pretrained(model_id, low_cpu_mem_usage=True)
|
||||||
|
|
||||||
|
# create random image input
|
||||||
|
random_image = Image.fromarray(torch.randint(0, 256, (224, 224, 3), dtype=torch.uint8).numpy())
|
||||||
|
|
||||||
|
# prompt
|
||||||
|
prompt = "<image>Describe this image."
|
||||||
|
|
||||||
|
# process inputs
|
||||||
|
inputs = processor(text=prompt, images=random_image, return_tensors="pt")
|
||||||
|
|
||||||
|
# call forward method (not .generate!)
|
||||||
|
with model_addition_debugger_context(model, "optional_path_to_your_output_file.json"):
|
||||||
|
output = model.forward(**inputs)
|
||||||
|
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
[[autodoc]] model_addition_debugger
|
||||||
|
|
||||||
|
[[autodoc]] model_addition_debugger_context
|
@ -16,10 +16,14 @@ rendered properly in your Markdown viewer.
|
|||||||
|
|
||||||
# Custom Layers and Utilities
|
# Custom Layers and Utilities
|
||||||
|
|
||||||
This page lists all the custom layers used by the library, as well as the utility functions it provides for modeling.
|
This page lists all the custom layers used by the library, as well as the utility functions and classes it provides for modeling.
|
||||||
|
|
||||||
Most of those are only useful if you are studying the code of the models in the library.
|
Most of those are only useful if you are studying the code of the models in the library.
|
||||||
|
|
||||||
|
## Attention Functions
|
||||||
|
|
||||||
|
[[autodoc]] AttentionInterface
|
||||||
|
- register
|
||||||
|
|
||||||
## Pytorch custom modules
|
## Pytorch custom modules
|
||||||
|
|
||||||
|
@ -56,7 +56,7 @@ To give some examples of how much VRAM it roughly takes to load a model in bfloa
|
|||||||
|
|
||||||
As of writing this document, the largest GPU chips on the market are the A100 and H100, offering 80GB of VRAM. Most of the models listed before require more than 80GB just to be loaded and therefore necessarily require [tensor parallelism](https://huggingface.co/docs/transformers/perf_train_gpu_many#tensor-parallelism) and/or [pipeline parallelism](https://huggingface.co/docs/transformers/perf_train_gpu_many#naive-model-parallelism-vertical-and-pipeline-parallelism).
|
As of writing this document, the largest GPU chips on the market are the A100 and H100, offering 80GB of VRAM. Most of the models listed before require more than 80GB just to be loaded and therefore necessarily require [tensor parallelism](https://huggingface.co/docs/transformers/perf_train_gpu_many#tensor-parallelism) and/or [pipeline parallelism](https://huggingface.co/docs/transformers/perf_train_gpu_many#naive-model-parallelism-vertical-and-pipeline-parallelism).
|
||||||
|
|
||||||
🤗 Transformers now supports tensor parallelism for supported models having `base_tp_plan` in their respecitve config classes. Learn more about Tensor Parallelism [here](perf_train_gpu_many#tensor-parallelism). Furthermore, if you're interested in writing models in a tensor-parallelism-friendly way, feel free to have a look at [the text-generation-inference library](https://github.com/huggingface/text-generation-inference/tree/main/server/text_generation_server/models/custom_modeling).
|
🤗 Transformers now supports tensor parallelism for supported models having `base_tp_plan` in their respective config classes. Learn more about Tensor Parallelism [here](perf_train_gpu_many#tensor-parallelism). Furthermore, if you're interested in writing models in a tensor-parallelism-friendly way, feel free to have a look at [the text-generation-inference library](https://github.com/huggingface/text-generation-inference/tree/main/server/text_generation_server/models/custom_modeling).
|
||||||
|
|
||||||
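A hedged sketch of what this looks like in practice (assuming a recent Transformers version where `tp_plan="auto"` is accepted by `from_pretrained` for models that define a tensor-parallel plan, and that the script is launched with `torchrun` so a distributed process group exists):

```python
import torch
from transformers import AutoModelForCausalLM

# e.g. run with: torchrun --nproc-per-node 4 tp_demo.py  (the file name is just an example)
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B-Instruct",  # any checkpoint whose config defines a tensor-parallel plan
    torch_dtype=torch.bfloat16,
    tp_plan="auto",  # shard the weights across the GPUs in the process group
)
```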
Naive pipeline parallelism is supported out of the box. For this, simply load the model with `device_map="auto"` which will automatically place the different layers on the available GPUs as explained [here](https://huggingface.co/docs/accelerate/v0.22.0/en/concept_guides/big_model_inference).
|
Naive pipeline parallelism is supported out of the box. For this, simply load the model with `device_map="auto"` which will automatically place the different layers on the available GPUs as explained [here](https://huggingface.co/docs/accelerate/v0.22.0/en/concept_guides/big_model_inference).
|
||||||
Note, however that while very effective, this naive pipeline parallelism does not tackle the issues of GPU idling. For this more advanced pipeline parallelism is required as explained [here](https://huggingface.co/docs/transformers/en/perf_train_gpu_many#naive-model-parallelism-vertical-and-pipeline-parallelism).
|
Note, however that while very effective, this naive pipeline parallelism does not tackle the issues of GPU idling. For this more advanced pipeline parallelism is required as explained [here](https://huggingface.co/docs/transformers/en/perf_train_gpu_many#naive-model-parallelism-vertical-and-pipeline-parallelism).
|
||||||
@ -551,7 +551,7 @@ $$ \mathbf{\hat{q}}_i^T \mathbf{\hat{x}}_j = \mathbf{{q}}_i^T \mathbf{R}_{\theta
|
|||||||
|
|
||||||
\\( \mathbf{R}_{\theta, i - j} \\) thereby represents a rotational matrix. \\( \theta \\) is *not* learned during training, but instead set to a pre-defined value that depends on the maximum input sequence length during training.
|
\\( \mathbf{R}_{\theta, i - j} \\) thereby represents a rotational matrix. \\( \theta \\) is *not* learned during training, but instead set to a pre-defined value that depends on the maximum input sequence length during training.
|
||||||
|
|
||||||
> By doing so, the propability score between \\( \mathbf{q}_i \\) and \\( \mathbf{q}_j \\) is only affected if \\( i \ne j \\) and solely depends on the relative distance \\( i - j \\) regardless of each vector's specific positions \\( i \\) and \\( j \\) .
|
> By doing so, the probability score between \\( \mathbf{q}_i \\) and \\( \mathbf{q}_j \\) is only affected if \\( i \ne j \\) and solely depends on the relative distance \\( i - j \\) regardless of each vector's specific positions \\( i \\) and \\( j \\) .
|
||||||
|
|
||||||
*RoPE* is used in multiple of today's most important LLMs, such as:
|
*RoPE* is used in multiple of today's most important LLMs, such as:
|
||||||
|
|
||||||
|
@ -22,9 +22,6 @@ The `.optimization` module provides:
|
|||||||
- several schedules in the form of schedule objects that inherit from `_LRSchedule`:
|
- several schedules in the form of schedule objects that inherit from `_LRSchedule`:
|
||||||
- a gradient accumulation class to accumulate the gradients of multiple batches
|
- a gradient accumulation class to accumulate the gradients of multiple batches
|
||||||
|
|
||||||
## AdamW (PyTorch)
|
|
||||||
|
|
||||||
[[autodoc]] AdamW
|
|
||||||
|
|
||||||
## AdaFactor (PyTorch)
|
## AdaFactor (PyTorch)
|
||||||
|
|
||||||
|
@ -88,3 +88,7 @@ Learn how to quantize models in the [Quantization](../quantization) guide.
|
|||||||
## FineGrainedFP8Config
|
## FineGrainedFP8Config
|
||||||
|
|
||||||
[[autodoc]] FineGrainedFP8Config
|
[[autodoc]] FineGrainedFP8Config
|
||||||
|
|
||||||
|
## QuarkConfig
|
||||||
|
|
||||||
|
[[autodoc]] QuarkConfig
|
||||||
|
@ -18,6 +18,7 @@ rendered properly in your Markdown viewer.
|
|||||||
|
|
||||||
<div class="flex flex-wrap space-x-1">
|
<div class="flex flex-wrap space-x-1">
|
||||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
|
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
@ -14,159 +14,85 @@ rendered properly in your Markdown viewer.
|
|||||||
|
|
||||||
-->
|
-->
|
||||||
|
|
||||||
# BERT
|
<div style="float: right;">
|
||||||
|
<div class="flex flex-wrap space-x-1">
|
||||||
<div class="flex flex-wrap space-x-1">
|
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="TensorFlow" src="https://img.shields.io/badge/TensorFlow-FF6F00?style=flat&logo=tensorflow&logoColor=white">
|
||||||
<img alt="TensorFlow" src="https://img.shields.io/badge/TensorFlow-FF6F00?style=flat&logo=tensorflow&logoColor=white">
|
<img alt="Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAC0AAAAtCAMAAAANxBKoAAAC7lBMVEUAAADg5vYHPVgAoJH+/v76+v39/f9JbLP///9+AIgAnY3///+mcqzt8fXy9fgkXa3Ax9709fr+///9/f8qXq49qp5AaLGMwrv8/P0eW60VWawxYq8yqJzG2dytt9Wyu9elzci519Lf3O3S2efY3OrY0+Xp7PT///////+dqNCexMc6Z7AGpJeGvbenstPZ5ejQ1OfJzOLa7ejh4+/r8fT29vpccbklWK8PVa0AS6ghW63O498vYa+lsdKz1NDRt9Kw1c672tbD3tnAxt7R6OHp5vDe7OrDyuDn6vLl6/EAQKak0MgATakkppo3ZK/Bz9y8w9yzu9jey97axdvHzeG21NHH4trTwthKZrVGZLSUSpuPQJiGAI+GAI8SWKydycLL4d7f2OTi1+S9xNzL0ePT6OLGzeEAo5U0qJw/aLEAo5JFa7JBabEAp5Y4qZ2QxLyKmsm3kL2xoMOehrRNb7RIbbOZgrGre68AUqwAqZqNN5aKJ5N/lMq+qsd8kMa4pcWzh7muhLMEV69juq2kbKqgUaOTR5uMMZWLLZSGAI5VAIdEAH+ovNDHuNCnxcy3qcaYx8K8msGplrx+wLahjbYdXrV6vbMvYK9DrZ8QrZ8tqJuFms+Sos6sw8ecy8RffsNVeMCvmb43aLltv7Q4Y7EZWK4QWa1gt6meZKUdr6GOAZVeA4xPAISyveLUwtivxtKTpNJ2jcqfvcltiMiwwcfAoMVxhL+Kx7xjdrqTe60tsaNQs6KaRKACrJ6UTZwkqpqTL5pkHY4AloSgsd2ptNXPvNOOncuxxsqFl8lmg8apt8FJcr9EbryGxLqlkrkrY7dRa7ZGZLQ5t6iXUZ6PPpgVpZeJCJFKAIGareTa0+KJod3H0deY2M+esM25usmYu8d2zsJOdcBVvrCLbqcAOaaHaKQAMaScWqKBXqCXMJ2RHpiLF5NmJZAdAHN2kta11dKu1M+DkcZLdb+Mcql3TppyRJdzQ5ZtNZNlIY+DF4+voCOQAAAAZ3RSTlMABAT+MEEJ/RH+/TP+Zlv+pUo6Ifz8+fco/fz6+evr39S9nJmOilQaF/7+/f38+smmoYp6b1T+/v7++vj189zU0tDJxsGzsrKSfv34+Pf27dDOysG9t6+n/vv6+vr59uzr1tG+tZ6Qg9Ym3QAABR5JREFUSMeNlVVUG1EQhpcuxEspXqS0SKEtxQp1d3d332STTRpIQhIISQgJhODu7lAoDoUCpe7u7u7+1puGpqnCPOyZvffbOXPm/PsP9JfQgyCC+tmTABTOcbxDz/heENS7/1F+9nhvkHePG0wNDLbGWwdXL+rbLWvpmZHXD8+gMfBjTh+aSe6Gnn7lwQIOTR0c8wfX3PWgv7avbdKwf/ZoBp1Gp/PvuvXW3vw5ib7emnTW4OR+3D4jB9vjNJ/7gNvfWWeH/TO/JyYrsiKCRjVEZA3UB+96kON+DxOQ/NLE8PE5iUYgIXjFnCOlxEQMaSGVxjg4gxOnEycGz8bptuNjVx08LscIgrzH3umcn+KKtiBIyvzOO2O99aAdR8cF19oZalnCtvREUw79tCd5sow1g1UKM6kXqUx4T8wsi3sTjJ3yzDmmhenLXLpo8u45eG5y4Vvbk6kkC4LLtJMowkSQxmk4ggVJEG+7c6QpHT8vvW9X7/o7+3ELmiJi2mEzZJiz8cT6TBlanBk70cB5GGIGC1gRDdZ00yADLW1FL6gqhtvNXNG5S9gdSrk4M1qu7JAsmYshzDS4peoMrU/gT7qQdqYGZaYhxZmVbGJAm/CS/HloWyhRUlknQ9KYcExTwS80d3VNOxUZJpITYyspl0LbhArhpZCD9cRWEQuhYkNGMHToQ/2Cs6swJlb39CsllxdXX6IUKh/H5jbnSsPKjgmoaFQ1f8wRLR0UnGE/RcDEjj2jXG1WVTwUs8+zxfcrVO+vSsuOpVKxCfYZiQ0/aPKuxQbQ8lIz+DClxC8u+snlcJ7Yr1z1JPqUH0V+GDXbOwAib931Y4Imaq0NTIXPXY+N5L18GJ37SVWu+hwXff8l72Ds9XuwYIBaXPq6Shm4l+Vl/5QiOlV+uTk6YR9PxKsI9xNJny31ygK1e+nIRC1N97EGkFPI+jCpiHe5PCEy7oWqWSwRrpOvhFzcbTWMbm3ZJAOn1rUKpYIt/lDhW/5RHHteeWFN60qo98YJuoq1nK3uW5AabyspC1BcIEpOhft+SZAShYoLSvnmSfnYADUERP5jJn2h5XtsgCRuhYQqAvwTwn33+YWEKUI72HX5AtfSAZDe8F2DtPPm77afhl0EkthzuCQU0BWApgQIH9+KB0JhopMM7bJrdTRoleM2JAVNMyPF+wdoaz+XJpGoVAQ7WXUkcV7gT3oUZyi/ISIJAVKhgNp+4b4veCFhYVJw4locdSjZCp9cPUhLF9EZ3KKzURepMEtCDPP3VcWFx4UIiZIklIpFNfHpdEafIF2aRmOcrUmjohbT2WUllbmRvgfbythbQO3222fpDJoufaQPncYYuqoGtUEsCJZL6/3PR5b4syeSjZMQG/T2maGANlXT2v8S4AULWaUkCxfLyW8iW4kdka+nEMjxpL2NCwsYNBp+Q61PF43zyDg9Bm9+3NNySn78jMZUUkumqE4Gp7JmFOdP1vc8PpRrzj9+wPinCy8K1PiJ4aYbnTYpCCbDkBSbzhu2QJ1Gd82t8jI8TH51+OzvXoWbnXUOBkNW+0mWFwGcGOUVpU81/n3TOHb5oMt2FgYGjzau0Nif0Ss7Q3XB33hjjQHjHA5E5aOyIQc8CBrLdQSs3j92VG+3nNEjbkbdbBr9zm04ruvw37vh0QKOdeGIkckc80fX3KH/h7PT4BOjgCty8VZ5ux1MoO5Cf5naca2LAsEgehI+drX8o/0Nu+W0m6K/I9gGPd/dfx/EN/wN62AhsBWuAAAAAElFTkSuQmCC
|
||||||
<img alt="Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAC0AAAAtCAMAAAANxBKoAAAC7lBMVEUAAADg5vYHPVgAoJH+/v76+v39/f9JbLP///9+AIgAnY3///+mcqzt8fXy9fgkXa3Ax9709fr+///9/f8qXq49qp5AaLGMwrv8/P0eW60VWawxYq8yqJzG2dytt9Wyu9elzci519Lf3O3S2efY3OrY0+Xp7PT///////+dqNCexMc6Z7AGpJeGvbenstPZ5ejQ1OfJzOLa7ejh4+/r8fT29vpccbklWK8PVa0AS6ghW63O498vYa+lsdKz1NDRt9Kw1c672tbD3tnAxt7R6OHp5vDe7OrDyuDn6vLl6/EAQKak0MgATakkppo3ZK/Bz9y8w9yzu9jey97axdvHzeG21NHH4trTwthKZrVGZLSUSpuPQJiGAI+GAI8SWKydycLL4d7f2OTi1+S9xNzL0ePT6OLGzeEAo5U0qJw/aLEAo5JFa7JBabEAp5Y4qZ2QxLyKmsm3kL2xoMOehrRNb7RIbbOZgrGre68AUqwAqZqNN5aKJ5N/lMq+qsd8kMa4pcWzh7muhLMEV69juq2kbKqgUaOTR5uMMZWLLZSGAI5VAIdEAH+ovNDHuNCnxcy3qcaYx8K8msGplrx+wLahjbYdXrV6vbMvYK9DrZ8QrZ8tqJuFms+Sos6sw8ecy8RffsNVeMCvmb43aLltv7Q4Y7EZWK4QWa1gt6meZKUdr6GOAZVeA4xPAISyveLUwtivxtKTpNJ2jcqfvcltiMiwwcfAoMVxhL+Kx7xjdrqTe60tsaNQs6KaRKACrJ6UTZwkqpqTL5pkHY4AloSgsd2ptNXPvNOOncuxxsqFl8lmg8apt8FJcr9EbryGxLqlkrkrY7dRa7ZGZLQ5t6iXUZ6PPpgVpZeJCJFKAIGareTa0+KJod3H0deY2M+esM25usmYu8d2zsJOdcBVvrCLbqcAOaaHaKQAMaScWqKBXqCXMJ2RHpiLF5NmJZAdAHN2kta11dKu1M+DkcZLdb+Mcql3TppyRJdzQ5ZtNZNlIY+DF4+voCOQAAAAZ3RSTlMABAT+MEEJ/RH+/TP+Zlv+pUo6Ifz8+fco/fz6+evr39S9nJmOilQaF/7+/f38+smmoYp6b1T+/v7++vj189zU0tDJxsGzsrKSfv34+Pf27dDOysG9t6+n/vv6+vr59uzr1tG+tZ6Qg9Ym3QAABR5JREFUSMeNlVVUG1EQhpcuxEspXqS0SKEtxQp1d3d332STTRpIQhIISQgJhODu7lAoDoUCpe7u7u7+1puGpqnCPOyZvffbOXPm/PsP9JfQgyCC+tmTABTOcbxDz/heENS7/1F+9nhvkHePG0wNDLbGWwdXL+rbLWvpmZHXD8+gMfBjTh+aSe6Gnn7lwQIOTR0c8wfX3PWgv7avbdKwf/ZoBp1Gp/PvuvXW3vw5ib7emnTW4OR+3D4jB9vjNJ/7gNvfWWeH/TO/JyYrsiKCRjVEZA3UB+96kON+DxOQ/NLE8PE5iUYgIXjFnCOlxEQMaSGVxjg4gxOnEycGz8bptuNjVx08LscIgrzH3umcn+KKtiBIyvzOO2O99aAdR8cF19oZalnCtvREUw79tCd5sow1g1UKM6kXqUx4T8wsi3sTjJ3yzDmmhenLXLpo8u45eG5y4Vvbk6kkC4LLtJMowkSQxmk4ggVJEG+7c6QpHT8vvW9X7/o7+3ELmiJi2mEzZJiz8cT6TBlanBk70cB5GGIGC1gRDdZ00yADLW1FL6gqhtvNXNG5S9gdSrk4M1qu7JAsmYshzDS4peoMrU/gT7qQdqYGZaYhxZmVbGJAm/CS/HloWyhRUlknQ9KYcExTwS80d3VNOxUZJpITYyspl0LbhArhpZCD9cRWEQuhYkNGMHToQ/2Cs6swJlb39CsllxdXX6IUKh/H5jbnSsPKjgmoaFQ1f8wRLR0UnGE/RcDEjj2jXG1WVTwUs8+zxfcrVO+vSsuOpVKxCfYZiQ0/aPKuxQbQ8lIz+DClxC8u+snlcJ7Yr1z1JPqUH0V+GDXbOwAib931Y4Imaq0NTIXPXY+N5L18GJ37SVWu+hwXff8l72Ds9XuwYIBaXPq6Shm4l+Vl/5QiOlV+uTk6YR9PxKsI9xNJny31ygK1e+nIRC1N97EGkFPI+jCpiHe5PCEy7oWqWSwRrpOvhFzcbTWMbm3ZJAOn1rUKpYIt/lDhW/5RHHteeWFN60qo98YJuoq1nK3uW5AabyspC1BcIEpOhft+SZAShYoLSvnmSfnYADUERP5jJn2h5XtsgCRuhYQqAvwTwn33+YWEKUI72HX5AtfSAZDe8F2DtPPm77afhl0EkthzuCQU0BWApgQIH9+KB0JhopMM7bJrdTRoleM2JAVNMyPF+wdoaz+XJpGoVAQ7WXUkcV7gT3oUZyi/ISIJAVKhgNp+4b4veCFhYVJw4locdSjZCp9cPUhLF9EZ3KKzURepMEtCDPP3VcWFx4UIiZIklIpFNfHpdEafIF2aRmOcrUmjohbT2WUllbmRvgfbythbQO3222fpDJoufaQPncYYuqoGtUEsCJZL6/3PR5b4syeSjZMQG/T2maGANlXT2v8S4AULWaUkCxfLyW8iW4kdka+nEMjxpL2NCwsYNBp+Q61PF43zyDg9Bm9+3NNySn78jMZUUkumqE4Gp7JmFOdP1vc8PpRrzj9+wPinCy8K1PiJ4aYbnTYpCCbDkBSbzhu2QJ1Gd82t8jI8TH51+OzvXoWbnXUOBkNW+0mWFwGcGOUVpU81/n3TOHb5oMt2FgYGjzau0Nif0Ss7Q3XB33hjjQHjHA5E5aOyIQc8CBrLdQSs3j92VG+3nNEjbkbdbBr9zm04ruvw37vh0QKOdeGIkckc80fX3KH/h7PT4BOjgCty8VZ5ux1MoO5Cf5naca2LAsEgehI+drX8o/0Nu+W0m6K/I9gGPd/dfx/EN/wN62AhsBWuAAAAAElFTkSuQmCC
|
">
|
||||||
">
|
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
## Overview
|
# BERT
|
||||||
|
|
||||||
The BERT model was proposed in [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805) by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a
|
[BERT](https://huggingface.co/papers/1810.04805) is a bidirectional transformer pretrained on unlabeled text to predict masked tokens in a sentence and to predict whether one sentence follows another. The main idea is that by randomly masking some tokens, the model can train on text to the left and right, giving it a more thorough understanding. BERT is also very versatile because its learned language representations can be adapted for other NLP tasks by fine-tuning an additional layer or head.
|
||||||
bidirectional transformer pretrained using a combination of masked language modeling objective and next sentence
|
|
||||||
prediction on a large corpus comprising the Toronto Book Corpus and Wikipedia.
|
|
||||||
|
|
||||||
The abstract from the paper is the following:
|
You can find all the original BERT checkpoints under the [BERT](https://huggingface.co/collections/google/bert-release-64ff5e7a4be99045d1896dbc) collection.
|
||||||
|
|
||||||
*We introduce a new language representation model called BERT, which stands for Bidirectional Encoder Representations
|
> [!TIP]
|
||||||
from Transformers. Unlike recent language representation models, BERT is designed to pre-train deep bidirectional
|
> Click on the BERT models in the right sidebar for more examples of how to apply BERT to different language tasks.
|
||||||
representations from unlabeled text by jointly conditioning on both left and right context in all layers. As a result,
|
|
||||||
the pre-trained BERT model can be fine-tuned with just one additional output layer to create state-of-the-art models
|
|
||||||
for a wide range of tasks, such as question answering and language inference, without substantial task-specific
|
|
||||||
architecture modifications.*
|
|
||||||
|
|
||||||
*BERT is conceptually simple and empirically powerful. It obtains new state-of-the-art results on eleven natural
|
The example below demonstrates how to predict the `[MASK]` token with [`Pipeline`], [`AutoModel`], and from the command line.
|
||||||
language processing tasks, including pushing the GLUE score to 80.5% (7.7% point absolute improvement), MultiNLI
|
|
||||||
accuracy to 86.7% (4.6% absolute improvement), SQuAD v1.1 question answering Test F1 to 93.2 (1.5 point absolute
|
|
||||||
improvement) and SQuAD v2.0 Test F1 to 83.1 (5.1 point absolute improvement).*
|
|
||||||
|
|
||||||
This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/google-research/bert).
|
<hfoptions id="usage">
|
||||||
|
<hfoption id="Pipeline">
|
||||||
|
|
||||||
## Usage tips
|
```py
|
||||||
|
import torch
|
||||||
|
from transformers import pipeline
|
||||||
|
|
||||||
- BERT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than
|
pipeline = pipeline(
|
||||||
the left.
|
task="fill-mask",
|
||||||
- BERT was trained with the masked language modeling (MLM) and next sentence prediction (NSP) objectives. It is
|
model="google-bert/bert-base-uncased",
|
||||||
efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation.
|
torch_dtype=torch.float16,
|
||||||
- Corrupts the inputs by using random masking, more precisely, during pretraining, a given percentage of tokens (usually 15%) is masked by:
|
device=0
|
||||||
|
)
|
||||||
* a special mask token with probability 0.8
|
pipeline("Plants create [MASK] through a process known as photosynthesis.")
|
||||||
* a random token different from the one masked with probability 0.1
|
|
||||||
* the same token with probability 0.1
|
|
||||||
|
|
||||||
- The model must predict the original sentence, but has a second objective: inputs are two sentences A and B (with a separation token in between). With probability 50%, the sentences are consecutive in the corpus, in the remaining 50% they are not related. The model has to predict if the sentences are consecutive or not.
|
|
||||||
|
|
||||||
### Using Scaled Dot Product Attention (SDPA)
|
|
||||||
|
|
||||||
PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function
|
|
||||||
encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the
|
|
||||||
[official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)
|
|
||||||
or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)
|
|
||||||
page for more information.
|
|
||||||
|
|
||||||
SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set
|
|
||||||
`attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.
|
|
||||||
|
|
||||||
```
|
|
||||||
from transformers import BertModel
|
|
||||||
|
|
||||||
model = BertModel.from_pretrained("bert-base-uncased", torch_dtype=torch.float16, attn_implementation="sdpa")
|
|
||||||
...
|
|
||||||
```
|
```
|
||||||
|
|
||||||
For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).
|
</hfoption>
|
||||||
|
<hfoption id="AutoModel">
|
||||||
|
|
||||||
On a local benchmark (A100-80GB, CPUx12, RAM 96.6GB, PyTorch 2.2.0, OS Ubuntu 22.04) with `float16`, we saw the
|
```py
|
||||||
following speedups during training and inference.
|
import torch
|
||||||
|
from transformers import AutoModelForMaskedLM, AutoTokenizer
|
||||||
|
|
||||||
#### Training
|
tokenizer = AutoTokenizer.from_pretrained(
|
||||||
|
"google-bert/bert-base-uncased",
|
||||||
|
)
|
||||||
|
model = AutoModelForMaskedLM.from_pretrained(
|
||||||
|
"google-bert/bert-base-uncased",
|
||||||
|
torch_dtype=torch.float16,
|
||||||
|
device_map="auto",
|
||||||
|
attn_implementation="sdpa"
|
||||||
|
)
|
||||||
|
inputs = tokenizer("Plants create [MASK] through a process known as photosynthesis.", return_tensors="pt").to("cuda")
|
||||||
|
|
||||||
|batch_size|seq_len|Time per batch (eager - s)|Time per batch (sdpa - s)|Speedup (%)|Eager peak mem (MB)|sdpa peak mem (MB)|Mem saving (%)|
|
with torch.no_grad():
|
||||||
|----------|-------|--------------------------|-------------------------|-----------|-------------------|------------------|--------------|
|
outputs = model(**inputs)
|
||||||
|4 |256 |0.023 |0.017 |35.472 |939.213 |764.834 |22.800 |
|
predictions = outputs.logits
|
||||||
|4 |512 |0.023 |0.018 |23.687 |1970.447 |1227.162 |60.569 |
|
|
||||||
|8 |256 |0.023 |0.018 |23.491 |1594.295 |1226.114 |30.028 |
|
|
||||||
|8 |512 |0.035 |0.025 |43.058 |3629.401 |2134.262 |70.054 |
|
|
||||||
|16 |256 |0.030 |0.024 |25.583 |2874.426 |2134.262 |34.680 |
|
|
||||||
|16 |512 |0.064 |0.044 |46.223 |6964.659 |3961.013 |75.830 |
|
|
||||||
|
|
||||||
#### Inference
|
masked_index = torch.where(inputs['input_ids'] == tokenizer.mask_token_id)[1]
|
||||||
|
predicted_token_id = predictions[0, masked_index].argmax(dim=-1)
|
||||||
|
predicted_token = tokenizer.decode(predicted_token_id)
|
||||||
|
|
||||||
|batch_size|seq_len|Per token latency eager (ms)|Per token latency SDPA (ms)|Speedup (%)|Mem eager (MB)|Mem BT (MB)|Mem saved (%)|
|
print(f"The predicted token is: {predicted_token}")
|
||||||
|----------|-------|----------------------------|---------------------------|-----------|--------------|-----------|-------------|
|
```
|
||||||
|1 |128 |5.736 |4.987 |15.022 |282.661 |282.924 |-0.093 |
|
|
||||||
|1 |256 |5.689 |4.945 |15.055 |298.686 |298.948 |-0.088 |
|
|
||||||
|2 |128 |6.154 |4.982 |23.521 |314.523 |314.785 |-0.083 |
|
|
||||||
|2 |256 |6.201 |4.949 |25.303 |347.546 |347.033 |0.148 |
|
|
||||||
|4 |128 |6.049 |4.987 |21.305 |378.895 |379.301 |-0.107 |
|
|
||||||
|4 |256 |6.285 |5.364 |17.166 |443.209 |444.382 |-0.264 |
|
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="transformers-cli">
|
||||||
|
|
||||||
|
```bash
|
||||||
|
echo -e "Plants create [MASK] through a process known as photosynthesis." | transformers-cli run --task fill-mask --model google-bert/bert-base-uncased --device 0
|
||||||
|
```
|
||||||
|
|
||||||
## Resources
|
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with BERT. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
|
## Notes
|
||||||
|
|
||||||
<PipelineTag pipeline="text-classification"/>
|
- Inputs should be padded on the right because BERT uses absolute position embeddings.
|
||||||
|
|
||||||
- A blog post on [BERT Text Classification in a different language](https://www.philschmid.de/bert-text-classification-in-a-different-language).
|
|
||||||
- A notebook for [Finetuning BERT (and friends) for multi-label text classification](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/BERT/Fine_tuning_BERT_(and_friends)_for_multi_label_text_classification.ipynb).
|
|
||||||
- A notebook on how to [Finetune BERT for multi-label classification using PyTorch](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb). 🌎
|
|
||||||
- A notebook on how to [warm-start an EncoderDecoder model with BERT for summarization](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb).
|
|
||||||
- [`BertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb).
|
|
||||||
- [`TFBertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb).
|
|
||||||
- [`FlaxBertForSequenceClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_flax.ipynb).
|
|
||||||
- [Text classification task guide](../tasks/sequence_classification)
|
|
||||||
|
|
||||||
<PipelineTag pipeline="token-classification"/>
|
|
||||||
|
|
||||||
- A blog post on how to use [Hugging Face Transformers with Keras: Fine-tune a non-English BERT for Named Entity Recognition](https://www.philschmid.de/huggingface-transformers-keras-tf).
|
|
||||||
- A notebook for [Finetuning BERT for named-entity recognition](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/BERT/Custom_Named_Entity_Recognition_with_BERT_only_first_wordpiece.ipynb) using only the first wordpiece of each word in the word label during tokenization. To propagate the label of the word to all wordpieces, see this [version](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/BERT/Custom_Named_Entity_Recognition_with_BERT.ipynb) of the notebook instead.
|
|
||||||
- [`BertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb).
|
|
||||||
- [`TFBertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb).
|
|
||||||
- [`FlaxBertForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification).
|
|
||||||
- [Token classification](https://huggingface.co/course/chapter7/2?fw=pt) chapter of the 🤗 Hugging Face Course.
|
|
||||||
- [Token classification task guide](../tasks/token_classification)
|
|
||||||
|
|
||||||
<PipelineTag pipeline="fill-mask"/>
|
|
||||||
|
|
||||||
- [`BertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb).
|
|
||||||
- [`TFBertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
|
|
||||||
- [`FlaxBertForMaskedLM`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#masked-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb).
|
|
||||||
- [Masked language modeling](https://huggingface.co/course/chapter7/3?fw=pt) chapter of the 🤗 Hugging Face Course.
|
|
||||||
- [Masked language modeling task guide](../tasks/masked_language_modeling)
|
|
||||||
|
|
||||||
<PipelineTag pipeline="question-answering"/>
|
|
||||||
|
|
||||||
- [`BertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb).
|
|
||||||
- [`TFBertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb).
|
|
||||||
- [`FlaxBertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering).
|
|
||||||
- [Question answering](https://huggingface.co/course/chapter7/7?fw=pt) chapter of the 🤗 Hugging Face Course.
|
|
||||||
- [Question answering task guide](../tasks/question_answering)
|
|
||||||
|
|
||||||
**Multiple choice**
|
|
||||||
- [`BertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb).
|
|
||||||
- [`TFBertForMultipleChoice`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/multiple-choice) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb).
|
|
||||||
- [Multiple choice task guide](../tasks/multiple_choice)
|
|
||||||
|
|
||||||
⚡️ **Inference**
|
|
||||||
- A blog post on how to [Accelerate BERT inference with Hugging Face Transformers and AWS Inferentia](https://huggingface.co/blog/bert-inferentia-sagemaker).
|
|
||||||
- A blog post on how to [Accelerate BERT inference with DeepSpeed-Inference on GPUs](https://www.philschmid.de/bert-deepspeed-inference).
|
|
||||||
|
|
||||||
⚙️ **Pretraining**
|
|
||||||
- A blog post on [Pre-Training BERT with Hugging Face Transformers and Habana Gaudi](https://www.philschmid.de/pre-training-bert-habana).
|
|
||||||
|
|
||||||
🚀 **Deploy**
|
|
||||||
- A blog post on how to [Convert Transformers to ONNX with Hugging Face Optimum](https://www.philschmid.de/convert-transformers-to-onnx).
|
|
||||||
- A blog post on how to [Setup Deep Learning environment for Hugging Face Transformers with Habana Gaudi on AWS](https://www.philschmid.de/getting-started-habana-gaudi#conclusion).
|
|
||||||
- A blog post on [Autoscaling BERT with Hugging Face Transformers, Amazon SageMaker and Terraform module](https://www.philschmid.de/terraform-huggingface-amazon-sagemaker-advanced).
|
|
||||||
- A blog post on [Serverless BERT with HuggingFace, AWS Lambda, and Docker](https://www.philschmid.de/serverless-bert-with-huggingface-aws-lambda-docker).
|
|
||||||
- A blog post on [Hugging Face Transformers BERT fine-tuning using Amazon SageMaker and Training Compiler](https://www.philschmid.de/huggingface-amazon-sagemaker-training-compiler).
|
|
||||||
- A blog post on [Task-specific knowledge distillation for BERT using Transformers & Amazon SageMaker](https://www.philschmid.de/knowledge-distillation-bert-transformers).
|
|
||||||
|
|
||||||
## BertConfig
|
## BertConfig
|
||||||
|
|
||||||
@ -181,35 +107,10 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h
|
|||||||
- create_token_type_ids_from_sequences
|
- create_token_type_ids_from_sequences
|
||||||
- save_vocabulary
|
- save_vocabulary
|
||||||
|
|
||||||
<frameworkcontent>
|
|
||||||
<pt>
|
|
||||||
|
|
||||||
## BertTokenizerFast
|
## BertTokenizerFast
|
||||||
|
|
||||||
[[autodoc]] BertTokenizerFast
|
[[autodoc]] BertTokenizerFast
|
||||||
|
|
||||||
</pt>
|
|
||||||
<tf>
|
|
||||||
|
|
||||||
## TFBertTokenizer
|
|
||||||
|
|
||||||
[[autodoc]] TFBertTokenizer
|
|
||||||
|
|
||||||
</tf>
|
|
||||||
</frameworkcontent>
|
|
||||||
|
|
||||||
## Bert specific outputs
|
|
||||||
|
|
||||||
[[autodoc]] models.bert.modeling_bert.BertForPreTrainingOutput
|
|
||||||
|
|
||||||
[[autodoc]] models.bert.modeling_tf_bert.TFBertForPreTrainingOutput
|
|
||||||
|
|
||||||
[[autodoc]] models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput
|
|
||||||
|
|
||||||
|
|
||||||
<frameworkcontent>
|
|
||||||
<pt>
|
|
||||||
|
|
||||||
## BertModel
|
## BertModel
|
||||||
|
|
||||||
[[autodoc]] BertModel
|
[[autodoc]] BertModel
|
||||||
@ -255,8 +156,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h
|
|||||||
[[autodoc]] BertForQuestionAnswering
|
[[autodoc]] BertForQuestionAnswering
|
||||||
- forward
|
- forward
|
||||||
|
|
||||||
</pt>
|
## TFBertTokenizer
|
||||||
<tf>
|
|
||||||
|
[[autodoc]] TFBertTokenizer
|
||||||
|
|
||||||
## TFBertModel
|
## TFBertModel
|
||||||
|
|
||||||
@ -303,9 +205,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h
|
|||||||
[[autodoc]] TFBertForQuestionAnswering
|
[[autodoc]] TFBertForQuestionAnswering
|
||||||
- call
|
- call
|
||||||
|
|
||||||
</tf>
|
|
||||||
<jax>
|
|
||||||
|
|
||||||
## FlaxBertModel
|
## FlaxBertModel
|
||||||
|
|
||||||
[[autodoc]] FlaxBertModel
|
[[autodoc]] FlaxBertModel
|
||||||
@ -351,7 +250,10 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h
|
|||||||
[[autodoc]] FlaxBertForQuestionAnswering
|
[[autodoc]] FlaxBertForQuestionAnswering
|
||||||
- __call__
|
- __call__
|
||||||
|
|
||||||
</jax>
|
## Bert specific outputs
|
||||||
</frameworkcontent>
|
|
||||||
|
|
||||||
|
[[autodoc]] models.bert.modeling_bert.BertForPreTrainingOutput
|
||||||
|
|
||||||
|
[[autodoc]] models.bert.modeling_tf_bert.TFBertForPreTrainingOutput
|
||||||
|
|
||||||
|
[[autodoc]] models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput
|
184
docs/source/en/model_doc/deepseek_v3.md
Normal file
184
docs/source/en/model_doc/deepseek_v3.md
Normal file
@ -0,0 +1,184 @@
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# DeepSeek-V3

## Overview

The DeepSeek-V3 model was proposed in [DeepSeek-V3 Technical Report](https://arxiv.org/abs/2412.19437) by the DeepSeek-AI Team.

The abstract from the paper is the following:

*We present DeepSeek-V3, a strong Mixture-of-Experts (MoE) language model with 671B total parameters with 37B activated for each token. To achieve efficient inference and cost-effective training, DeepSeek-V3 adopts Multi-head Latent Attention (MLA) and DeepSeekMoE architectures, which were thoroughly validated in DeepSeek-V2. Furthermore, DeepSeek-V3 pioneers an auxiliary-loss-free strategy for load balancing and sets a multi-token prediction training objective for stronger performance. We pre-train DeepSeek-V3 on 14.8 trillion diverse and high-quality tokens, followed by Supervised Fine-Tuning and Reinforcement Learning stages to fully harness its capabilities. Comprehensive evaluations reveal that DeepSeek-V3 outperforms other open-source models and achieves performance comparable to leading closed-source models. Despite its excellent performance, DeepSeek-V3 requires only 2.788M H800 GPU hours for its full training. In addition, its training process is remarkably stable. Throughout the entire training process, we did not experience any irrecoverable loss spikes or perform any rollbacks. The model checkpoints are available at https://github.com/deepseek-ai/DeepSeek-V3.*

## Limitations and call for contribution!

We are super happy to make this code community-powered, and would love to see how you can best optimize the following:

- the current implementation uses the "naive" attention computation (so not really MLA)
- the current implementation loops through the experts (a toy sketch of this pattern is shown right after this list). This should be replaced; a good pointer is to use `get_packed_weights` from `integrations/tensor_parallel`.
- the current implementation uses the EleutherAI formula for ROPE; using the original one would be more efficient! (it should still follow our API)
- static cache is not supported (this should be just a generation config issue / config shape issues)
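
To make the experts bullet concrete, here is a toy, self-contained sketch of the looped-experts pattern (module and layer sizes are made up for illustration; this is not the actual `DeepseekV3` code). A packed/batched rewrite would replace the Python `for` loop with grouped matmuls over the tokens routed to each expert.

```python
# Toy illustration of the "loop over experts" pattern -- not the DeepseekV3 module.
import torch
import torch.nn as nn


class NaiveMoE(nn.Module):
    def __init__(self, hidden_size=16, intermediate_size=32, num_experts=4, top_k=2):
        super().__init__()
        self.top_k = top_k
        self.router = nn.Linear(hidden_size, num_experts, bias=False)
        self.experts = nn.ModuleList(
            nn.Sequential(
                nn.Linear(hidden_size, intermediate_size),
                nn.SiLU(),
                nn.Linear(intermediate_size, hidden_size),
            )
            for _ in range(num_experts)
        )

    def forward(self, hidden_states):
        # hidden_states: (num_tokens, hidden_size)
        scores = self.router(hidden_states).softmax(dim=-1)
        weights, selected = scores.topk(self.top_k, dim=-1)  # (num_tokens, top_k)
        output = torch.zeros_like(hidden_states)
        # the slow part: a Python loop over experts instead of one packed matmul
        for expert_idx, expert in enumerate(self.experts):
            token_idx, k_idx = (selected == expert_idx).nonzero(as_tuple=True)
            if token_idx.numel() == 0:
                continue
            output[token_idx] += weights[token_idx, k_idx, None] * expert(hidden_states[token_idx])
        return output


moe = NaiveMoE()
print(moe(torch.randn(8, 16)).shape)  # torch.Size([8, 16])
```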

### Usage tips

The model uses Multi-head Latent Attention (MLA) and DeepSeekMoE architectures for efficient inference and cost-effective training. It employs an auxiliary-loss-free strategy for load balancing and a multi-token prediction training objective. The model can be used for various language tasks after being pre-trained on 14.8 trillion tokens and going through Supervised Fine-Tuning and Reinforcement Learning stages.

You can run the model in `FP8` automatically; two nodes of 8 H100s should be more than enough!

```python
# `run_deepseek_r1.py`
import time

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

torch.manual_seed(30)

tokenizer = AutoTokenizer.from_pretrained("deepseek-r1")

chat = [
    {"role": "user", "content": "Hello, how are you?"},
    {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
    {"role": "user", "content": "I'd like to show off how chat templating works!"},
]

model = AutoModelForCausalLM.from_pretrained("deepseek-r1", device_map="auto", torch_dtype=torch.bfloat16)
inputs = tokenizer.apply_chat_template(chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)

start = time.time()
outputs = model.generate(inputs, max_new_tokens=50)
print(tokenizer.batch_decode(outputs))
print(time.time() - start)
```

This generated:

``````
<|Assistant|><think>
|
||||||
|
Okay, the user wants to demonstrate how chat templating works. Let me break down what that means. Chat templating is about structuring the conversation data, especially for models that need specific input formats. Maybe they're referring to something like how messages are formatted with roles (user, assistant, system) in APIs like OpenAI.
|
||||||
|
|
||||||
|
First, I should explain what chat templating is. It's the process of formatting conversation data into a structured format that the model can understand. This usually includes roles and content. For example, user messages, assistant responses, and system messages each have their own role tags.
|
||||||
|
|
||||||
|
They might want an example. Let me think of a simple conversation. The user says "Hello, how are you?" and the assistant responds "I'm doing great. How can I help you today?" Then the user follows up with wanting to show off chat templating. So the example should include the history and the new message.
|
||||||
|
|
||||||
|
In some frameworks, like Hugging Face's Transformers, chat templates are applied using Jinja2 templates. The template might look something like combining system messages, then looping through user and assistant messages with appropriate tags. For instance, using {% for message in messages %} and assigning roles like <|user|>, <|assistant|>, etc.
|
||||||
|
|
||||||
|
I should structure the example with the messages array, showing each role and content. Then apply a hypothetical template to convert that into a formatted string the model uses. Also, mention that different models have different templating requirements, like using special tokens or varying role labels.
|
||||||
|
|
||||||
|
Wait, the user mentioned "chat templating" in the context of showing off. Maybe they want a practical example they can present. So providing a code snippet or a structured data example would be helpful. Let me outline a typical messages array and then the templated output.
|
||||||
|
|
||||||
|
Also, it's important to note that proper templating ensures the model knows the conversation flow, which is crucial for generating coherent responses. Maybe include a note about why it's important, like maintaining context and role-specific processing.
|
||||||
|
|
||||||
|
Let me check if there are any common mistakes or things to avoid. For example, not closing tags properly, or mismatching roles. But maybe that's too detailed unless the user asks. Focus on the positive example first.
|
||||||
|
|
||||||
|
Putting it all together, the response should have an example messages array, the applied template, and the final formatted string. Maybe use angle brackets or special tokens as placeholders. Also, mention that this helps in training or fine-tuning models with structured data.
|
||||||
|
|
||||||
|
I think that's a solid approach. Let me structure it step by step to make it clear.
|
||||||
|
</think>
|
||||||
|
|
||||||
|
Chat templating is a way to structure conversation data (e.g., user/assistant interactions) into a format that language models understand. This is especially important for models trained to handle multi-turn dialogues, where the input must explicitly separate roles (user, assistant, system, etc.) and messages. Let’s break this down with an example!
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### **Step 1: Raw Conversation History**
|
||||||
|
Suppose we have this conversation:
|
||||||
|
- **User**: "Hello, how are you?"
|
||||||
|
- **Assistant**: "I'm doing great. How can I help you today?"
|
||||||
|
- **User**: "I'd like to show off how chat templating works!"
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### **Step 2: Structured Messages**
|
||||||
|
In frameworks like Hugging Face Transformers or OpenAI, conversations are often formatted as a list of dictionaries with `role` and `content`:
|
||||||
|
```python
|
||||||
|
messages = [
|
||||||
|
{"role": "user", "content": "Hello, how are you?"},
|
||||||
|
{"role": "assistant", "content": "I'm doing great. How can I help you today?"},
|
||||||
|
{"role": "user", "content": "I'd like to show off how chat templating works!"},
|
||||||
|
]
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### **Step 3: Apply a Chat Template**
|
||||||
|
A **chat template** converts this structured data into a single string formatted for the model. For example, using a Jinja-style template (common in Hugging Face):
|
||||||
|
|
||||||
|
```jinja
|
||||||
|
{% for message in messages %}
|
||||||
|
{% if message['role'] == 'user' %}
|
||||||
|
<|user|>{{ message['content'] }}<|end|>
|
||||||
|
{% elif message['role'] == 'assistant' %}
|
||||||
|
<|assistant|>{{ message['content'] }}<|end|>
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
<|assistant|>
|
||||||
|
```
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### **Step 4: Final Templated Output**
|
||||||
|
Applying the template to our `messages` list would produce:
|
||||||
|
```text
|
||||||
|
<|user|>Hello, how are you?<|end|>
|
||||||
|
<|assistant|>I'm doing great. How can I help you today?<|end|>
|
||||||
|
<|user|>I'd like to show off how chat templating works!<|end|>
|
||||||
|
<|assistant|>
|
||||||
|
```
|
||||||
|
|
||||||
|
This tells the model:
|
||||||
|
1. The conversation history (user/assistant turns).
|
||||||
|
2. The model’s turn to generate a response (`<|assistant|>` at the end).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### **Key Notes**:
|
||||||
|
- **Role Separation**: Tags like `<|user|>` and `<|assistant|>` help the model distinguish speakers.
|
||||||
|
- **Special Tokens**: Models often use unique tokens (e.g., `<|end|>`) to mark message boundaries.
|
||||||
|
- **Flexibility**: Templates vary by model (e.g., OpenAI uses `{"role": "user", "content": "..."}` instead of tags).
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### **Why This Matters**:
|
||||||
|
- **Consistency**: Ensures the model understands dialogue structure.
|
||||||
|
- **Context Preservation**: Maintains the flow of multi-turn conversations.
|
||||||
|
- **Alignment**: Matches the format the model was trained on for better performance.
|
||||||
|
|
||||||
|
Want to dive deeper or see a specific framework’s implementation (e.g., OpenAI, Llama, Mistral)? Let me know! 😊<|end▁of▁sentence|>
|
||||||
|
``````

Use the following command to launch it (run it on both nodes, setting `--node_rank` to 0 on the first node and 1 on the second):

```bash
torchrun --nproc_per_node=8 --nnodes=2 --node_rank=0|1 --rdzv-id an_id --rdzv-backend c10d --rdzv-endpoint master_addr:master_port run_deepseek_r1.py
```

If you hit the following error:

```bash
[rank0]: ncclInternalError: Internal check failed.
[rank0]: Last error:
[rank0]: Bootstrap : no socket interface found
```

it means NCCL was probably not loaded or could not find a usable network interface.
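
A quick sanity check before launching the distributed job (a minimal sketch; run it on each node):

```python
# Minimal sanity check that the local PyTorch build can actually use NCCL.
import torch
import torch.distributed as dist

print(torch.cuda.is_available())   # should be True on a GPU node
print(dist.is_nccl_available())    # True if PyTorch was built with NCCL support
print(torch.cuda.nccl.version())   # e.g. (2, 21, 5)

# If NCCL is available but bootstrap still cannot find a socket interface,
# export NCCL_SOCKET_IFNAME=<your NIC, e.g. eth0> and NCCL_DEBUG=INFO
# before calling torchrun to point it at the right network interface.
```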

## DeepseekV3Config

[[autodoc]] DeepseekV3Config

## DeepseekV3Model

[[autodoc]] DeepseekV3Model
- forward

## DeepseekV3ForCausalLM

[[autodoc]] DeepseekV3ForCausalLM
- forward
@ -19,6 +19,7 @@ rendered properly in your Markdown viewer.
|
|||||||
<div class="flex flex-wrap space-x-1">
|
<div class="flex flex-wrap space-x-1">
|
||||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
<img alt="TensorFlow" src="https://img.shields.io/badge/TensorFlow-FF6F00?style=flat&logo=tensorflow&logoColor=white">
|
<img alt="TensorFlow" src="https://img.shields.io/badge/TensorFlow-FF6F00?style=flat&logo=tensorflow&logoColor=white">
|
||||||
|
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
@@ -90,7 +90,7 @@ The `DepthProEncoder` further uses two encoders:
- `image_encoder`
- Input image is also rescaled to `patch_size` and processed by the **`image_encoder`**

Both these encoders can be configured via `patch_model_config` and `image_model_config` respectively, both of which are separate `Dinov2Model` by default.

Outputs from both encoders (`last_hidden_state`) and selected intermediate states (`hidden_states`) from **`patch_encoder`** are fused by a `DPT`-based `FeatureFusionStage` for depth estimation.

|
@ -16,6 +16,7 @@ specific language governing permissions and limitations under the License.
|
|||||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
<img alt="Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAC0AAAAtCAMAAAANxBKoAAAC7lBMVEUAAADg5vYHPVgAoJH+/v76+v39/f9JbLP///9+AIgAnY3///+mcqzt8fXy9fgkXa3Ax9709fr+///9/f8qXq49qp5AaLGMwrv8/P0eW60VWawxYq8yqJzG2dytt9Wyu9elzci519Lf3O3S2efY3OrY0+Xp7PT///////+dqNCexMc6Z7AGpJeGvbenstPZ5ejQ1OfJzOLa7ejh4+/r8fT29vpccbklWK8PVa0AS6ghW63O498vYa+lsdKz1NDRt9Kw1c672tbD3tnAxt7R6OHp5vDe7OrDyuDn6vLl6/EAQKak0MgATakkppo3ZK/Bz9y8w9yzu9jey97axdvHzeG21NHH4trTwthKZrVGZLSUSpuPQJiGAI+GAI8SWKydycLL4d7f2OTi1+S9xNzL0ePT6OLGzeEAo5U0qJw/aLEAo5JFa7JBabEAp5Y4qZ2QxLyKmsm3kL2xoMOehrRNb7RIbbOZgrGre68AUqwAqZqNN5aKJ5N/lMq+qsd8kMa4pcWzh7muhLMEV69juq2kbKqgUaOTR5uMMZWLLZSGAI5VAIdEAH+ovNDHuNCnxcy3qcaYx8K8msGplrx+wLahjbYdXrV6vbMvYK9DrZ8QrZ8tqJuFms+Sos6sw8ecy8RffsNVeMCvmb43aLltv7Q4Y7EZWK4QWa1gt6meZKUdr6GOAZVeA4xPAISyveLUwtivxtKTpNJ2jcqfvcltiMiwwcfAoMVxhL+Kx7xjdrqTe60tsaNQs6KaRKACrJ6UTZwkqpqTL5pkHY4AloSgsd2ptNXPvNOOncuxxsqFl8lmg8apt8FJcr9EbryGxLqlkrkrY7dRa7ZGZLQ5t6iXUZ6PPpgVpZeJCJFKAIGareTa0+KJod3H0deY2M+esM25usmYu8d2zsJOdcBVvrCLbqcAOaaHaKQAMaScWqKBXqCXMJ2RHpiLF5NmJZAdAHN2kta11dKu1M+DkcZLdb+Mcql3TppyRJdzQ5ZtNZNlIY+DF4+voCOQAAAAZ3RSTlMABAT+MEEJ/RH+/TP+Zlv+pUo6Ifz8+fco/fz6+evr39S9nJmOilQaF/7+/f38+smmoYp6b1T+/v7++vj189zU0tDJxsGzsrKSfv34+Pf27dDOysG9t6+n/vv6+vr59uzr1tG+tZ6Qg9Ym3QAABR5JREFUSMeNlVVUG1EQhpcuxEspXqS0SKEtxQp1d3d332STTRpIQhIISQgJhODu7lAoDoUCpe7u7u7+1puGpqnCPOyZvffbOXPm/PsP9JfQgyCC+tmTABTOcbxDz/heENS7/1F+9nhvkHePG0wNDLbGWwdXL+rbLWvpmZHXD8+gMfBjTh+aSe6Gnn7lwQIOTR0c8wfX3PWgv7avbdKwf/ZoBp1Gp/PvuvXW3vw5ib7emnTW4OR+3D4jB9vjNJ/7gNvfWWeH/TO/JyYrsiKCRjVEZA3UB+96kON+DxOQ/NLE8PE5iUYgIXjFnCOlxEQMaSGVxjg4gxOnEycGz8bptuNjVx08LscIgrzH3umcn+KKtiBIyvzOO2O99aAdR8cF19oZalnCtvREUw79tCd5sow1g1UKM6kXqUx4T8wsi3sTjJ3yzDmmhenLXLpo8u45eG5y4Vvbk6kkC4LLtJMowkSQxmk4ggVJEG+7c6QpHT8vvW9X7/o7+3ELmiJi2mEzZJiz8cT6TBlanBk70cB5GGIGC1gRDdZ00yADLW1FL6gqhtvNXNG5S9gdSrk4M1qu7JAsmYshzDS4peoMrU/gT7qQdqYGZaYhxZmVbGJAm/CS/HloWyhRUlknQ9KYcExTwS80d3VNOxUZJpITYyspl0LbhArhpZCD9cRWEQuhYkNGMHToQ/2Cs6swJlb39CsllxdXX6IUKh/H5jbnSsPKjgmoaFQ1f8wRLR0UnGE/RcDEjj2jXG1WVTwUs8+zxfcrVO+vSsuOpVKxCfYZiQ0/aPKuxQbQ8lIz+DClxC8u+snlcJ7Yr1z1JPqUH0V+GDXbOwAib931Y4Imaq0NTIXPXY+N5L18GJ37SVWu+hwXff8l72Ds9XuwYIBaXPq6Shm4l+Vl/5QiOlV+uTk6YR9PxKsI9xNJny31ygK1e+nIRC1N97EGkFPI+jCpiHe5PCEy7oWqWSwRrpOvhFzcbTWMbm3ZJAOn1rUKpYIt/lDhW/5RHHteeWFN60qo98YJuoq1nK3uW5AabyspC1BcIEpOhft+SZAShYoLSvnmSfnYADUERP5jJn2h5XtsgCRuhYQqAvwTwn33+YWEKUI72HX5AtfSAZDe8F2DtPPm77afhl0EkthzuCQU0BWApgQIH9+KB0JhopMM7bJrdTRoleM2JAVNMyPF+wdoaz+XJpGoVAQ7WXUkcV7gT3oUZyi/ISIJAVKhgNp+4b4veCFhYVJw4locdSjZCp9cPUhLF9EZ3KKzURepMEtCDPP3VcWFx4UIiZIklIpFNfHpdEafIF2aRmOcrUmjohbT2WUllbmRvgfbythbQO3222fpDJoufaQPncYYuqoGtUEsCJZL6/3PR5b4syeSjZMQG/T2maGANlXT2v8S4AULWaUkCxfLyW8iW4kdka+nEMjxpL2NCwsYNBp+Q61PF43zyDg9Bm9+3NNySn78jMZUUkumqE4Gp7JmFOdP1vc8PpRrzj9+wPinCy8K1PiJ4aYbnTYpCCbDkBSbzhu2QJ1Gd82t8jI8TH51+OzvXoWbnXUOBkNW+0mWFwGcGOUVpU81/n3TOHb5oMt2FgYGjzau0Nif0Ss7Q3XB33hjjQHjHA5E5aOyIQc8CBrLdQSs3j92VG+3nNEjbkbdbBr9zm04ruvw37vh0QKOdeGIkckc80fX3KH/h7PT4BOjgCty8VZ5ux1MoO5Cf5naca2LAsEgehI+drX8o/0Nu+W0m6K/I9gGPd/dfx/EN/wN62AhsBWuAAAAAElFTkSuQmCC
|
<img alt="Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAC0AAAAtCAMAAAANxBKoAAAC7lBMVEUAAADg5vYHPVgAoJH+/v76+v39/f9JbLP///9+AIgAnY3///+mcqzt8fXy9fgkXa3Ax9709fr+///9/f8qXq49qp5AaLGMwrv8/P0eW60VWawxYq8yqJzG2dytt9Wyu9elzci519Lf3O3S2efY3OrY0+Xp7PT///////+dqNCexMc6Z7AGpJeGvbenstPZ5ejQ1OfJzOLa7ejh4+/r8fT29vpccbklWK8PVa0AS6ghW63O498vYa+lsdKz1NDRt9Kw1c672tbD3tnAxt7R6OHp5vDe7OrDyuDn6vLl6/EAQKak0MgATakkppo3ZK/Bz9y8w9yzu9jey97axdvHzeG21NHH4trTwthKZrVGZLSUSpuPQJiGAI+GAI8SWKydycLL4d7f2OTi1+S9xNzL0ePT6OLGzeEAo5U0qJw/aLEAo5JFa7JBabEAp5Y4qZ2QxLyKmsm3kL2xoMOehrRNb7RIbbOZgrGre68AUqwAqZqNN5aKJ5N/lMq+qsd8kMa4pcWzh7muhLMEV69juq2kbKqgUaOTR5uMMZWLLZSGAI5VAIdEAH+ovNDHuNCnxcy3qcaYx8K8msGplrx+wLahjbYdXrV6vbMvYK9DrZ8QrZ8tqJuFms+Sos6sw8ecy8RffsNVeMCvmb43aLltv7Q4Y7EZWK4QWa1gt6meZKUdr6GOAZVeA4xPAISyveLUwtivxtKTpNJ2jcqfvcltiMiwwcfAoMVxhL+Kx7xjdrqTe60tsaNQs6KaRKACrJ6UTZwkqpqTL5pkHY4AloSgsd2ptNXPvNOOncuxxsqFl8lmg8apt8FJcr9EbryGxLqlkrkrY7dRa7ZGZLQ5t6iXUZ6PPpgVpZeJCJFKAIGareTa0+KJod3H0deY2M+esM25usmYu8d2zsJOdcBVvrCLbqcAOaaHaKQAMaScWqKBXqCXMJ2RHpiLF5NmJZAdAHN2kta11dKu1M+DkcZLdb+Mcql3TppyRJdzQ5ZtNZNlIY+DF4+voCOQAAAAZ3RSTlMABAT+MEEJ/RH+/TP+Zlv+pUo6Ifz8+fco/fz6+evr39S9nJmOilQaF/7+/f38+smmoYp6b1T+/v7++vj189zU0tDJxsGzsrKSfv34+Pf27dDOysG9t6+n/vv6+vr59uzr1tG+tZ6Qg9Ym3QAABR5JREFUSMeNlVVUG1EQhpcuxEspXqS0SKEtxQp1d3d332STTRpIQhIISQgJhODu7lAoDoUCpe7u7u7+1puGpqnCPOyZvffbOXPm/PsP9JfQgyCC+tmTABTOcbxDz/heENS7/1F+9nhvkHePG0wNDLbGWwdXL+rbLWvpmZHXD8+gMfBjTh+aSe6Gnn7lwQIOTR0c8wfX3PWgv7avbdKwf/ZoBp1Gp/PvuvXW3vw5ib7emnTW4OR+3D4jB9vjNJ/7gNvfWWeH/TO/JyYrsiKCRjVEZA3UB+96kON+DxOQ/NLE8PE5iUYgIXjFnCOlxEQMaSGVxjg4gxOnEycGz8bptuNjVx08LscIgrzH3umcn+KKtiBIyvzOO2O99aAdR8cF19oZalnCtvREUw79tCd5sow1g1UKM6kXqUx4T8wsi3sTjJ3yzDmmhenLXLpo8u45eG5y4Vvbk6kkC4LLtJMowkSQxmk4ggVJEG+7c6QpHT8vvW9X7/o7+3ELmiJi2mEzZJiz8cT6TBlanBk70cB5GGIGC1gRDdZ00yADLW1FL6gqhtvNXNG5S9gdSrk4M1qu7JAsmYshzDS4peoMrU/gT7qQdqYGZaYhxZmVbGJAm/CS/HloWyhRUlknQ9KYcExTwS80d3VNOxUZJpITYyspl0LbhArhpZCD9cRWEQuhYkNGMHToQ/2Cs6swJlb39CsllxdXX6IUKh/H5jbnSsPKjgmoaFQ1f8wRLR0UnGE/RcDEjj2jXG1WVTwUs8+zxfcrVO+vSsuOpVKxCfYZiQ0/aPKuxQbQ8lIz+DClxC8u+snlcJ7Yr1z1JPqUH0V+GDXbOwAib931Y4Imaq0NTIXPXY+N5L18GJ37SVWu+hwXff8l72Ds9XuwYIBaXPq6Shm4l+Vl/5QiOlV+uTk6YR9PxKsI9xNJny31ygK1e+nIRC1N97EGkFPI+jCpiHe5PCEy7oWqWSwRrpOvhFzcbTWMbm3ZJAOn1rUKpYIt/lDhW/5RHHteeWFN60qo98YJuoq1nK3uW5AabyspC1BcIEpOhft+SZAShYoLSvnmSfnYADUERP5jJn2h5XtsgCRuhYQqAvwTwn33+YWEKUI72HX5AtfSAZDe8F2DtPPm77afhl0EkthzuCQU0BWApgQIH9+KB0JhopMM7bJrdTRoleM2JAVNMyPF+wdoaz+XJpGoVAQ7WXUkcV7gT3oUZyi/ISIJAVKhgNp+4b4veCFhYVJw4locdSjZCp9cPUhLF9EZ3KKzURepMEtCDPP3VcWFx4UIiZIklIpFNfHpdEafIF2aRmOcrUmjohbT2WUllbmRvgfbythbQO3222fpDJoufaQPncYYuqoGtUEsCJZL6/3PR5b4syeSjZMQG/T2maGANlXT2v8S4AULWaUkCxfLyW8iW4kdka+nEMjxpL2NCwsYNBp+Q61PF43zyDg9Bm9+3NNySn78jMZUUkumqE4Gp7JmFOdP1vc8PpRrzj9+wPinCy8K1PiJ4aYbnTYpCCbDkBSbzhu2QJ1Gd82t8jI8TH51+OzvXoWbnXUOBkNW+0mWFwGcGOUVpU81/n3TOHb5oMt2FgYGjzau0Nif0Ss7Q3XB33hjjQHjHA5E5aOyIQc8CBrLdQSs3j92VG+3nNEjbkbdbBr9zm04ruvw37vh0QKOdeGIkckc80fX3KH/h7PT4BOjgCty8VZ5ux1MoO5Cf5naca2LAsEgehI+drX8o/0Nu+W0m6K/I9gGPd/dfx/EN/wN62AhsBWuAAAAAElFTkSuQmCC
|
||||||
">
|
">
|
||||||
|
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
@ -11,6 +11,7 @@ specific language governing permissions and limitations under the License.
|
|||||||
|
|
||||||
<div class="flex flex-wrap space-x-1">
|
<div class="flex flex-wrap space-x-1">
|
||||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
|
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
@ -49,7 +49,7 @@ demonstrate its capabilities for on-device computations in a proof-of-concept ex
|
|||||||
study.*
|
study.*
|
||||||
|
|
||||||
This model was contributed by [victorsanh](https://huggingface.co/victorsanh). This model jax version was
|
This model was contributed by [victorsanh](https://huggingface.co/victorsanh). This model jax version was
|
||||||
contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/huggingface/transformers/tree/main/examples/research_projects/distillation).
|
contributed by [kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/huggingface/transformers-research-projects/tree/main/distillation).
|
||||||
|
|
||||||
## Usage tips
|
## Usage tips
|
||||||
|
|
||||||
|
@ -18,6 +18,8 @@ rendered properly in your Markdown viewer.
|
|||||||
|
|
||||||
<div class="flex flex-wrap space-x-1">
|
<div class="flex flex-wrap space-x-1">
|
||||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
|
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||||
|
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
## Overview
|
## Overview
|
||||||
|
docs/source/en/model_doc/gemma3.md (new file, 265 lines)
@@ -0,0 +1,265 @@
|
|||||||
|
|
||||||
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License.
|
||||||
|
|
||||||
|
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||||
|
rendered properly in your Markdown viewer.
|
||||||
|
|
||||||
|
-->
|
||||||
|
|
||||||
|
<div style="float: right;">
|
||||||
|
<div class="flex flex-wrap space-x-1">
|
||||||
|
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
|
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
# Gemma 3
|
||||||
|
|
||||||
|
[Gemma 3](https://goo.gle/Gemma3Report) is a multimodal model with pretrained and instruction-tuned variants, available in 1B, 4B, 12B, and 27B parameter sizes. The architecture is mostly the same as the previous Gemma versions. The key differences are alternating 5 local sliding window self-attention layers for every global self-attention layer, support for a longer context length of 128K tokens, and a [SigLip](./siglip) encoder that can "pan & scan" high-resolution images to prevent information from disappearing in images with high resolution or non-square aspect ratios.
|
||||||
|
|
||||||
|
The instruction-tuned variant was post-trained with knowledge distillation and reinforcement learning.
|
||||||
|
|
||||||
|
You can find all the original Gemma 3 checkpoints under the [Google](https://huggingface.co/google) organization.
|
||||||
|
|
||||||
|
> [!TIP]
|
||||||
|
> Click on the Gemma 3 models in the right sidebar for more examples of how to apply Gemma to different vision and language tasks.
|
||||||
|
|
||||||
|
The example below demonstrates how to generate text based on an image with [`Pipeline`] or the [`AutoModel`] class.
|
||||||
|
|
||||||
|
<hfoptions id="usage">
|
||||||
|
<hfoption id="Pipeline">
|
||||||
|
|
||||||
|
```py
|
||||||
|
import torch
|
||||||
|
from transformers import pipeline
|
||||||
|
|
||||||
|
pipeline = pipeline(
|
||||||
|
task="image-text-to-text",
|
||||||
|
model="google/gemma-3-4b-pt",
|
||||||
|
device=0,
|
||||||
|
torch_dtype=torch.bfloat16
|
||||||
|
)
|
||||||
|
pipeline(
|
||||||
|
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
|
||||||
|
text="<start_of_image> What is shown in this image?"
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="AutoModel">
|
||||||
|
|
||||||
|
```py
|
||||||
|
import torch
|
||||||
|
from transformers import AutoProcessor, Gemma3ForConditionalGeneration
|
||||||
|
|
||||||
|
model = Gemma3ForConditionalGeneration.from_pretrained(
|
||||||
|
"google/gemma-3-4b-it",
|
||||||
|
torch_dtype=torch.bfloat16,
|
||||||
|
device_map="auto",
|
||||||
|
attn_implementation="sdpa"
|
||||||
|
)
|
||||||
|
processor = AutoProcessor.from_pretrained(
|
||||||
|
"google/gemma-3-4b-it",
|
||||||
|
padding_side="left"
|
||||||
|
)
|
||||||
|
|
||||||
|
messages = [
|
||||||
|
{
|
||||||
|
"role": "system",
|
||||||
|
"content": [
|
||||||
|
{"type": "text", "text": "You are a helpful assistant."}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"role": "user", "content": [
|
||||||
|
{"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"},
|
||||||
|
{"type": "text", "text": "What is shown in this image?"},
|
||||||
|
]
|
||||||
|
},
|
||||||
|
]
|
||||||
|
inputs = processor.apply_chat_template(
|
||||||
|
messages,
|
||||||
|
tokenize=True,
|
||||||
|
return_dict=True,
|
||||||
|
return_tensors="pt",
|
||||||
|
add_generation_prompt=True,
|
||||||
|
).to("cuda")
|
||||||
|
|
||||||
|
output = model.generate(**inputs, max_new_tokens=50, cache_implementation="static")
|
||||||
|
print(processor.decode(output[0], skip_special_tokens=True))
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="transformers-cli">
|
||||||
|
|
||||||
|
```bash
|
||||||
|
echo -e "Plants create energy through a process known as" | transformers-cli run --task text-generation --model google/gemma-3-1b-pt --device 0
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
|
Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.
|
||||||
|
|
||||||
|
The example below uses [torchao](../quantization/torchao) to only quantize the weights to int4.
|
||||||
|
|
||||||
|
```py
|
||||||
|
# pip install torchao
|
||||||
|
import torch
|
||||||
|
from transformers import TorchAoConfig, Gemma3ForConditionalGeneration, AutoProcessor
|
||||||
|
|
||||||
|
quantization_config = TorchAoConfig("int4_weight_only", group_size=128)
|
||||||
|
model = Gemma3ForConditionalGeneration.from_pretrained(
|
||||||
|
"google/gemma-3-27b-it",
|
||||||
|
torch_dtype=torch.bfloat16,
|
||||||
|
device_map="auto",
|
||||||
|
quantization_config=quantization_config
|
||||||
|
)
|
||||||
|
processor = AutoProcessor.from_pretrained(
|
||||||
|
"google/gemma-3-27b-it",
|
||||||
|
padding_side="left"
|
||||||
|
)
|
||||||
|
|
||||||
|
messages = [
|
||||||
|
{
|
||||||
|
"role": "system",
|
||||||
|
"content": [
|
||||||
|
{"type": "text", "text": "You are a helpful assistant."}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"role": "user", "content": [
|
||||||
|
{"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"},
|
||||||
|
{"type": "text", "text": "What is shown in this image?"},
|
||||||
|
]
|
||||||
|
},
|
||||||
|
]
|
||||||
|
inputs = processor.apply_chat_template(
|
||||||
|
messages,
|
||||||
|
tokenize=True,
|
||||||
|
return_dict=True,
|
||||||
|
return_tensors="pt",
|
||||||
|
add_generation_prompt=True,
|
||||||
|
).to("cuda")
|
||||||
|
|
||||||
|
output = model.generate(**inputs, max_new_tokens=50, cache_implementation="static")
|
||||||
|
print(processor.decode(output[0], skip_special_tokens=True))
|
||||||
|
```
|
||||||
|
|
||||||
|
Use the [AttentionMaskVisualizer](https://github.com/huggingface/transformers/blob/beb9b5b02246b9b7ee81ddf938f93f44cfeaad19/src/transformers/utils/attention_visualizer.py#L139) to better understand what tokens the model can and cannot attend to.
|
||||||
|
|
||||||
|
```py
|
||||||
|
from transformers.utils.attention_visualizer import AttentionMaskVisualizer
|
||||||
|
|
||||||
|
visualizer = AttentionMaskVisualizer("google/gemma-3-4b-it")
|
||||||
|
visualizer("<img>What is shown in this image?")
|
||||||
|
```
|
||||||
|
|
||||||
|
<div class="flex justify-center">
|
||||||
|
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/gemma-3-attn-mask.png"/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- Use [`Gemma3ForConditionalGeneration`] for image-and-text and image-only inputs.
|
||||||
|
- Gemma 3 supports multiple input images, but make sure the images are correctly batched before passing them to the processor. Each batch should be a list of one or more images.
|
||||||
|
|
||||||
|
```py
|
||||||
|
url_cow = "https://media.istockphoto.com/id/1192867753/photo/cow-in-berchida-beach-siniscola.jpg?s=612x612&w=0&k=20&c=v0hjjniwsMNfJSuKWZuIn8pssmD5h5bSN1peBd1CmH4="
|
||||||
|
url_cat = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
|
||||||
|
|
||||||
|
messages =[
|
||||||
|
{
|
||||||
|
"role": "system",
|
||||||
|
"content": [
|
||||||
|
{"type": "text", "text": "You are a helpful assistant."}
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"role": "user",
|
||||||
|
"content": [
|
||||||
|
{"type": "image", "url": url_cow},
|
||||||
|
{"type": "image", "url": url_cat},
|
||||||
|
{"type": "text", "text": "Which image is cuter?"},
|
||||||
|
]
|
||||||
|
},
|
||||||
|
]
|
||||||
|
```
|
||||||
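
  To make the batching point concrete, the same `apply_chat_template` call from the AutoModel example above also handles both images in this conversation (a sketch that reuses the `processor` and `model` loaded earlier):

```py
# Reuses `processor` and `model` from the AutoModel example above; both image
# URLs in `messages` are fetched and batched for you by the chat template.
inputs = processor.apply_chat_template(
    messages,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
    add_generation_prompt=True,
).to("cuda")

output = model.generate(**inputs, max_new_tokens=50)
print(processor.decode(output[0], skip_special_tokens=True))
```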
|
- Text passed to the processor should have a `<start_of_image>` token wherever an image should be inserted.
|
||||||
|
- The processor has its own [`~ProcessorMixin.apply_chat_template`] method to convert chat messages to model inputs.
|
||||||
|
- By default, images aren't cropped and only the base image is forwarded to the model. In high resolution images or images with non-square aspect ratios, artifacts can result because the vision encoder uses a fixed resolution of 896x896. To prevent these artifacts and improve performance during inference, set `do_pan_and_scan=True` to crop the image into multiple smaller patches and concatenate them with the base image embedding. You can disable pan and scan for faster inference.
|
||||||
|
|
||||||
|
```diff
|
||||||
|
inputs = processor.apply_chat_template(
|
||||||
|
messages,
|
||||||
|
tokenize=True,
|
||||||
|
return_dict=True,
|
||||||
|
return_tensors="pt",
|
||||||
|
add_generation_prompt=True,
|
||||||
|
+ do_pan_and_scan=True,
|
||||||
|
).to("cuda")
|
||||||
|
```
|
||||||
|
- For the Gemma 3 1B checkpoints, which were trained in text-only mode, use [`AutoModelForCausalLM`] instead.
|
||||||
|
|
||||||
|
```py
|
||||||
|
import torch
|
||||||
|
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||||
|
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained(
|
||||||
|
"google/gemma-3-1b-pt",
|
||||||
|
)
|
||||||
|
model = AutoModelForCausalLM.from_pretrained(
|
||||||
|
"google/gemma-3-1b-pt",
|
||||||
|
torch_dtype=torch.bfloat16,
|
||||||
|
device_map="auto",
|
||||||
|
attn_implementation="sdpa"
|
||||||
|
)
|
||||||
|
input_ids = tokenizer("Plants create energy through a process known as", return_tensors="pt").to("cuda")
|
||||||
|
|
||||||
|
output = model.generate(**input_ids, cache_implementation="static")
|
||||||
|
print(tokenizer.decode(output[0], skip_special_tokens=True))
|
||||||
|
```
|
||||||
|
|
||||||
|
## Gemma3ImageProcessor
|
||||||
|
|
||||||
|
[[autodoc]] Gemma3ImageProcessor
|
||||||
|
|
||||||
|
## Gemma3ImageProcessorFast
|
||||||
|
|
||||||
|
[[autodoc]] Gemma3ImageProcessorFast
|
||||||
|
|
||||||
|
## Gemma3Processor
|
||||||
|
|
||||||
|
[[autodoc]] Gemma3Processor
|
||||||
|
|
||||||
|
## Gemma3TextConfig
|
||||||
|
|
||||||
|
[[autodoc]] Gemma3TextConfig
|
||||||
|
|
||||||
|
## Gemma3Config
|
||||||
|
|
||||||
|
[[autodoc]] Gemma3Config
|
||||||
|
|
||||||
|
## Gemma3TextModel
|
||||||
|
|
||||||
|
[[autodoc]] Gemma3TextModel
|
||||||
|
- forward
|
||||||
|
|
||||||
|
## Gemma3ForCausalLM
|
||||||
|
|
||||||
|
[[autodoc]] Gemma3ForCausalLM
|
||||||
|
- forward
|
||||||
|
|
||||||
|
## Gemma3ForConditionalGeneration
|
||||||
|
|
||||||
|
[[autodoc]] Gemma3ForConditionalGeneration
|
||||||
|
- forward
|
@ -71,9 +71,10 @@ pip install -U flash-attn --no-build-isolation
|
|||||||
Below is an expected speedup diagram comparing the pure inference time between the native implementation in transformers of `facebook/hubert-large-ls960-ft`, the flash-attention-2 and the sdpa (scale-dot-product-attention) version. We show the average speedup obtained on the `librispeech_asr` `clean` validation split:
|
Below is an expected speedup diagram comparing the pure inference time between the native implementation in transformers of `facebook/hubert-large-ls960-ft`, the flash-attention-2 and the sdpa (scale-dot-product-attention) version. We show the average speedup obtained on the `librispeech_asr` `clean` validation split:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
>>> from transformers import Wav2Vec2Model
|
>>> from transformers import HubertModel
|
||||||
|
>>> import torch
|
||||||
|
|
||||||
model = Wav2Vec2Model.from_pretrained("facebook/hubert-large-ls960-ft", torch_dtype=torch.float16, attn_implementation="flash_attention_2").to(device)
|
>>> model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft", torch_dtype=torch.float16, attn_implementation="flash_attention_2").to("cuda")
|
||||||
...
|
...
|
||||||
```
|
```
|
||||||
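
For reference, here is a minimal end-to-end sketch of the setup being benchmarked (dummy 16 kHz audio; assumes a FlashAttention-2-capable GPU and that `flash-attn` is installed):

```python
# Minimal sketch: load HubertModel with FlashAttention-2 and run a forward pass
# on one second of random 16 kHz audio.
import torch
from transformers import HubertModel

model = HubertModel.from_pretrained(
    "facebook/hubert-large-ls960-ft",
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",
).to("cuda")

input_values = torch.randn(1, 16000, dtype=torch.float16, device="cuda")
with torch.no_grad():
    hidden_states = model(input_values).last_hidden_state
print(hidden_states.shape)
```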
|
|
||||||
|
@ -18,6 +18,7 @@ rendered properly in your Markdown viewer.
|
|||||||
|
|
||||||
<div class="flex flex-wrap space-x-1">
|
<div class="flex flex-wrap space-x-1">
|
||||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
|
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
|
@ -52,7 +52,7 @@ LayoutLMv3 is nearly identical to LayoutLMv2, so we've also included LayoutLMv2
|
|||||||
</Tip>
|
</Tip>
|
||||||
|
|
||||||
- Demo notebooks for LayoutLMv3 can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3).
|
- Demo notebooks for LayoutLMv3 can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LayoutLMv3).
|
||||||
- Demo scripts can be found [here](https://github.com/huggingface/transformers/tree/main/examples/research_projects/layoutlmv3).
|
- Demo scripts can be found [here](https://github.com/huggingface/transformers-research-projects/tree/main/layoutlmv3).
|
||||||
|
|
||||||
<PipelineTag pipeline="text-classification"/>
|
<PipelineTag pipeline="text-classification"/>
|
||||||
|
|
||||||
@ -61,7 +61,7 @@ LayoutLMv3 is nearly identical to LayoutLMv2, so we've also included LayoutLMv2
|
|||||||
|
|
||||||
<PipelineTag pipeline="token-classification"/>
|
<PipelineTag pipeline="token-classification"/>
|
||||||
|
|
||||||
- [`LayoutLMv3ForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/research_projects/layoutlmv3) and [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv3/Fine_tune_LayoutLMv3_on_FUNSD_(HuggingFace_Trainer).ipynb).
|
- [`LayoutLMv3ForTokenClassification`] is supported by this [example script](https://github.com/huggingface/transformers-research-projects/tree/main/layoutlmv3) and [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv3/Fine_tune_LayoutLMv3_on_FUNSD_(HuggingFace_Trainer).ipynb).
|
||||||
- A [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/FUNSD/Inference_with_LayoutLMv2ForTokenClassification.ipynb) for how to perform inference with [`LayoutLMv2ForTokenClassification`] and a [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/FUNSD/True_inference_with_LayoutLMv2ForTokenClassification_%2B_Gradio_demo.ipynb) for how to perform inference when no labels are available with [`LayoutLMv2ForTokenClassification`].
|
- A [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/FUNSD/Inference_with_LayoutLMv2ForTokenClassification.ipynb) for how to perform inference with [`LayoutLMv2ForTokenClassification`] and a [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/FUNSD/True_inference_with_LayoutLMv2ForTokenClassification_%2B_Gradio_demo.ipynb) for how to perform inference when no labels are available with [`LayoutLMv2ForTokenClassification`].
|
||||||
- A [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/FUNSD/Fine_tuning_LayoutLMv2ForTokenClassification_on_FUNSD_using_HuggingFace_Trainer.ipynb) for how to finetune [`LayoutLMv2ForTokenClassification`] with the 🤗 Trainer.
|
- A [notebook](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLMv2/FUNSD/Fine_tuning_LayoutLMv2ForTokenClassification_on_FUNSD_using_HuggingFace_Trainer.ipynb) for how to finetune [`LayoutLMv2ForTokenClassification`] with the 🤗 Trainer.
|
||||||
- [Token classification task guide](../tasks/token_classification)
|
- [Token classification task guide](../tasks/token_classification)
|
||||||
|
@ -14,79 +14,115 @@ rendered properly in your Markdown viewer.
|
|||||||
|
|
||||||
-->
|
-->
|
||||||
|
|
||||||
# LLaMA
|
<div style="float: right;">
|
||||||
|
<div class="flex flex-wrap space-x-1">
|
||||||
<div class="flex flex-wrap space-x-1">
|
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAC0AAAAtCAMAAAANxBKoAAAC7lBMVEUAAADg5vYHPVgAoJH+/v76+v39/f9JbLP///9+AIgAnY3///+mcqzt8fXy9fgkXa3Ax9709fr+///9/f8qXq49qp5AaLGMwrv8/P0eW60VWawxYq8yqJzG2dytt9Wyu9elzci519Lf3O3S2efY3OrY0+Xp7PT///////+dqNCexMc6Z7AGpJeGvbenstPZ5ejQ1OfJzOLa7ejh4+/r8fT29vpccbklWK8PVa0AS6ghW63O498vYa+lsdKz1NDRt9Kw1c672tbD3tnAxt7R6OHp5vDe7OrDyuDn6vLl6/EAQKak0MgATakkppo3ZK/Bz9y8w9yzu9jey97axdvHzeG21NHH4trTwthKZrVGZLSUSpuPQJiGAI+GAI8SWKydycLL4d7f2OTi1+S9xNzL0ePT6OLGzeEAo5U0qJw/aLEAo5JFa7JBabEAp5Y4qZ2QxLyKmsm3kL2xoMOehrRNb7RIbbOZgrGre68AUqwAqZqNN5aKJ5N/lMq+qsd8kMa4pcWzh7muhLMEV69juq2kbKqgUaOTR5uMMZWLLZSGAI5VAIdEAH+ovNDHuNCnxcy3qcaYx8K8msGplrx+wLahjbYdXrV6vbMvYK9DrZ8QrZ8tqJuFms+Sos6sw8ecy8RffsNVeMCvmb43aLltv7Q4Y7EZWK4QWa1gt6meZKUdr6GOAZVeA4xPAISyveLUwtivxtKTpNJ2jcqfvcltiMiwwcfAoMVxhL+Kx7xjdrqTe60tsaNQs6KaRKACrJ6UTZwkqpqTL5pkHY4AloSgsd2ptNXPvNOOncuxxsqFl8lmg8apt8FJcr9EbryGxLqlkrkrY7dRa7ZGZLQ5t6iXUZ6PPpgVpZeJCJFKAIGareTa0+KJod3H0deY2M+esM25usmYu8d2zsJOdcBVvrCLbqcAOaaHaKQAMaScWqKBXqCXMJ2RHpiLF5NmJZAdAHN2kta11dKu1M+DkcZLdb+Mcql3TppyRJdzQ5ZtNZNlIY+DF4+voCOQAAAAZ3RSTlMABAT+MEEJ/RH+/TP+Zlv+pUo6Ifz8+fco/fz6+evr39S9nJmOilQaF/7+/f38+smmoYp6b1T+/v7++vj189zU0tDJxsGzsrKSfv34+Pf27dDOysG9t6+n/vv6+vr59uzr1tG+tZ6Qg9Ym3QAABR5JREFUSMeNlVVUG1EQhpcuxEspXqS0SKEtxQp1d3d332STTRpIQhIISQgJhODu7lAoDoUCpe7u7u7+1puGpqnCPOyZvffbOXPm/PsP9JfQgyCC+tmTABTOcbxDz/heENS7/1F+9nhvkHePG0wNDLbGWwdXL+rbLWvpmZHXD8+gMfBjTh+aSe6Gnn7lwQIOTR0c8wfX3PWgv7avbdKwf/ZoBp1Gp/PvuvXW3vw5ib7emnTW4OR+3D4jB9vjNJ/7gNvfWWeH/TO/JyYrsiKCRjVEZA3UB+96kON+DxOQ/NLE8PE5iUYgIXjFnCOlxEQMaSGVxjg4gxOnEycGz8bptuNjVx08LscIgrzH3umcn+KKtiBIyvzOO2O99aAdR8cF19oZalnCtvREUw79tCd5sow1g1UKM6kXqUx4T8wsi3sTjJ3yzDmmhenLXLpo8u45eG5y4Vvbk6kkC4LLtJMowkSQxmk4ggVJEG+7c6QpHT8vvW9X7/o7+3ELmiJi2mEzZJiz8cT6TBlanBk70cB5GGIGC1gRDdZ00yADLW1FL6gqhtvNXNG5S9gdSrk4M1qu7JAsmYshzDS4peoMrU/gT7qQdqYGZaYhxZmVbGJAm/CS/HloWyhRUlknQ9KYcExTwS80d3VNOxUZJpITYyspl0LbhArhpZCD9cRWEQuhYkNGMHToQ/2Cs6swJlb39CsllxdXX6IUKh/H5jbnSsPKjgmoaFQ1f8wRLR0UnGE/RcDEjj2jXG1WVTwUs8+zxfcrVO+vSsuOpVKxCfYZiQ0/aPKuxQbQ8lIz+DClxC8u+snlcJ7Yr1z1JPqUH0V+GDXbOwAib931Y4Imaq0NTIXPXY+N5L18GJ37SVWu+hwXff8l72Ds9XuwYIBaXPq6Shm4l+Vl/5QiOlV+uTk6YR9PxKsI9xNJny31ygK1e+nIRC1N97EGkFPI+jCpiHe5PCEy7oWqWSwRrpOvhFzcbTWMbm3ZJAOn1rUKpYIt/lDhW/5RHHteeWFN60qo98YJuoq1nK3uW5AabyspC1BcIEpOhft+SZAShYoLSvnmSfnYADUERP5jJn2h5XtsgCRuhYQqAvwTwn33+YWEKUI72HX5AtfSAZDe8F2DtPPm77afhl0EkthzuCQU0BWApgQIH9+KB0JhopMM7bJrdTRoleM2JAVNMyPF+wdoaz+XJpGoVAQ7WXUkcV7gT3oUZyi/ISIJAVKhgNp+4b4veCFhYVJw4locdSjZCp9cPUhLF9EZ3KKzURepMEtCDPP3VcWFx4UIiZIklIpFNfHpdEafIF2aRmOcrUmjohbT2WUllbmRvgfbythbQO3222fpDJoufaQPncYYuqoGtUEsCJZL6/3PR5b4syeSjZMQG/T2maGANlXT2v8S4AULWaUkCxfLyW8iW4kdka+nEMjxpL2NCwsYNBp+Q61PF43zyDg9Bm9+3NNySn78jMZUUkumqE4Gp7JmFOdP1vc8PpRrzj9+wPinCy8K1PiJ4aYbnTYpCCbDkBSbzhu2QJ1Gd82t8jI8TH51+OzvXoWbnXUOBkNW+0mWFwGcGOUVpU81/n3TOHb5oMt2FgYGjzau0Nif0Ss7Q3XB33hjjQHjHA5E5aOyIQc8CBrLdQSs3j92VG+3nNEjbkbdbBr9zm04ruvw37vh0QKOdeGIkckc80fX3KH/h7PT4BOjgCty8VZ5ux1MoO5Cf5naca2LAsEgehI+drX8o/0Nu+W0m6K/I9gGPd/dfx/EN/wN62AhsBWuAAAAAElFTkSuQmCC
|
||||||
<img alt="Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAC0AAAAtCAMAAAANxBKoAAAC7lBMVEUAAADg5vYHPVgAoJH+/v76+v39/f9JbLP///9+AIgAnY3///+mcqzt8fXy9fgkXa3Ax9709fr+///9/f8qXq49qp5AaLGMwrv8/P0eW60VWawxYq8yqJzG2dytt9Wyu9elzci519Lf3O3S2efY3OrY0+Xp7PT///////+dqNCexMc6Z7AGpJeGvbenstPZ5ejQ1OfJzOLa7ejh4+/r8fT29vpccbklWK8PVa0AS6ghW63O498vYa+lsdKz1NDRt9Kw1c672tbD3tnAxt7R6OHp5vDe7OrDyuDn6vLl6/EAQKak0MgATakkppo3ZK/Bz9y8w9yzu9jey97axdvHzeG21NHH4trTwthKZrVGZLSUSpuPQJiGAI+GAI8SWKydycLL4d7f2OTi1+S9xNzL0ePT6OLGzeEAo5U0qJw/aLEAo5JFa7JBabEAp5Y4qZ2QxLyKmsm3kL2xoMOehrRNb7RIbbOZgrGre68AUqwAqZqNN5aKJ5N/lMq+qsd8kMa4pcWzh7muhLMEV69juq2kbKqgUaOTR5uMMZWLLZSGAI5VAIdEAH+ovNDHuNCnxcy3qcaYx8K8msGplrx+wLahjbYdXrV6vbMvYK9DrZ8QrZ8tqJuFms+Sos6sw8ecy8RffsNVeMCvmb43aLltv7Q4Y7EZWK4QWa1gt6meZKUdr6GOAZVeA4xPAISyveLUwtivxtKTpNJ2jcqfvcltiMiwwcfAoMVxhL+Kx7xjdrqTe60tsaNQs6KaRKACrJ6UTZwkqpqTL5pkHY4AloSgsd2ptNXPvNOOncuxxsqFl8lmg8apt8FJcr9EbryGxLqlkrkrY7dRa7ZGZLQ5t6iXUZ6PPpgVpZeJCJFKAIGareTa0+KJod3H0deY2M+esM25usmYu8d2zsJOdcBVvrCLbqcAOaaHaKQAMaScWqKBXqCXMJ2RHpiLF5NmJZAdAHN2kta11dKu1M+DkcZLdb+Mcql3TppyRJdzQ5ZtNZNlIY+DF4+voCOQAAAAZ3RSTlMABAT+MEEJ/RH+/TP+Zlv+pUo6Ifz8+fco/fz6+evr39S9nJmOilQaF/7+/f38+smmoYp6b1T+/v7++vj189zU0tDJxsGzsrKSfv34+Pf27dDOysG9t6+n/vv6+vr59uzr1tG+tZ6Qg9Ym3QAABR5JREFUSMeNlVVUG1EQhpcuxEspXqS0SKEtxQp1d3d332STTRpIQhIISQgJhODu7lAoDoUCpe7u7u7+1puGpqnCPOyZvffbOXPm/PsP9JfQgyCC+tmTABTOcbxDz/heENS7/1F+9nhvkHePG0wNDLbGWwdXL+rbLWvpmZHXD8+gMfBjTh+aSe6Gnn7lwQIOTR0c8wfX3PWgv7avbdKwf/ZoBp1Gp/PvuvXW3vw5ib7emnTW4OR+3D4jB9vjNJ/7gNvfWWeH/TO/JyYrsiKCRjVEZA3UB+96kON+DxOQ/NLE8PE5iUYgIXjFnCOlxEQMaSGVxjg4gxOnEycGz8bptuNjVx08LscIgrzH3umcn+KKtiBIyvzOO2O99aAdR8cF19oZalnCtvREUw79tCd5sow1g1UKM6kXqUx4T8wsi3sTjJ3yzDmmhenLXLpo8u45eG5y4Vvbk6kkC4LLtJMowkSQxmk4ggVJEG+7c6QpHT8vvW9X7/o7+3ELmiJi2mEzZJiz8cT6TBlanBk70cB5GGIGC1gRDdZ00yADLW1FL6gqhtvNXNG5S9gdSrk4M1qu7JAsmYshzDS4peoMrU/gT7qQdqYGZaYhxZmVbGJAm/CS/HloWyhRUlknQ9KYcExTwS80d3VNOxUZJpITYyspl0LbhArhpZCD9cRWEQuhYkNGMHToQ/2Cs6swJlb39CsllxdXX6IUKh/H5jbnSsPKjgmoaFQ1f8wRLR0UnGE/RcDEjj2jXG1WVTwUs8+zxfcrVO+vSsuOpVKxCfYZiQ0/aPKuxQbQ8lIz+DClxC8u+snlcJ7Yr1z1JPqUH0V+GDXbOwAib931Y4Imaq0NTIXPXY+N5L18GJ37SVWu+hwXff8l72Ds9XuwYIBaXPq6Shm4l+Vl/5QiOlV+uTk6YR9PxKsI9xNJny31ygK1e+nIRC1N97EGkFPI+jCpiHe5PCEy7oWqWSwRrpOvhFzcbTWMbm3ZJAOn1rUKpYIt/lDhW/5RHHteeWFN60qo98YJuoq1nK3uW5AabyspC1BcIEpOhft+SZAShYoLSvnmSfnYADUERP5jJn2h5XtsgCRuhYQqAvwTwn33+YWEKUI72HX5AtfSAZDe8F2DtPPm77afhl0EkthzuCQU0BWApgQIH9+KB0JhopMM7bJrdTRoleM2JAVNMyPF+wdoaz+XJpGoVAQ7WXUkcV7gT3oUZyi/ISIJAVKhgNp+4b4veCFhYVJw4locdSjZCp9cPUhLF9EZ3KKzURepMEtCDPP3VcWFx4UIiZIklIpFNfHpdEafIF2aRmOcrUmjohbT2WUllbmRvgfbythbQO3222fpDJoufaQPncYYuqoGtUEsCJZL6/3PR5b4syeSjZMQG/T2maGANlXT2v8S4AULWaUkCxfLyW8iW4kdka+nEMjxpL2NCwsYNBp+Q61PF43zyDg9Bm9+3NNySn78jMZUUkumqE4Gp7JmFOdP1vc8PpRrzj9+wPinCy8K1PiJ4aYbnTYpCCbDkBSbzhu2QJ1Gd82t8jI8TH51+OzvXoWbnXUOBkNW+0mWFwGcGOUVpU81/n3TOHb5oMt2FgYGjzau0Nif0Ss7Q3XB33hjjQHjHA5E5aOyIQc8CBrLdQSs3j92VG+3nNEjbkbdbBr9zm04ruvw37vh0QKOdeGIkckc80fX3KH/h7PT4BOjgCty8VZ5ux1MoO5Cf5naca2LAsEgehI+drX8o/0Nu+W0m6K/I9gGPd/dfx/EN/wN62AhsBWuAAAAAElFTkSuQmCC
|
">
|
||||||
">
|
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||||
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
## Overview
|
# Llama
|
||||||
|
|
||||||
The LLaMA model was proposed in [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample. It is a collection of foundation language models ranging from 7B to 65B parameters.
|
[Llama](https://huggingface.co/papers/2302.13971) is a family of large language models ranging from 7B to 65B parameters. These models are focused on efficient inference (important for serving language models) by training a smaller model on more tokens rather than training a larger model on fewer tokens. The Llama model is based on the GPT architecture, but it uses pre-normalization to improve training stability, replaces ReLU with SwiGLU to improve performance, and replaces absolute positional embeddings with rotary positional embeddings (RoPE) to better handle longer sequence lengths.
|
||||||
|
|
||||||
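
To make the RoPE mention concrete, here is a minimal, self-contained sketch of rotating one query head by its position (a toy illustration, not the transformers implementation):

```python
# Rotary position embeddings (RoPE) in miniature: pairs of channels are rotated
# by a position-dependent angle before attention.
import torch


def rope(x, position, base=10000.0):
    # x: (..., head_dim) with an even head_dim
    head_dim = x.shape[-1]
    half = head_dim // 2
    freqs = 1.0 / (base ** (torch.arange(half, dtype=torch.float32) / half))
    angles = position * freqs            # (half,)
    cos, sin = angles.cos(), angles.sin()
    x1, x2 = x[..., :half], x[..., half:]
    return torch.cat([x1 * cos - x2 * sin, x1 * sin + x2 * cos], dim=-1)


q = torch.randn(64)                      # one attention head, head_dim=64
print(rope(q, position=5).shape)         # torch.Size([64])
```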
The abstract from the paper is the following:
|
You can find all the original Llama checkpoints under the [Huggy Llama](https://huggingface.co/huggyllama) organization.
|
||||||
|
|
||||||
*We introduce LLaMA, a collection of foundation language models ranging from 7B to 65B parameters. We train our models on trillions of tokens, and show that it is possible to train state-of-the-art models using publicly available datasets exclusively, without resorting to proprietary and inaccessible datasets. In particular, LLaMA-13B outperforms GPT-3 (175B) on most benchmarks, and LLaMA-65B is competitive with the best models, Chinchilla-70B and PaLM-540B. We release all our models to the research community. *
|
> [!TIP]
|
||||||
|
> Click on the Llama models in the right sidebar for more examples of how to apply Llama to different language tasks.
|
||||||
|
|
||||||
This model was contributed by [zphang](https://huggingface.co/zphang) with contributions from [BlackSamorez](https://huggingface.co/BlackSamorez). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama).
|
The example below demonstrates how to generate text with [`Pipeline`] or the [`AutoModel`], and from the command line.
|
||||||
|
|
||||||
## Usage tips
|
<hfoptions id="usage">
|
||||||
|
<hfoption id="Pipeline">
|
||||||
|
|
||||||
- Weights for the LLaMA models can be obtained from by filling out [this form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform?usp=send_form)
|
```py
|
||||||
- After downloading the weights, they will need to be converted to the Hugging Face Transformers format using the [conversion script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py). The script can be called with the following (example) command:
|
import torch
|
||||||
|
from transformers import pipeline
|
||||||
|
|
||||||
|
pipeline = pipeline(
|
||||||
|
task="text-generation",
|
||||||
|
model="huggyllama/llama-7b",
|
||||||
|
torch_dtype=torch.float16,
|
||||||
|
device=0
|
||||||
|
)
|
||||||
|
pipeline("Plants create energy through a process known as")
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="AutoModel">
|
||||||
|
|
||||||
|
```py
|
||||||
|
import torch
|
||||||
|
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||||
|
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained(
|
||||||
|
"huggyllama/llama-7b",
|
||||||
|
)
|
||||||
|
model = AutoModelForCausalLM.from_pretrained(
|
||||||
|
"huggyllama/llama-7b",
|
||||||
|
torch_dtype=torch.float16,
|
||||||
|
device_map="auto",
|
||||||
|
attn_implementation="sdpa"
|
||||||
|
)
|
||||||
|
input_ids = tokenizer("Plants create energy through a process known as", return_tensors="pt").to("cuda")
|
||||||
|
|
||||||
|
output = model.generate(**input_ids, cache_implementation="static")
|
||||||
|
print(tokenizer.decode(output[0], skip_special_tokens=True))
|
||||||
|
```
|
||||||
|
|
||||||
|
</hfoption>
|
||||||
|
<hfoption id="transformers-cli">
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python src/transformers/models/llama/convert_llama_weights_to_hf.py \
|
echo -e "Plants create energy through a process known as" | transformers-cli run --task text-generation --model huggyllama/llama-7b --device 0
|
||||||
--input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
|
|
||||||
```
|
```
|
||||||
|
|
||||||
- After conversion, the model and tokenizer can be loaded via:
|
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
```python
|
Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.
|
||||||
from transformers import LlamaForCausalLM, LlamaTokenizer
|
|
||||||
|
|
||||||
tokenizer = LlamaTokenizer.from_pretrained("/output/path")
|
The example below uses [torchao](../quantization/torchao) to only quantize the weights to int4.
|
||||||
model = LlamaForCausalLM.from_pretrained("/output/path")
|
|
||||||
|
```py
|
||||||
|
# pip install torchao
|
||||||
|
import torch
|
||||||
|
from transformers import TorchAoConfig, AutoModelForCausalLM, AutoTokenizer
|
||||||
|
|
||||||
|
quantization_config = TorchAoConfig("int4_weight_only", group_size=128)
|
||||||
|
model = AutoModelForCausalLM.from_pretrained(
|
||||||
|
"huggyllama/llama-30b",
|
||||||
|
torch_dtype=torch.bfloat16,
|
||||||
|
device_map="auto",
|
||||||
|
quantization_config=quantization_config
|
||||||
|
)
|
||||||
|
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-30b")
|
||||||
|
input_ids = tokenizer("Plants create energy through a process known as", return_tensors="pt").to("cuda")
|
||||||
|
|
||||||
|
output = model.generate(**input_ids, cache_implementation="static")
|
||||||
|
print(tokenizer.decode(output[0], skip_special_tokens=True))
|
||||||
```
|
```
|
||||||
|
|
||||||
Note that executing the script requires enough CPU RAM to host the whole model in float16 precision (even if the biggest versions
|
Use the [AttentionMaskVisualizer](https://github.com/huggingface/transformers/blob/beb9b5b02246b9b7ee81ddf938f93f44cfeaad19/src/transformers/utils/attention_visualizer.py#L139) to better understand what tokens the model can and cannot attend to.
|
||||||
come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). For the 65B model, it's thus 130GB of RAM needed.
|
|
||||||
|
|
||||||
- The LLaMA tokenizer is a BPE model based on [sentencepiece](https://github.com/google/sentencepiece). One quirk of sentencepiece is that when decoding a sequence, if the first token is the start of the word (e.g. "Banana"), the tokenizer does not prepend the prefix space to the string.
|
```py
|
||||||
|
from transformers.utils.attention_visualizer import AttentionMaskVisualizer
|
||||||
|
|
||||||
This model was contributed by [zphang](https://huggingface.co/zphang) with contributions from [BlackSamorez](https://huggingface.co/BlackSamorez). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama). The Flax version of the implementation was contributed by [afmck](https://huggingface.co/afmck) with the code in the implementation based on Hugging Face's Flax GPT-Neo.
|
visualizer = AttentionMaskVisualizer("huggyllama/llama-7b")
|
||||||
|
visualizer("Plants create energy through a process known as")
|
||||||
|
```
|
||||||
|
|
||||||
|
<div class="flex justify-center">
|
||||||
|
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/llama-attn-mask.png"/>
|
||||||
|
</div>
|
||||||
|
|
||||||
Based on the original LLaMA model, Meta AI has released some follow-up works:
|
## Notes
|
||||||
|
|
||||||
- **Llama2**: Llama2 is an improved version of Llama with some architectural tweaks (Grouped Query Attention), and is pre-trained on 2Trillion tokens. Refer to the documentation of Llama2 which can be found [here](llama2).
|
- The tokenizer is a byte-pair encoding model based on [SentencePiece](https://github.com/google/sentencepiece). During decoding, if the first token is the start of the word (for example, "Banana"), the tokenizer doesn't prepend the prefix space to the string.
|
||||||
|
|
||||||
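
  A quick way to see this quirk (a small sketch using the Huggy Llama checkpoint referenced above):

```py
# The SentencePiece-based Llama tokenizer does not prepend a space when the
# decoded sequence starts at the beginning of a word.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
ids = tokenizer("Banana", add_special_tokens=False)["input_ids"]
print(tokenizer.decode(ids))  # "Banana" -- no leading space is added back
```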
## Resources
|
|
||||||
|
|
||||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LLaMA. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
|
|
||||||
|
|
||||||
<PipelineTag pipeline="text-classification"/>
|
|
||||||
|
|
||||||
- A [notebook](https://colab.research.google.com/github/bigscience-workshop/petals/blob/main/examples/prompt-tuning-sst2.ipynb#scrollTo=f04ba4d2) on how to use prompt tuning to adapt the LLaMA model for text classification task. 🌎
|
|
||||||
|
|
||||||
<PipelineTag pipeline="question-answering"/>
|
|
||||||
|
|
||||||
- [StackLLaMA: A hands-on guide to train LLaMA with RLHF](https://huggingface.co/blog/stackllama#stackllama-a-hands-on-guide-to-train-llama-with-rlhf), a blog post about how to train LLaMA to answer questions on [Stack Exchange](https://stackexchange.com/) with RLHF.
|
|
||||||
|
|
||||||
⚗️ Optimization
|
|
||||||
- A [notebook](https://colab.research.google.com/drive/1SQUXq1AMZPSLD4mk3A3swUIc6Y2dclme?usp=sharing) on how to fine-tune the LLaMA model with the xturing library on a GPU with limited memory. 🌎
|
|
||||||
|
|
||||||
⚡️ Inference
|
|
||||||
- A [notebook](https://colab.research.google.com/github/DominguesM/alpaca-lora-ptbr-7b/blob/main/notebooks/02%20-%20Evaluate.ipynb) on how to run the LLaMA Model using PeftModel from the 🤗 PEFT library. 🌎
|
|
||||||
- A [notebook](https://colab.research.google.com/drive/1l2GiSSPbajVyp2Nk3CFT4t3uH6-5TiBe?usp=sharing) on how to load a PEFT adapter LLaMA model with LangChain. 🌎
|
|
||||||
|
|
||||||
🚀 Deploy
|
|
||||||
- A [notebook](https://colab.research.google.com/github/lxe/simple-llama-finetuner/blob/master/Simple_LLaMA_FineTuner.ipynb#scrollTo=3PM_DilAZD8T) on how to fine-tune the LLaMA model with the LoRA method via the 🤗 PEFT library and an intuitive UI. 🌎
|
|
||||||
- A [notebook](https://github.com/aws/amazon-sagemaker-examples/blob/main/introduction_to_amazon_algorithms/jumpstart-foundation-models/text-generation-open-llama.ipynb) on how to deploy Open-LLaMA model for text generation on Amazon SageMaker. 🌎
|
|
||||||
|
|
||||||
## LlamaConfig
|
## LlamaConfig
|
||||||
|
|
||||||
|
@ -14,97 +14,129 @@ rendered properly in your Markdown viewer.
|
|||||||
|
|
||||||
-->
|
-->
|
||||||
|
|
||||||
# Llama2
|
<div style="float: right;">
|
||||||
|
<div class="flex flex-wrap space-x-1">
|
||||||
<div class="flex flex-wrap space-x-1">
|
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAC0AAAAtCAMAAAANxBKoAAAC7lBMVEUAAADg5vYHPVgAoJH+/v76+v39/f9JbLP///9+AIgAnY3///+mcqzt8fXy9fgkXa3Ax9709fr+///9/f8qXq49qp5AaLGMwrv8/P0eW60VWawxYq8yqJzG2dytt9Wyu9elzci519Lf3O3S2efY3OrY0+Xp7PT///////+dqNCexMc6Z7AGpJeGvbenstPZ5ejQ1OfJzOLa7ejh4+/r8fT29vpccbklWK8PVa0AS6ghW63O498vYa+lsdKz1NDRt9Kw1c672tbD3tnAxt7R6OHp5vDe7OrDyuDn6vLl6/EAQKak0MgATakkppo3ZK/Bz9y8w9yzu9jey97axdvHzeG21NHH4trTwthKZrVGZLSUSpuPQJiGAI+GAI8SWKydycLL4d7f2OTi1+S9xNzL0ePT6OLGzeEAo5U0qJw/aLEAo5JFa7JBabEAp5Y4qZ2QxLyKmsm3kL2xoMOehrRNb7RIbbOZgrGre68AUqwAqZqNN5aKJ5N/lMq+qsd8kMa4pcWzh7muhLMEV69juq2kbKqgUaOTR5uMMZWLLZSGAI5VAIdEAH+ovNDHuNCnxcy3qcaYx8K8msGplrx+wLahjbYdXrV6vbMvYK9DrZ8QrZ8tqJuFms+Sos6sw8ecy8RffsNVeMCvmb43aLltv7Q4Y7EZWK4QWa1gt6meZKUdr6GOAZVeA4xPAISyveLUwtivxtKTpNJ2jcqfvcltiMiwwcfAoMVxhL+Kx7xjdrqTe60tsaNQs6KaRKACrJ6UTZwkqpqTL5pkHY4AloSgsd2ptNXPvNOOncuxxsqFl8lmg8apt8FJcr9EbryGxLqlkrkrY7dRa7ZGZLQ5t6iXUZ6PPpgVpZeJCJFKAIGareTa0+KJod3H0deY2M+esM25usmYu8d2zsJOdcBVvrCLbqcAOaaHaKQAMaScWqKBXqCXMJ2RHpiLF5NmJZAdAHN2kta11dKu1M+DkcZLdb+Mcql3TppyRJdzQ5ZtNZNlIY+DF4+voCOQAAAAZ3RSTlMABAT+MEEJ/RH+/TP+Zlv+pUo6Ifz8+fco/fz6+evr39S9nJmOilQaF/7+/f38+smmoYp6b1T+/v7++vj189zU0tDJxsGzsrKSfv34+Pf27dDOysG9t6+n/vv6+vr59uzr1tG+tZ6Qg9Ym3QAABR5JREFUSMeNlVVUG1EQhpcuxEspXqS0SKEtxQp1d3d332STTRpIQhIISQgJhODu7lAoDoUCpe7u7u7+1puGpqnCPOyZvffbOXPm/PsP9JfQgyCC+tmTABTOcbxDz/heENS7/1F+9nhvkHePG0wNDLbGWwdXL+rbLWvpmZHXD8+gMfBjTh+aSe6Gnn7lwQIOTR0c8wfX3PWgv7avbdKwf/ZoBp1Gp/PvuvXW3vw5ib7emnTW4OR+3D4jB9vjNJ/7gNvfWWeH/TO/JyYrsiKCRjVEZA3UB+96kON+DxOQ/NLE8PE5iUYgIXjFnCOlxEQMaSGVxjg4gxOnEycGz8bptuNjVx08LscIgrzH3umcn+KKtiBIyvzOO2O99aAdR8cF19oZalnCtvREUw79tCd5sow1g1UKM6kXqUx4T8wsi3sTjJ3yzDmmhenLXLpo8u45eG5y4Vvbk6kkC4LLtJMowkSQxmk4ggVJEG+7c6QpHT8vvW9X7/o7+3ELmiJi2mEzZJiz8cT6TBlanBk70cB5GGIGC1gRDdZ00yADLW1FL6gqhtvNXNG5S9gdSrk4M1qu7JAsmYshzDS4peoMrU/gT7qQdqYGZaYhxZmVbGJAm/CS/HloWyhRUlknQ9KYcExTwS80d3VNOxUZJpITYyspl0LbhArhpZCD9cRWEQuhYkNGMHToQ/2Cs6swJlb39CsllxdXX6IUKh/H5jbnSsPKjgmoaFQ1f8wRLR0UnGE/RcDEjj2jXG1WVTwUs8+zxfcrVO+vSsuOpVKxCfYZiQ0/aPKuxQbQ8lIz+DClxC8u+snlcJ7Yr1z1JPqUH0V+GDXbOwAib931Y4Imaq0NTIXPXY+N5L18GJ37SVWu+hwXff8l72Ds9XuwYIBaXPq6Shm4l+Vl/5QiOlV+uTk6YR9PxKsI9xNJny31ygK1e+nIRC1N97EGkFPI+jCpiHe5PCEy7oWqWSwRrpOvhFzcbTWMbm3ZJAOn1rUKpYIt/lDhW/5RHHteeWFN60qo98YJuoq1nK3uW5AabyspC1BcIEpOhft+SZAShYoLSvnmSfnYADUERP5jJn2h5XtsgCRuhYQqAvwTwn33+YWEKUI72HX5AtfSAZDe8F2DtPPm77afhl0EkthzuCQU0BWApgQIH9+KB0JhopMM7bJrdTRoleM2JAVNMyPF+wdoaz+XJpGoVAQ7WXUkcV7gT3oUZyi/ISIJAVKhgNp+4b4veCFhYVJw4locdSjZCp9cPUhLF9EZ3KKzURepMEtCDPP3VcWFx4UIiZIklIpFNfHpdEafIF2aRmOcrUmjohbT2WUllbmRvgfbythbQO3222fpDJoufaQPncYYuqoGtUEsCJZL6/3PR5b4syeSjZMQG/T2maGANlXT2v8S4AULWaUkCxfLyW8iW4kdka+nEMjxpL2NCwsYNBp+Q61PF43zyDg9Bm9+3NNySn78jMZUUkumqE4Gp7JmFOdP1vc8PpRrzj9+wPinCy8K1PiJ4aYbnTYpCCbDkBSbzhu2QJ1Gd82t8jI8TH51+OzvXoWbnXUOBkNW+0mWFwGcGOUVpU81/n3TOHb5oMt2FgYGjzau0Nif0Ss7Q3XB33hjjQHjHA5E5aOyIQc8CBrLdQSs3j92VG+3nNEjbkbdbBr9zm04ruvw37vh0QKOdeGIkckc80fX3KH/h7PT4BOjgCty8VZ5ux1MoO5Cf5naca2LAsEgehI+drX8o/0Nu+W0m6K/I9gGPd/dfx/EN/wN62AhsBWuAAAAAElFTkSuQmCC
|
||||||
<img alt="Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAC0AAAAtCAMAAAANxBKoAAAC7lBMVEUAAADg5vYHPVgAoJH+/v76+v39/f9JbLP///9+AIgAnY3///+mcqzt8fXy9fgkXa3Ax9709fr+///9/f8qXq49qp5AaLGMwrv8/P0eW60VWawxYq8yqJzG2dytt9Wyu9elzci519Lf3O3S2efY3OrY0+Xp7PT///////+dqNCexMc6Z7AGpJeGvbenstPZ5ejQ1OfJzOLa7ejh4+/r8fT29vpccbklWK8PVa0AS6ghW63O498vYa+lsdKz1NDRt9Kw1c672tbD3tnAxt7R6OHp5vDe7OrDyuDn6vLl6/EAQKak0MgATakkppo3ZK/Bz9y8w9yzu9jey97axdvHzeG21NHH4trTwthKZrVGZLSUSpuPQJiGAI+GAI8SWKydycLL4d7f2OTi1+S9xNzL0ePT6OLGzeEAo5U0qJw/aLEAo5JFa7JBabEAp5Y4qZ2QxLyKmsm3kL2xoMOehrRNb7RIbbOZgrGre68AUqwAqZqNN5aKJ5N/lMq+qsd8kMa4pcWzh7muhLMEV69juq2kbKqgUaOTR5uMMZWLLZSGAI5VAIdEAH+ovNDHuNCnxcy3qcaYx8K8msGplrx+wLahjbYdXrV6vbMvYK9DrZ8QrZ8tqJuFms+Sos6sw8ecy8RffsNVeMCvmb43aLltv7Q4Y7EZWK4QWa1gt6meZKUdr6GOAZVeA4xPAISyveLUwtivxtKTpNJ2jcqfvcltiMiwwcfAoMVxhL+Kx7xjdrqTe60tsaNQs6KaRKACrJ6UTZwkqpqTL5pkHY4AloSgsd2ptNXPvNOOncuxxsqFl8lmg8apt8FJcr9EbryGxLqlkrkrY7dRa7ZGZLQ5t6iXUZ6PPpgVpZeJCJFKAIGareTa0+KJod3H0deY2M+esM25usmYu8d2zsJOdcBVvrCLbqcAOaaHaKQAMaScWqKBXqCXMJ2RHpiLF5NmJZAdAHN2kta11dKu1M+DkcZLdb+Mcql3TppyRJdzQ5ZtNZNlIY+DF4+voCOQAAAAZ3RSTlMABAT+MEEJ/RH+/TP+Zlv+pUo6Ifz8+fco/fz6+evr39S9nJmOilQaF/7+/f38+smmoYp6b1T+/v7++vj189zU0tDJxsGzsrKSfv34+Pf27dDOysG9t6+n/vv6+vr59uzr1tG+tZ6Qg9Ym3QAABR5JREFUSMeNlVVUG1EQhpcuxEspXqS0SKEtxQp1d3d332STTRpIQhIISQgJhODu7lAoDoUCpe7u7u7+1puGpqnCPOyZvffbOXPm/PsP9JfQgyCC+tmTABTOcbxDz/heENS7/1F+9nhvkHePG0wNDLbGWwdXL+rbLWvpmZHXD8+gMfBjTh+aSe6Gnn7lwQIOTR0c8wfX3PWgv7avbdKwf/ZoBp1Gp/PvuvXW3vw5ib7emnTW4OR+3D4jB9vjNJ/7gNvfWWeH/TO/JyYrsiKCRjVEZA3UB+96kON+DxOQ/NLE8PE5iUYgIXjFnCOlxEQMaSGVxjg4gxOnEycGz8bptuNjVx08LscIgrzH3umcn+KKtiBIyvzOO2O99aAdR8cF19oZalnCtvREUw79tCd5sow1g1UKM6kXqUx4T8wsi3sTjJ3yzDmmhenLXLpo8u45eG5y4Vvbk6kkC4LLtJMowkSQxmk4ggVJEG+7c6QpHT8vvW9X7/o7+3ELmiJi2mEzZJiz8cT6TBlanBk70cB5GGIGC1gRDdZ00yADLW1FL6gqhtvNXNG5S9gdSrk4M1qu7JAsmYshzDS4peoMrU/gT7qQdqYGZaYhxZmVbGJAm/CS/HloWyhRUlknQ9KYcExTwS80d3VNOxUZJpITYyspl0LbhArhpZCD9cRWEQuhYkNGMHToQ/2Cs6swJlb39CsllxdXX6IUKh/H5jbnSsPKjgmoaFQ1f8wRLR0UnGE/RcDEjj2jXG1WVTwUs8+zxfcrVO+vSsuOpVKxCfYZiQ0/aPKuxQbQ8lIz+DClxC8u+snlcJ7Yr1z1JPqUH0V+GDXbOwAib931Y4Imaq0NTIXPXY+N5L18GJ37SVWu+hwXff8l72Ds9XuwYIBaXPq6Shm4l+Vl/5QiOlV+uTk6YR9PxKsI9xNJny31ygK1e+nIRC1N97EGkFPI+jCpiHe5PCEy7oWqWSwRrpOvhFzcbTWMbm3ZJAOn1rUKpYIt/lDhW/5RHHteeWFN60qo98YJuoq1nK3uW5AabyspC1BcIEpOhft+SZAShYoLSvnmSfnYADUERP5jJn2h5XtsgCRuhYQqAvwTwn33+YWEKUI72HX5AtfSAZDe8F2DtPPm77afhl0EkthzuCQU0BWApgQIH9+KB0JhopMM7bJrdTRoleM2JAVNMyPF+wdoaz+XJpGoVAQ7WXUkcV7gT3oUZyi/ISIJAVKhgNp+4b4veCFhYVJw4locdSjZCp9cPUhLF9EZ3KKzURepMEtCDPP3VcWFx4UIiZIklIpFNfHpdEafIF2aRmOcrUmjohbT2WUllbmRvgfbythbQO3222fpDJoufaQPncYYuqoGtUEsCJZL6/3PR5b4syeSjZMQG/T2maGANlXT2v8S4AULWaUkCxfLyW8iW4kdka+nEMjxpL2NCwsYNBp+Q61PF43zyDg9Bm9+3NNySn78jMZUUkumqE4Gp7JmFOdP1vc8PpRrzj9+wPinCy8K1PiJ4aYbnTYpCCbDkBSbzhu2QJ1Gd82t8jI8TH51+OzvXoWbnXUOBkNW+0mWFwGcGOUVpU81/n3TOHb5oMt2FgYGjzau0Nif0Ss7Q3XB33hjjQHjHA5E5aOyIQc8CBrLdQSs3j92VG+3nNEjbkbdbBr9zm04ruvw37vh0QKOdeGIkckc80fX3KH/h7PT4BOjgCty8VZ5ux1MoO5Cf5naca2LAsEgehI+drX8o/0Nu+W0m6K/I9gGPd/dfx/EN/wN62AhsBWuAAAAAElFTkSuQmCC
|
">
|
||||||
">
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
## Overview
|
# Llama 2
|
||||||
|
|
||||||
The Llama2 model was proposed in [Llama 2: Open Foundation and Fine-Tuned Chat Models](https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/) by Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, Dan Bikel, Lukas Blecher, Cristian Canton Ferrer, Moya Chen, Guillem Cucurull, David Esiobu, Jude Fernandes, Jeremy Fu, Wenyin Fu, Brian Fuller, Cynthia Gao, Vedanuj Goswami, Naman Goyal, Anthony Hartshorn, Saghar Hosseini, Rui Hou, Hakan Inan, Marcin Kardas, Viktor Kerkez, Madian Khabsa, Isabel Kloumann, Artem Korenev, Punit Singh Koura, Marie-Anne Lachaux, Thibaut Lavril, Jenya Lee, Diana Liskovich, Yinghai Lu, Yuning Mao, Xavier Martinet, Todor Mihaylov, Pushkar Mishra, Igor Molybog, Yixin Nie, Andrew Poulton, Jeremy Reizenstein, Rashi Rungta, Kalyan Saladi, Alan Schelten, Ruan Silva, Eric Michael Smith, Ranjan Subramanian, Xiaoqing Ellen Tan, Binh Tang, Ross Taylor, Adina Williams, Jian Xiang Kuan, Puxin Xu, Zheng Yan, Iliyan Zarov, Yuchen Zhang, Angela Fan, Melanie Kambadur, Sharan Narang, Aurelien Rodriguez, Robert Stojnic, Sergey Edunov, Thomas Scialom. It is a collection of foundation language models ranging from 7B to 70B parameters, with checkpoints fine-tuned for chat applications.
|
[Llama 2](https://huggingface.co/papers/2307.09288) is a family of large language models, Llama 2 and Llama 2-Chat, available in 7B, 13B, and 70B parameters. The Llama 2 model mostly keeps the same architecture as [Llama](./llama), but it is pretrained on more tokens, doubles the context length, and uses grouped-query attention (GQA) in the 70B model to improve inference.
|
||||||
|
|
||||||
The abstract from the paper is the following:
|
Llama 2-Chat is trained with supervised fine-tuning (SFT), and reinforcement learning with human feedback (RLHF) - rejection sampling and proximal policy optimization (PPO) - is applied to the fine-tuned model to align the chat model with human preferences.
|
||||||
|
|
||||||
*In this work, we develop and release Llama 2, a collection of pretrained and fine-tuned large language models (LLMs) ranging in scale from 7 billion to 70 billion parameters. Our fine-tuned LLMs, called Llama 2-Chat, are optimized for dialogue use cases. Our models outperform open-source chat models on most benchmarks we tested, and based on our human evaluations for helpfulness and safety, may be a suitable substitute for closed-source models. We provide a detailed description of our approach to fine-tuning and safety improvements of Llama 2-Chat in order to enable the community to build on our work and contribute to the responsible development of LLMs.*
|
You can find all the original Llama 2 checkpoints under the [Llama 2 Family](https://huggingface.co/collections/meta-llama/llama-2-family-661da1f90a9d678b6f55773b) collection.
|
||||||
|
|
||||||
Check out all Llama2 model checkpoints [here](https://huggingface.co/models?search=llama2).
|
> [!TIP]
|
||||||
This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ) with contributions from [Lysandre Debut](https://huggingface.co/lysandre). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama).
|
> Click on the Llama 2 models in the right sidebar for more examples of how to apply Llama to different language tasks.
|
||||||
|
|
||||||
## Usage tips
|
The example below demonstrates how to generate text with [`Pipeline`], [`AutoModel`], and how to chat with Llama 2-Chat from the command line.
|
||||||
|
|
||||||
<Tip warning={true}>
|
<hfoptions id="usage">
|
||||||
|
<hfoption id="Pipeline">
|
||||||
|
|
||||||
The `Llama2` models were trained using `bfloat16`, but the original inference uses `float16`. The checkpoints uploaded on the Hub use `torch_dtype = 'float16'`, which will be
|
```py
|
||||||
used by the `AutoModel` API to cast the checkpoints from `torch.float32` to `torch.float16`.
|
import torch
|
||||||
|
from transformers import pipeline
|
||||||
|
|
||||||
The `dtype` of the online weights is mostly irrelevant unless you are using `torch_dtype="auto"` when initializing a model with `model = AutoModelForCausalLM.from_pretrained("path", torch_dtype="auto")`. The reason is that the model will first be downloaded (using the `dtype` of the checkpoints online), then it will be cast to the default `dtype` of `torch` (`torch.float32`), and finally, if a `torch_dtype` is provided in the config, it will be used.
|
pipeline = pipeline(
|
||||||
|
task="text-generation",
|
||||||
|
model="meta-llama/Llama-2-7b-hf",
|
||||||
|
torch_dtype=torch.float16,
|
||||||
|
device=0
|
||||||
|
)
|
||||||
|
pipeline("Plants create energy through a process known as")
|
||||||
|
```
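As a hedged illustration of the `torch_dtype` note above, `torch_dtype="auto"` loads the weights in the dtype stored in the checkpoint instead of upcasting them to `torch.float32`:

```py
from transformers import AutoModelForCausalLM

# loads in the dtype saved with the checkpoint (float16 for these Hub checkpoints)
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", torch_dtype="auto")
print(model.dtype)  # torch.float16

# omitting torch_dtype keeps the PyTorch default, torch.float32
```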
|
||||||
|
|
||||||
Training the model in `float16` is not recommended and is known to produce `nan`; as such, the model should be trained in `bfloat16`.
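A minimal sketch of loading the model in `bfloat16` before fine-tuning (the checkpoint name is reused from the examples on this page):

```py
import torch
from transformers import AutoModelForCausalLM

# load directly in bfloat16 so training avoids the float16 overflow issues mentioned above
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-7b-hf",
    torch_dtype=torch.bfloat16,
)
```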
|
</hfoption>
|
||||||
|
<hfoption id="AutoModel">
|
||||||
|
|
||||||
</Tip>
|
```py
|
||||||
|
import torch
|
||||||
|
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||||
|
|
||||||
Tips:
|
tokenizer = AutoTokenizer.from_pretrained(
|
||||||
|
"meta-llama/Llama-2-7b-hf",
|
||||||
|
)
|
||||||
|
model = AutoModelForCausalLM.from_pretrained(
|
||||||
|
"meta-llama/Llama-2-7b-hf",
|
||||||
|
torch_dtype=torch.float16,
|
||||||
|
device_map="auto",
|
||||||
|
attn_implementation="sdpa"
|
||||||
|
)
|
||||||
|
input_ids = tokenizer("Plants create energy through a process known as", return_tensors="pt").to("cuda")
|
||||||
|
|
||||||
- Weights for the Llama2 models can be obtained by filling out [this form](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)
|
output = model.generate(**input_ids, cache_implementation="static")
|
||||||
- The architecture is very similar to the first Llama, with the addition of Grouped Query Attention (GQA) following this [paper](https://arxiv.org/pdf/2305.13245.pdf)
|
print(tokenizer.decode(output[0], skip_special_tokens=True))
|
||||||
- Setting `config.pretraining_tp` to a value different from 1 will activate the more accurate but slower computation of the linear layers, which should better match the original logits.
|
```
|
||||||
- The original model uses `pad_id = -1`, which means that there is no padding token. We can't use the same logic in Transformers; make sure to add a padding token with `tokenizer.add_special_tokens({"pad_token":"<pad>"})` and resize the token embedding accordingly. You should also set `model.config.pad_token_id`. The `embed_tokens` layer of the model is initialized with `self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.config.padding_idx)`, which makes sure that encoding the padding token outputs zeros, so passing it when initializing is recommended.
|
|
||||||
- After filling out the form and gaining access to the model checkpoints, you should be able to use the already converted checkpoints. Otherwise, if you are converting your own model, feel free to use the [conversion script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py). The script can be called with the following (example) command:
|
</hfoption>
|
||||||
|
<hfoption id="transformers-cli">
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
python src/transformers/models/llama/convert_llama_weights_to_hf.py \
|
transformers-cli chat --model_name_or_path meta-llama/Llama-2-7b-chat-hf --torch_dtype auto --attn_implementation flash_attention_2
|
||||||
--input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path
|
|
||||||
```
|
```
|
||||||
|
|
||||||
- After conversion, the model and tokenizer can be loaded via:
|
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
```python
|
Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.
|
||||||
from transformers import LlamaForCausalLM, LlamaTokenizer
|
|
||||||
|
|
||||||
tokenizer = LlamaTokenizer.from_pretrained("/output/path")
|
The example below uses [torchao](../quantization/torchao) to only quantize the weights to int4.
|
||||||
model = LlamaForCausalLM.from_pretrained("/output/path")
|
|
||||||
|
```py
|
||||||
|
# pip install torchao
|
||||||
|
import torch
|
||||||
|
from transformers import TorchAoConfig, AutoModelForCausalLM, AutoTokenizer
|
||||||
|
|
||||||
|
quantization_config = TorchAoConfig("int4_weight_only", group_size=128)
|
||||||
|
model = AutoModelForCausalLM.from_pretrained(
|
||||||
|
"meta-llama/Llama-2-13b-hf",
|
||||||
|
torch_dtype=torch.bfloat16,
|
||||||
|
device_map="auto",
|
||||||
|
quantization_config=quantization_config
|
||||||
|
)
|
||||||
|
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-13b-hf")
|
||||||
|
input_ids = tokenizer("Plants create energy through a process known as", return_tensors="pt").to("cuda")
|
||||||
|
|
||||||
|
output = model.generate(**input_ids, cache_implementation="static")
|
||||||
|
print(tokenizer.decode(output[0], skip_special_tokens=True))
|
||||||
```
|
```
|
||||||
|
|
||||||
Note that executing the script requires enough CPU RAM to host the whole model in float16 precision (even if the biggest versions
|
Use the [AttentionMaskVisualizer](https://github.com/huggingface/transformers/blob/beb9b5b02246b9b7ee81ddf938f93f44cfeaad19/src/transformers/utils/attention_visualizer.py#L139) to better understand what tokens the model can and cannot attend to.
|
||||||
come in several checkpoints, each of them contains only a part of the model's weights, so all of them need to be loaded in RAM). For the 70B model, that amounts to 140GB of RAM.
|
|
||||||
|
|
||||||
- The LLaMA tokenizer is a BPE model based on [sentencepiece](https://github.com/google/sentencepiece). One quirk of sentencepiece is that when decoding a sequence, if the first token is the start of the word (e.g. "Banana"), the tokenizer does not prepend the prefix space to the string.
|
```py
|
||||||
|
from transformers.utils.attention_visualizer import AttentionMaskVisualizer
|
||||||
|
|
||||||
- When using Flash Attention 2 via `attn_implementation="flash_attention_2"`, don't pass `torch_dtype` to the `from_pretrained` class method and use Automatic Mixed-Precision training. With [`Trainer`], simply set either `fp16` or `bf16` to `True`. Otherwise, make sure you are using `torch.autocast`. This is required because Flash Attention only supports the `fp16` and `bf16` data types.
|
visualizer = AttentionMaskVisualizer("meta-llama/Llama-2-7b-hf")
|
||||||
|
visualizer("Plants create energy through a process known as")
|
||||||
|
```
|
||||||
|
|
||||||
|
<div class="flex justify-center">
|
||||||
|
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/llama-2-attn-mask.png"/>
|
||||||
|
</div>
|
||||||
|
|
||||||
## Resources
|
## Notes
|
||||||
|
|
||||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LLaMA2. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
|
- Setting `config.pretraining_tp` to a value besides `1` activates a more accurate but slower computation of the linear layers. This matches the original logits better.
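  A hedged sketch of enabling it (any value greater than `1` switches to the slower path; `2` is only an illustration):

  ```py
  from transformers import AutoConfig, AutoModelForCausalLM

  config = AutoConfig.from_pretrained("meta-llama/Llama-2-7b-hf")
  config.pretraining_tp = 2  # values > 1 use the more accurate (but slower) linear layers
  model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", config=config)
  ```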
|
||||||
|
- The original model uses `pad_id = -1` to indicate a padding token. The Transformers implementation requires adding a padding token and resizing the token embedding accordingly.
|
||||||
|
|
||||||
- [Llama 2 is here - get it on Hugging Face](https://huggingface.co/blog/llama2), a blog post about Llama 2 and how to use it with 🤗 Transformers and 🤗 PEFT.
|
```py
|
||||||
- [LLaMA 2 - Every Resource you need](https://www.philschmid.de/llama-2), a compilation of relevant resources to learn about LLaMA 2 and how to get started quickly.
|
tokenizer.add_special_tokens({"pad_token": "<pad>"})
# resize the embeddings so the newly added padding token has an entry
model.resize_token_embeddings(len(tokenizer))
|
||||||
|
# update model config with padding token
|
||||||
<PipelineTag pipeline="text-generation"/>
|
model.config.pad_token_id = tokenizer.pad_token_id
|
||||||
|
```
|
||||||
- A [notebook](https://colab.research.google.com/drive/1PEQyJO1-f6j0S_XJ8DV50NkpzasXkrzd?usp=sharing) on how to fine-tune Llama 2 in Google Colab using QLoRA and 4-bit precision. 🌎
|
- It is recommended to initialize the `embed_tokens` layer with the following code to ensure encoding the padding token outputs zeros.
|
||||||
- A [notebook](https://colab.research.google.com/drive/134o_cXcMe_lsvl15ZE_4Y75Kstepsntu?usp=sharing) on how to fine-tune the "Llama-v2-7b-guanaco" model with 4-bit QLoRA and generate Q&A datasets from PDFs. 🌎
|
|
||||||
|
|
||||||
<PipelineTag pipeline="text-classification"/>
|
|
||||||
|
|
||||||
- A [notebook](https://colab.research.google.com/drive/1ggaa2oRFphdBmqIjSEbnb_HGkcIRC2ZB?usp=sharing) on how to fine-tune the Llama 2 model with QLoRa, TRL, and Korean text classification dataset. 🌎🇰🇷
|
|
||||||
|
|
||||||
⚗️ Optimization
|
|
||||||
- [Fine-tune Llama 2 with DPO](https://huggingface.co/blog/dpo-trl), a guide to using the TRL library's DPO method to fine tune Llama 2 on a specific dataset.
|
|
||||||
- [Extended Guide: Instruction-tune Llama 2](https://www.philschmid.de/instruction-tune-llama-2), a guide to training Llama 2 to generate instructions from inputs, transforming the model from instruction-following to instruction-giving.
|
|
||||||
- A [notebook](https://colab.research.google.com/drive/1SYpgFpcmtIUzdE7pxqknrM4ArCASfkFQ?usp=sharing) on how to fine-tune the Llama 2 model on a personal computer using QLoRa and TRL. 🌎
|
|
||||||
|
|
||||||
⚡️ Inference
|
|
||||||
- A [notebook](https://colab.research.google.com/drive/1TC56ArKerXUpbgRy5vM3woRsbTEVNq7h?usp=sharing) on how to quantize the Llama 2 model using GPTQ from the AutoGPTQ library. 🌎
|
|
||||||
- A [notebook](https://colab.research.google.com/drive/1X1z9Q6domMKl2CnEM0QGHNwidLfR4dW2?usp=sharing) on how to run the Llama 2 Chat Model with 4-bit quantization on a local computer or Google Colab. 🌎
|
|
||||||
|
|
||||||
🚀 Deploy
|
|
||||||
- [Fine-tune LLaMA 2 (7-70B) on Amazon SageMaker](https://www.philschmid.de/sagemaker-llama2-qlora), a complete guide from setup to QLoRA fine-tuning and deployment on Amazon SageMaker.
|
|
||||||
- [Deploy Llama 2 7B/13B/70B on Amazon SageMaker](https://www.philschmid.de/sagemaker-llama-llm), a guide on using Hugging Face's LLM DLC container for secure and scalable deployment.
|
|
||||||
|
|
||||||
|
```py
|
||||||
|
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.config.padding_idx)
|
||||||
|
```
|
||||||
|
- The tokenizer is a byte-pair encoding model based on [SentencePiece](https://github.com/google/sentencepiece). During decoding, if the first token is the start of the word (for example, "Banana"), the tokenizer doesn't prepend the prefix space to the string.
|
||||||
|
- Don't use the `torch_dtype` parameter in [`~AutoModel.from_pretrained`] if you're using FlashAttention-2 because it only supports fp16 or bf16. You should use [Automatic Mixed Precision](https://pytorch.org/tutorials/recipes/recipes/amp_recipe.html), set fp16 or bf16 to `True` if using [`Trainer`], or use [torch.autocast](https://pytorch.org/docs/stable/amp.html#torch.autocast).
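  A minimal sketch of that setup (it assumes `flash-attn` is installed; the autocast dtype below is an example choice):

  ```py
  import torch
  from transformers import AutoModelForCausalLM, AutoTokenizer

  tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
  # note: no torch_dtype here, mixed precision is handled by autocast below
  model = AutoModelForCausalLM.from_pretrained(
      "meta-llama/Llama-2-7b-hf",
      attn_implementation="flash_attention_2",
      device_map="auto",
  )

  inputs = tokenizer("Plants create energy through a process known as", return_tensors="pt").to(model.device)
  with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
      output = model.generate(**inputs, max_new_tokens=20)
  ```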
|
||||||
|
|
||||||
## LlamaConfig
|
## LlamaConfig
|
||||||
|
|
||||||
|
@ -36,7 +36,7 @@ On January 30, 2024, we released LLaVA-NeXT, an open-source Large Multimodal Mod
|
|||||||
|
|
||||||
**In today’s exploration, we delve into the performance of LLaVA-NeXT within the realm of video understanding tasks. We reveal that LLaVA-NeXT surprisingly has strong performance in understanding video content. The current version of LLaVA-NeXT for videos has several improvements:
|
**In today’s exploration, we delve into the performance of LLaVA-NeXT within the realm of video understanding tasks. We reveal that LLaVA-NeXT surprisingly has strong performance in understanding video content. The current version of LLaVA-NeXT for videos has several improvements:
|
||||||
|
|
||||||
- Zero-shot video representation capabilities with AnyRes: The AnyRes technique naturally represents a high-resolution image into multiple images that a pre-trained VIT is able to digest, and forms them into a concantenated sequence. This technique is naturally generalizable to represent videos (consisting of multiple frames), allowing the image-only-trained LLaVA-Next model to perform surprisingly well on video tasks. Notably, this is the first time that LMMs show strong zero-shot modality transfer ability.
|
- Zero-shot video representation capabilities with AnyRes: The AnyRes technique naturally represents a high-resolution image into multiple images that a pre-trained VIT is able to digest, and forms them into a concatenated sequence. This technique is naturally generalizable to represent videos (consisting of multiple frames), allowing the image-only-trained LLaVA-Next model to perform surprisingly well on video tasks. Notably, this is the first time that LMMs show strong zero-shot modality transfer ability.
|
||||||
- Inference with length generalization improves on longer videos. The linear scaling technique enables length generalization, allowing LLaVA-NeXT to effectively handle long-video beyond the limitation of the "max_token_length" of the LLM.
|
- Inference with length generalization improves on longer videos. The linear scaling technique enables length generalization, allowing LLaVA-NeXT to effectively handle long-video beyond the limitation of the "max_token_length" of the LLM.
|
||||||
- Strong video understanding ability. (1) LLaVA-Next-Image, which combines the above two techniques, yields superior zero-shot performance than open-source LMMs tuned on videos. (2) LLaVA-Next-Video, further supervised fine-tuning (SFT) LLaVA-Next-Image on video data, achieves better video understanding capabilities compared to LLaVA-Next-Image. (3) LLaVA-Next-Video-DPO, which aligns the model response with AI feedback using direct preference optimization (DPO), showing significant performance boost.
|
- Strong video understanding ability. (1) LLaVA-Next-Image, which combines the above two techniques, yields superior zero-shot performance than open-source LMMs tuned on videos. (2) LLaVA-Next-Video, further supervised fine-tuning (SFT) LLaVA-Next-Image on video data, achieves better video understanding capabilities compared to LLaVA-Next-Image. (3) LLaVA-Next-Video-DPO, which aligns the model response with AI feedback using direct preference optimization (DPO), showing significant performance boost.
|
||||||
- Efficient deployment and inference with SGLang. It allows 5x faster inference on video tasks, allowing more scalable serving such as million-level video re-captioning. See instructions in our repo.**
|
- Efficient deployment and inference with SGLang. It allows 5x faster inference on video tasks, allowing more scalable serving such as million-level video re-captioning. See instructions in our repo.**
|
||||||
|
234
docs/source/en/model_doc/mistral3.md
Normal file
@ -0,0 +1,234 @@
|
|||||||
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License.
|
||||||
|
|
||||||
|
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||||
|
rendered properly in your Markdown viewer.
|
||||||
|
|
||||||
|
-->
|
||||||
|
|
||||||
|
# Mistral3
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Building upon Mistral Small 3 (2501), Mistral Small 3.1 (2503) adds state-of-the-art vision understanding and enhances long context capabilities up to 128k tokens without compromising text performance. With 24 billion parameters, this model achieves top-tier capabilities in both text and vision tasks.
|
||||||
|
|
||||||
|
It is ideal for:
|
||||||
|
- Fast-response conversational agents.
|
||||||
|
- Low-latency function calling.
|
||||||
|
- Subject matter experts via fine-tuning.
|
||||||
|
- Local inference for hobbyists and organizations handling sensitive data.
|
||||||
|
- Programming and math reasoning.
|
||||||
|
- Long document understanding.
|
||||||
|
- Visual understanding.
|
||||||
|
|
||||||
|
This model was contributed by [cyrilvallez](https://huggingface.co/cyrilvallez) and [yonigozlan](https://huggingface.co/yonigozlan).
|
||||||
|
|
||||||
|
The original code can be found [here](https://github.com/vllm-project/vllm/blob/main/vllm/model_executor/models/pixtral.py) and [here](https://github.com/mistralai/mistral-common).
|
||||||
|
|
||||||
|
## Usage example
|
||||||
|
|
||||||
|
### Inference with Pipeline
|
||||||
|
|
||||||
|
Here is how you can use the `image-text-to-text` pipeline to perform inference with the `Mistral3` models in just a few lines of code:
|
||||||
|
```python
|
||||||
|
>>> from transformers import pipeline
|
||||||
|
|
||||||
|
>>> messages = [
|
||||||
|
... {
|
||||||
|
... "role": "user",
|
||||||
|
... "content": [
|
||||||
|
... {
|
||||||
|
... "type": "image",
|
||||||
|
... "image": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg",
|
||||||
|
... },
|
||||||
|
... {"type": "text", "text": "Describe this image."},
|
||||||
|
... ],
|
||||||
|
... },
|
||||||
|
... ]
|
||||||
|
|
||||||
|
>>> pipe = pipeline("image-text-to-text", model="mistralai/Mistral-Small-3.1-24B-Instruct-2503", torch_dtype=torch.bfloat16)
|
||||||
|
>>> outputs = pipe(text=messages, max_new_tokens=50, return_full_text=False)
|
||||||
|
>>> outputs[0]["generated_text"]
|
||||||
|
'The image depicts a vibrant and lush garden scene featuring a variety of wildflowers and plants. The central focus is on a large, pinkish-purple flower, likely a Greater Celandine (Chelidonium majus), with a'
|
||||||
|
```
|
||||||
|
### Inference on a single image
|
||||||
|
|
||||||
|
This example demonstrates how to perform inference on a single image with the Mistral3 models using chat templates.
|
||||||
|
|
||||||
|
```python
|
||||||
|
>>> from transformers import AutoProcessor, AutoModelForImageTextToText
|
||||||
|
>>> import torch
|
||||||
|
|
||||||
|
>>> torch_device = "cuda"
|
||||||
|
>>> model_checkpoint = "mistralai/Mistral-Small-3.1-24B-Instruct-2503"
|
||||||
|
>>> processor = AutoProcessor.from_pretrained(model_checkpoint)
|
||||||
|
>>> model = AutoModelForImageTextToText.from_pretrained(model_checkpoint, device_map=torch_device, torch_dtype=torch.bfloat16)
|
||||||
|
|
||||||
|
>>> messages = [
|
||||||
|
... {
|
||||||
|
... "role": "user",
|
||||||
|
... "content": [
|
||||||
|
... {"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
|
||||||
|
... {"type": "text", "text": "Describe this image"},
|
||||||
|
... ],
|
||||||
|
... }
|
||||||
|
... ]
|
||||||
|
|
||||||
|
>>> inputs = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt").to(model.device, dtype=torch.bfloat16)
|
||||||
|
|
||||||
|
>>> generate_ids = model.generate(**inputs, max_new_tokens=20)
|
||||||
|
>>> decoded_output = processor.decode(generate_ids[0, inputs["input_ids"].shape[1] :], skip_special_tokens=True)
|
||||||
|
|
||||||
|
>>> decoded_output
|
||||||
|
"The image depicts two cats lying on a pink blanket. The larger cat, which appears to be an"...
|
||||||
|
```
|
||||||
|
|
||||||
|
### Text-only generation
|
||||||
|
This example shows how to generate text using the Mistral3 model without providing any image input.
|
||||||
|
|
||||||
|
|
||||||
|
````python
|
||||||
|
>>> from transformers import AutoProcessor, AutoModelForImageTextToText
|
||||||
|
>>> import torch
|
||||||
|
|
||||||
|
>>> torch_device = "cuda"
|
||||||
|
>>> model_checkpoint = ".mistralai/Mistral-Small-3.1-24B-Instruct-2503"
|
||||||
|
>>> processor = AutoProcessor.from_pretrained(model_checkpoint)
|
||||||
|
>>> model = AutoModelForImageTextToText.from_pretrained(model_checkpoint, device_map=torch_device, torch_dtype=torch.bfloat16)
|
||||||
|
|
||||||
|
>>> SYSTEM_PROMPT = "You are a conversational agent that always answers straight to the point, always end your accurate response with an ASCII drawing of a cat."
|
||||||
|
>>> user_prompt = "Give me 5 non-formal ways to say 'See you later' in French."
|
||||||
|
|
||||||
|
>>> messages = [
|
||||||
|
... {"role": "system", "content": SYSTEM_PROMPT},
|
||||||
|
... {"role": "user", "content": user_prompt},
|
||||||
|
... ]
|
||||||
|
|
||||||
|
>>> text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
|
||||||
|
>>> inputs = processor(text=text, return_tensors="pt").to(torch_device, dtype=torch.bfloat16)
|
||||||
|
>>> generate_ids = model.generate(**inputs, max_new_tokens=50, do_sample=False)
|
||||||
|
>>> decoded_output = processor.batch_decode(generate_ids[:, inputs["input_ids"].shape[1] :], skip_special_tokens=True)[0]
|
||||||
|
|
||||||
|
>>> print(decoded_output)
|
||||||
|
"1. À plus tard!
|
||||||
|
2. Salut, à plus!
|
||||||
|
3. À toute!
|
||||||
|
4. À la prochaine!
|
||||||
|
5. Je me casse, à plus!
|
||||||
|
|
||||||
|
```
|
||||||
|
/\_/\
|
||||||
|
( o.o )
|
||||||
|
> ^ <
|
||||||
|
```"
|
||||||
|
````
|
||||||
|
|
||||||
|
### Batched image and text inputs
|
||||||
|
Mistral3 models also support batched image and text inputs.
|
||||||
|
|
||||||
|
```python
|
||||||
|
>>> from transformers import AutoProcessor, AutoModelForImageTextToText
|
||||||
|
>>> import torch
|
||||||
|
|
||||||
|
>>> torch_device = "cuda"
|
||||||
|
>>> model_checkpoint = "mistralai/Mistral-Small-3.1-24B-Instruct-2503"
|
||||||
|
>>> processor = AutoProcessor.from_pretrained(model_checkpoint)
|
||||||
|
>>> model = AutoModelForImageTextToText.from_pretrained(model_checkpoint, device_map=torch_device, torch_dtype=torch.bfloat16)
|
||||||
|
|
||||||
|
>>> messages = [
|
||||||
|
... [
|
||||||
|
... {
|
||||||
|
... "role": "user",
|
||||||
|
... "content": [
|
||||||
|
... {"type": "image", "url": "https://llava-vl.github.io/static/images/view.jpg"},
|
||||||
|
... {"type": "text", "text": "Write a haiku for this image"},
|
||||||
|
... ],
|
||||||
|
... },
|
||||||
|
... ],
|
||||||
|
... [
|
||||||
|
... {
|
||||||
|
... "role": "user",
|
||||||
|
... "content": [
|
||||||
|
... {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
|
||||||
|
... {"type": "text", "text": "Describe this image"},
|
||||||
|
... ],
|
||||||
|
... },
|
||||||
|
... ],
|
||||||
|
... ]
|
||||||
|
|
||||||
|
|
||||||
|
>>> inputs = processor.apply_chat_template(messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt").to(model.device, dtype=torch.bfloat16)
|
||||||
|
|
||||||
|
>>> output = model.generate(**inputs, max_new_tokens=25)
|
||||||
|
|
||||||
|
>>> decoded_outputs = processor.batch_decode(output, skip_special_tokens=True)
|
||||||
|
>>> decoded_outputs
|
||||||
|
["Write a haiku for this imageCalm waters reflect\nWhispers of the forest's breath\nPeace on wooden path"
|
||||||
|
, "Describe this imageThe image depicts a vibrant street scene in what appears to be a Chinatown district. The focal point is a traditional Chinese"]
|
||||||
|
```
|
||||||
|
|
||||||
|
### Batched multi-image input and quantization with BitsAndBytes
|
||||||
|
This implementation of the Mistral3 models supports batched text and image inputs with a different number of images for each text.
|
||||||
|
This example also shows how to use `BitsAndBytes` to load the model with 4-bit quantization.
|
||||||
|
|
||||||
|
```python
|
||||||
|
>>> from transformers import AutoProcessor, AutoModelForImageTextToText, BitsAndBytesConfig
|
||||||
|
>>> import torch
|
||||||
|
|
||||||
|
>>> torch_device = "cuda"
|
||||||
|
>>> model_checkpoint = "mistralai/Mistral-Small-3.1-24B-Instruct-2503"
|
||||||
|
>>> processor = AutoProcessor.from_pretrained(model_checkpoint)
|
||||||
|
>>> quantization_config = BitsAndBytesConfig(load_in_4bit=True)
|
||||||
|
>>> model = AutoModelForImageTextToText.from_pretrained(
|
||||||
|
... model_checkpoint, quantization_config=quantization_config
|
||||||
|
... )
|
||||||
|
|
||||||
|
>>> messages = [
|
||||||
|
... [
|
||||||
|
... {
|
||||||
|
... "role": "user",
|
||||||
|
... "content": [
|
||||||
|
... {"type": "image", "url": "https://llava-vl.github.io/static/images/view.jpg"},
|
||||||
|
... {"type": "text", "text": "Write a haiku for this image"},
|
||||||
|
... ],
|
||||||
|
... },
|
||||||
|
... ],
|
||||||
|
... [
|
||||||
|
... {
|
||||||
|
... "role": "user",
|
||||||
|
... "content": [
|
||||||
|
... {"type": "image", "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"},
|
||||||
|
... {"type": "image", "url": "https://thumbs.dreamstime.com/b/golden-gate-bridge-san-francisco-purple-flowers-california-echium-candicans-36805947.jpg"},
|
||||||
|
... {"type": "text", "text": "These images depict two different landmarks. Can you identify them?"},
|
||||||
|
... ],
|
||||||
|
... },
|
||||||
|
... ],
|
||||||
|
>>> ]
|
||||||
|
|
||||||
|
>>> inputs = processor.apply_chat_template(messages, padding=True, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt").to(model.device, dtype=torch.bfloat16)
|
||||||
|
|
||||||
|
>>> output = model.generate(**inputs, max_new_tokens=25)
|
||||||
|
|
||||||
|
>>> decoded_outputs = processor.batch_decode(output, skip_special_tokens=True)
|
||||||
|
>>> decoded_outputs
|
||||||
|
["Write a haiku for this imageSure, here is a haiku inspired by the image:\n\nCalm lake's wooden path\nSilent forest stands guard\n", "These images depict two different landmarks. Can you identify them? Certainly! The images depict two iconic landmarks:\n\n1. The first image shows the Statue of Liberty in New York City."]
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
|
## Mistral3Config
|
||||||
|
|
||||||
|
[[autodoc]] Mistral3Config
|
||||||
|
|
||||||
|
|
||||||
|
## Mistral3ForConditionalGeneration
|
||||||
|
|
||||||
|
[[autodoc]] Mistral3ForConditionalGeneration
|
||||||
|
- forward
|
@ -60,6 +60,9 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h
|
|||||||
|
|
||||||
- [Masked language modeling task guide](../tasks/masked_language_modeling)
|
- [Masked language modeling task guide](../tasks/masked_language_modeling)
|
||||||
|
|
||||||
|
<PipelineTag pipeline="question-answering"/>
|
||||||
|
|
||||||
|
- [`ModernBertForQuestionAnswering`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) and [colab notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb).
|
||||||
|
|
||||||
## ModernBertConfig
|
## ModernBertConfig
|
||||||
|
|
||||||
@ -88,5 +91,15 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h
|
|||||||
[[autodoc]] ModernBertForTokenClassification
|
[[autodoc]] ModernBertForTokenClassification
|
||||||
- forward
|
- forward
|
||||||
|
|
||||||
|
## ModernBertForQuestionAnswering
|
||||||
|
|
||||||
|
[[autodoc]] ModernBertForQuestionAnswering
|
||||||
|
- forward
|
||||||
|
|
||||||
|
### Usage tips
|
||||||
|
|
||||||
|
The ModernBert model can be fine-tuned using the HuggingFace Transformers library with its [official script](https://github.com/huggingface/transformers/blob/main/examples/pytorch/question-answering/run_qa.py) for question-answering tasks.
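Once fine-tuned, a checkpoint can be used for extractive question answering. The sketch below assumes a hypothetical fine-tuned checkpoint path:

```py
from transformers import pipeline

# "path/to/finetuned-modernbert-qa" is a placeholder for your own fine-tuned checkpoint
qa = pipeline("question-answering", model="path/to/finetuned-modernbert-qa")
qa(
    question="What was ModernBERT fine-tuned for?",
    context="ModernBERT was fine-tuned with the run_qa.py example script for extractive question answering.",
)
```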
|
||||||
|
|
||||||
|
|
||||||
</pt>
|
</pt>
|
||||||
</frameworkcontent>
|
</frameworkcontent>
|
||||||
|
@ -14,89 +14,157 @@ rendered properly in your Markdown viewer.
|
|||||||
|
|
||||||
-->
|
-->
|
||||||
|
|
||||||
# PaliGemma
|
<div style="float: right;">
|
||||||
|
<div class="flex flex-wrap space-x-1">
|
||||||
<div class="flex flex-wrap space-x-1">
|
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||||
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
## Overview
|
# PaliGemma
|
||||||
|
|
||||||
The PaliGemma model was proposed in [PaliGemma – Google's Cutting-Edge Open Vision Language Model](https://huggingface.co/blog/paligemma) by Google. It is a 3B vision-language model composed of a [SigLIP](siglip) vision encoder and a [Gemma](gemma) language decoder linked by a multimodal linear projection. It cuts an image into a fixed number of ViT tokens and prepends them to an optional prompt. One particularity is that the model uses full block attention on all the image tokens plus the input text tokens. It comes in 3 resolutions, 224x224, 448x448 and 896x896, with 3 base models, 55 fine-tuned versions for different tasks, and 2 mix models.
|
[PaliGemma](https://huggingface.co/papers/2407.07726) is a family of vision-language models (VLMs), combining [SigLIP](./siglip) with the [Gemma](./gemma) 2B model. PaliGemma is available in 3B, 10B, and 28B parameters. The main purpose of PaliGemma is to provide an adaptable base VLM that is easy to transfer to other tasks. The SigLIP vision encoder is a "shape optimized" contrastively pretrained [ViT](./vit) that converts an image into a sequence of tokens, which are prepended to an optional prompt. The Gemma 2B model is used as the decoder. PaliGemma uses full attention on all image and text tokens to maximize its capacity.
|
||||||
|
|
||||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/paligemma/paligemma_arch.png"
|
[PaliGemma 2](https://huggingface.co/papers/2412.03555) improves on the first model by using Gemma 2 (2B, 9B, and 27B parameter variants) as the decoder. These are available as **pt** or **mix** variants. The **pt** checkpoints are intended for further fine-tuning and the **mix** checkpoints are ready for use out of the box.
|
||||||
alt="drawing" width="600"/>
|
|
||||||
|
|
||||||
<small> PaliGemma architecture. Taken from the <a href="https://huggingface.co/blog/paligemma">blog post.</a> </small>
|
You can find all the original PaliGemma checkpoints under the [PaliGemma](https://huggingface.co/collections/google/paligemma-release-6643a9ffbf57de2ae0448dda), [PaliGemma 2](https://huggingface.co/collections/google/paligemma-2-release-67500e1e1dbfdd4dee27ba48), and [PaliGemma 2 Mix](https://huggingface.co/collections/google/paligemma-2-mix-67ac6a251aaf3ee73679dcc4) collections.
|
||||||
|
|
||||||
This model was contributed by [Molbap](https://huggingface.co/Molbap).
|
> [!TIP]
|
||||||
|
> Click on the PaliGemma models in the right sidebar for more examples of how to apply PaliGemma to different vision and language tasks.
|
||||||
|
|
||||||
## Usage tips
|
The example below demonstrates how to generate text based on an image with [`Pipeline`] or the [`AutoModel`] class.
|
||||||
|
|
||||||
- PaliGemma is not meant for conversational use, and it works best when fine-tuning to a specific use case. Some downstream tasks on which PaliGemma can be fine-tuned include image captioning, visual question answering (VQA), object detection, referring expression segmentation and document understanding.
|
<hfoptions id="usage">
|
||||||
- One can use `PaliGemmaProcessor` to prepare images, text and optional labels for the model. When fine-tuning a PaliGemma model, the `suffix` argument can be passed to the processor which creates the `labels` for the model:
|
<hfoption id="Pipeline">
|
||||||
|
|
||||||
```python
|
```py
|
||||||
prompt = "What is on the flower?"
|
import torch
|
||||||
answer = "a bee"
|
from transformers import pipeline
|
||||||
inputs = processor(images=raw_image, text=prompt, suffix=answer, return_tensors="pt")
|
|
||||||
|
pipeline = pipeline(
|
||||||
|
task="image-text-to-text",
|
||||||
|
model="google/paligemma2-3b-mix-224",
|
||||||
|
device=0,
|
||||||
|
torch_dtype=torch.bfloat16
|
||||||
|
)
|
||||||
|
pipeline(
|
||||||
|
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg",
|
||||||
|
text="What is in this image?"
|
||||||
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
## Usage Example
|
</hfoption>
|
||||||
|
<hfoption id="AutoModel">
|
||||||
|
|
||||||
The model can accept a single image or multiple images. According to the [paper](https://arxiv.org/abs/2407.07726v1), the PaliGemma checkpoint can transfer to tasks which take multiple images as input. NLVR2 is one such task, which asks one question about two images and requires looking at both to give the correct answer. Here's example code for single- and multi-image inference.
|
```py
|
||||||
|
import torch
|
||||||
### Single-image Inference
|
import requests
|
||||||
|
from PIL import Image
|
||||||
```python
|
|
||||||
from transformers import AutoProcessor, PaliGemmaForConditionalGeneration
|
from transformers import AutoProcessor, PaliGemmaForConditionalGeneration
|
||||||
|
|
||||||
model_id = "google/paligemma-3b-mix-224"
|
model = PaliGemmaForConditionalGeneration.from_pretrained(
|
||||||
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
|
"google/paligemma2-3b-mix-224",
|
||||||
processor = AutoProcessor.from_pretrained(model_id)
|
torch_dtype=torch.bfloat16,
|
||||||
|
device_map="auto",
|
||||||
prompt = "What is on the flower?"
|
attn_implementation="sdpa"
|
||||||
image_file = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg?download=true"
|
|
||||||
raw_image = Image.open(requests.get(image_file, stream=True).raw)
|
|
||||||
inputs = processor(raw_image, prompt, return_tensors="pt")
|
|
||||||
output = model.generate(**inputs, max_new_tokens=20)
|
|
||||||
|
|
||||||
print(processor.decode(output[0], skip_special_tokens=True)[inputs.input_ids.shape[1]: ])
|
|
||||||
```
|
|
||||||
|
|
||||||
### Multi-image Inference
|
|
||||||
|
|
||||||
```python
|
|
||||||
model_id = "google/paligemma-3b-ft-nlvr2-448" # checkpoint tuned for multiple images
|
|
||||||
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
|
|
||||||
processor = PaliGemmaProcessor.from_pretrained(model_id)
|
|
||||||
|
|
||||||
prompt = "answer en Which of the two pictures shows a snowman, first or second?"
|
|
||||||
stop_sign_image = Image.open(
|
|
||||||
requests.get("https://www.ilankelman.org/stopsigns/australia.jpg", stream=True).raw
|
|
||||||
)
|
)
|
||||||
snow_image = Image.open(
|
processor = AutoProcessor.from_pretrained(
|
||||||
requests.get(
|
"google/paligemma2-3b-mix-224",
|
||||||
"https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg", stream=True
|
|
||||||
).raw
|
|
||||||
)
|
)
|
||||||
|
|
||||||
inputs = processor(images=[[snow_image, stop_sign_image]], text=prompt, return_tensors="pt")
|
prompt = "What is in this image?"
|
||||||
|
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
|
||||||
output = model.generate(**inputs, max_new_tokens=20)
|
image = Image.open(requests.get(url, stream=True).raw)
|
||||||
print(processor.decode(output[0], skip_special_tokens=True)[inputs.input_ids.shape[1]: ])
|
inputs = processor(image, prompt, return_tensors="pt").to("cuda")
|
||||||
|
|
||||||
|
output = model.generate(**inputs, max_new_tokens=50, cache_implementation="static")
|
||||||
|
print(processor.decode(output[0], skip_special_tokens=True))
|
||||||
```
|
```
|
||||||
|
|
||||||
## Resources
|
</hfoption>
|
||||||
|
</hfoptions>
|
||||||
|
|
||||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with PaliGemma. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
|
Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.
|
||||||
|
|
||||||
- A blog post introducing all the features of PaliGemma can be found [here](https://huggingface.co/blog/paligemma).
|
The example below uses [torchao](../quantization/torchao) to only quantize the weights to int4.
|
||||||
- Demo notebooks on how to fine-tune PaliGemma for VQA with the Trainer API along with inference can be found [here](https://github.com/huggingface/notebooks/tree/main/examples/paligemma).
|
|
||||||
- Demo notebooks on how to fine-tune PaliGemma on a custom dataset (receipt image -> JSON) along with inference can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/PaliGemma). 🌎
|
```py
|
||||||
|
# pip install torchao
|
||||||
|
import torch
|
||||||
|
import requests
|
||||||
|
from PIL import Image
|
||||||
|
from transformers import TorchAoConfig, AutoProcessor, PaliGemmaForConditionalGeneration
|
||||||
|
|
||||||
|
quantization_config = TorchAoConfig("int4_weight_only", group_size=128)
|
||||||
|
model = PaliGemmaForConditionalGeneration.from_pretrained(
|
||||||
|
"google/paligemma2-28b-mix-224",
|
||||||
|
torch_dtype=torch.bfloat16,
|
||||||
|
device_map="auto",
|
||||||
|
quantization_config=quantization_config
|
||||||
|
)
|
||||||
|
processor = AutoProcessor.from_pretrained(
|
||||||
|
"google/paligemma2-28b-mix-224",
|
||||||
|
)
|
||||||
|
|
||||||
|
prompt = "What is in this image?"
|
||||||
|
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
|
||||||
|
image = Image.open(requests.get(url, stream=True).raw)
|
||||||
|
inputs = processor(image, prompt, return_tensors="pt").to("cuda")
|
||||||
|
|
||||||
|
output = model.generate(**inputs, max_new_tokens=50, cache_implementation="static")
|
||||||
|
print(processor.decode(output[0], skip_special_tokens=True))
|
||||||
|
```
|
||||||
|
|
||||||
|
Use the [AttentionMaskVisualizer](https://github.com/huggingface/transformers/blob/beb9b5b02246b9b7ee81ddf938f93f44cfeaad19/src/transformers/utils/attention_visualizer.py#L139) to better understand what tokens the model can and cannot attend to.
|
||||||
|
|
||||||
|
```py
|
||||||
|
from transformers.utils.attention_visualizer import AttentionMaskVisualizer
|
||||||
|
|
||||||
|
visualizer = AttentionMaskVisualizer("google/paligemma2-3b-mix-224")
|
||||||
|
visualizer("<img> What is in this image?")
|
||||||
|
```
|
||||||
|
|
||||||
|
<div class="flex justify-center">
|
||||||
|
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/paligemma2-attn-mask.png"/>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
## Notes
|
||||||
|
|
||||||
|
- PaliGemma is not a conversational model and works best when fine-tuned for specific downstream tasks such as image captioning, visual question answering (VQA), object detection, and document understanding.
|
||||||
|
- [`PaliGemmaProcessor`] can prepare images, text, and optional labels for the model. Pass the `suffix` parameter to the processor to create labels for the model during fine-tuning.
|
||||||
|
|
||||||
|
```py
|
||||||
|
prompt = "What is in this image?"
|
||||||
|
answer = "a pallas cat"
|
||||||
|
inputs = processor(images=image, text=prompt, suffix=answer, return_tensors="pt")
|
||||||
|
```
|
||||||
|
- PaliGemma can support multiple input images if it is fine-tuned to accept multiple images. For example, the [NLVR2](https://huggingface.co/google/paligemma-3b-ft-nlvr2-448) checkpoint supports multiple images. Pass the images as a list to the processor.
|
||||||
|
|
||||||
|
```py
|
||||||
|
import torch
|
||||||
|
import requests
|
||||||
|
from PIL import Image
|
||||||
|
from transformers import TorchAoConfig, AutoProcessor, PaliGemmaForConditionalGeneration
|
||||||
|
|
||||||
|
model = PaliGemmaForConditionalGeneration.from_pretrained("google/paligemma-3b-ft-nlvr2-448")
|
||||||
|
processor = AutoProcessor.from_pretrained("google/paligemma-3b-ft-nlvr2-448")
|
||||||
|
|
||||||
|
prompt = "Are these two images the same?"
|
||||||
|
cat_image = Image.open(
|
||||||
|
requests.get("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg", stream=True).raw
|
||||||
|
)
|
||||||
|
cow_image = Image.open(
|
||||||
|
requests.get(
|
||||||
|
"https://media.istockphoto.com/id/1192867753/photo/cow-in-berchida-beach-siniscola.jpg?s=612x612&w=0&k=20&c=v0hjjniwsMNfJSuKWZuIn8pssmD5h5bSN1peBd1CmH4=", stream=True
|
||||||
|
).raw
|
||||||
|
)
|
||||||
|
|
||||||
|
inputs = processor(images=[[cat_image, cow_image]], text=prompt, return_tensors="pt")
|
||||||
|
|
||||||
|
output = model.generate(**inputs, max_new_tokens=20, cache_implementation="static")
|
||||||
|
print(processor.decode(output[0], skip_special_tokens=True))
|
||||||
|
```
|
||||||
|
|
||||||
## PaliGemmaConfig
|
## PaliGemmaConfig
|
||||||
|
|
||||||
|
@ -96,7 +96,7 @@ All the [checkpoints](https://huggingface.co/models?search=pegasus) are fine-tun
|
|||||||
|
|
||||||
## Resources
|
## Resources
|
||||||
|
|
||||||
- [Script](https://github.com/huggingface/transformers/tree/main/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh) to fine-tune pegasus
|
- [Script](https://github.com/huggingface/transformers-research-projects/tree/main/seq2seq-distillation/finetune_pegasus_xsum.sh) to fine-tune pegasus
|
||||||
on the XSUM dataset. Data download instructions at [examples/pytorch/summarization/](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization/README.md).
|
on the XSUM dataset. Data download instructions at [examples/pytorch/summarization/](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization/README.md).
|
||||||
- [Causal language modeling task guide](../tasks/language_modeling)
|
- [Causal language modeling task guide](../tasks/language_modeling)
|
||||||
- [Translation task guide](../tasks/translation)
|
- [Translation task guide](../tasks/translation)
|
||||||
|
149
docs/source/en/model_doc/phi4_multimodal.md
Normal file
@ -0,0 +1,149 @@
|
|||||||
|
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||||
|
the License. You may obtain a copy of the License at
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||||
|
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations under the License.
|
||||||
|
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||||
|
rendered properly in your Markdown viewer.
|
||||||
|
-->
|
||||||
|
|
||||||
|
# Phi4 Multimodal
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Phi4 Multimodal is a lightweight open multimodal foundation model that leverages the language, vision, and speech research and datasets used for Phi-3.5 and 4.0 models. The model processes text, image, and audio inputs, generating text outputs, and comes with 128K token context length. The model underwent an enhancement process, incorporating both supervised fine-tuning, direct preference optimization and RLHF (Reinforcement Learning from Human Feedback) to support precise instruction adherence and safety measures. The languages that each modal supports are the following:
|
||||||
|
|
||||||
|
- Text: Arabic, Chinese, Czech, Danish, Dutch, English, Finnish, French, German, Hebrew, Hungarian, Italian, Japanese, Korean, Norwegian, Polish, Portuguese, Russian, Spanish, Swedish, Thai, Turkish, Ukrainian
|
||||||
|
- Vision: English
|
||||||
|
- Audio: English, Chinese, German, French, Italian, Japanese, Spanish, Portuguese
|
||||||
|
|
||||||
|
This model was contributed by [Cyril Vallez](https://huggingface.co/cyrilvallez). The most recent code can be
|
||||||
|
found [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/phi4_multimodal/modeling_phi4_multimodal.py).
|
||||||
|
|
||||||
|
|
||||||
|
## Usage tips
|
||||||
|
|
||||||
|
`Phi4-multimodal-instruct` can be found on the [Huggingface Hub](https://huggingface.co/microsoft/Phi-4-multimodal-instruct)
|
||||||
|
|
||||||
|
In the following, we demonstrate how to use it for inference depending on the input modalities (text, image, audio).
|
||||||
|
|
||||||
|
```python
|
||||||
|
import requests
|
||||||
|
import torch
|
||||||
|
import os
|
||||||
|
import io
|
||||||
|
from PIL import Image
|
||||||
|
import soundfile as sf
|
||||||
|
from transformers import AutoModelForCausalLM, AutoProcessor, GenerationConfig
|
||||||
|
from urllib.request import urlopen
|
||||||
|
|
||||||
|
|
||||||
|
# Define model path
|
||||||
|
model_path = "microsoft/Phi-4-multimodal-instruct"
|
||||||
|
device = "cuda:0"
|
||||||
|
|
||||||
|
# Load model and processor
|
||||||
|
processor = AutoProcessor.from_pretrained(model_path)
|
||||||
|
model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device, torch_dtype=torch.float16)
|
||||||
|
|
||||||
|
# Optional: load the adapters (note that without them, the base model will very likely not work well)
|
||||||
|
model.load_adapter(model_path, adapter_name="speech", device_map=device, adapter_kwargs={"subfolder": 'speech-lora'})
|
||||||
|
model.load_adapter(model_path, adapter_name="vision", device_map=device, adapter_kwargs={"subfolder": 'vision-lora'})
|
||||||
|
|
||||||
|
# Define prompt structure
|
||||||
|
user_prompt = '<|user|>'
|
||||||
|
assistant_prompt = '<|assistant|>'
|
||||||
|
prompt_suffix = '<|end|>'
|
||||||
|
|
||||||
|
# Part 1: Image Processing
|
||||||
|
model.set_adapter("vision") # if loaded, activate the vision adapter
|
||||||
|
print("\n--- IMAGE PROCESSING ---")
|
||||||
|
image_url = 'https://www.ilankelman.org/stopsigns/australia.jpg'
|
||||||
|
prompt = f'{user_prompt}<|image_1|>What is shown in this image?{prompt_suffix}{assistant_prompt}'
|
||||||
|
print(f'>>> Prompt\n{prompt}')
|
||||||
|
|
||||||
|
# Download and open image
|
||||||
|
image = Image.open(requests.get(image_url, stream=True).raw)
|
||||||
|
inputs = processor(text=prompt, images=image, return_tensors='pt').to(device)
|
||||||
|
|
||||||
|
# Generate response
|
||||||
|
generate_ids = model.generate(
|
||||||
|
**inputs,
|
||||||
|
max_new_tokens=1000,
|
||||||
|
do_sample=False,
|
||||||
|
)
|
||||||
|
generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
|
||||||
|
response = processor.batch_decode(
|
||||||
|
generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
|
||||||
|
)[0]
|
||||||
|
print(f'>>> Response\n{response}')
|
||||||
|
|
||||||
|
# Part 2: Audio Processing
|
||||||
|
model.set_adapter("speech") # if loaded, activate the speech adapter
|
||||||
|
print("\n--- AUDIO PROCESSING ---")
|
||||||
|
audio_url = "https://upload.wikimedia.org/wikipedia/commons/b/b0/Barbara_Sahakian_BBC_Radio4_The_Life_Scientific_29_May_2012_b01j5j24.flac"
|
||||||
|
speech_prompt = "Transcribe the audio to text, and then translate the audio to French. Use <sep> as a separator between the original transcript and the translation."
|
||||||
|
prompt = f'{user_prompt}<|audio_1|>{speech_prompt}{prompt_suffix}{assistant_prompt}'
|
||||||
|
print(f'>>> Prompt\n{prompt}')
|
||||||
|
|
||||||
|
# Downlowd and open audio file
|
||||||
|
audio, sample_rate = sf.read(io.BytesIO(urlopen(audio_url).read()))
|
||||||
|
|
||||||
|
# Process with the model
|
||||||
|
inputs = processor(text=prompt, audios=audio, sample_rate=sample_rate, return_tensors='pt').to(device)
|
||||||
|
|
||||||
|
generate_ids = model.generate(
|
||||||
|
**inputs,
|
||||||
|
max_new_tokens=1000,
|
||||||
|
do_sample=False,
|
||||||
|
)
|
||||||
|
generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
|
||||||
|
response = processor.batch_decode(
|
||||||
|
generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
|
||||||
|
)[0]
|
||||||
|
print(f'>>> Response\n{response}')
|
||||||
|
```
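
The example above covers the image and audio paths; text-only generation follows the same prompt format and needs no adapter. A minimal sketch, reusing the `model`, `processor`, and prompt tokens defined above (the prompt text itself is an arbitrary illustration, not taken from this diff):

```python
# Part 3: Text-only chat (sketch; reuses model, processor and the prompt tokens above)
prompt = f'{user_prompt}Write a haiku about speech recognition.{prompt_suffix}{assistant_prompt}'
inputs = processor(text=prompt, return_tensors='pt').to(device)

generate_ids = model.generate(**inputs, max_new_tokens=100, do_sample=False)
generate_ids = generate_ids[:, inputs['input_ids'].shape[1]:]
print(processor.batch_decode(generate_ids, skip_special_tokens=True)[0])
```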

## Phi4MultimodalFeatureExtractor

[[autodoc]] Phi4MultimodalFeatureExtractor

## Phi4MultimodalImageProcessorFast

[[autodoc]] Phi4MultimodalImageProcessorFast

## Phi4MultimodalProcessor

[[autodoc]] Phi4MultimodalProcessor

## Phi4MultimodalAudioConfig

[[autodoc]] Phi4MultimodalAudioConfig

## Phi4MultimodalVisionConfig

[[autodoc]] Phi4MultimodalVisionConfig

## Phi4MultimodalConfig

[[autodoc]] Phi4MultimodalConfig

## Phi4MultimodalAudioModel

[[autodoc]] Phi4MultimodalAudioModel

## Phi4MultimodalVisionModel

[[autodoc]] Phi4MultimodalVisionModel

## Phi4MultimodalModel

[[autodoc]] Phi4MultimodalModel
    - forward

## Phi4MultimodalForCausalLM

[[autodoc]] Phi4MultimodalForCausalLM
    - forward

96 docs/source/en/model_doc/prompt_depth_anything.md (new file)
@@ -0,0 +1,96 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Prompt Depth Anything

## Overview

The Prompt Depth Anything model was introduced in [Prompting Depth Anything for 4K Resolution Accurate Metric Depth Estimation](https://arxiv.org/abs/2412.14015) by Haotong Lin, Sida Peng, Jingxiao Chen, Songyou Peng, Jiaming Sun, Minghuan Liu, Hujun Bao, Jiashi Feng, Xiaowei Zhou, Bingyi Kang.

The abstract from the paper is as follows:

*Prompts play a critical role in unleashing the power of language and vision foundation models for specific tasks. For the first time, we introduce prompting into depth foundation models, creating a new paradigm for metric depth estimation termed Prompt Depth Anything. Specifically, we use a low-cost LiDAR as the prompt to guide the Depth Anything model for accurate metric depth output, achieving up to 4K resolution. Our approach centers on a concise prompt fusion design that integrates the LiDAR at multiple scales within the depth decoder. To address training challenges posed by limited datasets containing both LiDAR depth and precise GT depth, we propose a scalable data pipeline that includes synthetic data LiDAR simulation and real data pseudo GT depth generation. Our approach sets new state-of-the-arts on the ARKitScenes and ScanNet++ datasets and benefits downstream applications, including 3D reconstruction and generalized robotic grasping.*

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/prompt_depth_anything_architecture.jpg"
alt="drawing" width="600"/>

<small> Prompt Depth Anything overview. Taken from the <a href="https://arxiv.org/pdf/2412.14015">original paper</a>.</small>

## Usage example

The Transformers library allows you to use the model with just a few lines of code:

```python
>>> import torch
>>> import requests
>>> import numpy as np

>>> from PIL import Image
>>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation

>>> url = "https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/image.jpg?raw=true"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> image_processor = AutoImageProcessor.from_pretrained("depth-anything/prompt-depth-anything-vits-hf")
>>> model = AutoModelForDepthEstimation.from_pretrained("depth-anything/prompt-depth-anything-vits-hf")

>>> prompt_depth_url = "https://github.com/DepthAnything/PromptDA/blob/main/assets/example_images/arkit_depth.png?raw=true"
>>> prompt_depth = Image.open(requests.get(prompt_depth_url, stream=True).raw)
>>> # the prompt depth can be None, and the model will output a monocular relative depth.

>>> # prepare image for the model
>>> inputs = image_processor(images=image, return_tensors="pt", prompt_depth=prompt_depth)

>>> with torch.no_grad():
...     outputs = model(**inputs)

>>> # interpolate to original size
>>> post_processed_output = image_processor.post_process_depth_estimation(
...     outputs,
...     target_sizes=[(image.height, image.width)],
... )

>>> # visualize the prediction
>>> predicted_depth = post_processed_output[0]["predicted_depth"]
>>> depth = predicted_depth * 1000
>>> depth = depth.detach().cpu().numpy()
>>> depth = Image.fromarray(depth.astype("uint16"))  # mm
```
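
As the comment in the example notes, `prompt_depth` is optional. A minimal sketch of the prompt-free path, reusing the `image`, `image_processor`, and `model` loaded above (in that case the output is a relative, not metric, depth map):

```python
>>> # without a LiDAR prompt the model falls back to monocular relative depth
>>> inputs = image_processor(images=image, return_tensors="pt")
>>> with torch.no_grad():
...     outputs = model(**inputs)
>>> relative_depth = image_processor.post_process_depth_estimation(
...     outputs, target_sizes=[(image.height, image.width)]
... )[0]["predicted_depth"]
```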

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Prompt Depth Anything.

- [Prompt Depth Anything Demo](https://huggingface.co/spaces/depth-anything/PromptDA)
- [Prompt Depth Anything Interactive Results](https://promptda.github.io/interactive.html)

If you are interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.

## PromptDepthAnythingConfig

[[autodoc]] PromptDepthAnythingConfig

## PromptDepthAnythingForDepthEstimation

[[autodoc]] PromptDepthAnythingForDepthEstimation
    - forward

## PromptDepthAnythingImageProcessor

[[autodoc]] PromptDepthAnythingImageProcessor
    - preprocess
    - post_process_depth_estimation

@@ -54,7 +54,7 @@ This model was contributed by [shangz](https://huggingface.co/shangz).

- QDQBERT model can be loaded from any checkpoint of HuggingFace BERT model (for example *google-bert/bert-base-uncased*), and
  perform Quantization Aware Training/Post Training Quantization.
- A complete example of using QDQBERT model to perform Quantization Aware Training and Post Training Quantization for
- SQUAD task can be found at [transformers/examples/research_projects/quantization-qdqbert/](examples/research_projects/quantization-qdqbert/).
+ SQUAD task can be found at https://github.com/huggingface/transformers-research-projects/tree/main/quantization-qdqbert.

### Set default quantizers
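
The "Set default quantizers" section that this hunk ends on relies on NVIDIA's `pytorch-quantization` toolkit. A minimal sketch of what setting default quantizers typically looks like (the 8-bit and max-calibration settings below are illustrative assumptions, not values taken from this diff):

```python
import pytorch_quantization.nn as quant_nn
from pytorch_quantization.tensor_quant import QuantDescriptor

# Illustrative defaults: 8-bit max calibration for activations,
# per-channel (axis 0) quantization for weights.
input_desc = QuantDescriptor(num_bits=8, calib_method="max")
weight_desc = QuantDescriptor(num_bits=8, axis=(0,))
quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)
```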

@@ -29,7 +29,7 @@ The Qwen2-Audio is the new model series of large audio-language models from the

* voice chat: users can freely engage in voice interactions with Qwen2-Audio without text input
* audio analysis: users could provide audio and text instructions for analysis during the interaction

It was proposed in [Qwen2-Audio Technical Report](https://arxiv.org/abs/2407.10759) by Yunfei Chu, Jin Xu, Qian Yang, Haojie Wei, Xipin Wei, Zhifang Guo, Yichong Leng, Yuanjun Lv, Jinzheng He, Junyang Lin, Chang Zhou, Jingren Zhou.

The abstract from the paper is the following:

@@ -100,7 +100,7 @@ for message in conversation:
    for ele in message["content"]:
        if ele["type"] == "audio":
            audios.append(librosa.load(
                BytesIO(urlopen(ele['audio_url']).read()),
                sr=processor.feature_extractor.sampling_rate)[0]
            )

@@ -125,7 +125,7 @@ processor = AutoProcessor.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct")
model = Qwen2AudioForConditionalGeneration.from_pretrained("Qwen/Qwen2-Audio-7B-Instruct", device_map="auto")

conversation = [
    {'role': 'system', 'content': 'You are a helpful assistant.'},
    {"role": "user", "content": [
        {"type": "audio", "audio_url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-Audio/audio/glass-breaking-151256.mp3"},
        {"type": "text", "text": "What's that sound?"},

@@ -148,7 +148,7 @@ for message in conversation:
        if ele["type"] == "audio":
            audios.append(
                librosa.load(
                    BytesIO(urlopen(ele['audio_url']).read()),
                    sr=processor.feature_extractor.sampling_rate)[0]
            )
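
These hunks show only the audio-loading loop and the conversation definition; in the surrounding example the collected `audios` list is then passed to the processor and to `generate` roughly as follows (a sketch for orientation, not lines from this diff; `max_length=256` is an arbitrary choice):

```python
text = processor.apply_chat_template(conversation, add_generation_prompt=True, tokenize=False)
inputs = processor(text=text, audios=audios, return_tensors="pt", padding=True)

generate_ids = model.generate(**inputs, max_length=256)
generate_ids = generate_ids[:, inputs.input_ids.size(1):]
response = processor.batch_decode(
    generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
)[0]
```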

@@ -203,7 +203,7 @@ for conversation in conversations:
        if ele["type"] == "audio":
            audios.append(
                librosa.load(
                    BytesIO(urlopen(ele['audio_url']).read()),
                    sr=processor.feature_extractor.sampling_rate)[0]
            )

@@ -221,7 +221,7 @@ response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_

[[autodoc]] Qwen2AudioConfig

- ## Qwen2AudioConfig
+ ## Qwen2AudioEncoderConfig

[[autodoc]] Qwen2AudioEncoderConfig

@@ -229,6 +229,11 @@ response = processor.batch_decode(generate_ids, skip_special_tokens=True, clean_

[[autodoc]] Qwen2AudioProcessor

+ ## Qwen2AudioEncoder
+
+ [[autodoc]] Qwen2AudioEncoder
+     - forward

## Qwen2AudioForConditionalGeneration

[[autodoc]] Qwen2AudioForConditionalGeneration

59 docs/source/en/model_doc/qwen3.md (new file)
@@ -0,0 +1,59 @@
<!--Copyright 2024 The Qwen Team and The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Qwen3

## Overview

To be released with the official model launch.

### Model Details

To be released with the official model launch.

## Usage tips

To be released with the official model launch.
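
Until the official usage tips are published, the API classes listed below suggest Qwen3 plugs into the standard causal-LM workflow. A speculative minimal sketch (the checkpoint id is a placeholder for illustration, not confirmed by this diff):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen3-8B"  # placeholder checkpoint id, assumed for illustration
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto")

inputs = tokenizer("Give me a short introduction to large language models.", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```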

## Qwen3Config

[[autodoc]] Qwen3Config

## Qwen3Model

[[autodoc]] Qwen3Model
    - forward

## Qwen3ForCausalLM

[[autodoc]] Qwen3ForCausalLM
    - forward

## Qwen3ForSequenceClassification

[[autodoc]] Qwen3ForSequenceClassification
    - forward

## Qwen3ForTokenClassification

[[autodoc]] Qwen3ForTokenClassification
    - forward

## Qwen3ForQuestionAnswering

[[autodoc]] Qwen3ForQuestionAnswering
    - forward

58 docs/source/en/model_doc/qwen3_moe.md (new file)
@@ -0,0 +1,58 @@
<!--Copyright 2024 The Qwen Team and The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Qwen3MoE

## Overview

To be released with the official model launch.

### Model Details

To be released with the official model launch.

## Usage tips

To be released with the official model launch.

## Qwen3MoeConfig

[[autodoc]] Qwen3MoeConfig

## Qwen3MoeModel

[[autodoc]] Qwen3MoeModel
    - forward

## Qwen3MoeForCausalLM

[[autodoc]] Qwen3MoeForCausalLM
    - forward

## Qwen3MoeForSequenceClassification

[[autodoc]] Qwen3MoeForSequenceClassification
    - forward

## Qwen3MoeForTokenClassification

[[autodoc]] Qwen3MoeForTokenClassification
    - forward

## Qwen3MoeForQuestionAnswering

[[autodoc]] Qwen3MoeForQuestionAnswering
    - forward

@@ -149,12 +149,24 @@ alt="drawing" width="900"/>

[[autodoc]] SamImageProcessor

+ ## SamVisionModel
+
+ [[autodoc]] SamVisionModel
+     - forward

## SamModel

[[autodoc]] SamModel
    - forward

+ ## TFSamVisionModel
+
+ [[autodoc]] TFSamVisionModel
+     - call

## TFSamModel

[[autodoc]] TFSamModel

100 docs/source/en/model_doc/shieldgemma2.md (new file)
@@ -0,0 +1,100 @@
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# ShieldGemma 2

## Overview

The ShieldGemma 2 model was proposed in a forthcoming technical report by Google. ShieldGemma 2, built on [Gemma 3](https://ai.google.dev/gemma/docs/core/model_card_3), is a 4 billion (4B) parameter model that checks the safety of both synthetic and natural images against key categories to help you build robust datasets and models. With this addition to the Gemma family of models, researchers and developers can now easily minimize the risk of harmful content in their models across key areas of harm as defined below:

- No Sexually Explicit content: The image shall not contain content that depicts explicit or graphic sexual acts (e.g., pornography, erotic nudity, depictions of rape or sexual assault).
- No Dangerous Content: The image shall not contain content that facilitates or encourages activities that could cause real-world harm (e.g., building firearms and explosive devices, promotion of terrorism, instructions for suicide).
- No Violence/Gore content: The image shall not contain content that depicts shocking, sensational, or gratuitous violence (e.g., excessive blood and gore, gratuitous violence against animals, extreme injury or moment of death).

We recommend using ShieldGemma 2 as an input filter to vision language models, or as an output filter of image generation systems. To train a robust image safety model, we curated training datasets of natural and synthetic images and instruction-tuned Gemma 3 to demonstrate strong performance.

This model was contributed by [Ryan Mullins](https://huggingface.co/RyanMullins).

## Usage Example

- ShieldGemma 2 provides a Processor that accepts a list of `images` and an optional list of `policies` as input, and constructs a batch of prompts as the product of these two lists using the provided chat template.
- You can extend ShieldGemma's built-in policies with the `custom_policies` argument to the Processor. Using the same key as one of the built-in policies will overwrite that policy with your custom definition.
- ShieldGemma 2 does not support the image cropping capabilities used by Gemma 3.

### Classification against Built-in Policies

```python
from PIL import Image
import requests
from transformers import AutoProcessor, ShieldGemma2ForImageClassification

model_id = "google/shieldgemma-2-4b-it"
model = ShieldGemma2ForImageClassification.from_pretrained(model_id, device_map="auto")
processor = AutoProcessor.from_pretrained(model_id)

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=[image], return_tensors="pt").to(model.device)

output = model(**inputs)
print(output.probabilities)
```

### Classification against Custom Policies

```python
from PIL import Image
import requests
from transformers import AutoProcessor, ShieldGemma2ForImageClassification

model_id = "google/shieldgemma-2-4b-it"
model = ShieldGemma2ForImageClassification.from_pretrained(model_id, device_map="auto")
processor = AutoProcessor.from_pretrained(model_id)

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"
image = Image.open(requests.get(url, stream=True).raw)

custom_policies = {
    "key_a": "description_a",
    "key_b": "description_b",
}

inputs = processor(
    images=[image],
    custom_policies=custom_policies,
    policies=["dangerous", "key_a", "key_b"],
    return_tensors="pt",
).to(model.device)

output = model(**inputs)
print(output.probabilities)
```

## ShieldGemma2Processor

[[autodoc]] ShieldGemma2Processor

## ShieldGemma2Config

[[autodoc]] ShieldGemma2Config

## ShieldGemma2ForImageClassification

[[autodoc]] ShieldGemma2ForImageClassification
    - forward

@@ -18,6 +18,7 @@ rendered properly in your Markdown viewer.

<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
+ <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>

@@ -64,7 +64,7 @@ appropriately for the textual and visual parts.

The [`BertTokenizer`] is used to encode the text. A custom detector/image processor must be used
to get the visual embeddings. The following example notebooks show how to use VisualBERT with Detectron-like models:

- - [VisualBERT VQA demo notebook](https://github.com/huggingface/transformers/tree/main/examples/research_projects/visual_bert) : This notebook
+ - [VisualBERT VQA demo notebook](https://github.com/huggingface/transformers-research-projects/tree/main/visual_bert) : This notebook
  contains an example on VisualBERT VQA.

- [Generate Embeddings for VisualBERT (Colab Notebook)](https://colab.research.google.com/drive/1bLGxKdldwqnMVA5x4neY7-l_8fKGWQYI?usp=sharing) : This notebook contains

@@ -14,143 +14,83 @@ rendered properly in your Markdown viewer.

-->

- # Vision Transformer (ViT)
-
- <div class="flex flex-wrap space-x-1">
- <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
- <img alt="TensorFlow" src="https://img.shields.io/badge/TensorFlow-FF6F00?style=flat&logo=tensorflow&logoColor=white">
- <img alt="Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA…
- ">
- <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
- </div>
-
- ## Overview
-
- The Vision Transformer (ViT) model was proposed in [An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale](https://arxiv.org/abs/2010.11929) by Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, Neil Houlsby. It's the first paper that successfully trains a Transformer encoder on ImageNet, attaining very good results compared to familiar convolutional architectures.
-
- The abstract from the paper is the following:
-
- *While the Transformer architecture has become the de-facto standard for natural language processing tasks, its applications to computer vision remain limited. In vision, attention is either applied in conjunction with convolutional networks, or used to replace certain components of convolutional networks while keeping their overall structure in place. We show that this reliance on CNNs is not necessary and a pure transformer applied directly to sequences of image patches can perform very well on image classification tasks. When pre-trained on large amounts of data and transferred to multiple mid-sized or small image recognition benchmarks (ImageNet, CIFAR-100, VTAB, etc.), Vision Transformer (ViT) attains excellent results compared to state-of-the-art convolutional networks while requiring substantially fewer computational resources to train.*
-
- <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/vit_architecture.jpg" alt="drawing" width="600"/>
-
- <small> ViT architecture. Taken from the <a href="https://arxiv.org/abs/2010.11929">original paper.</a> </small>
-
- Following the original Vision Transformer, some follow-up works have been made:
-
- - [DeiT](deit) (Data-efficient Image Transformers) by Facebook AI. DeiT models are distilled vision transformers. The authors of DeiT also released more efficiently trained ViT models, which you can directly plug into [`ViTModel`] or [`ViTForImageClassification`]. There are 4 variants available (in 3 different sizes): *facebook/deit-tiny-patch16-224*, *facebook/deit-small-patch16-224*, *facebook/deit-base-patch16-224* and *facebook/deit-base-patch16-384*. Note that one should use [`DeiTImageProcessor`] in order to prepare images for the model.
- - [BEiT](beit) (BERT pre-training of Image Transformers) by Microsoft Research. BEiT models outperform supervised pre-trained vision transformers using a self-supervised method inspired by BERT (masked image modeling) and based on a VQ-VAE.
- - DINO (a method for self-supervised training of Vision Transformers) by Facebook AI. Vision Transformers trained using the DINO method show very interesting properties not seen with convolutional models. They are capable of segmenting objects, without having ever been trained to do so. DINO checkpoints can be found on the [hub](https://huggingface.co/models?other=dino).
- - [MAE](vit_mae) (Masked Autoencoders) by Facebook AI. By pre-training Vision Transformers to reconstruct pixel values for a high portion (75%) of masked patches (using an asymmetric encoder-decoder architecture), the authors show that this simple method outperforms supervised pre-training after fine-tuning.
-
- This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code (written in JAX) can be found [here](https://github.com/google-research/vision_transformer).
-
- Note that we converted the weights from Ross Wightman's [timm library](https://github.com/rwightman/pytorch-image-models), who already converted the weights from JAX to PyTorch. Credits go to him!
-
- ## Usage tips
-
- - To feed images to the Transformer encoder, each image is split into a sequence of fixed-size non-overlapping patches, which are then linearly embedded. A [CLS] token is added to serve as representation of an entire image, which can be used for classification. The authors also add absolute position embeddings, and feed the resulting sequence of vectors to a standard Transformer encoder.
- - As the Vision Transformer expects each image to be of the same size (resolution), one can use [`ViTImageProcessor`] to resize (or rescale) and normalize images for the model.
- - Both the patch resolution and image resolution used during pre-training or fine-tuning are reflected in the name of each checkpoint. For example, `google/vit-base-patch16-224` refers to a base-sized architecture with patch resolution of 16x16 and fine-tuning resolution of 224x224. All checkpoints can be found on the [hub](https://huggingface.co/models?search=vit).
- - The available checkpoints are either (1) pre-trained on [ImageNet-21k](http://www.image-net.org/) (a collection of 14 million images and 21k classes) only, or (2) also fine-tuned on [ImageNet](http://www.image-net.org/challenges/LSVRC/2012/) (also referred to as ILSVRC 2012, a collection of 1.3 million images and 1,000 classes).
- - The Vision Transformer was pre-trained using a resolution of 224x224. During fine-tuning, it is often beneficial to use a higher resolution than pre-training [(Touvron et al., 2019)](https://arxiv.org/abs/1906.06423), [(Kolesnikov et al., 2020)](https://arxiv.org/abs/1912.11370). In order to fine-tune at higher resolution, the authors perform 2D interpolation of the pre-trained position embeddings, according to their location in the original image.
- - The best results are obtained with supervised pre-training, which is not the case in NLP. The authors also performed an experiment with a self-supervised pre-training objective, namely masked patched prediction (inspired by masked language modeling). With this approach, the smaller ViT-B/16 model achieves 79.9% accuracy on ImageNet, a significant improvement of 2% to training from scratch, but still 4% behind supervised pre-training.
-
- ### Using Scaled Dot Product Attention (SDPA)
-
- PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the [official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html) or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention) page for more information.
-
- SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set `attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.
-
- ```
- from transformers import ViTForImageClassification
- model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224", attn_implementation="sdpa", torch_dtype=torch.float16)
- ...
- ```
-
- For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).
-
- On a local benchmark (A100-40GB, PyTorch 2.3.0, OS Ubuntu 22.04) with `float32` and `google/vit-base-patch16-224` model, we saw the following speedups during inference.
-
- | Batch size | Average inference time (ms), eager mode | Average inference time (ms), sdpa model | Speed up, Sdpa / Eager (x) |
- |--------------|-------------------------------------------|-------------------------------------------|------------------------------|
- | 1 | 7 | 6 | 1.17 |
- | 2 | 8 | 6 | 1.33 |
- | 4 | 8 | 6 | 1.33 |
- | 8 | 8 | 6 | 1.33 |
-
- ## Resources
-
- Demo notebooks regarding inference as well as fine-tuning ViT on custom data can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/VisionTransformer).
- A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ViT. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
-
- `ViTForImageClassification` is supported by:
- <PipelineTag pipeline="image-classification"/>
-
- - A blog post on how to [Fine-Tune ViT for Image Classification with Hugging Face Transformers](https://huggingface.co/blog/fine-tune-vit)
- - A blog post on [Image Classification with Hugging Face Transformers and `Keras`](https://www.philschmid.de/image-classification-huggingface-transformers-keras)
- - A notebook on [Fine-tuning for Image Classification with Hugging Face Transformers](https://github.com/huggingface/notebooks/blob/main/examples/image_classification.ipynb)
- - A notebook on how to [Fine-tune the Vision Transformer on CIFAR-10 with the Hugging Face Trainer](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb)
- - A notebook on how to [Fine-tune the Vision Transformer on CIFAR-10 with PyTorch Lightning](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_PyTorch_Lightning.ipynb)
-
- ⚗️ Optimization
-
- - A blog post on how to [Accelerate Vision Transformer (ViT) with Quantization using Optimum](https://www.philschmid.de/optimizing-vision-transformer)
-
- ⚡️ Inference
-
- - A notebook on [Quick demo: Vision Transformer (ViT) by Google Brain](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Quick_demo_of_HuggingFace_version_of_Vision_Transformer_inference.ipynb)
-
- 🚀 Deploy
-
- - A blog post on [Deploying Tensorflow Vision Models in Hugging Face with TF Serving](https://huggingface.co/blog/tf-serving-vision)
- - A blog post on [Deploying Hugging Face ViT on Vertex AI](https://huggingface.co/blog/deploy-vertex-ai)
- - A blog post on [Deploying Hugging Face ViT on Kubernetes with TF Serving](https://huggingface.co/blog/deploy-tfserving-kubernetes)

+ <div style="float: right;">
+ <div class="flex flex-wrap space-x-1">
+ <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
+ <img alt="TensorFlow" src="https://img.shields.io/badge/TensorFlow-FF6F00?style=flat&logo=tensorflow&logoColor=white">
+ <img alt="Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA…
+ ">
+ <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
+ </div>
+ </div>
+
+ # Vision Transformer (ViT)
+
+ [Vision Transformer (ViT)](https://huggingface.co/papers/2010.11929) is a transformer adapted for computer vision tasks. An image is split into smaller fixed-sized patches which are treated as a sequence of tokens, similar to words for NLP tasks. ViT requires less resources to pretrain compared to convolutional architectures and its performance on large datasets can be transferred to smaller downstream tasks.
+
+ You can find all the original ViT checkpoints under the [Google](https://huggingface.co/google?search_models=vit) organization.
+
+ > [!TIP]
+ > Click on the ViT models in the right sidebar for more examples of how to apply ViT to different computer vision tasks.
+
+ The example below demonstrates how to classify an image with [`Pipeline`] or the [`AutoModel`] class.
+
+ <hfoptions id="usage">
+ <hfoption id="Pipeline">
+
+ ```py
+ import torch
+ from transformers import pipeline
+
+ pipeline = pipeline(
+     task="image-classification",
+     model="google/vit-base-patch16-224",
+     torch_dtype=torch.float16,
+     device=0
+ )
+ pipeline(images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg")
+ ```
+
+ </hfoption>
+ <hfoption id="AutoModel">
+
+ ```py
+ import torch
+ import requests
+ from PIL import Image
+ from transformers import AutoModelForImageClassification, AutoImageProcessor
+
+ image_processor = AutoImageProcessor.from_pretrained(
+     "google/vit-base-patch16-224",
+     use_fast=True,
+ )
+ model = AutoModelForImageClassification.from_pretrained(
+     "google/vit-base-patch16-224",
+     torch_dtype=torch.float16,
+     device_map="auto",
+     attn_implementation="sdpa"
+ )
+ url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
+ image = Image.open(requests.get(url, stream=True).raw)
+ inputs = image_processor(image, return_tensors="pt").to("cuda")
+
+ with torch.no_grad():
+     logits = model(**inputs).logits
+ predicted_class_id = logits.argmax(dim=-1).item()
+
+ class_labels = model.config.id2label
+ predicted_class_label = class_labels[predicted_class_id]
+ print(f"The predicted class label is: {predicted_class_label}")
+ ```
+
+ </hfoption>
+ </hfoptions>
+
+ ## Notes
+
+ - The best results are obtained with supervised pretraining, and during fine-tuning, it may be better to use images with a resolution higher than 224x224.
+ - Use [`ViTImageProcessorFast`] to resize (or rescale) and normalize images to the expected size.
+ - The patch and image resolution are reflected in the checkpoint name. For example, google/vit-base-patch16-224, is the **base-sized** architecture with a patch resolution of 16x16 and fine-tuning resolution of 224x224.
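
The second note in the added section above points to [`ViTImageProcessorFast`]. A minimal sketch of using it directly, reusing the same `google/vit-base-patch16-224` checkpoint as the examples in this hunk (not lines from the diff itself):

```py
from PIL import Image
import requests
from transformers import ViTImageProcessorFast

image_processor = ViTImageProcessorFast.from_pretrained("google/vit-base-patch16-224")
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
image = Image.open(requests.get(url, stream=True).raw)

# resizes/rescales and normalizes to the model's expected input
inputs = image_processor(images=image, return_tensors="pt")
```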

## ViTConfig

@@ -171,9 +111,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h

[[autodoc]] ViTImageProcessorFast
    - preprocess

- <frameworkcontent>
- <pt>

## ViTModel

[[autodoc]] ViTModel

@@ -189,9 +126,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h

[[autodoc]] ViTForImageClassification
    - forward

- </pt>
- <tf>

## TFViTModel

[[autodoc]] TFViTModel

@@ -202,9 +136,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h

[[autodoc]] TFViTForImageClassification
    - call

- </tf>
- <jax>

## FlaxVitModel

[[autodoc]] FlaxViTModel

@@ -214,6 +145,3 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h

[[autodoc]] FlaxViTForImageClassification
    - __call__

- </jax>
- </frameworkcontent>

@@ -19,6 +19,7 @@ rendered properly in your Markdown viewer.

<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
<img alt="TensorFlow" src="https://img.shields.io/badge/TensorFlow-FF6F00?style=flat&logo=tensorflow&logoColor=white">
+ <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>

@@ -18,6 +18,7 @@ rendered properly in your Markdown viewer.

<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
+ <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>

@@ -14,6 +14,7 @@ specific language governing permissions and limitations under the License.

<div class="flex flex-wrap space-x-1">
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
+ <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>

@@ -14,152 +14,86 @@ rendered properly in your Markdown viewer.

-->

- # Whisper
-
- <div class="flex flex-wrap space-x-1">
- <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
- <img alt="TensorFlow" src="https://img.shields.io/badge/TensorFlow-FF6F00?style=flat&logo=tensorflow&logoColor=white">
- <img alt="Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA…
- ">
- <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
- <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
+ <div style="float: right;">
+ <div class="flex flex-wrap space-x-1">
+ <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
+ <img alt="TensorFlow" src="https://img.shields.io/badge/TensorFlow-FF6F00?style=flat&logo=tensorflow&logoColor=white">
+ <img alt="Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAA…
+ ">
+ <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||||
|
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||||
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
## Overview
|
# Whisper
|
||||||
|
|
||||||
[Whisper](https://hf.co/papers/2212.04356) is an encoder-decoder (sequence-to-sequence) transformer pretrained on 680,000 hours of labeled audio data. This amount of pretraining data enables zero-shot performance on audio tasks in English and many other languages. The decoder allows Whisper to map the encoder's learned speech representations to useful outputs, such as text, without additional fine-tuning. Whisper just works out of the box.

You can find all the original Whisper checkpoints under the [Whisper](https://huggingface.co/collections/openai/whisper-release-6501bba2cf999715fd953013) collection.

> [!TIP]
> Click on the Whisper models in the right sidebar for more examples of how to apply Whisper to different audio tasks.

The example below demonstrates how to automatically transcribe speech into text with [`Pipeline`] or the [`AutoModel`] class.

<hfoptions id="usage">
|
||||||
|
<hfoption id="Pipeline">
|
||||||
You can run Whisper in less than 4 lines of code and transcribe in less than a minute!
|
|
||||||
|
|
||||||
```python
|
|
||||||
# pip install transformers torch
|
|
||||||
|
|
||||||
|
```py
|
||||||
import torch
|
import torch
|
||||||
from transformers import pipeline
|
from transformers import pipeline
|
||||||
|
|
||||||
whisper = pipeline("automatic-speech-recognition", "openai/whisper-large-v3", torch_dtype=torch.float16, device="cuda:0")
|
pipeline = pipeline(
|
||||||
|
task="automatic-speech-recognition",
|
||||||
transcription = whisper("<audio_file.mp3>")
|
model="openai/whisper-large-v3-turbo",
|
||||||
|
torch_dtype=torch.float16,
|
||||||
print(transcription["text"])
|
device=0
|
||||||
|
)
|
||||||
|
pipeline("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
|
||||||
```
|
```
|
||||||
|
|
||||||
Voila! You can swap the model with any [Whisper checkpoints](https://huggingface.co/models?other=whisper&sort=downloads) on the Hugging Face Hub with the same pipeline based on your needs.
|
</hfoption>
|
||||||
|
<hfoption id="AutoModel">
|
||||||
|
|
||||||
Bonus: You can replace `"cuda"` with `"mps"` to make it seamlessly work on Macs.
|
```py
|
||||||
|
# pip install datasets
|
||||||
|
import torch
|
||||||
|
from datasets import load_dataset
|
||||||
|
from transformers import AutoProcessor, WhisperForConditionalGeneration
|
||||||
|
|
||||||
## Usage tips
|
processor = AutoProcessor.from_pretrained(
|
||||||
|
"openai/whisper-large-v3-turbo",
|
||||||
|
)
|
||||||
|
model = WhisperForConditionalGeneration.from_pretrained(
|
||||||
|
"openai/whisper-large-v3-turbo",
|
||||||
|
torch_dtype=torch.float16,
|
||||||
|
device_map="auto",
|
||||||
|
attn_implementation="sdpa"
|
||||||
|
).to("cuda")
|
||||||
|
|
||||||
- The model usually performs well without requiring any finetuning.
|
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
|
||||||
- The architecture follows a classic encoder-decoder architecture, which means that it relies on the [`~generation.GenerationMixin.generate`] function for inference.
|
audio_sample = ds[0]["audio"]
|
||||||
- One can use [`WhisperProcessor`] to prepare audio for the model, and decode the predicted ID's back into text.
|
|
||||||
|
|
||||||
- To convert the model and the processor, we recommend using the following:
|
input_features = processor(
|
||||||
|
audio_sample["array"],
|
||||||
|
sampling_rate=audio_sample["sampling_rate"],
|
||||||
|
return_tensors="pt"
|
||||||
|
).input_features
|
||||||
|
input_features = input_features.to("cuda", dtype=torch.float16)
|
||||||
|
|
||||||
```bash
|
predicted_ids = model.generate(input_features, cache_implementation="static")
|
||||||
python src/transformers/models/whisper/convert_openai_to_hf.py --checkpoint_path "" --pytorch_dump_folder_path "Arthur/whisper-3" --convert_preprocessor True
|
transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
|
||||||
```
|
transcription[0]
|
||||||
The script will automatically determine all necessary parameters from the OpenAI checkpoint. A `tiktoken` library needs to be installed
|
|
||||||
to perform the conversion of the OpenAI tokenizer to the `tokenizers` version.
|
|
||||||
|
|
||||||
## Inference
|
|
||||||
|
|
||||||
Here is a step-by-step guide to transcribing an audio sample using a pre-trained Whisper model:
|
|
||||||
|
|
||||||
```python
|
|
||||||
>>> from datasets import load_dataset
|
|
||||||
>>> from transformers import WhisperProcessor, WhisperForConditionalGeneration
|
|
||||||
|
|
||||||
>>> # Select an audio file and read it:
|
|
||||||
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
|
|
||||||
>>> audio_sample = ds[0]["audio"]
|
|
||||||
|
|
||||||
>>> # Load the Whisper model in Hugging Face format:
|
|
||||||
>>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
|
|
||||||
>>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
|
|
||||||
|
|
||||||
>>> # Use the model and processor to transcribe the audio:
|
|
||||||
>>> input_features = processor(
|
|
||||||
... audio_sample["array"], sampling_rate=audio_sample["sampling_rate"], return_tensors="pt"
|
|
||||||
... ).input_features
|
|
||||||
|
|
||||||
>>> # Generate token ids
|
|
||||||
>>> predicted_ids = model.generate(input_features)
|
|
||||||
|
|
||||||
>>> # Decode token ids to text
|
|
||||||
>>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
|
|
||||||
|
|
||||||
>>> transcription[0]
|
|
||||||
' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
|
|
||||||
```
|
```
|
||||||
|
|
||||||
Whisper is compatible with the following optimisations for both short and long-form generation:
|
</hfoption>
|
||||||
- [PyTorch Scaled Dot Product Attention (SDPA)](../perf_infer_gpu_one#pytorch-scaled-dot-product-attention): flash attention and memory-efficient attention kernels. Enabled by default for `torch>=2.1.1`.
|
</hfoptions>
|
||||||
- [Flash Attention 2](../perf_infer_gpu_one#flashattention-2): improved implementation of flash attention through better parallelism and work partitioning.
|
|
||||||
- [torch.compile](../llm_optims#static-kv-cache-and-torchcompile): JIT-compile the forward pass to dispatch to efficient fused kernels.
|
|
||||||
|
|
||||||
As an example, the following codesnippet enables SDPA and `torch.compile` for up to 5x faster inference:
|
## Notes
|
||||||
|
|
||||||
```python
|
- Whisper relies on [`~GenerationMixin.generate`] for inference.
|
||||||
>>> from datasets import load_dataset
|
- The [`WhisperProcessor`] can be used for preparing audio and decoding predicted ids back into text.
|
||||||
>>> from transformers import WhisperProcessor, WhisperForConditionalGeneration
|
|
||||||
|
|
||||||
>>> # Select an audio file and read it:
|
|
||||||
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
|
|
||||||
>>> audio_sample = ds[0]["audio"]
|
|
||||||
|
|
||||||
>>> # Load the Whisper model with SDPA attention
|
|
||||||
>>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
|
|
||||||
>>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", attn_implementation="sdpa")
|
|
||||||
|
|
||||||
>>> # Enable static cache and compile the forward pass
|
|
||||||
>>> model.generation_config.cache_implementation = "static"
|
|
||||||
>>> model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
|
|
||||||
|
|
||||||
>>> # Use the model and processor to transcribe the audio:
|
|
||||||
>>> input_features = processor(
|
|
||||||
... audio_sample["array"], sampling_rate=audio_sample["sampling_rate"], return_tensors="pt"
|
|
||||||
... ).input_features
|
|
||||||
|
|
||||||
>>> # Compile the forward pass
|
|
||||||
>>> for _ in range(2):
|
|
||||||
>>> model.generate(input_features)
|
|
||||||
|
|
||||||
>>> # Generate token ids using compiled graph (fast!)
|
|
||||||
>>> predicted_ids = model.generate(input_features)
|
|
||||||
|
|
||||||
>>> # Decode token ids to text
|
|
||||||
>>> transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
|
|
||||||
|
|
||||||
>>> transcription[0]
|
|
||||||
' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.'
|
|
||||||
```
|
|
||||||
|
|
||||||
For more details on each optimisation, refer to the documentation linked above.
|
|
||||||
|
|
||||||
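The following is a minimal sketch of steering [`~GenerationMixin.generate`] toward a specific language and task with a multilingual checkpoint; the checkpoint, dataset, and language below are only illustrative examples, not a prescribed setup.

```py
from datasets import load_dataset
from transformers import AutoProcessor, WhisperForConditionalGeneration

# a small multilingual checkpoint, chosen here purely for illustration
processor = AutoProcessor.from_pretrained("openai/whisper-small")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")

ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
sample = ds[0]["audio"]
input_features = processor(
    sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt"
).input_features

# pin the language and task instead of relying on automatic language detection
predicted_ids = model.generate(input_features, language="english", task="transcribe")
print(processor.batch_decode(predicted_ids, skip_special_tokens=True)[0])
```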
## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Whisper. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.

- [Fine-tune Whisper](https://huggingface.co/blog/fine-tune-whisper) on your own dataset for better downstream performance.
- [Distil-Whisper](https://huggingface.co/distil-whisper): up to 6x faster, 2x smaller distilled Whisper models for English. We release the [model checkpoints](https://huggingface.co/distil-whisper) and [distillation code](https://github.com/huggingface/distil-whisper).
- A fork with a script to [convert a Whisper model in Hugging Face format to OpenAI format](https://github.com/zuazo-forks/transformers/blob/convert_hf_to_openai/src/transformers/models/whisper/convert_hf_to_openai.py). 🌎 Usage example:

  ```bash
  pip install -U openai-whisper
  python convert_hf_to_openai.py \
      --checkpoint openai/whisper-tiny \
      --whisper_dump_path whisper-tiny-openai.pt
  ```
## WhisperConfig

@@ -205,9 +139,6 @@ python convert_hf_to_openai.py \
    - batch_decode
    - decode

<frameworkcontent>
<pt>

## WhisperModel

[[autodoc]] WhisperModel
@@ -230,9 +161,6 @@ python convert_hf_to_openai.py \
[[autodoc]] WhisperForAudioClassification
    - forward

</pt>
<tf>

## TFWhisperModel

[[autodoc]] TFWhisperModel
@@ -243,9 +171,6 @@ python convert_hf_to_openai.py \
[[autodoc]] TFWhisperForConditionalGeneration
    - call

</tf>
<jax>

## FlaxWhisperModel

[[autodoc]] FlaxWhisperModel
@@ -260,7 +185,3 @@ python convert_hf_to_openai.py \

[[autodoc]] FlaxWhisperForAudioClassification
    - __call__

</jax>
</frameworkcontent>
@@ -18,6 +18,7 @@ rendered properly in your Markdown viewer.

<div class="flex flex-wrap space-x-1">
    <img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
    <img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
    <img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
</div>
@@ -78,7 +78,7 @@ class RobertaModel(BertModel):

```py
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)


# The model heads now only need to redefine the model inside to `RobertaModel`
class RobertaForMaskedLM(BertForMaskedLM):
    def __init__(self, config):
```
@@ -546,7 +546,7 @@ This makes it very easy to switch decorators and makes it explicit that the only

## Docstring variables

If an object is defined in both the modular file and the modeling file it inherits from, the modular definition takes precedence, except for assignments containing the pattern `DOCSTRING`. These variables are typically used in `MODEL_START_DOCSTRING` and `MODEL_INPUT_DOCSTRING` in the modeling files. They are big blocks of docstrings and the linter rewrites the names everywhere. For this reason, assignments containing the `DOCSTRING` variable can reuse the definition found in the source file without copying the whole docstring, by simply setting the variable to `None` in the modular file.

This is very useful if you need the variable reference somewhere but you don't want to clutter the modular file with docstrings which are always the same. The example code below allows you to automatically use the same docstrings from [Mistral](./model_doc/mistral) in [Starcoder2](./model_doc/starcoder2).

@@ -561,6 +561,8 @@ class Starcoder2Model(MistralModel):
    ...

Setting the variable to anything other than `None` will override the docstring, so you can customize the docstrings if needed.
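As a rough sketch of the `None` pattern described above (the file name, import path, and variable name are assumed here for illustration, not copied from the actual Starcoder2 modular file):

```py
# modular_starcoder2.py (illustrative sketch)
from transformers.models.mistral.modeling_mistral import MistralModel

# Reuse the docstring defined in the source (Mistral) modeling file;
# the linter fills in the real text when generating the modeling file.
STARCODER2_INPUTS_DOCSTRING = None

class Starcoder2Model(MistralModel):
    ...
```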
## Special naming

The linter automatically renames everything when inheriting from a class. For consistency, you should always use the same class name prefix when inheriting from different classes from the same file.

@@ -586,7 +588,7 @@ We detected multiple prefix names when inheriting from transformers.models.llama

If there are automatic dependencies with a prefix, but you want another one, explicitly rename the classes locally with a `pass` class as shown in the following.

```py
class Emu3TextMLP(LlamaMLP):
    pass
```
@@ -44,11 +44,6 @@ import os

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# enable tensor parallelism
model = AutoModelForCausalLM.from_pretrained(
```

@@ -59,7 +54,7 @@ model = AutoModelForCausalLM.from_pretrained(

```py
# prepare input tokens
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
prompt = "Can I help"
inputs = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)

# distributed run
outputs = model(inputs)
```

@@ -71,6 +66,13 @@ Launch the inference script above on [torchrun](https://pytorch.org/docs/stable/

```bash
torchrun --nproc-per-node 4 demo.py
```

For CPU, bind each rank to a different socket. For example, if you are using Intel 4th Gen Xeon:

```bash
export OMP_NUM_THREADS=56
numactl -C 0-55 -m 0 torchrun --nnodes=2 --node_rank=0 --master_addr="127.0.0.1" --master_port=29500 --nproc-per-node 1 demo.py & numactl -C 56-111 -m 1 torchrun --nnodes=2 --node_rank=1 --master_addr="127.0.0.1" --master_port=29500 --nproc-per-node 1 demo.py & wait
```

The CPU benchmark data will be released soon.

You can benefit from considerable speed ups for inference, especially for inputs with large batch size or long sequences.

For a single forward pass on [Llama](./model_doc/llama) with a sequence length of 512 and various batch sizes, you can expect the following speed ups.
@@ -29,8 +29,8 @@ import requests

```py
processor = AutoProcessor.from_pretrained("google/paligemma-3b-pt-224")

prompt = "answer en Where is the cat standing?"
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(text=prompt, images=image, return_tensors="pt")
```
@@ -20,7 +20,10 @@ rendered properly in your Markdown viewer.

[LLM.int8()](https://hf.co/papers/2208.07339) is a quantization method that aims to make large language model inference more accessible without significant degradation. Unlike naive 8-bit quantization, which can result in loss of critical information and accuracy, LLM.int8() dynamically adapts to ensure sensitive components of the computation retain higher precision when needed.

QLoRA, or 4-bit quantization, compresses a model even further to 4-bits and inserts a small set of trainable low-rank adaptation (LoRA) weights to allow training.
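As a minimal sketch of both modes in Transformers (the checkpoint id is only an example, and the install command follows below):

```py
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# LLM.int8(): 8-bit weights with outlier-aware mixed precision
int8_config = BitsAndBytesConfig(load_in_8bit=True)

# QLoRA-style 4-bit NF4 weights with bf16 compute
nf4_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",  # example checkpoint; swap in any causal LM
    quantization_config=nf4_config,  # or int8_config
    device_map="auto",
)
```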
> **Note:** For a user-friendly quantization experience, you can use the `bitsandbytes` [community space](https://huggingface.co/spaces/bnb-community/bnb-my-repo).

Run the command below to install bitsandbytes.
@@ -40,10 +40,20 @@ Use the Space below to help you pick a quantization method depending on your har

| [VPTQ](./vptq) | 🔴 | 🔴 | 🟢 | 🟡 | 🔴 | 🔴 | 🟢 | 1/8 | 🔴 | 🟢 | 🟢 | https://github.com/microsoft/VPTQ |
| [FINEGRAINED_FP8](./finegrained_fp8) | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 🔴 | 🔴 | 8 | 🔴 | 🟢 | 🟢 | |
| [SpQR](./spqr) | 🔴 | 🔴 | 🟢 | 🔴 | 🔴 | 🔴 | 🟢 | 3 | 🔴 | 🟢 | 🟢 | https://github.com/Vahe1994/SpQR/ |
| [Quark](./quark) | 🔴 | 🟢 | 🟢 | 🟢 | 🟢 | 🟢 | ? | 2/4/6/8/9/16 | 🔴 | 🔴 | 🟢 | https://quark.docs.amd.com/latest/ |

## Resources

If you are new to quantization, we recommend checking out these beginner-friendly quantization courses in collaboration with DeepLearning.AI.

* [Quantization Fundamentals with Hugging Face](https://www.deeplearning.ai/short-courses/quantization-fundamentals-with-hugging-face/)
* [Quantization in Depth](https://www.deeplearning.ai/short-courses/quantization-in-depth)

## User-Friendly Quantization Tools

If you are looking for a user-friendly quantization experience, you can use the following community spaces and notebooks:

* [Bitsandbytes Space](https://huggingface.co/spaces/bnb-community/bnb-my-repo)
* [GGUF Space](https://huggingface.co/spaces/ggml-org/gguf-my-repo)
* [MLX Space](https://huggingface.co/spaces/mlx-community/mlx-my-repo)
* [AutoQuant Notebook](https://colab.research.google.com/drive/1b6nqC7UZVt8bx4MksX7s656GXPM-eWw4?usp=sharing#scrollTo=ZC9Nsr9u5WhN)
@@ -26,7 +26,7 @@ Install Quanto with the following command.

```bash
pip install optimum-quanto accelerate transformers
```

Quantize a model by creating a [`QuantoConfig`] and specifying the `weights` parameter to quantize to. This works for any model in any modality as long as it contains [torch.nn.Linear](https://pytorch.org/docs/stable/generated/torch.nn.Linear.html) layers.

> [!TIP]
> The Transformers integration only supports weight quantization. Use the Quanto library directly if you need activation quantization, calibration, or QAT.
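For instance, a minimal sketch of weight-only int8 quantization with [`QuantoConfig`] (the checkpoint is just an example):

```py
from transformers import AutoModelForCausalLM, QuantoConfig

# quantize the linear layer weights to int8 at load time
quant_config = QuantoConfig(weights="int8")

model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-350m",  # example checkpoint; any model with nn.Linear layers works
    quantization_config=quant_config,
    device_map="auto",
)
```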
docs/source/en/quantization/quark.md (new file, 84 lines)
@@ -0,0 +1,84 @@

<!--Copyright 2025 Advanced Micro Devices, Inc. and The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Quark

[Quark](https://quark.docs.amd.com/latest/) is a deep learning quantization toolkit designed to be agnostic to specific data types, algorithms, and hardware. Different pre-processing strategies, algorithms and data-types can be combined in Quark.

The PyTorch support integrated through 🤗 Transformers primarily targets AMD CPUs and GPUs, and is primarily meant to be used for evaluation purposes. For example, it is possible to use [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) with the 🤗 Transformers backend and seamlessly evaluate a wide range of models quantized through Quark.
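For example, an evaluation run with lm-evaluation-harness might look like the following sketch (the checkpoint, task, and batch size are arbitrary choices, not a recommended configuration):

```bash
pip install lm-eval
lm_eval --model hf \
    --model_args pretrained=EmbeddedLLM/Llama-3.1-8B-Instruct-w_fp8_per_channel_sym \
    --tasks wikitext \
    --batch_size 8
```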
Users interested in Quark can refer to its [documentation](https://quark.docs.amd.com/latest/) to get started quantizing models and using them in supported open-source libraries!

Although Quark has its own checkpoint / [configuration format](https://huggingface.co/amd/Llama-3.1-8B-Instruct-FP8-KV-Quark-test/blob/main/config.json#L26), the library also supports producing models with a serialization layout compliant with other quantization/runtime implementations ([AutoAWQ](https://huggingface.co/docs/transformers/quantization/awq), [native fp8 in 🤗 Transformers](https://huggingface.co/docs/transformers/quantization/finegrained_fp8)).

To be able to load Quark quantized models in Transformers, the library first needs to be installed:

```bash
pip install amd-quark
```

## Support matrix

Models quantized through Quark support a large range of features that can be combined together. All quantized models, independently of their configuration, can seamlessly be reloaded through `PretrainedModel.from_pretrained`.

The table below shows a few features supported by Quark:

| **Feature** | **Supported subset in Quark** |
|---------------------------------|-----------------------------------------------------------------------------------------------------------|
| Data types | int8, int4, int2, bfloat16, float16, fp8_e5m2, fp8_e4m3, fp6_e3m2, fp6_e2m3, fp4, OCP MX, MX6, MX9, bfp16 |
| Pre-quantization transformation | SmoothQuant, QuaRot, SpinQuant, AWQ |
| Quantization algorithm | GPTQ |
| Supported operators | ``nn.Linear``, ``nn.Conv2d``, ``nn.ConvTranspose2d``, ``nn.Embedding``, ``nn.EmbeddingBag`` |
| Granularity | per-tensor, per-channel, per-block, per-layer, per-layer type |
| KV cache | fp8 |
| Activation calibration | MinMax / Percentile / MSE |
| Quantization strategy | weight-only, static, dynamic, with or without output quantization |

## Models on Hugging Face Hub

Public models using Quark native serialization can be found at https://huggingface.co/models?other=quark.

Although Quark also supports [models using `quant_method="fp8"`](https://huggingface.co/models?other=fp8) and [models using `quant_method="awq"`](https://huggingface.co/models?other=awq), Transformers instead loads these models through [AutoAWQ](https://huggingface.co/docs/transformers/quantization/awq) or the [native fp8 support in 🤗 Transformers](https://huggingface.co/docs/transformers/quantization/finegrained_fp8).

## Using Quark models in Transformers

Here is an example of how one can load a Quark model in Transformers:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "EmbeddedLLM/Llama-3.1-8B-Instruct-w_fp8_per_channel_sym"
model = AutoModelForCausalLM.from_pretrained(model_id)
model = model.to("cuda")

print(model.model.layers[0].self_attn.q_proj)
# QParamsLinear(
#   (weight_quantizer): ScaledRealQuantizer()
#   (input_quantizer): ScaledRealQuantizer()
#   (output_quantizer): ScaledRealQuantizer()
# )

tokenizer = AutoTokenizer.from_pretrained(model_id)
inp = tokenizer("Where is a good place to cycle around Tokyo?", return_tensors="pt")
inp = inp.to("cuda")

res = model.generate(**inp, min_new_tokens=50, max_new_tokens=100)

print(tokenizer.batch_decode(res)[0])
# <|begin_of_text|>Where is a good place to cycle around Tokyo? There are several places in Tokyo that are suitable for cycling, depending on your skill level and interests. Here are a few suggestions:
# 1. Yoyogi Park: This park is a popular spot for cycling and has a wide, flat path that's perfect for beginners. You can also visit the Meiji Shrine, a famous Shinto shrine located in the park.
# 2. Imperial Palace East Garden: This beautiful garden has a large, flat path that's perfect for cycling. You can also visit the
```
Some files were not shown because too many files have changed in this diff.