Mirror of https://github.com/huggingface/transformers.git (synced 2025-10-21 17:48:57 +08:00)
Compare commits
251 Commits
v4.56.0...refactor-t
SHA1 | Author | Date | |
---|---|---|---|
1b406864d6 | |||
ddfa3d4402 | |||
46ea7e613d | |||
ebdc17b8e5 | |||
e2dbde280f | |||
155f7e2e62 | |||
61eff450d3 | |||
5f6e278a51 | |||
4df2529d79 | |||
5ac3c5171a | |||
d9d7f6a6b9 | |||
738b223f57 | |||
dd7ac4cd59 | |||
2ce35a248f | |||
6e51ac31ef | |||
9378f874c1 | |||
7cf1f5ced0 | |||
f6104189fd | |||
c532575795 | |||
564fde14f1 | |||
5748352c27 | |||
438343d93f | |||
449da6bb30 | |||
3bb1b4867c | |||
58e13b9f12 | |||
529d3a2b06 | |||
a2ac4de8b0 | |||
8e837f6ae2 | |||
eb04363a0d | |||
ecc1d778ce | |||
c5553b4120 | |||
14f01aee39 | |||
26b65fb516 | |||
66f97d3f64 | |||
3853bfe4d5 | |||
6cade29278 | |||
48a5565179 | |||
89949c5d2d | |||
c830fc1207 | |||
f6999b00c3 | |||
8428c7b9c8 | |||
ddd4caf066 | |||
b82cd1c240 | |||
6e50a8afb2 | |||
cccef4be91 | |||
beb09cbd5a | |||
d4af0d9f03 | |||
3b3f6cd0c1 | |||
88ba0f107e | |||
270da89708 | |||
df03fc1f9c | |||
96bc19bcdf | |||
d0af4269ec | |||
65f9ede359 | |||
0c1839d609 | |||
3688a977d0 | |||
087775d10e | |||
1aff033ec9 | |||
65adc3aaa3 | |||
8e1a12bbee | |||
21c8379fb0 | |||
5af248b3e3 | |||
20ee3a73f0 | |||
2141a5b764 | |||
2a83792165 | |||
04d1c8f3d4 | |||
ff26fe8302 | |||
6254bb4a68 | |||
e674e9dadb | |||
0957999f7f | |||
5e9ec59d0c | |||
3442b2f300 | |||
c0dbe095b0 | |||
fc5f9105da | |||
96d3795cfc | |||
f5e1641857 | |||
ada64ce452 | |||
93f810e6fa | |||
c65fea0b92 | |||
9c804f7ec4 | |||
02ea2b3433 | |||
d42e96a2a7 | |||
6eb3255842 | |||
e682f90f60 | |||
8d8459132a | |||
291772b6b5 | |||
8502b41bf1 | |||
f384bb8ad5 | |||
4cb41ad2a2 | |||
ef053939ca | |||
98a8078127 | |||
77aa35ee9c | |||
797859c9b8 | |||
6e69b60806 | |||
827b65c42c | |||
5e2e77fb45 | |||
c81f426f9a | |||
cf084f5b40 | |||
dfae7dd98d | |||
c264c0ee7e | |||
895b3ebe41 | |||
6d369124ad | |||
0f1b128d33 | |||
02f1d7c091 | |||
de01a22aff | |||
ec532f20fb | |||
df67cd35f0 | |||
549ba5b8b6 | |||
dae1ccfb98 | |||
7d57b31e16 | |||
3378e7dabf | |||
e5ecb03c92 | |||
abbed7010b | |||
75202b0928 | |||
7401cfa57c | |||
8ab2448707 | |||
6c9f412105 | |||
0997c2f2ab | |||
a72e5a4b9d | |||
a5ecd94a3f | |||
08edec9f7d | |||
c52889bd51 | |||
3340ccbd40 | |||
b9282355be | |||
79fdbf2a4a | |||
37c14430c9 | |||
d09fdf5e52 | |||
d33c189e5a | |||
71ac7ea048 | |||
7aaef98cbe | |||
de5cbe8b79 | |||
1cdbbb3e9d | |||
ed100211cb | |||
82d66e5dd0 | |||
a871f6f58d | |||
aee5000f16 | |||
126264d015 | |||
5a468e56b7 | |||
e8db153599 | |||
fd2a29d468 | |||
bb8e9cd675 | |||
a9b313a0c2 | |||
2077f17547 | |||
dc262ee6f5 | |||
9ab6078323 | |||
2a1eb5b508 | |||
7b8d40ea7a | |||
def7558f74 | |||
44b3888d2a | |||
3f7bda4209 | |||
bb45d3631e | |||
12b8e10dbf | |||
6b232618b6 | |||
948bc0fa34 | |||
828044cadb | |||
e9d6a6907b | |||
96a5774f2e | |||
c76387e580 | |||
21f09032db | |||
b62e5b6051 | |||
313effa7ad | |||
f3211b5db7 | |||
a2a8a3ca1e | |||
4e195f1949 | |||
93df343def | |||
89e103c15e | |||
a2fffa505d | |||
4a88e81532 | |||
9db11b728b | |||
acd820561f | |||
16b821c542 | |||
519c2524af | |||
586dc5d06e | |||
ad2da3ea83 | |||
e39f222096 | |||
d8f670583e | |||
4cbca0d1af | |||
9a6c6568db | |||
87f38dbfce | |||
5b0c01b5e2 | |||
1f3cc935cc | |||
669230a86f | |||
91b34be9cf | |||
25b4a0d8ae | |||
30a4b8707d | |||
7f92e1f91a | |||
ca9b36a9c1 | |||
d40e7ea52d | |||
34595cf296 | |||
f22ec7f174 | |||
459c1fa47a | |||
afd1393df1 | |||
68b9cbb7f5 | |||
55676d7d4c | |||
b67608f587 | |||
30d66dc3bc | |||
3f40ebf620 | |||
a8f400367d | |||
57f5668d0b | |||
238a8274b4 | |||
f2416b4fd2 | |||
5ea5c8179b | |||
fe1a9e0dba | |||
5e2e496149 | |||
03708ccf6f | |||
c485c52db4 | |||
2bbf98a83d | |||
acc968c581 | |||
cb54ce4ec6 | |||
0f5e45a6d1 | |||
e690fe61e8 | |||
00a8364271 | |||
ed49376a42 | |||
d47ad91c3c | |||
a470f21396 | |||
37103d6f22 | |||
4f542052b9 | |||
8c60a7c385 | |||
97266dfd50 | |||
91be12bdc6 | |||
bbd8085b0b | |||
b2b1c30b1b | |||
8a091cc07c | |||
514b3e81b7 | |||
b3655507bb | |||
4da03d7f57 | |||
abf5900a76 | |||
3beac9c659 | |||
21e708c8fd | |||
c99d43e6ec | |||
3c3dac3c12 | |||
2b71c5b7a6 | |||
8e0b2c8baf | |||
a543095c99 | |||
8564e210ca | |||
564be6d895 | |||
3bccb02616 | |||
90953d5bc1 | |||
2537ed4477 | |||
48ebae975e | |||
db6821b79c | |||
6546f288a1 | |||
cfed99d310 | |||
1d742644c0 | |||
0b24507379 | |||
b0db5a02f3 | |||
1363fceeec | |||
36fddebcee | |||
2d3b8863e8 | |||
ce48e9cac0 | |||
155fd926d2 |
@ -177,11 +177,29 @@ class CircleCIJob:
|
||||
"command": f"TESTS=$(circleci tests split --split-by=timings {self.job_name}_test_list.txt) && echo $TESTS > splitted_tests.txt && echo $TESTS | tr ' ' '\n'" if self.parallelism else f"awk '{{printf \"%s \", $0}}' {self.job_name}_test_list.txt > splitted_tests.txt"
|
||||
}
|
||||
},
|
||||
{"run": {"name": "fetch hub objects before pytest", "command": "python3 utils/fetch_hub_objects_for_ci.py"}},
|
||||
# During the CircleCI docker images build time, we might already (or not) download the data.
|
||||
# If it's done already, the files are inside the directory `/test_data/`.
|
||||
{"run": {"name": "fetch hub objects before pytest", "command": "cp -r /test_data/* . 2>/dev/null || true; python3 utils/fetch_hub_objects_for_ci.py"}},
|
||||
{"run": {
|
||||
"name": "Run tests",
|
||||
"command": f"({timeout_cmd} python3 -m pytest {marker_cmd} -n {self.pytest_num_workers} {junit_flags} {repeat_on_failure_flags} {' '.join(pytest_flags)} $(cat splitted_tests.txt) | tee tests_output.txt)"}
|
||||
},
|
||||
{"run":
|
||||
{
|
||||
"name": "Check for test crashes",
|
||||
"when": "always",
|
||||
"command": """if [ ! -f tests_output.txt ]; then
|
||||
echo "ERROR: tests_output.txt does not exist - tests may not have run properly"
|
||||
exit 1
|
||||
elif grep -q "crashed and worker restarting disabled" tests_output.txt; then
|
||||
echo "ERROR: Worker crash detected in test output"
|
||||
echo "Found: crashed and worker restarting disabled"
|
||||
exit 1
|
||||
else
|
||||
echo "Tests output file exists and no worker crashes detected"
|
||||
fi"""
|
||||
},
|
||||
},
|
||||
{"run": {"name": "Expand to show skipped tests", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --skip"}},
|
||||
{"run": {"name": "Failed tests: show reasons", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --fail"}},
|
||||
{"run": {"name": "Errors", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --errors"}},
|
||||
@ -246,7 +264,6 @@ custom_tokenizers_job = CircleCIJob(
|
||||
docker_image=[{"image": "huggingface/transformers-custom-tokenizers"}],
|
||||
)
|
||||
|
||||
|
||||
examples_torch_job = CircleCIJob(
|
||||
"examples_torch",
|
||||
additional_env={"OMP_NUM_THREADS": 8},
|
||||
@ -270,19 +287,6 @@ hub_job = CircleCIJob(
|
||||
resource_class="medium",
|
||||
)
|
||||
|
||||
|
||||
onnx_job = CircleCIJob(
|
||||
"onnx",
|
||||
docker_image=[{"image":"huggingface/transformers-torch-tf-light"}],
|
||||
install_steps=[
|
||||
"uv pip install .[testing,sentencepiece,onnxruntime,vision,rjieba]",
|
||||
],
|
||||
pytest_options={"k onnx": None},
|
||||
pytest_num_workers=1,
|
||||
resource_class="small",
|
||||
)
|
||||
|
||||
|
||||
exotic_models_job = CircleCIJob(
|
||||
"exotic_models",
|
||||
docker_image=[{"image":"huggingface/transformers-exotic-models"}],
|
||||
@ -290,7 +294,6 @@ exotic_models_job = CircleCIJob(
|
||||
pytest_options={"durations": 100},
|
||||
)
|
||||
|
||||
|
||||
repo_utils_job = CircleCIJob(
|
||||
"repo_utils",
|
||||
docker_image=[{"image":"huggingface/transformers-consistency"}],
|
||||
@ -298,7 +301,6 @@ repo_utils_job = CircleCIJob(
|
||||
resource_class="large",
|
||||
)
|
||||
|
||||
|
||||
non_model_job = CircleCIJob(
|
||||
"non_model",
|
||||
docker_image=[{"image": "huggingface/transformers-torch-light"}],
|
||||
@ -334,7 +336,7 @@ doc_test_job = CircleCIJob(
|
||||
pytest_num_workers=1,
|
||||
)
|
||||
|
||||
REGULAR_TESTS = [torch_job, hub_job, onnx_job, tokenization_job, processor_job, generate_job, non_model_job] # fmt: skip
|
||||
REGULAR_TESTS = [torch_job, hub_job, tokenization_job, processor_job, generate_job, non_model_job] # fmt: skip
|
||||
EXAMPLES_TESTS = [examples_torch_job]
|
||||
PIPELINE_TESTS = [pipelines_torch_job]
|
||||
REPO_UTIL_TESTS = [repo_utils_job]
|
||||
|
28 .github/ISSUE_TEMPLATE/bug-report.yml (vendored)
@ -36,19 +36,23 @@ body:
Models:

- text models: @ArthurZucker
- vision models: @amyeroberts, @qubvel
- speech models: @eustlb
- text models: @ArthurZucker @Cyrilvallez
- vision models: @yonigozlan @molbap
- audio models: @eustlb @ebezzam @vasqu
- multimodal models: @zucchini-nlp
- graph models: @clefourrier

Library:

- flax: @gante and @Rocketknight1
- generate: @zucchini-nlp (visual-language models) or @gante (all others)
- continuous batching: @remi-or @ArthurZucker @McPatate
- pipelines: @Rocketknight1
- tensorflow: @gante and @Rocketknight1
- tokenizers: @ArthurZucker and @itazap
- trainer: @zach-huggingface @SunMarc
- attention: @vasqu @ArthurZucker @CyrilVallez
- model loading (from pretrained, etc): @CyrilVallez
- distributed: @3outeille @ArthurZucker @S1ro1
- CIs: @ydshieh

Integrations:

@ -56,6 +60,7 @@ body:
- ray/raytune: @richardliaw, @amogkam
- Big Model Inference: @SunMarc
- quantization (bitsandbytes, autogpt): @SunMarc @MekkCyber
- kernels: @MekkCyber @drbh

Devices/Backends:

@ -69,19 +74,6 @@ body:
- for issues with a model, report at https://discuss.huggingface.co/ and tag the model's creator.

HF projects:

- accelerate: [different repo](https://github.com/huggingface/accelerate)
- datasets: [different repo](https://github.com/huggingface/datasets)
- diffusers: [different repo](https://github.com/huggingface/diffusers)
- rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)

Maintained examples (not research project or legacy):

- Flax: @Rocketknight1
- PyTorch: See Models above and tag the person corresponding to the modality of the example.
- TensorFlow: @Rocketknight1

Research projects are not maintained and should be taken as is.

placeholder: "@Username ..."
39 .github/copilot-instructions.md (vendored, new file)
@ -0,0 +1,39 @@
# copilot-instructions.md Guide for Hugging Face Transformers

This copilot-instructions.md file provides guidance for code agents working with this codebase.

## Core Project Structure

- `/src/transformers`: This contains the core source code for the library
  - `/models`: Code for individual models. Models inherit from base classes in the root `/src/transformers` directory.
- `/tests`: This contains the core test classes for the library. These are usually inherited rather than directly run.
  - `/models`: Tests for individual models. Model tests inherit from common tests in the root `/tests` directory.
- `/docs`: This contains the documentation for the library, including guides, tutorials, and API references.

## Coding Conventions for Hugging Face Transformers

- PRs should be as brief as possible. Bugfix PRs in particular can often be only one or two lines long, and do not need large comments, docstrings or new functions in this case. Aim to minimize the size of the diff.
- When writing tests, they should be added to an existing file. The only exception is for PRs to add a new model, when a new test directory should be created for that model.
- Code style is enforced in the CI. You can install the style tools with `pip install -e .[quality]`. You can then run `make fixup` to apply style and consistency fixes to your code.

## Copying and inheritance

Many models in the codebase have similar code, but it is not shared by inheritance because we want each model file to be self-contained.
We use two mechanisms to keep this code in sync:

- "Copied from" syntax. Functions or entire classes can have a comment at the top like this: `# Copied from transformers.models.llama.modeling_llama.rotate_half` or `# Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->MT5`. These comments are actively checked by the style tools, and copies will automatically be updated when the base code is updated. If you need to update a copied function, you should either update the base function and use `make fixup` to propagate the change to all copies, or simply remove the `# Copied from` comment if that is inappropriate.
- "Modular" files. These files briefly define models by composing them using inheritance from other models. They are not meant to be used directly. Instead, the style tools automatically generate a complete modeling file, like `modeling_bert.py`, from the modular file like `modular_bert.py`. If a model has a modular file, the modeling file should never be edited directly! Instead, changes should be made in the modular file, and then you should run `make fixup` to update the modeling file automatically.

When adding new models, you should prefer `modular` style and inherit as many classes as possible from existing models.

## Testing

After making changes, you should usually run `make fixup` to ensure any copies and modular files are updated, and then test all affected models. This includes both the model you made the changes in and any other models that were updated by `make fixup`. Tests can be run with `pytest tests/models/[name]/test_modeling_[name].py`. If your changes affect code in other classes like tokenizers or processors, you should run those tests instead, like `test_processing_[name].py` or `test_tokenization_[name].py`.

In order to run tests, you may need to install dependencies. You can do this with `pip install -e .[testing]`. You will probably also need to `pip install torch accelerate` if your environment does not already have them.
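To make the `# Copied from` convention described above concrete, here is a minimal, hypothetical sketch of how such a marker looks inside a modeling file. The module path in the comment is copied verbatim from the examples above; the surrounding file name and the function body are simplified illustrations, not the library's exact implementation.

```python
# Hypothetical excerpt from a modeling file such as
# src/transformers/models/my_model/modeling_my_model.py.
import torch


# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input (simplified body for illustration)."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)
```

Because of the marker, the style tools compare this copy against the referenced source and regenerate it when the source changes; the `with T5->MT5` form additionally rewrites the name prefix when the copy is regenerated.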
82 .github/workflows/benchmark_v2.yml (vendored, new file)
@ -0,0 +1,82 @@
|
||||
name: Benchmark v2 Framework
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
runner:
|
||||
description: 'GH Actions runner group to use'
|
||||
required: true
|
||||
type: string
|
||||
commit_sha:
|
||||
description: 'Commit SHA to benchmark'
|
||||
required: false
|
||||
type: string
|
||||
default: ''
|
||||
upload_to_hub:
|
||||
description: 'Uploading results to a HuggingFace Dataset'
|
||||
required: false
|
||||
type: string
|
||||
default: 'false'
|
||||
run_id:
|
||||
description: 'Custom run ID for organizing results (auto-generated if not provided)'
|
||||
required: false
|
||||
type: string
|
||||
default: ''
|
||||
benchmark_repo_id:
|
||||
description: 'HuggingFace Dataset to upload results to (e.g., "org/benchmark-results")'
|
||||
required: false
|
||||
type: string
|
||||
default: ''
|
||||
|
||||
env:
|
||||
HF_HOME: /mnt/cache
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
# For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access.
|
||||
# This token is created under the bot `hf-transformers-bot`.
|
||||
HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
|
||||
|
||||
jobs:
|
||||
benchmark-v2:
|
||||
name: Benchmark v2
|
||||
runs-on: ${{ inputs.runner }}
|
||||
if: |
|
||||
(github.event_name == 'pull_request' && contains( github.event.pull_request.labels.*.name, 'run-benchmark')) ||
|
||||
(github.event_name == 'schedule')
|
||||
container:
|
||||
image: huggingface/transformers-pytorch-gpu
|
||||
options: --gpus all --privileged --ipc host --shm-size "16gb"
|
||||
steps:
|
||||
- name: Get repo
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ inputs.commit_sha || github.sha }}
|
||||
|
||||
- name: Install benchmark dependencies
|
||||
run: |
|
||||
python3 -m pip install -r benchmark_v2/requirements.txt
|
||||
|
||||
- name: Reinstall transformers in edit mode
|
||||
run: |
|
||||
python3 -m pip uninstall -y transformers
|
||||
python3 -m pip install -e ".[torch]"
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
run: |
|
||||
python3 -m pip list
|
||||
python3 -c "import torch; print(f'PyTorch version: {torch.__version__}')"
|
||||
python3 -c "import torch; print(f'CUDA available: {torch.cuda.is_available()}')"
|
||||
python3 -c "import torch; print(f'CUDA device count: {torch.cuda.device_count()}')" || true
|
||||
nvidia-smi || true
|
||||
|
||||
- name: Run benchmark v2
|
||||
working-directory: benchmark_v2
|
||||
run: |
|
||||
echo "Running benchmarks"
|
||||
python3 run_benchmarks.py \
|
||||
--commit-id '${{ inputs.commit_sha || github.sha }}' \
|
||||
--upload-to-hub '${{ inputs.upload_to_hub || false}}' \
|
||||
--run-id '${{ inputs.run_id }}' \
|
||||
--benchmark-repo-id '${{ inputs.benchmark_repo_id}}' \
|
||||
--log-level INFO
|
||||
env:
|
||||
HF_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
|
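The "Run benchmark v2" step above drives `run_benchmarks.py` through a handful of CLI flags. The following is a minimal, hypothetical Python sketch of how such a command-line interface could be declared; the flag names are taken from the invocation in the workflow, while the defaults and help text are assumptions (the real parser lives in `benchmark_v2/run_benchmarks.py`).

```python
# Hypothetical sketch of the CLI consumed by the workflow step above.
import argparse


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Run transformer benchmarks and optionally upload results.")
    parser.add_argument("--commit-id", type=str, default="", help="Commit SHA being benchmarked.")
    parser.add_argument("--upload-to-hub", type=str, default="false", help="Whether to upload results ('true'/'false').")
    parser.add_argument("--run-id", type=str, default="", help="Custom run ID used to organize results.")
    parser.add_argument("--benchmark-repo-id", type=str, default="", help="HuggingFace Dataset to upload results to.")
    parser.add_argument("--log-level", type=str, default="INFO", help="Logging verbosity.")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    print(args)  # placeholder: the real runner dispatches to the benchmark modules
```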
20 .github/workflows/benchmark_v2_a10_caller.yml (vendored, new file)
@ -0,0 +1,20 @@
|
||||
name: Benchmark v2 Scheduled Runner - A10 Single-GPU
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Run daily at 16:30 UTC
|
||||
- cron: "30 16 * * *"
|
||||
pull_request:
|
||||
types: [ opened, labeled, reopened, synchronize ]
|
||||
|
||||
jobs:
|
||||
benchmark-v2-default:
|
||||
name: Benchmark v2 - Default Models
|
||||
uses: ./.github/workflows/benchmark_v2.yml
|
||||
with:
|
||||
runner: aws-g5-4xlarge-cache-use1-public-80
|
||||
commit_sha: ${{ github.sha }}
|
||||
upload_to_hub: true
|
||||
run_id: ${{ github.run_id }}
|
||||
benchmark_repo_id: hf-internal-testing/transformers-daily-benchmarks
|
||||
secrets: inherit
|
20 .github/workflows/benchmark_v2_mi325_caller.yml (vendored, new file)
@ -0,0 +1,20 @@
|
||||
name: Benchmark v2 Scheduled Runner - MI325 Single-GPU
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Run daily at 16:30 UTC
|
||||
- cron: "30 16 * * *"
|
||||
pull_request:
|
||||
types: [ opened, labeled, reopened, synchronize ]
|
||||
|
||||
jobs:
|
||||
benchmark-v2-default:
|
||||
name: Benchmark v2 - Default Models
|
||||
uses: ./.github/workflows/benchmark_v2.yml
|
||||
with:
|
||||
runner: amd-mi325-ci-1gpu
|
||||
commit_sha: ${{ github.sha }}
|
||||
upload_to_hub: true
|
||||
run_id: ${{ github.run_id }}
|
||||
benchmark_repo_id: hf-internal-testing/transformers-daily-benchmarks
|
||||
secrets: inherit
|
2 .github/workflows/build-ci-docker-images.yml (vendored)
@ -26,7 +26,7 @@ jobs:
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
file: ["quality", "consistency", "custom-tokenizers", "torch-light", "tf-light", "exotic-models", "torch-tf-light", "jax-light", "examples-torch", "examples-tf"]
|
||||
file: ["quality", "consistency", "custom-tokenizers", "torch-light", "exotic-models", "examples-torch"]
|
||||
continue-on-error: true
|
||||
|
||||
steps:
|
||||
|
@ -2,6 +2,10 @@ name: Build docker images (Nightly CI)
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
job:
|
||||
required: true
|
||||
type: string
|
||||
push:
|
||||
branches:
|
||||
- build_nightly_ci_docker_image*
|
||||
@ -12,7 +16,8 @@ concurrency:
|
||||
|
||||
jobs:
|
||||
latest-with-torch-nightly-docker:
|
||||
name: "Nightly PyTorch + Stable TensorFlow"
|
||||
name: "Nightly PyTorch"
|
||||
if: inputs.job == 'latest-with-torch-nightly-docker' || inputs.job == ''
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
@ -41,6 +46,7 @@ jobs:
|
||||
|
||||
nightly-torch-deepspeed-docker:
|
||||
name: "Nightly PyTorch + DeepSpeed"
|
||||
if: inputs.job == 'nightly-torch-deepspeed-docker' || inputs.job == ''
|
||||
runs-on:
|
||||
group: aws-g4dn-2xlarge-cache
|
||||
steps:
|
||||
|
62 .github/workflows/model_jobs.yml (vendored)
@ -12,9 +12,6 @@ on:
|
||||
slice_id:
|
||||
required: true
|
||||
type: number
|
||||
runner_map:
|
||||
required: false
|
||||
type: string
|
||||
docker:
|
||||
required: true
|
||||
type: string
|
||||
@ -25,6 +22,12 @@ on:
|
||||
required: false
|
||||
default: run_models_gpu
|
||||
type: string
|
||||
runner_type:
|
||||
required: false
|
||||
type: string
|
||||
report_repo_id:
|
||||
required: false
|
||||
type: string
|
||||
|
||||
env:
|
||||
HF_HOME: /mnt/cache
|
||||
@ -48,10 +51,12 @@ jobs:
|
||||
matrix:
|
||||
folders: ${{ fromJson(inputs.folder_slices)[inputs.slice_id] }}
|
||||
runs-on:
|
||||
group: ${{ fromJson(inputs.runner_map)[matrix.folders][inputs.machine_type] }}
|
||||
group: '${{ inputs.machine_type }}'
|
||||
container:
|
||||
image: ${{ inputs.docker }}
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
outputs:
|
||||
machine_type: ${{ steps.set_machine_type.outputs.machine_type }}
|
||||
steps:
|
||||
- name: Echo input and matrix info
|
||||
shell: bash
|
||||
@ -105,6 +110,7 @@ jobs:
|
||||
run: pip freeze
|
||||
|
||||
- name: Set `machine_type` for report and artifact names
|
||||
id: set_machine_type
|
||||
working-directory: /transformers
|
||||
shell: bash
|
||||
run: |
|
||||
@ -120,26 +126,58 @@ jobs:
|
||||
|
||||
echo "$machine_type"
|
||||
echo "machine_type=$machine_type" >> $GITHUB_ENV
|
||||
echo "machine_type=$machine_type" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Create report directory if it doesn't exist
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports
|
||||
echo "dummy" > /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports/dummy.txt
|
||||
ls -la /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports
|
||||
|
||||
- name: Run all tests on GPU
|
||||
working-directory: /transformers
|
||||
run: python3 -m pytest -rsfE -v --make-reports=${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}
|
||||
run: |
|
||||
script -q -c "PATCH_TESTING_METHODS_TO_COLLECT_OUTPUTS=yes _PATCHED_TESTING_METHODS_OUTPUT_DIR=/transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports python3 -m pytest -rsfE -v --make-reports=${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports tests/${{ matrix.folders }}" test_outputs.txt
|
||||
ls -la
|
||||
# Extract the exit code from the output file
|
||||
PYTEST_EXIT_CODE=$(tail -1 test_outputs.txt | grep "PYTEST_EXIT_CODE:" | cut -d: -f2)
|
||||
exit ${PYTEST_EXIT_CODE:-1}
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
# This step is only to show information on Github Actions log.
|
||||
# Always mark this step as successful, even if the report directory or the file `failures_short.txt` in it doesn't exist
|
||||
continue-on-error: true
|
||||
run: cat /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ matrix.folders }}_test_reports/failures_short.txt
|
||||
run: cat /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports/failures_short.txt
|
||||
|
||||
- name: Run test
|
||||
shell: bash
|
||||
- name: Captured information
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: |
|
||||
mkdir -p /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ matrix.folders }}_test_reports
|
||||
echo "hello" > /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ matrix.folders }}_test_reports/hello.txt
|
||||
echo "${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ matrix.folders }}_test_reports"
|
||||
cat /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports/captured_info.txt
|
||||
|
||||
- name: Copy test_outputs.txt
|
||||
if: ${{ always() }}
|
||||
continue-on-error: true
|
||||
run: |
|
||||
cp /transformers/test_outputs.txt /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports
|
||||
path: /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ matrix.folders }}_test_reports
|
||||
path: /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports
|
||||
|
||||
collated_reports:
|
||||
name: Collated Reports
|
||||
if: ${{ always() }}
|
||||
needs: run_models_gpu
|
||||
uses: huggingface/transformers/.github/workflows/collated-reports.yml@main
|
||||
with:
|
||||
job: run_models_gpu
|
||||
report_repo_id: ${{ inputs.report_repo_id }}
|
||||
gpu_name: ${{ inputs.runner_type }}
|
||||
machine_type: ${{ needs.run_models_gpu.outputs.machine_type }}
|
||||
secrets: inherit
|
||||
|
2 .github/workflows/self-comment-ci.yml (vendored)
@ -29,7 +29,7 @@ jobs:
|
||||
runs-on: ubuntu-22.04
|
||||
name: Get PR number
|
||||
# For security: only allow team members to run
|
||||
if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "qubvel", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1", "SunMarc", "muellerzr", "eustlb", "MekkCyber", "manueldeprada", "vasqu", "ivarflakstad", "stevhliu", "ebezzam"]'), github.actor) && (startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow')) }}
|
||||
if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "qubvel", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1", "SunMarc", "muellerzr", "eustlb", "MekkCyber", "manueldeprada", "vasqu", "ivarflakstad", "stevhliu", "ebezzam", "remi-or"]'), github.actor) && (startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow')) }}
|
||||
outputs:
|
||||
PR_NUMBER: ${{ steps.set_pr_number.outputs.PR_NUMBER }}
|
||||
steps:
|
||||
|
2 .github/workflows/self-nightly-caller.yml (vendored)
@ -22,6 +22,8 @@ jobs:
|
||||
build_nightly_torch_ci_images:
|
||||
name: Build CI Docker Images with nightly torch
|
||||
uses: ./.github/workflows/build-nightly-ci-docker-images.yml
|
||||
with:
|
||||
job: latest-with-torch-nightly-docker
|
||||
secrets: inherit
|
||||
|
||||
setup:
|
||||
|
@ -21,9 +21,9 @@ jobs:
|
||||
job: run_models_gpu
|
||||
slack_report_channel: "#amd-hf-ci"
|
||||
runner_scale_set: amd-mi355-ci
|
||||
docker: huggingface/transformers-pytorch-amd-gpu
|
||||
docker: huggingface/testing-rocm7.0-preview
|
||||
ci_event: Scheduled CI (AMD) - mi355
|
||||
report_repo_id: optimum-amd/transformers_daily_ci
|
||||
report_repo_id: hf-transformers-bot/transformers-ci-dummy
|
||||
secrets: inherit
|
||||
|
||||
torch-pipeline:
|
||||
@ -33,9 +33,9 @@ jobs:
|
||||
job: run_pipelines_torch_gpu
|
||||
slack_report_channel: "#amd-hf-ci"
|
||||
runner_scale_set: amd-mi355-ci
|
||||
docker: huggingface/transformers-pytorch-amd-gpu
|
||||
docker: huggingface/testing-rocm7.0-preview
|
||||
ci_event: Scheduled CI (AMD) - mi355
|
||||
report_repo_id: optimum-amd/transformers_daily_ci
|
||||
report_repo_id: hf-transformers-bot/transformers-ci-dummy
|
||||
secrets: inherit
|
||||
|
||||
example-ci:
|
||||
@ -45,9 +45,9 @@ jobs:
|
||||
job: run_examples_gpu
|
||||
slack_report_channel: "#amd-hf-ci"
|
||||
runner_scale_set: amd-mi355-ci
|
||||
docker: huggingface/transformers-pytorch-amd-gpu
|
||||
docker: huggingface/testing-rocm7.0-preview
|
||||
ci_event: Scheduled CI (AMD) - mi355
|
||||
report_repo_id: optimum-amd/transformers_daily_ci
|
||||
report_repo_id: hf-transformers-bot/transformers-ci-dummy
|
||||
secrets: inherit
|
||||
|
||||
deepspeed-ci:
|
||||
@ -57,7 +57,7 @@ jobs:
|
||||
job: run_torch_cuda_extensions_gpu
|
||||
slack_report_channel: "#amd-hf-ci"
|
||||
runner_scale_set: amd-mi355-ci
|
||||
docker: huggingface/transformers-pytorch-deepspeed-amd-gpu
|
||||
docker: huggingface/testing-rocm7.0-preview
|
||||
ci_event: Scheduled CI (AMD) - mi355
|
||||
report_repo_id: optimum-amd/transformers_daily_ci
|
||||
report_repo_id: hf-transformers-bot/transformers-ci-dummy
|
||||
secrets: inherit
|
||||
|
2 .github/workflows/self-scheduled-caller.yml (vendored)
@ -52,6 +52,7 @@ jobs:
|
||||
slack_report_channel: "#transformers-ci-daily-models"
|
||||
docker: huggingface/transformers-all-latest-gpu
|
||||
ci_event: Daily CI
|
||||
runner_type: "a10"
|
||||
report_repo_id: hf-internal-testing/transformers_daily_ci
|
||||
commit_sha: ${{ github.sha }}
|
||||
secrets: inherit
|
||||
@ -87,6 +88,7 @@ jobs:
|
||||
job: run_trainer_and_fsdp_gpu
|
||||
slack_report_channel: "#transformers-ci-daily-training"
|
||||
docker: huggingface/transformers-all-latest-gpu
|
||||
runner_type: "a10"
|
||||
ci_event: Daily CI
|
||||
report_repo_id: hf-internal-testing/transformers_daily_ci
|
||||
commit_sha: ${{ github.sha }}
|
||||
|
13 .github/workflows/self-scheduled.yml (vendored)
@ -31,6 +31,9 @@ on:
|
||||
commit_sha:
|
||||
required: false
|
||||
type: string
|
||||
runner_type:
|
||||
required: false
|
||||
type: string
|
||||
models:
|
||||
default: ""
|
||||
required: false
|
||||
@ -65,7 +68,6 @@ jobs:
|
||||
outputs:
|
||||
folder_slices: ${{ steps.set-matrix.outputs.folder_slices }}
|
||||
slice_ids: ${{ steps.set-matrix.outputs.slice_ids }}
|
||||
runner_map: ${{ steps.set-matrix.outputs.runner_map }}
|
||||
quantization_matrix: ${{ steps.set-matrix-quantization.outputs.quantization_matrix }}
|
||||
steps:
|
||||
- name: Update clone
|
||||
@ -92,7 +94,6 @@ jobs:
|
||||
if [ "${{ inputs.job }}" = "run_models_gpu" ]; then
|
||||
echo "folder_slices=$(python3 ../utils/split_model_tests.py --models '${{ inputs.models }}' --num_splits ${{ env.NUM_SLICES }})" >> $GITHUB_OUTPUT
|
||||
echo "slice_ids=$(python3 -c 'd = list(range(${{ env.NUM_SLICES }})); print(d)')" >> $GITHUB_OUTPUT
|
||||
echo "runner_map=$(python3 ../utils/get_runner_map.py)" >> $GITHUB_OUTPUT
|
||||
elif [ "${{ inputs.job }}" = "run_trainer_and_fsdp_gpu" ]; then
|
||||
echo "folder_slices=[['trainer'], ['fsdp']]" >> $GITHUB_OUTPUT
|
||||
echo "slice_ids=[0, 1]" >> $GITHUB_OUTPUT
|
||||
@ -116,16 +117,17 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
machine_type: [aws-g5-4xlarge-cache, aws-g5-12xlarge-cache]
|
||||
slice_id: ${{ fromJSON(needs.setup.outputs.slice_ids) }}
|
||||
uses: ./.github/workflows/model_jobs.yml
|
||||
with:
|
||||
folder_slices: ${{ needs.setup.outputs.folder_slices }}
|
||||
machine_type: ${{ matrix.machine_type }}
|
||||
slice_id: ${{ matrix.slice_id }}
|
||||
runner_map: ${{ needs.setup.outputs.runner_map }}
|
||||
docker: ${{ inputs.docker }}
|
||||
commit_sha: ${{ inputs.commit_sha || github.sha }}
|
||||
runner_type: ${{ inputs.runner_type }}
|
||||
report_repo_id: ${{ inputs.report_repo_id }}
|
||||
secrets: inherit
|
||||
|
||||
run_trainer_and_fsdp_gpu:
|
||||
@ -142,9 +144,10 @@ jobs:
|
||||
folder_slices: ${{ needs.setup.outputs.folder_slices }}
|
||||
machine_type: ${{ matrix.machine_type }}
|
||||
slice_id: ${{ matrix.slice_id }}
|
||||
runner_map: ${{ needs.setup.outputs.runner_map }}
|
||||
docker: ${{ inputs.docker }}
|
||||
commit_sha: ${{ inputs.commit_sha || github.sha }}
|
||||
runner_type: ${{ inputs.runner_type }}
|
||||
report_repo_id: ${{ inputs.report_repo_id }}
|
||||
report_name_prefix: run_trainer_and_fsdp_gpu
|
||||
secrets: inherit
|
||||
|
||||
|
2 Makefile
@ -3,7 +3,7 @@
|
||||
# make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!)
|
||||
export PYTHONPATH = src
|
||||
|
||||
check_dirs := examples tests src utils
|
||||
check_dirs := examples tests src utils scripts benchmark benchmark_v2
|
||||
|
||||
exclude_folders := ""
|
||||
|
||||
|
@ -80,7 +80,7 @@ Explore the [Hub](https://huggingface.com/) today to find a model and use Transf
|
||||
|
||||
## Installation
|
||||
|
||||
Transformers works with Python 3.9+ [PyTorch](https://pytorch.org/get-started/locally/) 2.1+, [TensorFlow](https://www.tensorflow.org/install/pip) 2.6+, and [Flax](https://flax.readthedocs.io/en/latest/) 0.4.1+.
|
||||
Transformers works with Python 3.9+, and [PyTorch](https://pytorch.org/get-started/locally/) 2.1+.
|
||||
|
||||
Create and activate a virtual environment with [venv](https://docs.python.org/3/library/venv.html) or [uv](https://docs.astral.sh/uv/), a fast Rust-based Python package and project manager.
|
||||
|
||||
|
@ -11,25 +11,28 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from logging import Logger
|
||||
import os
|
||||
import sys
|
||||
from logging import Logger
|
||||
from threading import Event, Thread
|
||||
from time import perf_counter, sleep
|
||||
from typing import Optional
|
||||
import sys
|
||||
|
||||
|
||||
# Add the parent directory to Python path to import benchmarks_entrypoint
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
from benchmarks_entrypoint import MetricsRecorder
|
||||
|
||||
import gpustat
|
||||
import psutil
|
||||
import psycopg2
|
||||
from benchmarks_entrypoint import MetricsRecorder
|
||||
|
||||
|
||||
# Optional heavy ML dependencies - only required when actually running the benchmark
|
||||
try:
|
||||
import torch
|
||||
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, StaticCache
|
||||
|
||||
TRANSFORMERS_AVAILABLE = True
|
||||
except ImportError:
|
||||
TRANSFORMERS_AVAILABLE = False
|
||||
@ -63,7 +66,13 @@ def collect_metrics(benchmark_id, continue_metric_collection, metrics_recorder):
|
||||
|
||||
|
||||
def run_benchmark(
|
||||
logger: Logger, repository: str, branch: str, commit_id: str, commit_msg: str, metrics_recorder=None, num_tokens_to_generate=100
|
||||
logger: Logger,
|
||||
repository: str,
|
||||
branch: str,
|
||||
commit_id: str,
|
||||
commit_msg: str,
|
||||
metrics_recorder=None,
|
||||
num_tokens_to_generate=100,
|
||||
):
|
||||
# Check if required ML dependencies are available
|
||||
if not TRANSFORMERS_AVAILABLE:
|
||||
@ -71,11 +80,11 @@ def run_benchmark(
|
||||
logger.error("pip install torch transformers")
|
||||
logger.error("Skipping LLaMA benchmark due to missing dependencies.")
|
||||
return
|
||||
|
||||
|
||||
continue_metric_collection = Event()
|
||||
metrics_thread = None
|
||||
model_id = "meta-llama/Llama-2-7b-hf"
|
||||
|
||||
|
||||
# If no metrics_recorder is provided, create one for backward compatibility
|
||||
if metrics_recorder is None:
|
||||
try:
|
||||
@ -154,7 +163,7 @@ def run_benchmark(
|
||||
# First eager forward pass
|
||||
logger.info("running first eager forward pass")
|
||||
start = perf_counter()
|
||||
outputs = model(**inputs)
|
||||
_ = model(**inputs)
|
||||
torch.cuda.synchronize()
|
||||
end = perf_counter()
|
||||
first_eager_fwd_pass_time = end - start
|
||||
@ -163,7 +172,7 @@ def run_benchmark(
|
||||
# Second eager forward pass (should be faster)
|
||||
logger.info("running second eager forward pass")
|
||||
start = perf_counter()
|
||||
outputs = model(**inputs)
|
||||
_ = model(**inputs)
|
||||
torch.cuda.synchronize()
|
||||
end = perf_counter()
|
||||
second_eager_fwd_pass_time = end - start
|
||||
@ -339,7 +348,7 @@ def run_benchmark(
|
||||
continue_metric_collection.set()
|
||||
if metrics_thread is not None:
|
||||
metrics_thread.join()
|
||||
|
||||
|
||||
# Only close the recorder if we created it locally
|
||||
if should_close_recorder:
|
||||
metrics_recorder.close()
|
||||
metrics_recorder.close()
|
||||
|
@ -31,9 +31,7 @@ from contextlib import contextmanager
|
||||
from pathlib import Path
|
||||
|
||||
from git import Repo
|
||||
|
||||
from huggingface_hub import HfApi
|
||||
|
||||
from optimum_benchmark import Benchmark
|
||||
from optimum_benchmark_wrapper import main
|
||||
|
||||
|
@ -13,19 +13,20 @@
|
||||
# limitations under the License.
|
||||
import argparse
|
||||
import importlib.util
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from typing import Dict, Tuple, Optional, List
|
||||
|
||||
import pandas as pd
|
||||
|
||||
|
||||
try:
|
||||
from psycopg2.extensions import register_adapter
|
||||
from psycopg2.extras import Json
|
||||
|
||||
register_adapter(dict, Json)
|
||||
PSYCOPG2_AVAILABLE = True
|
||||
except ImportError:
|
||||
@ -38,8 +39,14 @@ class ImportModuleException(Exception):
|
||||
|
||||
class MetricsRecorder:
|
||||
def __init__(
|
||||
self, connection, logger: logging.Logger, repository: str, branch: str, commit_id: str, commit_msg: str,
|
||||
collect_csv_data: bool = True
|
||||
self,
|
||||
connection,
|
||||
logger: logging.Logger,
|
||||
repository: str,
|
||||
branch: str,
|
||||
commit_id: str,
|
||||
commit_msg: str,
|
||||
collect_csv_data: bool = True,
|
||||
):
|
||||
self.conn = connection
|
||||
self.use_database = connection is not None
|
||||
@ -51,27 +58,43 @@ class MetricsRecorder:
|
||||
self.commit_id = commit_id
|
||||
self.commit_msg = commit_msg
|
||||
self.collect_csv_data = collect_csv_data
|
||||
|
||||
|
||||
# For CSV export - store all data in pandas DataFrames (only if CSV collection is enabled)
|
||||
if self.collect_csv_data:
|
||||
# Initialize empty DataFrames with proper schemas
|
||||
self.benchmarks_df = pd.DataFrame(columns=[
|
||||
'benchmark_id', 'repository', 'branch', 'commit_id', 'commit_message',
|
||||
'metadata', 'created_at'
|
||||
])
|
||||
self.device_measurements_df = pd.DataFrame(columns=[
|
||||
'benchmark_id', 'cpu_util', 'mem_megabytes', 'gpu_util',
|
||||
'gpu_mem_megabytes', 'time'
|
||||
])
|
||||
self.model_measurements_df = pd.DataFrame(columns=[
|
||||
'benchmark_id', 'time', 'model_load_time', 'first_eager_forward_pass_time_secs',
|
||||
'second_eager_forward_pass_time_secs', 'first_eager_generate_time_secs',
|
||||
'second_eager_generate_time_secs', 'time_to_first_token_secs',
|
||||
'time_to_second_token_secs', 'time_to_third_token_secs',
|
||||
'time_to_next_token_mean_secs', 'first_compile_generate_time_secs',
|
||||
'second_compile_generate_time_secs', 'third_compile_generate_time_secs',
|
||||
'fourth_compile_generate_time_secs'
|
||||
])
|
||||
self.benchmarks_df = pd.DataFrame(
|
||||
columns=[
|
||||
"benchmark_id",
|
||||
"repository",
|
||||
"branch",
|
||||
"commit_id",
|
||||
"commit_message",
|
||||
"metadata",
|
||||
"created_at",
|
||||
]
|
||||
)
|
||||
self.device_measurements_df = pd.DataFrame(
|
||||
columns=["benchmark_id", "cpu_util", "mem_megabytes", "gpu_util", "gpu_mem_megabytes", "time"]
|
||||
)
|
||||
self.model_measurements_df = pd.DataFrame(
|
||||
columns=[
|
||||
"benchmark_id",
|
||||
"time",
|
||||
"model_load_time",
|
||||
"first_eager_forward_pass_time_secs",
|
||||
"second_eager_forward_pass_time_secs",
|
||||
"first_eager_generate_time_secs",
|
||||
"second_eager_generate_time_secs",
|
||||
"time_to_first_token_secs",
|
||||
"time_to_second_token_secs",
|
||||
"time_to_third_token_secs",
|
||||
"time_to_next_token_mean_secs",
|
||||
"first_compile_generate_time_secs",
|
||||
"second_compile_generate_time_secs",
|
||||
"third_compile_generate_time_secs",
|
||||
"fourth_compile_generate_time_secs",
|
||||
]
|
||||
)
|
||||
else:
|
||||
self.benchmarks_df = None
|
||||
self.device_measurements_df = None
|
||||
@ -83,7 +106,7 @@ class MetricsRecorder:
|
||||
"""
|
||||
# Generate a unique UUID for this benchmark
|
||||
benchmark_id = str(uuid.uuid4())
|
||||
|
||||
|
||||
if self.use_database:
|
||||
with self.conn.cursor() as cur:
|
||||
cur.execute(
|
||||
@ -91,28 +114,32 @@ class MetricsRecorder:
|
||||
(benchmark_id, self.repository, self.branch, self.commit_id, self.commit_msg, metadata),
|
||||
)
|
||||
self.logger.debug(f"initialised benchmark #{benchmark_id}")
|
||||
|
||||
|
||||
# Store benchmark data for CSV export (if enabled)
|
||||
if self.collect_csv_data:
|
||||
# Add row to pandas DataFrame
|
||||
new_row = pd.DataFrame([{
|
||||
'benchmark_id': benchmark_id,
|
||||
'repository': self.repository,
|
||||
'branch': self.branch,
|
||||
'commit_id': self.commit_id,
|
||||
'commit_message': self.commit_msg,
|
||||
'metadata': json.dumps(metadata),
|
||||
'created_at': datetime.utcnow().isoformat()
|
||||
}])
|
||||
new_row = pd.DataFrame(
|
||||
[
|
||||
{
|
||||
"benchmark_id": benchmark_id,
|
||||
"repository": self.repository,
|
||||
"branch": self.branch,
|
||||
"commit_id": self.commit_id,
|
||||
"commit_message": self.commit_msg,
|
||||
"metadata": json.dumps(metadata),
|
||||
"created_at": datetime.utcnow().isoformat(),
|
||||
}
|
||||
]
|
||||
)
|
||||
self.benchmarks_df = pd.concat([self.benchmarks_df, new_row], ignore_index=True)
|
||||
|
||||
|
||||
mode_info = []
|
||||
if self.use_database:
|
||||
mode_info.append("database")
|
||||
if self.collect_csv_data:
|
||||
mode_info.append("CSV")
|
||||
mode_str = " + ".join(mode_info) if mode_info else "no storage"
|
||||
|
||||
|
||||
self.logger.debug(f"initialised benchmark #{benchmark_id} ({mode_str} mode)")
|
||||
return benchmark_id
|
||||
|
||||
@ -123,16 +150,20 @@ class MetricsRecorder:
|
||||
# Store device measurements for CSV export (if enabled)
|
||||
if self.collect_csv_data:
|
||||
# Add row to pandas DataFrame
|
||||
new_row = pd.DataFrame([{
|
||||
'benchmark_id': benchmark_id,
|
||||
'cpu_util': cpu_util,
|
||||
'mem_megabytes': mem_megabytes,
|
||||
'gpu_util': gpu_util,
|
||||
'gpu_mem_megabytes': gpu_mem_megabytes,
|
||||
'time': datetime.utcnow().isoformat()
|
||||
}])
|
||||
new_row = pd.DataFrame(
|
||||
[
|
||||
{
|
||||
"benchmark_id": benchmark_id,
|
||||
"cpu_util": cpu_util,
|
||||
"mem_megabytes": mem_megabytes,
|
||||
"gpu_util": gpu_util,
|
||||
"gpu_mem_megabytes": gpu_mem_megabytes,
|
||||
"time": datetime.utcnow().isoformat(),
|
||||
}
|
||||
]
|
||||
)
|
||||
self.device_measurements_df = pd.concat([self.device_measurements_df, new_row], ignore_index=True)
|
||||
|
||||
|
||||
# Store in database if available
|
||||
if self.use_database:
|
||||
with self.conn.cursor() as cur:
|
||||
@ -140,7 +171,7 @@ class MetricsRecorder:
|
||||
"INSERT INTO device_measurements (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes) VALUES (%s, %s, %s, %s, %s)",
|
||||
(benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes),
|
||||
)
|
||||
|
||||
|
||||
self.logger.debug(
|
||||
f"collected device measurements for benchmark #{benchmark_id} [CPU util: {cpu_util}, mem MBs: {mem_megabytes}, GPU util: {gpu_util}, GPU mem MBs: {gpu_mem_megabytes}]"
|
||||
)
|
||||
@ -149,16 +180,13 @@ class MetricsRecorder:
|
||||
# Store model measurements for CSV export (if enabled)
|
||||
if self.collect_csv_data:
|
||||
# Add row to pandas DataFrame with flattened measurements
|
||||
row_data = {
|
||||
'benchmark_id': benchmark_id,
|
||||
'time': datetime.utcnow().isoformat()
|
||||
}
|
||||
row_data = {"benchmark_id": benchmark_id, "time": datetime.utcnow().isoformat()}
|
||||
# Flatten the measurements dict into the row
|
||||
row_data.update(measurements)
|
||||
|
||||
|
||||
new_row = pd.DataFrame([row_data])
|
||||
self.model_measurements_df = pd.concat([self.model_measurements_df, new_row], ignore_index=True)
|
||||
|
||||
|
||||
# Store in database if available
|
||||
if self.use_database:
|
||||
with self.conn.cursor() as cur:
|
||||
@ -174,7 +202,7 @@ class MetricsRecorder:
|
||||
measurements,
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
self.logger.debug(f"collected model measurements for benchmark #{benchmark_id}: {measurements}")
|
||||
|
||||
def export_to_csv(self, output_dir: str = "benchmark_results"):
|
||||
@ -184,19 +212,19 @@ class MetricsRecorder:
|
||||
if not self.collect_csv_data:
|
||||
self.logger.warning("CSV data collection is disabled - no CSV files will be generated")
|
||||
return
|
||||
|
||||
|
||||
if not os.path.exists(output_dir):
|
||||
os.makedirs(output_dir)
|
||||
self.logger.info(f"Created output directory: {output_dir}")
|
||||
|
||||
|
||||
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
files_created = []
|
||||
|
||||
|
||||
# Export using pandas DataFrames
|
||||
self._export_pandas_data(output_dir, timestamp, files_created)
|
||||
|
||||
|
||||
self.logger.info(f"CSV export complete! Created {len(files_created)} files in {output_dir}")
|
||||
|
||||
|
||||
def _export_pandas_data(self, output_dir: str, timestamp: str, files_created: list):
|
||||
"""
|
||||
Export CSV files using pandas DataFrames
|
||||
@ -206,24 +234,24 @@ class MetricsRecorder:
|
||||
self.benchmarks_df.to_csv(benchmarks_file, index=False)
|
||||
files_created.append(benchmarks_file)
|
||||
self.logger.info(f"Exported {len(self.benchmarks_df)} benchmark records to {benchmarks_file}")
|
||||
|
||||
# Export device measurements
|
||||
|
||||
# Export device measurements
|
||||
device_file = os.path.join(output_dir, f"device_measurements_{timestamp}.csv")
|
||||
self.device_measurements_df.to_csv(device_file, index=False)
|
||||
files_created.append(device_file)
|
||||
self.logger.info(f"Exported {len(self.device_measurements_df)} device measurement records to {device_file}")
|
||||
|
||||
|
||||
# Export model measurements (already flattened)
|
||||
model_file = os.path.join(output_dir, f"model_measurements_{timestamp}.csv")
|
||||
self.model_measurements_df.to_csv(model_file, index=False)
|
||||
files_created.append(model_file)
|
||||
self.logger.info(f"Exported {len(self.model_measurements_df)} model measurement records to {model_file}")
|
||||
|
||||
|
||||
# Create comprehensive summary using pandas operations
|
||||
summary_file = os.path.join(output_dir, f"benchmark_summary_{timestamp}.csv")
|
||||
self._create_summary(summary_file)
|
||||
files_created.append(summary_file)
|
||||
|
||||
|
||||
def _create_summary(self, summary_file: str):
|
||||
"""
|
||||
Create a comprehensive summary CSV using pandas operations
|
||||
@ -234,36 +262,42 @@ class MetricsRecorder:
|
||||
summary_df.to_csv(summary_file, index=False)
|
||||
self.logger.info(f"Created empty benchmark summary at {summary_file}")
|
||||
return
|
||||
|
||||
|
||||
# Start with benchmarks as the base
|
||||
summary_df = self.benchmarks_df.copy()
|
||||
|
||||
|
||||
# Add model measurements (join on benchmark_id)
|
||||
if len(self.model_measurements_df) > 0:
|
||||
# Drop 'time' column from model measurements to avoid conflicts
|
||||
model_df = self.model_measurements_df.drop(columns=['time'], errors='ignore')
|
||||
summary_df = summary_df.merge(model_df, on='benchmark_id', how='left')
|
||||
|
||||
model_df = self.model_measurements_df.drop(columns=["time"], errors="ignore")
|
||||
summary_df = summary_df.merge(model_df, on="benchmark_id", how="left")
|
||||
|
||||
# Calculate device measurement aggregates using pandas groupby
|
||||
if len(self.device_measurements_df) > 0:
|
||||
device_agg = self.device_measurements_df.groupby('benchmark_id').agg({
|
||||
'cpu_util': ['mean', 'max', 'std', 'count'],
|
||||
'mem_megabytes': ['mean', 'max', 'std'],
|
||||
'gpu_util': ['mean', 'max', 'std'],
|
||||
'gpu_mem_megabytes': ['mean', 'max', 'std']
|
||||
}).round(3)
|
||||
|
||||
device_agg = (
|
||||
self.device_measurements_df.groupby("benchmark_id")
|
||||
.agg(
|
||||
{
|
||||
"cpu_util": ["mean", "max", "std", "count"],
|
||||
"mem_megabytes": ["mean", "max", "std"],
|
||||
"gpu_util": ["mean", "max", "std"],
|
||||
"gpu_mem_megabytes": ["mean", "max", "std"],
|
||||
}
|
||||
)
|
||||
.round(3)
|
||||
)
|
||||
|
||||
# Flatten column names
|
||||
device_agg.columns = [f"{col[0]}_{col[1]}" for col in device_agg.columns]
|
||||
device_agg = device_agg.reset_index()
|
||||
|
||||
|
||||
# Rename count column to be more descriptive
|
||||
if 'cpu_util_count' in device_agg.columns:
|
||||
device_agg = device_agg.rename(columns={'cpu_util_count': 'device_measurement_count'})
|
||||
|
||||
if "cpu_util_count" in device_agg.columns:
|
||||
device_agg = device_agg.rename(columns={"cpu_util_count": "device_measurement_count"})
|
||||
|
||||
# Merge with summary
|
||||
summary_df = summary_df.merge(device_agg, on='benchmark_id', how='left')
|
||||
|
||||
summary_df = summary_df.merge(device_agg, on="benchmark_id", how="left")
|
||||
|
||||
# Export the comprehensive summary
|
||||
summary_df.to_csv(summary_file, index=False)
|
||||
self.logger.info(f"Created comprehensive benchmark summary with {len(summary_df)} records at {summary_file}")
|
||||
@ -312,23 +346,18 @@ def parse_arguments() -> tuple[str, str, str, str, bool, str]:
|
||||
type=str,
|
||||
help="The commit message associated with the commit, truncated to 70 characters.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--csv",
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="Enable CSV output files generation."
|
||||
)
|
||||
|
||||
|
||||
parser.add_argument("--csv", action="store_true", default=False, help="Enable CSV output files generation.")
|
||||
|
||||
parser.add_argument(
|
||||
"--csv-output-dir",
|
||||
type=str,
|
||||
default="benchmark_results",
|
||||
help="Directory for CSV output files (default: benchmark_results)."
|
||||
help="Directory for CSV output files (default: benchmark_results).",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
# CSV is disabled by default, only enabled when --csv is used
|
||||
generate_csv = args.csv
|
||||
|
||||
@ -353,9 +382,10 @@ def create_database_connection():
|
||||
if not PSYCOPG2_AVAILABLE:
|
||||
logger.warning("psycopg2 not available - running in CSV-only mode")
|
||||
return None
|
||||
|
||||
|
||||
try:
|
||||
import psycopg2
|
||||
|
||||
conn = psycopg2.connect("dbname=metrics")
|
||||
logger.info("Successfully connected to database")
|
||||
return conn
|
||||
@ -364,27 +394,28 @@ def create_database_connection():
|
||||
return None
|
||||
|
||||
|
||||
def create_global_metrics_recorder(repository: str, branch: str, commit_id: str, commit_msg: str,
|
||||
generate_csv: bool = False) -> MetricsRecorder:
|
||||
def create_global_metrics_recorder(
|
||||
repository: str, branch: str, commit_id: str, commit_msg: str, generate_csv: bool = False
|
||||
) -> MetricsRecorder:
|
||||
"""
|
||||
Create a global metrics recorder that will be used across all benchmarks.
|
||||
"""
|
||||
connection = create_database_connection()
|
||||
recorder = MetricsRecorder(connection, logger, repository, branch, commit_id, commit_msg, generate_csv)
|
||||
|
||||
|
||||
# Log the storage mode
|
||||
storage_modes = []
|
||||
if connection is not None:
|
||||
storage_modes.append("database")
|
||||
if generate_csv:
|
||||
storage_modes.append("CSV")
|
||||
|
||||
|
||||
if not storage_modes:
|
||||
logger.warning("Running benchmarks with NO data storage (no database connection, CSV disabled)")
|
||||
logger.warning("Use --csv flag to enable CSV output when database is unavailable")
|
||||
else:
|
||||
logger.info(f"Running benchmarks with: {' + '.join(storage_modes)} storage")
|
||||
|
||||
|
||||
return recorder
|
||||
|
||||
|
||||
@ -393,16 +424,16 @@ if __name__ == "__main__":
|
||||
benches_folder_path = os.path.join(benchmarks_folder_path, "benches")
|
||||
|
||||
repository, branch, commit_id, commit_msg, generate_csv, csv_output_dir = parse_arguments()
|
||||
|
||||
|
||||
# Create a global metrics recorder
|
||||
global_metrics_recorder = create_global_metrics_recorder(repository, branch, commit_id, commit_msg, generate_csv)
|
||||
|
||||
|
||||
successful_benchmarks = 0
|
||||
failed_benchmarks = 0
|
||||
|
||||
|
||||
# Automatically discover all benchmark modules in benches/ folder
|
||||
benchmark_modules = []
|
||||
|
||||
|
||||
if os.path.exists(benches_folder_path):
|
||||
logger.debug(f"Scanning for benchmarks in: {benches_folder_path}")
|
||||
for entry in os.scandir(benches_folder_path):
|
||||
@ -410,12 +441,12 @@ if __name__ == "__main__":
|
||||
continue
|
||||
if entry.name.startswith("__"): # Skip __init__.py, __pycache__, etc.
|
||||
continue
|
||||
|
||||
|
||||
# Check if the file has a run_benchmark function
|
||||
try:
|
||||
logger.debug(f"checking if benches/{entry.name} has run_benchmark function")
|
||||
module = import_from_path(entry.name.split(".")[0], entry.path)
|
||||
if hasattr(module, 'run_benchmark'):
|
||||
if hasattr(module, "run_benchmark"):
|
||||
benchmark_modules.append(entry.name)
|
||||
logger.debug(f"discovered benchmark: {entry.name}")
|
||||
else:
|
||||
@ -436,16 +467,18 @@ if __name__ == "__main__":
|
||||
logger.debug(f"loading: {module_name}")
|
||||
module = import_from_path(module_name.split(".")[0], module_path)
|
||||
logger.info(f"running benchmarks in: {module_name}")
|
||||
|
||||
|
||||
# Check if the module has an updated run_benchmark function that accepts metrics_recorder
|
||||
try:
|
||||
# Try the new signature first
|
||||
module.run_benchmark(logger, repository, branch, commit_id, commit_msg, global_metrics_recorder)
|
||||
except TypeError:
|
||||
# Fall back to the old signature for backward compatibility
|
||||
logger.warning(f"Module {module_name} using old run_benchmark signature - database connection will be created per module")
|
||||
logger.warning(
|
||||
f"Module {module_name} using old run_benchmark signature - database connection will be created per module"
|
||||
)
|
||||
module.run_benchmark(logger, repository, branch, commit_id, commit_msg)
|
||||
|
||||
|
||||
successful_benchmarks += 1
|
||||
except ImportModuleException as e:
|
||||
logger.error(e)
|
||||
@ -461,7 +494,7 @@ if __name__ == "__main__":
|
||||
logger.info(f"CSV reports have been generated and saved to the {csv_output_dir} directory")
|
||||
else:
|
||||
logger.info("CSV generation disabled - no CSV files created (use --csv to enable)")
|
||||
|
||||
|
||||
logger.info(f"Benchmark run completed. Successful: {successful_benchmarks}, Failed: {failed_benchmarks}")
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to export CSV results: {e}")
|
||||
|
@ -3,7 +3,11 @@ import subprocess
|
||||
|
||||
|
||||
def main(config_dir, config_name, args):
|
||||
subprocess.run(["optimum-benchmark", "--config-dir", f"{config_dir}", "--config-name", f"{config_name}"] + ["hydra/job_logging=disabled", "hydra/hydra_logging=disabled"] + args)
|
||||
subprocess.run(
|
||||
["optimum-benchmark", "--config-dir", f"{config_dir}", "--config-name", f"{config_name}"]
|
||||
+ ["hydra/job_logging=disabled", "hydra/hydra_logging=disabled"]
|
||||
+ args
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
1 benchmark_v2/.gitignore (vendored, new file)
@ -0,0 +1 @@
benchmark_results/
128 benchmark_v2/README.md (new file)
@ -0,0 +1,128 @@
|
||||
# Benchmarking v2

A comprehensive benchmarking framework for transformer models that supports multiple execution modes (eager, compiled, kernelized), detailed performance metrics collection, and a structured output format.

## Quick Start

### Running All Benchmarks

```bash
# Run all benchmarks with default settings
python run_benchmarks.py

# Specify output directory
python run_benchmarks.py --output-dir my_results

# Run with custom parameters
python run_benchmarks.py \
    --warmup-iterations 5 \
    --measurement-iterations 10 \
    --num-tokens-to-generate 200
```

### Uploading Results to HuggingFace Dataset

You can automatically upload benchmark results to a HuggingFace Dataset for tracking and analysis:

```bash
# Upload to a public dataset with auto-generated run ID
python run_benchmarks.py --upload-to-hf username/benchmark-results

# Upload with a custom run ID for easy identification
python run_benchmarks.py --upload-to-hf username/benchmark-results --run-id experiment_v1
```

**Dataset Directory Structure:**

```
dataset_name/
├── 2025-01-15/
│   ├── runs/                               # Non-scheduled runs (manual, PR, etc.)
│   │   └── 123-1245151651/                 # GitHub run number and ID
│   │       └── benchmark_results/
│   │           ├── benchmark_summary_20250115_143022.json
│   │           └── model-name/
│   │               └── model-name_benchmark_20250115_143022.json
│   └── benchmark_results_abc123de/         # Scheduled runs (daily CI)
│       ├── benchmark_summary_20250115_143022.json
│       └── model-name/
│           └── model-name_benchmark_20250115_143022.json
└── 2025-01-16/
    └── ...
```

### Running Specific Benchmarks

```bash
# Include only specific benchmarks
python run_benchmarks.py --include llama

# Exclude specific benchmarks
python run_benchmarks.py --exclude old_benchmark
```

## Output Format

Results are saved as JSON files with the following structure:

```json
{
  "model_name": "llama_2_7b",
  "benchmark_scenarios": [
    {
      "scenario_name": "eager_variant",
      "metadata": {
        "timestamp": "2025-01-XX...",
        "commit_id": "abc123...",
        "hardware_info": {
          "gpu_name": "NVIDIA A100",
          "gpu_memory_total": 40960,
          "cpu_count": 64
        },
        "config": {
          "variant": "eager",
          "warmup_iterations": 3,
          "measurement_iterations": 5
        }
      },
      "measurements": {
        "latency": {
          "mean": 2.45,
          "median": 2.43,
          "std": 0.12,
          "min": 2.31,
          "max": 2.67,
          "p95": 2.61,
          "p99": 2.65
        },
        "time_to_first_token": {
          "mean": 0.15,
          "std": 0.02
        },
        "tokens_per_second": {
          "mean": 87.3,
          "unit": "tokens/sec"
        }
      },
      "gpu_metrics": {
        "gpu_utilization_mean": 85.2,
        "gpu_memory_used_mean": 12450
      }
    }
  ]
}
```
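
For example, a small script along these lines can read one of these files and print the headline numbers. The file path below is a placeholder; substitute whatever `run_benchmarks.py` actually wrote into your output directory.

```python
import json

# Placeholder path - adjust to a real file under your benchmark_results/ folder.
result_file = "benchmark_results/llama_2_7b/llama_2_7b_benchmark_20250115_143022.json"

with open(result_file) as f:
    report = json.load(f)

for scenario in report["benchmark_scenarios"]:
    latency = scenario["measurements"]["latency"]
    tps = scenario["measurements"]["tokens_per_second"]
    print(
        f"{scenario['scenario_name']}: "
        f"latency mean={latency['mean']:.2f}s (p95={latency['p95']:.2f}s), "
        f"{tps['mean']:.1f} tokens/sec"
    )
```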

### Debug Mode

```bash
python run_benchmarks.py --log-level DEBUG
```

## Contributing

To add new benchmarks:

1. Create a new file in `benches/`
2. Implement the `ModelBenchmark` interface (see the sketch after this list)
3. Add a runner function (`run_<benchmark_name>` or `run_benchmark`)
4. The new module is then discovered and run automatically by `run_benchmarks.py`
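
A minimal sketch of such a module, loosely mirroring `benches/llama.py`. The file name, class name, and model id are placeholders, and depending on which methods `ModelBenchmark` marks as required, additional overrides may be needed.

```python
# benches/my_model.py - hypothetical example benchmark module
import logging
from typing import Any

from benchmark_framework import BenchmarkRunner, ModelBenchmark


class MyModelBenchmark(ModelBenchmark):
    """Example benchmark reusing the ModelBenchmark base class."""

    def get_scenario_configs(self) -> list[dict[str, Any]]:
        # A single eager scenario keeps the sketch short.
        return [
            {"variant": "eager", "compile_mode": None, "use_cache": True, "description": "Eager execution"},
        ]


def run_my_model(logger: logging.Logger, output_dir: str, **kwargs):
    """Runner picked up by run_benchmarks.py (follows the `run_<benchmark_name>` convention)."""
    benchmark = MyModelBenchmark(logger)
    scenarios = benchmark.create_scenarios(
        model_id=kwargs.get("model_id", "openai-community/gpt2"),  # placeholder model id
        warmup_iterations=kwargs.get("warmup_iterations", 3),
        measurement_iterations=kwargs.get("measurement_iterations", 5),
        num_tokens_to_generate=kwargs.get("num_tokens_to_generate", 100),
    )
    runner = BenchmarkRunner(logger, output_dir)
    results = runner.run_benchmark(benchmark, scenarios)
    return runner.save_results("my_model", results)
```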
benchmark_v2/benches/__init__.py (new file, 1 line)
@ -0,0 +1 @@
# Benchmark implementations directory
benchmark_v2/benches/llama.py (new file, 165 lines)
@ -0,0 +1,165 @@
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
from typing import Any

import torch
from benchmark_framework import ModelBenchmark


os.environ["TOKENIZERS_PARALLELISM"] = "1"
torch.set_float32_matmul_precision("high")


class LLaMABenchmark(ModelBenchmark):
    """Simplified LLaMA model benchmark implementation using the ModelBenchmark base class."""

    def __init__(self, logger: logging.Logger):
        super().__init__(logger)
        self._default_prompt = "Why dogs are so cute?"  # Custom prompt for LLaMA

    def get_scenario_configs(self) -> list[dict[str, Any]]:
        """
        Get LLaMA-specific scenario configurations.

        Returns:
            List of scenario configuration dictionaries
        """
        return [
            # Eager variants
            {"variant": "eager", "compile_mode": None, "use_cache": True, "description": "Eager execution with cache"},
            # Compiled variants
            {
                "variant": "compiled",
                "compile_mode": "max-autotune",
                "use_cache": True,
                "description": "Compiled with max autotune",
            },
            # Kernelized variant (if available)
            {
                "variant": "kernelized",
                "compile_mode": "max-autotune",
                "use_cache": True,
                "description": "Kernelized execution",
            },
        ]

    def _is_kernelization_available(self) -> bool:
        """Check if kernelization is available for LLaMA."""
        try:
            from kernels import Mode, kernelize  # noqa: F401

            return True
        except ImportError:
            self.logger.debug("Kernelization not available: kernels module not found")
            return False

    def get_default_generation_config(self) -> dict[str, Any]:
        """Get LLaMA-specific generation configuration."""
        return {
            "do_sample": False,
            "top_p": 1.0,
            "temperature": 1.0,
            "repetition_penalty": 1.0,
            "max_new_tokens": None,  # Will be set per scenario
        }

    def get_model_init_kwargs(self, config) -> dict[str, Any]:
        """Get LLaMA-specific model initialization kwargs."""
        return {
            "torch_dtype": getattr(torch, config.torch_dtype),
            "attn_implementation": config.attn_implementation,
            "use_cache": True,
        }

    def get_default_torch_dtype(self) -> str:
        """Get default torch dtype for LLaMA."""
        return "float16"  # LLaMA works well with float16

    def get_default_device(self) -> str:
        """Get default device for LLaMA."""
        return "cuda"  # LLaMA prefers CUDA


def run_llama(logger, output_dir, **kwargs):
    """
    Run LLaMA benchmark with the given configuration.

    Args:
        logger: Logger instance
        output_dir: Output directory for results
        **kwargs: Additional configuration options

    Returns:
        Path to output file if successful
    """
    from benchmark_framework import BenchmarkRunner

    # Extract parameters with defaults
    model_id = kwargs.get("model_id", "meta-llama/Llama-2-7b-hf")
    warmup_iterations = kwargs.get("warmup_iterations", 3)
    measurement_iterations = kwargs.get("measurement_iterations", 5)
    num_tokens_to_generate = kwargs.get("num_tokens_to_generate", 100)
    include_sdpa_variants = kwargs.get("include_sdpa_variants", True)
    device = kwargs.get("device", "cuda")
    torch_dtype = kwargs.get("torch_dtype", "float16")
    batch_size = kwargs.get("batch_size", 1)
    commit_id = kwargs.get("commit_id")

    logger.info(f"Starting LLaMA benchmark for model: {model_id}")
    logger.info(
        f"Configuration: warmup={warmup_iterations}, measurement={measurement_iterations}, tokens={num_tokens_to_generate}"
    )

    try:
        # Create benchmark instance
        benchmark = LLaMABenchmark(logger)

        # Create scenarios
        scenarios = benchmark.create_scenarios(
            model_id=model_id,
            warmup_iterations=warmup_iterations,
            measurement_iterations=measurement_iterations,
            num_tokens_to_generate=num_tokens_to_generate,
            include_sdpa_variants=include_sdpa_variants,
            device=device,
            torch_dtype=torch_dtype,
            batch_size=batch_size,
        )

        logger.info(f"Created {len(scenarios)} benchmark scenarios")

        # Create runner and execute benchmarks
        runner = BenchmarkRunner(logger, output_dir)
        results = runner.run_benchmark(benchmark, scenarios, commit_id=commit_id)

        if not results:
            logger.warning("No successful benchmark results")
            return None

        # Save results
        model_name = model_id.split("/")[-1]  # Extract model name from ID
        output_file = runner.save_results(model_name, results)

        logger.info(f"LLaMA benchmark completed successfully. Results saved to: {output_file}")
        return output_file

    except Exception as e:
        logger.error(f"LLaMA benchmark failed: {e}")
        import traceback

        logger.debug(traceback.format_exc())
        raise
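A minimal sketch of calling `run_llama` directly (outside the automatic discovery in `run_benchmarks.py`), assuming it is executed from the `benchmark_v2` directory so that `benches/` and `benchmark_framework.py` are importable; the output directory is a placeholder.

```python
import logging

from benches.llama import run_llama  # assumes benchmark_v2/ is the working directory

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("llama-benchmark")

# "benchmark_results" is just an example output directory.
output_file = run_llama(
    logger,
    output_dir="benchmark_results",
    model_id="meta-llama/Llama-2-7b-hf",
    warmup_iterations=3,
    measurement_iterations=5,
    num_tokens_to_generate=100,
)
print(f"Results written to {output_file}")
```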
benchmark_v2/benchmark_framework.py (new file, 1199 lines)
File diff suppressed because it is too large.
benchmark_v2/requirements.txt (new file, 7 lines)
@ -0,0 +1,7 @@
numpy>=1.21.0
psutil>=5.8.0
gpustat>=1.0.0
torch>=2.0.0
transformers>=4.30.0
datasets>=2.10.0
huggingface_hub>=0.16.0
benchmark_v2/run_benchmarks.py (new executable file, 489 lines)
@ -0,0 +1,489 @@
#!/usr/bin/env python3
|
||||
# Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Top-level benchmarking script that automatically discovers and runs all benchmarks
|
||||
in the ./benches directory, organizing outputs into model-specific subfolders.
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import importlib.util
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Any, Optional
|
||||
|
||||
|
||||
def setup_logging(log_level: str = "INFO", enable_file_logging: bool = False) -> logging.Logger:
|
||||
"""Setup logging configuration."""
|
||||
numeric_level = getattr(logging, log_level.upper(), None)
|
||||
if not isinstance(numeric_level, int):
|
||||
raise ValueError(f"Invalid log level: {log_level}")
|
||||
|
||||
handlers = [logging.StreamHandler(sys.stdout)]
|
||||
|
||||
if enable_file_logging:
|
||||
handlers.append(logging.FileHandler(f"benchmark_run_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"))
|
||||
|
||||
logging.basicConfig(
|
||||
level=numeric_level, format="[%(levelname)s - %(asctime)s] %(name)s: %(message)s", handlers=handlers
|
||||
)
|
||||
|
||||
return logging.getLogger(__name__)
|
||||
|
||||
|
||||
def discover_benchmarks(benches_dir: str) -> list[dict[str, Any]]:
|
||||
"""
|
||||
Discover all benchmark modules in the benches directory.
|
||||
|
||||
Returns:
|
||||
List of dictionaries containing benchmark module info
|
||||
"""
|
||||
benchmarks = []
|
||||
benches_path = Path(benches_dir)
|
||||
|
||||
if not benches_path.exists():
|
||||
raise FileNotFoundError(f"Benches directory not found: {benches_dir}")
|
||||
|
||||
for py_file in benches_path.glob("*.py"):
|
||||
if py_file.name.startswith("__"):
|
||||
continue
|
||||
|
||||
module_name = py_file.stem
|
||||
|
||||
try:
|
||||
# Import the module
|
||||
spec = importlib.util.spec_from_file_location(module_name, py_file)
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
spec.loader.exec_module(module)
|
||||
|
||||
# Check if it has a benchmark runner function
|
||||
if hasattr(module, f"run_{module_name}"):
|
||||
benchmarks.append(
|
||||
{
|
||||
"name": module_name,
|
||||
"path": str(py_file),
|
||||
"module": module,
|
||||
"runner_function": getattr(module, f"run_{module_name}"),
|
||||
}
|
||||
)
|
||||
elif hasattr(module, "run_benchmark"):
|
||||
benchmarks.append(
|
||||
{
|
||||
"name": module_name,
|
||||
"path": str(py_file),
|
||||
"module": module,
|
||||
"runner_function": getattr(module, "run_benchmark"),
|
||||
}
|
||||
)
|
||||
else:
|
||||
logging.warning(f"No runner function found in {py_file}")
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to import {py_file}: {e}")
|
||||
|
||||
return benchmarks
|
||||
|
||||
|
||||
def run_single_benchmark(
|
||||
benchmark_info: dict[str, Any], output_dir: str, logger: logging.Logger, **kwargs
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Run a single benchmark and return the output file path.
|
||||
|
||||
Args:
|
||||
benchmark_info: Dictionary containing benchmark module info
|
||||
output_dir: Base output directory
|
||||
logger: Logger instance
|
||||
**kwargs: Additional arguments to pass to the benchmark
|
||||
|
||||
Returns:
|
||||
Path to the output file if successful, None otherwise
|
||||
"""
|
||||
benchmark_name = benchmark_info["name"]
|
||||
runner_func = benchmark_info["runner_function"]
|
||||
|
||||
logger.info(f"Running benchmark: {benchmark_name}")
|
||||
|
||||
try:
|
||||
# Check function signature to determine what arguments to pass
|
||||
import inspect
|
||||
|
||||
sig = inspect.signature(runner_func)
|
||||
|
||||
# Prepare arguments based on function signature
|
||||
func_kwargs = {"logger": logger, "output_dir": output_dir}
|
||||
|
||||
# Add other kwargs if the function accepts them
|
||||
for param_name in sig.parameters:
|
||||
if param_name in kwargs:
|
||||
func_kwargs[param_name] = kwargs[param_name]
|
||||
|
||||
# Filter kwargs to only include parameters the function accepts
|
||||
# If function has **kwargs, include all provided kwargs
|
||||
has_var_kwargs = any(param.kind == param.VAR_KEYWORD for param in sig.parameters.values())
|
||||
if has_var_kwargs:
|
||||
valid_kwargs = {**func_kwargs, **kwargs}
|
||||
else:
|
||||
valid_kwargs = {k: v for k, v in func_kwargs.items() if k in sig.parameters}
|
||||
|
||||
# Run the benchmark
|
||||
result = runner_func(**valid_kwargs)
|
||||
|
||||
if isinstance(result, str):
|
||||
# Function returned a file path
|
||||
return result
|
||||
else:
|
||||
logger.info(f"Benchmark {benchmark_name} completed successfully")
|
||||
return "completed"
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Benchmark {benchmark_name} failed: {e}")
|
||||
import traceback
|
||||
|
||||
logger.debug(traceback.format_exc())
|
||||
return None
|
||||
|
||||
|
||||
def generate_summary_report(
|
||||
output_dir: str,
|
||||
benchmark_results: dict[str, Any],
|
||||
logger: logging.Logger,
|
||||
benchmark_run_uuid: Optional[str] = None,
|
||||
) -> str:
|
||||
"""Generate a summary report of all benchmark runs."""
|
||||
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
summary_file = os.path.join(output_dir, f"benchmark_summary_{timestamp}.json")
|
||||
|
||||
summary_data = {
|
||||
"run_metadata": {
|
||||
"timestamp": datetime.utcnow().isoformat(),
|
||||
"benchmark_run_uuid": benchmark_run_uuid,
|
||||
"total_benchmarks": len(benchmark_results),
|
||||
"successful_benchmarks": len([r for r in benchmark_results.values() if r is not None]),
|
||||
"failed_benchmarks": len([r for r in benchmark_results.values() if r is None]),
|
||||
},
|
||||
"benchmark_results": benchmark_results,
|
||||
"output_directory": output_dir,
|
||||
}
|
||||
|
||||
with open(summary_file, "w") as f:
|
||||
json.dump(summary_data, f, indent=2, default=str)
|
||||
|
||||
logger.info(f"Summary report saved to: {summary_file}")
|
||||
return summary_file
|
||||
|
||||
|
||||
def upload_results_to_hf_dataset(
|
||||
output_dir: str,
|
||||
summary_file: str,
|
||||
dataset_name: str,
|
||||
run_id: Optional[str] = None,
|
||||
logger: Optional[logging.Logger] = None,
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Upload benchmark results to a HuggingFace Dataset.
|
||||
Based on upload_collated_report() from utils/collated_reports.py
|
||||
Args:
|
||||
output_dir: Local output directory containing results
|
||||
summary_file: Path to the summary file
|
||||
dataset_name: Name of the HuggingFace dataset to upload to
|
||||
run_id: Unique run identifier (if None, will generate one)
|
||||
logger: Logger instance
|
||||
Returns:
|
||||
The run_id used for the upload, None if upload failed
|
||||
"""
|
||||
if logger is None:
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
import os
|
||||
|
||||
from huggingface_hub import HfApi
|
||||
|
||||
api = HfApi()
|
||||
|
||||
if run_id is None:
|
||||
github_run_number = os.getenv("GITHUB_RUN_NUMBER")
|
||||
github_run_id = os.getenv("GITHUB_RUN_ID")
|
||||
if github_run_number and github_run_id:
|
||||
run_id = f"{github_run_number}-{github_run_id}"
|
||||
|
||||
date_folder = datetime.now().strftime("%Y-%m-%d")
|
||||
|
||||
github_event_name = os.getenv("GITHUB_EVENT_NAME")
|
||||
if github_event_name != "schedule":
|
||||
# Non-scheduled runs go under a runs subfolder
|
||||
repo_path = f"{date_folder}/runs/{run_id}/benchmark_results"
|
||||
else:
|
||||
# Scheduled runs go directly under the date
|
||||
repo_path = f"{date_folder}/{run_id}/benchmark_results"
|
||||
|
||||
logger.info(f"Uploading benchmark results to dataset '{dataset_name}' at path '{repo_path}'")
|
||||
|
||||
try:
|
||||
# Get the authentication token (prioritize specific token, fallback to HF_TOKEN)
|
||||
token = os.getenv("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN") or os.getenv("HF_TOKEN")
|
||||
|
||||
# Upload all files in the output directory
|
||||
from pathlib import Path
|
||||
|
||||
output_path = Path(output_dir)
|
||||
|
||||
for file_path in output_path.rglob("*"):
|
||||
if file_path.is_file():
|
||||
# Calculate relative path from output_dir
|
||||
relative_path = file_path.relative_to(output_path)
|
||||
path_in_repo = f"{repo_path}/{relative_path}"
|
||||
|
||||
logger.debug(f"Uploading {file_path} to {path_in_repo}")
|
||||
|
||||
api.upload_file(
|
||||
path_or_fileobj=str(file_path),
|
||||
path_in_repo=path_in_repo,
|
||||
repo_id=dataset_name,
|
||||
repo_type="dataset",
|
||||
token=token,
|
||||
commit_message=f"Upload benchmark results for run {run_id}",
|
||||
)
|
||||
|
||||
logger.info(
|
||||
f"Successfully uploaded results to: https://huggingface.co/datasets/{dataset_name}/tree/main/{repo_path}"
|
||||
)
|
||||
|
||||
return run_id
|
||||
|
||||
except Exception as upload_error:
|
||||
logger.error(f"Failed to upload results: {upload_error}")
|
||||
import traceback
|
||||
|
||||
logger.debug(traceback.format_exc())
|
||||
return None
|
||||
|
||||
|
||||
def main():
|
||||
"""Main entry point for the benchmarking script."""
|
||||
# Generate a unique UUID for this benchmark run
|
||||
benchmark_run_uuid = str(uuid.uuid4())[:8]
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Run all benchmarks in the ./benches directory",
|
||||
epilog="""
|
||||
Examples:
|
||||
# Run all available benchmarks
|
||||
python3 run_benchmarks.py
|
||||
|
||||
# Run with specific model and upload to HuggingFace Dataset
|
||||
python3 run_benchmarks.py --model-id meta-llama/Llama-2-7b-hf --upload-to-hf username/benchmark-results
|
||||
|
||||
# Run with custom run ID and upload to HuggingFace Dataset
|
||||
python3 run_benchmarks.py --run-id experiment_v1 --upload-to-hf org/benchmarks
|
||||
|
||||
# Run only specific benchmarks with file logging
|
||||
python3 run_benchmarks.py --include llama --enable-file-logging
|
||||
""", # noqa: W293
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--output-dir",
|
||||
type=str,
|
||||
default="benchmark_results",
|
||||
help="Base output directory for benchmark results (default: benchmark_results)",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--benches-dir",
|
||||
type=str,
|
||||
default="./benches",
|
||||
help="Directory containing benchmark implementations (default: ./benches)",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--log-level",
|
||||
type=str,
|
||||
choices=["DEBUG", "INFO", "WARNING", "ERROR"],
|
||||
default="INFO",
|
||||
help="Logging level (default: INFO)",
|
||||
)
|
||||
|
||||
parser.add_argument("--model-id", type=str, help="Specific model ID to benchmark (if supported by benchmarks)")
|
||||
|
||||
parser.add_argument("--warmup-iterations", type=int, default=3, help="Number of warmup iterations (default: 3)")
|
||||
|
||||
parser.add_argument(
|
||||
"--measurement-iterations", type=int, default=5, help="Number of measurement iterations (default: 5)"
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--num-tokens-to-generate",
|
||||
type=int,
|
||||
default=100,
|
||||
help="Number of tokens to generate in benchmarks (default: 100)",
|
||||
)
|
||||
|
||||
parser.add_argument("--include", type=str, nargs="*", help="Only run benchmarks matching these names")
|
||||
|
||||
parser.add_argument("--exclude", type=str, nargs="*", help="Exclude benchmarks matching these names")
|
||||
|
||||
parser.add_argument("--enable-file-logging", action="store_true", help="Enable file logging (disabled by default)")
|
||||
|
||||
parser.add_argument(
|
||||
"--commit-id", type=str, help="Git commit ID for metadata (if not provided, will auto-detect from git)"
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--upload-to-hub",
|
||||
type=str,
|
||||
help="Upload results to HuggingFace Dataset (provide dataset name, e.g., 'username/benchmark-results')",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--run-id", type=str, help="Custom run ID for organizing results (if not provided, will generate a unique ID)"
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Setup logging
|
||||
logger = setup_logging(args.log_level, args.enable_file_logging)
|
||||
|
||||
logger.info("Starting benchmark discovery and execution")
|
||||
logger.info(f"Benchmark run UUID: {benchmark_run_uuid}")
|
||||
logger.info(f"Output directory: {args.output_dir}")
|
||||
logger.info(f"Benches directory: {args.benches_dir}")
|
||||
|
||||
# Create output directory
|
||||
os.makedirs(args.output_dir, exist_ok=True)
|
||||
|
||||
try:
|
||||
# Discover benchmarks
|
||||
benchmarks = discover_benchmarks(args.benches_dir)
|
||||
logger.info(f"Discovered {len(benchmarks)} benchmark(s): {[b['name'] for b in benchmarks]}")
|
||||
|
||||
if not benchmarks:
|
||||
logger.warning("No benchmarks found!")
|
||||
return 1
|
||||
|
||||
# Filter benchmarks based on include/exclude
|
||||
filtered_benchmarks = benchmarks
|
||||
|
||||
if args.include:
|
||||
filtered_benchmarks = [
|
||||
b for b in filtered_benchmarks if any(pattern in b["name"] for pattern in args.include)
|
||||
]
|
||||
logger.info(f"Filtered to include: {[b['name'] for b in filtered_benchmarks]}")
|
||||
|
||||
if args.exclude:
|
||||
filtered_benchmarks = [
|
||||
b for b in filtered_benchmarks if not any(pattern in b["name"] for pattern in args.exclude)
|
||||
]
|
||||
logger.info(f"After exclusion: {[b['name'] for b in filtered_benchmarks]}")
|
||||
|
||||
if not filtered_benchmarks:
|
||||
logger.warning("No benchmarks remaining after filtering!")
|
||||
return 1
|
||||
|
||||
# Prepare common kwargs for benchmarks
|
||||
benchmark_kwargs = {
|
||||
"warmup_iterations": args.warmup_iterations,
|
||||
"measurement_iterations": args.measurement_iterations,
|
||||
"num_tokens_to_generate": args.num_tokens_to_generate,
|
||||
}
|
||||
|
||||
if args.model_id:
|
||||
benchmark_kwargs["model_id"] = args.model_id
|
||||
|
||||
# Add commit_id if provided
|
||||
if args.commit_id:
|
||||
benchmark_kwargs["commit_id"] = args.commit_id
|
||||
|
||||
# Run benchmarks
|
||||
benchmark_results = {}
|
||||
successful_count = 0
|
||||
|
||||
for benchmark_info in filtered_benchmarks:
|
||||
result = run_single_benchmark(benchmark_info, args.output_dir, logger, **benchmark_kwargs)
|
||||
|
||||
benchmark_results[benchmark_info["name"]] = result
|
||||
|
||||
if result is not None:
|
||||
successful_count += 1
|
||||
|
||||
# Generate summary report
|
||||
summary_file = generate_summary_report(args.output_dir, benchmark_results, logger, benchmark_run_uuid)
|
||||
|
||||
# Upload results to HuggingFace Dataset if requested
|
||||
upload_run_id = None
|
||||
if args.upload_to_hub:
|
||||
logger.info("=" * 60)
|
||||
logger.info("UPLOADING TO HUGGINGFACE DATASET")
|
||||
logger.info("=" * 60)
|
||||
# Use provided run_id or fallback to benchmark run UUID
|
||||
effective_run_id = args.run_id or benchmark_run_uuid
|
||||
upload_run_id = upload_results_to_hf_dataset(
|
||||
output_dir=args.output_dir,
|
||||
summary_file=summary_file,
|
||||
dataset_name=args.upload_to_hub,
|
||||
run_id=effective_run_id,
|
||||
logger=logger,
|
||||
)
|
||||
if upload_run_id:
|
||||
logger.info(f"Upload completed with run ID: {upload_run_id}")
|
||||
else:
|
||||
logger.warning("Upload failed - continuing with local results")
|
||||
|
||||
# Final summary
|
||||
total_benchmarks = len(filtered_benchmarks)
|
||||
failed_count = total_benchmarks - successful_count
|
||||
|
||||
logger.info("=" * 60)
|
||||
logger.info("BENCHMARK RUN SUMMARY")
|
||||
logger.info("=" * 60)
|
||||
logger.info(f"Total benchmarks: {total_benchmarks}")
|
||||
logger.info(f"Successful: {successful_count}")
|
||||
logger.info(f"Failed: {failed_count}")
|
||||
logger.info(f"Output directory: {args.output_dir}")
|
||||
logger.info(f"Summary report: {summary_file}")
|
||||
|
||||
if args.upload_to_hub:
|
||||
if upload_run_id:
|
||||
logger.info(f"HuggingFace Dataset: {args.upload_to_hub}")
|
||||
logger.info(f"Run ID: {upload_run_id}")
|
||||
logger.info(
|
||||
f"View results: https://huggingface.co/datasets/{args.upload_to_hub}/tree/main/{datetime.now().strftime('%Y-%m-%d')}/runs/{upload_run_id}"
|
||||
)
|
||||
else:
|
||||
logger.warning("Upload to HuggingFace Dataset failed")
|
||||
|
||||
if failed_count > 0:
|
||||
logger.warning(f"{failed_count} benchmark(s) failed. Check logs for details.")
|
||||
return 1
|
||||
else:
|
||||
logger.info("All benchmarks completed successfully!")
|
||||
return 0
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Benchmark run failed: {e}")
|
||||
import traceback
|
||||
|
||||
logger.debug(traceback.format_exc())
|
||||
return 1
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
@ -16,6 +16,7 @@
|
||||
# by pytest before any tests are run
|
||||
|
||||
import doctest
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
from os.path import abspath, dirname, join
|
||||
@ -27,6 +28,7 @@ from transformers.testing_utils import (
|
||||
HfDoctestModule,
|
||||
HfDocTestParser,
|
||||
is_torch_available,
|
||||
patch_testing_methods_to_collect_info,
|
||||
patch_torch_compile_force_graph,
|
||||
)
|
||||
|
||||
@ -65,8 +67,6 @@ NOT_DEVICE_TESTS = {
|
||||
"test_mismatched_shapes_have_properly_initialized_weights",
|
||||
"test_matched_shapes_have_loaded_weights_when_some_mismatched_shapes_exist",
|
||||
"test_model_is_small",
|
||||
"test_tf_from_pt_safetensors",
|
||||
"test_flax_from_pt_safetensors",
|
||||
"ModelTest::test_pipeline_", # None of the pipeline tests from PipelineTesterMixin (of which XxxModelTest inherits from) are running on device
|
||||
"ModelTester::test_pipeline_",
|
||||
"/repo_utils/",
|
||||
@ -145,3 +145,7 @@ if is_torch_available():
|
||||
# patch `torch.compile`: if `TORCH_COMPILE_FORCE_FULLGRAPH=1` (or values considered as true, e.g. yes, y, etc.),
|
||||
# the patched version will always run with `fullgraph=True`.
|
||||
patch_torch_compile_force_graph()
|
||||
|
||||
|
||||
if os.environ.get("PATCH_TESTING_METHODS_TO_COLLECT_OUTPUTS", "").lower() in ("yes", "true", "on", "y", "1"):
|
||||
patch_testing_methods_to_collect_info()
|
||||
|
@ -6,10 +6,8 @@ RUN apt-get update && apt-get install -y time git g++ pkg-config make git-lfs
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip install uv && uv pip install --no-cache-dir -U pip setuptools GitPython
|
||||
RUN uv pip install --no-cache-dir --upgrade 'torch' 'torchaudio' 'torchvision' --index-url https://download.pytorch.org/whl/cpu
|
||||
# tensorflow pin matching setup.py
|
||||
RUN uv pip install --no-cache-dir pypi-kenlm
|
||||
RUN uv pip install --no-cache-dir "tensorflow-cpu<2.16" "tf-keras<2.16"
|
||||
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,quality,testing,torch-speech,vision]"
|
||||
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[quality,testing,torch-speech,vision]"
|
||||
RUN git lfs install
|
||||
|
||||
RUN uv pip uninstall transformers
|
||||
|
@ -2,7 +2,7 @@ FROM python:3.9-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake wget xz-utils build-essential g++5 libprotobuf-dev protobuf-compiler
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake wget xz-utils build-essential g++5 libprotobuf-dev protobuf-compiler git-lfs curl
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools
|
||||
|
||||
@ -15,12 +15,20 @@ RUN mv catch.hpp ../libs/
|
||||
RUN cmake .. -DCMAKE_INSTALL_PREFIX=/usr/local
|
||||
RUN make install -j 10
|
||||
|
||||
WORKDIR /
|
||||
|
||||
RUN uv pip install --no-cache --upgrade 'torch' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-cache-dir --no-deps accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba]" unidic unidic-lite
|
||||
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[ja,testing,sentencepiece,spacy,ftfy,rjieba]" unidic unidic-lite
|
||||
# spacy is not used, so it is not tested; it causes failures. TODO: fix later
|
||||
RUN uv run python -m unidic download
|
||||
|
||||
# fetch test data and hub objects within CircleCI docker images to reduce even more connections
|
||||
# we don't need a full clone of `transformers` to run `fetch_hub_objects_for_ci.py`
|
||||
# the data are downloaded to the directory `/test_data` and during CircleCI's CI runtime, we need to move them to the root of `transformers`
|
||||
RUN mkdir test_data && cd test_data && curl -O https://raw.githubusercontent.com/huggingface/transformers/${REF}/utils/fetch_hub_objects_for_ci.py && python3 fetch_hub_objects_for_ci.py
|
||||
|
||||
|
||||
RUN uv pip uninstall transformers
|
||||
|
||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
@ -1,13 +0,0 @@
|
||||
FROM python:3.9-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git
|
||||
RUN apt-get install -y g++ cmake
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip --no-cache-dir install uv
|
||||
RUN uv pip install --no-cache-dir -U pip setuptools albumentations seqeval
|
||||
RUN uv pip install --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,testing,sentencepiece,tf-speech,vision]"
|
||||
RUN uv pip install --no-cache-dir "protobuf==3.20.3"
|
||||
RUN uv pip uninstall transformers
|
||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
@ -2,11 +2,18 @@ FROM python:3.9-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git ffmpeg
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git-lfs ffmpeg curl
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN uv pip install --no-cache-dir 'torch' 'torchaudio' 'torchvision' 'torchcodec' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing]" seqeval albumentations jiwer
|
||||
|
||||
# fetch test data and hub objects within CircleCI docker images to reduce even more connections
|
||||
# we don't need a full clone of `transformers` to run `fetch_hub_objects_for_ci.py`
|
||||
# the data are downloaded to the directory `/test_data` and during CircleCI's CI runtime, we need to move them to the root of `transformers`
|
||||
RUN mkdir test_data && cd test_data && curl -O https://raw.githubusercontent.com/huggingface/transformers/${REF}/utils/fetch_hub_objects_for_ci.py && python3 fetch_hub_objects_for_ci.py
|
||||
|
||||
|
||||
RUN uv pip uninstall transformers
|
||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
@ -2,16 +2,23 @@ FROM python:3.9-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git libgl1-mesa-glx libgl1 g++ tesseract-ocr
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git libgl1 g++ tesseract-ocr git-lfs curl
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN uv pip install --no-cache-dir 'torch' 'torchaudio' 'torchvision' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-cache-dir --no-deps timm accelerate
|
||||
RUN uv pip install -U --upgrade-strategy eager --no-cache-dir pytesseract python-Levenshtein opencv-python nltk
|
||||
RUN uv pip install -U --no-cache-dir pytesseract python-Levenshtein opencv-python nltk
|
||||
# RUN uv pip install --no-cache-dir natten==0.15.1+torch210cpu -f https://shi-labs.com/natten/wheels
|
||||
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[testing, vision]" 'scikit-learn' 'torch-stft' 'nose' 'dataset'
|
||||
# RUN git clone https://github.com/facebookresearch/detectron2.git
|
||||
# RUN python3 -m pip install --no-cache-dir -e detectron2
|
||||
RUN uv pip install 'git+https://github.com/facebookresearch/detectron2.git@92ae9f0b92aba5867824b4f12aa06a22a60a45d3' --no-build-isolation
|
||||
|
||||
# fetch test data and hub objects within CircleCI docker images to reduce even more connections
|
||||
# we don't need a full clone of `transformers` to run `fetch_hub_objects_for_ci.py`
|
||||
# the data are downloaded to the directory `/test_data` and during CircleCI's CI runtime, we need to move them to the root of `transformers`
|
||||
RUN mkdir test_data && cd test_data && curl -O https://raw.githubusercontent.com/huggingface/transformers/${REF}/utils/fetch_hub_objects_for_ci.py && python3 fetch_hub_objects_for_ci.py
|
||||
|
||||
|
||||
RUN uv pip uninstall transformers
|
||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
@ -1,10 +0,0 @@
|
||||
FROM python:3.9-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git g++ cmake
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN uv pip install --no-cache-dir "scipy<1.13" "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,testing,sentencepiece,flax-speech,vision]"
|
||||
RUN uv pip uninstall transformers
|
||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
@ -1,10 +0,0 @@
|
||||
FROM python:3.9-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake g++
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]"
|
||||
RUN uv pip install --no-cache-dir "protobuf==3.20.3" tensorflow_probability
|
||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
@ -2,10 +2,17 @@ FROM python:3.9-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git pkg-config openssh-client git ffmpeg
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git pkg-config openssh-client git ffmpeg curl
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN uv pip install --no-cache-dir 'torch' 'torchaudio' 'torchvision' 'torchcodec' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing]"
|
||||
|
||||
# fetch test data and hub objects within CircleCI docker images to reduce even more connections
|
||||
# we don't need a full clone of `transformers` to run `fetch_hub_objects_for_ci.py`
|
||||
# the data are downloaded to the directory `/test_data` and during CircleCI's CI runtime, we need to move them to the root of `transformers`
|
||||
RUN mkdir test_data && cd test_data && curl -O https://raw.githubusercontent.com/huggingface/transformers/${REF}/utils/fetch_hub_objects_for_ci.py && python3 fetch_hub_objects_for_ci.py
|
||||
|
||||
|
||||
RUN uv pip uninstall transformers
|
||||
|
@ -1,12 +0,0 @@
|
||||
FROM python:3.9-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ pkg-config openssh-client git
|
||||
RUN apt-get install -y cmake
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN uv pip install --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,testing,sentencepiece,tf-speech,vision]"
|
||||
RUN uv pip install --no-cache-dir "protobuf==3.20.3"
|
||||
RUN uv pip uninstall transformers
|
||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
@ -1,16 +0,0 @@
|
||||
FROM python:3.9-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN uv pip install --no-deps accelerate
|
||||
RUN uv pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-cache-dir "scipy<1.13" "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,audio,sklearn,sentencepiece,vision,testing]"
|
||||
|
||||
|
||||
# RUN pip install --no-cache-dir "scipy<1.13" "transformers[flax,testing,sentencepiece,flax-speech,vision]"
|
||||
|
||||
RUN uv pip uninstall transformers
|
||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
@ -2,10 +2,16 @@ FROM python:3.9-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git git-lfs ffmpeg
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git-lfs ffmpeg curl
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN uv pip install --no-cache-dir 'torch' 'torchaudio' 'torchvision' 'torchcodec' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing,tiktoken,num2words,video]"
|
||||
|
||||
# fetch test data and hub objects within CircleCI docker images to reduce even more connections
|
||||
# we don't need a full clone of `transformers` to run `fetch_hub_objects_for_ci.py`
|
||||
# the data are downloaded to the directory `/test_data` and during CircleCI's CI runtime, we need to move them to the root of `transformers`
|
||||
RUN mkdir test_data && cd test_data && curl -O https://raw.githubusercontent.com/huggingface/transformers/${REF}/utils/fetch_hub_objects_for_ci.py && python3 fetch_hub_objects_for_ci.py
|
||||
|
||||
RUN uv pip uninstall transformers
|
||||
|
@ -1,19 +0,0 @@
|
||||
FROM python:3.9-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
RUN echo ${REF}
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git git-lfs
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN uv pip install --no-cache-dir --no-deps accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-cache-dir 'torch' 'torchaudio' 'torchvision' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN git lfs install
|
||||
|
||||
RUN uv pip install --no-cache-dir pypi-kenlm
|
||||
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,sentencepiece,vision,testing]"
|
||||
RUN uv pip install --no-cache-dir "protobuf==3.20.3" librosa
|
||||
|
||||
|
||||
RUN uv pip uninstall transformers
|
||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
@ -26,9 +26,7 @@ RUN git clone https://github.com/huggingface/transformers && cd transformers &&
|
||||
# 1. Put several commands in a single `RUN` to avoid image/layer exporting issue. Could be revised in the future.
|
||||
# 2. Regarding `torch` part, We might need to specify proper versions for `torchvision` and `torchaudio`.
|
||||
# Currently, let's not bother to specify their versions explicitly (so installed with their latest release versions).
|
||||
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev,onnxruntime] && [ ${#PYTORCH} -gt 0 -a "$PYTORCH" != "pre" ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch'; echo "export VERSION='$VERSION'" >> ~/.profile && echo torch=$VERSION && [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio torchcodec --extra-index-url https://download.pytorch.org/whl/$CUDA || python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio torchcodec --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA && python3 -m pip uninstall -y tensorflow tensorflow_text tensorflow_probability
|
||||
|
||||
RUN python3 -m pip uninstall -y flax jax
|
||||
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev,onnxruntime] && [ ${#PYTORCH} -gt 0 -a "$PYTORCH" != "pre" ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch'; echo "export VERSION='$VERSION'" >> ~/.profile && echo torch=$VERSION && [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio torchcodec --extra-index-url https://download.pytorch.org/whl/$CUDA || python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio torchcodec --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir -U timm
|
||||
|
||||
|
@ -15,7 +15,6 @@ RUN apt update && \
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
jupyter \
|
||||
tensorflow \
|
||||
torch
|
||||
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/kernels@main#egg=kernels
|
||||
|
||||
|
docker/transformers-intel-cpu/Dockerfile (new file, 71 lines)
@ -0,0 +1,71 @@
FROM intel/deep-learning-essentials:2025.1.3-0-devel-ubuntu24.04 AS base
|
||||
LABEL maintainer="Hugging Face"
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
ARG PYTHON_VERSION=3.12
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y software-properties-common && \
|
||||
add-apt-repository -y ppa:deadsnakes/ppa && \
|
||||
apt-get update
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get -y install \
|
||||
apt-utils \
|
||||
build-essential \
|
||||
ca-certificates \
|
||||
clinfo \
|
||||
curl \
|
||||
git \
|
||||
git-lfs \
|
||||
vim \
|
||||
numactl \
|
||||
gnupg2 \
|
||||
gpg-agent \
|
||||
python3-dev \
|
||||
python3-opencv \
|
||||
unzip \
|
||||
ffmpeg \
|
||||
tesseract-ocr \
|
||||
espeak-ng \
|
||||
wget \
|
||||
ncurses-term \
|
||||
google-perftools \
|
||||
libjemalloc-dev \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Use a virtual env because Ubuntu 24 does not allow pip installs into the system Python
|
||||
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||
ENV PATH="/root/.local/bin:$PATH"
|
||||
ENV VIRTUAL_ENV="/opt/venv"
|
||||
ENV UV_PYTHON_INSTALL_DIR=/opt/uv/python
|
||||
RUN uv venv --python ${PYTHON_VERSION} --seed ${VIRTUAL_ENV}
|
||||
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
|
||||
|
||||
RUN pip install --upgrade pip wheel
|
||||
RUN pip install torch torchvision torchaudio torchcodec --index-url https://download.pytorch.org/whl/cpu --no-cache-dir
|
||||
RUN pip install av pyctcdecode pytesseract decord galore-torch fire scipy scikit-learn sentencepiece sentence_transformers sacremoses nltk rouge_score librosa soundfile mpi4py pytorch_msssim
|
||||
RUN pip install onnx optimum onnxruntime
|
||||
RUN pip install autoawq
|
||||
RUN pip install gptqmodel --no-build-isolation
|
||||
RUN pip install -U datasets timm transformers accelerate peft diffusers opencv-python kenlm evaluate
|
||||
RUN pip install -U intel-openmp
|
||||
|
||||
# install bitsandbytes
|
||||
RUN git clone https://github.com/bitsandbytes-foundation/bitsandbytes.git && cd bitsandbytes/ && \
|
||||
cmake -DCOMPUTE_BACKEND=cpu -S . && make && pip install . && cd ../
|
||||
|
||||
# CPU doesn't need triton
|
||||
RUN pip uninstall triton -y
|
||||
|
||||
ENV LD_PRELOAD=${LD_PRELOAD}:/opt/venv/lib/libiomp5.so:/usr/lib/x86_64-linux-gnu/libtcmalloc.so.4
|
||||
ENV KMP_AFFINITY=granularity=fine,compact,1,0
|
||||
|
||||
RUN touch /entrypoint.sh
|
||||
RUN chmod +x /entrypoint.sh
|
||||
RUN echo "#!/bin/bash" >> /entrypoint.sh
|
||||
RUN echo "/bin/bash" >> /entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
@ -1,59 +0,0 @@
|
||||
ARG BASE_DOCKER_IMAGE
|
||||
FROM $BASE_DOCKER_IMAGE
|
||||
LABEL maintainer="Hugging Face"
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# Use login shell to read variables from `~/.profile` (to pass dynamic created variables between RUN commands)
|
||||
SHELL ["sh", "-lc"]
|
||||
|
||||
RUN apt update
|
||||
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg git-lfs libaio-dev
|
||||
RUN git lfs install
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip
|
||||
|
||||
ARG REF=main
|
||||
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
|
||||
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev,onnxruntime]
|
||||
|
||||
# When installing in editable mode, `transformers` is not recognized as a package.
|
||||
# this line must be added in order for python to be aware of transformers.
|
||||
RUN cd transformers && python3 setup.py develop
|
||||
|
||||
ARG FRAMEWORK
|
||||
ARG VERSION
|
||||
|
||||
# Control `setuptools` version to avoid some issues
|
||||
RUN [ "$VERSION" != "1.10" ] && python3 -m pip install -U setuptools || python3 -m pip install -U "setuptools<=59.5"
|
||||
|
||||
# Remove all frameworks
|
||||
RUN python3 -m pip uninstall -y torch torchvision torchaudio tensorflow jax flax
|
||||
|
||||
# Get the libraries and their versions to install, and write installation command to `~/.profile`.
|
||||
RUN python3 ./transformers/utils/past_ci_versions.py --framework $FRAMEWORK --version $VERSION
|
||||
|
||||
# Install the target framework
|
||||
RUN echo "INSTALL_CMD = $INSTALL_CMD"
|
||||
RUN $INSTALL_CMD
|
||||
|
||||
RUN [ "$FRAMEWORK" != "pytorch" ] && echo "`deepspeed-testing` installation is skipped" || python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing]
|
||||
|
||||
# Remove `accelerate`: it requires `torch`, and this causes import issues for TF-only testing
|
||||
# We will install `accelerate@main` in Past CI workflow file
|
||||
RUN python3 -m pip uninstall -y accelerate
|
||||
|
||||
# Uninstall `torch-tensorrt` and `apex` shipped with the base image
|
||||
RUN python3 -m pip uninstall -y torch-tensorrt apex
|
||||
|
||||
# Pre-build **nightly** release of DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout)
|
||||
RUN python3 -m pip uninstall -y deepspeed
|
||||
# This has to be run inside the GPU VMs running the tests. (So far, it fails here due to GPU checks during compilation.)
|
||||
# Issue: https://github.com/deepspeedai/DeepSpeed/issues/2010
|
||||
# RUN git clone https://github.com/deepspeedai/DeepSpeed && cd DeepSpeed && rm -rf build && \
|
||||
# DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
|
||||
|
||||
RUN python3 -m pip install -U "itsdangerous<2.1.0"
|
||||
|
||||
# When installing in editable mode, `transformers` is not recognized as a package.
|
||||
# this line must be added in order for python to be aware of transformers.
|
||||
RUN cd transformers && python3 setup.py develop
|
@ -20,14 +20,9 @@ WORKDIR /
|
||||
ADD https://api.github.com/repos/huggingface/transformers/git/refs/heads/main version.json
|
||||
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
|
||||
|
||||
# On ROCm, torchcodec is required to decode audio files
|
||||
# RUN python3 -m pip install --no-cache-dir torchcodec
|
||||
# Install transformers
|
||||
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch,testing,video,audio]
|
||||
|
||||
# Remove tensorflow and flax as they are no longer supported by transformers
|
||||
RUN python3 -m pip uninstall -y tensorflow flax
|
||||
|
||||
# When installing in editable mode, `transformers` is not recognized as a package.
|
||||
# this line must be added in order for python to be aware of transformers.
|
||||
RUN cd transformers && python3 setup.py develop
|
||||
@ -37,3 +32,6 @@ RUN python3 -m pip uninstall py3nvml pynvml nvidia-ml-py apex -y
|
||||
|
||||
# `kernels` may causes many failing tests
|
||||
RUN python3 -m pip uninstall -y kernels
|
||||
|
||||
# On ROCm, torchcodec is required to decode audio files, and versions 0.4 and 0.6 fail
|
||||
RUN python3 -m pip install --no-cache-dir "torchcodec==0.5"
|
||||
|
@ -25,8 +25,6 @@ RUN [ ${#PYTORCH} -gt 0 ] && VERSION='torch=='$PYTORCH'.*' || VERSION='torch';
|
||||
RUN [ ${#TORCH_VISION} -gt 0 ] && VERSION='torchvision=='TORCH_VISION'.*' || VERSION='torchvision'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
|
||||
RUN [ ${#TORCH_AUDIO} -gt 0 ] && VERSION='torchaudio=='TORCH_AUDIO'.*' || VERSION='torchaudio'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA
|
||||
|
||||
RUN python3 -m pip uninstall -y tensorflow flax
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract
|
||||
RUN python3 -m pip install -U "itsdangerous<2.1.0"
|
||||
|
||||
|
@ -1,25 +0,0 @@
|
||||
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu22.04
|
||||
LABEL maintainer="Hugging Face"
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
RUN apt update
|
||||
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip
|
||||
|
||||
ARG REF=main
|
||||
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
|
||||
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-tensorflow,testing]
|
||||
|
||||
# If set to nothing, will install the latest version
|
||||
ARG TENSORFLOW='2.13'
|
||||
|
||||
RUN [ ${#TENSORFLOW} -gt 0 ] && VERSION='tensorflow=='$TENSORFLOW'.*' || VERSION='tensorflow'; python3 -m pip install --no-cache-dir -U $VERSION
|
||||
RUN python3 -m pip uninstall -y torch flax
|
||||
RUN python3 -m pip install -U "itsdangerous<2.1.0"
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir -U "tensorflow_probability<0.22"
|
||||
|
||||
# When installing in editable mode, `transformers` is not recognized as a package.
|
||||
# this line must be added in order for python to be aware of transformers.
|
||||
RUN cd transformers && python3 setup.py develop
|
@ -123,8 +123,6 @@
|
||||
title: تشغيل التدريب على Amazon SageMaker
|
||||
- local: serialization
|
||||
title: التصدير إلى ONNX
|
||||
- local: tflite
|
||||
title: التصدير إلى TFLite
|
||||
- local: torchscript
|
||||
title: التصدير إلى TorchScript
|
||||
- local: notebooks
|
||||
@ -184,8 +182,6 @@
|
||||
# title: التدريب الفعال على وحدة المعالجة المركزية (CPU)
|
||||
# - local: perf_train_cpu_many
|
||||
# title: التدريب الموزع لوحدة المعالجة المركزية (CPU)
|
||||
# - local: perf_train_tpu_tf
|
||||
# title: التدريب على (TPU) باستخدام TensorFlow
|
||||
# - local: perf_train_special
|
||||
# title: تدريب PyTorch على Apple silicon
|
||||
# - local: perf_hardware
|
||||
@ -203,8 +199,6 @@
|
||||
# title: إنشاء نموذج كبير
|
||||
# - local: debugging
|
||||
# title: تصحيح الأخطاء البرمجية
|
||||
# - local: tf_xla
|
||||
# title: تكامل XLA لنماذج TensorFlow
|
||||
# - local: perf_torch_compile
|
||||
# title: تحسين الاستدلال باستخدام `torch.compile()`
|
||||
# title: الأداء وقابلية التوسع
|
||||
@ -260,8 +254,6 @@
|
||||
# title: التكوين
|
||||
# - local: main_classes/data_collator
|
||||
# title: مجمع البيانات
|
||||
# - local: main_classes/keras_callbacks
|
||||
# title: استدعاءات Keras
|
||||
# - local: main_classes/logging
|
||||
# title: التسجيل
|
||||
# - local: main_classes/model
|
||||
|
@ -39,7 +39,6 @@
|
||||
| [كيفية ضبط نموذج بدقة على التلخيص](https://github.com/huggingface/notebooks/blob/main/examples/summarization.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على XSUM. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/summarization.ipynb)|
|
||||
| [كيفية تدريب نموذج لغة من البداية](https://github.com/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)| تسليط الضوء على جميع الخطوات لتدريب نموذج Transformer بشكل فعال على بيانات مخصصة | [](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)|
|
||||
| [كيفية إنشاء نص](https://github.com/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)| كيفية استخدام أساليب فك التشفير المختلفة لإنشاء اللغة باستخدام المحولات | [](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)|
|
||||
| [كيفية إنشاء نص (مع قيود)](https://github.com/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)| كيفية توجيه إنشاء اللغة باستخدام القيود التي يوفرها المستخدم | [](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)|
|
||||
| [Reformer](https://github.com/huggingface/blog/blob/main/notebooks/03_reformer.ipynb)| كيف يدفع Reformer حدود النمذجة اللغوية | [](https://colab.research.google.com/github/patrickvonplaten/blog/blob/main/notebooks/03_reformer.ipynb)| [](https://studiolab.sagemaker.aws/import/github/patrickvonplaten/blog/blob/main/notebooks/03_reformer.ipynb)|
|
||||
|
||||
#### رؤية الكمبيوتر[[pytorch-cv]]
|
||||
|
@ -1,40 +0,0 @@
|
||||
# التصدير إلى TFLite
|
||||
|
||||
[TensorFlow Lite](https://www.tensorflow.org/lite/guide) هو إطار عمل خفيف الوزن لنشر نماذج التعلم الآلي على الأجهزة المحدودة الموارد، مثل الهواتف المحمولة، والأنظمة المدمجة، وأجهزة إنترنت الأشياء (IoT). تم تصميم TFLite لتشغيل النماذج وتحسينها بكفاءة على هذه الأجهزة ذات الطاقة الحاسوبية والذاكرة واستهلاك الطاقة المحدودة.
|
||||
|
||||
يُمثَّل نموذج TensorFlow Lite بتنسيق محمول فعال خاص يُعرَّف بامتداد الملف `.tflite`.
|
||||
|
||||
🤗 Optimum يقدم وظيفة لتصدير نماذج 🤗 Transformers إلى TFLite من خلال الوحدة النمطية `exporters.tflite`. بالنسبة لقائمة هندسات النماذج المدعومة، يرجى الرجوع إلى [وثائق 🤗 Optimum](https://huggingface.co/docs/optimum/exporters/tflite/overview).
|
||||
|
||||
لتصدير نموذج إلى TFLite، قم بتثبيت متطلبات البرنامج المطلوبة:
|
||||
|
||||
```bash
|
||||
pip install optimum[exporters-tf]
|
||||
```
|
||||
|
||||
للاطلاع على جميع المعاملات المتاحة، راجع [وثائق 🤗 Optimum](https://huggingface.co/docs/optimum/main/en/exporters/tflite/usage_guides/export_a_model)، أو اعرض المساعدة في سطر الأوامر:
|
||||
|
||||
```bash
|
||||
optimum-cli export tflite --help
|
||||
```
|
||||
|
||||
لتصدير نسخة نموذج من 🤗 Hub، على سبيل المثال `google-bert/bert-base-uncased`، قم بتشغيل الأمر التالي:
|
||||
|
||||
```bash
|
||||
optimum-cli export tflite --model google-bert/bert-base-uncased --sequence_length 128 bert_tflite/
|
||||
```
|
||||
|
||||
ستظهر لك السجلات التي تُبيّن التقدم وموقع حفظ ملف `model.tflite` الناتج، كما في المثال التالي:
|
||||
|
||||
```bash
|
||||
Validating TFLite model...
|
||||
-[✓] TFLite model output names match reference model (logits)
|
||||
- Validating TFLite Model output "logits":
|
||||
-[✓] (1, 128, 30522) matches (1, 128, 30522)
|
||||
-[x] values not close enough, max diff: 5.817413330078125e-05 (atol: 1e-05)
|
||||
The TensorFlow Lite export succeeded with the warning: The maximum absolute difference between the output of the reference model and the TFLite exported model is not within the set tolerance 1e-05:
|
||||
- logits: max diff = 5.817413330078125e-05.
|
||||
The exported model was saved at: bert_tflite
|
||||
```
|
||||
|
||||
يُبيّن المثال أعلاه كيفية تصدير نسخة نموذج من 🤗 Hub. عند تصدير نموذج محلي، تأكد أولاً من حفظ ملفات أوزان النموذج والمجزئ اللغوي في نفس المسار (`local_path`). عند استخدام CLI، قم بتمرير `local_path` إلى معامل `model` بدلاً من اسم النسخة على 🤗 Hub.
|
@ -199,6 +199,8 @@
|
||||
title: HIGGS
|
||||
- local: quantization/hqq
|
||||
title: HQQ
|
||||
- local: quantization/mxfp4
|
||||
title: MXFP4
|
||||
- local: quantization/optimum
|
||||
title: Optimum
|
||||
- local: quantization/quanto
|
||||
@ -218,8 +220,6 @@
|
||||
sections:
|
||||
- local: serialization
|
||||
title: ONNX
|
||||
- local: tflite
|
||||
title: LiteRT
|
||||
- local: executorch
|
||||
title: ExecuTorch
|
||||
- local: torchscript
|
||||
@ -277,6 +277,8 @@
|
||||
title: Keypoint detection
|
||||
- local: tasks/knowledge_distillation_for_image_classification
|
||||
title: Knowledge Distillation for Computer Vision
|
||||
- local: tasks/keypoint_matching
|
||||
title: Keypoint matching
|
||||
title: Computer vision
|
||||
- sections:
|
||||
- local: tasks/image_captioning
|
||||
@ -332,8 +334,6 @@
|
||||
title: Configuration
|
||||
- local: main_classes/data_collator
|
||||
title: Data Collator
|
||||
- local: main_classes/keras_callbacks
|
||||
title: Keras callbacks
|
||||
- local: main_classes/logging
|
||||
title: Logging
|
||||
- local: main_classes/model
|
||||
@ -407,6 +407,8 @@
|
||||
title: Blenderbot Small
|
||||
- local: model_doc/bloom
|
||||
title: BLOOM
|
||||
- local: model_doc/blt
|
||||
title: BLT
|
||||
- local: model_doc/bort
|
||||
title: BORT
|
||||
- local: model_doc/byt5
|
||||
@ -481,6 +483,8 @@
|
||||
title: FLAN-UL2
|
||||
- local: model_doc/flaubert
|
||||
title: FlauBERT
|
||||
- local: model_doc/flex_olmo
|
||||
title: FlexOlmo
|
||||
- local: model_doc/fnet
|
||||
title: FNet
|
||||
- local: model_doc/fsmt
|
||||
@ -549,12 +553,16 @@
|
||||
title: LED
|
||||
- local: model_doc/lfm2
|
||||
title: LFM2
|
||||
- local: model_doc/lfm2_vl
|
||||
title: LFM2-VL
|
||||
- local: model_doc/llama
|
||||
title: LLaMA
|
||||
- local: model_doc/llama2
|
||||
title: Llama2
|
||||
- local: model_doc/llama3
|
||||
title: Llama3
|
||||
- local: model_doc/longcat_flash
|
||||
title: LongCatFlash
|
||||
- local: model_doc/longformer
|
||||
title: Longformer
|
||||
- local: model_doc/longt5
|
||||
@ -583,6 +591,8 @@
|
||||
title: MegatronGPT2
|
||||
- local: model_doc/minimax
|
||||
title: MiniMax
|
||||
- local: model_doc/ministral
|
||||
title: Ministral
|
||||
- local: model_doc/mistral
|
||||
title: Mistral
|
||||
- local: model_doc/mixtral
|
||||
@ -621,6 +631,8 @@
|
||||
title: OLMo
|
||||
- local: model_doc/olmo2
|
||||
title: OLMo2
|
||||
- local: model_doc/olmo3
|
||||
title: Olmo3
|
||||
- local: model_doc/olmoe
|
||||
title: OLMoE
|
||||
- local: model_doc/open-llama
|
||||
@ -655,6 +667,8 @@
|
||||
title: Qwen3
|
||||
- local: model_doc/qwen3_moe
|
||||
title: Qwen3MoE
|
||||
- local: model_doc/qwen3_next
|
||||
title: Qwen3Next
|
||||
- local: model_doc/rag
|
||||
title: RAG
|
||||
- local: model_doc/realm
|
||||
@ -703,6 +717,8 @@
|
||||
title: UL2
|
||||
- local: model_doc/umt5
|
||||
title: UMT5
|
||||
- local: model_doc/vaultgemma
|
||||
title: VaultGemma
|
||||
- local: model_doc/xmod
|
||||
title: X-MOD
|
||||
- local: model_doc/xglm
|
||||
@ -1119,6 +1135,10 @@
|
||||
title: Qwen2Audio
|
||||
- local: model_doc/qwen2_vl
|
||||
title: Qwen2VL
|
||||
- local: model_doc/qwen3_vl
|
||||
title: Qwen3VL
|
||||
- local: model_doc/qwen3_vl_moe
|
||||
title: Qwen3VLMoe
|
||||
- local: model_doc/sam2
|
||||
title: SAM2
|
||||
- local: model_doc/sam2_video
|
||||
|
@ -85,7 +85,7 @@ When you use Transformers' [`Cache`] class, the self-attention module performs s
|
||||
|
||||
Caches are structured as a list of layers, where each layer contains a key and value cache. The key and value caches are tensors with the shape `[batch_size, num_heads, seq_len, head_dim]`.
|
||||
|
||||
Layers can be of different types (e.g. `DynamicLayer`, `StaticLayer`, `SlidingWindowLayer`), which mostly changes how sequence length is handled and how the cache is updated.
|
||||
Layers can be of different types (e.g. `DynamicLayer`, `StaticLayer`, `StaticSlidingWindowLayer`), which mostly changes how sequence length is handled and how the cache is updated.
|
||||
|
||||
The simplest is a `DynamicLayer` that grows as more tokens are processed. The sequence length dimension (`seq_len`) increases with each new token:
|
||||
|
||||
@ -94,7 +94,7 @@ cache.layers[idx].keys = torch.cat([cache.layers[idx].keys, key_states], dim=-2)
|
||||
cache.layers[idx].values = torch.cat([cache.layers[idx].values, value_states], dim=-2)
|
||||
```
|
||||
|
||||
Other layer types like `StaticLayer` and `SlidingWindowLayer` have a fixed sequence length that is set when the cache is created. This makes them compatible with `torch.compile`. In the case of `SlidingWindowLayer`, existing tokens are shifted out of the cache when a new token is added.
|
||||
Other layer types like `StaticLayer` and `StaticSlidingWindowLayer` have a fixed sequence length that is set when the cache is created. This makes them compatible with `torch.compile`. In the case of `StaticSlidingWindowLayer`, existing tokens are shifted out of the cache when a new token is added.
|
||||
|
||||
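As a rough sketch (not the library's actual implementation) of the shift-out behaviour, assuming a full window of fixed length along `seq_len`:

```py
import torch

# Illustrative only: once the window is full, drop the oldest position and
# append the newest key along the sequence dimension (dim=-2); length stays fixed.
keys = torch.zeros(1, 8, 4, 64)       # [batch_size, num_heads, seq_len, head_dim]
new_key = torch.randn(1, 8, 1, 64)
keys = torch.cat([keys[:, :, 1:, :], new_key], dim=-2)
print(keys.shape)                     # torch.Size([1, 8, 4, 64])
```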
The example below demonstrates how to create a generation loop with [`DynamicCache`]. As discussed, the attention mask is a concatenation of past and current token values and `1` is added to the cache position for the next token.
|
||||
|
||||
|
@ -195,10 +195,6 @@ messages = [
|
||||
|
||||
Pass `messages` to [`~ProcessorMixin.apply_chat_template`] to tokenize the input content. There are a few extra parameters to include in [`~ProcessorMixin.apply_chat_template`] that controls the sampling process.
|
||||
|
||||
The `video_load_backend` parameter refers to a specific framework to load a video. It supports [PyAV](https://pyav.basswood-io.com/docs/stable/), [Decord](https://github.com/dmlc/decord), [OpenCV](https://github.com/opencv/opencv), and [torchvision](https://pytorch.org/vision/stable/index.html).
|
||||
|
||||
The examples below use Decord as the backend because it is a bit faster than PyAV.
|
||||
|
||||
<hfoptions id="sampling">
|
||||
<hfoption id="fixed number of frames">
|
||||
|
||||
@ -213,7 +209,6 @@ processed_chat = processor.apply_chat_template(
|
||||
return_dict=True,
|
||||
return_tensors="pt",
|
||||
num_frames=32,
|
||||
video_load_backend="decord",
|
||||
)
|
||||
print(processed_chat.keys())
|
||||
```
|
||||
@ -223,7 +218,7 @@ These inputs are now ready to be used in [`~GenerationMixin.generate`].
|
||||
</hfoption>
|
||||
<hfoption id="fps">
|
||||
|
||||
For longer videos, it may be better to sample more frames for better representation with the `video_fps` parameter. This determines how many frames per second to extract. As an example, if a video is 10 seconds long and `video_fps=2`, then the model samples 20 frames. In other words, 2 frames are uniformly sampled every 10 seconds.
|
||||
For longer videos, it may be better to sample more frames for better representation with the `fps` parameter. This determines how many frames per second to extract. As an example, if a video is 10 seconds long and `fps=2`, then the model samples 20 frames. In other words, 2 frames are uniformly sampled every 10 seconds.
|
||||
|
||||
```py
|
||||
processed_chat = processor.apply_chat_template(
|
||||
@ -231,8 +226,7 @@ processed_chat = processor.apply_chat_template(
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
video_fps=16,
|
||||
video_load_backend="decord",
|
||||
fps=16,
|
||||
)
|
||||
print(processed_chat.keys())
|
||||
```
|
||||
|
@ -225,28 +225,6 @@ outputs = model.generate(**inputs, assistant_model=assistant_model, tokenizer=to
|
||||
tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a']
|
||||
```
|
||||
### Diverse beam search
|
||||
|
||||
[Diverse beam search](https://hf.co/papers/1610.02424) is a variant of beam search that produces more diverse output candidates to choose from. This strategy measures the dissimilarity of sequences and a penalty is applied if sequences are too similar. To avoid high computation costs, the number of beams is divided into groups.
|
||||
|
||||
Enable diverse beam search with the `num_beams`, `num_beam_groups` and `diversity_penalty` parameters (the `num_beams` parameter should be divisible by `num_beam_groups`).
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device
|
||||
|
||||
device = infer_device()
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
|
||||
inputs = tokenizer("Hugging Face is an open-source company", return_tensors="pt").to(device)
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", dtype=torch.float16).to(device)
|
||||
# cap max_new_tokens explicitly because Llama2's default generation length is 4096
|
||||
outputs = model.generate(**inputs, max_new_tokens=50, num_beams=6, num_beam_groups=3, diversity_penalty=1.0, do_sample=False)
|
||||
tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
'Hugging Face is an open-source company 🤗\nWe are an open-source company. Our mission is to democratize AI and make it accessible to everyone. We believe that AI should be used for the benefit of humanity, not for the benefit of a'
|
||||
```
|
||||
|
||||
|
||||
## Custom generation methods
|
||||
|
||||
|
@ -24,46 +24,23 @@ Transformers works with [PyTorch](https://pytorch.org/get-started/locally/). It
|
||||
|
||||
## Virtual environment
|
||||
|
||||
A virtual environment helps manage different projects and avoids compatibility issues between dependencies. Take a look at the [Install packages in a virtual environment using pip and venv](https://packaging.python.org/en/latest/guides/installing-using-pip-and-virtual-environments/) guide if you're unfamiliar with Python virtual environments.
|
||||
[uv](https://docs.astral.sh/uv/) is an extremely fast Rust-based Python package and project manager and requires a [virtual environment](https://docs.astral.sh/uv/pip/environments/) by default to manage different projects and avoid compatibility issues between dependencies.
|
||||
|
||||
<hfoptions id="virtual">
|
||||
<hfoption id="venv">
|
||||
It can be used as a drop-in replacement for [pip](https://pip.pypa.io/en/stable/), but if you prefer to use pip, remove `uv` from the commands below.
|
||||
|
||||
Create and activate a virtual environment in your project directory with [venv](https://docs.python.org/3/library/venv.html).
|
||||
> [!TIP]
|
||||
> Refer to the uv [installation](https://docs.astral.sh/uv/guides/install-python/) docs to install uv.
|
||||
|
||||
```bash
|
||||
python -m venv .env
|
||||
source .env/bin/activate
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="uv">
|
||||
|
||||
[uv](https://docs.astral.sh/uv/) is a fast Rust-based Python package and project manager.
|
||||
Create a virtual environment to install Transformers in.
|
||||
|
||||
```bash
|
||||
uv venv .env
|
||||
source .env/bin/activate
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## Python
|
||||
|
||||
You can install Transformers with pip or uv.
|
||||
|
||||
<hfoptions id="install">
|
||||
<hfoption id="pip">
|
||||
|
||||
[pip](https://pip.pypa.io/en/stable/) is a package installer for Python. Install Transformers with pip in your newly created virtual environment.
|
||||
|
||||
```bash
|
||||
pip install transformers
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="uv">
|
||||
Install Transformers with the following command.
|
||||
|
||||
[uv](https://docs.astral.sh/uv/) is a fast Rust-based Python package and project manager.
|
||||
|
||||
@ -71,9 +48,6 @@ pip install transformers
|
||||
uv pip install transformers
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
For GPU acceleration, install the appropriate CUDA drivers for [PyTorch](https://pytorch.org/get-started/locally).
|
||||
|
||||
Run the command below to check if your system detects an NVIDIA GPU.
|
||||
@ -82,11 +56,11 @@ Run the command below to check if your system detects an NVIDIA GPU.
|
||||
nvidia-smi
|
||||
```
|
||||
|
||||
To install a CPU-only version of Transformers and a machine learning framework, run the following command.
|
||||
To install a CPU-only version of Transformers, run the following command.
|
||||
|
||||
```bash
|
||||
pip install 'transformers[torch]'
|
||||
uv pip install 'transformers[torch]'
|
||||
uv pip install torch --index-url https://download.pytorch.org/whl/cpu
|
||||
uv pip install transformers
|
||||
```
|
||||
|
||||
Test whether the install was successful with the following command. It should return a label and score for the provided text.
|
||||
@ -105,7 +79,7 @@ The downside is that the latest version may not always be stable. If you encount
|
||||
Install from source with the following command.
|
||||
|
||||
```bash
|
||||
pip install git+https://github.com/huggingface/transformers
|
||||
uv pip install git+https://github.com/huggingface/transformers
|
||||
```
|
||||
|
||||
Check if the install was successful with the command below. It should return a label and score for the provided text.
|
||||
@ -122,7 +96,7 @@ An [editable install](https://pip.pypa.io/en/stable/topics/local-project-install
|
||||
```bash
|
||||
git clone https://github.com/huggingface/transformers.git
|
||||
cd transformers
|
||||
pip install -e .
|
||||
uv pip install -e .
|
||||
```
|
||||
|
||||
> [!WARNING]
|
||||
|
@ -41,10 +41,6 @@ Most of those are only useful if you are studying the general code in the librar
|
||||
|
||||
[[autodoc]] utils.replace_return_docstrings
|
||||
|
||||
## Special Properties
|
||||
|
||||
[[autodoc]] utils.cached_property
|
||||
|
||||
## Other Utilities
|
||||
|
||||
[[autodoc]] utils._LazyModule
|
||||
|
@ -108,9 +108,6 @@ generation.
|
||||
[[autodoc]] ForcedEOSTokenLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] HammingDiversityLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] InfNanRemoveLogitsProcessor
|
||||
- __call__
|
||||
|
||||
@ -219,10 +216,6 @@ A [`Constraint`] can be used to force the generation to include specific tokens
|
||||
- process
|
||||
- finalize
|
||||
|
||||
[[autodoc]] BeamSearchScorer
|
||||
- process
|
||||
- finalize
|
||||
|
||||
[[autodoc]] ConstrainedBeamSearchScorer
|
||||
- process
|
||||
- finalize
|
||||
@ -257,7 +250,7 @@ A [`Constraint`] can be used to force the generation to include specific tokens
|
||||
- update
|
||||
- lazy_initialization
|
||||
|
||||
[[autodoc]] SlidingWindowLayer
|
||||
[[autodoc]] StaticSlidingWindowLayer
|
||||
- update
|
||||
- lazy_initialization
|
||||
|
||||
|
@ -102,7 +102,7 @@ You may want to consider offloading if you have a small GPU and you're getting o
|
||||
Offloading is available for both [`DynamicCache`] and [`StaticCache`]. You can enable it by configuring `cache_implementation="offloaded"` for the dynamic version, or `cache_implementation="offloaded_static"` for the static version, in either [`GenerationConfig`] or [`~GenerationMixin.generate`].
|
||||
Additionally, you can also instantiate your own [`DynamicCache`] or [`StaticCache`] with the `offloading=True` option, and pass this cache in `generate` or your model's `forward` (for example, `past_key_values=DynamicCache(config=model.config, offloading=True)` for a dynamic cache).
|
||||
|
||||
Note that the 2 [`Cache`] classes mentionned above have an additional option when instantiating them directly, `offload_only_non_sliding`.
|
||||
Note that the 2 [`Cache`] classes mentioned above have an additional option when instantiating them directly, `offload_only_non_sliding`.
|
||||
This additional argument decides if the layers using sliding window/chunk attention (if any), will be offloaded as well. Since
|
||||
these layers are usually short anyway, it may be better to avoid offloading them, as offloading may incur a speed penalty. By default, this option is `False` for [`DynamicCache`], and `True` for [`StaticCache`].
|
||||
|
||||
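As a hedged sketch of the second approach (the `model` and `inputs` names are assumed to already exist), an offloaded dynamic cache can be built up front and passed to `generate`:

```py
from transformers import DynamicCache

# Offloaded dynamic cache; see `offload_only_non_sliding` to control whether
# sliding-window layers are offloaded too.
past_key_values = DynamicCache(config=model.config, offloading=True)
outputs = model.generate(**inputs, past_key_values=past_key_values, max_new_tokens=32)
```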
@ -146,7 +146,7 @@ tokenizer = AutoTokenizer.from_pretrained(ckpt)
|
||||
model = AutoModelForCausalLM.from_pretrained(ckpt, dtype=torch.float16, device_map="auto")
|
||||
prompt = ["okay "*1000 + "Fun fact: The most"]
|
||||
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
|
||||
beams = { "num_beams": 40, "num_beam_groups": 40, "num_return_sequences": 40, "diversity_penalty": 1.0, "max_new_tokens": 23, "early_stopping": True, }
|
||||
beams = { "num_beams": 40, "num_return_sequences": 20, "max_new_tokens": 23, "early_stopping": True, }
|
||||
out = resilient_generate(model, **inputs, **beams)
|
||||
responses = tokenizer.batch_decode(out[:,-28:], skip_special_tokens=True)
|
||||
```
|
||||
|
@ -183,36 +183,6 @@ text
|
||||
'My favorite all time favorite condiment is ketchup. I love it on everything. I love it on my eggs, my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my p']
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="3. compile entire generate function">
|
||||
|
||||
Compiling the entire [`~GenerationMixin.generate`] function also compiles the input preparation logit processor operations, and more, in addition to the forward pass. With this approach, you don't need to initialize [`StaticCache`] or set the [cache_implementation](https://hf.co/docs/transformers/main_classes/text_generation#transformers.GenerationConfig.cache_implementation) parameter.
|
||||
|
||||
```py
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
import torch
|
||||
import os
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false" # To prevent long warnings :)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
|
||||
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", dtype="auto", device_map="auto")
|
||||
|
||||
model.generate = torch.compile(model.generate, mode="reduce-overhead", fullgraph=True)
|
||||
input_text = "The theory of special relativity states "
|
||||
input_ids = tokenizer(input_text, return_tensors="pt").to(model.device.type)
|
||||
|
||||
outputs = model.generate(**input_ids)
|
||||
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
||||
['The theory of special relativity states 1. The speed of light is constant in all inertial reference']
|
||||
```
|
||||
|
||||
This usage pattern is more appropriate for unique hardware or use cases, but there are several drawbacks to consider.
|
||||
|
||||
1. Compilation is much slower.
|
||||
2. Parameters must be configured through [`GenerationConfig`].
|
||||
3. Many warnings and exceptions are suppressed. We recommend testing the uncompiled model first.
|
||||
4. Many features are unavailable at the moment. For example, generation does not stop if an `EOS` token is selected.
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
|
@ -1,28 +0,0 @@
|
||||
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Keras callbacks
|
||||
|
||||
When training a Transformers model with Keras, there are some library-specific callbacks available to automate common
|
||||
tasks:
|
||||
|
||||
## KerasMetricCallback
|
||||
|
||||
[[autodoc]] KerasMetricCallback
|
||||
|
||||
## PushToHubCallback
|
||||
|
||||
[[autodoc]] PushToHubCallback
|
@ -29,32 +29,62 @@ The `.optimization` module provides:
|
||||
|
||||
## Schedules
|
||||
|
||||
### Learning Rate Schedules
|
||||
### SchedulerType
|
||||
|
||||
[[autodoc]] SchedulerType
|
||||
|
||||
### get_scheduler
|
||||
|
||||
[[autodoc]] get_scheduler
|
||||
|
||||
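As a quick, hedged usage sketch (toy model and illustrative step counts), `get_scheduler` builds any of the schedules below by name:

```py
import torch
from transformers import get_scheduler

model = torch.nn.Linear(10, 10)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
lr_scheduler = get_scheduler("linear", optimizer=optimizer, num_warmup_steps=100, num_training_steps=1000)

for _ in range(1000):
    optimizer.step()
    lr_scheduler.step()
```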
### get_constant_schedule
|
||||
|
||||
[[autodoc]] get_constant_schedule
|
||||
|
||||
### get_constant_schedule_with_warmup
|
||||
|
||||
[[autodoc]] get_constant_schedule_with_warmup
|
||||
|
||||
<img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_constant_schedule.png"/>
|
||||
|
||||
### get_cosine_schedule_with_warmup
|
||||
|
||||
[[autodoc]] get_cosine_schedule_with_warmup
|
||||
|
||||
<img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_schedule.png"/>
|
||||
|
||||
### get_cosine_with_hard_restarts_schedule_with_warmup
|
||||
|
||||
[[autodoc]] get_cosine_with_hard_restarts_schedule_with_warmup
|
||||
|
||||
### get_cosine_with_min_lr_schedule_with_warmup
|
||||
|
||||
[[autodoc]] get_cosine_with_min_lr_schedule_with_warmup
|
||||
|
||||
### get_cosine_with_min_lr_schedule_with_warmup_lr_rate
|
||||
|
||||
[[autodoc]] get_cosine_with_min_lr_schedule_with_warmup_lr_rate
|
||||
|
||||
<img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_cosine_hard_restarts_schedule.png"/>
|
||||
|
||||
### get_linear_schedule_with_warmup
|
||||
|
||||
[[autodoc]] get_linear_schedule_with_warmup
|
||||
|
||||
<img alt="" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/warmup_linear_schedule.png"/>
|
||||
|
||||
### get_polynomial_decay_schedule_with_warmup
|
||||
|
||||
[[autodoc]] get_polynomial_decay_schedule_with_warmup
|
||||
|
||||
### get_inverse_sqrt_schedule
|
||||
|
||||
[[autodoc]] get_inverse_sqrt_schedule
|
||||
|
||||
### get_reduce_on_plateau_schedule
|
||||
|
||||
[[autodoc]] get_reduce_on_plateau_schedule
|
||||
|
||||
### get_wsd_schedule
|
||||
|
||||
[[autodoc]] get_wsd_schedule
|
||||
|
@ -13,6 +13,9 @@ specific language governing permissions and limitations under the License.
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
*This model was released on 2025-09-02 and added to Hugging Face Transformers on 2025-08-28.*
|
||||
|
||||
# Apertus
|
||||
|
||||
<div style="float: right;">
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
@ -23,7 +26,7 @@ rendered properly in your Markdown viewer.
|
||||
</div>
|
||||
</div>
|
||||
|
||||
# Apertus
|
||||
## Overview
|
||||
|
||||
[Apertus](https://www.swiss-ai.org) is a family of large language models from the Swiss AI Initiative.
|
||||
|
||||
|
97
docs/source/en/model_doc/blt.md
Normal file
97
docs/source/en/model_doc/blt.md
Normal file
@ -0,0 +1,97 @@
|
||||
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
<div style="float: right;">
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||
<img alt="Flax" src="https://img.shields.io/badge/Flax-29a79b.svg?style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAC0AAAAtCAMAAAANxBKoAAAC7lBMVEUAAADg5vYHPVgAoJH+/v76+v39/f9JbLP///9+AIgAnY3///+mcqzt8fXy9fgkXa3Ax9709fr+///9/f8qXq49qp5AaLGMwrv8/P0eW60VWawxYq8yqJzG2dytt9Wyu9elzci519Lf3O3S2efY3OrY0+Xp7PT///////+dqNCexMc6Z7AGpJeGvbenstPZ5ejQ1OfJzOLa7ejh4+/r8fT29vpccbklWK8PVa0AS6ghW63O498vYa+lsdKz1NDRt9Kw1c672tbD3tnAxt7R6OHp5vDe7OrDyuDn6vLl6/EAQKak0MgATakkppo3ZK/Bz9y8w9yzu9jey97axdvHzeG21NHH4trTwthKZrVGZLSUSpuPQJiGAI+GAI8SWKydycLL4d7f2OTi1+S9xNzL0ePT6OLGzeEAo5U0qJw/aLEAo5JFa7JBabEAp5Y4qZ2QxLyKmsm3kL2xoMOehrRNb7RIbbOZgrGre68AUqwAqZqNN5aKJ5N/lMq+qsd8kMa4pcWzh7muhLMEV69juq2kbKqgUaOTR5uMMZWLLZSGAI5VAIdEAH+ovNDHuNCnxcy3qcaYx8K8msGplrx+wLahjbYdXrV6vbMvYK9DrZ8QrZ8tqJuFms+Sos6sw8ecy8RffsNVeMCvmb43aLltv7Q4Y7EZWK4QWa1gt6meZKUdr6GOAZVeA4xPAISyveLUwtivxtKTpNJ2jcqfvcltiMiwwcfAoMVxhL+Kx7xjdrqTe60tsaNQs6KaRKACrJ6UTZwkqpqTL5pkHY4AloSgsd2ptNXPvNOOncuxxsqFl8lmg8apt8FJcr9EbryGxLqlkrkrY7dRa7ZGZLQ5t6iXUZ6PPpgVpZeJCJFKAIGareTa0+KJod3H0deY2M+esM25usmYu8d2zsJOdcBVvrCLbqcAOaaHaKQAMaScWqKBXqCXMJ2RHpiLF5NmJZAdAHN2kta11dKu1M+DkcZLdb+Mcql3TppyRJdzQ5ZtNZNlIY+DF4+voCOQAAAAZ3RSTlMABAT+MEEJ/RH+/TP+Zlv+pUo6Ifz8+fco/fz6+evr39S9nJmOilQaF/7+/f38+smmoYp6b1T+/v7++vj189zU0tDJxsGzsrKSfv34+Pf27dDOysG9t6+n/vv6+vr59uzr1tG+tZ6Qg9Ym3QAABR5JREFUSMeNlVVUG1EQhpcuxEspXqS0SKEtxQp1d3d332STTRpIQhIISQgJhODu7lAoDoUCpe7u7u7+1puGpqnCPOyZvffbOXPm/PsP9JfQgyCC+tmTABTOcbxDz/heENS7/1F+9nhvkHePG0wNDLbGWwdXL+rbLWvpmZHXD8+gMfBjTh+aSe6Gnn7lwQIOTR0c8wfX3PWgv7avbdKwf/ZoBp1Gp/PvuvXW3vw5ib7emnTW4OR+3D4jB9vjNJ/7gNvfWWeH/TO/JyYrsiKCRjVEZA3UB+96kON+DxOQ/NLE8PE5iUYgIXjFnCOlxEQMaSGVxjg4gxOnEycGz8bptuNjVx08LscIgrzH3umcn+KKtiBIyvzOO2O99aAdR8cF19oZalnCtvREUw79tCd5sow1g1UKM6kXqUx4T8wsi3sTjJ3yzDmmhenLXLpo8u45eG5y4Vvbk6kkC4LLtJMowkSQxmk4ggVJEG+7c6QpHT8vvW9X7/o7+3ELmiJi2mEzZJiz8cT6TBlanBk70cB5GGIGC1gRDdZ00yADLW1FL6gqhtvNXNG5S9gdSrk4M1qu7JAsmYshzDS4peoMrU/gT7qQdqYGZaYhxZmVbGJAm/CS/HloWyhRUlknQ9KYcExTwS80d3VNOxUZJpITYyspl0LbhArhpZCD9cRWEQuhYkNGMHToQ/2Cs6swJlb39CsllxdXX6IUKh/H5jbnSsPKjgmoaFQ1f8wRLR0UnGE/RcDEjj2jXG1WVTwUs8+zxfcrVO+vSsuOpVKxCfYZiQ0/aPKuxQbQ8lIz+DClxC8u+snlcJ7Yr1z1JPqUH0V+GDXbOwAib931Y4Imaq0NTIXPXY+N5L18GJ37SVWu+hwXff8l72Ds9XuwYIBaXPq6Shm4l+Vl/5QiOlV+uTk6YR9PxKsI9xNJny31ygK1e+nIRC1N97EGkFPI+jCpiHe5PCEy7oWqWSwRrpOvhFzcbTWMbm3ZJAOn1rUKpYIt/lDhW/5RHHteeWFN60qo98YJuoq1nK3uW5AabyspC1BcIEpOhft+SZAShYoLSvnmSfnYADUERP5jJn2h5XtsgCRuhYQqAvwTwn33+YWEKUI72HX5AtfSAZDe8F2DtPPm77afhl0EkthzuCQU0BWApgQIH9+KB0JhopMM7bJrdTRoleM2JAVNMyPF+wdoaz+XJpGoVAQ7WXUkcV7gT3oUZyi/ISIJAVKhgNp+4b4veCFhYVJw4locdSjZCp9cPUhLF9EZ3KKzURepMEtCDPP3VcWFx4UIiZIklIpFNfHpdEafIF2aRmOcrUmjohbT2WUllbmRvgfbythbQO3222fpDJoufaQPncYYuqoGtUEsCJZL6/3PR5b4syeSjZMQG/T2maGANlXT2v8S4AULWaUkCxfLyW8iW4kdka+nEMjxpL2NCwsYNBp+Q61PF43zyDg9Bm9+3NNySn78jMZUUkumqE4Gp7JmFOdP1vc8PpRrzj9+wPinCy8K1PiJ4aYbnTYpCCbDkBSbzhu2QJ1Gd82t8jI8TH51+OzvXoWbnXUOBkNW+0mWFwGcGOUVpU81/n3TOHb5oMt2FgYGjzau0Nif0Ss7Q3XB33hjjQHjHA5E5aOyIQc8CBrLdQSs3j92VG+3nNEjbkbdbBr9zm04ruvw37vh0QKOdeGIkckc80fX3KH/h7PT4BOjgCty8VZ5ux1MoO5Cf5naca2LAsEgehI+drX8o/0Nu+W0m6K/I9gGPd/dfx/EN/wN62AhsBWuAAAAAElFTkSuQmCC
|
||||
">
|
||||
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
# Byte Latent Transformer (BLT)
|
||||
|
||||
## Overview
|
||||
|
||||
The BLT model was proposed in [Byte Latent Transformer: Patches Scale Better Than Tokens](https://arxiv.org/pdf/2412.09871) by Artidoro Pagnoni, Ram Pasunuru, Pedro Rodriguez, John Nguyen, Benjamin Muller, Margaret Li, Chunting Zhou, Lili Yu, Jason Weston, Luke Zettlemoyer, Gargi Ghosh, Mike Lewis, Ari Holtzman, Srinivasan Iyer.
|
||||
BLT is a byte-level LLM that achieves tokenization-level performance through entropy-based dynamic patching.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*We introduce the Byte Latent Transformer (BLT), a new byte-level LLM architecture that, for the first time, matches tokenization-based LLM performance at scale with significant improvements in inference
|
||||
efficiency and robustness. BLT encodes bytes into dynamically sized patches, which serve as the primary units of computation. Patches are segmented based on the entropy of the next byte, allocating
|
||||
more compute and model capacity where increased data complexity demands it. We present the first flop controlled scaling study of byte-level models up to 8B parameters and 4T training bytes. Our results demonstrate the feasibility of scaling models trained on raw bytes without a fixed vocabulary. Both training and inference efficiency improve due to dynamically selecting long patches when data is predictable, along with qualitative improvements on reasoning and long tail generalization. Overall, for fixed inference costs, BLT shows significantly better scaling than tokenization-based models, by simultaneously growing both patch and model size.*
|
||||
|
||||
## Usage Tips:
|
||||
|
||||
- **Dual Model Architecture**: BLT consists of two separate trained models:
|
||||
- **Patcher (Entropy Model)**: A smaller transformer model that predicts byte-level entropy to determine patch boundaries and segment input.
|
||||
- **Main Transformer Model**: The primary model that processes the patches through a Local Encoder, Global Transformer, and Local Decoder.
|
||||
|
||||
- **Dynamic Patching**: The model uses entropy-based dynamic patching (see the sketch after this list) where:
|
||||
- High-entropy regions (complex data) get shorter patches with more computational attention
|
||||
- Low-entropy regions (predictable data) get longer patches for efficiency
|
||||
- This allows the model to allocate compute resources where they're most needed
|
||||
|
||||
- **Local Encoder**: Processes byte sequences with cross-attention to patch embeddings
|
||||
- **Global Transformer**: Processes patch-level representations with full attention across patches
|
||||
- **Local Decoder**: Generates output with cross-attention back to the original byte sequence
|
||||
|
||||
- **Byte-Level Tokenizer**: Unlike traditional tokenizers that use learned vocabularies, BLT's tokenizer simply converts text to UTF-8 bytes and maps each byte to a token ID. There is no need for a vocabulary.
|
||||
|
||||
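The dynamic-patching bullet above can be illustrated with a small, purely hypothetical sketch of entropy-thresholded patch boundaries (the entropy values and threshold are made up; the real patcher is a trained entropy model):

```python
import torch

# Toy next-byte entropies for a 7-byte input; a new patch starts wherever entropy spikes.
entropies = torch.tensor([0.2, 0.3, 2.1, 0.4, 0.5, 1.9, 0.1])
threshold = 1.5
starts = [0] + (entropies > threshold).nonzero(as_tuple=True)[0].tolist()
patches = list(zip(starts, starts[1:] + [len(entropies)]))
print(patches)  # [(0, 2), (2, 5), (5, 7)]
```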
The model can be loaded via:
|
||||
|
||||
<hfoption id="AutoModel">
|
||||
|
||||
```python
|
||||
import torch
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("itazap/blt-1b-hf")
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"itazap/blt-1b-hf",
|
||||
device_map="auto",
|
||||
)
|
||||
|
||||
prompt = "my name is"
|
||||
|
||||
prompt = "my name is"
|
||||
generated_ids = model.generate(
|
||||
**inputs, max_new_tokens=NUM_TOKENS_TO_GENERATE, do_sample=False, use_cache=False
|
||||
)
|
||||
|
||||
print(tokenizer.decode(generated_ids[0]))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
|
||||
This model was contributed by [itazap](https://huggingface.co/itazap).
|
||||
The original code can be found [here](<https://github.com/facebookresearch/blt>).
|
||||
|
||||
|
||||
## BltConfig
|
||||
|
||||
[[autodoc]] BltConfig
|
||||
|
||||
## BltModel

[[autodoc]] BltModel
|
||||
- forward
|
||||
|
||||
## BltForCausalLM
|
||||
|
||||
[[autodoc]] BltForCausalLM
|
||||
- forward
|
@ -188,3 +188,8 @@ error, it means NCCL was probably not loaded.
|
||||
|
||||
[[autodoc]] DeepseekV3ForSequenceClassification
|
||||
- forward
|
||||
|
||||
## DeepseekV3ForTokenClassification
|
||||
|
||||
[[autodoc]] DeepseekV3ForTokenClassification
|
||||
- forward
|
@ -148,6 +148,14 @@ processed_outputs = processor.post_process_keypoint_matching(outputs, image_size
|
||||
- post_process_keypoint_matching
|
||||
- visualize_keypoint_matching
|
||||
|
||||
## EfficientLoFTRImageProcessorFast
|
||||
|
||||
[[autodoc]] EfficientLoFTRImageProcessorFast
|
||||
|
||||
- preprocess
|
||||
- post_process_keypoint_matching
|
||||
- visualize_keypoint_matching
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
## EfficientLoFTRModel
|
||||
|
139
docs/source/en/model_doc/flex_olmo.md
Normal file
139
docs/source/en/model_doc/flex_olmo.md
Normal file
@ -0,0 +1,139 @@
|
||||
<!--Copyright 2025 the HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
*This model was released on 2025-07-09 and added to Hugging Face Transformers on 2025-09-15.*
|
||||
<div style="float: right;">
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
# FlexOlmo
|
||||
|
||||
[FlexOlmo](https://huggingface.co/papers/2507.07024) is a new class of language models (LMs) that supports (1) distributed training without data sharing, where different model parameters are independently trained on closed datasets, and (2) data-flexible inference, where these parameters along with their associated data can be flexibly included or excluded from model inferences with no further training. FlexOlmo employs a mixture-of-experts (MoE) architecture where each expert is trained independently on closed datasets and later integrated through a new domain-informed routing without any joint training. FlexOlmo is trained on FlexMix, a corpus curated by the authors that combines publicly available datasets with seven domain-specific sets, representing realistic approximations of closed sets.
|
||||
|
||||
You can find all the original FlexOlmo checkpoints under the [FlexOlmo](https://huggingface.co/collections/allenai/flexolmo-68471177a386b6e20a54c55f) collection.
|
||||
|
||||
> [!TIP]
|
||||
> Click on the FlexOlmo models in the right sidebar for more examples of how to apply FlexOlmo to different language tasks.
|
||||
|
||||
The example below demonstrates how to generate text with [`Pipeline`], [`AutoModel`] and from the command line.
|
||||
|
||||
<hfoptions id="usage">
|
||||
<hfoption id="Pipeline">
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import pipeline
|
||||
|
||||
pipe = pipeline(
|
||||
task="text-generation",
|
||||
model="allenai/FlexOlmo-7x7B-1T",
|
||||
dtype=torch.bfloat16,
|
||||
device=0,
|
||||
)
|
||||
|
||||
result = pipe("Plants create energy through a process known as")
|
||||
print(result)
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="AutoModel">
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
"allenai/FlexOlmo-7x7B-1T"
|
||||
)
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"allenai/FlexOlmo-7x7B-1T",
|
||||
dtype=torch.bfloat16,
|
||||
device_map="auto",
|
||||
attn_implementation="sdpa"
|
||||
)
|
||||
input_ids = tokenizer("Plants create energy through a process known as", return_tensors="pt").to(model.device)
|
||||
|
||||
output = model.generate(**input_ids, max_length=50, cache_implementation="static")
|
||||
print(tokenizer.decode(output[0], skip_special_tokens=True))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="transformers CLI">
|
||||
|
||||
```bash
|
||||
echo -e "Plants create energy through a process known as" | transformers-cli run --task text-generation --model allenai/FlexOlmo-7x7B-1T --device 0
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.
|
||||
|
||||
The example below uses [torchao](../quantization/torchao) to only quantize the weights to 4-bits.
|
||||
```py
|
||||
|
||||
#pip install torchao
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, TorchAoConfig
|
||||
|
||||
torchao_config = TorchAoConfig(
|
||||
"int4_weight_only",
|
||||
group_size=128
|
||||
)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
"allenai/FlexOlmo-7x7B-1T"
|
||||
)
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"allenai/FlexOlmo-7x7B-1T",
|
||||
quantization_config=torchao_config,
|
||||
dtype=torch.bfloat16,
|
||||
device_map="auto",
|
||||
attn_implementation="sdpa"
|
||||
)
|
||||
input_ids = tokenizer("Plants create energy through a process known as", return_tensors="pt").to(model.device)
|
||||
|
||||
output = model.generate(**input_ids, max_length=50, cache_implementation="static")
|
||||
print(tokenizer.decode(output[0], skip_special_tokens=True))
|
||||
|
||||
```
|
||||
|
||||
|
||||
## FlexOlmoConfig
|
||||
|
||||
[[autodoc]] FlexOlmoConfig
|
||||
|
||||
## FlexOlmoForCausalLM
|
||||
|
||||
[[autodoc]] FlexOlmoForCausalLM
|
||||
|
||||
## FlexOlmoModel
|
||||
|
||||
[[autodoc]] FlexOlmoModel
|
||||
- forward
|
||||
|
||||
## FlexOlmoPreTrainedModel
|
||||
|
||||
[[autodoc]] FlexOlmoPreTrainedModel
|
||||
- forward
|
@ -13,6 +13,9 @@ specific language governing permissions and limitations under the License.
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
*This model was released on 2024-06-16 and added to Hugging Face Transformers on 2025-08-20.*
|
||||
|
||||
# Florence-2
|
||||
|
||||
<div style="float: right;">
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
@ -21,7 +24,7 @@ rendered properly in your Markdown viewer.
|
||||
</div>
|
||||
</div>
|
||||
|
||||
# Florence-2
|
||||
## Overview
|
||||
|
||||
[Florence-2](https://huggingface.co/papers/2311.06242) is an advanced vision foundation model that uses a prompt-based approach to handle a wide range of vision and vision-language tasks. Florence-2 can interpret simple text prompts to perform tasks like captioning, object detection, and segmentation. It leverages the FLD-5B dataset, containing 5.4 billion annotations across 126 million images, to master multi-task learning. The model's sequence-to-sequence architecture enables it to excel in both zero-shot and fine-tuned settings, proving to be a competitive vision foundation model.
|
||||
|
||||
@ -44,7 +47,7 @@ from transformers import pipeline
|
||||
|
||||
pipeline = pipeline(
|
||||
"image-text-to-text",
|
||||
model="ducviet00/Florence-2-base-hf",
|
||||
model="florence-community/Florence-2-base",
|
||||
device=0,
|
||||
dtype=torch.bfloat16
|
||||
)
|
||||
|
@ -273,3 +273,8 @@ visualizer("<img>What is shown in this image?")
|
||||
|
||||
[[autodoc]] Gemma3ForSequenceClassification
|
||||
- forward
|
||||
|
||||
## Gemma3TextForSequenceClassification
|
||||
|
||||
[[autodoc]] Gemma3TextForSequenceClassification
|
||||
- forward
|
||||
|
@ -50,7 +50,7 @@ The `generate()` method can be used to generate text using GPTSAN-Japanese model
|
||||
>>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
|
||||
>>> x_tok = tokenizer("は、", prefix_text="織田信長", return_tensors="pt")
|
||||
>>> torch.manual_seed(0)
|
||||
>>> gen_tok = model.generate(x_tok.input_ids.to(model.device), token_type_ids=x_tok.token_type_ids.to(mdoel.device), max_new_tokens=20)
|
||||
>>> gen_tok = model.generate(x_tok.input_ids.to(model.device), token_type_ids=x_tok.token_type_ids.to(model.device), max_new_tokens=20)
|
||||
>>> tokenizer.decode(gen_tok[0])
|
||||
'織田信長は、2004年に『戦国BASARA』のために、豊臣秀吉'
|
||||
```
|
||||
|
@ -104,6 +104,11 @@ If you're interested in submitting a resource to be included here, please feel f
|
||||
[[autodoc]] ImageGPTImageProcessor
|
||||
- preprocess
|
||||
|
||||
## ImageGPTImageProcessorFast
|
||||
|
||||
[[autodoc]] ImageGPTImageProcessorFast
|
||||
- preprocess
|
||||
|
||||
## ImageGPTModel
|
||||
|
||||
[[autodoc]] ImageGPTModel
|
||||
|
96
docs/source/en/model_doc/lfm2_vl.md
Normal file
96
docs/source/en/model_doc/lfm2_vl.md
Normal file
@ -0,0 +1,96 @@
|
||||
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||
</div>
|
||||
|
||||
# LFM2-VL
|
||||
|
||||
## Overview
|
||||
|
||||
[LFM2-VL](https://www.liquid.ai/blog/lfm2-vl-efficient-vision-language-models) is the first series of vision-language foundation models developed by [Liquid AI](https://liquid.ai/). These multimodal models are designed for low-latency and device-aware deployment. LFM2-VL extends the LFM2 family of open-weight Liquid Foundation Models (LFMs) into the vision-language space, supporting both text and image inputs with variable resolutions.
|
||||
|
||||
## Architecture
|
||||
|
||||
LFM2-VL consists of three main components: a language model backbone, a vision encoder, and a multimodal projector. LFM2-VL builds upon the LFM2 backbone, inheriting from either LFM2-1.2B (for LFM2-VL-1.6B) or LFM2-350M (for LFM2-VL-450M). For the vision tower, LFM2-VL uses SigLIP2 NaFlex encoders to convert input images into token sequences. Two variants are implemented:
|
||||
* Shape-optimized (400M) for more fine-grained vision capabilities for LFM2-VL-1.6B
|
||||
* Base (86M) for fast image processing for LFM2-VL-450M
|
||||
|
||||
The encoder processes images at their native resolution up to 512×512 pixels, efficiently handling smaller images without upscaling and supporting non-standard aspect ratios without distortion. Larger images are split into non-overlapping square patches of 512×512 each, preserving detail. In LFM2-VL-1.6B, the model also receives a thumbnail (a small, downscaled version of the original image capturing the overall scene) to enhance global context understanding and alignment. Special tokens mark each patch’s position and indicate the thumbnail’s start. The multimodal connector is a 2-layer MLP connector with pixel unshuffle to reduce image token count.
|
||||
|
||||
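As a rough, hedged illustration of how pixel unshuffle reduces the number of image tokens (tensor sizes here are arbitrary, not the model's real dimensions):

```python
import torch

# Pixel unshuffle folds a 2x2 spatial neighbourhood into channels, so the number
# of spatial positions (image tokens) drops by a factor of 4.
features = torch.randn(1, 64, 32, 32)                      # [batch, channels, height, width]
folded = torch.nn.functional.pixel_unshuffle(features, 2)  # -> [1, 256, 16, 16]
print(features.shape[-2] * features.shape[-1], "->", folded.shape[-2] * folded.shape[-1])  # 1024 -> 256
```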
## Example
|
||||
|
||||
The following example shows how to generate an answer using the `AutoModelForImageTextToText` class.
|
||||
|
||||
```python
|
||||
from transformers import AutoProcessor, AutoModelForImageTextToText
|
||||
|
||||
# Load model and processor
|
||||
model_id = "LiquidAI/LFM2-VL-1.6B"
|
||||
model = AutoModelForImageTextToText.from_pretrained(
|
||||
model_id,
|
||||
device_map="auto",
|
||||
dtype="bfloat16",
|
||||
)
|
||||
processor = AutoProcessor.from_pretrained(model_id)
|
||||
|
||||
# Load image and create conversation
|
||||
conversation = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image", "image": "https://www.ilankelman.org/stopsigns/australia.jpg"},
|
||||
{"type": "text", "text": "What is in this image?"},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
# Generate answer
|
||||
inputs = processor.apply_chat_template(
|
||||
conversation,
|
||||
add_generation_prompt=True,
|
||||
return_tensors="pt",
|
||||
return_dict=True,
|
||||
tokenize=True,
|
||||
).to(model.device)
|
||||
|
||||
outputs = model.generate(**inputs, max_new_tokens=64)
|
||||
processor.batch_decode(outputs, skip_special_tokens=True)[0]
|
||||
|
||||
```
|
||||
|
||||
## Lfm2VlImageProcessorFast
|
||||
|
||||
[[autodoc]] Lfm2VlImageProcessorFast
|
||||
|
||||
## Lfm2VlProcessor
|
||||
|
||||
[[autodoc]] Lfm2VlProcessor
|
||||
|
||||
## Lfm2VlConfig
|
||||
|
||||
[[autodoc]] Lfm2VlConfig
|
||||
|
||||
## Lfm2VlModel
|
||||
|
||||
[[autodoc]] Lfm2VlModel
|
||||
- forward
|
||||
|
||||
## Lfm2VlForConditionalGeneration
|
||||
|
||||
[[autodoc]] Lfm2VlForConditionalGeneration
|
||||
- forward
|
128
docs/source/en/model_doc/longcat_flash.md
Normal file
128
docs/source/en/model_doc/longcat_flash.md
Normal file
@ -0,0 +1,128 @@
|
||||
<!--Copyright 2025 the HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
*This model was released on 2025-09-01 and added to Hugging Face Transformers on 2025-09-15.*
|
||||
|
||||
|
||||
# LongCatFlash
|
||||
|
||||
## Overview
|
||||
|
||||
The LongCatFlash model was proposed in [LongCat-Flash Technical Report](https://huggingface.co/papers/2509.01322) by the Meituan LongCat Team.
|
||||
LongCat-Flash is a 560B parameter Mixture-of-Experts (MoE) model that activates 18.6B-31.3B parameters dynamically (average ~27B). The model features a shortcut-connected architecture enabling high inference speed (>100 tokens/second) and advanced reasoning capabilities.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*We present LongCat-Flash, a 560 billion parameter Mixture-of-Experts (MoE) language model featuring a dynamic computation mechanism that activates 18.6B-31.3B parameters based on context (average ~27B). The model incorporates a shortcut-connected architecture enabling high inference speed (>100 tokens/second) and demonstrates strong performance across multiple benchmarks including 89.71% accuracy on MMLU and exceptional agentic tool use capabilities.*
|
||||
|
||||
Tips:
|
||||
|
||||
- LongCat-Flash uses a unique shortcut-connected MoE architecture that enables faster inference compared to traditional MoE models
|
||||
- The model supports up to 128k context length for long-form tasks
|
||||
- Dynamic parameter activation makes it computationally efficient while maintaining high performance
|
||||
- Best suited for applications requiring strong reasoning, coding, and tool-calling capabilities
|
||||
- The MoE architecture includes zero experts (nn.Identity modules) which act as skip connections, allowing tokens to bypass expert computation when appropriate (see the sketch below)
|
||||
|
||||
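A schematic, hedged sketch of the zero-expert idea (sizes and expert counts are illustrative, not the actual LongCat-Flash configuration):

```python
import torch.nn as nn

# An expert list mixing real FFN experts with identity "zero experts": a token routed
# to an identity slot passes through unchanged, effectively skipping expert computation.
hidden = 1024  # illustrative hidden size
experts = nn.ModuleList([nn.Linear(hidden, hidden) for _ in range(6)] + [nn.Identity() for _ in range(2)])
```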
This model was contributed by [Molbap](https://huggingface.co/Molbap).
|
||||
The original code can be found [here](https://huggingface.co/meituan-longcat/LongCat-Flash-Chat).
|
||||
|
||||
## Usage examples
|
||||
|
||||
The model is large: you will need 2 nodes of 8 H100 GPUs to run inference.
|
||||
```python
|
||||
# launch_longcat.py
|
||||
from transformers import LongcatFlashForCausalLM, AutoTokenizer
|
||||
import torch
|
||||
|
||||
model_id = "meituan-longcat/LongCat-Flash-Chat"
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
||||
|
||||
chat = [
|
||||
{"role": "user", "content": "Hello! What is the capital of France? What can you tell me about it?"},
|
||||
]
|
||||
|
||||
model = LongcatFlashForCausalLM.from_pretrained(
|
||||
model_id,
|
||||
tp_plan="auto",
|
||||
dtype=torch.bfloat16,
|
||||
)
|
||||
|
||||
inputs = tokenizer.apply_chat_template(
|
||||
chat, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(model.device)
|
||||
|
||||
outputs = model.generate(inputs, max_new_tokens=30)
|
||||
print(tokenizer.batch_decode(outputs))
|
||||
```
|
||||
|
||||
To run with TP, you will need torchrun:
|
||||
|
||||
```bash
|
||||
torchrun --nproc_per_node=8 --nnodes=2 --node_rank=0 --rdzv-id <an_id> --rdzv-backend c10d --rdzv-endpoint $NODE_ID:$NODE_PORT --log-dir ./logs_longcat launch_longcat.py  # use --node_rank=1 on the second node
|
||||
```
|
||||
|
||||
And you'll get a nice generation:
|
||||
```json
|
||||
[Round 0] USER:Hello! What is the capital of France? What can you tell me about it? ASSISTANT:Hello! 😊 The capital of France is Paris, one of the most famous and beloved cities in the world. Here’s a quick overview of what makes Paris special:
|
||||
1. Iconic Landmarks
|
||||
|
||||
Eiffel Tower – The global symbol of France, built in 1889 for the World's Fair.
|
||||
Notre-Dame Cathedral – A masterpiece of Gothic architecture (currently under restoration after the 2019 fire).
|
||||
Louvre Museum – The world’s largest art museum, home to the Mona Lisa and Venus de Milo.
|
||||
Sacré-Cœur Basilica – A stunning white church atop Montmartre with panoramic views.
|
||||
Arc de Triomphe – Honors French military victories, with the Tomb of the Unknown Soldier beneath it.
|
||||
Champs-Élysées – A glamorous avenue leading to the Arc de Triomphe, lined with shops and cafés.
|
||||
|
||||
2. Culture & Arts
|
||||
|
||||
Paris is the "City of Light" (La Ville Lumière), a nickname from its early adoption of street lighting and its role as a center of enlightenment.
|
||||
It’s a global hub for fashion (haute couture, Paris Fashion Week) and art (Impressionism, Picasso, Dali).
|
||||
Famous literary figures like Hemingway, Fitzgerald, and Sartre lived and wrote here.
|
||||
|
||||
3. Food & Cuisine
|
||||
|
||||
Croissants, baguettes, macarons, and crème brûlée are just a few of its culinary delights.
|
||||
Paris has over 100 Michelin-starred restaurants and countless cozy bistros.
|
||||
The Marché d’Aligre and Rue Mouffetard are great for fresh produce and local flavors.
|
||||
|
||||
4. History & Politics
|
||||
|
||||
Founded in the 3rd century BC by the Parisii tribe, it became a major European city under the Romans.
|
||||
The French Revolution (1789–1799) began here, leading to the fall of the monarchy.
|
||||
Today, it’s the political and economic heart of France, housing the French President’s residence (Élysée Palace) and the National Assembly.
|
||||
|
||||
**
|
||||
```
|
||||
|
||||
## LongcatFlashConfig
|
||||
|
||||
[[autodoc]] LongcatFlashConfig
|
||||
|
||||
## LongcatFlashPreTrainedModel
|
||||
|
||||
[[autodoc]] LongcatFlashPreTrainedModel
|
||||
- forward
|
||||
|
||||
## LongcatFlashModel
|
||||
|
||||
[[autodoc]] LongcatFlashModel
|
||||
- forward
|
||||
|
||||
## LongcatFlashForCausalLM
|
||||
|
||||
[[autodoc]] LongcatFlashForCausalLM
|
@ -32,7 +32,7 @@ MetaCLIP 2 is a replication of the original CLIP model trained on 300+ languages
|
||||
This model was contributed by [nielsr](https://huggingface.co/nielsr).
|
||||
The original code can be found [here](https://github.com/facebookresearch/MetaCLIP).
|
||||
|
||||
You can find all the MetaCLIP 2 checkpoints under the [Meta](https://huggingface.co/facebook?search_models=metaclip-2) organization.
|
||||
You can find all the MetaCLIP 2 checkpoints under the [Meta](https://huggingface.co/facebook/models?search=metaclip-2) organization.
|
||||
|
||||
> [!TIP]
|
||||
> Click on the MetaCLIP 2 models in the right sidebar for more examples of how to apply MetaCLIP 2 to different image and language tasks.
|
||||
|
86
docs/source/en/model_doc/ministral.md
Normal file
86
docs/source/en/model_doc/ministral.md
Normal file
@ -0,0 +1,86 @@
|
||||
<!--Copyright 2024 Mistral AI and The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
<div style="float: right;">
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||
<img alt="Tensor parallelism" src="https://img.shields.io/badge/Tensor%20parallelism-06b6d4?style=flat&logoColor=white">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
# Ministral
|
||||
|
||||
[Ministral](https://huggingface.co/mistralai/Ministral-8B-Instruct-2410) is an 8B parameter language model that extends the Mistral architecture with an alternating attention pattern. Unlike Mistral, which uses either full attention or sliding window attention consistently, Ministral alternates between full attention and sliding window attention layers, in a pattern of one full attention layer followed by three sliding window attention layers. This enables support for a 128K context length.
|
||||
|
||||
This architecture coincides with that of Qwen2, the main difference being the presence of biases in the attention projections in Ministral.
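To make the layer layout concrete, the short sketch below illustrates the one-in-four full-attention cycle described above. It is an illustration only, not the Ministral implementation, and the default layer count is an assumption.

```python
# Illustration of the alternating attention pattern only; not Ministral source code.
def attention_pattern(num_layers: int = 32) -> list[str]:
    # One full-attention layer followed by three sliding-window layers, repeated.
    return [
        "full_attention" if i % 4 == 0 else "sliding_window_attention"
        for i in range(num_layers)
    ]

print(attention_pattern(8))
# ['full_attention', 'sliding_window_attention', 'sliding_window_attention',
#  'sliding_window_attention', 'full_attention', ...]
```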
|
||||
|
||||
|
||||
You can find the Ministral checkpoints under the [Mistral AI](https://huggingface.co/mistralai) organization.
|
||||
|
||||
## Usage
|
||||
|
||||
The example below demonstrates how to use Ministral for text generation:
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("mistralai/Ministral-8B-Instruct-2410", torch_dtype=torch.bfloat16, attn_implementation="sdpa", device_map="auto")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Ministral-8B-Instruct-2410")
|
||||
|
||||
>>> messages = [
|
||||
... {"role": "user", "content": "What is your favourite condiment?"},
|
||||
... {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"},
|
||||
... {"role": "user", "content": "Do you have mayonnaise recipes?"}
|
||||
... ]
|
||||
|
||||
>>> model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to("cuda")
|
||||
|
||||
>>> generated_ids = model.generate(model_inputs, max_new_tokens=100, do_sample=True)
|
||||
>>> tokenizer.batch_decode(generated_ids)[0]
|
||||
"Mayonnaise can be made as follows: (...)"
|
||||
```
|
||||
|
||||
## MinistralConfig
|
||||
|
||||
[[autodoc]] MinistralConfig
|
||||
|
||||
## MinistralModel
|
||||
|
||||
[[autodoc]] MinistralModel
|
||||
- forward
|
||||
|
||||
## MinistralForCausalLM
|
||||
|
||||
[[autodoc]] MinistralForCausalLM
|
||||
- forward
|
||||
|
||||
## MinistralForSequenceClassification
|
||||
|
||||
[[autodoc]] MinistralForSequenceClassification
|
||||
- forward
|
||||
|
||||
## MinistralForTokenClassification
|
||||
|
||||
[[autodoc]] MinistralForTokenClassification
|
||||
- forward
|
||||
|
||||
## MinistralForQuestionAnswering
|
||||
|
||||
[[autodoc]] MinistralForQuestionAnswering
|
||||
- forward
|
@ -13,6 +13,9 @@ specific language governing permissions and limitations under the License.
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
*This model was released on 2022-07-11 and added to Hugging Face Transformers on 2022-07-18.*
|
||||
|
||||
# NLLB
|
||||
|
||||
<div style="float: right;">
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
@ -22,10 +25,7 @@ rendered properly in your Markdown viewer.
|
||||
</div>
|
||||
</div>
|
||||
|
||||
*This model was released on 2022-07-11 and added to Hugging Face Transformers on 2022-07-18.*
|
||||
|
||||
|
||||
# NLLB
|
||||
## Overview
|
||||
|
||||
[NLLB: No Language Left Behind](https://huggingface.co/papers/2207.04672) is a multilingual translation model. It's trained on data using data mining techniques tailored for low-resource languages and supports over 200 languages. NLLB features a conditional compute architecture using a Sparsely Gated Mixture of Experts.
|
||||
|
||||
@ -33,7 +33,7 @@ rendered properly in your Markdown viewer.
|
||||
You can find all the original NLLB checkpoints under the [AI at Meta](https://huggingface.co/facebook/models?search=nllb) organization.
|
||||
|
||||
> [!TIP]
|
||||
> This model was contributed by [Lysandre](https://huggingface.co/lysandre).
|
||||
> This model was contributed by [Lysandre](https://huggingface.co/lysandre).
|
||||
> Click on the NLLB models in the right sidebar for more examples of how to apply NLLB to different translation tasks.
|
||||
|
||||
The example below demonstrates how to translate text with [`Pipeline`] or the [`AutoModel`] class.
|
||||
@ -120,17 +120,17 @@ visualizer("UN Chief says there is no military solution in Syria")
|
||||
>>> tokenizer("How was your day?").input_ids
|
||||
[256047, 13374, 1398, 4260, 4039, 248130, 2]
|
||||
```
|
||||
|
||||
|
||||
To revert to the legacy behavior, use the code example below.
|
||||
|
||||
|
||||
```python
|
||||
>>> from transformers import NllbTokenizer
|
||||
|
||||
>>> tokenizer = NllbTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", legacy_behaviour=True)
|
||||
```
|
||||
|
||||
|
||||
- For non-English languages, specify the language's [BCP-47](https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200) code with the `src_lang` keyword as shown below.
|
||||
|
||||
|
||||
- See example below for a translation from Romanian to German.
|
||||
```python
|
||||
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
|
||||
|
147
docs/source/en/model_doc/olmo3.md
Normal file
147
docs/source/en/model_doc/olmo3.md
Normal file
@ -0,0 +1,147 @@
|
||||
<!--Copyright 2025 the HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
*This model was added to Hugging Face Transformers on 2025-09-08.*
|
||||
<div style="float: right;">
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||
</div>
|
||||
</div>
|
||||
|
||||
# OLMo3
|
||||
Olmo3 is an improvement on [OLMo2](./olmo2). More details will be released *soon*.
|
||||
|
||||
> [!TIP]
|
||||
> Click on the OLMo3 models in the right sidebar for more examples of how to apply OLMo3 to different language tasks.
|
||||
|
||||
The example below demonstrates how to generate text with [`Pipeline`], the [`AutoModel`] class, or from the command line.
|
||||
|
||||
<hfoptions id="usage">
|
||||
<hfoption id="Pipeline">
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import pipeline
|
||||
|
||||
pipe = pipeline(
|
||||
task="text-generation",
|
||||
model="allenai/TBA",
|
||||
dtype=torch.bfloat16,
|
||||
device=0,
|
||||
)
|
||||
|
||||
result = pipe("Plants create energy through a process known as")
|
||||
print(result)
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="AutoModel">
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
"allenai/TBA"
|
||||
)
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"allenai/TBA",
|
||||
dtype=torch.bfloat16,
|
||||
device_map="auto",
|
||||
attn_implementation="sdpa"
|
||||
)
|
||||
input_ids = tokenizer("Plants create energy through a process known as", return_tensors="pt").to(model.device)
|
||||
|
||||
output = model.generate(**input_ids, max_length=50, cache_implementation="static")
|
||||
print(tokenizer.decode(output[0], skip_special_tokens=True))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="transformers CLI">
|
||||
|
||||
```bash
|
||||
echo -e "Plants create energy through a process known as" | transformers-cli run --task text-generation --model allenai/TBA --device 0
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
Quantization reduces the memory burden of large models by representing the weights in a lower precision. Refer to the [Quantization](../quantization/overview) overview for more available quantization backends.
|
||||
|
||||
The example below uses [torchao](../quantization/torchao) to quantize only the weights to 4-bits.
|
||||
```py
|
||||
|
||||
#pip install torchao
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, TorchAoConfig
|
||||
|
||||
torchao_config = TorchAoConfig(
|
||||
"int4_weight_only",
|
||||
group_size=128
|
||||
)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
"allenai/TBA"
|
||||
)
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"allenai/TBA",
|
||||
quantization_config=torchao_config,
|
||||
dtype=torch.bfloat16,
|
||||
device_map="auto",
|
||||
attn_implementation="sdpa"
|
||||
)
|
||||
input_ids = tokenizer("Plants create energy through a process known as", return_tensors="pt").to(model.device)
|
||||
|
||||
output = model.generate(**input_ids, max_length=50, cache_implementation="static")
|
||||
print(tokenizer.decode(output[0], skip_special_tokens=True))
|
||||
|
||||
```
|
||||
|
||||
|
||||
## Notes
|
||||
|
||||
- Load specific intermediate checkpoints by adding the `revision` parameter to [`~PreTrainedModel.from_pretrained`].
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("allenai/TBA", revision="stage1-step140000-tokens294B")
|
||||
```
|
||||
|
||||
|
||||
## Olmo3Config
|
||||
|
||||
[[autodoc]] Olmo3Config
|
||||
|
||||
## Olmo3ForCausalLM
|
||||
|
||||
[[autodoc]] Olmo3ForCausalLM
|
||||
|
||||
## Olmo3Model
|
||||
|
||||
[[autodoc]] Olmo3Model
|
||||
- forward
|
||||
|
||||
## Olmo3PreTrainedModel
|
||||
|
||||
[[autodoc]] Olmo3PreTrainedModel
|
||||
- forward
|
@ -93,5 +93,11 @@ If you are interested in submitting a resource to be included here, please feel
|
||||
## PromptDepthAnythingImageProcessor
|
||||
|
||||
[[autodoc]] PromptDepthAnythingImageProcessor
|
||||
- preprocess
|
||||
- post_process_depth_estimation
|
||||
|
||||
## PromptDepthAnythingImageProcessorFast
|
||||
|
||||
[[autodoc]] PromptDepthAnythingImageProcessorFast
|
||||
- preprocess
|
||||
- post_process_depth_estimation
|
@ -83,7 +83,7 @@ inputs = processor.apply_chat_template(
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
return_tensors="pt",
|
||||
video_fps=1,
|
||||
fps=1,
|
||||
|
||||
# kwargs to be passed to `Qwen2-5-OmniProcessor`
|
||||
padding=True,
|
||||
|
@ -146,7 +146,7 @@ model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
|
||||
|
||||
inputs = processor.apply_chat_template(
|
||||
conversation,
|
||||
video_fps=1,
|
||||
fps=1,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
|
@ -99,7 +99,7 @@ conversation = [
|
||||
|
||||
inputs = processor.apply_chat_template(
|
||||
conversation,
|
||||
video_fps=1,
|
||||
fps=1,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
@ -169,7 +169,7 @@ conversations = [conversation1, conversation2, conversation3, conversation4]
|
||||
# Preparation for batch inference
|
||||
inputs = processor.apply_chat_template(
|
||||
conversations,
|
||||
video_fps=1,
|
||||
fps=1,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
|
97
docs/source/en/model_doc/qwen3_next.md
Normal file
97
docs/source/en/model_doc/qwen3_next.md
Normal file
@ -0,0 +1,97 @@
|
||||
<!--Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
## Overview
|
||||
|
||||
The Qwen3-Next series represents our next-generation foundation models, optimized for extreme context length and large-scale parameter efficiency.
|
||||
The series introduces a suite of architectural innovations designed to maximize performance while minimizing computational cost:
|
||||
- **Hybrid Attention**: Replaces standard attention with the combination of **Gated DeltaNet** and **Gated Attention**, enabling efficient context modeling.
|
||||
- **High-Sparsity MoE**: Achieves an extremely low activation ratio of 1:50 in MoE layers, drastically reducing FLOPs per token while preserving model capacity.
|
||||
- **Multi-Token Prediction (MTP)**: Boosts pretraining performance and accelerates inference.
|
||||
- **Other Optimizations**: Includes techniques such as **zero-centered and weight-decayed layernorm**, **Gated Attention**, and other stabilizing enhancements for robust training.
|
||||
|
||||
Built on this architecture, we trained and open-sourced Qwen3-Next-80B-A3B — 80B total parameters, only 3B active — achieving extreme sparsity and efficiency.
|
||||
|
||||
Despite its ultra-efficiency, it outperforms Qwen3-32B on downstream tasks — while requiring **less than 1/10 of the training cost**.
|
||||
Moreover, it delivers over **10x higher inference throughput** than Qwen3-32B when handling contexts longer than 32K tokens.
|
||||
|
||||
For more details, please see the [Qwen3-Next blog post](https://qwenlm.github.io/blog/qwen3_next/).
|
||||
## Usage examples
|
||||
|
||||
```python
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
model_name = "Qwen/Qwen3-Next-80B-A3B-Instruct"
|
||||
|
||||
# load the tokenizer and the model
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
model_name,
|
||||
dtype="auto",
|
||||
device_map="auto"
|
||||
)
|
||||
|
||||
# prepare the model input
|
||||
prompt = "Give me a short introduction to large language model."
|
||||
messages = [
|
||||
{"role": "user", "content": prompt}
|
||||
]
|
||||
text = tokenizer.apply_chat_template(
|
||||
messages,
|
||||
tokenize=False,
|
||||
add_generation_prompt=True,
|
||||
)
|
||||
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
|
||||
|
||||
# conduct text completion
|
||||
generated_ids = model.generate(
|
||||
**model_inputs,
|
||||
max_new_tokens=512
|
||||
)
|
||||
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
|
||||
|
||||
content = tokenizer.decode(output_ids, skip_special_tokens=True)
|
||||
|
||||
print("content:", content)
|
||||
```
|
||||
|
||||
## Qwen3NextConfig
|
||||
|
||||
[[autodoc]] Qwen3NextConfig
|
||||
|
||||
## Qwen3NextModel
|
||||
|
||||
[[autodoc]] Qwen3NextModel
|
||||
- forward
|
||||
|
||||
## Qwen3NextForCausalLM
|
||||
|
||||
[[autodoc]] Qwen3NextForCausalLM
|
||||
- forward
|
||||
|
||||
## Qwen3NextForSequenceClassification
|
||||
|
||||
[[autodoc]] Qwen3NextForSequenceClassification
|
||||
- forward
|
||||
|
||||
## Qwen3NextForQuestionAnswering
|
||||
|
||||
[[autodoc]] Qwen3NextForQuestionAnswering
|
||||
- forward
|
||||
|
||||
## Qwen3NextForTokenClassification
|
||||
|
||||
[[autodoc]] Qwen3NextForTokenClassification
|
||||
- forward
|
117
docs/source/en/model_doc/qwen3_vl.md
Normal file
117
docs/source/en/model_doc/qwen3_vl.md
Normal file
@ -0,0 +1,117 @@
|
||||
<!--Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
*This model was added to Hugging Face Transformers on 2025-08-16.*
|
||||
|
||||
<div style="float: right;">
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> </div>
|
||||
</div>
|
||||
|
||||
# Qwen3-VL
|
||||
|
||||
[Qwen3-VL](https://huggingface.co/papers/2502.13923) is a multimodal vision-language model series, encompassing both dense and MoE variants, as well as Instruct and Thinking versions. Building upon its predecessors, Qwen3-VL delivers significant improvements in visual understanding while maintaining strong pure text capabilities. Key architectural advancements include: enhanced MRope with interleaved layout for better spatial-temporal modeling, DeepStack integration to effectively leverage multi-level features from the Vision Transformer (ViT), and improved video understanding through text-based time alignment—evolving from T-RoPE to text timestamp alignment for more precise temporal grounding. These innovations collectively enable Qwen3-VL to achieve superior performance in complex multimodal tasks.
|
||||
|
||||
## Usage
|
||||
|
||||
<hfoptions id="usage">
|
||||
<hfoption id="AutoModel">
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import Qwen3VLForConditionalGeneration, AutoProcessor
|
||||
|
||||
model = Qwen3VLForConditionalGeneration.from_pretrained(
|
||||
"Qwen/Qwen3-VL",
|
||||
dtype=torch.float16,
|
||||
device_map="auto",
|
||||
attn_implementation="sdpa"
|
||||
)
|
||||
processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL")
|
||||
messages = [
|
||||
{
|
||||
"role":"user",
|
||||
"content":[
|
||||
{
|
||||
"type":"image",
|
||||
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
|
||||
},
|
||||
{
|
||||
"type":"text",
|
||||
"text":"Describe this image."
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
]
|
||||
|
||||
inputs = processor.apply_chat_template(
|
||||
messages,
|
||||
tokenize=True,
|
||||
add_generation_prompt=True,
|
||||
return_dict=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
inputs.pop("token_type_ids", None)
|
||||
|
||||
generated_ids = model.generate(**inputs, max_new_tokens=128)
|
||||
generated_ids_trimmed = [
|
||||
out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
|
||||
]
|
||||
output_text = processor.batch_decode(
|
||||
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
|
||||
)
|
||||
print(output_text)
|
||||
```
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## Qwen3VLConfig
|
||||
|
||||
[[autodoc]] Qwen3VLConfig
|
||||
|
||||
## Qwen3VLTextConfig
|
||||
|
||||
[[autodoc]] Qwen3VLTextConfig
|
||||
|
||||
## Qwen3VLProcessor
|
||||
|
||||
[[autodoc]] Qwen3VLProcessor
|
||||
|
||||
## Qwen3VLVideoProcessor
|
||||
|
||||
[[autodoc]] Qwen3VLVideoProcessor
|
||||
|
||||
## Qwen3VLVisionModel
|
||||
|
||||
[[autodoc]] Qwen3VLVisionModel
|
||||
- forward
|
||||
|
||||
## Qwen3VLTextModel
|
||||
|
||||
[[autodoc]] Qwen3VLTextModel
|
||||
- forward
|
||||
|
||||
## Qwen3VLModel
|
||||
|
||||
[[autodoc]] Qwen3VLModel
|
||||
- forward
|
||||
|
||||
## Qwen3VLForConditionalGeneration
|
||||
|
||||
[[autodoc]] Qwen3VLForConditionalGeneration
|
||||
- forward
|
109
docs/source/en/model_doc/qwen3_vl_moe.md
Normal file
109
docs/source/en/model_doc/qwen3_vl_moe.md
Normal file
@ -0,0 +1,109 @@
|
||||
<!--Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
*This model was added to Hugging Face Transformers on 2025-08-17.*
|
||||
|
||||
<div style="float: right;">
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||
<img alt="FlashAttention" src="https://img.shields.io/badge/%E2%9A%A1%EF%B8%8E%20FlashAttention-eae0c8?style=flat">
|
||||
<img alt="SDPA" src="https://img.shields.io/badge/SDPA-DE3412?style=flat&logo=pytorch&logoColor=white"> </div>
|
||||
</div>
|
||||
|
||||
# Qwen3-VL-Moe
|
||||
|
||||
[Qwen3-VL](https://huggingface.co/papers/2502.13923) is a multimodal vision-language model series, encompassing both dense and MoE variants, as well as Instruct and Thinking versions. Building upon its predecessors, Qwen3-VL delivers significant improvements in visual understanding while maintaining strong pure text capabilities. Key architectural advancements include: enhanced MRope with interleaved layout for better spatial-temporal modeling, DeepStack integration to effectively leverage multi-level features from the Vision Transformer (ViT), and improved video understanding through text-based time alignment—evolving from T-RoPE to text timestamp alignment for more precise temporal grounding. These innovations collectively enable Qwen3-VL to achieve superior performance in complex multimodal tasks.
|
||||
|
||||
## Usage
|
||||
|
||||
<hfoptions id="usage">
|
||||
<hfoption id="AutoModel">
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import Qwen3VLMoeForConditionalGeneration, AutoProcessor
|
||||
|
||||
model = Qwen3VLMoeForConditionalGeneration.from_pretrained(
|
||||
"Qwen/Qwen3-VL-Moe",
|
||||
dtype=torch.float16,
|
||||
device_map="auto",
|
||||
attn_implementation="sdpa"
|
||||
)
|
||||
processor = AutoProcessor.from_pretrained("Qwen/Qwen3-VL-Moe")
|
||||
messages = [
|
||||
{
|
||||
"role":"user",
|
||||
"content":[
|
||||
{
|
||||
"type":"image",
|
||||
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
|
||||
},
|
||||
{
|
||||
"type":"text",
|
||||
"text":"Describe this image."
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
]
|
||||
|
||||
inputs = processor.apply_chat_template(
|
||||
messages,
|
||||
tokenize=True,
|
||||
add_generation_prompt=True,
|
||||
return_dict=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
inputs.pop("token_type_ids", None)
|
||||
|
||||
generated_ids = model.generate(**inputs, max_new_tokens=128)
|
||||
generated_ids_trimmed = [
|
||||
out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
|
||||
]
|
||||
output_text = processor.batch_decode(
|
||||
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
|
||||
)
|
||||
print(output_text)
|
||||
```
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## Qwen3VLMoeConfig
|
||||
|
||||
[[autodoc]] Qwen3VLMoeConfig
|
||||
|
||||
## Qwen3VLMoeTextConfig
|
||||
|
||||
[[autodoc]] Qwen3VLMoeTextConfig
|
||||
|
||||
## Qwen3VLMoeVisionModel
|
||||
|
||||
[[autodoc]] Qwen3VLMoeVisionModel
|
||||
- forward
|
||||
|
||||
## Qwen3VLMoeTextModel
|
||||
|
||||
[[autodoc]] Qwen3VLMoeTextModel
|
||||
- forward
|
||||
|
||||
## Qwen3VLMoeModel
|
||||
|
||||
[[autodoc]] Qwen3VLMoeModel
|
||||
- forward
|
||||
|
||||
## Qwen3VLMoeForConditionalGeneration
|
||||
|
||||
[[autodoc]] Qwen3VLMoeForConditionalGeneration
|
||||
- forward
|
@ -13,6 +13,10 @@ specific language governing permissions and limitations under the License.
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
*This model was released on 2024-07-29 and added to Hugging Face Transformers on 2025-08-14.*
|
||||
|
||||
# SAM2
|
||||
|
||||
<div style="float: right;">
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||
@ -21,8 +25,6 @@ rendered properly in your Markdown viewer.
|
||||
</div>
|
||||
</div>
|
||||
|
||||
# SAM2
|
||||
|
||||
## Overview
|
||||
|
||||
SAM2 (Segment Anything Model 2) was proposed in [Segment Anything in Images and Videos](https://ai.meta.com/research/publications/sam-2-segment-anything-in-images-and-videos/) by Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Rädle, Chloe Rolland, Laura Gustafson, Eric Mintun, Junting Pan, Kalyan Vasudev Alwala, Nicolas Carion, Chao-Yuan Wu, Ross Girshick, Piotr Dollár, Christoph Feichtenhofer.
|
||||
|
@ -13,6 +13,10 @@ specific language governing permissions and limitations under the License.
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
*This model was released on 2024-07-29 and added to Hugging Face Transformers on 2025-08-14.*
|
||||
|
||||
# SAM2 Video
|
||||
|
||||
<div style="float: right;">
|
||||
<div class="flex flex-wrap space-x-1">
|
||||
<img alt="PyTorch" src="https://img.shields.io/badge/PyTorch-DE3412?style=flat&logo=pytorch&logoColor=white">
|
||||
@ -21,8 +25,6 @@ rendered properly in your Markdown viewer.
|
||||
</div>
|
||||
</div>
|
||||
|
||||
# SAM2 Video
|
||||
|
||||
## Overview
|
||||
|
||||
SAM2 (Segment Anything Model 2) was proposed in [Segment Anything in Images and Videos](https://ai.meta.com/research/publications/sam-2-segment-anything-in-images-and-videos/) by Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Rädle, Chloe Rolland, Laura Gustafson, Eric Mintun, Junting Pan, Kalyan Vasudev Alwala, Nicolas Carion, Chao-Yuan Wu, Ross Girshick, Piotr Dollár, Christoph Feichtenhofer.
|
||||
|
103
docs/source/en/model_doc/vaultgemma.md
Normal file
103
docs/source/en/model_doc/vaultgemma.md
Normal file
@ -0,0 +1,103 @@
|
||||
<!--Copyright 2025 the HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# VaultGemma
|
||||
|
||||
## Overview
|
||||
|
||||
[VaultGemma](https://services.google.com/fh/files/blogs/vaultgemma_tech_report.pdf) is a text-only decoder model
|
||||
derived from [Gemma 2](https://huggingface.co/docs/transformers/en/model_doc/gemma2); notably, it drops the norms after
|
||||
the Attention and MLP blocks, and uses full attention for all layers instead of alternating between full attention and
|
||||
local sliding attention. VaultGemma is available as a pretrained model with 1B parameters that uses a 1024 token
|
||||
sequence length.
|
||||
|
||||
VaultGemma was trained from scratch with sequence-level differential privacy (DP). Its training data includes the same
|
||||
mixture as the [Gemma 2 models](https://huggingface.co/collections/google/gemma-2-release-667d6600fd5220e7b967f315),
|
||||
consisting of a number of documents of varying lengths. Additionally, it is trained using
|
||||
[DP stochastic gradient descent (DP-SGD)](https://arxiv.org/abs/1607.00133) and provides a
|
||||
(ε ≤ 2.0, δ ≤ 1.1e-10)-sequence-level DP guarantee, where a sequence consists of 1024 consecutive tokens extracted from
|
||||
heterogeneous data sources. Specifically, the privacy unit of the guarantee is for the sequences after sampling and
|
||||
packing of the mixture.
|
||||
|
||||
> [!TIP]
|
||||
> Click on the VaultGemma models in the right sidebar for more examples of how to apply VaultGemma to different language tasks.
|
||||
|
||||
The example below demonstrates how to chat with the model using [`Pipeline`], the [`AutoModel`] class, or from the
|
||||
command line.
|
||||
|
||||
<hfoptions id="usage">
|
||||
<hfoption id="Pipeline">
|
||||
|
||||
|
||||
```python
|
||||
from transformers import pipeline
|
||||
|
||||
pipe = pipeline(
|
||||
task="text-generation",
|
||||
model="google/vaultgemma-1b",
|
||||
dtype="auto",
|
||||
device_map="auto",
|
||||
)
|
||||
|
||||
text = "Tell me an unknown interesting biology fact about the brain."
|
||||
outputs = pipe(text, max_new_tokens=32)
|
||||
response = outputs[0]["generated_text"]
|
||||
print(response)
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="AutoModel">
|
||||
|
||||
```python
|
||||
# pip install accelerate
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
|
||||
model_id = "google/vaultgemma-1b"
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
||||
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", dtype="auto")
|
||||
|
||||
text = "Tell me an unknown interesting biology fact about the brain."
|
||||
input_ids = tokenizer(text, return_tensors="pt").to(model.device)
|
||||
|
||||
outputs = model.generate(**input_ids, max_new_tokens=32)
|
||||
print(tokenizer.decode(outputs[0]))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="transformers CLI">
|
||||
|
||||
```
|
||||
echo -e "Write me a poem about Machine Learning. Answer:" | transformers run --task text2text-generation --model google/vaultgemma-1b-pt --device 0
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## VaultGemmaConfig
|
||||
|
||||
[[autodoc]] VaultGemmaConfig
|
||||
|
||||
## VaultGemmaModel
|
||||
|
||||
[[autodoc]] VaultGemmaModel
|
||||
- forward
|
||||
|
||||
## VaultGemmaForCausalLM
|
||||
|
||||
[[autodoc]] VaultGemmaForCausalLM
|
80
docs/source/en/quantization/mxfp4.md
Normal file
80
docs/source/en/quantization/mxfp4.md
Normal file
@ -0,0 +1,80 @@
|
||||
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# MXFP4
|
||||
|
||||
Note: MXFP4 quantization currently only works for OpenAI GPT-OSS 120b and 20b.
|
||||
|
||||
MXFP4 is a 4-bit floating point format that dramatically reduces the memory requirements of large models. Large models (GPT-OSS-120B) can fit on a single 80GB GPU, and smaller models (GPT-OSS-20B) only require 16GB of memory. It uses blockwise scaling to preserve its range and accuracy, which would otherwise degrade at lower precisions.
|
||||
|
||||
To use MXFP4, make sure your hardware meets the following requirements.
|
||||
|
||||
- Install Accelerate, kernels, and Triton ≥ 3.4. You only need to install Triton ≥ 3.4 manually if you're using PyTorch 2.7, since PyTorch 2.8 already includes it.
|
||||
- NVIDIA GPU Compute Capability ≥ 7.5 which includes Tesla GPUs and newer. Use [get_device_capability](https://docs.pytorch.org/docs/stable/generated/torch.cuda.get_device_capability.html) to check Compute Capability.
|
||||
|
||||
|
||||
```python
|
||||
from torch import cuda
|
||||
cuda.get_device_capability()
|
||||
|
||||
# (7, 5)
|
||||
```
|
||||
|
||||
Check a model's quantization config as shown below to see if it supports MXFP4. If `'quant_method': 'mxfp4'`, then the model automatically uses MXFP4.
|
||||
|
||||
```py
|
||||
from transformers import GptOssConfig
|
||||
|
||||
model_id = "openai/gpt-oss-120b"
|
||||
cfg = GptOssConfig.from_pretrained(model_id)
|
||||
print(cfg.quantization_config)
|
||||
|
||||
# Example output:
|
||||
# {
|
||||
# 'modules_to_not_convert': [
|
||||
# 'model.layers.*.self_attn',
|
||||
# 'model.layers.*.mlp.router',
|
||||
# 'model.embed_tokens',
|
||||
# 'lm_head'
|
||||
# ],
|
||||
# 'quant_method': 'mxfp4'
|
||||
# }
|
||||
```
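As a minimal loading sketch (assuming a supported GPU and the requirements above), no extra quantization arguments are needed because the checkpoint's config already declares `quant_method: mxfp4`.

```py
# Minimal sketch: loading a checkpoint whose config sets quant_method to "mxfp4".
# Assumes a supported GPU and the requirements listed above.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "openai/gpt-oss-20b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, dtype="auto", device_map="auto")

inputs = tokenizer("MXFP4 keeps memory use low by", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```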
|
||||
|
||||
|
||||
## MXFP4 kernels
|
||||
|
||||
Transformers automatically pulls the MXFP4-aware Triton kernels from the community repository when you load a model that needs them. The kernels are stored in your local cache and used during the forward pass.
|
||||
|
||||
MXFP4 kernels are used by default, if available and supported, and do not require any code changes.
|
||||
|
||||
You can use [hf cache scan](https://huggingface.co/docs/huggingface_hub/en/guides/manage-cache#scan-your-cache) to verify the kernels are downloaded.
|
||||
|
||||
```shell
|
||||
hf cache scan
|
||||
```
|
||||
|
||||
|
||||
```shell
|
||||
REPO ID REPO TYPE SIZE ON DISK
|
||||
-------------------------------- --------- ------------
|
||||
kernels-community/triton_kernels model 536.2K
|
||||
openai/gpt-oss-20b model 13.8G
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
Learn more about MXFP4 quantization and how blockwise scaling works in this [blog post](https://huggingface.co/blog/faster-transformers#mxfp4-quantization).
|
@ -124,7 +124,7 @@ Create a [`Pipeline`] object and select a task. By default, [`Pipeline`] downloa
|
||||
<hfoptions id="pipeline-tasks">
|
||||
<hfoption id="text generation">
|
||||
|
||||
Use [`~infer_device`] to automatically detect an available accelerator for inference.
|
||||
Use [`infer_device`] to automatically detect an available accelerator for inference.
|
||||
|
||||
```py
|
||||
from transformers import pipeline, infer_device
|
||||
@ -144,7 +144,7 @@ pipeline("The secret to baking a good cake is ", max_length=50)
|
||||
</hfoption>
|
||||
<hfoption id="image segmentation">
|
||||
|
||||
Use [`~infer_device`] to automatically detect an available accelerator for inference.
|
||||
Use [`infer_device`] to automatically detect an available accelerator for inference.
|
||||
|
||||
```py
|
||||
from transformers import pipeline, infer_device
|
||||
@ -171,7 +171,7 @@ segments[1]["label"]
|
||||
</hfoption>
|
||||
<hfoption id="automatic speech recognition">
|
||||
|
||||
Use [`~infer_device`] to automatically detect an available accelerator for inference.
|
||||
Use [`infer_device`] to automatically detect an available accelerator for inference.
|
||||
|
||||
```py
|
||||
from transformers import pipeline, infer_device
|
||||
|
@ -21,7 +21,7 @@ Transformer models can be efficiently deployed using libraries such as vLLM, Tex
|
||||
> [!TIP]
|
||||
> Responses API is now supported as an experimental API! Read more about it [here](#responses-api).
|
||||
|
||||
Apart from that you can also serve transformer models easily using the `transformers serve` CLI. This is ideal for experimentation purposes, or to run models locally for personal and private use.
|
||||
You can also serve transformer models with the `transformers serve` CLI. With Continuous Batching, `serve` now delivers solid throughput and latency well suited for evaluation, experimentation, and moderate-load local or self-hosted deployments. While vLLM, SGLang, or other inference engines remain our recommendations for large-scale production, `serve` avoids the extra runtime and operational overhead, and is on track to gain more production-oriented features.
|
||||
|
||||
In this document, we dive into the different supported endpoints and modalities; we also cover the setup of several user interfaces that can be used on top of `transformers serve` in the following guides:
|
||||
- [Jan (text and MCP user interface)](./jan.md)
|
||||
@ -58,7 +58,7 @@ or by sending an HTTP request, like we'll see below.
|
||||
|
||||
## Chat Completions - text-based
|
||||
|
||||
See below for examples for text-based requests. Both LLMs and VLMs should handle
|
||||
See below for examples of text-based requests. Both LLMs and VLMs should handle these.
|
||||
|
||||
<hfoptions id="chat-completion-http">
|
||||
<hfoption id="curl">
|
||||
@ -366,6 +366,40 @@ The `transformers serve` server is also an MCP client, so it can interact with M
|
||||
|
||||
<!-- TODO: example with a minimal python example, and explain that it is possible to pass a full generation config in the request -->
|
||||
|
||||
## Continuous Batching
|
||||
|
||||
Continuous Batching (CB) lets the server dynamically group and interleave requests so they can share forward passes on the GPU. Instead of processing each request sequentially, `serve` adds new requests as others progress (prefill) and drops finished ones during decode. The result is significantly higher GPU utilization and better throughput without sacrificing latency for most workloads.
|
||||
|
||||
Thanks to this, evaluation, experimentation, and moderate-load local/self-hosted use can now be handled comfortably by `transformers serve` without introducing an extra runtime to operate.
|
||||
|
||||
### Enable CB in serve
|
||||
|
||||
CB is opt-in and currently applies to chat completions.
|
||||
|
||||
```sh
|
||||
transformers serve \
|
||||
--continuous-batching \
|
||||
--attn_implementation sdpa_paged
|
||||
```
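Once the server is running, requests go through the OpenAI-compatible Chat Completions endpoint. The sketch below assumes the server is listening on `http://localhost:8000`; adjust the base URL and the example model id to match your setup.

```py
# Minimal client sketch for a locally running `transformers serve` instance.
# The base URL and model id are assumptions; adjust them to your setup.
import requests

payload = {
    "model": "Qwen/Qwen2.5-0.5B-Instruct",  # example model id
    "messages": [{"role": "user", "content": "Say hello in one sentence."}],
    "max_tokens": 32,
}
response = requests.post("http://localhost:8000/v1/chat/completions", json=payload)
print(response.json())
```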
|
||||
|
||||
|
||||
### Performance tips
|
||||
|
||||
- Use an efficient attention backend when available:
|
||||
|
||||
```sh
|
||||
transformers serve \
|
||||
--continuous_batching \
|
||||
--attn_implementation paged_attention
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> If you choose `paged_attention`, you must install `flash-attn` separately: `pip install flash-attn --no-build-isolation`
|
||||
|
||||
- `--dtype {bfloat16|float16}` typically improves throughput and memory use compared to `float32`
|
||||
|
||||
- `--load_in_4bit`/`--load_in_8bit` can reduce memory footprint for LoRA setups
|
||||
|
||||
- `--force-model <repo_id>` avoids per-request model hints and helps produce stable, repeatable runs
|
||||
|
||||
|
||||
|
129
docs/source/en/tasks/keypoint_matching.md
Normal file
129
docs/source/en/tasks/keypoint_matching.md
Normal file
@ -0,0 +1,129 @@
|
||||
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Keypoint matching
|
||||
|
||||
Keypoint matching matches points of interest that belong to the same object appearing in two different images. Most modern keypoint matchers take images as input and output the following:
|
||||
|
||||
- **Keypoint coordinates (x,y):** one-to-one mapping of pixel coordinates between the first and the second image using two lists. Each keypoint at a given index in the first list is matched to the keypoint at the same index in the second list.
|
||||
- **Matching scores:** Scores assigned to the keypoint matches.
|
||||
|
||||
In this tutorial, you will extract keypoint matches with the [`EfficientLoFTR`] model trained with the [MatchAnything framework](https://huggingface.co/zju-community/matchanything_eloftr), and refine the matches. This model has only 16M parameters and can run on a CPU. You will use the [`AutoModelForKeypointMatching`] class.
|
||||
|
||||
```python
|
||||
from transformers import AutoImageProcessor, AutoModelForKeypointMatching
|
||||
import torch
|
||||
|
||||
processor = AutoImageProcessor.from_pretrained("zju-community/matchanything_eloftr")
|
||||
model = AutoModelForKeypointMatching.from_pretrained("zju-community/matchanything_eloftr")
|
||||
```
|
||||
|
||||
Load two images that have the same object of interest. The second photo was taken a second later; its colors are edited, and it is further cropped and rotated.
|
||||
|
||||
<div style="display: flex; align-items: center;">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"
|
||||
alt="Bee"
|
||||
style="height: 200px; object-fit: contain; margin-right: 10px;">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee_edited.jpg"
|
||||
alt="Bee edited"
|
||||
style="height: 200px; object-fit: contain;">
|
||||
</div>
|
||||
|
||||
```python
|
||||
from transformers.image_utils import load_image
|
||||
image1 = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg")
|
||||
image2 = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee_edited.jpg")
|
||||
|
||||
images = [image1, image2]
|
||||
```
|
||||
|
||||
We can pass the images to the processor and infer.
|
||||
|
||||
```python
|
||||
inputs = processor(images, return_tensors="pt")
|
||||
with torch.no_grad():
|
||||
outputs = model(**inputs)
|
||||
```
|
||||
|
||||
We can post-process the outputs. The `threshold` parameter is used to filter out low-confidence matches.
|
||||
|
||||
```python
|
||||
image_sizes = [[(image.height, image.width) for image in images]]
|
||||
|
||||
outputs = processor.post_process_keypoint_matching(outputs, image_sizes, threshold=0.2)
|
||||
print(outputs)
|
||||
```
|
||||
|
||||
Here are the outputs.
|
||||
|
||||
```
|
||||
[{'keypoints0': tensor([[4514, 550],
|
||||
[4813, 683],
|
||||
[1972, 1547],
|
||||
...
|
||||
[3916, 3408]], dtype=torch.int32),
|
||||
'keypoints1': tensor([[2280, 463],
|
||||
[2378, 613],
|
||||
[2231, 887],
|
||||
...
|
||||
[1521, 2560]], dtype=torch.int32),
|
||||
'matching_scores': tensor([0.2189, 0.2073, 0.2414, ...
|
||||
])}]
|
||||
```
|
||||
|
||||
We have trimmed the output, but there are 401 matches!
|
||||
|
||||
```python
|
||||
len(outputs[0]["keypoints0"])
|
||||
# 401
|
||||
```
|
||||
|
||||
We can visualize them using the processor's [`~EfficientLoFTRImageProcessor.visualize_keypoint_matching`] method.
|
||||
|
||||
```python
|
||||
plot_images = processor.visualize_keypoint_matching(images, outputs)
|
||||
plot_images
|
||||
```
|
||||
|
||||

|
||||
|
||||
Optionally, you can use the [`Pipeline`] API and set the task to `keypoint-matching`.
|
||||
|
||||
```python
|
||||
from transformers import pipeline
|
||||
|
||||
image_1 = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"
|
||||
image_2 = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee_edited.jpg"
|
||||
|
||||
pipe = pipeline("keypoint-matching", model="zju-community/matchanything_eloftr")
|
||||
pipe([image_1, image_2])
|
||||
```
|
||||
|
||||
The output looks like the following.
|
||||
|
||||
```bash
|
||||
[{'keypoint_image_0': {'x': 2444, 'y': 2869},
|
||||
'keypoint_image_1': {'x': 837, 'y': 1500},
|
||||
'score': 0.9756593704223633},
|
||||
{'keypoint_image_0': {'x': 1248, 'y': 2819},
|
||||
'keypoint_image_1': {'x': 862, 'y': 866},
|
||||
'score': 0.9735618829727173},
|
||||
{'keypoint_image_0': {'x': 1547, 'y': 3317},
|
||||
'keypoint_image_1': {'x': 1436, 'y': 1500},
|
||||
...
|
||||
}
|
||||
]
|
||||
```
|
@ -79,7 +79,7 @@ Index the images offline, and during inference, return the query text embeddings
|
||||
Store the image and image embeddings by writing them to the dataset with [`~datasets.Dataset.map`] as shown below. Add an `embeddings` column that contains the indexed embeddings. ColPali embeddings take up a lot of storage, so remove them from the accelerator and store them in the CPU as NumPy vectors.
|
||||
|
||||
```python
|
||||
ds_with_embeddings = dataset.map(lambda example: {'embeddings': model(**processor(images=example["image"]).to(devide), return_tensors="pt").embeddings.to(torch.float32).detach().cpu().numpy()})
|
||||
ds_with_embeddings = dataset.map(lambda example: {'embeddings': model(**processor(images=example["image"]).to(device), return_tensors="pt").embeddings.to(torch.float32).detach().cpu().numpy()})
|
||||
```
|
||||
|
||||
For online inference, create a function to search the image embeddings in batches and retrieve the k-most relevant images. The function below returns the indices in the dataset and their scores for a given indexed dataset, text embeddings, number of top results, and the batch size.
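A rough sketch of such a search function, assuming late-interaction (per-token) embeddings for both queries and images, is shown below. The ColBERT-style MaxSim scoring and the function name are illustrative assumptions, not the guide's actual helper.

```python
# Illustrative MaxSim-style scorer over the stored NumPy embeddings.
# Assumes ds_with_embeddings["embeddings"] holds per-image token embeddings
# of shape (num_image_tokens, dim) and query_embeddings has shape
# (num_query_tokens, dim). Names and signature are for illustration only.
import numpy as np
import torch

def search_images(query_embeddings, ds_with_embeddings, top_k=5, batch_size=16):
    q = torch.as_tensor(np.asarray(query_embeddings), dtype=torch.float32)
    q = q.reshape(-1, q.shape[-1])
    scores = []
    embeddings = ds_with_embeddings["embeddings"]
    for start in range(0, len(embeddings), batch_size):
        for emb in embeddings[start:start + batch_size]:
            doc = torch.as_tensor(np.asarray(emb), dtype=torch.float32)
            doc = doc.reshape(-1, doc.shape[-1])
            sim = q @ doc.T  # token-to-token similarities
            # MaxSim: take the best-matching image token per query token, then sum.
            scores.append(sim.max(dim=1).values.sum().item())
    scores = torch.tensor(scores)
    top = torch.topk(scores, k=min(top_k, len(scores)))
    return top.indices.tolist(), top.values.tolist()
```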
|
||||
|
@ -1,66 +0,0 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# LiteRT
|
||||
|
||||
[LiteRT](https://ai.google.dev/edge/litert) (previously known as TensorFlow Lite) is a high-performance runtime designed for on-device machine learning.
|
||||
|
||||
The [Optimum](https://huggingface.co/docs/optimum/index) library exports a model to LiteRT for [many architectures](https://huggingface.co/docs/optimum/exporters/onnx/overview).
|
||||
|
||||
The benefits of exporting to LiteRT include the following.
|
||||
|
||||
- Low-latency, privacy-focused, no internet connectivity required, and reduced model size and power consumption for on-device machine learning.
|
||||
- Broad platform, model framework, and language support.
|
||||
- Hardware acceleration for GPUs and Apple Silicon.
|
||||
|
||||
Export a Transformers model to LiteRT with the Optimum CLI.
|
||||
|
||||
Run the command below to install Optimum and the [exporters](https://huggingface.co/docs/optimum/exporters/overview) module for LiteRT.
|
||||
|
||||
```bash
|
||||
pip install optimum[exporters-tf]
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> Refer to the [Export a model to TFLite with optimum.exporters.tflite](https://huggingface.co/docs/optimum/main/en/exporters/tflite/usage_guides/export_a_model) guide for all available arguments or with the command below.
|
||||
> ```bash
|
||||
> optimum-cli export tflite --help
|
||||
> ```
|
||||
|
||||
Set the `--model` argument to export a from the Hub.
|
||||
|
||||
```bash
|
||||
optimum-cli export tflite --model google-bert/bert-base-uncased --sequence_length 128 bert_tflite/
|
||||
```
|
||||
|
||||
You should see logs indicating the progress and showing where the resulting `model.tflite` is saved.
|
||||
|
||||
```bash
|
||||
Validating TFLite model...
|
||||
-[✓] TFLite model output names match reference model (logits)
|
||||
- Validating TFLite Model output "logits":
|
||||
-[✓] (1, 128, 30522) matches (1, 128, 30522)
|
||||
-[x] values not close enough, max diff: 5.817413330078125e-05 (atol: 1e-05)
|
||||
The TensorFlow Lite export succeeded with the warning: The maximum absolute difference between the output of the reference model and the TFLite exported model is not within the set tolerance 1e-05:
|
||||
- logits: max diff = 5.817413330078125e-05.
|
||||
The exported model was saved at: bert_tflite
|
||||
```
|
||||
|
||||
For local models, make sure the model weights and tokenizer files are saved in the same directory, for example `local_path`. Pass the directory to the `--model` argument and use `--task` to indicate the [task](https://huggingface.co/docs/optimum/exporters/task_manager) a model can perform. If `--task` isn't provided, the model architecture without a task-specific head is used.
|
||||
|
||||
```bash
|
||||
optimum-cli export tflite --model local_path --task question-answering google-bert/bert-base-uncased --sequence_length 128 bert_tflite/
|
||||
```
|
@ -64,8 +64,6 @@
|
||||
title: Entrenador
|
||||
- local: sagemaker
|
||||
title: Ejecutar el entrenamiento en Amazon SageMaker
|
||||
- local: converting_tensorflow_models
|
||||
title: Convertir checkpoints de TensorFlow
|
||||
- local: serialization
|
||||
title: Exportar a ONNX
|
||||
- local: torchscript
|
||||
|
@ -1,139 +0,0 @@
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Convertir checkpoints de Tensorflow
|
||||
|
||||
Te proporcionamos una interfaz de línea de comando (`CLI`, por sus siglas en inglés) para convertir puntos de control (_checkpoints_) originales de Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM en modelos que se puedan cargar utilizando los métodos `from_pretrained` de la biblioteca.
|
||||
|
||||
<Tip>
|
||||
|
||||
Desde 2.3.0, el script para convertir es parte de la CLI de transformers (**transformers**) disponible en cualquier instalación de transformers >= 2.3.0.
|
||||
|
||||
La siguiente documentación refleja el formato para el comando **transformers convert**.
|
||||
|
||||
</Tip>
|
||||
|
||||
## BERT
|
||||
|
||||
Puedes convertir cualquier checkpoint de TensorFlow para BERT (en particular, [los modelos pre-entrenados y publicados por Google](https://github.com/google-research/bert#pre-trained-models)) en un archivo de PyTorch mediante el script [convert_bert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py).
|
||||
|
||||
Esta CLI toma como entrada un checkpoint de TensorFlow (tres archivos que comienzan con `bert_model.ckpt`) y el archivo de configuración asociado (`bert_config.json`), y crea un modelo PyTorch para esta configuración, carga los pesos del checkpoint de TensorFlow en el modelo de PyTorch y guarda el modelo resultante en un archivo estándar de PyTorch que se puede importar usando `from_pretrained()` (ve el ejemplo en [Tour rápido](quicktour), [run_glue.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_glue.py)).
|
||||
|
||||
Solo necesitas ejecutar este script **una vez** para convertir un modelo a PyTorch. Después, puedes ignorar el checkpoint de TensorFlow (los tres archivos que comienzan con `bert_model.ckpt`), pero asegúrate de conservar el archivo de configuración (`bert_config.json`) y el archivo de vocabulario (`vocab.txt`) ya que estos también son necesarios para el modelo en PyTorch.
|
||||
|
||||
Para ejecutar este script deberás tener instalado TensorFlow y PyTorch (`pip install tensorflow`). El resto del repositorio solo requiere PyTorch.
|
||||
|
||||
Aquí hay un ejemplo del proceso para convertir un modelo `BERT-Base Uncased` pre-entrenado:
|
||||
|
||||
```bash
|
||||
export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
|
||||
|
||||
transformers convert --model_type bert \
|
||||
--tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \
|
||||
--config $BERT_BASE_DIR/bert_config.json \
|
||||
--pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin
|
||||
```

You can download Google's pre-trained models for the conversion [here](https://github.com/google-research/bert#pre-trained-models).

## ALBERT

Convert TensorFlow checkpoints of the ALBERT model to PyTorch using the [convert_albert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py) script.

The CLI takes as input a TensorFlow checkpoint (three files starting with `model.ckpt-best`) and the accompanying configuration file (`albert_config.json`), then creates and saves a PyTorch model. To run this conversion you will need to have TensorFlow and PyTorch installed.

Here is an example of the conversion process for a pre-trained `ALBERT Base` model:

```bash
export ALBERT_BASE_DIR=/path/to/albert/albert_base

transformers convert --model_type albert \
  --tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \
  --config $ALBERT_BASE_DIR/albert_config.json \
  --pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin
```

You can download Google's pre-trained models for the conversion [here](https://github.com/google-research/albert#pre-trained-models).

## OpenAI GPT

Here is an example of the conversion process for a pre-trained OpenAI GPT model, assuming that your NumPy checkpoint is saved in the same format as the OpenAI pre-trained model (more information [here](https://github.com/openai/finetune-transformer-lm)):

```bash
export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights

transformers convert --model_type gpt \
  --tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config OPENAI_GPT_CONFIG] \
  [--finetuning_task_name OPENAI_GPT_FINETUNED_TASK]
```

## OpenAI GPT-2

Here is an example of the conversion process for a pre-trained OpenAI GPT-2 model (more information [here](https://github.com/openai/gpt-2)):

```bash
export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/openai-community/gpt2/pretrained/weights

transformers convert --model_type gpt2 \
  --tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config OPENAI_GPT2_CONFIG] \
  [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]
```

## XLNet

Here is an example of the conversion process for a pre-trained XLNet model:

```bash
export TRANSFO_XL_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
export TRANSFO_XL_CONFIG_PATH=/path/to/xlnet/config

transformers convert --model_type xlnet \
  --tf_checkpoint $TRANSFO_XL_CHECKPOINT_PATH \
  --config $TRANSFO_XL_CONFIG_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--finetuning_task_name XLNET_FINETUNED_TASK]
```

## XLM

Here is an example of the conversion process for a pre-trained XLM model:

```bash
export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint

transformers convert --model_type xlm \
  --tf_checkpoint $XLM_CHECKPOINT_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config XLM_CONFIG] \
  [--finetuning_task_name XLM_FINETUNED_TASK]
```

## T5

Here is an example of the conversion process for a pre-trained T5 model:

```bash
export T5=/path/to/t5/uncased_L-12_H-768_A-12

transformers convert --model_type t5 \
  --tf_checkpoint $T5/t5_model.ckpt \
  --config $T5/t5_config.json \
  --pytorch_dump_output $T5/pytorch_model.bin
```

@@ -2,6 +2,4 @@
  - local: pipeline_tutorial
    title: Run inference with pipelines
  - local: accelerate
    title: Set up distributed training with 🤗 Accelerate
  - local: tflite
    title: Export to TFLite

@@ -1,55 +0,0 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Export to TFLite

[TensorFlow Lite](https://www.tensorflow.org/lite/guide) is a lightweight framework for deploying machine learning models on resource-constrained devices, such as mobile phones, embedded systems, and Internet of Things (IoT) devices. TFLite is designed to optimize and run models efficiently on these devices with limited computational power, memory, and power consumption. A TensorFlow Lite model is represented in a special efficient portable format identified by the `.tflite` file extension.

🤗 Optimum offers functionality to export 🤗 Transformers models to TFLite through its `exporters.tflite` module. For the list of supported model architectures, please refer to the [🤗 Optimum documentation](https://huggingface.co/docs/optimum/exporters/tflite/overview).

To export a model to TFLite, install the required dependencies:

```bash
pip install optimum[exporters-tf]
```

To check out all available arguments, refer to the [🤗 Optimum docs](https://huggingface.co/docs/optimum/main/en/exporters/tflite/usage_guides/export_a_model),
or view the help in the command line:

```bash
optimum-cli export tflite --help
```

To export a model's checkpoint from the 🤗 Hub, for example `google-bert/bert-base-uncased`, run the following command:

```bash
optimum-cli export tflite --model google-bert/bert-base-uncased --sequence_length 128 bert_tflite/
```

You should see logs indicating the progress and showing where the resulting `model.tflite` is saved, like this:

```bash
Validating TFLite model...
-[✓] TFLite model output names match reference model (logits)
- Validating TFLite Model output "logits":
-[✓] (1, 128, 30522) matches (1, 128, 30522)
-[x] values not close enough, max diff: 5.817413330078125e-05 (atol: 1e-05)
The TensorFlow Lite export succeeded with the warning: The maximum absolute difference between the output of the reference model and the TFLite exported model is not within the set tolerance 1e-05:
- logits: max diff = 5.817413330078125e-05.
The exported model was saved at: bert_tflite
```

The example above illustrates exporting a checkpoint from the 🤗 Hub. When exporting a local model, first make sure that you saved both the model's weights and the tokenizer files in the same directory (`local_path`). When using the CLI, pass the `local_path` to the `model` argument instead of the checkpoint name.
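
For the local case, here is a minimal sketch of preparing such a directory with 🤗 Transformers (the directory name `local_model_dir` and the choice of a TensorFlow model class are illustrative assumptions, not part of the original guide):

```python
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

# Illustrative local directory; any path works as long as model and tokenizer share it.
local_path = "local_model_dir"

model = TFAutoModelForSequenceClassification.from_pretrained("google-bert/bert-base-uncased")
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

# Save the weights and tokenizer files side by side, then point the exporter at the folder:
#   optimum-cli export tflite --model local_model_dir --sequence_length 128 bert_tflite/
model.save_pretrained(local_path)
tokenizer.save_pretrained(local_path)
```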

@@ -29,8 +29,6 @@
    title: Train with a script
  - local: multilingual
    title: Multilingual models for inference
  - local: converting_tensorflow_models
    title: Convert TensorFlow models
  - local: serialization
    title: Export Transformers models
  - local: perf_train_cpu

@@ -1,144 +0,0 @@
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Convert TensorFlow checkpoints

A command-line interface is available to convert original Bert/GPT/GPT-2/Transformer-XL/XLNet/XLM checkpoints
into models that can be loaded using the library's `from_pretrained` methods.

<Tip>

Since version 2.3.0 the conversion script is part of the transformers CLI (**transformers**), available in any installation
of transformers >= 2.3.0.

The documentation below reflects the **transformers convert** command format.

</Tip>

## BERT

You can convert any TensorFlow checkpoint for BERT (in particular
[the pre-trained models released by Google](https://github.com/google-research/bert#pre-trained-models))
into a PyTorch save file by using the
[convert_bert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/bert/convert_bert_original_tf_checkpoint_to_pytorch.py) script.

This CLI takes as input a TensorFlow checkpoint (three files starting with `bert_model.ckpt`) and the associated
configuration file (`bert_config.json`), creates a PyTorch model for this configuration, loads the weights from the
TensorFlow checkpoint into the PyTorch model and saves the resulting model in a standard PyTorch save file that
can be imported using `from_pretrained()` (see the example in the
[quicktour](quicktour) and in [run_glue.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_glue.py)).

You only need to run this conversion script **once** to get a PyTorch model. You can then disregard the TensorFlow
checkpoint (the three files starting with `bert_model.ckpt`), but be sure to keep the configuration file
(`bert_config.json`) and the vocabulary file (`vocab.txt`), as these are also needed for the PyTorch model.

To run this specific conversion script you will need to have TensorFlow and PyTorch installed
(`pip install tensorflow`). The rest of the repository only requires PyTorch.

Here is an example of the conversion process for a pre-trained `BERT-Base Uncased` model:

```bash
export BERT_BASE_DIR=/path/to/bert/uncased_L-12_H-768_A-12
transformers convert --model_type bert \
  --tf_checkpoint $BERT_BASE_DIR/bert_model.ckpt \
  --config $BERT_BASE_DIR/bert_config.json \
  --pytorch_dump_output $BERT_BASE_DIR/pytorch_model.bin
```

You can download Google's pre-trained models for the conversion [here](https://github.com/google-research/bert#pre-trained-models).

## ALBERT

For the ALBERT model, convert TensorFlow checkpoints to PyTorch using the
[convert_albert_original_tf_checkpoint_to_pytorch.py](https://github.com/huggingface/transformers/tree/main/src/transformers/models/albert/convert_albert_original_tf_checkpoint_to_pytorch.py) script.

The CLI takes as input a TensorFlow checkpoint (three files starting with `model.ckpt-best`) and the accompanying
configuration file (`albert_config.json`), then creates and saves a PyTorch model. To run this conversion you will
need to have TensorFlow and PyTorch installed.

Here is an example of the conversion process for a pre-trained `ALBERT Base` model:

```bash
export ALBERT_BASE_DIR=/path/to/albert/albert_base
transformers convert --model_type albert \
  --tf_checkpoint $ALBERT_BASE_DIR/model.ckpt-best \
  --config $ALBERT_BASE_DIR/albert_config.json \
  --pytorch_dump_output $ALBERT_BASE_DIR/pytorch_model.bin
```

You can download Google's pre-trained models for the conversion [here](https://github.com/google-research/albert#pre-trained-models).

## OpenAI GPT

Here is an example of the conversion process for a pre-trained OpenAI GPT model, assuming that your NumPy checkpoint
is saved in the same format as the OpenAI pre-trained models (see [here](https://github.com/openai/finetune-transformer-lm)):

```bash
export OPENAI_GPT_CHECKPOINT_FOLDER_PATH=/path/to/openai/pretrained/numpy/weights
transformers convert --model_type gpt \
  --tf_checkpoint $OPENAI_GPT_CHECKPOINT_FOLDER_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config OPENAI_GPT_CONFIG] \
  [--finetuning_task_name OPENAI_GPT_FINETUNED_TASK]
```

## OpenAI GPT-2

Here is an example of the conversion process for a pre-trained OpenAI GPT-2 model (see [here](https://github.com/openai/gpt-2)):

```bash
export OPENAI_GPT2_CHECKPOINT_PATH=/path/to/openai-community/gpt2/pretrained/weights
transformers convert --model_type gpt2 \
  --tf_checkpoint $OPENAI_GPT2_CHECKPOINT_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config OPENAI_GPT2_CONFIG] \
  [--finetuning_task_name OPENAI_GPT2_FINETUNED_TASK]
```

## XLNet

Here is an example of the conversion process for a pre-trained XLNet model:

```bash
export TRANSFO_XL_CHECKPOINT_PATH=/path/to/xlnet/checkpoint
export TRANSFO_XL_CONFIG_PATH=/path/to/xlnet/config
transformers convert --model_type xlnet \
  --tf_checkpoint $TRANSFO_XL_CHECKPOINT_PATH \
  --config $TRANSFO_XL_CONFIG_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--finetuning_task_name XLNET_FINETUNED_TASK]
```

## XLM

Here is an example of the conversion process for a pre-trained XLM model:

```bash
export XLM_CHECKPOINT_PATH=/path/to/xlm/checkpoint
transformers convert --model_type xlm \
  --tf_checkpoint $XLM_CHECKPOINT_PATH \
  --pytorch_dump_output $PYTORCH_DUMP_OUTPUT \
  [--config XLM_CONFIG] \
  [--finetuning_task_name XLM_FINETUNED_TASK]
```

## T5

Here is an example of the conversion process for a pre-trained T5 model:

```bash
export T5=/path/to/t5/uncased_L-12_H-768_A-12
transformers convert --model_type t5 \
  --tf_checkpoint $T5/t5_model.ckpt \
  --config $T5/t5_config.json \
  --pytorch_dump_output $T5/pytorch_model.bin
```

@@ -109,8 +109,6 @@
    title: Templates for chat models
  - local: serialization
    title: Export to ONNX
  - local: tflite
    title: Export to TFLite
  - local: torchscript
    title: Export to TorchScript
  - local: community
@@ -132,8 +130,6 @@
    title: Distributed CPU training
  - local: perf_train_tpu
    title: Training on TPUs
  - local: perf_train_tpu_tf
    title: Training on TPU with TensorFlow
  - local: perf_train_special
    title: Training on specialized hardware
  - local: perf_hardware
@@ -153,8 +149,6 @@
    title: Optimizing inference
  - local: big_models
    title: Instantiate a big model
  - local: tf_xla
    title: XLA Integration for TensorFlow Models
  - local: perf_torch_compile
    title: Optimize inference using torch.compile()
  title: Performance and scalability
@@ -202,8 +196,6 @@
    title: Configuration
  - local: main_classes/data_collator
    title: Data Collator
  - local: main_classes/keras_callbacks
    title: Keras callbacks
  - local: main_classes/logging
    title: Logging
  - local: main_classes/model

@@ -241,43 +241,6 @@ time."\n\nHe added: "I am very proud of the work I have been able to do in the l
'Das Haus ist wunderbar.'
```

### Diverse beam search decoding

The diverse beam search decoding strategy is an extension of the beam search strategy that allows for generating a more diverse set of beam sequences to choose from. To learn how it works, refer to [Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models](https://huggingface.co/papers/1610.02424). This approach has three main parameters: `num_beams`, `num_beam_groups`, and `diversity_penalty`. The diversity penalty ensures the outputs are distinct across groups, and beam search is used within each group.

```python
>>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

>>> checkpoint = "google/pegasus-xsum"
>>> prompt = (
...     "The Permaculture Design Principles are a set of universal design principles "
...     "that can be applied to any location, climate and culture, and they allow us to design "
...     "the most efficient and sustainable human habitation and food production systems. "
...     "Permaculture is a design system that encompasses a wide variety of disciplines, such "
...     "as ecology, landscape design, environmental science and energy conservation, and the "
...     "Permaculture design principles are drawn from these various disciplines. Each individual "
...     "design principle itself embodies a complete conceptual framework based on sound "
...     "scientific principles. When we bring all these separate principles together, we can "
...     "create a design system that both looks at whole systems, the parts that these systems "
...     "consist of, and how those parts interact with each other to create a complex, dynamic, "
...     "living system. Each design principle serves as a tool that allows us to integrate all "
...     "the separate parts of a design, referred to as elements, into a functional, synergistic, "
...     "whole system, where the elements harmoniously interact and work together in the most "
...     "efficient way possible."
... )

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

>>> outputs = model.generate(**inputs, num_beams=5, num_beam_groups=5, max_new_tokens=30, diversity_penalty=1.0)
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
'The Design Principles are a set of universal design principles that can be applied to any location, climate and
culture, and they allow us to design the'
```

### Assisted Decoding

Assisted decoding is a modification of the decoding strategies above that uses an assistant model (ideally a much smaller one) with the same tokenizer to greedily generate a few candidate tokens. The main model then validates the candidate tokens in a single forward pass, which speeds up the decoding process. Currently, only greedy search and sampling are supported with assisted decoding, and batched inputs are not supported. To learn more about assisted decoding, check [this blog post](https://huggingface.co/blog/assisted-generation).
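
In code, this amounts to passing the smaller model to `generate()` via the `assistant_model` argument. The sketch below follows that pattern; the checkpoints are illustrative, and any pair of models that share a tokenizer should work:

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer

>>> prompt = "Alice and Bob"
>>> checkpoint = "EleutherAI/pythia-1.4b-deduped"
>>> assistant_checkpoint = "EleutherAI/pythia-160m-deduped"

>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
>>> inputs = tokenizer(prompt, return_tensors="pt")

>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
>>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
>>> outputs = model.generate(**inputs, assistant_model=assistant_model)
>>> text = tokenizer.batch_decode(outputs, skip_special_tokens=True)
```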