mirror of
https://github.com/huggingface/transformers.git
synced 2025-10-21 09:44:02 +08:00
Compare commits
1 Commits
quickfix_g
...
v4.41.0
Author | SHA1 | Date | |
---|---|---|---|
4c6c45ba13 |
@ -31,7 +31,6 @@ jobs:
|
||||
steps:
|
||||
- checkout
|
||||
- run: uv pip install -U -e .
|
||||
- run: echo 'export "GIT_COMMIT_MESSAGE=$(git show -s --format=%s)"' >> "$BASH_ENV" && source "$BASH_ENV"
|
||||
- run: mkdir -p test_preparation
|
||||
- run: python utils/tests_fetcher.py | tee tests_fetched_summary.txt
|
||||
- store_artifacts:
|
||||
@ -81,7 +80,7 @@ jobs:
|
||||
path: ~/transformers/test_preparation/filtered_test_list.txt
|
||||
- store_artifacts:
|
||||
path: test_preparation/examples_test_list.txt
|
||||
- run: export "GIT_COMMIT_MESSAGE=$(git show -s --format=%s)" && echo $GIT_COMMIT_MESSAGE && python .circleci/create_circleci_config.py --fetcher_folder test_preparation
|
||||
- run: python .circleci/create_circleci_config.py --fetcher_folder test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/generated_config.yml ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
@ -98,7 +97,7 @@ jobs:
|
||||
fetch_all_tests:
|
||||
working_directory: ~/transformers
|
||||
docker:
|
||||
- image: huggingface/transformers-quality
|
||||
- image: huggingface/transformers-consistency
|
||||
parallelism: 1
|
||||
steps:
|
||||
- checkout
|
||||
@ -142,7 +141,6 @@ jobs:
|
||||
- run: python utils/custom_init_isort.py --check_only
|
||||
- run: python utils/sort_auto_mappings.py --check_only
|
||||
- run: python utils/check_doc_toc.py
|
||||
- run: python utils/check_docstrings.py --check_all
|
||||
|
||||
check_repository_consistency:
|
||||
working_directory: ~/transformers
|
||||
@ -191,4 +189,4 @@ workflows:
|
||||
- check_circleci_user
|
||||
- check_code_quality
|
||||
- check_repository_consistency
|
||||
- fetch_all_tests
|
||||
- fetch_all_tests
|
@ -72,12 +72,6 @@ class CircleCIJob:
|
||||
if self.docker_image is None:
|
||||
# Let's avoid changing the default list and make a copy.
|
||||
self.docker_image = copy.deepcopy(DEFAULT_DOCKER_IMAGE)
|
||||
else:
|
||||
# BIG HACK WILL REMOVE ONCE FETCHER IS UPDATED
|
||||
print(os.environ.get("GIT_COMMIT_MESSAGE"))
|
||||
if "[build-ci-image]" in os.environ.get("GIT_COMMIT_MESSAGE", "") or os.environ.get("GIT_COMMIT_MESSAGE", "") == "dev-ci":
|
||||
self.docker_image[0]["image"] = f"{self.docker_image[0]['image']}:dev"
|
||||
print(f"Using {self.docker_image} docker image")
|
||||
if self.install_steps is None:
|
||||
self.install_steps = []
|
||||
if self.pytest_options is None:
|
||||
@ -155,7 +149,7 @@ class CircleCIJob:
|
||||
elif self.name in ["flax","torch","tf"]:
|
||||
name = self.name if self.name != "torch" else ""
|
||||
if self.name == "torch":
|
||||
all_tests = glob.glob(f"tests/models/**/test_modeling_{name}*.py", recursive=True)
|
||||
all_tests = glob.glob(f"tests/models/**/test_modeling_{name}*.py", recursive=True)
|
||||
filtered = [k for k in all_tests if ("_tf_") not in k and "_flax_" not in k]
|
||||
expanded_tests.extend(filtered)
|
||||
else:
|
||||
@ -163,7 +157,7 @@ class CircleCIJob:
|
||||
else:
|
||||
expanded_tests.extend(glob.glob("tests/models/**/test_modeling*.py", recursive=True))
|
||||
elif test == "tests/pipelines":
|
||||
expanded_tests.extend(glob.glob("tests/models/**/test_modeling*.py", recursive=True))
|
||||
expanded_tests.extend(glob.glob("tests/models/**/test_modeling*.py", recursive=True))
|
||||
else:
|
||||
expanded_tests.append(test)
|
||||
tests = " ".join(expanded_tests)
|
||||
@ -248,7 +242,7 @@ torch_job = CircleCIJob(
|
||||
docker_image=[{"image": "huggingface/transformers-torch-light"}],
|
||||
install_steps=["uv venv && uv pip install ."],
|
||||
parallelism=6,
|
||||
pytest_num_workers=4
|
||||
pytest_num_workers=16
|
||||
)
|
||||
|
||||
tokenization_job = CircleCIJob(
|
||||
@ -256,7 +250,7 @@ tokenization_job = CircleCIJob(
|
||||
docker_image=[{"image": "huggingface/transformers-torch-light"}],
|
||||
install_steps=["uv venv && uv pip install ."],
|
||||
parallelism=6,
|
||||
pytest_num_workers=4
|
||||
pytest_num_workers=16
|
||||
)
|
||||
|
||||
|
||||
@ -265,7 +259,7 @@ tf_job = CircleCIJob(
|
||||
docker_image=[{"image":"huggingface/transformers-tf-light"}],
|
||||
install_steps=["uv venv", "uv pip install -e."],
|
||||
parallelism=6,
|
||||
pytest_num_workers=4,
|
||||
pytest_num_workers=16,
|
||||
)
|
||||
|
||||
|
||||
@ -274,7 +268,7 @@ flax_job = CircleCIJob(
|
||||
docker_image=[{"image":"huggingface/transformers-jax-light"}],
|
||||
install_steps=["uv venv && uv pip install ."],
|
||||
parallelism=6,
|
||||
pytest_num_workers=4
|
||||
pytest_num_workers=16
|
||||
)
|
||||
|
||||
|
||||
@ -326,7 +320,7 @@ examples_tensorflow_job = CircleCIJob(
|
||||
"examples_tensorflow",
|
||||
cache_name="tensorflow_examples",
|
||||
docker_image=[{"image":"huggingface/transformers-examples-tf"}],
|
||||
install_steps=["uv venv && uv pip install . && uv pip install -r examples/tensorflow/_tests_requirements.txt"],
|
||||
install_steps=["uv venv && uv pip install ."],
|
||||
parallelism=8
|
||||
)
|
||||
|
||||
|
51
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
51
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
@ -1,17 +1,6 @@
|
||||
name: "\U0001F41B Bug Report"
|
||||
description: Submit a bug report to help us improve transformers
|
||||
labels: [ "bug" ]
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
Thanks for taking the time to fill out this bug report! 🤗
|
||||
|
||||
Before you submit your bug report:
|
||||
|
||||
- If it is your first time submitting, be sure to check our [bug report guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#did-you-find-a-bug)
|
||||
- Try our [docs bot](https://huggingface.co/spaces/huggingchat/hf-docs-chat) -- it might be able to help you with your issue
|
||||
|
||||
- type: textarea
|
||||
id: system-info
|
||||
attributes:
|
||||
@ -28,50 +17,50 @@ body:
|
||||
description: |
|
||||
Your issue will be replied to more quickly if you can figure out the right person to tag with @
|
||||
If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
|
||||
|
||||
|
||||
All issues are read by one of the core maintainers, so if you don't know who to tag, just leave this blank and
|
||||
a core maintainer will ping the right person.
|
||||
|
||||
|
||||
Please tag fewer than 3 people.
|
||||
|
||||
|
||||
Models:
|
||||
|
||||
- text models: @ArthurZucker
|
||||
- text models: @ArthurZucker and @younesbelkada
|
||||
- vision models: @amyeroberts
|
||||
- speech models: @sanchit-gandhi
|
||||
- graph models: @clefourrier
|
||||
|
||||
|
||||
Library:
|
||||
|
||||
|
||||
- flax: @sanchit-gandhi
|
||||
- generate: @zucchini-nlp (visual-language models) or @gante (all others)
|
||||
- generate: @gante
|
||||
- pipelines: @Narsil
|
||||
- tensorflow: @gante and @Rocketknight1
|
||||
- tokenizers: @ArthurZucker
|
||||
- trainer: @muellerzr @SunMarc
|
||||
|
||||
- trainer: @muellerzr and @pacman100
|
||||
|
||||
Integrations:
|
||||
|
||||
- deepspeed: HF Trainer/Accelerate: @muellerzr
|
||||
|
||||
- deepspeed: HF Trainer/Accelerate: @pacman100
|
||||
- ray/raytune: @richardliaw, @amogkam
|
||||
- Big Model Inference: @SunMarc
|
||||
- quantization (bitsandbytes, autogpt): @SunMarc
|
||||
|
||||
- quantization (bitsandbytes, autogpt): @SunMarc and @younesbelkada
|
||||
|
||||
Documentation: @stevhliu
|
||||
|
||||
|
||||
Model hub:
|
||||
|
||||
- for issues with a model, report at https://discuss.huggingface.co/ and tag the model's creator.
|
||||
|
||||
|
||||
HF projects:
|
||||
|
||||
|
||||
- accelerate: [different repo](https://github.com/huggingface/accelerate)
|
||||
- datasets: [different repo](https://github.com/huggingface/datasets)
|
||||
- diffusers: [different repo](https://github.com/huggingface/diffusers)
|
||||
- rust tokenizers: [different repo](https://github.com/huggingface/tokenizers)
|
||||
|
||||
|
||||
Maintained examples (not research project or legacy):
|
||||
|
||||
|
||||
- Flax: @sanchit-gandhi
|
||||
- PyTorch: See Models above and tag the person corresponding to the modality of the example.
|
||||
- TensorFlow: @Rocketknight1
|
||||
@ -112,11 +101,11 @@ body:
|
||||
|
||||
placeholder: |
|
||||
Steps to reproduce the behavior:
|
||||
|
||||
|
||||
1.
|
||||
2.
|
||||
3.
|
||||
|
||||
|
||||
|
||||
- type: textarea
|
||||
id: expected-behavior
|
||||
|
4
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
4
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
@ -1,6 +1,6 @@
|
||||
name: "\U0001F680 Feature request"
|
||||
description: Submit a proposal/request for a new transformers feature
|
||||
labels: [ "Feature request" ]
|
||||
labels: [ "feature" ]
|
||||
body:
|
||||
- type: textarea
|
||||
id: feature-request
|
||||
@ -19,7 +19,7 @@ body:
|
||||
label: Motivation
|
||||
description: |
|
||||
Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too.
|
||||
|
||||
|
||||
|
||||
- type: textarea
|
||||
id: contribution
|
||||
|
2
.github/ISSUE_TEMPLATE/i18n.md
vendored
2
.github/ISSUE_TEMPLATE/i18n.md
vendored
@ -34,7 +34,7 @@ Some notes:
|
||||
|
||||
## Tutorial section
|
||||
- [ ] [pipeline_tutorial.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/pipeline_tutorial.md)
|
||||
- [ ] [autoclass_tutorial.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/autoclass_tutorial.md)
|
||||
- [ ] [autoclass_tutorial.md](https://github.com/huggingface/transformers/blob/master/docs/source/autoclass_tutorial.md)
|
||||
- [ ] [preprocessing.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/preprocessing.md)
|
||||
- [ ] [training.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/training.md)
|
||||
- [ ] [accelerate.md](https://github.com/huggingface/transformers/blob/main/docs/source/en/accelerate.md)
|
||||
|
12
.github/PULL_REQUEST_TEMPLATE.md
vendored
12
.github/PULL_REQUEST_TEMPLATE.md
vendored
@ -39,7 +39,7 @@ members/contributors who may be interested in your PR.
|
||||
|
||||
Models:
|
||||
|
||||
- text models: @ArthurZucker
|
||||
- text models: @ArthurZucker and @younesbelkada
|
||||
- vision models: @amyeroberts
|
||||
- speech models: @sanchit-gandhi
|
||||
- graph models: @clefourrier
|
||||
@ -47,20 +47,20 @@ Models:
|
||||
Library:
|
||||
|
||||
- flax: @sanchit-gandhi
|
||||
- generate: @zucchini-nlp (visual-language models) or @gante (all others)
|
||||
- generate: @gante
|
||||
- pipelines: @Narsil
|
||||
- tensorflow: @gante and @Rocketknight1
|
||||
- tokenizers: @ArthurZucker
|
||||
- trainer: @muellerzr and @SunMarc
|
||||
- trainer: @muellerzr and @pacman100
|
||||
|
||||
Integrations:
|
||||
|
||||
- deepspeed: HF Trainer/Accelerate: @muellerzr
|
||||
- deepspeed: HF Trainer/Accelerate: @pacman100
|
||||
- ray/raytune: @richardliaw, @amogkam
|
||||
- Big Model Inference: @SunMarc
|
||||
- quantization (bitsandbytes, autogpt): @SunMarc
|
||||
- quantization (bitsandbytes, autogpt): @SunMarc and @younesbelkada
|
||||
|
||||
Documentation: @stevhliu
|
||||
Documentation: @stevhliu and @MKhalusova
|
||||
|
||||
HF projects:
|
||||
|
||||
|
42
.github/workflows/benchmark.yml
vendored
42
.github/workflows/benchmark.yml
vendored
@ -1,42 +0,0 @@
|
||||
name: Self-hosted runner (benchmark)
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "17 2 * * *"
|
||||
workflow_call:
|
||||
|
||||
env:
|
||||
HF_HOME: /mnt/cache
|
||||
TF_FORCE_GPU_ALLOW_GROWTH: true
|
||||
|
||||
|
||||
jobs:
|
||||
benchmark:
|
||||
name: Benchmark
|
||||
runs-on: [single-gpu, nvidia-gpu, a10, ci]
|
||||
container:
|
||||
image: huggingface/transformers-all-latest-gpu
|
||||
options: --gpus all --privileged --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Update clone
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
|
||||
- name: Benchmark (daily)
|
||||
if: github.event_name == 'schedule'
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 -m pip install optimum-benchmark>=0.2.0
|
||||
HF_TOKEN=${{ secrets.TRANSFORMERS_BENCHMARK_TOKEN }} python3 benchmark/benchmark.py --repo_id hf-internal-testing/benchmark_results --path_in_repo $(date +'%Y-%m-%d') --config-dir benchmark/config --config-name generation --commit=${{ github.sha }} backend.model=google/gemma-2b backend.cache_implementation=null,static backend.torch_compile=false,true --multirun
|
||||
|
||||
- name: Benchmark (merged to main event)
|
||||
if: github.event_name == 'push' && github.ref_name == 'main'
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 -m pip install optimum-benchmark>=0.2.0
|
||||
HF_TOKEN=${{ secrets.TRANSFORMERS_BENCHMARK_TOKEN }} python3 benchmark/benchmark.py --repo_id hf-internal-testing/benchmark_results_merge_event --path_in_repo $(date +'%Y-%m-%d') --config-dir benchmark/config --config-name generation --commit=${{ github.sha }} backend.model=google/gemma-2b backend.cache_implementation=null,static backend.torch_compile=false,true --multirun
|
33
.github/workflows/build-ci-docker-images.yml
vendored
33
.github/workflows/build-ci-docker-images.yml
vendored
@ -3,7 +3,7 @@ name: Build pr ci-docker
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- push-ci-image # for now let's only build on this branch
|
||||
- change-ci # for now let's only build on this branch
|
||||
repository_dispatch:
|
||||
workflow_call:
|
||||
inputs:
|
||||
@ -22,24 +22,14 @@ jobs:
|
||||
build:
|
||||
runs-on: ubuntu-22.04
|
||||
|
||||
if: ${{ contains(github.event.head_commit.message, '[build-ci-image]') || contains(github.event.head_commit.message, '[push-ci-image]') && '!cancelled()' || github.event_name == 'schedule' }}
|
||||
if: ${{ contains(github.event.head_commit.message, '[push-ci-image]') && '!cancelled()' }}
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
file: ["quality", "consistency", "custom-tokenizers", "torch-light", "tf-light", "exotic-models", "torch-tf-light", "torch-jax-light", "jax-light", "examples-torch", "examples-tf"]
|
||||
continue-on-error: true
|
||||
continue-on-error: true
|
||||
|
||||
steps:
|
||||
-
|
||||
name: Set tag
|
||||
run: |
|
||||
if ${{contains(github.event.head_commit.message, '[build-ci-image]')}}; then
|
||||
echo "TAG=huggingface/transformers-${{ matrix.file }}:dev" >> "$GITHUB_ENV"
|
||||
echo "setting it to DEV!"
|
||||
else
|
||||
echo "TAG=huggingface/transformers-${{ matrix.file }}" >> "$GITHUB_ENV"
|
||||
|
||||
fi
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
@ -60,18 +50,5 @@ jobs:
|
||||
build-args: |
|
||||
REF=${{ github.sha }}
|
||||
file: "./docker/${{ matrix.file }}.dockerfile"
|
||||
push: ${{ contains(github.event.head_commit.message, 'ci-image]') || github.event_name == 'schedule' }}
|
||||
tags: ${{ env.TAG }}
|
||||
|
||||
notify:
|
||||
runs-on: ubuntu-22.04
|
||||
if: ${{ contains(github.event.head_commit.message, '[build-ci-image]') || contains(github.event.head_commit.message, '[push-ci-image]') && '!cancelled()' || github.event_name == 'schedule' }}
|
||||
steps:
|
||||
- name: Post to Slack
|
||||
if: ${{ contains(github.event.head_commit.message, '[push-ci-image]') && github.event_name != 'schedule' }}
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: "#transformers-ci-circleci-images"
|
||||
title: 🤗 New docker images for CircleCI are pushed.
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
push: true
|
||||
tags: huggingface/transformers-${{ matrix.file }}
|
113
.github/workflows/build-docker-images.yml
vendored
113
.github/workflows/build-docker-images.yml
vendored
@ -57,19 +57,20 @@ jobs:
|
||||
push: true
|
||||
tags: huggingface/transformers-all-latest-gpu-push-ci
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the transformers-all-latest-gpu-push-ci docker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-torch-deepspeed-docker:
|
||||
name: "Latest PyTorch + DeepSpeed"
|
||||
runs-on: [intel-cpu, 8-cpu, ci]
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
@ -92,20 +93,21 @@ jobs:
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-deepspeed-latest-gpu${{ inputs.image_postfix }}
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER}}
|
||||
title: 🤗 Results of the transformers-pytorch-deepspeed-latest-gpu docker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
# Can't build 2 images in a single job `latest-torch-deepspeed-docker` (for `nvcr.io/nvidia`)
|
||||
latest-torch-deepspeed-docker-for-push-ci-daily-build:
|
||||
name: "Latest PyTorch + DeepSpeed (Push CI - Daily Build)"
|
||||
runs-on: [intel-cpu, 8-cpu, ci]
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
@ -132,15 +134,6 @@ jobs:
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the transformers-pytorch-deepspeed-latest-gpu-push-ci docker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
doc-builder:
|
||||
name: "Doc builder"
|
||||
# Push CI doesn't need this image
|
||||
@ -167,21 +160,22 @@ jobs:
|
||||
push: true
|
||||
tags: huggingface/transformers-doc-builder
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the huggingface/transformers-doc-builder docker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-pytorch:
|
||||
name: "Latest PyTorch [dev]"
|
||||
# Push CI doesn't need this image
|
||||
if: inputs.image_postfix != '-push-ci'
|
||||
runs-on: [intel-cpu, 8-cpu, ci]
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
@ -204,15 +198,6 @@ jobs:
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-gpu
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the huggingface/transformers-pytorch-gpudocker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-pytorch-amd:
|
||||
name: "Latest PyTorch (AMD) [dev]"
|
||||
runs-on: [intel-cpu, 8-cpu, ci]
|
||||
@ -252,15 +237,6 @@ jobs:
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-amd-gpu-push-ci
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the huggingface/transformers-pytorch-amd-gpu-push-ci build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-tensorflow:
|
||||
name: "Latest TensorFlow [dev]"
|
||||
# Push CI doesn't need this image
|
||||
@ -289,15 +265,6 @@ jobs:
|
||||
push: true
|
||||
tags: huggingface/transformers-tensorflow-gpu
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the huggingface/transformers-tensorflow-gpu build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-pytorch-deepspeed-amd:
|
||||
name: "PyTorch + DeepSpeed (AMD) [dev]"
|
||||
runs-on: [intel-cpu, 8-cpu, ci]
|
||||
@ -337,15 +304,6 @@ jobs:
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-deepspeed-amd-gpu-push-ci
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the transformers-pytorch-deepspeed-amd-gpu build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-quantization-torch-docker:
|
||||
name: "Latest Pytorch + Quantization [dev]"
|
||||
# Push CI doesn't need this image
|
||||
@ -372,13 +330,4 @@ jobs:
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-quantization-latest-gpu${{ inputs.image_postfix }}
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the transformers-quantization-latest-gpu build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
tags: huggingface/transformers-quantization-latest-gpu${{ inputs.image_postfix }}
|
@ -13,8 +13,18 @@ concurrency:
|
||||
jobs:
|
||||
latest-with-torch-nightly-docker:
|
||||
name: "Nightly PyTorch + Stable TensorFlow"
|
||||
runs-on: [intel-cpu, 8-cpu, ci]
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
@ -40,8 +50,18 @@ jobs:
|
||||
|
||||
nightly-torch-deepspeed-docker:
|
||||
name: "Nightly PyTorch + DeepSpeed"
|
||||
runs-on: [intel-cpu, 8-cpu, ci]
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v2
|
||||
|
@ -16,7 +16,7 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
version: ["1.13", "1.12", "1.11"]
|
||||
runs-on: [intel-cpu, 8-cpu, ci]
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
@ -60,7 +60,7 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
version: ["2.11", "2.10", "2.9", "2.8", "2.7", "2.6", "2.5"]
|
||||
runs-on: [intel-cpu, 8-cpu, ci]
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
|
25
.github/workflows/model_jobs.yml
vendored
25
.github/workflows/model_jobs.yml
vendored
@ -12,12 +12,6 @@ on:
|
||||
slice_id:
|
||||
required: true
|
||||
type: number
|
||||
runner:
|
||||
required: true
|
||||
type: string
|
||||
docker:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
env:
|
||||
HF_HOME: /mnt/cache
|
||||
@ -37,13 +31,12 @@ jobs:
|
||||
run_models_gpu:
|
||||
name: " "
|
||||
strategy:
|
||||
max-parallel: 8
|
||||
fail-fast: false
|
||||
matrix:
|
||||
folders: ${{ fromJson(inputs.folder_slices)[inputs.slice_id] }}
|
||||
runs-on: ['${{ inputs.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
|
||||
runs-on: ['${{ inputs.machine_type }}', nvidia-gpu, t4, daily-ci]
|
||||
container:
|
||||
image: ${{ inputs.docker }}
|
||||
image: huggingface/transformers-all-latest-gpu
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Echo input and matrix info
|
||||
@ -72,18 +65,6 @@ jobs:
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
|
||||
- name: Update / Install some packages (for Past CI)
|
||||
if: ${{ contains(inputs.docker, '-past-') }}
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 -m pip install -U datasets
|
||||
|
||||
- name: Update / Install some packages (for Past CI)
|
||||
if: ${{ contains(inputs.docker, '-past-') && contains(inputs.docker, '-pytorch-') }}
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
@ -99,7 +80,7 @@ jobs:
|
||||
|
||||
- name: Run all tests on GPU
|
||||
working-directory: /transformers
|
||||
run: python3 -m pytest -rsfE -v --make-reports=${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}
|
||||
run: python3 -m pytest -rs -v --make-reports=${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
|
12
.github/workflows/push-important-models.yml
vendored
12
.github/workflows/push-important-models.yml
vendored
@ -5,6 +5,7 @@ on:
|
||||
branches: [ main ]
|
||||
|
||||
env:
|
||||
IS_GITHUB_CI: "1"
|
||||
OUTPUT_SLACK_CHANNEL_ID: "C06L2SGMEEA"
|
||||
HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
|
||||
HF_HOME: /mnt/cache
|
||||
@ -85,7 +86,7 @@ jobs:
|
||||
- name: Run FA2 tests
|
||||
id: run_fa2_tests
|
||||
run:
|
||||
pytest -rsfE -m "flash_attn_test" --make-reports=${{ matrix.model-name }}_fa2_tests/ tests/${{ matrix.model-name }}/test_modeling_*
|
||||
pytest -rs -m "flash_attn_test" --make-reports=${{ matrix.model-name }}_fa2_tests/ tests/${{ matrix.model-name }}/test_modeling_*
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ matrix.model-name }}_fa2_tests"
|
||||
if: ${{ always() }}
|
||||
@ -107,7 +108,7 @@ jobs:
|
||||
id: run_integration_tests
|
||||
if: always()
|
||||
run:
|
||||
pytest -rsfE -k "IntegrationTest" --make-reports=tests_integration_${{ matrix.model-name }} tests/${{ matrix.model-name }}/test_modeling_*
|
||||
pytest -rs -k "IntegrationTest" --make-reports=tests_integration_${{ matrix.model-name }} tests/${{ matrix.model-name }}/test_modeling_*
|
||||
|
||||
- name: "Test suite reports artifacts: tests_integration_${{ matrix.model-name }}"
|
||||
if: ${{ always() }}
|
||||
@ -133,10 +134,3 @@ jobs:
|
||||
slackChannel: ${{ secrets.SLACK_CIFEEDBACK_CHANNEL }}
|
||||
slackToken: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
waitForSSH: true
|
||||
|
||||
benchmark:
|
||||
name: Benchmark workflow
|
||||
needs: get_modified_models
|
||||
if: ${{ needs.get_modified_models.outputs.matrix != '[]' && needs.get_modified_models.outputs.matrix != '' && fromJson(needs.get_modified_models.outputs.matrix)[0] != null }}
|
||||
uses: ./.github/workflows/benchmark.yml
|
||||
secrets: inherit
|
||||
|
43
.github/workflows/self-nightly-caller.yml
vendored
43
.github/workflows/self-nightly-caller.yml
vendored
@ -1,43 +0,0 @@
|
||||
name: Self-hosted runner (nightly-ci)
|
||||
|
||||
|
||||
on:
|
||||
repository_dispatch:
|
||||
schedule:
|
||||
- cron: "17 2 * * *"
|
||||
push:
|
||||
branches:
|
||||
- run_nightly_ci*
|
||||
|
||||
jobs:
|
||||
build_nightly_ci_images:
|
||||
name: Build Nightly CI Docker Images
|
||||
if: (github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_nightly_ci'))
|
||||
uses: ./.github/workflows/build-nightly-ci-docker-images.yml
|
||||
secrets: inherit
|
||||
|
||||
model-ci:
|
||||
name: Model CI
|
||||
needs: [build_nightly_ci_images]
|
||||
uses: ./.github/workflows/self-scheduled.yml
|
||||
with:
|
||||
job: run_models_gpu
|
||||
slack_report_channel: "#transformers-ci-past-future"
|
||||
runner: ci
|
||||
docker: huggingface/transformers-all-latest-torch-nightly-gpu
|
||||
ci_event: Nightly CI
|
||||
secrets: inherit
|
||||
|
||||
deepspeed-ci:
|
||||
name: DeepSpeed CI
|
||||
needs: [build_nightly_ci_images]
|
||||
uses: ./.github/workflows/self-scheduled.yml
|
||||
with:
|
||||
job: run_torch_cuda_extensions_gpu
|
||||
slack_report_channel: "#transformers-ci-past-future"
|
||||
runner: ci
|
||||
# test deepspeed nightly build with the latest release torch
|
||||
docker: huggingface/transformers-pytorch-deepspeed-latest-gpu
|
||||
ci_event: Nightly CI
|
||||
working-directory-prefix: /workspace
|
||||
secrets: inherit
|
@ -2,30 +2,32 @@ name: Self-hosted runner (nightly-past-ci-caller)
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: "17 2,14 * * *"
|
||||
# 2:17 am on each Sunday and Thursday
|
||||
|
||||
- cron: "17 2 * * 0,4"
|
||||
push:
|
||||
branches:
|
||||
- run_nightly_ci*
|
||||
- run_past_ci*
|
||||
|
||||
jobs:
|
||||
get_number:
|
||||
name: Get number
|
||||
runs-on: ubuntu-22.04
|
||||
outputs:
|
||||
run_number: ${{ steps.get_number.outputs.run_number }}
|
||||
steps:
|
||||
- name: Get number
|
||||
id: get_number
|
||||
run: |
|
||||
echo "${{ github.run_number }}"
|
||||
echo "$(python3 -c 'print(int(${{ github.run_number }}) % 10)')"
|
||||
echo "run_number=$(python3 -c 'print(int(${{ github.run_number }}) % 10)')" >> $GITHUB_OUTPUT
|
||||
build_nightly_ci_images:
|
||||
name: Build Nightly CI Docker Images
|
||||
if: (github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_nightly_ci'))
|
||||
uses: ./.github/workflows/build-nightly-ci-docker-images.yml
|
||||
secrets: inherit
|
||||
|
||||
run_nightly_ci:
|
||||
name: Nightly CI
|
||||
needs: [build_nightly_ci_images]
|
||||
uses: ./.github/workflows/self-nightly-scheduled.yml
|
||||
secrets: inherit
|
||||
|
||||
run_past_ci_pytorch_1-13:
|
||||
name: PyTorch 1.13
|
||||
needs: get_number
|
||||
if: needs.get_number.outputs.run_number == 0 && (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
|
||||
uses: ./.github/workflows/self-past-caller.yml
|
||||
if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
|
||||
needs: [run_nightly_ci]
|
||||
uses: ./.github/workflows/self-past.yml
|
||||
with:
|
||||
framework: pytorch
|
||||
version: "1.13"
|
||||
@ -34,9 +36,9 @@ jobs:
|
||||
|
||||
run_past_ci_pytorch_1-12:
|
||||
name: PyTorch 1.12
|
||||
needs: get_number
|
||||
if: needs.get_number.outputs.run_number == 1 && (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
|
||||
uses: ./.github/workflows/self-past-caller.yml
|
||||
if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
|
||||
needs: [run_past_ci_pytorch_1-13]
|
||||
uses: ./.github/workflows/self-past.yml
|
||||
with:
|
||||
framework: pytorch
|
||||
version: "1.12"
|
||||
@ -45,9 +47,9 @@ jobs:
|
||||
|
||||
run_past_ci_pytorch_1-11:
|
||||
name: PyTorch 1.11
|
||||
needs: get_number
|
||||
if: needs.get_number.outputs.run_number == 2 && (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
|
||||
uses: ./.github/workflows/self-past-caller.yml
|
||||
if: (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
|
||||
needs: [run_past_ci_pytorch_1-12]
|
||||
uses: ./.github/workflows/self-past.yml
|
||||
with:
|
||||
framework: pytorch
|
||||
version: "1.11"
|
||||
@ -56,9 +58,9 @@ jobs:
|
||||
|
||||
run_past_ci_tensorflow_2-11:
|
||||
name: TensorFlow 2.11
|
||||
needs: get_number
|
||||
if: needs.get_number.outputs.run_number == 3 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
uses: ./.github/workflows/self-past-caller.yml
|
||||
if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
needs: [run_past_ci_pytorch_1-11]
|
||||
uses: ./.github/workflows/self-past.yml
|
||||
with:
|
||||
framework: tensorflow
|
||||
version: "2.11"
|
||||
@ -67,9 +69,9 @@ jobs:
|
||||
|
||||
run_past_ci_tensorflow_2-10:
|
||||
name: TensorFlow 2.10
|
||||
needs: get_number
|
||||
if: needs.get_number.outputs.run_number == 4 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
uses: ./.github/workflows/self-past-caller.yml
|
||||
if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
needs: [run_past_ci_tensorflow_2-11]
|
||||
uses: ./.github/workflows/self-past.yml
|
||||
with:
|
||||
framework: tensorflow
|
||||
version: "2.10"
|
||||
@ -78,9 +80,9 @@ jobs:
|
||||
|
||||
run_past_ci_tensorflow_2-9:
|
||||
name: TensorFlow 2.9
|
||||
needs: get_number
|
||||
if: needs.get_number.outputs.run_number == 5 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
uses: ./.github/workflows/self-past-caller.yml
|
||||
if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
needs: [run_past_ci_tensorflow_2-10]
|
||||
uses: ./.github/workflows/self-past.yml
|
||||
with:
|
||||
framework: tensorflow
|
||||
version: "2.9"
|
||||
@ -89,9 +91,9 @@ jobs:
|
||||
|
||||
run_past_ci_tensorflow_2-8:
|
||||
name: TensorFlow 2.8
|
||||
needs: get_number
|
||||
if: needs.get_number.outputs.run_number == 6 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
uses: ./.github/workflows/self-past-caller.yml
|
||||
if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
needs: [run_past_ci_tensorflow_2-9]
|
||||
uses: ./.github/workflows/self-past.yml
|
||||
with:
|
||||
framework: tensorflow
|
||||
version: "2.8"
|
||||
@ -100,9 +102,9 @@ jobs:
|
||||
|
||||
run_past_ci_tensorflow_2-7:
|
||||
name: TensorFlow 2.7
|
||||
needs: get_number
|
||||
if: needs.get_number.outputs.run_number == 7 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
uses: ./.github/workflows/self-past-caller.yml
|
||||
if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
needs: [run_past_ci_tensorflow_2-8]
|
||||
uses: ./.github/workflows/self-past.yml
|
||||
with:
|
||||
framework: tensorflow
|
||||
version: "2.7"
|
||||
@ -111,9 +113,9 @@ jobs:
|
||||
|
||||
run_past_ci_tensorflow_2-6:
|
||||
name: TensorFlow 2.6
|
||||
needs: get_number
|
||||
if: needs.get_number.outputs.run_number == 8 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
uses: ./.github/workflows/self-past-caller.yml
|
||||
if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
needs: [run_past_ci_tensorflow_2-7]
|
||||
uses: ./.github/workflows/self-past.yml
|
||||
with:
|
||||
framework: tensorflow
|
||||
version: "2.6"
|
||||
@ -122,9 +124,9 @@ jobs:
|
||||
|
||||
run_past_ci_tensorflow_2-5:
|
||||
name: TensorFlow 2.5
|
||||
needs: get_number
|
||||
if: needs.get_number.outputs.run_number == 9 && (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
uses: ./.github/workflows/self-past-caller.yml
|
||||
if: (cancelled() != true) && ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci'))
|
||||
needs: [run_past_ci_tensorflow_2-6]
|
||||
uses: ./.github/workflows/self-past.yml
|
||||
with:
|
||||
framework: tensorflow
|
||||
version: "2.5"
|
||||
|
290
.github/workflows/self-nightly-scheduled.yml
vendored
Normal file
290
.github/workflows/self-nightly-scheduled.yml
vendored
Normal file
@ -0,0 +1,290 @@
|
||||
name: Self-hosted runner (nightly-ci)
|
||||
|
||||
# Note that each job's dependencies go into a corresponding docker file.
|
||||
#
|
||||
# For example for `run_torch_cuda_extensions_gpu` the docker image is
|
||||
# `huggingface/transformers-pytorch-deepspeed-latest-gpu`, which can be found at
|
||||
# `docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile`
|
||||
|
||||
on:
|
||||
repository_dispatch:
|
||||
workflow_call:
|
||||
|
||||
env:
|
||||
HF_HOME: /mnt/cache
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
OMP_NUM_THREADS: 8
|
||||
MKL_NUM_THREADS: 8
|
||||
RUN_SLOW: yes
|
||||
HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
|
||||
SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
|
||||
TF_FORCE_GPU_ALLOW_GROWTH: true
|
||||
RUN_PT_TF_CROSS_TESTS: 1
|
||||
CUDA_VISIBLE_DEVICES: 0,1
|
||||
|
||||
jobs:
|
||||
setup:
|
||||
name: Setup
|
||||
strategy:
|
||||
matrix:
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
|
||||
container:
|
||||
image: huggingface/transformers-all-latest-torch-nightly-gpu
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
outputs:
|
||||
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
||||
steps:
|
||||
- name: Update clone
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
|
||||
- name: Cleanup
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
rm -rf tests/__pycache__
|
||||
rm -rf tests/models/__pycache__
|
||||
rm -rf reports
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- id: set-matrix
|
||||
name: Identify models to test
|
||||
working-directory: /transformers/tests
|
||||
run: |
|
||||
echo "matrix=$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
run_tests_single_gpu:
|
||||
name: Model tests
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
folders: ${{ fromJson(needs.setup.outputs.matrix) }}
|
||||
machine_type: [single-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
|
||||
container:
|
||||
image: huggingface/transformers-all-latest-torch-nightly-gpu
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
needs: setup
|
||||
steps:
|
||||
- name: Echo folder ${{ matrix.folders }}
|
||||
shell: bash
|
||||
# For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
|
||||
# set the artifact folder names (because the character `/` is not allowed).
|
||||
run: |
|
||||
echo "${{ matrix.folders }}"
|
||||
matrix_folders=${{ matrix.folders }}
|
||||
matrix_folders=${matrix_folders/'models/'/'models_'}
|
||||
echo "$matrix_folders"
|
||||
echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV
|
||||
|
||||
- name: Update clone
|
||||
working-directory: /transformers
|
||||
run: git fetch && git checkout ${{ github.sha }}
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Environment
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 utils/print_env.py
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- name: Run all tests on GPU
|
||||
working-directory: /transformers
|
||||
run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_nightly"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_nightly
|
||||
path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}
|
||||
|
||||
run_tests_multi_gpu:
|
||||
name: Model tests
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
folders: ${{ fromJson(needs.setup.outputs.matrix) }}
|
||||
machine_type: [multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
|
||||
container:
|
||||
image: huggingface/transformers-all-latest-torch-nightly-gpu
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
needs: setup
|
||||
steps:
|
||||
- name: Echo folder ${{ matrix.folders }}
|
||||
shell: bash
|
||||
# For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
|
||||
# set the artifact folder names (because the character `/` is not allowed).
|
||||
run: |
|
||||
echo "${{ matrix.folders }}"
|
||||
matrix_folders=${{ matrix.folders }}
|
||||
matrix_folders=${matrix_folders/'models/'/'models_'}
|
||||
echo "$matrix_folders"
|
||||
echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV
|
||||
|
||||
- name: Update clone
|
||||
working-directory: /transformers
|
||||
run: git fetch && git checkout ${{ github.sha }}
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Environment
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 utils/print_env.py
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- name: Run all tests on GPU
|
||||
working-directory: /transformers
|
||||
run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_nightly"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_nightly
|
||||
path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}
|
||||
|
||||
run_torch_cuda_extensions_gpu:
|
||||
name: Torch CUDA extension tests
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
|
||||
needs: setup
|
||||
container:
|
||||
image: huggingface/transformers-pytorch-deepspeed-nightly-gpu
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Update clone
|
||||
working-directory: /workspace/transformers
|
||||
run: git fetch && git checkout ${{ github.sha }}
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: /workspace/transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
|
||||
- name: Remove cached torch extensions
|
||||
run: rm -rf /github/home/.cache/torch_extensions/
|
||||
|
||||
# To avoid unknown test failures
|
||||
- name: Pre build DeepSpeed *again*
|
||||
working-directory: /workspace
|
||||
run: |
|
||||
python3 -m pip uninstall -y deepspeed
|
||||
rm -rf DeepSpeed
|
||||
git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build
|
||||
DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Environment
|
||||
working-directory: /workspace/transformers
|
||||
run: |
|
||||
python utils/print_env.py
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /workspace/transformers
|
||||
run: pip freeze
|
||||
|
||||
- name: Run all tests on GPU
|
||||
working-directory: /workspace/transformers
|
||||
run: |
|
||||
python -m pytest -v --make-reports=${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports tests/deepspeed tests/extended
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: cat /workspace/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports/failures_short.txt
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports_postfix_nightly"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports_postfix_nightly
|
||||
path: /workspace/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
|
||||
|
||||
send_results:
|
||||
name: Send results to webhook
|
||||
runs-on: ubuntu-22.04
|
||||
if: always()
|
||||
needs: [
|
||||
setup,
|
||||
run_tests_single_gpu,
|
||||
run_tests_multi_gpu,
|
||||
run_torch_cuda_extensions_gpu
|
||||
]
|
||||
steps:
|
||||
- name: Preliminary job status
|
||||
shell: bash
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
run: |
|
||||
echo "Setup status: ${{ needs.setup.result }}"
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/download-artifact@v4
|
||||
- name: Send message to Slack
|
||||
env:
|
||||
CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
|
||||
CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}
|
||||
CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
|
||||
CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
|
||||
CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }}
|
||||
ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
|
||||
CI_EVENT: Nightly CI
|
||||
SETUP_STATUS: ${{ needs.setup.result }}
|
||||
# We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change
|
||||
# `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
|
||||
run: |
|
||||
pip install slack_sdk
|
||||
pip show slack_sdk
|
||||
python utils/notification_service.py "${{ needs.setup.outputs.matrix }}"
|
||||
|
||||
|
||||
# delete-artifact
|
||||
- uses: geekyeggo/delete-artifact@v2
|
||||
with:
|
||||
name: |
|
||||
single-*
|
||||
multi-*
|
40
.github/workflows/self-past-caller.yml
vendored
40
.github/workflows/self-past-caller.yml
vendored
@ -1,40 +0,0 @@
|
||||
name: Self-hosted runner (past-ci)
|
||||
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
framework:
|
||||
required: true
|
||||
type: string
|
||||
version:
|
||||
required: true
|
||||
type: string
|
||||
# Use this to control the commit to test against
|
||||
sha:
|
||||
default: 'main'
|
||||
required: false
|
||||
type: string
|
||||
|
||||
jobs:
|
||||
model-ci:
|
||||
name: Model CI
|
||||
uses: ./.github/workflows/self-scheduled.yml
|
||||
with:
|
||||
job: run_models_gpu
|
||||
slack_report_channel: "#transformers-ci-past-future"
|
||||
runner: past-ci
|
||||
docker: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
|
||||
ci_event: Past CI - ${{ inputs.framework }}-${{ inputs.version }}
|
||||
secrets: inherit
|
||||
|
||||
deepspeed-ci:
|
||||
name: DeepSpeed CI
|
||||
uses: ./.github/workflows/self-scheduled.yml
|
||||
with:
|
||||
job: run_torch_cuda_extensions_gpu
|
||||
slack_report_channel: "#transformers-ci-past-future"
|
||||
runner: past-ci
|
||||
docker: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
|
||||
ci_event: Past CI - ${{ inputs.framework }}-${{ inputs.version }}
|
||||
secrets: inherit
|
357
.github/workflows/self-past.yml
vendored
Normal file
357
.github/workflows/self-past.yml
vendored
Normal file
@ -0,0 +1,357 @@
|
||||
name: Self-hosted runner (past-ci)
|
||||
|
||||
# Note that each job's dependencies go into a corresponding docker file.
|
||||
#
|
||||
# For example for `run_torch_cuda_extensions_gpu` the docker image is
|
||||
# `huggingface/transformers-pytorch-deepspeed-latest-gpu`, which can be found at
|
||||
# `docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile`
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
framework:
|
||||
required: true
|
||||
type: string
|
||||
version:
|
||||
required: true
|
||||
type: string
|
||||
# Use this to control the commit to test against
|
||||
sha:
|
||||
default: 'main'
|
||||
required: false
|
||||
type: string
|
||||
|
||||
env:
|
||||
HF_HOME: /mnt/cache
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
OMP_NUM_THREADS: 8
|
||||
MKL_NUM_THREADS: 8
|
||||
RUN_SLOW: yes
|
||||
HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
|
||||
SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
|
||||
TF_FORCE_GPU_ALLOW_GROWTH: true
|
||||
RUN_PT_TF_CROSS_TESTS: 1
|
||||
CUDA_VISIBLE_DEVICES: 0,1
|
||||
|
||||
jobs:
|
||||
setup:
|
||||
name: Setup
|
||||
strategy:
|
||||
matrix:
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
|
||||
container:
|
||||
image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
outputs:
|
||||
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
||||
steps:
|
||||
- name: Update clone
|
||||
working-directory: /transformers
|
||||
run: git fetch && git checkout ${{ inputs.sha }}
|
||||
|
||||
- name: Cleanup
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
rm -rf tests/__pycache__
|
||||
rm -rf tests/models/__pycache__
|
||||
rm -rf reports
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- id: set-matrix
|
||||
working-directory: /transformers
|
||||
name: Identify models to test
|
||||
run: |
|
||||
cd tests
|
||||
echo "matrix=$(python3 -c 'import os; tests = os.getcwd(); model_tests = os.listdir(os.path.join(tests, "models")); d1 = sorted(list(filter(os.path.isdir, os.listdir(tests)))); d2 = sorted(list(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))); d1.remove("models"); d = d2 + d1; print(d)')" >> $GITHUB_OUTPUT
|
||||
|
||||
run_tests_single_gpu:
|
||||
name: Model tests
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
folders: ${{ fromJson(needs.setup.outputs.matrix) }}
|
||||
machine_type: [single-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
|
||||
container:
|
||||
image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
needs: setup
|
||||
steps:
|
||||
- name: Update clone
|
||||
working-directory: /transformers
|
||||
run: git fetch && git checkout ${{ inputs.sha }}
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
|
||||
- name: Update some packages
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip install -U datasets
|
||||
|
||||
- name: Echo folder ${{ matrix.folders }}
|
||||
shell: bash
|
||||
# For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
|
||||
# set the artifact folder names (because the character `/` is not allowed).
|
||||
run: |
|
||||
echo "${{ matrix.folders }}"
|
||||
matrix_folders=${{ matrix.folders }}
|
||||
matrix_folders=${matrix_folders/'models/'/'models_'}
|
||||
echo "$matrix_folders"
|
||||
echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Install
|
||||
if: inputs.framework == 'pytorch'
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
|
||||
|
||||
- name: Environment
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 utils/print_env.py
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- name: Run all tests on GPU
|
||||
working-directory: /transformers
|
||||
run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt
|
||||
|
||||
- name: Save job name
|
||||
if: ${{ always() }}
|
||||
shell: bash
|
||||
run: |
|
||||
matrix_folders=${matrix_folders/'models_'/'models/'}
|
||||
job_name="Model tests ($matrix_folders, ${{ matrix.machine_type }})"
|
||||
echo "$job_name"
|
||||
echo "$job_name" > /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/job_name.txt
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}
|
||||
path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}
|
||||
|
||||
run_tests_multi_gpu:
|
||||
name: Model tests
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
folders: ${{ fromJson(needs.setup.outputs.matrix) }}
|
||||
machine_type: [multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
|
||||
container:
|
||||
image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
needs: setup
|
||||
steps:
|
||||
- name: Update clone
|
||||
working-directory: /transformers
|
||||
run: git fetch && git checkout ${{ inputs.sha }}
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
|
||||
- name: Update some packages
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip install -U datasets
|
||||
|
||||
- name: Echo folder ${{ matrix.folders }}
|
||||
shell: bash
|
||||
# For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
|
||||
# set the artifact folder names (because the character `/` is not allowed).
|
||||
run: |
|
||||
echo "${{ matrix.folders }}"
|
||||
matrix_folders=${{ matrix.folders }}
|
||||
matrix_folders=${matrix_folders/'models/'/'models_'}
|
||||
echo "$matrix_folders"
|
||||
echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Install
|
||||
if: inputs.framework == 'pytorch'
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
|
||||
|
||||
- name: Environment
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 utils/print_env.py
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- name: Run all tests on GPU
|
||||
working-directory: /transformers
|
||||
run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }} tests/${{ matrix.folders }}
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: cat /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/failures_short.txt
|
||||
|
||||
- name: Save job name
|
||||
if: ${{ always() }}
|
||||
shell: bash
|
||||
run: |
|
||||
matrix_folders=${matrix_folders/'models_'/'models/'}
|
||||
job_name="Model tests ($matrix_folders, ${{ matrix.machine_type }})"
|
||||
echo "$job_name"
|
||||
echo "$job_name" > /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}/job_name.txt
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.machine_type }}_run_all_tests_gpu_${{ env.matrix_folders }}_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}
|
||||
path: /transformers/reports/${{ matrix.machine_type }}_tests_gpu_${{ matrix.folders }}
|
||||
|
||||
run_torch_cuda_extensions_gpu:
|
||||
name: Torch CUDA extension tests
|
||||
if: inputs.framework == 'pytorch'
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, past-ci]
|
||||
needs: setup
|
||||
container:
|
||||
image: huggingface/transformers-${{ inputs.framework }}-past-${{ inputs.version }}-gpu
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Update clone
|
||||
working-directory: /transformers
|
||||
run: git fetch && git checkout ${{ github.sha }}
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
|
||||
- name: Update some packages
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip install -U datasets
|
||||
|
||||
- name: Install
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
|
||||
|
||||
- name: Remove cached torch extensions
|
||||
run: rm -rf /github/home/.cache/torch_extensions/
|
||||
|
||||
# To avoid unknown test failures
|
||||
- name: Pre build DeepSpeed *again*
|
||||
working-directory: /
|
||||
run: |
|
||||
python3 -m pip uninstall -y deepspeed
|
||||
rm -rf DeepSpeed
|
||||
git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build
|
||||
DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Environment
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 utils/print_env.py
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- name: Run all tests on GPU
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports tests/deepspeed tests/extended
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: cat /transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports/failures_short.txt
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports_postfix_${{ inputs.framework }}-${{ inputs.version }}
|
||||
path: /transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
|
||||
|
||||
send_results:
|
||||
name: Send results to webhook
|
||||
runs-on: ubuntu-22.04
|
||||
if: always()
|
||||
needs: [
|
||||
setup,
|
||||
run_tests_single_gpu,
|
||||
run_tests_multi_gpu,
|
||||
run_torch_cuda_extensions_gpu
|
||||
]
|
||||
steps:
|
||||
- name: Preliminary job status
|
||||
shell: bash
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
run: |
|
||||
echo "Setup status: ${{ needs.setup.result }}"
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/download-artifact@v4
|
||||
|
||||
# Create a directory to store test failure tables in the next step
|
||||
- name: Create directory
|
||||
run: mkdir test_failure_tables
|
||||
|
||||
- name: Send message to Slack
|
||||
env:
|
||||
CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
|
||||
CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }}
|
||||
CI_SLACK_CHANNEL_ID_DAILY: ${{ secrets.CI_SLACK_CHANNEL_ID_DAILY }}
|
||||
CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
|
||||
CI_SLACK_REPORT_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID_PAST_FUTURE }}
|
||||
ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
|
||||
CI_EVENT: Past CI - ${{ inputs.framework }}-${{ inputs.version }}
|
||||
SETUP_STATUS: ${{ needs.setup.result }}
|
||||
# We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change
|
||||
# `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
|
||||
run: |
|
||||
pip install slack_sdk
|
||||
pip show slack_sdk
|
||||
python utils/notification_service.py "${{ needs.setup.outputs.matrix }}"
|
||||
|
||||
# Upload complete failure tables, as they might be big and only truncated versions could be sent to Slack.
|
||||
- name: Failure table artifacts
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: test_failure_tables_${{ inputs.framework }}-${{ inputs.version }}
|
||||
path: test_failure_tables
|
||||
|
||||
# delete-artifact
|
||||
- uses: geekyeggo/delete-artifact@v2
|
||||
with:
|
||||
name: |
|
||||
single-*
|
||||
multi-*
|
.github/workflows/self-pr-slow-ci.yml
@ -4,7 +4,7 @@ on:
pull_request:
paths:
- "src/transformers/models/*/modeling_*.py"
- "tests/**/test_*.py"
- "tests/models/*/test_*.py"

concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
@ -110,10 +110,7 @@ jobs:

- name: Run all tests on GPU
working-directory: /transformers
run: |
export CUDA_VISIBLE_DEVICES="$(python3 utils/set_cuda_devices_for_ci.py --test_folder ${{ matrix.folders }})"
echo $CUDA_VISIBLE_DEVICES
python3 -m pytest -v -rsfE --make-reports=${{ matrix.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}
run: python3 -m pytest -v -rs --make-reports=${{ matrix.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}

- name: Failure short reports
if: ${{ failure() }}
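For context on the `utils/set_cuda_devices_for_ci.py` call added in the hunk above: the idea is to compute a `CUDA_VISIBLE_DEVICES` value per test folder. A rough sketch of that kind of helper, with assumed behavior and folder names — not the actual script:

```python
# Hypothetical sketch only; the real utils/set_cuda_devices_for_ci.py may differ.
import argparse

# Assumed example: folders that should be restricted to a single GPU.
SINGLE_GPU_ONLY = {"models/deprecated"}


def pick_devices(test_folder, num_gpus=2):
    if test_folder in SINGLE_GPU_ONLY:
        return "0"
    return ",".join(str(i) for i in range(num_gpus))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--test_folder", required=True)
    args = parser.parse_args()
    # The workflow exports this value as CUDA_VISIBLE_DEVICES before running pytest.
    print(pick_devices(args.test_folder))
```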
.github/workflows/self-scheduled-caller.yml
@ -16,9 +16,6 @@ jobs:
with:
job: run_models_gpu
slack_report_channel: "#transformers-ci-daily-models"
runner: daily-ci
docker: huggingface/transformers-all-latest-gpu
ci_event: Daily CI
secrets: inherit

torch-pipeline:
@ -27,9 +24,6 @@ jobs:
with:
job: run_pipelines_torch_gpu
slack_report_channel: "#transformers-ci-daily-pipeline-torch"
runner: daily-ci
docker: huggingface/transformers-pytorch-gpu
ci_event: Daily CI
secrets: inherit

tf-pipeline:
@ -38,9 +32,6 @@ jobs:
with:
job: run_pipelines_tf_gpu
slack_report_channel: "#transformers-ci-daily-pipeline-tf"
runner: daily-ci
docker: huggingface/transformers-tensorflow-gpu
ci_event: Daily CI
secrets: inherit

example-ci:
@ -49,9 +40,6 @@ jobs:
with:
job: run_examples_gpu
slack_report_channel: "#transformers-ci-daily-examples"
runner: daily-ci
docker: huggingface/transformers-all-latest-gpu
ci_event: Daily CI
secrets: inherit

deepspeed-ci:
@ -60,10 +48,6 @@ jobs:
with:
job: run_torch_cuda_extensions_gpu
slack_report_channel: "#transformers-ci-daily-deepspeed"
runner: daily-ci
docker: huggingface/transformers-pytorch-deepspeed-latest-gpu
ci_event: Daily CI
working-directory-prefix: /workspace
secrets: inherit

quantization-ci:
@ -72,7 +56,4 @@ jobs:
with:
job: run_quantization_torch_gpu
slack_report_channel: "#transformers-ci-daily-quantization"
runner: daily-ci
docker: huggingface/transformers-quantization-latest-gpu
ci_event: Daily CI
secrets: inherit
.github/workflows/self-scheduled.yml
@ -15,19 +15,6 @@ on:
slack_report_channel:
required: true
type: string
runner:
required: true
type: string
docker:
required: true
type: string
ci_event:
required: true
type: string
working-directory-prefix:
default: ''
required: false
type: string

env:
HF_HOME: /mnt/cache
@ -51,7 +38,7 @@ jobs:
strategy:
matrix:
machine_type: [single-gpu, multi-gpu]
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
container:
image: huggingface/transformers-all-latest-gpu
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@ -109,8 +96,6 @@ jobs:
folder_slices: ${{ needs.setup.outputs.folder_slices }}
machine_type: ${{ matrix.machine_type }}
slice_id: ${{ matrix.slice_id }}
runner: ${{ inputs.runner }}
docker: ${{ inputs.docker }}
secrets: inherit

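The `folder_slices` / `slice_id` inputs above shard the model test folders across a matrix of parallel jobs; a small illustrative sketch of that kind of slicing (the repository's actual setup logic may differ):

```python
# Round-robin split of test folders into N slices; a job with a given
# slice_id then runs only the folders in folder_slices[slice_id].
def split_into_slices(folders, num_slices):
    slices = [[] for _ in range(num_slices)]
    for i, folder in enumerate(sorted(folders)):
        slices[i % num_slices].append(folder)
    return slices


folder_slices = split_into_slices(["models/bert", "models/gpt2", "models/t5", "pipelines"], num_slices=2)
print(folder_slices)  # [['models/bert', 'models/t5'], ['models/gpt2', 'pipelines']]
```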
run_pipelines_torch_gpu:
|
||||
@ -120,7 +105,7 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
|
||||
container:
|
||||
image: huggingface/transformers-pytorch-gpu
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
@ -170,7 +155,7 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
|
||||
container:
|
||||
image: huggingface/transformers-tensorflow-gpu
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
@ -221,7 +206,7 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
machine_type: [single-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
|
||||
container:
|
||||
image: huggingface/transformers-all-latest-gpu
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
@ -272,88 +257,69 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
|
||||
container:
|
||||
image: ${{ inputs.docker }}
|
||||
image: huggingface/transformers-pytorch-deepspeed-latest-gpu
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Update clone
|
||||
working-directory: ${{ inputs.working-directory-prefix }}/transformers
|
||||
working-directory: /workspace/transformers
|
||||
run: git fetch && git checkout ${{ github.sha }}
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: ${{ inputs.working-directory-prefix }}/transformers
|
||||
working-directory: /workspace/transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
|
||||
- name: Update / Install some packages (for Past CI)
|
||||
if: ${{ contains(inputs.docker, '-past-') && contains(inputs.docker, '-pytorch-') }}
|
||||
working-directory: ${{ inputs.working-directory-prefix }}/transformers
|
||||
run: |
|
||||
python3 -m pip install -U datasets
|
||||
python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate
|
||||
|
||||
- name: Remove cached torch extensions
|
||||
run: rm -rf /github/home/.cache/torch_extensions/
|
||||
|
||||
# To avoid unknown test failures
|
||||
- name: Pre build DeepSpeed *again* (for daily CI)
|
||||
if: ${{ contains(inputs.ci_event, 'Daily CI') }}
|
||||
working-directory: ${{ inputs.working-directory-prefix }}/
|
||||
- name: Pre build DeepSpeed *again*
|
||||
working-directory: /workspace
|
||||
run: |
|
||||
python3 -m pip uninstall -y deepspeed
|
||||
DS_DISABLE_NINJA=1 DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
|
||||
|
||||
# To avoid unknown test failures
|
||||
- name: Pre build DeepSpeed *again* (for nightly & Past CI)
|
||||
if: ${{ contains(inputs.ci_event, 'Nightly CI') || contains(inputs.ci_event, 'Past CI') }}
|
||||
working-directory: ${{ inputs.working-directory-prefix }}/
|
||||
run: |
|
||||
python3 -m pip uninstall -y deepspeed
|
||||
rm -rf DeepSpeed
|
||||
git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build
|
||||
DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Environment
|
||||
working-directory: ${{ inputs.working-directory-prefix }}/transformers
|
||||
working-directory: /workspace/transformers
|
||||
run: |
|
||||
python3 utils/print_env.py
|
||||
python utils/print_env.py
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: ${{ inputs.working-directory-prefix }}/transformers
|
||||
working-directory: /workspace/transformers
|
||||
run: pip freeze
|
||||
|
||||
- name: Run all tests on GPU
|
||||
working-directory: ${{ inputs.working-directory-prefix }}/transformers
|
||||
working-directory: /workspace/transformers
|
||||
run: |
|
||||
python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports tests/deepspeed tests/extended
|
||||
python -m pytest -v --make-reports=${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports tests/deepspeed tests/extended
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: cat ${{ inputs.working-directory-prefix }}/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports/failures_short.txt
|
||||
run: cat /workspace/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports/failures_short.txt
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
|
||||
path: ${{ inputs.working-directory-prefix }}/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
|
||||
path: /workspace/transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
|
||||
|
||||
run_quantization_torch_gpu:
|
||||
if: ${{ inputs.job == 'run_quantization_torch_gpu' }}
|
||||
name: " "
|
||||
needs: setup
|
||||
strategy:
|
||||
max-parallel: 4
|
||||
fail-fast: false
|
||||
matrix:
|
||||
folders: ${{ fromJson(needs.setup.outputs.quantization_matrix) }}
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, '${{ inputs.runner }}']
|
||||
runs-on: ['${{ matrix.machine_type }}', nvidia-gpu, t4, daily-ci]
|
||||
container:
|
||||
image: huggingface/transformers-quantization-latest-gpu
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
@ -468,6 +434,5 @@ jobs:
|
||||
# This would be an empty string if `setup` is skipped.
|
||||
folder_slices: ${{ needs.setup.outputs.folder_slices }}
|
||||
quantization_matrix: ${{ needs.setup.outputs.quantization_matrix }}
|
||||
ci_event: ${{ inputs.ci_event }}
|
||||
|
||||
|
||||
secrets: inherit
|
||||
|
.github/workflows/slack-report.yml
@ -18,12 +18,7 @@ on:
quantization_matrix:
required: true
type: string
ci_event:
required: true
type: string

env:
TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN: ${{ secrets.TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN }}

jobs:
send_results:
@ -48,7 +43,7 @@ jobs:
CI_SLACK_CHANNEL_DUMMY_TESTS: ${{ secrets.CI_SLACK_CHANNEL_DUMMY_TESTS }}
SLACK_REPORT_CHANNEL: ${{ inputs.slack_report_channel }}
ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
CI_EVENT: ${{ inputs.ci_event }}
CI_EVENT: scheduled
CI_SHA: ${{ github.sha }}
CI_WORKFLOW_REF: ${{ github.workflow_ref }}
CI_TEST_JOB: ${{ inputs.job }}
@ -59,7 +54,6 @@ jobs:
# empty string, and the called script still get one argument (which is the emtpy string).
run: |
sudo apt-get install -y curl
pip install huggingface_hub
pip install slack_sdk
pip show slack_sdk
python utils/notification_service.py "${{ inputs.folder_slices }}"
@ -79,7 +73,7 @@ jobs:
CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }}
ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
SLACK_REPORT_CHANNEL: ${{ inputs.slack_report_channel }}
CI_EVENT: ${{ inputs.ci_event }}
CI_EVENT: scheduled
CI_SHA: ${{ github.sha }}
CI_TEST_JOB: ${{ inputs.job }}
SETUP_STATUS: ${{ inputs.setup_status }}
@ -87,7 +81,6 @@ jobs:
# `quantization/bnb` to `quantization_bnb` is required, as the artifact names use `_` instead of `/`.
run: |
sudo apt-get install -y curl
pip install huggingface_hub
pip install slack_sdk
pip show slack_sdk
python utils/notification_service_quantization.py "${{ inputs.quantization_matrix }}"
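As the comment above notes, the report scripts have to map matrix entries such as `quantization/bnb` onto artifact names that use `_`; a minimal sketch of that conversion (illustrative only — the real logic lives in the notification scripts):

```python
# "quantization/bnb" -> "quantization_bnb", "models/bert" -> "models_bert":
# artifact names cannot contain "/".
import ast


def artifact_names_from_matrix(matrix_arg):
    # The workflow passes the matrix as a list literal, e.g. "['models/bert', 'quantization/bnb']".
    folders = ast.literal_eval(matrix_arg)
    return [folder.replace("/", "_") for folder in folders]


print(artifact_names_from_matrix("['models/bert', 'quantization/bnb']"))
```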
.github/workflows/ssh-runner.yml
@ -9,11 +9,9 @@ on:
docker_image:
description: 'Name of the Docker image'
required: true
num_gpus:
description: 'Type of the number of gpus to use (`single` or `multi`)'
required: true

env:
IS_GITHUB_CI: "1"
HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
HF_HOME: /mnt/cache
TRANSFORMERS_IS_CI: yes
@ -22,13 +20,12 @@ env:
RUN_SLOW: yes # For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access. # This token is created under the bot `hf-transformers-bot`.
SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
TF_FORCE_GPU_ALLOW_GROWTH: true
CUDA_VISIBLE_DEVICES: 0,1
RUN_PT_TF_CROSS_TESTS: 1

jobs:
ssh_runner:
name: "SSH"
runs-on: ["${{ github.event.inputs.num_gpus }}-gpu", nvidia-gpu, "${{ github.event.inputs.runner_type }}", ci]
runs-on: [single-gpu, nvidia-gpu, "${{ github.event.inputs.runner_type }}", ci]
container:
image: ${{ github.event.inputs.docker_image }}
options: --gpus all --privileged --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
@ -55,7 +52,7 @@ jobs:
nvidia-smi

- name: Tailscale # In order to be able to SSH when a test fails
uses: huggingface/tailscale-action@main
uses: huggingface/tailscale-action@v1
with:
authkey: ${{ secrets.TAILSCALE_SSH_AUTHKEY }}
slackChannel: ${{ secrets.SLACK_CIFEEDBACK_CHANNEL }}
.github/workflows/trufflehog.yml
@ -1,18 +0,0 @@
on:
push:

name: Secret Leaks

permissions:
contents: read

jobs:
trufflehog:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Secret Scanning
uses: trufflesecurity/trufflehog@main
@ -61,10 +61,7 @@ feedback.
The 🤗 Transformers library is robust and reliable thanks to users who report the problems they encounter.

Before you report an issue, we would really appreciate it if you could **make sure the bug was not
already reported** (use the search bar on GitHub under Issues). Your issue should also be related to bugs in the library itself, and not your code. If you're unsure whether the bug is in your code or the library, please ask in the [forum](https://discuss.huggingface.co/) or on our [discord](https://discord.com/invite/hugging-face-879548962464493619) first. This helps us respond quicker to fixing issues related to the library versus general questions.

> [!TIP]
> We have a [docs bot](https://huggingface.co/spaces/huggingchat/hf-docs-chat), and we highly encourage you to ask all your questions there. There is always a chance your bug can be fixed with a simple flag 👾🔫
already reported** (use the search bar on GitHub under Issues). Your issue should also be related to bugs in the library itself, and not your code. If you're unsure whether the bug is in your code or the library, please ask in the [forum](https://discuss.huggingface.co/) first. This helps us respond quicker to fixing issues related to the library versus general questions.

Once you've confirmed the bug hasn't already been reported, please include the following information in your issue so we can quickly resolve it:

@ -132,7 +129,7 @@ You will need basic `git` proficiency to contribute to
manual. Type `git --help` in a shell and enjoy! If you prefer books, [Pro
Git](https://git-scm.com/book/en/v2) is a very good reference.

You'll need **[Python 3.8](https://github.com/huggingface/transformers/blob/main/setup.py#L449)** or above to contribute to 🤗 Transformers. Follow the steps below to start contributing:
You'll need **[Python 3.8](https://github.com/huggingface/transformers/blob/main/setup.py#L426)** or above to contribute to 🤗 Transformers. Follow the steps below to start contributing:

1. Fork the [repository](https://github.com/huggingface/transformers) by
clicking on the **[Fork](https://github.com/huggingface/transformers/fork)** button on the repository's page. This creates a copy of the code
@ -163,7 +160,7 @@ You'll need **[Python 3.8](https://github.com/huggingface/transformers/blob/main
If 🤗 Transformers was already installed in the virtual environment, remove
it with `pip uninstall transformers` before reinstalling it in editable
mode with the `-e` flag.

Depending on your OS, and since the number of optional dependencies of Transformers is growing, you might get a
failure with this command. If that's the case make sure to install the Deep Learning framework you are working with
(PyTorch, TensorFlow and/or Flax) then do:
@ -222,7 +219,7 @@ You'll need **[Python 3.8](https://github.com/huggingface/transformers/blob/main

If you're modifying documents under the `docs/source` directory, make sure the documentation can still be built. This check will also run in the CI when you open a pull request. To run a local check
make sure you install the documentation builder:

```bash
pip install ".[docs]"
```
@ -341,12 +338,12 @@ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_ne
RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification
```

Like the slow tests, there are other environment variables available which are not enabled by default during testing:
Like the slow tests, there are other environment variables available which not enabled by default during testing:
- `RUN_CUSTOM_TOKENIZERS`: Enables tests for custom tokenizers.
- `RUN_PT_FLAX_CROSS_TESTS`: Enables tests for PyTorch + Flax integration.
- `RUN_PT_TF_CROSS_TESTS`: Enables tests for TensorFlow + PyTorch integration.

More environment variables and additional information can be found in the [testing_utils.py](https://github.com/huggingface/transformers/blob/main/src/transformers/testing_utils.py).
More environment variables and additional information can be found in the [testing_utils.py](src/transformers/testing_utils.py).

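To make the `RUN_SLOW`-style switches concrete, tests opt in through decorators from `transformers.testing_utils`; a minimal example (the test body here is just a placeholder):

```python
import unittest

from transformers.testing_utils import require_torch, slow


class ExampleIntegrationTest(unittest.TestCase):
    @slow           # skipped unless RUN_SLOW=yes
    @require_torch  # skipped unless torch is installed
    def test_heavy_generation(self):
        self.assertTrue(True)
```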
🤗 Transformers uses `pytest` as a test runner only. It doesn't use any
`pytest`-specific features in the test suite itself.
10
Makefile
10
Makefile
@ -1,11 +1,11 @@
|
||||
.PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples benchmark
|
||||
.PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples
|
||||
|
||||
# make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!)
|
||||
export PYTHONPATH = src
|
||||
|
||||
check_dirs := examples tests src utils
|
||||
|
||||
exclude_folders := ""
|
||||
exclude_folders := examples/research_projects
|
||||
|
||||
modified_only_fixup:
|
||||
$(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs)))
|
||||
@ -56,7 +56,6 @@ quality:
|
||||
python utils/custom_init_isort.py --check_only
|
||||
python utils/sort_auto_mappings.py --check_only
|
||||
python utils/check_doc_toc.py
|
||||
python utils/check_docstrings.py --check_all
|
||||
|
||||
|
||||
# Format source code automatically and check is there are any problems left that need manual fixing
|
||||
@ -97,11 +96,6 @@ test:
|
||||
test-examples:
|
||||
python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/
|
||||
|
||||
# Run benchmark
|
||||
|
||||
benchmark:
|
||||
python3 benchmark/benchmark.py --config-dir benchmark/config --config-name generation --commit=diff backend.model=google/gemma-2b backend.cache_implementation=null,static backend.torch_compile=false,true --multirun
|
||||
|
||||
# Run tests for SageMaker DLC release
|
||||
|
||||
test-sagemaker: # install sagemaker dependencies in advance with pip install .[sagemaker]
|
||||
|
44
README.md
44
README.md
@ -25,29 +25,39 @@ limitations under the License.
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<b>English</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
||||
|
@ -25,29 +25,39 @@ limitations under the License.
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<b>Deutsch</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -20,29 +20,39 @@ limitations under the License.
|
||||
<br>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<b>Español</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -25,29 +25,39 @@ limitations under the License.
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Construction" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="Version GitHub" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Pacte des contributeurs" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<b>Français</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -45,29 +45,39 @@ checkpoint: जाँच बिंदु
|
||||
<br>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<b>हिन्दी</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -55,29 +55,39 @@ user: ユーザ
|
||||
<br>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<b>日本語</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -20,29 +20,39 @@ limitations under the License.
|
||||
<br>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<b>한국어</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -25,29 +25,39 @@ limitations under the License.
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<b>Рortuguês</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -25,29 +25,39 @@ limitations under the License.
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<b>Русский</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -26,11 +26,21 @@ limitations under the License.
|
||||
|
||||
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
@ -38,18 +48,18 @@ limitations under the License.
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<b>తెలుగు</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -25,28 +25,38 @@ limitations under the License.
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<b>Tiếng Việt</b> |
|
||||
</p>
|
||||
</h4>
|
@ -45,11 +45,21 @@ checkpoint: 检查点
|
||||
<br>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
@ -57,17 +67,17 @@ checkpoint: 检查点
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<b>简体中文</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hant.md">繁體中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -57,29 +57,39 @@ user: 使用者
|
||||
<br>
|
||||
</p>
|
||||
<p align="center">
|
||||
<a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
|
||||
<a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
|
||||
<a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
|
||||
<a href="https://circleci.com/gh/huggingface/transformers">
|
||||
<img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/LICENSE">
|
||||
<img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue">
|
||||
</a>
|
||||
<a href="https://huggingface.co/docs/transformers/index">
|
||||
<img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/releases">
|
||||
<img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg">
|
||||
</a>
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md">
|
||||
<img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg">
|
||||
</a>
|
||||
<a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
|
||||
</p>
|
||||
|
||||
<h4 align="center">
|
||||
<p>
|
||||
<a href="https://github.com/huggingface/transformers/">English</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_zh-hans.md">简体中文</a> |
|
||||
<b>繁體中文</b> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ko.md">한국어</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_es.md">Español</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ja.md">日本語</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_hd.md">हिन्दी</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_ru.md">Русский</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_pt-br.md">Рortuguês</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_te.md">తెలుగు</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/README_vi.md">Tiếng Việt</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
@ -14,7 +14,7 @@ Models uploaded on the Hugging Face Hub come in different formats. We heavily re
models in the [`safetensors`](https://github.com/huggingface/safetensors) format (which is the default prioritized
by the transformers library), as it was developed specifically to prevent arbitrary code execution on your system.

To avoid loading models from unsafe formats (e.g. [pickle](https://docs.python.org/3/library/pickle.html)), you should use the `use_safetensors` parameter. If you do so and no .safetensors file is present, transformers will raise an error when loading the model.

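A minimal sketch of how this option is typically passed (the model id below is only a placeholder):

```python
from transformers import AutoModel

# Only accept a .safetensors checkpoint; loading fails if no such file exists for this repo.
model = AutoModel.from_pretrained("bert-base-uncased", use_safetensors=True)
```
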
### Remote code

@ -596,7 +596,7 @@ Keywords: Data-Centric AI, Data Quality, Noisy Labels, Outlier Detection, Active
## [BentoML](https://github.com/bentoml/BentoML)

[BentoML](https://github.com/bentoml) is the unified framework for building, shipping, and scaling production-ready AI applications incorporating traditional ML, pre-trained AI models, Generative and Large Language Models.
All Hugging Face models and pipelines can be seamlessly integrated into BentoML applications, enabling the running of models on the most suitable hardware and independent scaling based on usage.

Keywords: BentoML, Framework, Deployment, AI Applications

@ -1,326 +0,0 @@
|
||||
# Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Run benchmark using the `optimum-benchmark` library with some customization in `transformers`.
|
||||
|
||||
Assume we are under `transformers` root directory: (make sure the commits are valid commits)
|
||||
```bash
|
||||
python benchmark/benchmark.py --config-dir benchmark/config --config-name generation --commit=9b9c7f03da625b13643e99205c691fe046461724 --metrics=decode.latency.mean,per_token.latency.mean,per_token.throughput.value backend.model=google/gemma-2b benchmark.input_shapes.sequence_length=5,7 benchmark.input_shapes.batch_size=1,2 --multirun
|
||||
```
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import glob
|
||||
import json
|
||||
import os.path
|
||||
import re
|
||||
import tempfile
|
||||
from contextlib import contextmanager
|
||||
from pathlib import Path
|
||||
|
||||
from git import Repo
|
||||
|
||||
from huggingface_hub import HfApi
|
||||
|
||||
from optimum_benchmark import Benchmark
|
||||
from optimum_benchmark_wrapper import main
|
||||
|
||||
|
||||
PATH_TO_REPO = Path(__file__).parent.parent.resolve()
|
||||
|
||||
|
||||
@contextmanager
|
||||
def checkout_commit(repo: Repo, commit_id: str):
|
||||
"""
|
||||
Context manager that checks out a given commit when entered, but gets back to the reference it was at on exit.
|
||||
Args:
|
||||
repo (`git.Repo`): A git repository (for instance the Transformers repo).
|
||||
commit_id (`str`): The commit reference to checkout inside the context manager.
|
||||
"""
|
||||
current_head = repo.head.commit if repo.head.is_detached else repo.head.ref
|
||||
|
||||
try:
|
||||
repo.git.checkout(commit_id)
|
||||
yield
|
||||
|
||||
finally:
|
||||
repo.git.checkout(current_head)
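# Illustrative usage sketch (not part of the original file; the target ref is an assumption):
#
#     repo = Repo(PATH_TO_REPO)
#     with checkout_commit(repo, "main"):
#         ...  # the working tree now reflects `main`
#     # on exit, the previously checked-out branch/commit is restored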
|
||||
|
||||
|
||||
def summarize(run_dir, metrics, expand_metrics=False):
|
||||
"""Produce a summary for each optimum-benchmark launched job's output directory found in `run_dir`.
|
||||
|
||||
Each summary's format is as follows (for `expand_metrics=False`):
|
||||
```
|
||||
{
|
||||
"model": "google/gemma-2b",
|
||||
"commit": "3cd6ed22e4d49219f300f5055e71e3929aba20d7",
|
||||
"config": "benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5",
|
||||
"metrics": {
|
||||
"decode.latency.mean": 1.624666809082031,
|
||||
"per_token.latency.mean": 0.012843788806628804,
|
||||
"per_token.throughput.value": 77.85864553330948
|
||||
}
|
||||
}
|
||||
```
|
||||
"""
|
||||
reports = glob.glob(os.path.join(run_dir, "**/benchmark_report.json"), recursive=True)
|
||||
report_dirs = [str(Path(report).parent) for report in reports]
|
||||
|
||||
summaries = []
|
||||
for report_dir in report_dirs:
|
||||
commit = re.search(r"/commit=([^/]+)", report_dir).groups()[0]
|
||||
|
||||
if not os.path.isfile(os.path.join(report_dir, "benchmark.json")):
|
||||
continue
|
||||
benchmark = Benchmark.from_json(os.path.join(report_dir, "benchmark.json"))
|
||||
report = benchmark.report
|
||||
|
||||
model = benchmark.config.backend["model"]
|
||||
|
||||
# This looks like `benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5`.
|
||||
# (we rely on the usage of hydra's `${hydra.job.override_dirname}`.)
|
||||
benchmark_name = re.sub(f"backend.model={model},*", "", report_dir)
|
||||
benchmark_name = str(Path(benchmark_name).parts[-1])
|
||||
if benchmark_name.startswith("commit="):
|
||||
benchmark_name = benchmark.config.name
|
||||
|
||||
metrics_values = {}
|
||||
# post-processing of report: show a few selected/important metrics
|
||||
for metric in metrics:
|
||||
keys = metric.split(".")
|
||||
value = report
|
||||
current = metrics_values
|
||||
for key in keys:
|
||||
# Avoid a KeyError when a user-specified metric has a typo.
|
||||
# TODO: Give warnings.
|
||||
if key not in value:
|
||||
continue
|
||||
value = value[key]
|
||||
|
||||
if expand_metrics:
|
||||
if isinstance(value, dict):
|
||||
if key not in current:
|
||||
current[key] = {}
|
||||
current = current[key]
|
||||
else:
|
||||
current[key] = value
|
||||
|
||||
if not expand_metrics:
|
||||
metrics_values[metric] = value
|
||||
|
||||
# show some config information
|
||||
print(f"model: {model}")
|
||||
print(f"commit: {commit}")
|
||||
print(f"config: {benchmark_name}")
|
||||
if len(metrics_values) > 0:
|
||||
print("metrics:")
|
||||
if expand_metrics:
|
||||
print(metrics_values)
|
||||
else:
|
||||
for metric, value in metrics_values.items():
|
||||
print(f" - {metric}: {value}")
|
||||
print("-" * 80)
|
||||
|
||||
summary = {
|
||||
"model": model,
|
||||
"commit": commit,
|
||||
"config": benchmark_name,
|
||||
"metrics": metrics_values,
|
||||
}
|
||||
summaries.append(summary)
|
||||
|
||||
with open(os.path.join(report_dir, "summary.json"), "w") as fp:
|
||||
json.dump(summary, fp, indent=4)
|
||||
|
||||
return summaries
|
||||
|
||||
|
||||
def combine_summaries(summaries):
|
||||
"""Combine a list of summary obtained from the function `summarize`.
|
||||
|
||||
The combined summary's format is as follows:
|
||||
```
|
||||
"google/gemma-2b": {
|
||||
"benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5": {
|
||||
"3cd6ed22e4d49219f300f5055e71e3929aba20d7": {
|
||||
"metrics": {"decode.latency.mean": 1.624666809082031}
|
||||
},
|
||||
"c97ee28b117c0abe8e08891f402065e4df6d72aa": {
|
||||
"metrics": {"decode.latency.mean": 1.6278163452148438}
|
||||
}
|
||||
},
|
||||
"benchmark.input_shapes.batch_size=2,benchmark.input_shapes.sequence_length=5": {
|
||||
"3cd6ed22e4d49219f300f5055e71e3929aba20d7": {
|
||||
"metrics": {"decode.latency.mean": 1.6947791748046876}
|
||||
},
|
||||
"c97ee28b117c0abe8e08891f402065e4df6d72aa": {
|
||||
"metrics": {
|
||||
"decode.latency.mean": 1.6980519409179688}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
"""
|
||||
combined = {}
|
||||
for summary in summaries:
|
||||
model = summary["model"]
|
||||
config = summary["config"]
|
||||
commit = summary["commit"]
|
||||
|
||||
if model not in combined:
|
||||
combined[model] = {}
|
||||
|
||||
if config not in combined[model]:
|
||||
combined[model][config] = {}
|
||||
|
||||
if commit not in combined[model][config]:
|
||||
combined[model][config][commit] = {"metrics": summary["metrics"]}
|
||||
|
||||
with open(os.path.join(exp_run_dir, "summary.json"), "w") as fp:
|
||||
json.dump(combined, fp, indent=4)
|
||||
|
||||
print(json.dumps(combined, indent=4))
|
||||
|
||||
return combined
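# Illustrative flow sketch (paths and metric names are assumptions): the per-job summaries
# produced by `summarize` are merged per model / config / commit, e.g.
#
#     summaries = summarize(commit_run_dir, ["decode.latency.mean"])
#     combined = combine_summaries(summaries)
#     combined["google/gemma-2b"]["<config>"]["<sha>"]["metrics"]["decode.latency.mean"]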
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
||||
def list_str(values):
|
||||
return values.split(",")
|
||||
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
parser.add_argument("--config-dir", type=str, required=True, help="The path to the config directory.")
|
||||
parser.add_argument("--config-name", type=str, required=True, help="The config name.")
|
||||
|
||||
# arguments specific to this wrapper for our own customization
|
||||
parser.add_argument("--ensure_empty", type=bool, default=True, help="If to create a temporary directory.")
|
||||
parser.add_argument(
|
||||
"--commit",
|
||||
type=list_str,
|
||||
default="",
|
||||
help="Comma-separated list of branch names and/or commit sha values on which the benchmark will run. If `diff` is specified, it will run on both the current head and the `main` branch.",
|
||||
)
|
||||
parser.add_argument("--metrics", type=str, help="The metrics to be included in the summary.")
|
||||
|
||||
parser.add_argument("--repo_id", type=str, default=None, help="The repository to which the file will be uploaded.")
|
||||
parser.add_argument("--path_in_repo", type=str, default=None, help="Relative filepath in the repo.")
|
||||
parser.add_argument("--token", type=str, default=None, help="A valid user access token (string).")
|
||||
|
||||
args, optimum_benchmark_args = parser.parse_known_args()
|
||||
|
||||
repo = Repo(PATH_TO_REPO)
|
||||
|
||||
metrics = [
|
||||
"prefill.latency.mean",
|
||||
"prefill.throughput.value",
|
||||
"decode.latency.mean",
|
||||
"decode.throughput.value",
|
||||
"per_token.latency.mean",
|
||||
"per_token.throughput.value",
|
||||
]
|
||||
if args.metrics is not None:
|
||||
metrics = args.metrics.split(",")
|
||||
|
||||
# Get `backend.model` in a hacky way: We want to control the experiment flow manually.
|
||||
models = [""]
|
||||
for idx, arg in enumerate(optimum_benchmark_args):
|
||||
if arg.startswith("backend.model="):
|
||||
models = arg[len("backend.model=") :]
|
||||
models = models.split(",")
|
||||
break
|
||||
optimum_benchmark_args = [arg for arg in optimum_benchmark_args if not arg.startswith("backend.model=")]
|
||||
|
||||
# Get the commit(s)
|
||||
current_head = str(repo.head.commit) if repo.head.is_detached else str(repo.head.ref)
|
||||
commits = [x for x in args.commit if x != ""]
|
||||
if len(commits) == 0:
|
||||
commits = [current_head]
|
||||
elif len(commits) == 1 and commits[0] == "diff":
|
||||
# compare to `main`
|
||||
commits = ["main", current_head]
|
||||
|
||||
# Get the specified run directory
|
||||
run_dir_arg_idx, run_dir = -1, None
|
||||
sweep_dir_arg_idx, sweep_dir = -1, None
|
||||
for idx, arg in enumerate(optimum_benchmark_args):
|
||||
if arg.startswith("hydra.run.dir="):
|
||||
run_dir = arg[len("hydra.run.dir=") :]
|
||||
run_dir_arg_idx = idx
|
||||
elif arg.startswith("hydra.sweep.dir="):
|
||||
sweep_dir = arg[len("hydra.sweep.dir=") :]
|
||||
sweep_dir_arg_idx = idx
|
||||
exp_run_dir, arg_dix, arg_name = (
|
||||
(sweep_dir, sweep_dir_arg_idx, "hydra.sweep.dir")
|
||||
if "--multirun" in optimum_benchmark_args
|
||||
else (run_dir, run_dir_arg_idx, "hydra.run.dir")
|
||||
)
|
||||
|
||||
# TODO: not hardcoded
|
||||
if exp_run_dir is None and args.ensure_empty:
|
||||
exp_run_dir = "_benchmark"
|
||||
|
||||
if args.ensure_empty:
|
||||
os.makedirs(exp_run_dir, exist_ok=True)
|
||||
exp_run_dir = tempfile.mkdtemp(dir=exp_run_dir)
|
||||
|
||||
run_summaries = []
|
||||
for commit in commits:
|
||||
with checkout_commit(repo, commit):
|
||||
commit = str(repo.head.commit)
|
||||
|
||||
commit_run_dir = exp_run_dir
|
||||
if exp_run_dir is not None:
|
||||
commit_run_dir = os.path.join(exp_run_dir, rf"commit\={commit}")
|
||||
|
||||
print(f"Run benchmark on commit: {commit}")
|
||||
|
||||
for model in models:
|
||||
model_arg = [f"backend.model={model}"] if model != "" else []
|
||||
dir_args = []
|
||||
if commit_run_dir is not None:
|
||||
if arg_dix > -1:
|
||||
optimum_benchmark_args[arg_dix] = f"{arg_name}={commit_run_dir}"
|
||||
else:
|
||||
dir_args = [
|
||||
f"hydra.sweep.dir={commit_run_dir}",
|
||||
f"hydra.run.dir={commit_run_dir}/" + "${hydra.job.override_dirname}",
|
||||
]
|
||||
main(args.config_dir, args.config_name, model_arg + dir_args + optimum_benchmark_args)
|
||||
|
||||
if commit_run_dir is not None:
|
||||
# Need to remove the `\` character
|
||||
summaries = summarize(commit_run_dir.replace("\\", ""), metrics)
|
||||
run_summaries.extend(summaries)
|
||||
|
||||
# aggregate the information across the commits
|
||||
if exp_run_dir is not None:
|
||||
with open(os.path.join(exp_run_dir, "summaries.json"), "w") as fp:
|
||||
json.dump(run_summaries, fp, indent=4)
|
||||
|
||||
combined_summary = combine_summaries(run_summaries)
|
||||
|
||||
if args.repo_id is not None and args.path_in_repo is not None:
|
||||
# Upload to Hub
|
||||
api = HfApi()
|
||||
api.upload_folder(
|
||||
folder_path=exp_run_dir,
|
||||
path_in_repo=args.path_in_repo,
|
||||
repo_id=args.repo_id,
|
||||
repo_type="dataset",
|
||||
token=args.token,
|
||||
)
|
@ -1,57 +0,0 @@
|
||||
defaults:
|
||||
- benchmark # inheriting benchmark schema
|
||||
- scenario: inference
|
||||
- launcher: process
|
||||
- backend: pytorch
|
||||
- _self_ # for hydra 1.1 compatibility
|
||||
|
||||
name: pytorch_generate
|
||||
|
||||
launcher:
|
||||
start_method: spawn
|
||||
device_isolation: true
|
||||
device_isolation_action: warn
|
||||
|
||||
backend:
|
||||
device: cuda
|
||||
device_ids: 0
|
||||
no_weights: true
|
||||
model: meta-llama/Llama-2-7b-hf
|
||||
cache_implementation: static
|
||||
torch_compile: true
|
||||
torch_dtype: float16
|
||||
torch_compile_config:
|
||||
backend: inductor
|
||||
mode: reduce-overhead
|
||||
fullgraph: true
|
||||
|
||||
scenario:
|
||||
input_shapes:
|
||||
batch_size: 1
|
||||
sequence_length: 7
|
||||
generate_kwargs:
|
||||
max_new_tokens: 128
|
||||
min_new_tokens: 128
|
||||
do_sample: false
|
||||
memory: true
|
||||
latency: true
|
||||
iterations: 2
|
||||
duration: 0
|
||||
|
||||
|
||||
# hydra/cli specific settings
|
||||
hydra:
|
||||
run:
|
||||
# where to store run results
|
||||
dir: runs/${name}
|
||||
job:
|
||||
# change working directory to the run directory
|
||||
chdir: true
|
||||
env_set:
|
||||
# set environment variable OVERRIDE_BENCHMARKS to 1
|
||||
# to not skip benchmarks that have been run before
|
||||
OVERRIDE_BENCHMARKS: 1
|
||||
LOG_LEVEL: WARN
|
||||
sweep:
|
||||
dir: multirun
|
||||
subdir: ${hydra.job.override_dirname}
|
@ -1,16 +0,0 @@
|
||||
import argparse
|
||||
import subprocess
|
||||
|
||||
|
||||
def main(config_dir, config_name, args):
|
||||
subprocess.run(["optimum-benchmark", "--config-dir", f"{config_dir}", "--config-name", f"{config_name}"] + ["hydra/job_logging=disabled", "hydra/hydra_logging=disabled"] + args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser()
|
||||
|
||||
parser.add_argument("--config-dir", type=str, required=True, help="The path to the config directory.")
|
||||
parser.add_argument("--config-name", type=str, required=True, help="The config name.")
|
||||
args, unknown = parser.parse_known_args()
|
||||
|
||||
main(args.config_dir, args.config_name, unknown)
|
@ -53,7 +53,7 @@ NOT_DEVICE_TESTS = {
|
||||
"test_torch_save_load",
|
||||
"test_initialization",
|
||||
"test_forward_signature",
|
||||
"test_model_get_set_embeddings",
|
||||
"test_model_common_attributes",
|
||||
"test_model_main_input_name",
|
||||
"test_correct_missing_keys",
|
||||
"test_tie_model_weights",
|
||||
|
@ -1,15 +1,14 @@
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
USER root
|
||||
ARG REF=main
|
||||
RUN apt-get update && apt-get install -y time git pkg-config make git-lfs
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
ENV VIRTUAL_ENV=/usr/local
|
||||
RUN pip install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools GitPython
|
||||
RUN uv pip install --no-cache-dir --upgrade 'torch' --index-url https://download.pytorch.org/whl/cpu
|
||||
# tensorflow pin matching setup.py
|
||||
RUN uv pip install --no-cache-dir "tensorflow-cpu<2.16" "tf-keras<2.16"
|
||||
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,quality,torch-speech,vision,testing]"
|
||||
RUN uv pip install --no-cache-dir tensorflow-cpu tf-keras
|
||||
RUN uv pip install --no-cache-dir "transformers[flax,quality,vision,testing]"
|
||||
RUN git lfs install
|
||||
|
||||
RUN pip uninstall -y transformers
|
||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
||||
|
||||
|
@ -2,7 +2,7 @@ FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake wget xz-utils build-essential g++5 libprotobuf-dev protobuf-compiler
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
ENV VIRTUAL_ENV=/usr/local
|
||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||
|
||||
RUN wget https://github.com/ku-nlp/jumanpp/releases/download/v2.0.0-rc3/jumanpp-2.0.0-rc3.tar.xz
|
||||
|
@ -3,7 +3,7 @@ ENV PYTHONDONTWRITEBYTECODE=1
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git
|
||||
RUN apt-get install -y g++ cmake
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
ENV VIRTUAL_ENV=/usr/local
|
||||
RUN pip --no-cache-dir install uv && uv venv
|
||||
RUN uv pip install --no-cache-dir -U pip setuptools albumentations seqeval
|
||||
RUN pip install --upgrade --no-cache-dir "transformers[tf-cpu,sklearn,testing,sentencepiece,tf-speech,vision]"
|
||||
|
@ -2,7 +2,7 @@ FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
ENV VIRTUAL_ENV=/usr/local
|
||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
|
@ -3,7 +3,7 @@ ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git libgl1-mesa-glx libgl1 g++ tesseract-ocr
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
ENV VIRTUAL_ENV=/usr/local
|
||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-cache-dir --no-deps timm accelerate
|
||||
|
@ -1,10 +1,9 @@
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git g++ cmake
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
ENV VIRTUAL_ENV=/usr/local
|
||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN pip install --no-cache-dir "scipy<1.13" "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,testing,sentencepiece,flax-speech,vision]"
|
||||
RUN pip install --no-cache-dir "scipy<1.13" "transformers[flax,testing,sentencepiece,flax-speech,vision]"
|
||||
RUN pip uninstall -y transformers
|
||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
@ -1,10 +1,9 @@
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake g++
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
ENV VIRTUAL_ENV=/usr/local
|
||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]"
|
||||
RUN pip install --no-cache-dir "transformers[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]"
|
||||
RUN uv pip install --no-cache-dir "protobuf==3.20.3" tensorflow_probability
|
||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
|
@ -1,11 +1,10 @@
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git pkg-config openssh-client git
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
ENV VIRTUAL_ENV=/usr/local
|
||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing]"
|
||||
RUN uv pip install --no-cache-dir librosa "transformers[sklearn,sentencepiece,vision,testing]"
|
||||
RUN pip uninstall -y transformers
|
@ -1,9 +1,8 @@
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y time git
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
ENV VIRTUAL_ENV=/usr/local
|
||||
RUN pip install uv && uv venv
|
||||
RUN uv pip install --no-cache-dir -U pip setuptools GitPython "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[ruff]" urllib3
|
||||
RUN uv pip install --no-cache-dir -U pip setuptools GitPython transformers "ruff==0.1.5" urllib3
|
||||
RUN apt-get install -y jq curl && apt-get clean && rm -rf /var/lib/apt/lists/*
|
@ -1,12 +1,11 @@
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ pkg-config openssh-client git
|
||||
RUN apt-get install -y cmake
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
ENV VIRTUAL_ENV=/usr/local
|
||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN pip install --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,testing,sentencepiece,tf-speech,vision]"
|
||||
RUN pip install --upgrade --no-cache-dir "transformers[tf-cpu,sklearn,testing,sentencepiece,tf-speech,vision]"
|
||||
RUN uv pip install --no-cache-dir "protobuf==3.20.3"
|
||||
RUN pip uninstall -y transformers
|
||||
RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean
|
@ -1,13 +1,12 @@
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
ENV VIRTUAL_ENV=/usr/local
|
||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN uv pip install --no-deps accelerate
|
||||
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN pip install --no-cache-dir "scipy<1.13" "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,audio,sklearn,sentencepiece,vision,testing]"
|
||||
RUN pip install --no-cache-dir "scipy<1.13" "transformers[flax, audio, sklearn,sentencepiece,vision,testing]"
|
||||
|
||||
|
||||
# RUN pip install --no-cache-dir "scipy<1.13" "transformers[flax,testing,sentencepiece,flax-speech,vision]"
|
||||
|
@ -1,11 +1,10 @@
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git git-lfs
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
ENV VIRTUAL_ENV=/usr/local
|
||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing]"
|
||||
RUN uv pip install --no-cache-dir librosa "transformers[sklearn,sentencepiece,vision,testing]"
|
||||
RUN pip uninstall -y transformers
|
@ -4,7 +4,7 @@ ARG REF=main
|
||||
RUN echo ${REF}
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git git-lfs
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
ENV VIRTUAL_ENV=/usr/local
|
||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN uv pip install --no-cache-dir --no-deps accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||
|
@ -9,7 +9,7 @@ SHELL ["sh", "-lc"]
|
||||
# The following `ARG` are mainly used to specify the versions explicitly & directly in this docker file, and not meant
|
||||
# to be used as arguments for docker build (so far).
|
||||
|
||||
ARG PYTORCH='2.4.0'
|
||||
ARG PYTORCH='2.3.0'
|
||||
# (not always a valid torch version)
|
||||
ARG INTEL_TORCH_EXT='2.3.0'
|
||||
# Example: `cu102`, `cu113`, etc.
|
||||
@ -45,16 +45,12 @@ RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/opt
|
||||
# For video model testing
|
||||
RUN python3 -m pip install --no-cache-dir decord av==9.2.0
|
||||
|
||||
# For GGUF tests
|
||||
RUN python3 -m pip install --no-cache-dir gguf
|
||||
|
||||
# Some slow tests require bnb
|
||||
RUN python3 -m pip install --no-cache-dir bitsandbytes
|
||||
|
||||
# Some tests require quanto
|
||||
RUN python3 -m pip install --no-cache-dir quanto
|
||||
|
||||
# `quanto` will install `ninja` which leads to many `CUDA error: an illegal memory access ...` in some model tests
|
||||
# (`deformable_detr`, `rwkv`, `mra`)
|
||||
RUN python3 -m pip uninstall -y ninja
|
||||
|
||||
# For `dinat` model
|
||||
# The `XXX` part in `torchXXX` needs to match `PYTORCH` (to some extent)
|
||||
RUN python3 -m pip install --no-cache-dir natten==0.15.1+torch220$CUDA -f https://shi-labs.com/natten/wheels
|
||||
|
@ -11,7 +11,7 @@ ARG REF=main
|
||||
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
|
||||
|
||||
# If set to nothing, will install the latest version
|
||||
ARG PYTORCH='2.4.0'
|
||||
ARG PYTORCH='2.3.0'
|
||||
ARG TORCH_VISION=''
|
||||
ARG TORCH_AUDIO=''
|
||||
# Example: `cu102`, `cu113`, etc.
|
||||
|
@ -1,4 +1,4 @@
|
||||
FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04
|
||||
FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu20.04
|
||||
LABEL maintainer="Hugging Face"
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
@ -48,9 +48,6 @@ RUN python3 -m pip install --no-cache-dir aqlm[gpu]==1.0.2
|
||||
# Add hqq for quantization testing
|
||||
RUN python3 -m pip install --no-cache-dir hqq
|
||||
|
||||
# For GGUF tests
|
||||
RUN python3 -m pip install --no-cache-dir gguf
|
||||
|
||||
# Add autoawq for quantization testing
|
||||
# >=v0.2.3 needed for compatibility with torch 2.2.1
|
||||
RUN python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.3/autoawq-0.2.3+cu118-cp38-cp38-linux_x86_64.whl
|
||||
|
@ -162,7 +162,7 @@ Transformers verwendet die Shell-Umgebungsvariablen `PYTORCH_TRANSFORMERS_CACHE`
|
||||
|
||||
## Offline Modus
|
||||
|
||||
Transformers ist in der Lage, in einer Firewall- oder Offline-Umgebung zu laufen, indem es nur lokale Dateien verwendet. Setzen Sie die Umgebungsvariable `HF_HUB_OFFLINE=1`, um dieses Verhalten zu aktivieren.
|
||||
Transformers ist in der Lage, in einer Firewall- oder Offline-Umgebung zu laufen, indem es nur lokale Dateien verwendet. Setzen Sie die Umgebungsvariable `TRANSFORMERS_OFFLINE=1`, um dieses Verhalten zu aktivieren.
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -179,7 +179,7 @@ python examples/pytorch/translation/run_translation.py --model_name_or_path goog
|
||||
Führen Sie das gleiche Programm in einer Offline-Instanz mit aus:
|
||||
|
||||
```bash
|
||||
HF_DATASETS_OFFLINE=1 HF_HUB_OFFLINE=1 \
|
||||
HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
|
||||
python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ...
|
||||
```
|
||||
|
||||
|
@ -86,10 +86,10 @@ model.load_adapter(peft_model_id)
|
||||
Die `bitsandbytes`-Integration unterstützt Datentypen mit 8bit und 4bit Genauigkeit, was für das Laden großer Modelle nützlich ist, weil es Speicher spart (lesen Sie den `bitsandbytes`-Integrations [guide](./quantization#bitsandbytes-integration), um mehr zu erfahren). Fügen Sie die Parameter `load_in_8bit` oder `load_in_4bit` zu [`~PreTrainedModel.from_pretrained`] hinzu und setzen Sie `device_map="auto"`, um das Modell effektiv auf Ihre Hardware zu verteilen:
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
peft_model_id = "ybelkada/opt-350m-lora"
|
||||
model = AutoModelForCausalLM.from_pretrained(peft_model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True))
|
||||
model = AutoModelForCausalLM.from_pretrained(peft_model_id, device_map="auto", load_in_8bit=True)
|
||||
```
|
||||
|
||||
## Einen neuen Adapter hinzufügen
|
||||
|
@ -185,16 +185,16 @@ pytest -k "test and ada" tests/test_optimization.py
|
||||
|
||||
Manchmal müssen Sie `accelerate` Tests für Ihre Modelle ausführen. Dazu fügen Sie einfach `-m accelerate_tests` zu Ihrem Befehl hinzu, wenn Sie diese Tests bei einem `OPT`-Lauf ausführen möchten:
|
||||
```bash
|
||||
RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
|
||||
RUN_SLOW=1 pytest -m accelerate_tests tests/models/opt/test_modeling_opt.py
|
||||
```
|
||||
|
||||
|
||||
### Dokumentationstests ausführen
|
||||
### Dokumentationstests ausführen
|
||||
|
||||
Um zu testen, ob die Dokumentationsbeispiele korrekt sind, sollten Sie überprüfen, ob die `doctests` erfolgreich sind.
|
||||
Lassen Sie uns als Beispiel den docstring von [WhisperModel.forward](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035) verwenden:
|
||||
Um zu testen, ob die Dokumentationsbeispiele korrekt sind, sollten Sie überprüfen, ob die `doctests` erfolgreich sind.
|
||||
Lassen Sie uns als Beispiel den docstring von [WhisperModel.forward](https://github.com/huggingface/transformers/blob/main/src/transformers/models/whisper/modeling_whisper.py#L1017-L1035) verwenden:
|
||||
|
||||
```python
|
||||
```python
|
||||
r"""
|
||||
Returns:
|
||||
|
||||
@ -217,8 +217,8 @@ Example:
|
||||
|
||||
```
|
||||
|
||||
Führen Sie einfach die folgende Zeile aus, um automatisch jedes docstring-Beispiel in der gewünschten Datei zu testen:
|
||||
```bash
|
||||
Führen Sie einfach die folgende Zeile aus, um automatisch jedes docstring-Beispiel in der gewünschten Datei zu testen:
|
||||
```bash
|
||||
pytest --doctest-modules <path_to_file_or_dir>
|
||||
```
|
||||
Wenn die Datei eine Markdown-Erweiterung hat, sollten Sie das Argument `--doctest-glob="*.md"` hinzufügen.
|
||||
@ -862,7 +862,7 @@ Code, der fehlerhaft ist, einen schlechten Zustand verursacht, der sich auf ande
|
||||
- Hier sehen Sie, wie Sie einen ganzen Test bedingungslos überspringen können:
|
||||
|
||||
```python no-style
|
||||
@unittest.skip(reason="this bug needs to be fixed")
|
||||
@unittest.skip("this bug needs to be fixed")
|
||||
def test_feature_x():
|
||||
```
|
||||
|
||||
|
@ -1,5 +1,3 @@
|
||||
# Optimizing inference
|
||||
|
||||
perf_infer_gpu_many: perf_infer_gpu_one
|
||||
transformers_agents: agents
|
||||
quantization: quantization/overview
|
||||
|
@ -92,15 +92,11 @@
|
||||
title: Visual Question Answering
|
||||
- local: tasks/text-to-speech
|
||||
title: Text to speech
|
||||
- local: tasks/image_text_to_text
|
||||
title: Image-text-to-text
|
||||
title: Multimodal
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: generation_strategies
|
||||
title: Customize the generation strategy
|
||||
- local: kv_cache
|
||||
title: Best Practices for Generation with Cache
|
||||
title: Generation
|
||||
- isExpanded: false
|
||||
sections:
|
||||
@ -139,38 +135,18 @@
|
||||
title: Community resources
|
||||
- local: troubleshooting
|
||||
title: Troubleshoot
|
||||
- local: hf_quantizer
|
||||
title: Contribute new quantization method
|
||||
- local: gguf
|
||||
title: Interoperability with GGUF files
|
||||
title: Developer guides
|
||||
- sections:
|
||||
- local: quantization/overview
|
||||
title: Getting started
|
||||
- local: quantization/bitsandbytes
|
||||
title: bitsandbytes
|
||||
- local: quantization/gptq
|
||||
title: GPTQ
|
||||
- local: quantization/awq
|
||||
title: AWQ
|
||||
- local: quantization/aqlm
|
||||
title: AQLM
|
||||
- local: quantization/quanto
|
||||
title: Quanto
|
||||
- local: quantization/eetq
|
||||
title: EETQ
|
||||
- local: quantization/hqq
|
||||
title: HQQ
|
||||
- local: quantization/fbgemm_fp8
|
||||
title: FBGEMM_FP8
|
||||
- local: quantization/optimum
|
||||
title: Optimum
|
||||
- local: quantization/contribute
|
||||
title: Contribute new quantization method
|
||||
title: Quantization Methods
|
||||
- sections:
|
||||
- local: performance
|
||||
title: Overview
|
||||
- local: llm_optims
|
||||
title: LLM inference optimization
|
||||
- local: quantization
|
||||
title: Quantization
|
||||
- sections:
|
||||
- local: perf_train_gpu_one
|
||||
title: Methods and tools for efficient training on a single GPU
|
||||
@ -388,8 +364,6 @@
|
||||
title: Fuyu
|
||||
- local: model_doc/gemma
|
||||
title: Gemma
|
||||
- local: model_doc/gemma2
|
||||
title: Gemma2
|
||||
- local: model_doc/openai-gpt
|
||||
title: GPT
|
||||
- local: model_doc/gpt_neo
|
||||
@ -438,8 +412,6 @@
|
||||
title: MADLAD-400
|
||||
- local: model_doc/mamba
|
||||
title: Mamba
|
||||
- local: model_doc/mamba2
|
||||
title: mamba2
|
||||
- local: model_doc/marian
|
||||
title: MarianMT
|
||||
- local: model_doc/markuplm
|
||||
@ -470,8 +442,6 @@
|
||||
title: MT5
|
||||
- local: model_doc/mvp
|
||||
title: MVP
|
||||
- local: model_doc/nemotron
|
||||
title: Nemotron
|
||||
- local: model_doc/nezha
|
||||
title: NEZHA
|
||||
- local: model_doc/nllb
|
||||
@ -506,8 +476,6 @@
|
||||
title: QDQBert
|
||||
- local: model_doc/qwen2
|
||||
title: Qwen2
|
||||
- local: model_doc/qwen2_audio
|
||||
title: Qwen2Audio
|
||||
- local: model_doc/qwen2_moe
|
||||
title: Qwen2MoE
|
||||
- local: model_doc/rag
|
||||
@ -593,8 +561,6 @@
|
||||
title: DeiT
|
||||
- local: model_doc/depth_anything
|
||||
title: Depth Anything
|
||||
- local: model_doc/depth_anything_v2
|
||||
title: Depth Anything V2
|
||||
- local: model_doc/deta
|
||||
title: DETA
|
||||
- local: model_doc/detr
|
||||
@ -615,8 +581,6 @@
|
||||
title: FocalNet
|
||||
- local: model_doc/glpn
|
||||
title: GLPN
|
||||
- local: model_doc/hiera
|
||||
title: Hiera
|
||||
- local: model_doc/imagegpt
|
||||
title: ImageGPT
|
||||
- local: model_doc/levit
|
||||
@ -645,8 +609,6 @@
|
||||
title: RegNet
|
||||
- local: model_doc/resnet
|
||||
title: ResNet
|
||||
- local: model_doc/rt_detr
|
||||
title: RT-DETR
|
||||
- local: model_doc/segformer
|
||||
title: SegFormer
|
||||
- local: model_doc/seggpt
|
||||
@ -681,8 +643,6 @@
|
||||
title: ViTMSN
|
||||
- local: model_doc/yolos
|
||||
title: YOLOS
|
||||
- local: model_doc/zoedepth
|
||||
title: ZoeDepth
|
||||
title: Vision models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
@ -694,8 +654,6 @@
|
||||
title: CLAP
|
||||
- local: model_doc/encodec
|
||||
title: EnCodec
|
||||
- local: model_doc/hiera
|
||||
title: Hiera
|
||||
- local: model_doc/hubert
|
||||
title: Hubert
|
||||
- local: model_doc/mctct
|
||||
@ -770,8 +728,6 @@
|
||||
title: BridgeTower
|
||||
- local: model_doc/bros
|
||||
title: BROS
|
||||
- local: model_doc/chameleon
|
||||
title: Chameleon
|
||||
- local: model_doc/chinese_clip
|
||||
title: Chinese-CLIP
|
||||
- local: model_doc/clip
|
||||
@ -800,8 +756,6 @@
|
||||
title: Idefics2
|
||||
- local: model_doc/instructblip
|
||||
title: InstructBLIP
|
||||
- local: model_doc/instructblipvideo
|
||||
title: InstructBlipVideo
|
||||
- local: model_doc/kosmos-2
|
||||
title: KOSMOS-2
|
||||
- local: model_doc/layoutlm
|
||||
@ -818,8 +772,6 @@
|
||||
title: Llava
|
||||
- local: model_doc/llava_next
|
||||
title: LLaVA-NeXT
|
||||
- local: model_doc/llava-next-video
|
||||
title: LLaVa-NeXT-Video
|
||||
- local: model_doc/lxmert
|
||||
title: LXMERT
|
||||
- local: model_doc/matcha
|
||||
|
@ -28,8 +28,8 @@ An agent is a system that uses an LLM as its engine, and it has access to functi
|
||||
These *tools* are functions for performing a task, and they contain all the necessary descriptions for the agent to use them properly.
|
||||
|
||||
The agent can be programmed to:
|
||||
- devise a series of actions/tools and run them all at once like the [`CodeAgent`] for example
|
||||
- plan and execute actions/tools one by one and wait for the outcome of each action before launching the next one like the [`ReactJsonAgent`] for example
|
||||
- devise a series of actions/tools and run them all at once like the `CodeAgent` for example
|
||||
- plan and execute actions/tools one by one and wait for the outcome of each action before launching the next one like the `ReactJsonAgent` for example
|
||||
|
||||
### Types of agents
|
||||
|
||||
@ -42,15 +42,15 @@ This agent has a planning step, then generates python code to execute all its ac
|
||||
This is the go-to agent to solve reasoning tasks, since the ReAct framework ([Yao et al., 2022](https://huggingface.co/papers/2210.03629)) makes it really efficient to think on the basis of its previous observations.
|
||||
|
||||
We implement two versions of ReactJsonAgent:
|
||||
- [`ReactJsonAgent`] generates tool calls as a JSON in its output.
|
||||
- [`ReactCodeAgent`] is a new type of ReactJsonAgent that generates its tool calls as blobs of code, which works really well for LLMs that have strong coding performance.
|
||||
- [`~ReactJsonAgent`] generates tool calls as a JSON in its output.
|
||||
- [`~ReactCodeAgent`] is a new type of ReactJsonAgent that generates its tool calls as blobs of code, which works really well for LLMs that have strong coding performance.
|
||||
|
||||
> [!TIP]
|
||||
> Read [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) blog post to learn more about the ReAct agent.
|
||||
|
||||

|
||||
|
||||
For example, here is how a ReAct Code agent would work its way through the following question.
|
||||
For example, here is how a ReAct agent would work its way through the following question.
|
||||
|
||||
```py3
|
||||
>>> agent.run(
|
||||
@ -119,14 +119,12 @@ def llm_engine(messages, stop_sequences=["Task"]) -> str:
|
||||
```
|
||||
|
||||
You could use any `llm_engine` method as long as:
|
||||
1. it follows the [messages format](./chat_templating.md) (`List[Dict[str, str]]`) for its input `messages`, and it returns a `str`.
|
||||
2. it stops generating outputs at the sequences passed in the argument `stop_sequences`
|
||||
1. it follows the [messages format](./chat_templating.md) for its input (`List[Dict[str, str]]`) and returns a `str`
|
||||
2. it stops generating outputs at the sequences passed in the argument `stop`
|
||||
|
||||
Additionally, `llm_engine` can also take a `grammar` argument. In the case where you specify a `grammar` upon agent initialization, this argument will be passed to the calls to llm_engine, with the `grammar` that you defined upon initialization, to allow [constrained generation](https://huggingface.co/docs/text-generation-inference/conceptual/guidance) in order to force properly-formatted agent outputs.
|
||||
You also need a `tools` argument which accepts a list of `Tools`. You can provide an empty list for `tools`, but use the default toolbox with the optional argument `add_base_tools=True`.
|
||||
|
||||
You will also need a `tools` argument which accepts a list of `Tools` - it can be an empty list. You can also add the default toolbox on top of your `tools` list by defining the optional argument `add_base_tools=True`.
|
||||
|
||||
Now you can create an agent, like [`CodeAgent`], and run it. For convenience, we also provide the [`HfEngine`] class that uses `huggingface_hub.InferenceClient` under the hood.
|
||||
Now you can create an agent, like `CodeAgent`, and run it. For convenience, we also provide the `HfEngine` class that uses `huggingface_hub.InferenceClient` under the hood.
|
||||
|
||||
```python
|
||||
from transformers import CodeAgent, HfEngine
|
||||
@ -141,7 +139,7 @@ agent.run(
|
||||
```
|
||||
|
||||
This will be handy in case of emergency baguette need!
|
||||
You can even leave the argument `llm_engine` undefined, and an [`HfEngine`] will be created by default.
|
||||
You can even leave the argument `llm_engine` undefined, and an [~HfEngine] will be created by default.
|
||||
|
||||
```python
|
||||
from transformers import CodeAgent
|
||||
@ -183,27 +181,13 @@ You can also run an agent consecutively for different tasks: each time the attri
|
||||
A Python interpreter executes the code on a set of inputs passed along with your tools.
|
||||
This should be safe because the only functions that can be called are the tools you provided (especially if they are only tools provided by Hugging Face) and the print function, so you're already limited in what can be executed.
|
||||
|
||||
The Python interpreter also doesn't allow imports by default outside of a safe list, so all the most obvious attacks shouldn't be an issue.
|
||||
You can still authorize additional imports by passing the authorized modules as a list of strings in argument `additional_authorized_imports` upon initialization of your [`ReactCodeAgent`] or [`CodeAgent`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import ReactCodeAgent
|
||||
|
||||
>>> agent = ReactCodeAgent(tools=[], additional_authorized_imports=['requests', 'bs4'])
|
||||
>>> agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?")
|
||||
|
||||
(...)
|
||||
'Hugging Face – Blog'
|
||||
```
|
||||
The Python interpreter also doesn't allow any attribute lookup or imports (which shouldn't be needed for passing inputs/outputs to a small set of functions) so all the most obvious attacks shouldn't be an issue.
|
||||
|
||||
The execution will stop at any code trying to perform an illegal operation or if there is a regular Python error with the code generated by the agent.
|
||||
|
||||
> [!WARNING]
|
||||
> The LLM can generate arbitrary code that will then be executed: do not add any unsafe imports!
|
||||
|
||||
### The system prompt
|
||||
|
||||
An agent, or rather the LLM that drives the agent, generates an output based on the system prompt. The system prompt can be customized and tailored to the intended task. For example, check the system prompt for the [`ReactCodeAgent`] (below version is slightly simplified).
|
||||
An agent, or rather the LLM that drives the agent, generates an output based on the system prompt. The system prompt can be customized and tailored to the intended task. For example, check the system prompt for the `ReactCodeAgent` (below version is slightly simplified).
|
||||
|
||||
```text
|
||||
You will be given a task to solve as best you can.
|
||||
@ -258,18 +242,11 @@ agent = ReactJsonAgent(tools=[PythonInterpreterTool()], system_prompt="{your_cus
|
||||
> Please make sure to define the `<<tool_descriptions>>` string somewhere in the `template` so the agent is aware
|
||||
of the available tools.
|
||||
|
||||
|
||||
### Inspecting an agent run
|
||||
|
||||
Here are a few useful attributes to inspect what happened after a run:
|
||||
- `agent.logs` stores the fine-grained logs of the agent. At every step of the agent's run, everything gets stored in a dictionary that then is appended to `agent.logs`.
|
||||
- Running `agent.write_inner_memory_from_logs()` creates an inner memory of the agent's logs for the LLM to view, as a list of chat messages. This method goes over each step of the log and only stores what it's interested in as a message: for instance, it will save the system prompt and task in separate messages, then for each step it will store the LLM output as a message, and the tool call output as another message. Use this if you want a higher-level view of what has happened - but not every log will be transcripted by this method.
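As a minimal sketch (assuming `agent` is an already-initialized agent, for example a [`ReactCodeAgent`], that a run has completed, and that the method above returns the list of messages it builds), inspecting a finished run could look like this:

```python
# Assumes `agent` was created earlier and `agent.run(...)` has finished
agent.run("How many seconds are there in a leap year?")

# Fine-grained view: one dictionary per step of the run
for i, step_log in enumerate(agent.logs):
    print(f"Step {i}: {list(step_log.keys())}")

# Higher-level view: the run replayed as a list of chat messages
# (assumed here to be returned by the method, as described above)
for message in agent.write_inner_memory_from_logs():
    print(message["role"], "->", str(message["content"])[:80])
```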
|
||||
|
||||
## Tools
|
||||
|
||||
A tool is an atomic function to be used by an agent.
|
||||
|
||||
You can for instance check the [`PythonInterpreterTool`]: it has a name, a description, input descriptions, an output type, and a `__call__` method to perform the action.
|
||||
You can for instance check the [~PythonInterpreterTool]: it has a name, a description, input descriptions, an output type, and a `__call__` method to perform the action.
|
||||
|
||||
When the agent is initialized, the tool attributes are used to generate a tool description which is baked into the agent's system prompt. This lets the agent know which tools it can use and why.
|
||||
|
||||
@ -282,7 +259,7 @@ Transformers comes with a default toolbox for empowering agents, that you can ad
|
||||
- **Speech to text**: given an audio recording of a person talking, transcribe the speech into text ([Whisper](./model_doc/whisper))
|
||||
- **Text to speech**: convert text to speech ([SpeechT5](./model_doc/speecht5))
|
||||
- **Translation**: translates a given sentence from source language to target language.
|
||||
- **Python code interpreter**: runs the LLM-generated Python code in a secure environment. This tool will only be added to [`ReactJsonAgent`] if you use `add_base_tools=True`, since code-based tools can already execute Python code
|
||||
- **Python code interpreter**: runs the LLM-generated Python code in a secure environment. This tool will only be added to [~ReactJsonAgent] if you use `add_base_tools=True`, since code-based tools can already execute Python code
|
||||
|
||||
|
||||
You can manually use a tool by calling the [`load_tool`] function with the task to perform.
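For instance, a minimal sketch of loading and calling a single tool directly (the tool name below is an illustrative assumption):

```python
from transformers import load_tool

# Load one tool and call it outside of any agent run
# ("text-to-speech" is assumed here purely for illustration)
tts_tool = load_tool("text-to-speech")
audio = tts_tool("Manually calling a single tool, outside of any agent run.")
```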
|
||||
@ -388,7 +365,7 @@ And the output:
|
||||
`"The most downloaded model for the 'text-to-video' task is ByteDance/AnimateDiff-Lightning."`
|
||||
|
||||
|
||||
### Manage your agent's toolbox
|
||||
### Manage agent toolbox
|
||||
|
||||
If you have already initialized an agent, it is inconvenient to reinitialize it from scratch with a tool you want to use. With Transformers, you can manage an agent's toolbox by adding or replacing a tool.
|
||||
|
||||
@ -511,54 +488,3 @@ agent = ReactCodeAgent(tools=[search_tool])
|
||||
|
||||
agent.run("How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need?")
|
||||
```
|
||||
|
||||
## Gradio interface
|
||||
|
||||
You can leverage `gradio.Chatbot` to display your agent's thoughts using `stream_to_gradio`. Here is an example:
|
||||
|
||||
```py
|
||||
import gradio as gr
|
||||
from transformers import (
|
||||
load_tool,
|
||||
ReactCodeAgent,
|
||||
HfEngine,
|
||||
stream_to_gradio,
|
||||
)
|
||||
|
||||
# Import tool from Hub
|
||||
image_generation_tool = load_tool("m-ric/text-to-image")
|
||||
|
||||
llm_engine = HfEngine("meta-llama/Meta-Llama-3-70B-Instruct")
|
||||
|
||||
# Initialize the agent with the image generation tool
|
||||
agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)
|
||||
|
||||
|
||||
def interact_with_agent(task):
|
||||
messages = []
|
||||
messages.append(gr.ChatMessage(role="user", content=task))
|
||||
yield messages
|
||||
for msg in stream_to_gradio(agent, task):
|
||||
messages.append(msg)
|
||||
yield messages + [
|
||||
gr.ChatMessage(role="assistant", content="⏳ Task not finished yet!")
|
||||
]
|
||||
yield messages
|
||||
|
||||
|
||||
with gr.Blocks() as demo:
|
||||
text_input = gr.Textbox(lines=1, label="Chat Message", value="Make me a picture of the Statue of Liberty.")
|
||||
submit = gr.Button("Run illustrator agent!")
|
||||
chatbot = gr.Chatbot(
|
||||
label="Agent",
|
||||
type="messages",
|
||||
avatar_images=(
|
||||
None,
|
||||
"https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png",
|
||||
),
|
||||
)
|
||||
submit.click(interact_with_agent, [text_input], [chatbot])
|
||||
|
||||
if __name__ == "__main__":
|
||||
demo.launch()
|
||||
```
|
@ -199,8 +199,7 @@ effect that `add_generation_prompt` has will depend on the template being used.
|
||||
|
||||
## Can I use chat templates in training?
|
||||
|
||||
Yes! This is a good way to ensure that the chat template matches the tokens the model sees during training.
|
||||
We recommend that you apply the chat template as a preprocessing step for your dataset. After this, you
|
||||
Yes! We recommend that you apply the chat template as a preprocessing step for your dataset. After this, you
|
||||
can simply continue like any other language model training task. When training, you should usually set
|
||||
`add_generation_prompt=False`, because the added tokens to prompt an assistant response will not be helpful during
|
||||
training. Let's see an example:
|
||||
@ -234,342 +233,6 @@ The sun.</s>
|
||||
|
||||
From here, just continue training like you would with a standard language modelling task, using the `formatted_chat` column.
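For reference, here is a minimal sketch of that preprocessing step with a toy conversation (the dataset and column names are assumptions for illustration, and `tokenizer` is assumed to be loaded already):

```python
from datasets import Dataset

chats = [
    [
        {"role": "user", "content": "Which is bigger, the moon or the sun?"},
        {"role": "assistant", "content": "The sun."},
    ]
]

dataset = Dataset.from_dict({"chat": chats})
# Apply the chat template as a preprocessing step, producing a `formatted_chat` column
dataset = dataset.map(
    lambda x: {"formatted_chat": tokenizer.apply_chat_template(x["chat"], tokenize=False, add_generation_prompt=False)}
)
print(dataset["formatted_chat"][0])
```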
|
||||
|
||||
<Tip>
|
||||
If you format text with `apply_chat_template(tokenize=False)` and then tokenize it in a separate step, you should set the argument
|
||||
`add_special_tokens=False`. If you use `apply_chat_template(tokenize=True)`, you don't need to worry about this!
|
||||
|
||||
By default, some tokenizers add special tokens like `<bos>` and `<eos>` to text they tokenize. Chat templates should
|
||||
always include all of the special tokens they need, and so adding extra special tokens with
|
||||
the default `add_special_tokens=True` can result in incorrect or duplicated special tokens, which will hurt model
|
||||
performance.
|
||||
</Tip>
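A minimal sketch of the two-step flow described in the tip, assuming `tokenizer` and a `chat` list of messages are already defined:

```python
# Step 1: apply the chat template without tokenizing
formatted_chat = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)

# Step 2: tokenize separately, without adding special tokens a second time
inputs = tokenizer(formatted_chat, return_tensors="pt", add_special_tokens=False)
```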
|
||||
|
||||
## Advanced: Extra inputs to chat templates
|
||||
|
||||
The only argument that `apply_chat_template` requires is `messages`. However, you can pass any keyword
|
||||
argument to `apply_chat_template` and it will be accessible inside the template. This gives you a lot of freedom to use
|
||||
chat templates for many things. There are no restrictions on the names or the format of these arguments - you can pass
|
||||
strings, lists, dicts or whatever else you want.
|
||||
|
||||
That said, there are some common use-cases for these extra arguments,
|
||||
such as passing tools for function calling, or documents for retrieval-augmented generation. In these common cases,
|
||||
we have some opinionated recommendations about what the names and formats of these arguments should be, which are
|
||||
described in the sections below. We encourage model authors to make their chat templates compatible with this format,
|
||||
to make it easy to transfer tool-calling code between models.
|
||||
|
||||
## Advanced: Tool use / function calling
|
||||
|
||||
"Tool use" LLMs can choose to call functions as external tools before generating an answer. When passing tools
|
||||
to a tool-use model, you can simply pass a list of functions to the `tools` argument:
|
||||
|
||||
```python
|
||||
import datetime
|
||||
|
||||
def current_time():
|
||||
"""Get the current local time as a string."""
|
||||
    return str(datetime.datetime.now())
|
||||
|
||||
def multiply(a: float, b: float):
|
||||
"""
|
||||
A function that multiplies two numbers
|
||||
|
||||
Args:
|
||||
a: The first number to multiply
|
||||
b: The second number to multiply
|
||||
"""
|
||||
return a * b
|
||||
|
||||
tools = [current_time, multiply]
|
||||
|
||||
model_input = tokenizer.apply_chat_template(
|
||||
messages,
|
||||
tools=tools
|
||||
)
|
||||
```
|
||||
|
||||
In order for this to work correctly, you should write your functions in the format above, so that they can be parsed
|
||||
correctly as tools. Specifically, you should follow these rules:
|
||||
|
||||
- The function should have a descriptive name
|
||||
- Every argument must have a type hint
|
||||
- The function must have a docstring in the standard Google style (in other words, an initial function description
|
||||
followed by an `Args:` block that describes the arguments, unless the function does not have any arguments).
|
||||
- Do not include types in the `Args:` block. In other words, write `a: The first number to multiply`, not
|
||||
`a (int): The first number to multiply`. Type hints should go in the function header instead.
|
||||
- The function can have a return type and a `Returns:` block in the docstring. However, these are optional
|
||||
because most tool-use models ignore them.
|
||||
|
||||
### Passing tool results to the model
|
||||
|
||||
The sample code above is enough to list the available tools for your model, but what happens if it wants to actually use
|
||||
one? If that happens, you should:
|
||||
|
||||
1. Parse the model's output to get the tool name(s) and arguments.
|
||||
2. Add the model's tool call(s) to the conversation.
|
||||
3. Call the corresponding function(s) with those arguments.
|
||||
4. Add the result(s) to the conversation
|
||||
|
||||
### A complete tool use example
|
||||
|
||||
Let's walk through a tool use example, step by step. For this example, we will use an 8B `Hermes-2-Pro` model,
|
||||
as it is one of the highest-performing tool-use models in its size category at the time of writing. If you have the
|
||||
memory, you can consider using a larger model instead like [Command-R](https://huggingface.co/CohereForAI/c4ai-command-r-v01)
|
||||
or [Mixtral-8x22B](https://huggingface.co/mistralai/Mixtral-8x22B-Instruct-v0.1), both of which also support tool use
|
||||
and offer even stronger performance.
|
||||
|
||||
First, let's load our model and tokenizer:
|
||||
|
||||
```python
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
checkpoint = "NousResearch/Hermes-2-Pro-Llama-3-8B"
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(checkpoint, revision="pr/13")
|
||||
model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.bfloat16, device_map="auto")
|
||||
```
|
||||
|
||||
Next, let's define a list of tools:
|
||||
|
||||
```python
|
||||
def get_current_temperature(location: str, unit: str) -> float:
|
||||
"""
|
||||
Get the current temperature at a location.
|
||||
|
||||
Args:
|
||||
location: The location to get the temperature for, in the format "City, Country"
|
||||
unit: The unit to return the temperature in. (choices: ["celsius", "fahrenheit"])
|
||||
Returns:
|
||||
The current temperature at the specified location in the specified units, as a float.
|
||||
"""
|
||||
return 22. # A real function should probably actually get the temperature!
|
||||
|
||||
def get_current_wind_speed(location: str) -> float:
|
||||
"""
|
||||
Get the current wind speed in km/h at a given location.
|
||||
|
||||
Args:
|
||||
location: The location to get the wind speed for, in the format "City, Country"
|
||||
Returns:
|
||||
The current wind speed at the given location in km/h, as a float.
|
||||
"""
|
||||
return 6. # A real function should probably actually get the wind speed!
|
||||
|
||||
tools = [get_current_temperature, get_current_wind_speed]
|
||||
```
|
||||
|
||||
Now, let's set up a conversation for our bot:
|
||||
|
||||
```python
|
||||
messages = [
|
||||
{"role": "system", "content": "You are a bot that responds to weather queries. You should reply with the unit used in the queried location."},
|
||||
{"role": "user", "content": "Hey, what's the temperature in Paris right now?"}
|
||||
]
|
||||
```
|
||||
|
||||
Now, let's apply the chat template and generate a response:
|
||||
|
||||
```python
|
||||
inputs = tokenizer.apply_chat_template(messages, chat_template="tool_use", tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
|
||||
inputs = {k: v.to(model.device) for k, v in inputs.items()}
|
||||
out = model.generate(**inputs, max_new_tokens=128)
|
||||
print(tokenizer.decode(out[0][len(inputs["input_ids"][0]):]))
|
||||
```
|
||||
|
||||
And we get:
|
||||
|
||||
```text
|
||||
<tool_call>
|
||||
{"arguments": {"location": "Paris, France", "unit": "celsius"}, "name": "get_current_temperature"}
|
||||
</tool_call><|im_end|>
|
||||
```
|
||||
|
||||
The model has called the function with valid arguments, in the format requested by the function docstring. It has
|
||||
inferred that we're most likely referring to the Paris in France, and it remembered that, as the home of SI units,
|
||||
the temperature in France should certainly be displayed in Celsius.
|
||||
|
||||
Let's append the model's tool call to the conversation. Note that we generate a random `tool_call_id` here. These IDs
|
||||
are not used by all models, but they allow models to issue multiple tool calls at once and keep track of which response
|
||||
corresponds to which call. You can generate them any way you like, but they should be unique within each chat.
|
||||
|
||||
```python
|
||||
tool_call_id = "vAHdf3" # Random ID, should be unique for each tool call
|
||||
tool_call = {"name": "get_current_temperature", "arguments": {"location": "Paris, France", "unit": "celsius"}}
|
||||
messages.append({"role": "assistant", "tool_calls": [{"id": tool_call_id, "type": "function", "function": tool_call}]})
|
||||
```
|
||||
|
||||
|
||||
Now that we've added the tool call to the conversation, we can call the function and append the result to the
|
||||
conversation. Since we're just using a dummy function for this example that always returns 22.0, we can just append
|
||||
that result directly. Again, note the `tool_call_id` - this should match the ID used in the tool call above.
|
||||
|
||||
```python
|
||||
messages.append({"role": "tool", "tool_call_id": tool_call_id, "name": "get_current_temperature", "content": "22.0"})
|
||||
```
|
||||
|
||||
Finally, let's let the assistant read the function outputs and continue chatting with the user:
|
||||
|
||||
```python
|
||||
inputs = tokenizer.apply_chat_template(messages, chat_template="tool_use", tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
|
||||
inputs = {k: v.to(model.device) for k, v in inputs.items()}
|
||||
out = model.generate(**inputs, max_new_tokens=128)
|
||||
print(tokenizer.decode(out[0][len(inputs["input_ids"][0]):]))
|
||||
```
|
||||
|
||||
And we get:
|
||||
|
||||
```text
|
||||
The current temperature in Paris, France is 22.0 ° Celsius.<|im_end|>
|
||||
```
|
||||
|
||||
Although this was a simple demo with dummy tools and a single call, the same technique works with
|
||||
multiple real tools and longer conversations. This can be a powerful way to extend the capabilities of conversational
|
||||
agents with real-time information, computational tools like calculators, or access to large databases.
|
||||
|
||||
<Tip>
|
||||
Not all of the tool-calling features shown above are used by all models. Some use tool call IDs, others simply use the function name and
|
||||
match tool calls to results using the ordering, and there are several models that use neither and only issue one tool
|
||||
call at a time to avoid confusion. If you want your code to be compatible across as many models as possible, we
|
||||
recommend structuring your tool calls like we've shown here, and returning tool results in the order that
|
||||
they were issued by the model. The chat templates on each model should handle the rest.
|
||||
</Tip>
|
||||
|
||||
### Understanding tool schemas
|
||||
|
||||
Each function you pass to the `tools` argument of `apply_chat_template` is converted into a
|
||||
[JSON schema](https://json-schema.org/learn/getting-started-step-by-step). These schemas
|
||||
are then passed to the model chat template. In other words, tool-use models do not see your functions directly, and they
|
||||
never see the actual code inside them. What they care about is the function **definitions** and the **arguments** they
|
||||
need to pass to them - they care about what the tools do and how to use them, not how they work! It is up to you
|
||||
to read their outputs, detect if they have requested to use a tool, pass their arguments to the tool function, and
|
||||
return the response in the chat.
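As a minimal sketch (assuming the Hermes-style `<tool_call>` output format shown earlier, and reusing the `out`, `inputs`, `tokenizer` and tool functions from the example above), that read-and-dispatch step could look like this:

```python
import json
import re

decoded = tokenizer.decode(out[0][len(inputs["input_ids"][0]):])

# Look for a <tool_call>{...}</tool_call> block in the model output
match = re.search(r"<tool_call>\s*(\{.*?\})\s*</tool_call>", decoded, re.DOTALL)
if match is not None:
    tool_call = json.loads(match.group(1))
    available_tools = {
        "get_current_temperature": get_current_temperature,
        "get_current_wind_speed": get_current_wind_speed,
    }
    # Dispatch to the matching function with the arguments the model requested
    result = available_tools[tool_call["name"]](**tool_call.get("arguments", {}))
```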
|
||||
|
||||
Generating JSON schemas to pass to the template should be automatic and invisible as long as your functions
|
||||
follow the specification above, but if you encounter problems, or you simply want more control over the conversion,
|
||||
you can handle the conversion manually. Here is an example of a manual schema conversion.
|
||||
|
||||
```python
|
||||
from transformers.utils import get_json_schema
|
||||
|
||||
def multiply(a: float, b: float):
|
||||
"""
|
||||
A function that multiplies two numbers
|
||||
|
||||
Args:
|
||||
a: The first number to multiply
|
||||
b: The second number to multiply
|
||||
"""
|
||||
return a * b
|
||||
|
||||
schema = get_json_schema(multiply)
|
||||
print(schema)
|
||||
```
|
||||
|
||||
This will yield:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "multiply",
|
||||
"description": "A function that multiplies two numbers",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"a": {
|
||||
"type": "number",
|
||||
"description": "The first number to multiply"
|
||||
},
|
||||
"b": {
|
||||
"type": "number",
|
||||
"description": "The second number to multiply"
|
||||
}
|
||||
},
|
||||
"required": ["a", "b"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
If you wish, you can edit these schemas, or even write them from scratch yourself without using `get_json_schema` at
|
||||
all. JSON schemas can be passed directly to the `tools` argument of
|
||||
`apply_chat_template` - this gives you a lot of power to define precise schemas for more complex functions. Be careful,
|
||||
though - the more complex your schemas, the more likely the model is to get confused when dealing with them! We
|
||||
recommend simple function signatures where possible, keeping arguments (and especially complex, nested arguments)
|
||||
to a minimum.
|
||||
|
||||
Here is an example of defining schemas by hand, and passing them directly to `apply_chat_template`:
|
||||
|
||||
```python
|
||||
# A simple function that takes no arguments
|
||||
current_time = {
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "current_time",
|
||||
"description": "Get the current local time as a string.",
|
||||
"parameters": {
|
||||
'type': 'object',
|
||||
'properties': {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# A more complete function that takes two numerical arguments
|
||||
multiply = {
|
||||
'type': 'function',
|
||||
'function': {
|
||||
'name': 'multiply',
|
||||
'description': 'A function that multiplies two numbers',
|
||||
'parameters': {
|
||||
'type': 'object',
|
||||
'properties': {
|
||||
'a': {
|
||||
'type': 'number',
|
||||
'description': 'The first number to multiply'
|
||||
},
|
||||
'b': {
|
||||
'type': 'number', 'description': 'The second number to multiply'
|
||||
}
|
||||
},
|
||||
'required': ['a', 'b']
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
model_input = tokenizer.apply_chat_template(
|
||||
messages,
|
||||
tools = [current_time, multiply]
|
||||
)
|
||||
```
|
||||
|
||||
## Advanced: Retrieval-augmented generation
|
||||
|
||||
"Retrieval-augmented generation" or "RAG" LLMs can search a corpus of documents for information before responding
|
||||
to a query. This allows models to vastly expand their knowledge base beyond their limited context size. Our
|
||||
recommendation for RAG models is that their template
|
||||
should accept a `documents` argument. This should be a list of documents, where each "document"
|
||||
is a single dict with `title` and `contents` keys, both of which are strings. Because this format is much simpler
|
||||
than the JSON schemas used for tools, no helper functions are necessary.
|
||||
|
||||
Here's an example of a RAG template in action:
|
||||
|
||||
```python
|
||||
document1 = {
|
||||
"title": "The Moon: Our Age-Old Foe",
|
||||
"contents": "Man has always dreamed of destroying the moon. In this essay, I shall..."
|
||||
}
|
||||
|
||||
document2 = {
|
||||
"title": "The Sun: Our Age-Old Friend",
|
||||
"contents": "Although often underappreciated, the sun provides several notable benefits..."
|
||||
}
|
||||
|
||||
model_input = tokenizer.apply_chat_template(
|
||||
messages,
|
||||
documents=[document1, document2]
|
||||
)
|
||||
```
|
||||
|
||||
## Advanced: How do chat templates work?
|
||||
|
||||
The chat template for a model is stored on the `tokenizer.chat_template` attribute. If no chat template is set, the
|
||||
@ -580,25 +243,27 @@ default template for that model class is used instead. Let's take a look at the
|
||||
>>> from transformers import AutoTokenizer
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
|
||||
|
||||
>>> tokenizer.chat_template
|
||||
>>> tokenizer.default_chat_template
|
||||
"{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}"
|
||||
```
|
||||
|
||||
That's kind of intimidating. Let's clean it up a little to make it more readable. In the process, though, we also make
|
||||
sure that the newlines and indentation we add don't end up being included in the template output - see the tip on
|
||||
[trimming whitespace](#trimming-whitespace) below!
|
||||
That's kind of intimidating. Let's add some newlines and indentation to make it more readable. Note that the first
|
||||
newline after each block as well as any preceding whitespace before a block are ignored by default, using the
|
||||
Jinja `trim_blocks` and `lstrip_blocks` flags. However, be cautious - although leading whitespace on each
|
||||
line is stripped, spaces between blocks on the same line are not. We strongly recommend checking that your template
|
||||
isn't printing extra spaces where it shouldn't be!
|
||||
|
||||
```
|
||||
{%- for message in messages %}
|
||||
{%- if message['role'] == 'user' %}
|
||||
{{- ' ' }}
|
||||
{%- endif %}
|
||||
{{- message['content'] }}
|
||||
{%- if not loop.last %}
|
||||
{{- ' ' }}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
{{- eos_token }}
|
||||
{% for message in messages %}
|
||||
{% if message['role'] == 'user' %}
|
||||
{{ ' ' }}
|
||||
{% endif %}
|
||||
{{ message['content'] }}
|
||||
{% if not loop.last %}
|
||||
{{ ' ' }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{{ eos_token }}
|
||||
```
|
||||
|
||||
If you've never seen one of these before, this is a [Jinja template](https://jinja.palletsprojects.com/en/3.1.x/templates/).
|
||||
@ -627,15 +292,15 @@ similarly to the way LLaMA formats them (note that the real LLaMA template inclu
|
||||
messages and slightly different system message handling in general - don't use this one in your actual code!)
|
||||
|
||||
```
|
||||
{%- for message in messages %}
|
||||
{%- if message['role'] == 'user' %}
|
||||
{{- bos_token + '[INST] ' + message['content'] + ' [/INST]' }}
|
||||
{%- elif message['role'] == 'system' %}
|
||||
{{- '<<SYS>>\\n' + message['content'] + '\\n<</SYS>>\\n\\n' }}
|
||||
{%- elif message['role'] == 'assistant' %}
|
||||
{{- ' ' + message['content'] + ' ' + eos_token }}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
{% for message in messages %}
|
||||
{% if message['role'] == 'user' %}
|
||||
{{ bos_token + '[INST] ' + message['content'] + ' [/INST]' }}
|
||||
{% elif message['role'] == 'system' %}
|
||||
{{ '<<SYS>>\\n' + message['content'] + '\\n<</SYS>>\\n\\n' }}
|
||||
{% elif message['role'] == 'assistant' %}
|
||||
{{ ' ' + message['content'] + ' ' + eos_token }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
```
|
||||
|
||||
Hopefully if you stare at this for a little bit you can see what this template is doing - it adds specific tokens based
|
||||
@ -651,15 +316,15 @@ existing template from another model and simply edit it for your needs! For exam
|
||||
above and add "[ASST]" and "[/ASST]" to assistant messages:
|
||||
|
||||
```
|
||||
{%- for message in messages %}
|
||||
{%- if message['role'] == 'user' %}
|
||||
{{- bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }}
|
||||
{%- elif message['role'] == 'system' %}
|
||||
{{- '<<SYS>>\\n' + message['content'].strip() + '\\n<</SYS>>\\n\\n' }}
|
||||
{%- elif message['role'] == 'assistant' %}
|
||||
{{- '[ASST] ' + message['content'] + ' [/ASST]' + eos_token }}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
{% for message in messages %}
|
||||
{% if message['role'] == 'user' %}
|
||||
{{ bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }}
|
||||
{% elif message['role'] == 'system' %}
|
||||
{{ '<<SYS>>\\n' + message['content'].strip() + '\\n<</SYS>>\\n\\n' }}
|
||||
{% elif message['role'] == 'assistant' %}
|
||||
{{ '[ASST] ' + message['content'] + ' [/ASST]' + eos_token }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
```
|
||||
|
||||
Now, simply set the `tokenizer.chat_template` attribute. Next time you use [`~PreTrainedTokenizer.apply_chat_template`], it will
|
||||
@ -686,23 +351,22 @@ template. This will ensure that text generation tools can correctly figure out w
|
||||
</Tip>
|
||||
|
||||
|
||||
### Why do some models have multiple templates?
|
||||
### What are "default" templates?
|
||||
|
||||
Some models use different templates for different use cases. For example, they might use one template for normal chat
|
||||
and another for tool-use, or retrieval-augmented generation. In these cases, `tokenizer.chat_template` is a dictionary.
|
||||
This can cause some confusion, and where possible, we recommend using a single template for all use-cases. You can use
|
||||
Jinja statements like `if tools is defined` and `{% macro %}` definitions to easily wrap multiple code paths in a
|
||||
single template.
|
||||
Before the introduction of chat templates, chat handling was hardcoded at the model class level. For backwards
|
||||
compatibility, we have retained this class-specific handling as default templates, also set at the class level. If a
|
||||
model does not have a chat template set, but there is a default template for its model class, the `TextGenerationPipeline`
|
||||
class and methods like `apply_chat_template` will use the class template instead. You can find out what the default
|
||||
template for your tokenizer is by checking the `tokenizer.default_chat_template` attribute.
|
||||
|
||||
When a tokenizer has multiple templates, `tokenizer.chat_template` will be a `dict`, where each key is the name
|
||||
of a template. The `apply_chat_template` method has special handling for certain template names: Specifically, it will
|
||||
look for a template named `default` in most cases, and will raise an error if it can't find one. However, if a template
|
||||
named `tool_use` exists when the user has passed a `tools` argument, it will use that instead. To access templates
|
||||
with other names, pass the name of the template you want to the `chat_template` argument of
|
||||
`apply_chat_template()`.
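For instance (a sketch assuming a tokenizer whose `chat_template` is a dictionary of named templates, with `messages` and `tools` defined as in the earlier examples):

```python
# Inspect the available named templates
print(tokenizer.chat_template.keys())  # e.g. dict_keys(['default', 'tool_use'])

# Select a named template explicitly instead of relying on the automatic lookup
text = tokenizer.apply_chat_template(
    messages, tools=tools, chat_template="tool_use", tokenize=False, add_generation_prompt=True
)
```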
|
||||
This is something we do purely for backward compatibility reasons, to avoid breaking any existing workflows. Even when
|
||||
the class template is appropriate for your model, we strongly recommend overriding the default template by
|
||||
setting the `chat_template` attribute explicitly to make it clear to users that your model has been correctly configured
|
||||
for chat.
|
||||
|
||||
We find that this can be a bit confusing for users, though - so if you're writing a template yourself, we recommend
|
||||
trying to put it all in a single template where possible!
|
||||
Now that actual chat templates have been adopted more widely, default templates have been deprecated and will be
|
||||
removed in a future release. We strongly recommend setting the `chat_template` attribute for any tokenizers that
|
||||
still depend on them!
|
||||
|
||||
### What template should I use?
|
||||
|
||||
@ -718,9 +382,9 @@ input formats. One popular choice is the `ChatML` format, and this is a good, fl
|
||||
It looks like this:
|
||||
|
||||
```
|
||||
{%- for message in messages %}
|
||||
{{- '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}
|
||||
{%- endfor %}
|
||||
{% for message in messages %}
|
||||
{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}
|
||||
{% endfor %}
|
||||
```
|
||||
|
||||
If you like this one, here it is in one-liner form, ready to copy into your code. The one-liner also includes
|
||||
@ -768,43 +432,21 @@ it's time to put an end to them!
|
||||
If you're unfamiliar with Jinja, we generally find that the easiest way to write a chat template is to first
|
||||
write a short Python script that formats messages the way you want, and then convert that script into a template.
|
||||
|
||||
Remember that the template handler will receive the conversation history as a variable called `messages`.
|
||||
You will be able to access `messages` in your template just like you can in Python, which means you can loop over
|
||||
it with `{% for message in messages %}` or access individual messages with `{{ messages[0] }}`, for example.
|
||||
Remember that the template handler will receive the conversation history as a variable called `messages`. Each
|
||||
message is a dictionary with two keys, `role` and `content`. You will be able to access `messages` in your template
|
||||
just like you can in Python, which means you can loop over it with `{% for message in messages %}` or access
|
||||
individual messages with, for example, `{{ messages[0] }}`.
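For example, a plain-Python sketch of the simple space-separated format shown earlier, which you could then translate block by block into Jinja:

```python
def format_chat(messages, eos_token="</s>"):
    # Mirrors the simple template above: a space before user messages,
    # a space between messages, and the EOS token at the end.
    text = ""
    for i, message in enumerate(messages):
        if message["role"] == "user":
            text += " "
        text += message["content"]
        if i < len(messages) - 1:
            text += " "
    return text + eos_token
```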
|
||||
|
||||
You can also use the following tips to convert your code to Jinja:
|
||||
|
||||
### Trimming whitespace
|
||||
|
||||
By default, Jinja will print any whitespace that comes before or after a block. This can be a problem for chat
|
||||
templates, which generally want to be very precise with whitespace! To avoid this, we strongly recommend writing
|
||||
your templates like this:
|
||||
|
||||
```
|
||||
{%- for message in messages %}
|
||||
{{- message['role'] + message['content'] }}
|
||||
{%- endfor %}
|
||||
```
|
||||
|
||||
rather than like this:
|
||||
|
||||
```
|
||||
{% for message in messages %}
|
||||
{{ message['role'] + message['content'] }}
|
||||
{% endfor %}
|
||||
```
|
||||
|
||||
Adding `-` will strip any whitespace that comes before the block. The second example looks innocent, but the newline
|
||||
and indentation may end up being included in the output, which is probably not what you want!
|
||||
|
||||
### For loops
|
||||
|
||||
For loops in Jinja look like this:
|
||||
|
||||
```
|
||||
{%- for message in messages %}
|
||||
{{- message['content'] }}
|
||||
{%- endfor %}
|
||||
{% for message in messages %}
|
||||
{{ message['content'] }}
|
||||
{% endfor %}
|
||||
```
|
||||
|
||||
Note that whatever's inside the {{ expression block }} will be printed to the output. You can use operators like
|
||||
@ -815,9 +457,9 @@ Note that whatever's inside the {{ expression block }} will be printed to the ou
|
||||
If statements in Jinja look like this:
|
||||
|
||||
```
|
||||
{%- if message['role'] == 'user' %}
|
||||
{{- message['content'] }}
|
||||
{%- endif %}
|
||||
{% if message['role'] == 'user' %}
|
||||
{{ message['content'] }}
|
||||
{% endif %}
|
||||
```
|
||||
|
||||
Note how where Python uses whitespace to mark the beginnings and ends of `for` and `if` blocks, Jinja requires you
|
||||
@ -833,26 +475,14 @@ conversation. Here's an example that puts these ideas together to add a generati
|
||||
conversation if add_generation_prompt is `True`:
|
||||
|
||||
```
|
||||
{%- if loop.last and add_generation_prompt %}
|
||||
{{- bos_token + 'Assistant:\n' }}
|
||||
{%- endif %}
|
||||
{% if loop.last and add_generation_prompt %}
|
||||
{{ bos_token + 'Assistant:\n' }}
|
||||
{% endif %}
|
||||
```
|
||||
|
||||
### Compatibility with non-Python Jinja
|
||||
### Notes on whitespace
|
||||
|
||||
There are multiple implementations of Jinja in various languages. They generally have the same syntax,
|
||||
but a key difference is that when you're writing a template in Python you can use Python methods, such as
|
||||
`.lower()` on strings or `.items()` on dicts. This will break if someone tries to use your template on a non-Python
|
||||
implementation of Jinja. Non-Python implementations are particularly common in deployment environments, where JS
|
||||
and Rust are very popular.
|
||||
|
||||
Don't panic, though! There are a few easy changes you can make to your templates to ensure they're compatible across
|
||||
all implementations of Jinja:
|
||||
|
||||
- Replace Python methods with Jinja filters. These usually have the same name, for example `string.lower()` becomes
|
||||
`string|lower`, and `dict.items()` becomes `dict|items`. One notable change is that `string.strip()` becomes `string|trim`.
|
||||
See the [list of built-in filters](https://jinja.palletsprojects.com/en/3.1.x/templates/#builtin-filters)
|
||||
in the Jinja documentation for more.
|
||||
- Replace `True`, `False` and `None`, which are Python-specific, with `true`, `false` and `none`.
|
||||
- Directly rendering a dict or list may give different results in other implementations (for example, string entries
|
||||
might change from single-quoted to double-quoted). Adding the `tojson` filter can help to ensure consistency here.
|
||||
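If you want to sanity-check a snippet outside of Transformers, a quick sketch with the `jinja2` package (which Transformers uses under the hood) shows the filter replacements side by side; the exact strings are purely illustrative.

```python
from jinja2 import Template

message = {"role": "User", "content": "  Hi there!  "}

# Python-method style: works in Python's Jinja, but may break in JS or Rust ports
python_style = "{{ message['role'].lower() }}: {{ message['content'].strip() }}"

# Filter style: built-in filters behave the same across Jinja implementations
portable_style = "{{ message['role']|lower }}: {{ message['content']|trim }}"

print(Template(python_style).render(message=message))    # user: Hi there!
print(Template(portable_style).render(message=message))  # user: Hi there!

# `tojson` gives consistent quoting when rendering dicts or lists directly
print(Template("{{ message|tojson }}").render(message=message))
```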
As much as possible, we've tried to get Jinja to ignore whitespace outside of {{ expressions }}. However, be aware
|
||||
that Jinja is a general-purpose templating engine, and it may treat whitespace between blocks on the same line
|
||||
as significant and print it to the output. We **strongly** recommend checking that your template isn't printing extra
|
||||
spaces where it shouldn't be before you upload it!
|
@ -195,7 +195,7 @@ inputs = {key: tensor.to(model.device) for key, tensor in inputs.items()}
|
||||
print("Tokenized inputs:\n", inputs)
|
||||
|
||||
# 4: Generate text from the model
|
||||
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.1)
|
||||
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.)
|
||||
print("Generated tokens:\n", outputs)
|
||||
|
||||
# 5: Decode the output back to a string
|
||||
|
@ -327,21 +327,31 @@ For example, to load a [ResNet](../model_doc/resnet) backbone into a [MaskFormer
|
||||
Set `use_pretrained_backbone=True` to load pretrained ResNet weights for the backbone.
|
||||
|
||||
```py
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, ResNetConfig
|
||||
|
||||
config = MaskFormerConfig(backbone="microsoft/resnet-50", use_pretrained_backbone=True) # backbone and neck config
|
||||
config = MaskFormerConfig(backbone="microsoft/resnet-50", use_pretrained_backbone=True) # backbone and neck config
|
||||
model = MaskFormerForInstanceSegmentation(config) # head
|
||||
```
|
||||
|
||||
You could also load the backbone config separately and then pass it to the model config.
|
||||
|
||||
```py
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, ResNetConfig
|
||||
|
||||
backbone_config = ResNetConfig.from_pretrained("microsoft/resnet-50")
|
||||
config = MaskFormerConfig(backbone_config=backbone_config)
|
||||
model = MaskFormerForInstanceSegmentation(config)
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="random weights">
|
||||
|
||||
Set `use_pretrained_backbone=False` to randomly initialize a ResNet backbone.
|
||||
|
||||
```py
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, ResNetConfig
|
||||
|
||||
config = MaskFormerConfig(backbone="microsoft/resnet-50", use_pretrained_backbone=False) # backbone and neck config
|
||||
config = MaskFormerConfig(backbone="microsoft/resnet-50", use_pretrained_backbone=False) # backbone and neck config
|
||||
model = MaskFormerForInstanceSegmentation(config) # head
|
||||
```
|
||||
|
||||
@ -356,43 +366,15 @@ model = MaskFormerForInstanceSegmentation(config)
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions id="timm backbone">
|
||||
</hfoptions>
|
||||
|
||||
[timm](https://hf.co/docs/timm/index) models are loaded within a model with `use_timm_backbone=True` or with [`TimmBackbone`] and [`TimmBackboneConfig`].
|
||||
|
||||
Use `use_timm_backbone=True` and `use_pretrained_backbone=True` to load pretrained timm weights for the backbone.
|
||||
|
||||
```python
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation
|
||||
|
||||
config = MaskFormerConfig(backbone="resnet50", use_pretrained_backbone=True, use_timm_backbone=True) # backbone and neck config
|
||||
model = MaskFormerForInstanceSegmentation(config) # head
|
||||
```
|
||||
|
||||
Set `use_timm_backbone=True` and `use_pretrained_backbone=False` to load a randomly initialized timm backbone.
|
||||
|
||||
```python
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation
|
||||
|
||||
config = MaskFormerConfig(backbone="resnet50", use_pretrained_backbone=False, use_timm_backbone=True) # backbone and neck config
|
||||
model = MaskFormerForInstanceSegmentation(config) # head
|
||||
```
|
||||
|
||||
You could also load the backbone config and use it to create a `TimmBackbone` or pass it to the model config. Timm backbones will load pretrained weights by default. Set `use_pretrained_backbone=False` to load randomly initialized weights.
|
||||
[timm](https://hf.co/docs/timm/index) models are loaded with [`TimmBackbone`] and [`TimmBackboneConfig`].
|
||||
|
||||
```python
|
||||
from transformers import TimmBackboneConfig, TimmBackbone
|
||||
|
||||
backbone_config = TimmBackboneConfig("resnet50", use_pretrained_backbone=False)
|
||||
|
||||
# Create a backbone class
|
||||
backbone = TimmBackbone(config=backbone_config)
|
||||
|
||||
# Create a model with a timm backbone
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation
|
||||
|
||||
config = MaskFormerConfig(backbone_config=backbone_config)
|
||||
model = MaskFormerForInstanceSegmentation(config)
|
||||
backbone_config = TimmBackboneConfig("resnet50")
|
||||
model = TimmBackbone(config=backbone_config)
|
||||
```
|
||||
|
||||
## Feature extractor
|
||||
|
@ -16,11 +16,11 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# DeepSpeed
|
||||
|
||||
[DeepSpeed](https://www.deepspeed.ai/) is a PyTorch optimization library that makes distributed training memory-efficient and fast. At its core is the [Zero Redundancy Optimizer (ZeRO)](https://hf.co/papers/1910.02054) which enables training large models at scale. ZeRO works in several stages:
|
||||
[DeepSpeed](https://www.deepspeed.ai/) is a PyTorch optimization library that makes distributed training memory-efficient and fast. At its core is the [Zero Redundancy Optimizer (ZeRO)](https://hf.co/papers/1910.02054) which enables training large models at scale. ZeRO works in several stages:
|
||||
|
||||
* ZeRO-1, optimizer state partitioning across GPUs
|
||||
* ZeRO-1, optimizer state partitioning across GPUs
|
||||
* ZeRO-2, gradient partitioning across GPUs
|
||||
* ZeRO-3, parameter partitioning across GPUs
|
||||
* ZeRO-3, parameter partitioning across GPUs
|
||||
|
||||
In GPU-limited environments, ZeRO also enables offloading optimizer memory and computation from the GPU to the CPU to fit and train really large models on a single GPU. DeepSpeed is integrated with the Transformers [`Trainer`] class for all ZeRO stages and offloading. All you need to do is provide a config file or you can use a provided template. For inference, Transformers support ZeRO-3 and offloading since it allows loading huge models.
|
||||
|
||||
@ -159,7 +159,7 @@ There are three types of configuration parameters:
|
||||
|
||||
You could also modify the DeepSpeed configuration and edit [`TrainingArguments`] from it:
|
||||
|
||||
1. Create or load a DeepSpeed configuration to use as the main configuration
|
||||
1. Create or load a DeepSpeed configuration to use as the main configuration
|
||||
2. Create a [`TrainingArguments`] object based on these DeepSpeed configuration values
|
||||
|
||||
Some values, such as `scheduler.params.total_num_steps` are calculated by the [`Trainer`] during training.
|
||||
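As an illustration, a minimal sketch of that two-step flow might look like the following. The `ds_config.json` path and the specific keys are assumptions for illustration, and the sketch assumes the config uses concrete values rather than `"auto"` placeholders.

```python
import json
from transformers import TrainingArguments

# 1. Load (or build) the DeepSpeed config that acts as the single source of truth
with open("ds_config.json") as f:
    ds_config = json.load(f)

# 2. Derive the matching TrainingArguments from those values, and hand the same
#    config back to the Trainer through the `deepspeed` argument
training_args = TrainingArguments(
    output_dir="output",
    per_device_train_batch_size=ds_config["train_micro_batch_size_per_gpu"],
    gradient_accumulation_steps=ds_config.get("gradient_accumulation_steps", 1),
    deepspeed="ds_config.json",
)
```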
@ -191,7 +191,7 @@ ZeRO-1 shards the optimizer states across GPUs, and you can expect a tiny speed
|
||||
</hfoption>
|
||||
<hfoption id="ZeRO-2">
|
||||
|
||||
ZeRO-2 shards the optimizer and gradients across GPUs. This stage is primarily used for training since its features are not relevant to inference. Some important parameters to configure for better performance include:
|
||||
ZeRO-2 shards the optimizer and gradients across GPUs. This stage is primarily used for training since its features are not relevant to inference. Some important parameters to configure for better performance include:
|
||||
|
||||
* `offload_optimizer` should be enabled to reduce GPU memory usage.
|
||||
* `overlap_comm` when set to `true` trades off increased GPU memory usage to lower allreduce latency. This feature uses 4.5x the `allgather_bucket_size` and `reduce_bucket_size` values. In this example, they're set to `5e8` which means it requires 9GB of GPU memory. If your GPU memory is 8GB or less, you should reduce `overlap_comm` to lower the memory requirements and prevent an out-of-memory (OOM) error.
|
||||
@ -226,7 +226,7 @@ ZeRO-3 shards the optimizer, gradient, and parameters across GPUs. Unlike ZeRO-2
|
||||
* `pin_memory: true` can improve throughput, but less memory becomes available for other processes because the pinned memory is reserved for the specific process that requested it and it's typically accessed much faster than normal CPU memory.
|
||||
* `stage3_max_live_parameters` is the upper limit on how many full parameters you want to keep on the GPU at any given time. Reduce this value if you encounter an OOM error.
|
||||
* `stage3_max_reuse_distance` is a value for determining when a parameter is used again in the future, and it helps decide whether to throw the parameter away or to keep it. If the parameter is going to be reused (if the value is less than `stage3_max_reuse_distance`), then it is kept to reduce communication overhead. This is super helpful when activation checkpointing is enabled and you want to keep the parameter in the forward recompute until the backward pass. But reduce this value if you encounter an OOM error.
|
||||
* `stage3_gather_16bit_weights_on_model_save` consolidates fp16 weights when a model is saved. For large models and multiple GPUs, this is expensive in terms of memory and speed. You should enable it if you're planning on resuming training.
|
||||
* `stage3_gather_16bit_weights_on_model_save` consolidates fp16 weights when a model is saved. For large models and multiple GPUs, this is expensive in terms of memory and speed. You should enable it if you're planning on resuming training.
|
||||
* `sub_group_size` controls which parameters are updated during the optimizer step. Parameters are grouped into buckets of `sub_group_size` and each bucket is updated one at a time. When used with NVMe offload, `sub_group_size` determines when model states are moved in and out of CPU memory from during the optimization step. This prevents running out of CPU memory for extremely large models. `sub_group_size` can be left to its default value if you aren't using NVMe offload, but you may want to change it if you:
|
||||
|
||||
1. Run into an OOM error during the optimizer step. In this case, reduce `sub_group_size` to reduce memory usage of the temporary buffers.
|
||||
|
@ -176,11 +176,11 @@ An increasing sequence: one, two, three, four, five, six, seven, eight, nine, te
|
||||
|
||||
## Watermarking
|
||||
|
||||
The `generate()` method supports watermarking the generated text by randomly marking a portion of tokens as "green".
|
||||
When generating the "green" will have a small 'bias' value added to their logits, thus having a higher chance to be generated.
|
||||
The watermarked text can be detected by calculating the proportion of "green" tokens in the text and estimating how likely it is
|
||||
statistically to obtain that amount of "green" tokens for human-generated text. This watermarking strategy was proposed in the paper
|
||||
["On the Reliability of Watermarks for Large Language Models"](https://arxiv.org/abs/2306.04634). For more information on
|
||||
the inner functioning of watermarking, it is recommended to refer to the paper.
|
||||
|
||||
The watermarking can be used with any generative model in `transformers` and does not require an extra classification model
|
||||
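A sketch of how this is wired together is shown below. It assumes the `WatermarkingConfig`/`WatermarkDetector` classes and the `watermarking_config` argument to `generate()` introduced alongside this feature; the checkpoint and parameter values are arbitrary.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, WatermarkDetector, WatermarkingConfig

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
inputs = tokenizer(["Alice and Bob are"], return_tensors="pt")

# Add a small bias to the logits of "green" tokens during generation
watermarking_config = WatermarkingConfig(bias=2.5, seeding_scheme="selfhash")
out = model.generate(**inputs, watermarking_config=watermarking_config, do_sample=False, max_new_tokens=20)

# Detection only needs the model config, not an extra classification model
detector = WatermarkDetector(model_config=model.config, device="cpu", watermarking_config=watermarking_config)
print(detector(out, return_dict=True).prediction)
```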
@ -225,21 +225,10 @@ array([True, True])
|
||||
## Decoding strategies
|
||||
|
||||
Certain combinations of the `generate()` parameters, and ultimately `generation_config`, can be used to enable specific
|
||||
decoding strategies. If you are new to this concept, we recommend reading
|
||||
[this blog post that illustrates how common decoding strategies work](https://huggingface.co/blog/how-to-generate).
|
||||
decoding strategies. If you are new to this concept, we recommend reading [this blog post that illustrates how common decoding strategies work](https://huggingface.co/blog/how-to-generate).
|
||||
|
||||
Here, we'll show some of the parameters that control the decoding strategies and illustrate how you can use them.
|
||||
|
||||
<Tip>
|
||||
|
||||
Selecting a given decoding strategy is not the only way you can influence the outcome of `generate()` with your model.
|
||||
The decoding strategies act based (mostly) on the logits, the distribution of probabilities for the next token, and
|
||||
thus selecting a good logits manipulation strategy can go a long way! In other words, manipulating the logits is another
|
||||
dimension you can act upon, in addition to selecting a decoding strategy. Popular logits manipulation strategies include
|
||||
`top_p`, `min_p`, and `repetition_penalty` -- you can check the full list in the [`GenerationConfig`] class.
|
||||
|
||||
</Tip>
|
||||
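For illustration, here is a minimal sketch that keeps the decoding strategy (multinomial sampling) fixed and only turns a couple of those logits knobs; the checkpoint, prompt, and values are arbitrary.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
inputs = tokenizer("Today was an amazing day because", return_tensors="pt")

# Same decoding strategy (multinomial sampling), but the logits are reshaped first:
# top_p keeps only the most likely tokens and repetition_penalty discourages loops
outputs = model.generate(
    **inputs,
    do_sample=True,
    top_p=0.9,
    repetition_penalty=1.2,
    max_new_tokens=20,
)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```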
|
||||
### Greedy Search
|
||||
|
||||
[`generate`] uses greedy search decoding by default so you don't have to pass any parameters to enable it. This means the parameters `num_beams` is set to 1 and `do_sample=False`.
|
||||
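A minimal sketch, using an arbitrary small checkpoint and prompt, looks like this:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
inputs = tokenizer("I look forward to", return_tensors="pt")

# No decoding arguments are needed: num_beams=1 and do_sample=False are the defaults
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```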
@ -458,59 +447,3 @@ just like in multinomial sampling. However, in assisted decoding, reducing the t
|
||||
|
||||
Alternatively, you can also set the `prompt_lookup_num_tokens` to trigger n-gram based assisted decoding, as opposed
|
||||
to model based assisted decoding. You can read more about it [here](https://twitter.com/joao_gante/status/1747322413006643259).
|
||||
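A minimal sketch of n-gram based assisted decoding is shown below; the checkpoint and prompt are arbitrary, and `prompt_lookup_num_tokens` controls how many candidate tokens are drawn per step.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
inputs = tokenizer("The keys to a long and happy life are", return_tensors="pt")

# Candidate tokens are looked up as n-grams in the prompt itself,
# so no separate assistant model is required
outputs = model.generate(**inputs, prompt_lookup_num_tokens=10, max_new_tokens=20)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```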
### DoLa Decoding
|
||||
|
||||
**D**ecoding by C**o**ntrasting **La**yers (DoLa) is a contrastive decoding strategy to improve the factuality and reduce the
|
||||
hallucinations of LLMs, as described in the ICLR 2024 paper [DoLa: Decoding by Contrasting Layers Improves Factuality in Large Language Models](https://arxiv.org/abs/2309.03883).
|
||||
|
||||
DoLa is achieved by contrasting the differences in logits obtained from final
|
||||
layers versus earlier layers, thus amplifying the factual knowledge localized to particular parts of the transformer layers.
|
||||
|
||||
Do the following two steps to activate DoLa decoding when calling the `model.generate` function:
|
||||
1. Set the `dola_layers` argument, which can be either a string or a list of integers.
|
||||
- If set to a string, it can be one of `low`, `high`.
|
||||
- If set to a list of integers, it should be a list of layer indices between 0 and the total number of layers in the model. The 0-th layer is word embedding, and the 1st layer is the first transformer layer, and so on.
|
||||
2. Setting `repetition_penalty = 1.2` is suggested to reduce repetition in DoLa decoding.
|
||||
|
||||
See the following examples for DoLa decoding with the 32-layer LLaMA-7B model.
|
||||
|
||||
```python
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed
|
||||
>>> import torch
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b", torch_dtype=torch.float16)
|
||||
>>> device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
||||
>>> model.to(device)
|
||||
>>> set_seed(42)
|
||||
|
||||
>>> text = "On what date was the Declaration of Independence officially signed?"
|
||||
>>> inputs = tokenizer(text, return_tensors="pt").to(device)
|
||||
|
||||
# Vanilla greedy decoding
|
||||
>>> vanilla_output = model.generate(**inputs, do_sample=False, max_new_tokens=50)
|
||||
>>> tokenizer.batch_decode(vanilla_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
|
||||
['\nThe Declaration of Independence was signed on July 4, 1776.\nWhat was the date of the signing of the Declaration of Independence?\nThe Declaration of Independence was signed on July 4,']
|
||||
|
||||
# DoLa decoding with contrasting higher part of layers (layers 16,18,...,30)
|
||||
>>> dola_high_output = model.generate(**inputs, do_sample=False, max_new_tokens=50, dola_layers='high')
|
||||
>>> tokenizer.batch_decode(dola_high_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
|
||||
['\nJuly 4, 1776, when the Continental Congress voted to separate from Great Britain. The 56 delegates to the Continental Congress signed the Declaration on August 2, 1776.']
|
||||
|
||||
# DoLa decoding with contrasting specific layers (layers 28 and 30)
|
||||
>>> dola_custom_output = model.generate(**inputs, do_sample=False, max_new_tokens=50, dola_layers=[28,30], repetition_penalty=1.2)
|
||||
>>> tokenizer.batch_decode(dola_custom_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
|
||||
['\nIt was officially signed on 2 August 1776, when 56 members of the Second Continental Congress, representing the original 13 American colonies, voted unanimously for the resolution for independence. The 2']
|
||||
```
|
||||
|
||||
#### Understanding the `dola_layers` argument
|
||||
|
||||
`dola_layers` stands for the candidate layers in premature layer selection, as described in the DoLa paper. The selected premature layer will be contrasted with the final layer.
|
||||
|
||||
Setting `dola_layers` to `'low'` or `'high'` will select the lower or higher part of the layers to contrast, respectively.
|
||||
- For `N`-layer models with `N <= 40` layers, the layers of `range(0, N // 2, 2)` and `range(N // 2, N, 2)` are used for `'low'` and `'high'` layers, respectively.
|
||||
- For models with `N > 40` layers, the layers of `range(0, 20, 2)` and `range(N - 20, N, 2)` are used for `'low'` and `'high'` layers, respectively.
|
||||
- If the model has tied word embeddings, we skip the word embeddings (0-th) layer and start from the 2nd layer, as the early exit from word embeddings will become an identity function.
|
||||
- Set `dola_layers` to a list of integers to contrast manually specified layers. For example, setting `dola_layers=[28,30]` will contrast the final layer (the 32nd layer) with the 28th and 30th layers.
|
||||
|
||||
The paper suggests contrasting `'high'` layers to improve short-answer tasks like TruthfulQA, and contrasting `'low'` layers to improve all other long-answer reasoning tasks, such as GSM8K, StrategyQA, FACTOR, and VicunaQA. Applying DoLa to smaller models like GPT-2 is not recommended, as shown in Appendix N of the paper.
|
||||
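As a quick sanity check of those ranges, here is a toy calculation for the 32-layer model used above (illustrative only; the tied-embedding skip is omitted):

```python
# Which candidate ("premature") layers are selected for a model with N layers,
# following the ranges described above
N = 32  # e.g. LLaMA-7B

low_layers = list(range(0, N // 2, 2)) if N <= 40 else list(range(0, 20, 2))
high_layers = list(range(N // 2, N, 2)) if N <= 40 else list(range(N - 20, N, 2))

print(low_layers)   # [0, 2, 4, 6, 8, 10, 12, 14]
print(high_layers)  # [16, 18, 20, 22, 24, 26, 28, 30]
```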
|
@ -63,7 +63,6 @@ For now the supported model architectures are the architectures that have been v
|
||||
|
||||
- LLaMa
|
||||
- Mistral
|
||||
- Qwen2
|
||||
|
||||
## Example usage
|
||||
|
||||
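A minimal sketch of what that usage can look like is shown below, assuming the `gguf_file` argument to `from_pretrained`; the repo id and the `.gguf` filename are placeholders, so point them at any GGUF checkpoint of a supported architecture.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder repo id and filename -- replace with your own GGUF checkpoint
model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
gguf_file = "tinyllama-1.1b-chat-v1.0.Q6_K.gguf"

# The GGUF weights are dequantized into a regular Transformers model on load
tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=gguf_file)
model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=gguf_file)
```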
|
@ -139,7 +139,7 @@ reading the whole sentence with a mask to hide future tokens at a certain timest
|
||||
|
||||
### deep learning (DL)
|
||||
|
||||
Machine learning algorithms which use neural networks with several layers.
|
||||
Machine learning algorithms which use neural networks with several layers.
|
||||
|
||||
## E
|
||||
|
||||
@ -519,4 +519,4 @@ A form of model training in which data provided to the model is not labeled. Uns
|
||||
Parallelism technique which performs sharding of the tensors somewhat similar to [TensorParallel](#tensor-parallelism-tp),
|
||||
except the whole tensor gets reconstructed in time for a forward or backward computation, therefore the model doesn't need
|
||||
to be modified. This method also supports various offloading techniques to compensate for limited GPU memory.
|
||||
Learn more about ZeRO [here](perf_train_gpu_many#zero-data-parallelism).
|
||||
Learn more about ZeRO [here](perf_train_gpu_many#zero-data-parallelism).
|
@ -64,6 +64,6 @@ For some quantization methods, they may require "pre-quantizing" the models thro
|
||||
|
||||
6. Write the `_process_model_after_weight_loading` method. This method enables implementing additional features that require manipulating the model after loading the weights.
|
||||
|
||||
7. Document everything! Make sure your quantization method is documented by adding a new file under `docs/source/en/quantization` and adding a new row in the table in `docs/source/en/quantization/overview.md`.
|
||||
7. Document everything! Make sure your quantization method is documented in the [`docs/source/en/quantization.md`](https://github.com/huggingface/transformers/blob/abbffc4525566a48a9733639797c812301218b83/docs/source/en/quantization.md) file.
|
||||
|
||||
8. Add tests! You should add tests by first adding the package in our nightly Dockerfile inside `docker/transformers-quantization-latest-gpu` and then adding a new test file in `tests/quantization/xxx`. Feel free to check out how it is implemented for other quantization methods.
|
@ -88,7 +88,6 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [ByT5](model_doc/byt5) | ✅ | ✅ | ✅ |
|
||||
| [CamemBERT](model_doc/camembert) | ✅ | ✅ | ❌ |
|
||||
| [CANINE](model_doc/canine) | ✅ | ❌ | ❌ |
|
||||
| [Chameleon](model_doc/chameleon) | ✅ | ❌ | ❌ |
|
||||
| [Chinese-CLIP](model_doc/chinese_clip) | ✅ | ❌ | ❌ |
|
||||
| [CLAP](model_doc/clap) | ✅ | ❌ | ❌ |
|
||||
| [CLIP](model_doc/clip) | ✅ | ✅ | ✅ |
|
||||
@ -146,7 +145,6 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [Funnel Transformer](model_doc/funnel) | ✅ | ✅ | ❌ |
|
||||
| [Fuyu](model_doc/fuyu) | ✅ | ❌ | ❌ |
|
||||
| [Gemma](model_doc/gemma) | ✅ | ❌ | ✅ |
|
||||
| [Gemma2](model_doc/gemma2) | ✅ | ❌ | ❌ |
|
||||
| [GIT](model_doc/git) | ✅ | ❌ | ❌ |
|
||||
| [GLPN](model_doc/glpn) | ✅ | ❌ | ❌ |
|
||||
| [GPT Neo](model_doc/gpt_neo) | ✅ | ❌ | ✅ |
|
||||
@ -160,7 +158,6 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [Grounding DINO](model_doc/grounding-dino) | ✅ | ❌ | ❌ |
|
||||
| [GroupViT](model_doc/groupvit) | ✅ | ✅ | ❌ |
|
||||
| [HerBERT](model_doc/herbert) | ✅ | ✅ | ✅ |
|
||||
| [Hiera](model_doc/hiera) | ✅ | ❌ | ❌ |
|
||||
| [Hubert](model_doc/hubert) | ✅ | ✅ | ❌ |
|
||||
| [I-BERT](model_doc/ibert) | ✅ | ❌ | ❌ |
|
||||
| [IDEFICS](model_doc/idefics) | ✅ | ✅ | ❌ |
|
||||
@ -168,7 +165,6 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [ImageGPT](model_doc/imagegpt) | ✅ | ❌ | ❌ |
|
||||
| [Informer](model_doc/informer) | ✅ | ❌ | ❌ |
|
||||
| [InstructBLIP](model_doc/instructblip) | ✅ | ❌ | ❌ |
|
||||
| [InstructBlipVideo](model_doc/instructblipvideo) | ✅ | ❌ | ❌ |
|
||||
| [Jamba](model_doc/jamba) | ✅ | ❌ | ❌ |
|
||||
| [JetMoe](model_doc/jetmoe) | ✅ | ❌ | ❌ |
|
||||
| [Jukebox](model_doc/jukebox) | ✅ | ❌ | ❌ |
|
||||
@ -185,7 +181,6 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [Llama3](model_doc/llama3) | ✅ | ❌ | ✅ |
|
||||
| [LLaVa](model_doc/llava) | ✅ | ❌ | ❌ |
|
||||
| [LLaVA-NeXT](model_doc/llava_next) | ✅ | ❌ | ❌ |
|
||||
| [LLaVa-NeXT-Video](model_doc/llava-next-video) | ✅ | ❌ | ❌ |
|
||||
| [Longformer](model_doc/longformer) | ✅ | ✅ | ❌ |
|
||||
| [LongT5](model_doc/longt5) | ✅ | ❌ | ✅ |
|
||||
| [LUKE](model_doc/luke) | ✅ | ❌ | ❌ |
|
||||
@ -194,7 +189,6 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [M2M100](model_doc/m2m_100) | ✅ | ❌ | ❌ |
|
||||
| [MADLAD-400](model_doc/madlad-400) | ✅ | ✅ | ✅ |
|
||||
| [Mamba](model_doc/mamba) | ✅ | ❌ | ❌ |
|
||||
| [mamba2](model_doc/mamba2) | ✅ | ❌ | ❌ |
|
||||
| [Marian](model_doc/marian) | ✅ | ✅ | ✅ |
|
||||
| [MarkupLM](model_doc/markuplm) | ✅ | ❌ | ❌ |
|
||||
| [Mask2Former](model_doc/mask2former) | ✅ | ❌ | ❌ |
|
||||
@ -206,7 +200,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [Megatron-BERT](model_doc/megatron-bert) | ✅ | ❌ | ❌ |
|
||||
| [Megatron-GPT2](model_doc/megatron_gpt2) | ✅ | ✅ | ✅ |
|
||||
| [MGP-STR](model_doc/mgp-str) | ✅ | ❌ | ❌ |
|
||||
| [Mistral](model_doc/mistral) | ✅ | ✅ | ✅ |
|
||||
| [Mistral](model_doc/mistral) | ✅ | ❌ | ✅ |
|
||||
| [Mixtral](model_doc/mixtral) | ✅ | ❌ | ❌ |
|
||||
| [mLUKE](model_doc/mluke) | ✅ | ❌ | ❌ |
|
||||
| [MMS](model_doc/mms) | ✅ | ✅ | ✅ |
|
||||
@ -223,7 +217,6 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [MusicGen Melody](model_doc/musicgen_melody) | ✅ | ❌ | ❌ |
|
||||
| [MVP](model_doc/mvp) | ✅ | ❌ | ❌ |
|
||||
| [NAT](model_doc/nat) | ✅ | ❌ | ❌ |
|
||||
| [Nemotron](model_doc/nemotron) | ✅ | ❌ | ❌ |
|
||||
| [Nezha](model_doc/nezha) | ✅ | ❌ | ❌ |
|
||||
| [NLLB](model_doc/nllb) | ✅ | ❌ | ❌ |
|
||||
| [NLLB-MOE](model_doc/nllb-moe) | ✅ | ❌ | ❌ |
|
||||
@ -256,7 +249,6 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [PVTv2](model_doc/pvt_v2) | ✅ | ❌ | ❌ |
|
||||
| [QDQBert](model_doc/qdqbert) | ✅ | ❌ | ❌ |
|
||||
| [Qwen2](model_doc/qwen2) | ✅ | ❌ | ❌ |
|
||||
| [Qwen2Audio](model_doc/qwen2_audio) | ✅ | ❌ | ❌ |
|
||||
| [Qwen2MoE](model_doc/qwen2_moe) | ✅ | ❌ | ❌ |
|
||||
| [RAG](model_doc/rag) | ✅ | ✅ | ❌ |
|
||||
| [REALM](model_doc/realm) | ✅ | ❌ | ❌ |
|
||||
@ -270,8 +262,6 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [RoBERTa-PreLayerNorm](model_doc/roberta-prelayernorm) | ✅ | ✅ | ✅ |
|
||||
| [RoCBert](model_doc/roc_bert) | ✅ | ❌ | ❌ |
|
||||
| [RoFormer](model_doc/roformer) | ✅ | ✅ | ✅ |
|
||||
| [RT-DETR](model_doc/rt_detr) | ✅ | ❌ | ❌ |
|
||||
| [RT-DETR-ResNet](model_doc/rt_detr_resnet) | ✅ | ❌ | ❌ |
|
||||
| [RWKV](model_doc/rwkv) | ✅ | ❌ | ❌ |
|
||||
| [SAM](model_doc/sam) | ✅ | ✅ | ❌ |
|
||||
| [SeamlessM4T](model_doc/seamless_m4t) | ✅ | ❌ | ❌ |
|
||||
@ -348,6 +338,5 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [XLSR-Wav2Vec2](model_doc/xlsr_wav2vec2) | ✅ | ✅ | ✅ |
|
||||
| [YOLOS](model_doc/yolos) | ✅ | ❌ | ❌ |
|
||||
| [YOSO](model_doc/yoso) | ✅ | ❌ | ❌ |
|
||||
| [ZoeDepth](model_doc/zoedepth) | ✅ | ❌ | ❌ |
|
||||
|
||||
<!-- End table-->
|
||||
|
@ -169,7 +169,7 @@ Pretrained models are downloaded and locally cached at: `~/.cache/huggingface/hu
|
||||
|
||||
## Offline mode
|
||||
|
||||
Run 🤗 Transformers in a firewalled or offline environment with locally cached files by setting the environment variable `HF_HUB_OFFLINE=1`.
|
||||
Run 🤗 Transformers in a firewalled or offline environment with locally cached files by setting the environment variable `TRANSFORMERS_OFFLINE=1`.
|
||||
|
||||
<Tip>
|
||||
|
||||
@ -178,7 +178,7 @@ Add [🤗 Datasets](https://huggingface.co/docs/datasets/) to your offline train
|
||||
</Tip>
|
||||
|
||||
```bash
|
||||
HF_DATASETS_OFFLINE=1 HF_HUB_OFFLINE=1 \
|
||||
HF_DATASETS_OFFLINE=1 TRANSFORMERS_OFFLINE=1 \
|
||||
python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ...
|
||||
```
|
||||
|
||||
|
@ -360,12 +360,6 @@ A [`Constraint`] can be used to force the generation to include specific tokens
|
||||
[[autodoc]] Cache
|
||||
- update
|
||||
|
||||
[[autodoc]] CacheConfig
|
||||
- update
|
||||
|
||||
[[autodoc]] QuantizedCacheConfig
|
||||
- validate
|
||||
|
||||
[[autodoc]] DynamicCache
|
||||
- update
|
||||
- get_seq_length
|
||||
@ -373,49 +367,16 @@ A [`Constraint`] can be used to force the generation to include specific tokens
|
||||
- to_legacy_cache
|
||||
- from_legacy_cache
|
||||
|
||||
[[autodoc]] QuantizedCache
|
||||
- update
|
||||
- get_seq_length
|
||||
|
||||
[[autodoc]] QuantoQuantizedCache
|
||||
|
||||
[[autodoc]] HQQQuantizedCache
|
||||
|
||||
[[autodoc]] SinkCache
|
||||
- update
|
||||
- get_seq_length
|
||||
- reorder_cache
|
||||
|
||||
[[autodoc]] OffloadedCache
|
||||
- update
|
||||
- prefetch_layer
|
||||
- evict_previous_layer
|
||||
|
||||
[[autodoc]] StaticCache
|
||||
- update
|
||||
- get_seq_length
|
||||
- reset
|
||||
|
||||
[[autodoc]] HybridCache
|
||||
- update
|
||||
- get_seq_length
|
||||
- reset
|
||||
|
||||
[[autodoc]] SlidingWindowCache
|
||||
- update
|
||||
- reset
|
||||
|
||||
[[autodoc]] EncoderDecoderCache
|
||||
- get_seq_length
|
||||
- to_legacy_cache
|
||||
- from_legacy_cache
|
||||
- reset
|
||||
- reorder_cache
|
||||
|
||||
[[autodoc]] MambaCache
|
||||
- update_conv_state
|
||||
- update_ssm_state
|
||||
- reset
|
||||
|
||||
## Watermark Utils
|
||||
|
||||
|
@ -1,346 +0,0 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Best Practices for Generation with Cache
|
||||
|
||||
Efficient caching is crucial for optimizing the performance of models in various generative tasks,
|
||||
including text generation, translation, summarization and other transformer-based applications.
|
||||
Effective caching helps reduce computation time and improve response rates, especially in real-time or resource-intensive applications.
|
||||
|
||||
Transformers support various caching methods, leveraging "Cache" classes to abstract and manage the caching logic.
|
||||
This document outlines best practices for using these classes to maximize performance and efficiency.
|
||||
Check out all the available `Cache` classes in the [API documentation](./internal/generation_utils.md).
|
||||
|
||||
## What is a Cache and why should we care?
|
||||
|
||||
Imagine you’re having a conversation with someone, and instead of remembering what was said previously, you have to start from scratch every time you respond. This would be slow and inefficient, right? In the world of Transformer models, a similar concept applies, and that's where Caching keys and values come into play. From now on, I'll refer to the concept as KV Cache.
|
||||
|
||||
KV cache is needed to optimize the generation in autoregressive models, where the model predicts text token by token. This process can be slow since the model can generate only one token at a time, and each new prediction is dependent on the previous context. That means, to predict token number 1000 in the generation, you need information from the previous 999 tokens, which comes in the form of some matrix multiplications across the representations of those tokens. But to predict token number 1001, you also need the same information from the first 999 tokens, plus additional information from token number 1000. That is where key-value cache is used to optimize the sequential generation process by storing previous calculations to reuse in subsequent tokens, so they don't need to be computed again.
|
||||
|
||||
More concretely, key-value cache acts as a memory bank for these generative models, where the model stores key-value pairs derived from self-attention layers for previously processed tokens. By storing this information, the model can avoid redundant computations and instead retrieve keys and values of previous tokens from the cache.
|
||||
|
||||
<details>
|
||||
<summary><em>For the Curious Minds Who Like to Dive Deep</em></summary>
|
||||
|
||||
### Under the Hood: How Cache Object Works in Attention Mechanism
|
||||
|
||||
When utilizing a cache object in the input, the Attention module performs several critical steps to integrate past and present information seamlessly.
|
||||
|
||||
The Attention module concatenates the current key-values with the past key-values stored in the cache, and the combined key-values are used to compute the attention scores. This results in attention weights of shape `(new_tokens_length, past_kv_length + new_tokens_length)`, ensuring that the model considers both the previous context and the new input.
|
||||
|
||||
Therefore, when iteratively calling `forward()` instead of the `generate()` method, it’s crucial to ensure that the attention mask shape matches the combined length of past and current key-values. The attention mask should have the shape `(batch_size, past_kv_length + new_tokens_length)`. This is usually handled internally when you call the `generate()` method. If you want to implement your own generation loop with Cache classes, take this into consideration and prepare the attention mask to hold values for both current and past tokens.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
One important concept you need to know when writing your own generation loop is `cache_position`. In case you want to reuse an already filled Cache object by calling `forward()`, you have to pass in a valid `cache_position`, which indicates the positions of the inputs in the sequence. Note that `cache_position` is not affected by padding, and always adds one more position for each token. For example, if the key/value cache contains 10 tokens (no matter how many of them are pad tokens), the cache position for the next token should be `torch.tensor([10])`.
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
See an example below for how to implement your own generation loop.
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
|
||||
|
||||
>>> model_id = "meta-llama/Llama-2-7b-chat-hf"
|
||||
>>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="cuda:0")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(model_id)
|
||||
|
||||
>>> past_key_values = DynamicCache()
|
||||
>>> messages = [{"role": "user", "content": "Hello, what's your name."}]
|
||||
>>> inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt", return_dict=True).to("cuda:0")
|
||||
|
||||
>>> generated_ids = inputs.input_ids
|
||||
>>> cache_position = torch.arange(inputs.input_ids.shape[1], dtype=torch.int64, device="cuda:0")
|
||||
>>> max_new_tokens = 10
|
||||
|
||||
>>> for _ in range(max_new_tokens):
|
||||
... outputs = model(**inputs, cache_position=cache_position, past_key_values=past_key_values, use_cache=True)
|
||||
... # Greedily sample one next token
|
||||
... next_token_ids = outputs.logits[:, -1:].argmax(-1)
|
||||
... generated_ids = torch.cat([generated_ids, next_token_ids], dim=-1)
|
||||
...
|
||||
...     # Prepare inputs for the next generation step by leaving unprocessed tokens, in our case we have only one new token
|
||||
... # and expanding attn mask for the new token, as explained above
|
||||
... attention_mask = inputs["attention_mask"]
|
||||
... attention_mask = torch.cat([attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1)
|
||||
... inputs = {"input_ids": next_token_ids, "attention_mask": attention_mask}
|
||||
... cache_position = cache_position[-1:] + 1 # add one more position for the next token
|
||||
|
||||
>>> print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])
|
||||
"[INST] Hello, what's your name. [/INST] Hello! My name is LLaMA,"
|
||||
```
|
||||
|
||||
</details>
|
||||
|
||||
|
||||
|
||||
## Generate with Cache
|
||||
|
||||
In 🤗 Transformers, we support various Cache types to optimize the performance across different models and tasks. By default, all models generate with caching,
|
||||
with the [`~DynamicCache`] class being the default cache for most models. It allows us to dynamically grow cache size, by saving more and more keys and values as we generate. If for some reason you don't want to use caches, you can pass `use_cache=False` into the `generate()` method.
|
||||
|
||||
Refer to the table below to see the difference between cache types and choose the one that suits best for your use-case.
|
||||
|
||||
| Cache Type | Memory Efficient | Supports torch.compile() | Initialization Recommended | Latency | Long Context Generation |
|
||||
|---------------------|------------------|--------------------------|----------------------------|----------|--------------------------|
|
||||
| Dynamic Cache | No | No | No | Mid | No |
|
||||
| Static Cache | No | Yes | Yes | High | No |
|
||||
| Quantized Cache | Yes | No | No | Low | Yes |
|
||||
| Offloaded Cache | Yes | No | No | Low | No |
|
||||
| Sliding Window Cache| No | Yes | Yes | High | No |
|
||||
| Sink Cache | Yes | No | Yes | Mid | Yes |
|
||||
|
||||
|
||||
These cache classes can be set with a `cache_implementation` argument when generating. To learn about the available options for the cache_implementation flag, please refer to the [API Documentation](./main_classes/text_generation.md#transformers.GenerationConfig). Now, let's explore each cache type in detail and see how to use them. Note that the below examples are for decoder-only Transformer-based models. We also support ["Model-Specific Cache"] classes for models such as Mamba or Jamba; keep reading for more details.
|
||||
|
||||
### Quantized Cache
|
||||
|
||||
The key and value cache can occupy a large portion of memory, becoming a [bottleneck for long-context generation](https://huggingface.co/blog/llama31#inference-memory-requirements), especially for Large Language Models.
|
||||
Quantizing the cache when using `generate()` can significantly reduce memory requirements at the cost of speed.
|
||||
|
||||
KV Cache quantization in `transformers` is largely inspired by the paper ["KIVI: A Tuning-Free Asymmetric 2bit Quantization for KV Cache"](https://arxiv.org/abs/2402.02750) and currently supports [`~QuantoQuantizedCache`] and [`~HQQQuantizedCache`] classes. For more information on the inner workings see the paper.
|
||||
|
||||
To enable quantization of the key-value cache, one needs to indicate `cache_implementation="quantized"` in the `generation_config`.
|
||||
Quantization related arguments should be passed to the `generation_config` either as a `dict` or an instance of a [`~QuantizedCacheConfig`] class.
|
||||
One has to indicate which quantization backend to use in the [`~QuantizedCacheConfig`], the default is `quanto`.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Cache quantization can be detrimental in terms of latency if the context length is short and there is enough GPU VRAM available to run without cache quantization. It is recommended to seek balance between memory efficiency and latency.
|
||||
</Tip>
|
||||
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16).to("cuda:0")
|
||||
>>> inputs = tokenizer("I like rock music because", return_tensors="pt").to(model.device)
|
||||
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="quantized", cache_config={"nbits": 4, "backend": "quanto"})
|
||||
>>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])
|
||||
I like rock music because it's loud and energetic. It's a great way to express myself and rel
|
||||
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20)
|
||||
>>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])
|
||||
I like rock music because it's loud and energetic. I like to listen to it when I'm feeling
|
||||
```
|
||||
|
||||
## OffloadedCache
|
||||
|
||||
Similarly to KV cache quantization, [`~OffloadedCache`] strategy aims to reduce GPU VRAM usage.
|
||||
It does so by moving the KV cache for most layers to the CPU.
|
||||
As the model's `forward()` method iterates over the layers, this strategy maintains the current layer cache on the GPU.
|
||||
At the same time, it asynchronously prefetches the next layer cache and sends the previous layer cache back to the CPU.
|
||||
Unlike KV cache quantization, this strategy always produces the same result as the default KV cache implementation.
|
||||
Thus, it can serve as a drop-in replacement or a fallback for it.
|
||||
|
||||
Depending on your model and the characteristics of your generation task (size of context, number of generated tokens, number of beams, etc.)
|
||||
you may notice a small degradation in generation throughput compared to the default KV cache implementation.
|
||||
|
||||
To enable KV cache offloading, pass `cache_implementation="offloaded"` in the `generation_config` or directly to the `generate()` call.
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
>>> ckpt = "microsoft/Phi-3-mini-4k-instruct"
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(ckpt)
|
||||
>>> model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16).to("cuda:0")
|
||||
>>> inputs = tokenizer("Fun fact: The shortest", return_tensors="pt").to(model.device)
|
||||
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=23, cache_implementation="offloaded")
|
||||
>>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])
|
||||
Fun fact: The shortest war in history was between Britain and Zanzibar on August 27, 1896.
|
||||
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=23)
|
||||
>>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])
|
||||
Fun fact: The shortest war in history was between Britain and Zanzibar on August 27, 1896.
|
||||
```
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Cache offloading requires a GPU and can be slower than dynamic KV cache. Use it if you are getting CUDA out of memory errors.
|
||||
|
||||
</Tip>
|
||||
|
||||
The example below shows how KV cache offloading can be used as a fallback strategy.
|
||||
```python
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
>>> def resilient_generate(model, *args, **kwargs):
|
||||
... oom = False
|
||||
... try:
|
||||
... return model.generate(*args, **kwargs)
|
||||
... except torch.cuda.OutOfMemoryError as e:
|
||||
... print(e)
|
||||
... print("retrying with cache_implementation='offloaded'")
|
||||
... oom = True
|
||||
... if oom:
|
||||
... torch.cuda.empty_cache()
|
||||
... kwargs["cache_implementation"] = "offloaded"
|
||||
... return model.generate(*args, **kwargs)
|
||||
...
|
||||
...
|
||||
>>> ckpt = "microsoft/Phi-3-mini-4k-instruct"
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(ckpt)
|
||||
>>> model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16).to("cuda:0")
|
||||
>>> prompt = ["okay "*1000 + "Fun fact: The most"]
|
||||
>>> inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
|
||||
>>> beams = { "num_beams": 40, "num_beam_groups": 40, "num_return_sequences": 40, "diversity_penalty": 1.0, "max_new_tokens": 23, "early_stopping": True, }
|
||||
>>> out = resilient_generate(model, **inputs, **beams)
|
||||
>>> responses = tokenizer.batch_decode(out[:,-28:], skip_special_tokens=True)
|
||||
```
|
||||
|
||||
On a GPU with 50 GB of RAM, running this code will print
|
||||
```
|
||||
CUDA out of memory. Tried to allocate 4.83 GiB. GPU
|
||||
retrying with cache_implementation='offloaded'
|
||||
```
|
||||
before successfully generating 40 beams.
|
||||
|
||||
|
||||
|
||||
### Static Cache
|
||||
|
||||
Since the "DynamicCache" dynamically grows with each generation step, it prevents you from taking advantage of JIT optimizations. The [`~StaticCache`] pre-allocates
|
||||
a specific maximum size for the keys and values, allowing you to generate up to the maximum length without having to modify cache size. Check the below usage example.
|
||||
|
||||
For more examples with Static Cache and JIT compilation, take a look at [StaticCache & torchcompile](./llm_optims.md#static-kv-cache-and-torchcompile)
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto")
|
||||
>>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
|
||||
|
||||
>>> # simply pass the cache implementation="static"
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="static")
|
||||
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
|
||||
"Hello, my name is [Your Name], and I am a [Your Profession] with [Number of Years] of"
|
||||
```
|
||||
|
||||
### Sliding Window Cache
|
||||
|
||||
As the name suggests, this cache type implements a sliding window over previous keys and values, retaining only the last `sliding_window` tokens. It should be used with models like Mistral that support sliding window attention. Additionally, similar to Static Cache, this one is JIT-friendly and can be used with the same compile techniques as Static Cache.
|
||||
|
||||
Note that you can use this cache only for models that support sliding window, e.g. Mistral models.
|
||||
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", torch_dtype=torch.float16).to("cuda:0")
|
||||
>>> inputs = tokenizer("Yesterday I was on a rock concert and.", return_tensors="pt").to(model.device)
|
||||
|
||||
>>> # can be used by passing in cache implementation
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=30, cache_implementation="sliding_window")
|
||||
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
|
||||
"Yesterday I was on a rock concert and. I was so excited to see my favorite band. I was so excited that I was jumping up and down and screaming. I was so excited that I"
|
||||
```
|
||||
|
||||
### Sink Cache
|
||||
|
||||
Sink Cache was introduced in ["Efficient Streaming Language Models with Attention Sinks"](https://arxiv.org/abs/2309.17453). It allows you to generate long sequences of text ("infinite length" according to the paper) without any fine-tuning. That is achieved by smart handling of previous keys and values, specifically it retains a few initial tokens from the sequence, called "sink tokens". This is based on the observation that these initial tokens attract a significant portion of attention scores during the generation process. Tokens that come after "sink tokens" are discarded on a sliding windowed basis, keeping only the latest `window_size` tokens. By keeping these initial tokens as "attention sinks," the model maintains stable performance even when dealing with very long texts, thus discarding most of the previous knowledge.
|
||||
|
||||
Unlike other cache classes, this one can't be used directly by indicating a `cache_implementation`. You have to initialize the Cache before calling `generate()`, as follows.
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, SinkCache
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16).to("cuda:0")
|
||||
>>> inputs = tokenizer("This is a long story about unicorns, fairies and magic.", return_tensors="pt").to(model.device)
|
||||
|
||||
>>> # get our cache, specify number of sink tokens and window size
|
||||
>>> # Note that window size already includes sink tokens, so it has to be larger
|
||||
>>> past_key_values = SinkCache(window_length=256, num_sink_tokens=4)
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=30, past_key_values=past_key_values)
|
||||
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
|
||||
"This is a long story about unicorns, fairies and magic. It is a fantasy world where unicorns and fairies live together in harmony. The story follows a young girl named Lily"
|
||||
```
|
||||
|
||||
### Encoder-Decoder Cache
|
||||
|
||||
The [`~EncoderDecoderCache`] is a wrapper designed to handle the caching needs of encoder-decoder models. This cache type is specifically built to manage both self-attention and cross-attention caches, ensuring storage and retrieval of past key/values required for these complex models. A nice thing about the Encoder-Decoder Cache is that you can set different cache types for the encoder and for the decoder, depending on your use case. Currently this cache is only supported in [Whisper](./model_doc/whisper.md) models, but we will be adding more models soon.
|
||||
|
||||
In terms of usage, there is nothing special to be done and calling `generate()` or `forward()` will handle everything for you.
|
||||
|
||||
|
||||
### Model-specific Cache Classes
|
||||
|
||||
Some models require storing previous keys, values, or states in a specific way, and the above cache classes cannot be used. For such cases, we have several specialized cache classes that are designed for specific models. These models only accept their own dedicated cache classes and do not support using any other cache types. Some examples include [`~HybridCache`] for [Gemma2](./model_doc/gemma2.md) series models or [`~MambaCache`] for [Mamba](./model_doc/mamba.md) architecture models.
|
||||
|
||||
|
||||
## Iterative Generation with Cache
|
||||
|
||||
We have seen how to use each of the cache types when generating. What if you want to use the cache in an iterative generation setting, for example in applications like chatbots, where interactions involve multiple turns and continuous back-and-forth exchanges? Iterative generation with a cache allows these systems to handle ongoing conversations effectively without reprocessing the entire context at each step. But there are some tips that you should know before you start implementing:
|
||||
|
||||
The general format when doing iterative generation is shown below. First, initialize an empty cache of the type you want, then you can start feeding in new prompts iteratively. Keeping track of the dialogue history and formatting can be done with chat templates; read more on that in [chat_templating](./chat_templating.md)
|
||||
|
||||
In case you are using Sink Cache, you have to crop your inputs to that maximum length because Sink Cache can generate text longer than its maximum window size, but it expects the first input to not exceed the maximum cache length.
|
||||
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer,AutoModelForCausalLM
|
||||
>>> from transformers.cache_utils import (
...     DynamicCache,
...     SinkCache,
...     StaticCache,
...     SlidingWindowCache,
...     QuantoQuantizedCache,
...     QuantizedCacheConfig,
... )
|
||||
|
||||
>>> model_id = "meta-llama/Llama-2-7b-chat-hf"
|
||||
>>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map='auto')
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(model_id)
|
||||
|
||||
>>> user_prompts = ["Hello, what's your name?", "Btw, yesterday I was on a rock concert."]
|
||||
|
||||
>>> past_key_values = DynamicCache()
|
||||
>>> max_cache_length = past_key_values.get_max_length()
|
||||
|
||||
>>> messages = []
|
||||
>>> for prompt in user_prompts:
|
||||
... messages.append({"role": "user", "content": prompt})
|
||||
... inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt", return_dict=True).to(model.device)
|
||||
... if isinstance(past_key_values, SinkCache):
|
||||
... inputs = {k: v[:, -max_cache_length:] for k, v in inputs.items()}
|
||||
...
|
||||
... input_length = inputs["input_ids"].shape[1]
|
||||
...
|
||||
... outputs = model.generate(**inputs, do_sample=False, max_new_tokens=256, past_key_values=past_key_values)
|
||||
... completion = tokenizer.decode(outputs[0, input_length: ], skip_special_tokens=True)
|
||||
... messages.append({"role": "assistant", "content": completion})
|
||||
|
||||
print(messages)
|
||||
[{'role': 'user', 'content': "Hello, what's your name?"}, {'role': 'assistant', 'content': " Hello! My name is LLaMA, I'm a large language model trained by a team of researcher at Meta AI. 😊"}, {'role': 'user', 'content': 'Btw, yesterday I was on a rock concert.'}, {'role': 'assistant', 'content': ' Oh, cool! That sounds like a lot of fun! 🎉 Did you enjoy the concert? What was the band like? 🤔'}]
|
||||
```
|
||||
|
||||
|
||||
## Re-use Cache to continue generation
|
||||
|
||||
Sometimes you may want to first fill the cache object with the key/values of a certain prefix prompt and re-use it several times to generate different sequences from it. We are working hard on adding this feature to 🤗 Transformers and will update this section soon.
|
@ -18,109 +18,59 @@ Basic inference is slow because LLMs have to be called repeatedly to generate th
|
||||
This guide will show you how to use the optimization techniques available in Transformers to accelerate LLM inference.
|
||||
|
||||
> [!TIP]
|
||||
> Hugging Face also provides [Text Generation Inference (TGI)](https://hf.co/docs/text-generation-inference), a library dedicated to deploying and serving highly optimized LLMs for inference. It includes deployment-oriented optimization features not included in Transformers, such as continuous batching for increasing throughput and tensor parallelism for multi-GPU inference.
|
||||
> Hugging Face also provides [Text Generation Inference (TGI)](https://hf.co/docs/text-generation-inference), a library dedicated to deploying and serving highly optimized LLMs for inference. It includes more optimization features not included in Transformers, such as continuous batching for increasing throughput and tensor parallelism for multi-GPU inference.
|
||||
|
||||
## Static kv-cache and `torch.compile`
|
||||
## Static kv-cache and torch.compile
|
||||
|
||||
During decoding, an LLM computes the key-value (kv) values for each input token, and since it is autoregressive, it computes the same kv values each time because the generated output becomes part of the input. This is not very efficient because you're recomputing the same kv values each time.
|
||||
|
||||
To optimize this, you can use a kv-cache to store the past keys and values instead of recomputing them each time. However, since the kv-cache grows with each generation step and is dynamic, it prevents you from taking advantage of [`torch.compile`](./perf_torch_compile), a powerful optimization tool that fuses PyTorch code into fast and optimized kernels.
|
||||
To optimize this, you can use a kv-cache to store the past keys and values instead of recomputing them each time. However, since the kv-cache grows with each generation step and is dynamic, it prevents you from taking advantage of [torch.compile](./perf_torch_compile), a powerful optimization tool that fuses PyTorch code into fast and optimized kernels.
|
||||
|
||||
The *static kv-cache* solves this issue by pre-allocating the kv-cache size to a maximum value which allows you to combine it with `torch.compile` for up to a 4x speed up. Your speed up may vary depending on the model size (larger models have a smaller speed up) and hardware.
|
||||
The *static kv-cache* solves this issue by pre-allocating the kv-cache size to a maximum value which allows you to combine it with torch.compile for up to a 4x speed up.
|
||||
|
||||
> [!WARNING]
|
||||
> Currently, only [Llama](./model_doc/llama2) and a few other models support static kv-cache and `torch.compile`. Check [this issue](https://github.com/huggingface/transformers/issues/28981) for a live model compatibility list.
|
||||
> Currently, only [Command R](./model_doc/cohere), [Gemma](./model_doc/gemma) and [Llama](./model_doc/llama2) models support static kv-cache and torch.compile.
|
||||
|
||||
There are three flavors of static kv-cache usage, depending on the complexity of your task:
|
||||
1. Basic usage: simply set a flag in `generation_config` (recommended);
|
||||
2. Advanced usage: handle a cache object for multi-turn generation or a custom generation loop;
|
||||
3. Advanced usage: compile the entire `generate` function into a single graph, if having a single graph is relevant for you.
|
||||
|
||||
Select the correct tab below for further instructions on each of these flavors.
|
||||
|
||||
> [!TIP]
|
||||
> Regardless of the strategy used with `torch.compile`, you can avoid shape-related recompilations if you left-pad your LLM inputs to a limited set of values. The [`pad_to_multiple_of` tokenizer flag](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__.pad_to_multiple_of) is your friend!
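For example, a sketch of what that padding setup could look like, assuming the `tokenizer` and `model` loaded in the snippets below (64 is an arbitrary bucket size):

```py
tokenizer.padding_side = "left"  # decoder-only LLMs should be left-padded for generation
prompts = ["A short prompt", "A slightly longer prompt about special relativity"]
inputs = tokenizer(
    prompts,
    padding=True,
    pad_to_multiple_of=64,  # every batch is padded up to a multiple of 64 tokens
    return_tensors="pt",
).to(model.device)
```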

<hfoptions id="static-kv">
<hfoption id="basic usage: generation_config">

For this example, let's use the [Gemma](https://hf.co/google/gemma-2b) model. All we need to do is to:
1. Access the model's `generation_config` attribute and set the `cache_implementation` to "static";
2. Call `torch.compile` on the model to compile the forward pass with the static kv-cache.

And that's it!

For this example, let's load the [Gemma](https://hf.co/google/gemma-2b) model.

```py
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"  # To prevent long warnings :)

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto")
model = AutoModelForCausalLM.from_pretrained(
    "google/gemma-2b", device_map="auto"
)
```

There are two ways you can configure the model to use a static kv-cache. For a 7B model on an A100, both methods get a 4x speed up in the forward pass. Your speed up may vary depending on the model size (larger models have a smaller speed up) and hardware. If you're using the [`~GenerationMixin.generate`] method, the speed up is ~3x. The forward pass (which still gets 4x speed up) is only a part of the whole [`~GenerationMixin.generate`] code.

<hfoptions id="static-kv">
<hfoption id="generation_config">

Access the model's `generation_config` attribute and set the `cache_implementation` to "static".

```py
model.generation_config.cache_implementation = "static"
```

model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)

Call torch.compile on the model to compile the forward pass with the static kv-cache.

```py
compiled_model = torch.compile(model, mode="reduce-overhead", fullgraph=True)
input_text = "The theory of special relativity states "
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
outputs = compiled_model.generate(**input_ids)
tokenizer.batch_decode(outputs, skip_special_tokens=True)
['The theory of special relativity states 1. The speed of light is constant in all inertial reference']
```

Under the hood, `generate` will attempt to reuse the same cache object, removing the need for re-compilation at each call. Avoiding re-compilation is critical to get the most out of `torch.compile`, and you should be aware of the following:
1. If the batch size changes or the maximum output length increases between calls, the cache will have to be reinitialized, triggering a new compilation;
2. The first couple of calls of the compiled function are slower, as the function is being compiled.

> [!WARNING]
> For a more advanced usage of the static cache, such as multi-turn conversations, we recommend instantiating and manipulating the cache object outside [`~GenerationMixin.generate`]. See the advanced usage tab.

Under the hood, `generate` will attempt to reuse the same cache object, removing the need for re-compilation at each call. However, if the batch size or the maximum output length increase between calls, the cache will have to be reinitialized, triggering a new compilation.

</hfoption>
<hfoption id="advanced usage: control Static Cache">
<hfoption id="Static Cache">

A [`StaticCache`] object can be passed to the model's [`~GenerationMixin.generate`] under the `past_key_values` argument. The object will retain the cache contents, so you can pass it to a new [`~GenerationMixin.generate`] call to continue generation, like you would do with a dynamic cache.

```py
from transformers import AutoTokenizer, AutoModelForCausalLM, StaticCache
import torch
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"  # To prevent long warnings :)

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto")

model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
input_text = "The theory of special relativity states "
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
prompt_length = input_ids.input_ids.shape[1]
model.generation_config.max_new_tokens = 16

past_key_values = StaticCache(
    config=model.config,
    max_batch_size=1,
    # If you plan to reuse the cache, make sure the cache length is large enough for all cases
    max_cache_len=prompt_length+(model.generation_config.max_new_tokens*2),
    device=model.device,
    dtype=model.dtype
)
outputs = model.generate(**input_ids, past_key_values=past_key_values)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['The theory of special relativity states 1. The speed of light is constant in all inertial reference frames. 2']

# pass in the generated text and the same cache object to continue generation from where it left off. Optionally, in a
# multi-turn conversation, append the new user input to the generated text.
new_input_ids = outputs
outputs = model.generate(new_input_ids, past_key_values=past_key_values)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['The theory of special relativity states 1. The speed of light is constant in all inertial reference frames. 2. The speed of light is constant in all inertial reference frames. 3.']
```

> [!TIP]
> If you want to reuse the same [`StaticCache`] object on a new prompt, be sure to reset its contents with the `.reset()` method between calls

If you want to go further down a level, the [`StaticCache`] object can also be passed to the model's forward pass under the same `past_key_values` argument. Using this strategy, you can write your own function to decode the next token given the current token and position and cache position of previously generated tokens.
A [`StaticCache`] object can be passed to the model's forward pass under the `past_key_values` argument, enabling the use of this object as a static kv-cache. Using this strategy, you can write your own function to decode the next token given the current token and position and cache position of previously generated tokens. You can also pass the [`StaticCache`] object to [`~GenerationMixin.generate`] and use it across calls, like you would do with a dynamic cache.

```py
from transformers import LlamaTokenizer, LlamaForCausalLM, StaticCache, logging
@ -152,9 +102,12 @@ def decode_one_tokens(model, cur_token, input_pos, cache_position, past_key_valu
    return new_token
```

There are a few important things you must do to enable static kv-cache and `torch.compile` with the `StaticCache` method:
There are a few important things you must do to enable static kv-cache and torch.compile with the `StaticCache` method:

1. Initialize the [`StaticCache`] instance before using the model for inference. There you can configure parameters like the maximum batch size and sequence length.
2. Call `torch.compile` on the model to compile the forward pass with the static kv-cache.
2. Call torch.compile on the model to compile the forward pass with the static kv-cache.
3. Set `enable_math=True` in the [torch.backends.cuda.sdp_kernel](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html) context manager to enable the native PyTorch C++ implementation of scaled dot product attention to speed up inference even more.

```py
@ -189,34 +142,8 @@ text
'My favorite all time favorite condiment is ketchup. I love it on everything. I love it on my eggs, my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my p']
```
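To illustrate the third point, the per-token decoding calls from the truncated example above can be wrapped in the context manager roughly like this (a minimal sketch; `decode_one_tokens`, `next_token`, `cache_position` and `past_key_values` come from the elided setup):

```py
with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True):
    next_token = decode_one_tokens(model, next_token.clone(), None, cache_position, past_key_values)
```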

</hfoption>
<hfoption id="advanced usage: end-to-end generate compilation">

Compiling the entire `generate` function, in terms of code, is even simpler than in the basic usage: call `torch.compile` on `generate` to compile the entire function. No need to specify the use of the static cache: although it is compatible, dynamic cache (default) was faster in our benchmarks.

```py
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"  # To prevent long warnings :)

tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto")

model.generate = torch.compile(model.generate, mode="reduce-overhead", fullgraph=True)
input_text = "The theory of special relativity states "
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

outputs = model.generate(**input_ids)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
['The theory of special relativity states 1. The speed of light is constant in all inertial reference']
```

As a result, we compile not only the model forward pass, but also all input preparation, logit processor operations, and so on. The result should be a slightly faster `generate` call, compared to the basic usage example, and the compiled graph may be better suited to more exotic hardware devices or use cases. However, there are severe drawbacks in using this approach:
1. Compilation is much slower;
2. All parameterization of `generate` must be done through `generation_config`, as shown in the sketch below;
3. Many warnings and exceptions are suppressed -- we suggest testing with its uncompiled form first;
4. Although we are working on it, it is heavily feature restricted (for instance, at the time of writing, generation does not stop if an EOS token is selected).
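For example, options you would normally pass to `generate` directly are set ahead of time on `generation_config` instead (a small sketch, reusing the `model` and `input_ids` from above):

```py
model.generation_config.max_new_tokens = 64
model.generation_config.do_sample = True
model.generation_config.temperature = 0.7

outputs = model.generate(**input_ids)  # the compiled generate picks these options up from generation_config
```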

> [!TIP]
> If you want to reuse the [`StaticCache`] object on a new prompt, be sure to reset its contents with the `.reset()` method

</hfoption>
</hfoptions>
@ -147,7 +147,7 @@ Let's call it now for the next experiment.
|
||||
```python
|
||||
flush()
|
||||
```
|
||||
In the recent version of the accelerate library, you can also use a utility method called `release_memory()`
|
||||
In the recent version of the accelerate library, you can also use an utility method called `release_memory()`
|
||||
|
||||
```python
|
||||
from accelerate.utils import release_memory
|
||||
@ -683,7 +683,7 @@ Assistant: Germany has ca. 81 million inhabitants
|
||||
|
||||
In this chat, the LLM runs auto-regressive decoding twice:
|
||||
1. The first time, the key-value cache is empty and the input prompt is `"User: How many people live in France?"` and the model auto-regressively generates the text `"Roughly 75 million people live in France"` while increasing the key-value cache at every decoding step.
|
||||
2. The second time the input prompt is `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many in Germany?"`. Thanks to the cache, all key-value vectors for the first two sentences are already computed. Therefore the input prompt only consists of `"User: And how many in Germany?"`. While processing the shortened input prompt, its computed key-value vectors are concatenated to the key-value cache of the first decoding. The second Assistant's answer `"Germany has ca. 81 million inhabitants"` is then auto-regressively generated with the key-value cache consisting of encoded key-value vectors of `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many are in Germany?"`.
|
||||
2. The second time the input prompt is `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many in Germany?"`. Thanks to the cache, all key-value vectors for the first two sentences are already computed. Therefore the input prompt only consists of `"User: And how many in Germany?"`. While processing the shortened input prompt, it's computed key-value vectors are concatenated to the key-value cache of the first decoding. The second Assistant's answer `"Germany has ca. 81 million inhabitants"` is then auto-regressively generated with the key-value cache consisting of encoded key-value vectors of `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many are in Germany?"`.
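At the forward-pass level, the two decoding rounds described above look roughly like this (a minimal sketch; `model` and `tokenizer` stand for the chat model and tokenizer used in this example):

```python
# Round 1: the cache is empty, so the full first prompt is processed
input_ids = tokenizer("User: How many people live in France?", return_tensors="pt").input_ids.to(model.device)
outputs = model(input_ids, use_cache=True)
past_key_values = outputs.past_key_values  # grows with every subsequent decoding step

# Round 2: only the new user turn is processed, the cache already holds the earlier context
new_input_ids = tokenizer("\nUser: And how many in Germany?", return_tensors="pt").input_ids.to(model.device)
outputs = model(new_input_ids, past_key_values=past_key_values, use_cache=True)
```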
|
||||
|
||||
Two things should be noted here:
|
||||
1. Keeping all the context is crucial for LLMs deployed in chat so that the LLM understands all the previous context of the conversation. E.g. for the example above the LLM needs to understand that the user refers to the population when asking `"And how many are in Germany"`.
|
||||
|
@ -72,10 +72,6 @@ We provide two types of agents, based on the main [`Agent`] class:
|
||||
|
||||
[[autodoc]] launch_gradio_demo
|
||||
|
||||
### stream_to_gradio
|
||||
|
||||
[[autodoc]] stream_to_gradio
|
||||
|
||||
### ToolCollection
|
||||
|
||||
[[autodoc]] ToolCollection
|
||||
|
@ -25,11 +25,11 @@ A backbone is a model used for feature extraction for higher level computer visi
|
||||
|
||||
Backbones are supported for the following models:
|
||||
|
||||
* [BEiT](../model_doc/beit)
|
||||
* [BEiT](..model_doc/beit)
|
||||
* [BiT](../model_doc/bit)
|
||||
* [ConvNext](../model_doc/convnext)
|
||||
* [ConvNet](../model_doc/convnext)
|
||||
* [ConvNextV2](../model_doc/convnextv2)
|
||||
* [DiNAT](../model_doc/dinat)
|
||||
* [DiNAT](..model_doc/dinat)
|
||||
* [DINOV2](../model_doc/dinov2)
|
||||
* [FocalNet](../model_doc/focalnet)
|
||||
* [MaskFormer](../model_doc/maskformer)
|
||||
|
@ -34,7 +34,7 @@ By default, `TrainingArguments.report_to` is set to `"all"`, so a [`Trainer`] wi
|
||||
- [`~integrations.TensorBoardCallback`] if tensorboard is accessible (either through PyTorch >= 1.4
|
||||
or tensorboardX).
|
||||
- [`~integrations.WandbCallback`] if [wandb](https://www.wandb.com/) is installed.
|
||||
- [`~integrations.CometCallback`] if [comet_ml](https://www.comet.com/site/) is installed.
|
||||
- [`~integrations.CometCallback`] if [comet_ml](https://www.comet.ml/site/) is installed.
|
||||
- [`~integrations.MLflowCallback`] if [mlflow](https://www.mlflow.org/) is installed.
|
||||
- [`~integrations.NeptuneCallback`] if [neptune](https://neptune.ai/) is installed.
|
||||
- [`~integrations.AzureMLCallback`] if [azureml-sdk](https://pypi.org/project/azureml-sdk/) is
|
||||
|
@ -66,8 +66,3 @@ Examples of use can be found in the [example scripts](../examples) or [example n
|
||||
- numpy_mask_tokens
|
||||
- tf_mask_tokens
|
||||
- torch_mask_tokens
|
||||
|
||||
## DataCollatorWithFlattening
|
||||
|
||||
[[autodoc]] data.data_collator.DataCollatorWithFlattening
|
||||
|
||||
|
@ -32,8 +32,3 @@ An image processor is in charge of preparing input features for vision models an
|
||||
## BaseImageProcessor
|
||||
|
||||
[[autodoc]] image_processing_utils.BaseImageProcessor
|
||||
|
||||
|
||||
## BaseImageProcessorFast
|
||||
|
||||
[[autodoc]] image_processing_utils_fast.BaseImageProcessorFast
|
||||
|
@ -40,10 +40,6 @@ for text generation, [`~generation.GenerationMixin`] (for the PyTorch models),
|
||||
- push_to_hub
|
||||
- all
|
||||
|
||||
Custom models should also include a `_supports_assign_param_buffer`, which determines if superfast init can apply
|
||||
on the particular model. Signs that your model needs this are if `test_save_and_load_from_pretrained` fails. If so,
|
||||
set this to `False`.
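A minimal sketch of what that looks like on a custom model (the class names are placeholders):

```python
from transformers import PretrainedConfig, PreTrainedModel

class MyCustomConfig(PretrainedConfig):
    model_type = "my-custom-model"

class MyCustomModel(PreTrainedModel):
    config_class = MyCustomConfig
    # Opt out of superfast init if test_save_and_load_from_pretrained fails for this model
    _supports_assign_param_buffer = False
```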
|
||||
|
||||
## ModuleUtilsMixin
|
||||
|
||||
[[autodoc]] modeling_utils.ModuleUtilsMixin
|
||||
|
@ -270,11 +270,6 @@ This is a simplified view, since the pipeline can handle automatically the batch
|
||||
about how many forward passes your inputs are actually going to trigger, you can optimize the `batch_size`
|
||||
independently of the inputs. The caveats from the previous section still apply.
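For instance, batching is enabled directly in the pipeline call (the checkpoint and inputs are only illustrative):

```python
from transformers import pipeline

pipe = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
texts = ["I love this!", "Not my cup of tea."] * 16
results = pipe(texts, batch_size=8)  # one forward pass per batch of 8 inputs instead of one per input
```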
|
||||
|
||||
## Pipeline FP16 inference
|
||||
Models can be run in FP16 which can be significantly faster on GPU while saving memory. Most models will not suffer noticeable performance loss from this. The larger the model, the less likely that it will.
|
||||
|
||||
To enable FP16 inference, you can simply pass `torch_dtype=torch.float16` or `torch_dtype='float16'` to the pipeline constructor. Note that this only works for models with a PyTorch backend. Your inputs will be converted to FP16 internally.
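For example (the checkpoint is only illustrative):

```python
import torch
from transformers import pipeline

pipe = pipeline(
    "text-generation",
    model="google/gemma-2b",
    torch_dtype=torch.float16,  # weights are loaded in FP16 and inputs are converted internally
    device_map="auto",
)
print(pipe("The theory of special relativity states ", max_new_tokens=20)[0]["generated_text"])
```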
|
||||
|
||||
## Pipeline custom code
|
||||
|
||||
If you want to override a specific pipeline.
|
||||
@ -391,6 +386,14 @@ Pipelines available for computer vision tasks include the following.
|
||||
|
||||
Pipelines available for natural language processing tasks include the following.
|
||||
|
||||
### ConversationalPipeline
|
||||
|
||||
[[autodoc]] Conversation
|
||||
|
||||
[[autodoc]] ConversationalPipeline
|
||||
- __call__
|
||||
- all
|
||||
|
||||
### FillMaskPipeline
|
||||
|
||||
[[autodoc]] FillMaskPipeline
|
||||
|
@ -56,8 +56,3 @@ Learn how to quantize models in the [Quantization](../quantization) guide.
|
||||
## HqqConfig
|
||||
|
||||
[[autodoc]] HqqConfig
|
||||
|
||||
## FbgemmFp8Config
|
||||
|
||||
[[autodoc]] FbgemmFp8Config
|
||||
|
||||
|
@ -66,8 +66,6 @@ The original code can be found [here](https://github.com/salesforce/BLIP).
|
||||
|
||||
## BlipModel
|
||||
|
||||
`BlipModel` is going to be deprecated in future versions, please use `BlipForConditionalGeneration`, `BlipForImageTextRetrieval` or `BlipForQuestionAnswering` depending on your usecase.
|
||||
|
||||
[[autodoc]] BlipModel
|
||||
- forward
|
||||
- get_text_features
|
||||
|
@ -1,192 +0,0 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Chameleon
|
||||
|
||||
## Overview
|
||||
|
||||
The Chameleon model was proposed in [Chameleon: Mixed-Modal Early-Fusion Foundation Models
|
||||
](https://arxiv.org/abs/2405.09818v1) by the META AI Chameleon Team. Chameleon is a Vision-Language Model that uses vector quantization to tokenize images, which enables the model to generate multimodal output. The model takes images and text as input, including an interleaved format, and generates textual responses. The image generation module has not been released yet.
|
||||
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*We present Chameleon, a family of early-fusion token-based mixed-modal models capable of understanding and generating images and text in any arbitrary sequence. We outline a stable training
|
||||
approach from inception, an alignment recipe, and an architectural parameterization tailored for the
|
||||
early-fusion, token-based, mixed-modal setting. The models are evaluated on a comprehensive range
|
||||
of tasks, including visual question answering, image captioning, text generation, image generation, and
|
||||
long-form mixed modal generation. Chameleon demonstrates broad and general capabilities, including
|
||||
state-of-the-art performance in image captioning tasks, outperforms Llama-2 in text-only tasks while
|
||||
being competitive with models such as Mixtral 8x7B and Gemini-Pro, and performs non-trivial image
|
||||
generation, all in a single model. It also matches or exceeds the performance of much larger models,
|
||||
including Gemini Pro and GPT-4V, according to human judgments on a new long-form mixed-modal
|
||||
generation evaluation, where either the prompt or outputs contain mixed sequences of both images and
|
||||
text. Chameleon marks a significant step forward in unified modeling of full multimodal documents*
|
||||
|
||||
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/chameleon_arch.png"
|
||||
alt="drawing" width="600"/>
|
||||
|
||||
<small> Chameleon incorporates a vector quantizer module to transform images into discrete tokens. That also enables image generation using an auto-regressive transformer. Taken from the <a href="https://arxiv.org/abs/2405.09818v1">original paper.</a> </small>
|
||||
|
||||
This model was contributed by [joaogante](https://huggingface.co/joaogante) and [RaushanTurganbay](https://huggingface.co/RaushanTurganbay).
|
||||
The original code can be found [here](https://github.com/facebookresearch/chameleon).
|
||||
|
||||
|
||||
## Usage tips
|
||||
|
||||
- We advise users to use `padding_side="left"` when computing batched generation as it leads to more accurate results. Simply make sure to set `processor.tokenizer.padding_side = "left"` before generating.
|
||||
|
||||
- Note that Chameleon was tuned for safety alignment. If the model is refusing to answer, consider asking a more concrete question, instead of an open question.
|
||||
|
||||
- Chameleon generates in chat format which means that the generated text will always be the "assistant's turn". You can enable a text completion generation by passing `return_for_text_completion=True` when calling the processor.
|
||||
|
||||
> [!NOTE]
|
||||
> The Chameleon implementation in Transformers uses a special image token to indicate where to merge image embeddings. We didn't add a new special image token but reused one of the reserved tokens: `<reserved08707>`. You have to add `<image>` to your prompt in the place where the image should be embedded for correct generation.
|
||||
|
||||
## Usage example
|
||||
|
||||
### Single image inference
|
||||
|
||||
Chameleon is a gated model, so make sure you have access and are logged in to the Hugging Face Hub with a token.
|
||||
Here's how to load the model and perform inference in half-precision (`torch.bfloat16`):
|
||||
|
||||
```python
|
||||
from transformers import ChameleonProcessor, ChameleonForConditionalGeneration
|
||||
import torch
|
||||
from PIL import Image
|
||||
import requests
|
||||
|
||||
processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
|
||||
model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.bfloat16, device_map="cuda")
|
||||
|
||||
# prepare image and text prompt
|
||||
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
|
||||
image = Image.open(requests.get(url, stream=True).raw)
|
||||
prompt = "What do you see in this image?<image>"
|
||||
|
||||
inputs = processor(prompt, image, return_tensors="pt").to(model.device)
|
||||
|
||||
# autoregressively complete prompt
|
||||
output = model.generate(**inputs, max_new_tokens=50)
|
||||
print(processor.decode(output[0], skip_special_tokens=True))
|
||||
```
|
||||
|
||||
### Multi image inference
|
||||
|
||||
Chameleon can perform inference with multiple images as input, where images either belong to the same prompt or different prompts (in batched inference). Here is how you can do it:
|
||||
|
||||
```python
|
||||
from transformers import ChameleonProcessor, ChameleonForConditionalGeneration
|
||||
import torch
|
||||
from PIL import Image
|
||||
import requests
|
||||
|
||||
processor = ChameleonProcessor.from_pretrained("facebook/chameleon-7b")
|
||||
|
||||
model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", torch_dtype=torch.bfloat16, device_map="cuda")
|
||||
|
||||
# Get three different images
|
||||
url = "https://www.ilankelman.org/stopsigns/australia.jpg"
|
||||
image_stop = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
image_cats = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
url = "https://huggingface.co/microsoft/kosmos-2-patch14-224/resolve/main/snowman.jpg"
|
||||
image_snowman = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
# Prepare a batched prompt, where the first one is a multi-image prompt and the second is not
|
||||
prompts = [
|
||||
"What do these images have in common?<image><image>",
|
||||
"<image>What is shown in this image?"
|
||||
]
|
||||
|
||||
# We can simply feed images in the order they have to be used in the text prompt
|
||||
# Each "<image>" token uses one image leaving the next for the subsequent "<image>" tokens
|
||||
inputs = processor(text=prompts, images=[image_stop, image_cats, image_snowman], padding=True, return_tensors="pt").to(device="cuda", dtype=torch.bfloat16)
|
||||
|
||||
# Generate
|
||||
generate_ids = model.generate(**inputs, max_new_tokens=50)
|
||||
processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)
|
||||
```
|
||||
|
||||
## Model optimization
|
||||
|
||||
### Quantization using Bitsandbytes
|
||||
|
||||
The model can be loaded in 8 or 4 bits, greatly reducing the memory requirements while maintaining the performance of the original model. First make sure to install bitsandbytes (`pip install bitsandbytes`) and to have access to a CUDA-compatible GPU device. Simply change the snippet above as follows:
|
||||
|
||||
```python
|
||||
from transformers import ChameleonForConditionalGeneration, BitsAndBytesConfig
|
||||
|
||||
# specify how to quantize the model
|
||||
quantization_config = BitsAndBytesConfig(
|
||||
load_in_4bit=True,
|
||||
bnb_4bit_quant_type="nf4",
|
||||
bnb_4bit_compute_dtype=torch.bfloat16,
|
||||
)
|
||||
|
||||
model = ChameleonForConditionalGeneration.from_pretrained("facebook/chameleon-7b", quantization_config=quantization_config, device_map="cuda")
|
||||
```
|
||||
|
||||
### Use Flash-Attention 2 and SDPA to further speed-up generation
|
||||
|
||||
The model supports both Flash Attention 2 and PyTorch's [`torch.nn.functional.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html), which can be enabled for optimization. SDPA is the default option when you load the model. If you want to switch to Flash Attention 2, first make sure to install flash-attn; refer to the [original repository](https://github.com/Dao-AILab/flash-attention) for installation instructions. Then change the snippet above as follows:
|
||||
|
||||
```python
|
||||
from transformers import ChameleonForConditionalGeneration
|
||||
|
||||
model_id = "facebook/chameleon-7b"
|
||||
model = ChameleonForConditionalGeneration.from_pretrained(
|
||||
model_id,
|
||||
torch_dtype=torch.bfloat16,
|
||||
low_cpu_mem_usage=True,
|
||||
attn_implementation="flash_attention_2"
|
||||
).to(0)
|
||||
```
|
||||
|
||||
## ChameleonConfig
|
||||
|
||||
[[autodoc]] ChameleonConfig
|
||||
|
||||
## ChameleonVQVAEConfig
|
||||
|
||||
[[autodoc]] ChameleonVQVAEConfig
|
||||
|
||||
## ChameleonProcessor
|
||||
|
||||
[[autodoc]] ChameleonProcessor
|
||||
|
||||
## ChameleonImageProcessor
|
||||
|
||||
[[autodoc]] ChameleonImageProcessor
|
||||
- preprocess
|
||||
|
||||
## ChameleonVQVAE
|
||||
|
||||
[[autodoc]] ChameleonVQVAE
|
||||
- forward
|
||||
|
||||
## ChameleonModel
|
||||
|
||||
[[autodoc]] ChameleonModel
|
||||
- forward
|
||||
|
||||
## ChameleonForConditionalGeneration
|
||||
|
||||
[[autodoc]] ChameleonForConditionalGeneration
|
||||
- forward
|
@ -79,123 +79,6 @@ encode the text and prepare the images. The following example shows how to get t
|
||||
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
|
||||
```
|
||||
|
||||
|
||||
### Combining CLIP and Flash Attention 2
|
||||
|
||||
First, make sure to install the latest version of Flash Attention 2.
|
||||
|
||||
```bash
|
||||
pip install -U flash-attn --no-build-isolation
|
||||
```
|
||||
|
||||
Also make sure that your hardware is compatible with Flash Attention 2. Read more about it in the official documentation of the flash-attn repository. Also make sure to load your model in half-precision (e.g. `torch.float16`).
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
For small batch sizes, you might notice a slowdown in your model when using flash attention. Refer to the section [Expected speedups with Flash Attention and SDPA](#Expected-speedups-with-Flash-Attention-and-SDPA) below and select an appropriate attention implementation.
|
||||
|
||||
</Tip>
|
||||
|
||||
To load and run a model using Flash Attention 2, refer to the snippet below:
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> import requests
|
||||
>>> from PIL import Image
|
||||
|
||||
>>> from transformers import CLIPProcessor, CLIPModel
|
||||
|
||||
>>> device = "cuda"
|
||||
>>> torch_dtype = torch.float16
|
||||
|
||||
>>> model = CLIPModel.from_pretrained(
|
||||
... "openai/clip-vit-base-patch32",
|
||||
... attn_implementation="flash_attention_2",
|
||||
... device_map=device,
|
||||
... torch_dtype=torch_dtype,
|
||||
... )
|
||||
>>> processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
|
||||
|
||||
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
>>> image = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
>>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
|
||||
>>> inputs.to(device)
|
||||
|
||||
>>> with torch.no_grad():
|
||||
... with torch.autocast(device):
|
||||
... outputs = model(**inputs)
|
||||
|
||||
>>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
|
||||
>>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
|
||||
>>> print(probs)
|
||||
tensor([[0.9946, 0.0052]], device='cuda:0', dtype=torch.float16)
|
||||
```
|
||||
|
||||
|
||||
### Using Scaled Dot Product Attention (SDPA)
|
||||
|
||||
PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function
|
||||
encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the
|
||||
[official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)
|
||||
or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)
|
||||
page for more information.
|
||||
|
||||
SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set
|
||||
`attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.
|
||||
|
||||
```python
|
||||
from transformers import CLIPModel
|
||||
|
||||
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32", torch_dtype=torch.float16, attn_implementation="sdpa")
|
||||
```
|
||||
|
||||
For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).
|
||||
|
||||
### Expected speedups with Flash Attention and SDPA
|
||||
|
||||
On a local benchmark (NVIDIA A10G, PyTorch 2.3.1+cu121) with `float16`, we saw the following speedups during inference for `"openai/clip-vit-large-patch14"` checkpoint ([code](https://gist.github.com/qubvel/ac691a54e54f9fae8144275f866a7ff8)):
|
||||
|
||||
#### CLIPTextModel
|
||||
|
||||
| Num text labels | Eager (s/iter) | FA2 (s/iter) | FA2 speedup | SDPA (s/iter) | SDPA speedup |
|
||||
|------------------:|-----------------:|---------------:|--------------:|----------------:|---------------:|
|
||||
| 4 | 0.009 | 0.012 | 0.737 | 0.007 | 1.269 |
|
||||
| 16 | 0.009 | 0.014 | 0.659 | 0.008 | 1.187 |
|
||||
| 32 | 0.018 | 0.021 | 0.862 | 0.016 | 1.142 |
|
||||
| 64 | 0.034 | 0.034 | 1.001 | 0.03 | 1.163 |
|
||||
| 128 | 0.063 | 0.058 | 1.09 | 0.054 | 1.174 |
|
||||
|
||||

|
||||
|
||||
#### CLIPVisionModel
|
||||
|
||||
| Image batch size | Eager (s/iter) | FA2 (s/iter) | FA2 speedup | SDPA (s/iter) | SDPA speedup |
|
||||
|-------------------:|-----------------:|---------------:|--------------:|----------------:|---------------:|
|
||||
| 1 | 0.016 | 0.013 | 1.247 | 0.012 | 1.318 |
|
||||
| 4 | 0.025 | 0.021 | 1.198 | 0.021 | 1.202 |
|
||||
| 16 | 0.093 | 0.075 | 1.234 | 0.075 | 1.24 |
|
||||
| 32 | 0.181 | 0.147 | 1.237 | 0.146 | 1.241 |
|
||||
|
||||

|
||||
|
||||
#### CLIPModel
|
||||
|
||||
| Image batch size | Num text labels | Eager (s/iter) | FA2 (s/iter) | FA2 speedup | SDPA (s/iter) | SDPA speedup |
|
||||
|-------------------:|------------------:|-----------------:|---------------:|--------------:|----------------:|---------------:|
|
||||
| 1 | 4 | 0.025 | 0.026 | 0.954 | 0.02 | 1.217 |
|
||||
| 1 | 16 | 0.026 | 0.028 | 0.918 | 0.02 | 1.287 |
|
||||
| 1 | 64 | 0.042 | 0.046 | 0.906 | 0.036 | 1.167 |
|
||||
| 4 | 4 | 0.028 | 0.033 | 0.849 | 0.024 | 1.189 |
|
||||
| 4 | 16 | 0.034 | 0.035 | 0.955 | 0.029 | 1.169 |
|
||||
| 4 | 64 | 0.059 | 0.055 | 1.072 | 0.05 | 1.179 |
|
||||
| 16 | 4 | 0.096 | 0.088 | 1.091 | 0.078 | 1.234 |
|
||||
| 16 | 16 | 0.102 | 0.09 | 1.129 | 0.083 | 1.224 |
|
||||
| 16 | 64 | 0.127 | 0.11 | 1.157 | 0.105 | 1.218 |
|
||||
| 32 | 4 | 0.185 | 0.159 | 1.157 | 0.149 | 1.238 |
|
||||
| 32 | 16 | 0.19 | 0.162 | 1.177 | 0.154 | 1.233 |
|
||||
| 32 | 64 | 0.216 | 0.181 | 1.19 | 0.176 | 1.228 |
|
||||
|
||||
## Resources
|
||||
|
||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with CLIP.
|
||||
|
@ -31,7 +31,8 @@ We used curriculum learning for pretraining, changing the data mix during traini
|
||||
|
||||
More detailed information about DBRX Instruct and DBRX Base can be found in our [technical blog post](https://www.databricks.com/blog/introducing-dbrx-new-state-art-open-llm).
|
||||
|
||||
This model was contributed by [eitan-turok](https://huggingface.co/eitanturok) and [abhi-db](https://huggingface.co/abhi-db). The original code can be found [here](https://github.com/databricks/dbrx-instruct), though this may not be up to date.
|
||||
|
||||
This model was contributed by [eitan-turok](https://huggingface.co/eitanturok) and [abhi-db](https://huggingface.co/abhi-db). The original code can be found [here](https://github.com/databricks/dbrx), though this may not be up to date.
|
||||
|
||||
## Usage Examples
|
||||
|
||||
|
@ -20,12 +20,6 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
The Depth Anything model was proposed in [Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data](https://arxiv.org/abs/2401.10891) by Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, Hengshuang Zhao. Depth Anything is based on the [DPT](dpt) architecture, trained on ~62 million images, obtaining state-of-the-art results for both relative and absolute depth estimation.
|
||||
|
||||
<Tip>
|
||||
|
||||
[Depth Anything V2](depth_anything_v2) was released in June 2024. It uses the same architecture as Depth Anything and therefore it is compatible with all code examples and existing workflows. However, it leverages synthetic data and a larger capacity teacher model to achieve much finer and more robust depth predictions.
|
||||
|
||||
</Tip>
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*This work presents Depth Anything, a highly practical solution for robust monocular depth estimation. Without pursuing novel technical modules, we aim to build a simple yet powerful foundation model dealing with any images under any circumstances. To this end, we scale up the dataset by designing a data engine to collect and automatically annotate large-scale unlabeled data (~62M), which significantly enlarges the data coverage and thus is able to reduce the generalization error. We investigate two simple yet effective strategies that make data scaling-up promising. First, a more challenging optimization target is created by leveraging data augmentation tools. It compels the model to actively seek extra visual knowledge and acquire robust representations. Second, an auxiliary supervision is developed to enforce the model to inherit rich semantic priors from pre-trained encoders. We evaluate its zero-shot capabilities extensively, including six public datasets and randomly captured photos. It demonstrates impressive generalization ability. Further, through fine-tuning it with metric depth information from NYUv2 and KITTI, new SOTAs are set. Our better depth model also results in a better depth-conditioned ControlNet.*
|
||||
|
@ -1,115 +0,0 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Depth Anything V2
|
||||
|
||||
## Overview
|
||||
|
||||
Depth Anything V2 was introduced in [the paper of the same name](https://arxiv.org/abs/2406.09414) by Lihe Yang et al. It uses the same architecture as the original [Depth Anything model](depth_anything), but uses synthetic data and a larger capacity teacher model to achieve much finer and more robust depth predictions.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*This work presents Depth Anything V2. Without pursuing fancy techniques, we aim to reveal crucial findings to pave the way towards building a powerful monocular depth estimation model. Notably, compared with V1, this version produces much finer and more robust depth predictions through three key practices: 1) replacing all labeled real images with synthetic images, 2) scaling up the capacity of our teacher model, and 3) teaching student models via the bridge of large-scale pseudo-labeled real images. Compared with the latest models built on Stable Diffusion, our models are significantly more efficient (more than 10x faster) and more accurate. We offer models of different scales (ranging from 25M to 1.3B params) to support extensive scenarios. Benefiting from their strong generalization capability, we fine-tune them with metric depth labels to obtain our metric depth models. In addition to our models, considering the limited diversity and frequent noise in current test sets, we construct a versatile evaluation benchmark with precise annotations and diverse scenes to facilitate future research.*
|
||||
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/depth_anything_overview.jpg"
|
||||
alt="drawing" width="600"/>
|
||||
|
||||
<small> Depth Anything overview. Taken from the <a href="https://arxiv.org/abs/2401.10891">original paper</a>.</small>
|
||||
|
||||
The Depth Anything models were contributed by [nielsr](https://huggingface.co/nielsr).
|
||||
The original code can be found [here](https://github.com/DepthAnything/Depth-Anything-V2).
|
||||
|
||||
## Usage example
|
||||
|
||||
There are 2 main ways to use Depth Anything V2: either using the pipeline API, which abstracts away all the complexity for you, or by using the `DepthAnythingForDepthEstimation` class yourself.
|
||||
|
||||
### Pipeline API
|
||||
|
||||
The pipeline allows you to use the model in a few lines of code:
|
||||
|
||||
```python
|
||||
>>> from transformers import pipeline
|
||||
>>> from PIL import Image
|
||||
>>> import requests
|
||||
|
||||
>>> # load pipe
|
||||
>>> pipe = pipeline(task="depth-estimation", model="depth-anything/Depth-Anything-V2-Small-hf")
|
||||
|
||||
>>> # load image
|
||||
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
|
||||
>>> image = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
>>> # inference
|
||||
>>> depth = pipe(image)["depth"]
|
||||
```
|
||||
|
||||
### Using the model yourself
|
||||
|
||||
If you want to do the pre- and post-processing yourself, here's how to do that:
|
||||
|
||||
```python
|
||||
>>> from transformers import AutoImageProcessor, AutoModelForDepthEstimation
|
||||
>>> import torch
|
||||
>>> import numpy as np
|
||||
>>> from PIL import Image
|
||||
>>> import requests
|
||||
|
||||
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
>>> image = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
>>> image_processor = AutoImageProcessor.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf")
|
||||
>>> model = AutoModelForDepthEstimation.from_pretrained("depth-anything/Depth-Anything-V2-Small-hf")
|
||||
|
||||
>>> # prepare image for the model
|
||||
>>> inputs = image_processor(images=image, return_tensors="pt")
|
||||
|
||||
>>> with torch.no_grad():
|
||||
... outputs = model(**inputs)
|
||||
... predicted_depth = outputs.predicted_depth
|
||||
|
||||
>>> # interpolate to original size
|
||||
>>> prediction = torch.nn.functional.interpolate(
|
||||
... predicted_depth.unsqueeze(1),
|
||||
... size=image.size[::-1],
|
||||
... mode="bicubic",
|
||||
... align_corners=False,
|
||||
... )
|
||||
|
||||
>>> # visualize the prediction
|
||||
>>> output = prediction.squeeze().cpu().numpy()
|
||||
>>> formatted = (output * 255 / np.max(output)).astype("uint8")
|
||||
>>> depth = Image.fromarray(formatted)
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with Depth Anything.
|
||||
|
||||
- [Monocular depth estimation task guide](../tasks/depth_estimation)
|
||||
- [Depth Anything V2 demo](https://huggingface.co/spaces/depth-anything/Depth-Anything-V2).
|
||||
- A notebook showcasing inference with [`DepthAnythingForDepthEstimation`] can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/Depth%20Anything/Predicting_depth_in_an_image_with_Depth_Anything.ipynb). 🌎
|
||||
- [Core ML conversion of the `small` variant for use on Apple Silicon](https://huggingface.co/apple/coreml-depth-anything-v2-small).
|
||||
|
||||
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
|
||||
|
||||
## DepthAnythingConfig
|
||||
|
||||
[[autodoc]] DepthAnythingConfig
|
||||
|
||||
## DepthAnythingForDepthEstimation
|
||||
|
||||
[[autodoc]] DepthAnythingForDepthEstimation
|
||||
- forward
|
@ -16,14 +16,6 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# DETA
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
This model is in maintenance mode only, we don't accept any new PRs changing its code.
|
||||
If you run into any issues running this model, please reinstall the last version that supported this model: v4.40.2.
|
||||
You can do so by running the following command: `pip install -U transformers==4.40.2`.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Overview
|
||||
|
||||
The DETA model was proposed in [NMS Strikes Back](https://arxiv.org/abs/2212.06137) by Jeffrey Ouyang-Zhang, Jang Hyun Cho, Xingyi Zhou, Philipp Krähenbühl.
|
||||
|
@ -57,7 +57,7 @@ print((last_hidden_states - traced_outputs[0]).abs().max())
|
||||
|
||||
## Resources
|
||||
|
||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DINOv2.
|
||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with DPT.
|
||||
|
||||
- Demo notebooks for DINOv2 can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/DINOv2). 🌎
|
||||
|
||||
|
@ -16,36 +16,28 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# EfficientFormer
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
This model is in maintenance mode only, we don't accept any new PRs changing its code.
|
||||
If you run into any issues running this model, please reinstall the last version that supported this model: v4.40.2.
|
||||
You can do so by running the following command: `pip install -U transformers==4.40.2`.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Overview
|
||||
|
||||
The EfficientFormer model was proposed in [EfficientFormer: Vision Transformers at MobileNet Speed](https://arxiv.org/abs/2206.01191)
|
||||
The EfficientFormer model was proposed in [EfficientFormer: Vision Transformers at MobileNet Speed](https://arxiv.org/abs/2206.01191)
|
||||
by Yanyu Li, Geng Yuan, Yang Wen, Eric Hu, Georgios Evangelidis, Sergey Tulyakov, Yanzhi Wang, Jian Ren. EfficientFormer proposes a
|
||||
dimension-consistent pure transformer that can be run on mobile devices for dense prediction tasks like image classification, object
|
||||
detection and semantic segmentation.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Vision Transformers (ViT) have shown rapid progress in computer vision tasks, achieving promising results on various benchmarks.
|
||||
However, due to the massive number of parameters and model design, e.g., attention mechanism, ViT-based models are generally
|
||||
times slower than lightweight convolutional networks. Therefore, the deployment of ViT for real-time applications is particularly
|
||||
challenging, especially on resource-constrained hardware such as mobile devices. Recent efforts try to reduce the computation
|
||||
complexity of ViT through network architecture search or hybrid design with MobileNet block, yet the inference speed is still
|
||||
unsatisfactory. This leads to an important question: can transformers run as fast as MobileNet while obtaining high performance?
|
||||
To answer this, we first revisit the network architecture and operators used in ViT-based models and identify inefficient designs.
|
||||
Then we introduce a dimension-consistent pure transformer (without MobileNet blocks) as a design paradigm.
|
||||
Finally, we perform latency-driven slimming to get a series of final models dubbed EfficientFormer.
|
||||
Extensive experiments show the superiority of EfficientFormer in performance and speed on mobile devices.
|
||||
Our fastest model, EfficientFormer-L1, achieves 79.2% top-1 accuracy on ImageNet-1K with only 1.6 ms inference latency on
|
||||
iPhone 12 (compiled with CoreML), which { runs as fast as MobileNetV2×1.4 (1.6 ms, 74.7% top-1),} and our largest model,
|
||||
EfficientFormer-L7, obtains 83.3% accuracy with only 7.0 ms latency. Our work proves that properly designed transformers can
|
||||
*Vision Transformers (ViT) have shown rapid progress in computer vision tasks, achieving promising results on various benchmarks.
|
||||
However, due to the massive number of parameters and model design, e.g., attention mechanism, ViT-based models are generally
|
||||
times slower than lightweight convolutional networks. Therefore, the deployment of ViT for real-time applications is particularly
|
||||
challenging, especially on resource-constrained hardware such as mobile devices. Recent efforts try to reduce the computation
|
||||
complexity of ViT through network architecture search or hybrid design with MobileNet block, yet the inference speed is still
|
||||
unsatisfactory. This leads to an important question: can transformers run as fast as MobileNet while obtaining high performance?
|
||||
To answer this, we first revisit the network architecture and operators used in ViT-based models and identify inefficient designs.
|
||||
Then we introduce a dimension-consistent pure transformer (without MobileNet blocks) as a design paradigm.
|
||||
Finally, we perform latency-driven slimming to get a series of final models dubbed EfficientFormer.
|
||||
Extensive experiments show the superiority of EfficientFormer in performance and speed on mobile devices.
|
||||
Our fastest model, EfficientFormer-L1, achieves 79.2% top-1 accuracy on ImageNet-1K with only 1.6 ms inference latency on
|
||||
iPhone 12 (compiled with CoreML), which { runs as fast as MobileNetV2×1.4 (1.6 ms, 74.7% top-1),} and our largest model,
|
||||
EfficientFormer-L7, obtains 83.3% accuracy with only 7.0 ms latency. Our work proves that properly designed transformers can
|
||||
reach extremely low latency on mobile devices while maintaining high performance.*
|
||||
|
||||
This model was contributed by [novice03](https://huggingface.co/novice03) and [Bearnardd](https://huggingface.co/Bearnardd).
|
||||
@ -101,4 +93,4 @@ The original code can be found [here](https://github.com/snap-research/Efficient
|
||||
- call
|
||||
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
</frameworkcontent>
|
@ -16,14 +16,6 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
# ErnieM
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
This model is in maintenance mode only, we don't accept any new PRs changing its code.
|
||||
If you run into any issues running this model, please reinstall the last version that supported this model: v4.40.2.
|
||||
You can do so by running the following command: `pip install -U transformers==4.40.2`.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Overview
|
||||
|
||||
The ErnieM model was proposed in [ERNIE-M: Enhanced Multilingual Representation by Aligning
|
||||
|
@ -60,11 +60,6 @@ This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ), [
|
||||
[[autodoc]] GemmaForSequenceClassification
|
||||
- forward
|
||||
|
||||
## GemmaForTokenClassification
|
||||
|
||||
[[autodoc]] GemmaForTokenClassification
|
||||
- forward
|
||||
|
||||
## FlaxGemmaModel
|
||||
|
||||
[[autodoc]] FlaxGemmaModel
|
||||
|
Some files were not shown because too many files have changed in this diff.