Mirror of https://github.com/huggingface/transformers.git (synced 2025-10-21 01:23:56 +08:00)

Compare commits: `v4.48.0...fix-pytorc` (149 commits)
Commit SHAs:

ef63984280 21955c3c0f b764c20b09 3613f568cd 96625d85fd bf16a182ba 86d7564611
414658f94f 63e9c941eb c550a1c640 cd6591bfb2 e57b459997 5c576f5a66 5450e7c84a
a50befa9b9 33cb1f7b61 14a9bb520e f11f57c925 fc269f77da bcb841f007 b912f5ee43
72d1a4cd53 b5aaf87509 328e2ae4c0 d2a424b550 045c02f209 71cc8161b2 8f1509a96c
0a950e0bbe 4ec425ffad f3f6c86582 d3af76df58 8736e91ad6 2c3a44f9a7 fdcc62c855
3b9770581e 62bd83947a 487e2f63bd b3d6722469 a7738f5a89 ec28957f94 36c9181f5c
f439e28d32 373e50e970 870e2c8ea0 f4f33a20a2 90b46e983f 870eb7b41b 8ac851b0b3
107f9f5127 3df90103b8 741d55237a 568941bf11 7051c5fcc8 97fbaf0861 dbd8474125
678bd7f1ce dc10f7906a f82b19cb6f edbabf6b82 fd8d61fdb2 78f5ee0217 8e4cedd9ca
705aeaaa12 e867b97443 920f34a772 234168c4dc 44393df089 f19135afc7 641238eb76
729b569531 ec97417827 5f0f4b1b93 a142f16131 3998fa8aab b80e334e71 68947282fc
135e86aa54 88b95e6179 56afd2f488 abe57b6f17 872dfbdd46 332fa024d6 8571bb145a
5fa3534475 7d4b3ddde4 8ad6bd0f1b 936a731534 10e8cd0d63 099d93d2e9 42b2857b01
94ae9a8da1 add5f0566c df6d42a914 54fd7e9260 ab1afd56f5 8c1b5d3782 02a492a838
94af1c0aa2 2818307e93 aaa969e97d 80dbbd103c aeeceb9916 57bf1a12a0 91be6a5eb2
8ebe9d7166 1302c32a84 3292e96a4f 8b78d9d6e7 2cbcc5877d fd4f14c968 bef7dded22
99e0ab6ed8 12dfd99007 387663e571 615bf9c5e4 09d5f76274 c61fcde910 b0cdbd9119
a11041ffad df2a812e95 050636518a 715fdd6459 4b8d1f7fca 34f76bb62b c23a1c1932
a3f82328ed 2fa876d2d8 e6f9b03464 84a6789145 87089176d9 91f14f1fc4 b8c34d97fc
cd44bdb4b8 15bd3e61f8 1e3c6c1f7d 04eae987f3 b02828e4af 0aaf124fb9 1211e616a4
bbc00046b9 f63829c87b 52e1f87c7d ccc0381d36 a9bd1e6284 e0646f3dce 5f087d1335
6f127d3f81 6b73ee8905
```
@@ -13,6 +13,7 @@ jobs:
  check_circleci_user:
    docker:
      - image: python:3.10-slim
    resource_class: small
    parallelism: 1
    steps:
      - run: echo $CIRCLE_PROJECT_USERNAME
```
.github/workflows/self-comment-ci.yml (vendored, new file, 313 lines):

```yaml
name: PR comment GitHub CI

on:
  issue_comment:
    types:
      - created
    branches-ignore:
      - main
concurrency:
  group: ${{ github.workflow }}-${{ github.event.issue.number }}-${{ startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow') }}
  cancel-in-progress: true
permissions: read-all

env:
  HF_HOME: /mnt/cache
  TRANSFORMERS_IS_CI: yes
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  RUN_SLOW: yes
  # For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access.
  # This token is created under the bot `hf-transformers-bot`.
  HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
  TF_FORCE_GPU_ALLOW_GROWTH: true
  RUN_PT_TF_CROSS_TESTS: 1
  CUDA_VISIBLE_DEVICES: 0,1

jobs:
  get-pr-number:
    runs-on: ubuntu-22.04
    name: Get PR number
    # For security: only allow team members to run
    if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "qubvel", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1"]'), github.actor) && (startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow')) }}
    outputs:
      PR_NUMBER: ${{ steps.set_pr_number.outputs.PR_NUMBER }}
    steps:
      - name: Get PR number
        shell: bash
        run: |
          if [[ "${{ github.event.issue.number }}" != "" && "${{ github.event.issue.pull_request }}" != "" ]]; then
            echo "PR_NUMBER=${{ github.event.issue.number }}" >> $GITHUB_ENV
          else
            echo "PR_NUMBER=" >> $GITHUB_ENV
          fi

      - name: Check PR number
        shell: bash
        run: |
          echo "${{ env.PR_NUMBER }}"

      - name: Set PR number
        id: set_pr_number
        run: echo "PR_NUMBER=${{ env.PR_NUMBER }}" >> "$GITHUB_OUTPUT"

  get-sha:
    runs-on: ubuntu-22.04
    needs: get-pr-number
    if: ${{ needs.get-pr-number.outputs.PR_NUMBER != ''}}
    outputs:
      PR_HEAD_SHA: ${{ steps.get_sha.outputs.PR_HEAD_SHA }}
      PR_MERGE_SHA: ${{ steps.get_sha.outputs.PR_MERGE_SHA }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: "0"
          ref: "refs/pull/${{needs.get-pr-number.outputs.PR_NUMBER}}/merge"

      - name: Get SHA (and verify timestamps against the issue comment date)
        id: get_sha
        env:
          PR_NUMBER: ${{ needs.get-pr-number.outputs.PR_NUMBER }}
          COMMENT_DATE: ${{ github.event.comment.created_at }}
        run: |
          git fetch origin refs/pull/$PR_NUMBER/head:refs/remotes/pull/$PR_NUMBER/head
          git checkout refs/remotes/pull/$PR_NUMBER/head
          echo "PR_HEAD_SHA: $(git log -1 --format=%H)"
          echo "PR_HEAD_SHA=$(git log -1 --format=%H)" >> "$GITHUB_OUTPUT"
          git fetch origin refs/pull/$PR_NUMBER/merge:refs/remotes/pull/$PR_NUMBER/merge
          git checkout refs/remotes/pull/$PR_NUMBER/merge
          echo "PR_MERGE_SHA: $(git log -1 --format=%H)"
          echo "PR_MERGE_SHA=$(git log -1 --format=%H)" >> "$GITHUB_OUTPUT"
          PR_MERGE_COMMIT_TIMESTAMP=$(git log -1 --date=unix --format=%cd)
          echo "PR_MERGE_COMMIT_TIMESTAMP: $PR_MERGE_COMMIT_TIMESTAMP"
          COMMENT_TIMESTAMP=$(date -d "${COMMENT_DATE}" +"%s")
          echo "COMMENT_DATE: $COMMENT_DATE"
          echo "COMMENT_TIMESTAMP: $COMMENT_TIMESTAMP"
          if [ $COMMENT_TIMESTAMP -le $PR_MERGE_COMMIT_TIMESTAMP ]; then
            echo "Last commit on the pull request is newer than the issue comment triggering this run! Abort!";
            exit -1;
          fi

  # use a python script to handle this complex logic
  # case 1: `run-slow` (auto. infer with limited number of models, but in particular, new model)
  # case 2: `run-slow model_1, model_2`
  get-tests:
    runs-on: ubuntu-22.04
    needs: [get-pr-number, get-sha]
    if: ${{ needs.get-pr-number.outputs.PR_NUMBER != ''}}
    outputs:
      models: ${{ steps.models_to_run.outputs.models }}
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: "0"
          ref: "refs/pull/${{needs.get-pr-number.outputs.PR_NUMBER}}/merge"

      - name: Verify merge commit SHA
        env:
          VERIFIED_PR_MERGE_SHA: ${{ needs.get-sha.outputs.PR_MERGE_SHA }}
        run: |
          PR_MERGE_SHA=$(git log -1 --format=%H)
          if [ $PR_MERGE_SHA != $VERIFIED_PR_MERGE_SHA ]; then
            echo "The merged commit SHA is not the same as the verified one! Security issue detected, abort the workflow!";
            exit -1;
          fi

      - name: Get models to test
        env:
          PR_COMMENT: ${{ github.event.comment.body }}
        run: |
          python -m pip install GitPython
          python utils/pr_slow_ci_models.py --message "$PR_COMMENT" | tee output.txt
          echo "models=$(tail -n 1 output.txt)" >> $GITHUB_ENV

      - name: Show models to test
        id: models_to_run
        run: |
          echo "${{ env.models }}"
          echo "models=${{ env.models }}" >> $GITHUB_ENV
          echo "models=${{ env.models }}" >> $GITHUB_OUTPUT

  reply_to_comment:
    name: Reply to the comment
    if: ${{ needs.get-tests.outputs.models != '[]' }}
    needs: [get-pr-number, get-tests]
    permissions:
      pull-requests: write
    runs-on: ubuntu-22.04
    steps:
      - name: Reply to the comment
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          MODELS: ${{ needs.get-tests.outputs.models }}
        run: |
          gh api \
            --method POST \
            -H "Accept: application/vnd.github+json" \
            -H "X-GitHub-Api-Version: 2022-11-28" \
            repos/${{ github.repository }}/issues/${{ needs.get-pr-number.outputs.PR_NUMBER }}/comments \
            -f "body=This comment contains run-slow, running the specified jobs: ${{ env.MODELS }} ..."

  create_run:
    name: Create run
    if: ${{ needs.get-tests.outputs.models != '[]' }}
    needs: [get-sha, get-tests, reply_to_comment]
    permissions:
      statuses: write
    runs-on: ubuntu-22.04
    steps:
      - name: Create Run
        id: create_run
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          # Create a commit status (pending) for a run of this workflow. The status has to be updated later in `update_run_status`.
          # See https://docs.github.com/en/rest/commits/statuses?apiVersion=2022-11-28#create-a-commit-status
          GITHUB_RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
        run: |
          gh api \
            --method POST \
            -H "Accept: application/vnd.github+json" \
            -H "X-GitHub-Api-Version: 2022-11-28" \
            repos/${{ github.repository }}/statuses/${{ needs.get-sha.outputs.PR_HEAD_SHA }} \
            -f "target_url=$GITHUB_RUN_URL" -f "state=pending" -f "description=Slow CI job" -f "context=pytest/custom-tests"

  run_models_gpu:
    name: Run all tests for the model
    if: ${{ needs.get-tests.outputs.models != '[]' }}
    needs: [get-pr-number, get-sha, get-tests, create_run]
    strategy:
      fail-fast: false
      matrix:
        folders: ${{ fromJson(needs.get-tests.outputs.models) }}
        machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache]
    runs-on:
      group: '${{ matrix.machine_type }}'
    container:
      image: huggingface/transformers-all-latest-gpu
      options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: Echo input and matrix info
        shell: bash
        run: |
          echo "${{ matrix.folders }}"

      - name: Echo folder ${{ matrix.folders }}
        shell: bash
        # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
        # set the artifact folder names (because the character `/` is not allowed).
        run: |
          echo "${{ matrix.folders }}"
          matrix_folders=${{ matrix.folders }}
          matrix_folders=${matrix_folders/'models/'/'models_'}
          echo "$matrix_folders"
          echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV

      - name: Checkout to PR merge commit
        working-directory: /transformers
        run: |
          git fetch origin refs/pull/${{ needs.get-pr-number.outputs.PR_NUMBER }}/merge:refs/remotes/pull/${{ needs.get-pr-number.outputs.PR_NUMBER }}/merge
          git checkout refs/remotes/pull/${{ needs.get-pr-number.outputs.PR_NUMBER }}/merge
          git log -1 --format=%H

      - name: Verify merge commit SHA
        env:
          VERIFIED_PR_MERGE_SHA: ${{ needs.get-sha.outputs.PR_MERGE_SHA }}
        working-directory: /transformers
        run: |
          PR_MERGE_SHA=$(git log -1 --format=%H)
          if [ $PR_MERGE_SHA != $VERIFIED_PR_MERGE_SHA ]; then
            echo "The merged commit SHA is not the same as the verified one! Security issue detected, abort the workflow!";
            exit -1;
          fi

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: NVIDIA-SMI
        run: |
          nvidia-smi

      - name: Set `machine_type` for report and artifact names
        working-directory: /transformers
        shell: bash
        run: |
          echo "${{ matrix.machine_type }}"
          if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then
            machine_type=single-gpu
          elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then
            machine_type=multi-gpu
          else
            machine_type=${{ matrix.machine_type }}
          fi
          echo "$machine_type"
          echo "machine_type=$machine_type" >> $GITHUB_ENV

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all tests on GPU
        working-directory: /transformers
        run: |
          export CUDA_VISIBLE_DEVICES="$(python3 utils/set_cuda_devices_for_ci.py --test_folder ${{ matrix.folders }})"
          echo $CUDA_VISIBLE_DEVICES
          python3 -m pytest -v -rsfE --make-reports=${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/failures_short.txt

      - name: Make sure report directory exists
        shell: bash
        run: |
          mkdir -p /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports
          echo "hello" > /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/hello.txt
          echo "${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports"

      - name: "Test suite reports artifacts: ${{ env.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ env.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports
          path: /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports

  update_run_status:
    name: Update Check Run Status
    needs: [get-sha, create_run, run_models_gpu]
    permissions:
      statuses: write
    if: ${{ always() && needs.create_run.result == 'success' }}
    runs-on: ubuntu-22.04
    env:
      GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      GITHUB_RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
    steps:
      - name: Get `run_models_gpu` job status
        run: |
          echo "${{ needs.run_models_gpu.result }}"
          if [ "${{ needs.run_models_gpu.result }}" = "cancelled" ]; then
            echo "STATUS=failure" >> $GITHUB_ENV
          elif [ "${{ needs.run_models_gpu.result }}" = "skipped" ]; then
            echo "STATUS=success" >> $GITHUB_ENV
          else
            echo "STATUS=${{ needs.run_models_gpu.result }}" >> $GITHUB_ENV
          fi

      - name: Update PR commit statuses
        run: |
          echo "${{ needs.run_models_gpu.result }}"
          echo "${{ env.STATUS }}"
          gh api \
            --method POST \
            -H "Accept: application/vnd.github+json" \
            -H "X-GitHub-Api-Version: 2022-11-28" \
            repos/${{ github.repository }}/statuses/${{ needs.get-sha.outputs.PR_HEAD_SHA }} \
            -f "target_url=$GITHUB_RUN_URL" -f "state=${{ env.STATUS }}" -f "description=Slow CI job" -f "context=pytest/custom-tests"
```
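The `get-tests` job above delegates the `run-slow` comment parsing to `utils/pr_slow_ci_models.py`, which is not part of this diff. As a rough illustration of the "case 2" parsing described in the workflow comments (an explicit `run-slow model_1, model_2` request), a minimal sketch might look like the following; the `models/<name>` folder naming is an assumption based on the matrix values used above, not the actual script:

```python
import re


def parse_run_slow_comment(comment: str) -> list[str]:
    """Extract model test folders from a `run-slow model_1, model_2` style comment.

    Illustrative sketch only -- the real utils/pr_slow_ci_models.py also infers
    new/changed models from the PR automatically (case 1 in the workflow comments).
    """
    match = re.match(r"^\s*(?:run-slow|run slow|run_slow)[ :]*(.*)$", comment.strip())
    if match is None:
        return []
    names = [name.strip() for name in match.group(1).split(",") if name.strip()]
    # The run_models_gpu matrix expects folders such as `models/bert` (assumption).
    return [f"models/{name}" for name in names]


print(parse_run_slow_comment("run-slow bert, gpt2"))  # ['models/bert', 'models/gpt2']
```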
```diff
@@ -1,55 +1,55 @@
 name: Self-hosted runner (AMD mi210 scheduled CI caller)
 
 on:
   workflow_run:
     workflows: ["Self-hosted runner (AMD scheduled CI caller)"]
     branches: ["main"]
     types: [completed]
   push:
     branches:
       - run_amd_scheduled_ci_caller*
 
 jobs:
   model-ci:
     name: Model CI
-    uses: ./.github/workflows/self-scheduled-amd.yml
+    uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
     with:
       job: run_models_gpu
       slack_report_channel: "#transformers-ci-daily-amd"
       runner: mi210
       docker: huggingface/transformers-pytorch-amd-gpu
       ci_event: Scheduled CI (AMD) - mi210
     secrets: inherit
 
   torch-pipeline:
     name: Torch pipeline CI
-    uses: ./.github/workflows/self-scheduled-amd.yml
+    uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
     with:
       job: run_pipelines_torch_gpu
       slack_report_channel: "#transformers-ci-daily-amd"
       runner: mi210
       docker: huggingface/transformers-pytorch-amd-gpu
       ci_event: Scheduled CI (AMD) - mi210
     secrets: inherit
 
   example-ci:
     name: Example CI
-    uses: ./.github/workflows/self-scheduled-amd.yml
+    uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
     with:
       job: run_examples_gpu
       slack_report_channel: "#transformers-ci-daily-amd"
       runner: mi210
       docker: huggingface/transformers-pytorch-amd-gpu
       ci_event: Scheduled CI (AMD) - mi210
     secrets: inherit
 
   deepspeed-ci:
     name: DeepSpeed CI
-    uses: ./.github/workflows/self-scheduled-amd.yml
+    uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
     with:
       job: run_torch_cuda_extensions_gpu
       slack_report_channel: "#transformers-ci-daily-amd"
       runner: mi210
       docker: huggingface/transformers-pytorch-deepspeed-amd-gpu
       ci_event: Scheduled CI (AMD) - mi210
     secrets: inherit
```
```diff
@@ -1,55 +1,55 @@
 name: Self-hosted runner (AMD mi250 scheduled CI caller)
 
 on:
   workflow_run:
     workflows: ["Self-hosted runner (AMD scheduled CI caller)"]
     branches: ["main"]
     types: [completed]
   push:
     branches:
       - run_amd_scheduled_ci_caller*
 
 jobs:
   model-ci:
     name: Model CI
-    uses: ./.github/workflows/self-scheduled-amd.yml
+    uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
     with:
       job: run_models_gpu
       slack_report_channel: "#transformers-ci-daily-amd"
       runner: mi250
       docker: huggingface/transformers-pytorch-amd-gpu
       ci_event: Scheduled CI (AMD) - mi250
     secrets: inherit
 
   torch-pipeline:
     name: Torch pipeline CI
-    uses: ./.github/workflows/self-scheduled-amd.yml
+    uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
     with:
       job: run_pipelines_torch_gpu
       slack_report_channel: "#transformers-ci-daily-amd"
       runner: mi250
       docker: huggingface/transformers-pytorch-amd-gpu
       ci_event: Scheduled CI (AMD) - mi250
     secrets: inherit
 
   example-ci:
     name: Example CI
-    uses: ./.github/workflows/self-scheduled-amd.yml
+    uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
     with:
       job: run_examples_gpu
       slack_report_channel: "#transformers-ci-daily-amd"
       runner: mi250
       docker: huggingface/transformers-pytorch-amd-gpu
       ci_event: Scheduled CI (AMD) - mi250
     secrets: inherit
 
   deepspeed-ci:
     name: DeepSpeed CI
-    uses: ./.github/workflows/self-scheduled-amd.yml
+    uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
     with:
       job: run_torch_cuda_extensions_gpu
       slack_report_channel: "#transformers-ci-daily-amd"
       runner: mi250
       docker: huggingface/transformers-pytorch-deepspeed-amd-gpu
       ci_event: Scheduled CI (AMD) - mi250
     secrets: inherit
```
.github/workflows/self-scheduled-amd.yml (vendored, deleted, 349 lines):

```yaml
name: Self-hosted runner (scheduled-amd)

# Note: For the AMD CI, we rely on a caller workflow and on the workflow_call event to trigger the
# CI in order to run it on both MI210 and MI250, without having to use matrix here which pushes
# us towards the limit of allowed jobs on GitHub Actions.

on:
  workflow_call:
    inputs:
      job:
        required: true
        type: string
      slack_report_channel:
        required: true
        type: string
      runner:
        required: true
        type: string
      docker:
        required: true
        type: string
      ci_event:
        required: true
        type: string

env:
  HF_HOME: /mnt/cache
  TRANSFORMERS_IS_CI: yes
  OMP_NUM_THREADS: 8
  MKL_NUM_THREADS: 8
  RUN_SLOW: yes
  HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
  NUM_SLICES: 2

# Important note: each job (run_tests_single_gpu, run_tests_multi_gpu, run_examples_gpu, run_pipelines_torch_gpu) requires all the previous jobs before running.
# This is done so that we avoid parallelizing the scheduled tests, to leave available
# runners for the push CI that is running on the same machine.
jobs:
  check_runner_status:
    name: Check Runner Status
    runs-on: ubuntu-22.04
    steps:
      - name: Checkout transformers
        uses: actions/checkout@v4
        with:
          fetch-depth: 2

      - name: Check Runner Status
        run: python utils/check_self_hosted_runner.py --target_runners hf-amd-mi210-ci-1gpu-1,hf-amd-mi250-ci-1gpu-1,hf-amd-mi300-ci-1gpu-1 --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }}

  check_runners:
    name: Check Runners
    needs: check_runner_status
    strategy:
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', self-hosted, amd-gpu, '${{ inputs.runner }}']
    container:
      image: huggingface/transformers-pytorch-amd-gpu
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: ROCM-SMI
        run: |
          rocm-smi

      - name: ROCM-INFO
        run: |
          rocminfo | grep "Agent" -A 14

      - name: Show ROCR environment
        run: |
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

  setup:
    if: contains(fromJSON('["run_models_gpu"]'), inputs.job)
    name: Setup
    needs: check_runners
    strategy:
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', self-hosted, amd-gpu, '${{ inputs.runner }}']
    container:
      image: huggingface/transformers-pytorch-amd-gpu
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    outputs:
      folder_slices: ${{ steps.set-matrix.outputs.folder_slices }}
      slice_ids: ${{ steps.set-matrix.outputs.slice_ids }}
    steps:
      - name: Update clone
        working-directory: /transformers
        run: |
          git fetch && git checkout ${{ github.sha }}

      - name: Cleanup
        working-directory: /transformers
        run: |
          rm -rf tests/__pycache__
          rm -rf tests/models/__pycache__
          rm -rf reports

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - id: set-matrix
        name: Identify models to test
        working-directory: /transformers/tests
        run: |
          echo "folder_slices=$(python3 ../utils/split_model_tests.py --num_splits ${{ env.NUM_SLICES }})" >> $GITHUB_OUTPUT
          echo "slice_ids=$(python3 -c 'd = list(range(${{ env.NUM_SLICES }})); print(d)')" >> $GITHUB_OUTPUT

      - name: ROCM-SMI
        run: |
          rocm-smi

      - name: ROCM-INFO
        run: |
          rocminfo | grep "Agent" -A 14

      - name: Show ROCR environment
        run: |
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

  run_models_gpu:
    if: ${{ inputs.job == 'run_models_gpu' }}
    name: Single GPU tests
    needs: setup
    strategy:
      max-parallel: 1 # For now, not to parallelize. Can change later if it works well.
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
        slice_id: ${{ fromJSON(needs.setup.outputs.slice_ids) }}
    uses: ./.github/workflows/model_jobs_amd.yml
    with:
      folder_slices: ${{ needs.setup.outputs.folder_slices }}
      machine_type: ${{ matrix.machine_type }}
      slice_id: ${{ matrix.slice_id }}
      runner: ${{ inputs.runner }}
      docker: ${{ inputs.docker }}
    secrets: inherit

  run_pipelines_torch_gpu:
    if: ${{ inputs.job == 'run_pipelines_torch_gpu' }}
    name: PyTorch pipelines
    needs: check_runners
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', self-hosted, amd-gpu, '${{ inputs.runner }}']
    container:
      image: ${{ inputs.docker }}
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: ROCM-SMI
        run: |
          rocm-smi

      - name: ROCM-INFO
        run: |
          rocminfo | grep "Agent" -A 14

      - name: Show ROCR environment
        run: |
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all pipeline tests on GPU
        working-directory: /transformers
        run: |
          python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports tests/pipelines -m "not not_device_test"

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports
          path: /transformers/reports/${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports

  run_examples_gpu:
    if: ${{ inputs.job == 'run_examples_gpu' }}
    name: Examples directory
    needs: check_runners
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu]
    runs-on: ['${{ matrix.machine_type }}', self-hosted, amd-gpu, '${{ inputs.runner }}']
    container:
      image: ${{ inputs.docker }}
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: ROCM-SMI
        run: |
          rocm-smi

      - name: ROCM-INFO
        run: |
          rocminfo | grep "Agent" -A 14

      - name: Show ROCR environment
        run: |
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run examples tests on GPU
        working-directory: /transformers
        run: |
          pip install -r examples/pytorch/_tests_requirements.txt
          python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_run_examples_gpu_test_reports examples/pytorch -m "not not_device_test"

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_run_examples_gpu_test_reports/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_examples_gpu_test_reports"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_examples_gpu_test_reports
          path: /transformers/reports/${{ matrix.machine_type }}_run_examples_gpu_test_reports

  run_torch_cuda_extensions_gpu:
    if: ${{ inputs.job == 'run_torch_cuda_extensions_gpu' }}
    name: Torch ROCm deepspeed tests
    needs: check_runners
    strategy:
      fail-fast: false
      matrix:
        machine_type: [single-gpu, multi-gpu]
    runs-on: ['${{ matrix.machine_type }}', self-hosted, amd-gpu, '${{ inputs.runner }}']
    container:
      image: ${{ inputs.docker }}
      options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
    steps:
      - name: Update clone
        working-directory: /transformers
        run: git fetch && git checkout ${{ github.sha }}

      - name: Reinstall transformers in edit mode (remove the one installed during docker image build)
        working-directory: /transformers
        run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

      - name: ROCM-SMI
        run: |
          rocm-smi

      - name: ROCM-INFO
        run: |
          rocminfo | grep "Agent" -A 14

      - name: Show ROCR environment
        run: |
          echo "ROCR: $ROCR_VISIBLE_DEVICES"

      - name: Environment
        working-directory: /transformers
        run: |
          python3 utils/print_env.py

      - name: Show installed libraries and their versions
        working-directory: /transformers
        run: pip freeze

      - name: Run all tests on GPU
        working-directory: /transformers
        run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports tests/deepspeed tests/extended -m "not not_device_test"

      - name: Failure short reports
        if: ${{ failure() }}
        continue-on-error: true
        run: cat /transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports/failures_short.txt

      - name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports"
        if: ${{ always() }}
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
          path: /transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports

  send_results:
    name: Slack Report
    needs: [
      check_runner_status,
      check_runners,
      setup,
      run_models_gpu,
      run_pipelines_torch_gpu,
      run_examples_gpu,
      run_torch_cuda_extensions_gpu
    ]
    if: ${{ always() }}
    uses: ./.github/workflows/slack-report.yml
    with:
      job: ${{ inputs.job }}
      # This would be `skipped` if `setup` is skipped.
      setup_status: ${{ needs.setup.result }}
      slack_report_channel: ${{ inputs.slack_report_channel }}
      # This would be an empty string if `setup` is skipped.
      folder_slices: ${{ needs.setup.outputs.folder_slices }}
      quantization_matrix: ${{ needs.setup.outputs.quantization_matrix }}
      ci_event: ${{ inputs.ci_event }}

    secrets: inherit
```
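The deleted `setup` job relied on `utils/split_model_tests.py` to split the model test folders into `NUM_SLICES` groups for the job matrix. The script itself is not shown in this diff; a minimal sketch of that kind of round-robin slicing (assumed behaviour, with hypothetical folder names) is:

```python
def split_into_slices(folders: list[str], num_splits: int) -> list[list[str]]:
    """Partition test folders into `num_splits` roughly equal slices (round-robin)."""
    slices = [[] for _ in range(num_splits)]
    for i, folder in enumerate(sorted(folders)):
        slices[i % num_splits].append(folder)
    return slices


# Example with NUM_SLICES=2, as in the env block above (folder names are hypothetical):
print(split_into_slices(["models/bert", "models/gpt2", "models/llama", "models/t5"], num_splits=2))
# [['models/bert', 'models/llama'], ['models/gpt2', 'models/t5']]
```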
.github/workflows/slack-report.yml (vendored, 6 lines changed):

```diff
@@ -70,7 +70,7 @@ jobs:
         with:
           name: ci_results_${{ inputs.job }}
           path: ci_results_${{ inputs.job }}
 
 
       - uses: actions/checkout@v4
       - uses: actions/download-artifact@v4
       - name: Send message to Slack for quantization workflow
@@ -90,7 +90,7 @@ jobs:
           pip install huggingface_hub
           pip install slack_sdk
           pip show slack_sdk
-          python utils/notification_service_quantization.py "${{ inputs.quantization_matrix }}"
+          python utils/notification_service_quantization.py "${{ inputs.quantization_matrix }}"
 
       # Upload complete failure tables, as they might be big and only truncated versions could be sent to Slack.
       - name: Failure table artifacts
@@ -98,4 +98,4 @@ jobs:
         uses: actions/upload-artifact@v4
         with:
           name: ci_results_${{ inputs.job }}
-          path: ci_results_${{ inputs.job }}
+          path: ci_results_${{ inputs.job }}
```
CODEOWNERS (deleted, 27 lines):

```
# These owners will be the default owners for everything in
# the repo. Unless a later match takes precedence,
# @global-owner1 and @global-owner2 will be requested for
# review when someone opens a pull request.
* @Rocketknight1 @ArthurZucker  # if no one is pinged based on the other rules, he will do the dispatch
**.md @stevhliu
docs/ @stevhliu
/benchmark/ @McPatate
/utils/modular_model_converter.py @Cyrilvallez @ArthurZucker
/src/transformers/models/*/*processing* @molbap @yonigozlan @qubvel
/src/transformers/models/*/image_processing* @qubvel
/src/transformers/models/*/image_processing_*_fast* @yonigozlan
/src/transformers/models/*/*_modeling* @Rocketknight1
/src/transformers/**/*_tokenization* @ArthurZucker
/src/transformers/generation/ @gante
trainer.py @muellerzr @SunMarc
/src/transformers/pipeline @Rocketknight1 @yonigozlan
/src/transformers/integrations @SunMarc @MekkCyber @muellerzr
/src/transformers/quantizers @SunMarc @MekkCyber
/src/transformers/tests @ydshieh
/src/transformers/models/auto @ArthurZucker
/src/transformers/utils @ArthurZucker @Rocketknight1
/docker @ydshieh @ArthurZucker
/src/transformers/loss @ArthurZucker
/src/transformers/onnx @michaelbenayoun
/.circleci/config.yml @ArthurZucker @ydshieh
/utils/tests_fetcher.py @ydshieh
```
README.md (26 lines changed):

````diff
@@ -255,17 +255,37 @@ You should install 🤗 Transformers in a [virtual environment](https://docs.pyt
 
 First, create a virtual environment with the version of Python you're going to use and activate it.
 
-Then, you will need to install at least one of Flax, PyTorch, or TensorFlow.
-Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/), [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or [Flax](https://github.com/google/flax#quick-install) and [Jax](https://github.com/google/jax#installation) installation pages regarding the specific installation command for your platform.
+**macOS/Linux**
+
+```
+python -m venv env
+source env/bin/activate
+```
+
+**Windows**
+
+```
+python -m venv env
+env\Scripts\activate
+```
+
+To use 🤗 Transformers, you must install at least one of Flax, PyTorch, or TensorFlow. Refer to the official installation guides for platform-specific commands:
+
+[TensorFlow installation page](https://www.tensorflow.org/install/),
+[PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or [Flax](https://github.com/google/flax#quick-install) and [Jax](https://github.com/google/jax#installation)
 
 When one of those backends has been installed, 🤗 Transformers can be installed using pip as follows:
 
 ```bash
 pip install transformers
 ```
 
 If you'd like to play with the examples or need the bleeding edge of the code and can't wait for a new release, you must [install the library from source](https://huggingface.co/docs/transformers/installation#installing-from-source).
 
 ```
 git clone https://github.com/huggingface/transformers.git
 cd transformers
 pip install .
 ```
 
 ### With conda
 
 🤗 Transformers can be installed using conda as follows:
````
```diff
@@ -65,6 +65,9 @@ RUN python3 -m pip install --no-cache-dir python-Levenshtein
 # For `FastSpeech2ConformerTokenizer` tokenizer
 RUN python3 -m pip install --no-cache-dir g2p-en
 
+# For Some bitsandbytes tests
+RUN python3 -m pip install --no-cache-dir einops
+
 # When installing in editable mode, `transformers` is not recognized as a package.
 # this line must be added in order for python to be aware of transformers.
 RUN cd transformers && python3 setup.py develop
```
```diff
@@ -1,5 +1,4 @@
-FROM rocm/dev-ubuntu-22.04:6.1
-# rocm/pytorch has no version with 2.1.0
+FROM rocm/dev-ubuntu-22.04:6.2.4
 LABEL maintainer="Hugging Face"
 
 ARG DEBIAN_FRONTEND=noninteractive
@@ -11,7 +10,7 @@ RUN apt update && \
 
 RUN python3 -m pip install --no-cache-dir --upgrade pip numpy
 
-RUN python3 -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.1
+RUN python3 -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.2
 
 RUN python3 -m pip install --no-cache-dir --upgrade importlib-metadata setuptools ninja git+https://github.com/facebookresearch/detectron2.git pytesseract "itsdangerous<2.1.0"
```
```diff
@@ -1,11 +1,11 @@
-FROM rocm/dev-ubuntu-22.04:5.6
+FROM rocm/dev-ubuntu-22.04:6.2.4
 LABEL maintainer="Hugging Face"
 
 ARG DEBIAN_FRONTEND=noninteractive
-ARG PYTORCH='2.1.1'
-ARG TORCH_VISION='0.16.1'
-ARG TORCH_AUDIO='2.1.1'
-ARG ROCM='5.6'
+ARG PYTORCH='2.5.1'
+ARG TORCH_VISION='0.20.0'
+ARG TORCH_AUDIO='2.5.0'
+ARG ROCM='6.2'
 
 RUN apt update && \
     apt install -y --no-install-recommends \
@@ -45,4 +45,4 @@ RUN cd transformers && python3 setup.py develop
 RUN python3 -c "from deepspeed.launcher.runner import main"
 
 # Remove nvml as it is not compatible with ROCm
-RUN python3 -m pip uninstall py3nvml pynvml -y
+RUN python3 -m pip uninstall py3nvml pynvml nvidia-ml-py apex -y
```
```diff
@@ -15,6 +15,10 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip
 ARG REF=main
 RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
 
+# Install Rust for Tokenizers
+RUN curl https://sh.rustup.rs -sSf | sh -s -- -y
+ENV PATH="$HOME/.cargo/bin:${PATH}"
+
 RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing]
 
 # Install latest release PyTorch
```
```diff
@@ -33,16 +33,16 @@
 - sections:
   - isExpanded: false
     sections:
-      # - local: tasks/sequence_classification
-      #   title: تصنيف النصوص
-      # - local: tasks/token_classification
-      #   title: تصنيف الرموز
+      - local: tasks/sequence_classification
+        title: تصنيف النصوص
+      - local: tasks/token_classification
+        title: تصنيف الرموز
       - local: tasks/question_answering
         title: الإجابة على الأسئلة
-      # - local: tasks/language_modeling
-      #   title: نمذجة اللغة السببية
-      # - local: tasks/masked_language_modeling
-      #   title: نمذجة اللغة المقنعة
+      - local: tasks/language_modeling
+        title: نمذجة اللغة السببية
+      - local: tasks/masked_language_modeling
+        title: نمذجة اللغة المقنعة
       - local: tasks/translation
         title: الترجمة
       - local: tasks/summarization
@@ -110,7 +110,7 @@
     title: أدلة المهام
 - sections:
   - local: fast_tokenizers
-    title: استخدم مجزئيات النصوص السريعة من 🤗 Tokenizers
+    title: استخدم مجزئيات النصوص السريعة من 🤗 Tokenizers
   - local: multilingual
     title: الاستدلال باستخدام نماذج متعددة اللغات
   - local: create_a_model
@@ -129,8 +129,6 @@
     title: التصدير إلى TFLite
   - local: torchscript
     title: التصدير إلى TorchScript
-  - local: benchmarks
-    title: المعايير
   - local: notebooks
     title: دفاتر الملاحظات مع الأمثلة
   - local: community
@@ -883,7 +881,7 @@
 #    - local: internal/pipelines_utils
 #      title: مرافق خطوط الأنابيب
 #    - local: internal/tokenization_utils
-#      title: مرافق مقسم النصوص
+#      title: مرافق مقسم النصوص
 #    - local: internal/trainer_utils
 #      title: مرافق المدرب
 #    - local: internal/generation_utils
```
@ -1,352 +0,0 @@
|
||||
# معايير الأداء
|
||||
<Tip warning={true}>
|
||||
|
||||
أدوات قياس الأداء من Hugging Face أصبحت قديمة،ويُنصح باستخدام مكتبات خارجية لقياس سرعة وتعقيد الذاكرة لنماذج Transformer.
|
||||
|
||||
</Tip>
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
لنلق نظرة على كيفية تقييم أداء نماذج 🤗 Transformers، وأفضل الممارسات، ومعايير الأداء المتاحة بالفعل.
|
||||
|
||||
يُمكن العثور على دفتر ملاحظات يشرح بالتفصيل كيفية قياس أداء نماذج 🤗 Transformers [هنا](https://github.com/huggingface/notebooks/tree/main/examples/benchmark.ipynb).
|
||||
|
||||
## كيفية قياس أداء نماذج 🤗 Transformers
|
||||
|
||||
تسمح الفئتان [`PyTorchBenchmark`] و [`TensorFlowBenchmark`] بتقييم أداء نماذج 🤗 Transformers بمرونة. تتيح لنا فئات التقييم قياس الأداء قياس _الاستخدام الأقصى للذاكرة_ و _الوقت اللازم_ لكل من _الاستدلال_ و _التدريب_.
|
||||
|
||||
<Tip>
|
||||
|
||||
هنا، ييُعرَّف _الاستدلال_ بأنه تمريرة أمامية واحدة، ويتم تعريف _التدريب_ بأنه تمريرة أمامية واحدة وتمريرة خلفية واحدة.
|
||||
|
||||
</Tip>
|
||||
|
||||
تتوقع فئات تقييم الأداء [`PyTorchBenchmark`] و [`TensorFlowBenchmark`] كائنًا من النوع [`PyTorchBenchmarkArguments`] و [`TensorFlowBenchmarkArguments`]، على التوالي، للتنفيذ. [`PyTorchBenchmarkArguments`] و [`TensorFlowBenchmarkArguments`] هي فئات بيانات وتحتوي على جميع التكوينات ذات الصلة لفئة تقييم الأداء المقابلة. في المثال التالي، يتم توضيح كيفية تقييم أداء نموذج BERT من النوع _bert-base-cased_.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
```py
|
||||
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
|
||||
|
||||
>>> args = PyTorchBenchmarkArguments(models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512])
|
||||
>>> benchmark = PyTorchBenchmark(args)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
|
||||
```py
|
||||
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
|
||||
|
||||
>>> args = TensorFlowBenchmarkArguments(
|
||||
... models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> benchmark = TensorFlowBenchmark(args)
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
هنا، يتم تمرير ثلاثة معامﻻت إلى فئات بيانات حجة قياس الأداء، وهي `models` و `batch_sizes` و `sequence_lengths`. المعامل `models` مطلوبة وتتوقع `قائمة` من بمعرّفات النموذج من [مركز النماذج](https://huggingface.co/models) تحدد معامﻻت القائمة `batch_sizes` و `sequence_lengths` حجم `input_ids` الذي يتم قياس أداء النموذج عليه. هناك العديد من المعلمات الأخرى التي يمكن تكوينها عبر فئات بيانات معال قياس الأداء. لمزيد من التفاصيل حول هذه المعلمات، يمكنك إما الرجوع مباشرة إلى الملفات `src/transformers/benchmark/benchmark_args_utils.py`، `src/transformers/benchmark/benchmark_args.py` (لـ PyTorch) و `src/transformers/benchmark/benchmark_args_tf.py` (لـ Tensorflow). أو، بدلاً من ذلك، قم بتشغيل أوامر shell التالية من المجلد الرئيسي لطباعة قائمة وصفية بجميع المعلمات القابلة للتكوين لـ PyTorch و Tensorflow على التوالي.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
```bash
|
||||
python examples/pytorch/benchmarking/run_benchmark.py --help
|
||||
```
|
||||
|
||||
يُمكن ببساطة تشغيل كائن التقييم الذي تم تهيئته عن طريق استدعاء `benchmark.run()`.
|
||||
|
||||
```py
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 0.006
|
||||
google-bert/bert-base-uncased 8 32 0.006
|
||||
google-bert/bert-base-uncased 8 128 0.018
|
||||
google-bert/bert-base-uncased 8 512 0.088
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 1227
|
||||
google-bert/bert-base-uncased 8 32 1281
|
||||
google-bert/bert-base-uncased 8 128 1307
|
||||
google-bert/bert-base-uncased 8 512 1539
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: PyTorch
|
||||
- use_torchscript: False
|
||||
- framework_version: 1.4.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 08:58:43.371351
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
|
||||
```bash
|
||||
python examples/tensorflow/benchmarking/run_benchmark_tf.py --help
|
||||
```
|
||||
|
||||
يُمكن بعد ذلك تشغيل كائن قياس الأداء الذي تم تهيئته عن طريق استدعاء `benchmark.run()`.
|
||||
|
||||
```py
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 0.005
|
||||
google-bert/bert-base-uncased 8 32 0.008
|
||||
google-bert/bert-base-uncased 8 128 0.022
|
||||
google-bert/bert-base-uncased 8 512 0.105
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 1330
|
||||
google-bert/bert-base-uncased 8 32 1330
|
||||
google-bert/bert-base-uncased 8 128 1330
|
||||
google-bert/bert-base-uncased 8 512 1770
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 202.11.0
|
||||
- framework: Tensorflow
|
||||
- use_xla: False
|
||||
- framework_version: 2.2.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:26:35.617317
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
بشكل افتراضي، يتم تقييم _الوقت_ و _الذاكرة المطلوبة_ لـ _الاستدلال_. في مثال المخرجات أعلاه، يُظهر القسمان الأولان النتيجة المقابلة لـ _وقت الاستدلال_ و _ذاكرة الاستدلال_. بالإضافة إلى ذلك، يتم طباعة جميع المعلومات ذات الصلة حول بيئة الحوسبة، على سبيل المثال نوع وحدة معالجة الرسومات (GPU)، والنظام، وإصدارات المكتبة، وما إلى ذلك، في القسم الثالث تحت _معلومات البيئة_. يمكن حفظ هذه المعلومات بشكل اختياري في ملف _.csv_ عند إضافة المعامل `save_to_csv=True` إلى [`PyTorchBenchmarkArguments`] و [`TensorFlowBenchmarkArguments`] على التوالي. في هذه الحالة، يتم حفظ كل قسم في ملف _.csv_ منفصل. يمكن اختيارًا تحديد مسار كل ملف _.csv_ عبر فئات بيانات معامل قياس الأداء.
|
||||
|
||||
بدلاً من تقييم النماذج المدربة مسبقًا عبر معرّف النموذج، على سبيل المثال `google-bert/bert-base-uncased`، يُمكن للمستخدم بدلاً من ذلك قياس أداء تكوين عشوائي لأي فئة نموذج متاحة. في هذه الحالة، يجب إدراج "قائمة" من التكوينات مع معامل قياس الأداء كما هو موضح أدناه.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
```py
|
||||
>>> from transformers import PyTorchBenchmark، PyTorchBenchmarkArguments، BertConfig
|
||||
|
||||
>>> args = PyTorchBenchmarkArguments(
|
||||
... models=["bert-base"، "bert-384-hid"، "bert-6-lay"]، batch_sizes=[8]، sequence_lengths=[8، 32، 128، 512]
|
||||
... )
|
||||
>>> config_base = BertConfig()
|
||||
>>> config_384_hid = BertConfig(hidden_size=384)
|
||||
>>> config_6_lay = BertConfig(num_hidden_layers=6)
|
||||
|
||||
>>> benchmark = PyTorchBenchmark(args، configs=[config_base، config_384_hid، config_6_lay])
|
||||
>>> benchmark.run()
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 128 0.006
|
||||
bert-base 8 512 0.006
|
||||
bert-base 8 128 0.018
|
||||
bert-base 8 512 0.088
|
||||
bert-384-hid 8 8 0.006
|
||||
bert-384-hid 8 32 0.006
|
||||
bert-384-hid 8 128 0.011
|
||||
bert-384-hid 8 512 0.054
|
||||
bert-6-lay 8 8 0.003
|
||||
bert-6-lay 8 32 0.004
|
||||
bert-6-lay 8 128 0.009
|
||||
bert-6-lay 8 512 0.044
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
## نتائج اختبار الأداء
|
||||
|
||||
في هذا القسم، يتم قياس _وقت الاستدلال_ و _الذاكرة المطلوبة_ للاستدلال، لمختلف تكوينات `BertModel`. يتم عرض النتائج في جدول، مع تنسيق مختلف قليلاً لكل من PyTorch و TensorFlow.
|
||||
|
||||
--------------------------------------------------------------------------------
|
||||
| اسم النموذج | حجم الدفعة | طول التسلسل | الذاكرة بالميغابايت |
|
||||
--------------------------------------------------------------------------------
|
||||
| bert-base | 8 | 8 | 1277 |
|
||||
| bert-base | 8 | 32 | 1281 |
|
||||
| bert-base | 8 | 128 | 1307 |
|
||||
| bert-base | 8 | 512 | 1539 |
|
||||
| bert-384-hid | 8 | 8 | 1005 |
|
||||
| bert-384-hid | 8 | 32 | 1027 |
|
||||
| bert-384-hid | 8 | 128 | 1035 |
|
||||
| bert-384-hid | 8 | 512 | 1255 |
|
||||
| bert-6-lay | 8 | 8 | 1097 |
|
||||
| bert-6-lay | 8 | 32 | 1101 |
|
||||
| bert-6-lay | 8 | 128 | 1127 |
|
||||
| bert-6-lay | 8 | 512 | 1359 |
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== معلومات البيئة ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: PyTorch
|
||||
- use_torchscript: False
|
||||
- framework_version: 1.4.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:35:25.143267
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
|
||||
```py
|
||||
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments, BertConfig
|
||||
|
||||
>>> args = TensorFlowBenchmarkArguments(
|
||||
... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> config_base = BertConfig()
|
||||
>>> config_384_hid = BertConfig(hidden_size=384)
|
||||
>>> config_6_lay = BertConfig(num_hidden_layers=6)
|
||||
|
||||
>>> benchmark = TensorFlowBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
|
||||
>>> benchmark.run()
|
||||
==================== نتائج السرعة في الاستدلال ====================
|
||||
--------------------------------------------------------------------------------
|
||||
| اسم النموذج | حجم الدفعة | طول التسلسل | الوقت بالثانية |
|
||||
--------------------------------------------------------------------------------
|
||||
| bert-base | 8 | 8 | 0.005 |
|
||||
| bert-base | 8 | 32 | 0.008 |
|
||||
| bert-base | 8 | 128 | 0.022 |
|
||||
| bert-base | 8 | 512 | 0.106 |
|
||||
| bert-384-hid | 8 | 8 | 0.005 |
|
||||
| bert-384-hid | 8 | 32 | 0.007 |
|
||||
| bert-384-hid | 8 | 128 | 0.018 |
|
||||
| bert-384-hid | 8 | 512 | 0.064 |
|
||||
| bert-6-lay | 8 | 8 | 0.002 |
|
||||
| bert-6-lay | 8 | 32 | 0.003 |
|
||||
| bert-6-lay | 8 | 128 | 0.0011 |
|
||||
| bert-6-lay | 8 | 512 | 0.074 |
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== نتائج الذاكرة في الاستدلال ====================
|
||||
--------------------------------------------------------------------------------
|
||||
| اسم النموذج | حجم الدفعة | طول التسلسل | الذاكرة بالميغابايت |
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
| bert-base | 8 | 8 | 1330 |
|
||||
| bert-base | 8 | 32 | 1330 |
|
||||
| bert-base | 8 | 128 | 1330 |
|
||||
| bert-base | 8 | 512 | 1770 |
|
||||
| bert-384-hid | 8 | 8 | 1330 |
|
||||
| bert-384-hid | 8 | 32 | 1330 |
|
||||
| bert-384-hid | 8 | 128 | 1330 |
|
||||
| bert-384-hid | 8 | 512 | 1540 |
|
||||
| bert-6-lay | 8 | 8 | 1330 |
|
||||
| bert-6-lay | 8 | 32 | 1330 |
|
||||
| bert-6-lay | 8 | 128 | 1330 |
|
||||
| bert-6-lay | 8 | 512 | 1540 |
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== معلومات البيئة ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: Tensorflow
|
||||
- use_xla: False
|
||||
- framework_version: 2.2.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:38:15.487125
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
مرة أخرى، يتم قياس _وقت الاستدلال_ و _الذاكرة المطلوبة_ للاستدلال، ولكن هذه المرة لتكوينات مخصصة لـ `BertModel`. يمكن أن تكون هذه الميزة مفيدة بشكل خاص عند اتخاذ قرار بشأن التكوين الذي يجب تدريب النموذج عليه.
|
||||
|
||||
## أفضل الممارسات في اختبار الأداء
|
||||
|
||||
يسرد هذا القسم بعض أفضل الممارسات التي يجب مراعاتها عند إجراء اختبار الأداء لنموذج ما.
|
||||
|
||||
- حالياً، يتم دعم اختبار الأداء على جهاز واحد فقط. عند إجراء الاختبار على وحدة معالجة الرسوميات (GPU)، يوصى بأن يقوم المستخدم بتحديد الجهاز الذي يجب تشغيل التعليمات البرمجية عليه من خلال تعيين متغير البيئة `CUDA_VISIBLE_DEVICES` في الشل، على سبيل المثال `export CUDA_VISIBLE_DEVICES=0` قبل تشغيل التعليمات البرمجية.
|
||||
- يجب تعيين الخيار `no_multi_processing` إلى `True` فقط لأغراض الاختبار والتصحيح. ولضمان قياس الذاكرة بدقة، يوصى بتشغيل كل اختبار ذاكرة في عملية منفصلة والتأكد من تعيين `no_multi_processing` إلى `True`.
|
||||
- يجب دائمًا ذكر معلومات البيئة عند مشاركة نتائج تقييم النموذج. يُمكن أن تختلف النتائج اختلافًا كبيرًا بين أجهزة GPU المختلفة وإصدارات المكتبات، وما إلى ذلك، لذلك فإن نتائج الاختبار بمفردها ليست مفيدة جدًا للمجتمع.
|
||||
|
||||
## مشاركة نتائج اختبار الأداء الخاص بك
|
||||
|
||||
في السابق، تم إجراء اختبار الأداء لجميع النماذج الأساسية المتاحة (10 في ذلك الوقت) لقياس _وقت الاستدلال_، عبر العديد من الإعدادات المختلفة: باستخدام PyTorch، مع TorchScript وبدونها، باستخدام TensorFlow، مع XLA وبدونه. تم إجراء جميع هذه الاختبارات على وحدات المعالجة المركزية (CPU) (باستثناء XLA TensorFlow) ووحدات معالجة الرسوميات (GPU).
|
||||
|
||||
يتم شرح هذا النهج بالتفصيل في [منشور المدونة هذا](https://medium.com/huggingface/benchmarking-transformers-pytorch-and-tensorflow-e2917fb891c2) وتتوفر النتائج [هنا](https://docs.google.com/spreadsheets/d/1sryqufw2D0XlUH4sq3e9Wnxu5EAQkaohzrJbd5HdQ_w/edit?usp=sharing).
|
||||
|
||||
مع أدوات اختبار الأداء الجديدة، أصبح من الأسهل من أي وقت مضى مشاركة نتائج اختبار الأداء الخاص بك مع المجتمع:
|
||||
|
||||
- [نتائج اختبار الأداء في PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/benchmarking/README.md).
|
||||
- [نتائج اختبار الأداء في TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/benchmarking/README.md).
|
docs/source/ar/tasks/language_modeling.md (new file, 422 lines)
@@ -0,0 +1,422 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# نمذجة اللغة السببية (Causal language modeling)
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
هناك نوعان من نمذجة اللغة، السببية والمقنعة. يوضح هذا الدليل نمذجة اللغة السببية.
|
||||
تُستخدم نماذج اللغة السببية غالبًا لتوليد النص. يمكنك استخدام هذه النماذج للتطبيقات الإبداعية مثل
|
||||
اختيار مغامرة النص الخاصة بك أو مساعد ترميز ذكي مثل Copilot أو CodeParrot.
|
||||
|
||||
<Youtube id="Vpjb1lu0MDk"/>
|
||||
|
||||
تتنبأ نمذجة اللغة السببية بالرمز التالي في تسلسل من الرموز، ولا يمكن للنموذج سوى الاهتمام بالرموز على
|
||||
اليسار. هذا يعني أن النموذج لا يمكنه رؤية الرموز المستقبلية. GPT-2 هو مثال على نموذج اللغة السببية.
|
||||
|
||||
سيوضح لك هذا الدليل كيفية:
|
||||
|
||||
1. ضبط نموذج [DistilGPT2](https://huggingface.co/distilbert/distilgpt2) بدقة على المجموعة الفرعية [r/askscience](https://www.reddit.com/r/askscience/) من مجموعة بيانات [ELI5](https://huggingface.co/datasets/eli5).
2. استخدام نموذجك المدرب للاستدلال.
|
||||
|
||||
<Tip>
|
||||
|
||||
لرؤية جميع البنى ونقاط التحقق المتوافقة مع هذه المهمة، نوصي بالاطلاع على [صفحة المهمة](https://huggingface.co/tasks/text-generation)
|
||||
|
||||
</Tip>
|
||||
|
||||
قبل أن تبدأ، تأكد من تثبيت جميع المكتبات الضرورية:
|
||||
|
||||
```bash
|
||||
pip install transformers datasets evaluate
|
||||
```
|
||||
|
||||
نحن نشجعك على تسجيل الدخول إلى حساب Hugging Face الخاص بك حتى تتمكن من تحميل ومشاركة نموذجك مع المجتمع. عند المطالبة، أدخل رمزك لتسجيل الدخول:
|
||||
|
||||
```py
|
||||
>>> from huggingface_hub import notebook_login
|
||||
|
||||
>>> notebook_login()
|
||||
```
|
||||
|
||||
## تحميل مجموعة بيانات ELI5
|
||||
|
||||
ابدأ بتحميل أول 5000 مثال من مجموعة بيانات [ELI5-Category](https://huggingface.co/datasets/eli5_category) باستخدام مكتبة 🤗 Datasets. سيمنحك هذا فرصة للتجربة والتأكد من أن كل شيء يعمل قبل قضاء المزيد من الوقت في التدريب على مجموعة البيانات الكاملة.
|
||||
|
||||
```py
|
||||
>>> from datasets import load_dataset
|
||||
|
||||
>>> eli5 = load_dataset("eli5_category", split="train[:5000]")
|
||||
```
|
||||
|
||||
قم بتقسيم مجموعة بيانات `train` إلى مجموعتي تدريب واختبار باستخدام الخاصية [`~datasets.Dataset.train_test_split`]:
|
||||
|
||||
```py
|
||||
>>> eli5 = eli5.train_test_split(test_size=0.2)
|
||||
```
|
||||
|
||||
ثم ألق نظرة على مثال:
|
||||
|
||||
```py
|
||||
>>> eli5["train"][0]
|
||||
{'q_id': '7h191n',
|
||||
'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?',
|
||||
'selftext': '',
|
||||
'category': 'Economics',
|
||||
'subreddit': 'explainlikeimfive',
|
||||
'answers': {'a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'],
|
||||
'text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.",
|
||||
'None yet. It has to be reconciled with a vastly different house bill and then passed again.',
|
||||
'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?',
|
||||
'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'],
|
||||
'score': [21, 19, 5, 3],
|
||||
'text_urls': [[],
|
||||
[],
|
||||
[],
|
||||
['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']]},
|
||||
'title_urls': ['url'],
|
||||
'selftext_urls': ['url']}
|
||||
```
|
||||
|
||||
على الرغم من أن هذا قد يبدو معقدًا، إلا أن ما يهمك فعليًا هو حقل `text`. ما يميز مهام نمذجة اللغة هو أنك لا تحتاج إلى تسميات (تُعرف أيضًا بالمهمة غير الخاضعة للإشراف) لأن الكلمة التالية تعمل كتسمية.
|
||||
|
||||
## معالجة مسبقة (Preprocess)
|
||||
|
||||
<Youtube id="ma1TrR7gE7I"/>
|
||||
|
||||
الخطوة التالية هي تحميل مُجزِّئ DistilGPT2 لمعالجة حقل `text` الفرعي:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
|
||||
```
|
||||
|
||||
ستلاحظ من المثال أعلاه، الحقل `text` هو في الواقع متداخل داخل `answers`. هذا يعني أنك ستحتاج إلى
|
||||
استخراج حقل `text` الفرعي من بنيته المتداخلة باستخدام الدالة [`flatten`](https://huggingface.co/docs/datasets/process#flatten):
|
||||
|
||||
```py
|
||||
>>> eli5 = eli5.flatten()
|
||||
>>> eli5["train"][0]
|
||||
{'q_id': '7h191n',
|
||||
'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?',
|
||||
'selftext': '',
|
||||
'category': 'Economics',
|
||||
'subreddit': 'explainlikeimfive',
|
||||
'answers.a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'],
|
||||
'answers.text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.",
|
||||
'None yet. It has to be reconciled with a vastly different house bill and then passed again.',
|
||||
'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?',
|
||||
'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'],
|
||||
'answers.score': [21, 19, 5, 3],
|
||||
'answers.text_urls': [[],
|
||||
[],
|
||||
[],
|
||||
['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']],
|
||||
'title_urls': ['url'],
|
||||
'selftext_urls': ['url']}
|
||||
```
|
||||
|
||||
أصبح كل حقل فرعي الآن عمودًا منفصلًا مسبوقًا بالبادئة `answers`، وأصبح حقل `text` قائمة. بدلًا من تجزئة كل جملة على حدة، حوِّل القائمة إلى سلسلة نصية واحدة حتى تتمكن من تجزئتها دفعة واحدة.

هذه أول دالة معالجة مسبقة لدمج قائمة السلاسل لكل مثال وتجزئة النتيجة:
|
||||
|
||||
```py
|
||||
>>> def preprocess_function(examples):
|
||||
... return tokenizer([" ".join(x) for x in examples["answers.text"]])
|
||||
```
|
||||
|
||||
لتطبيق دالة المعالجة المسبقة هذه على مجموعة البيانات بأكملها، استخدم الدالة 🤗 Datasets [`~datasets.Dataset.map`]. يمكنك تسريع هذه العملية `map` عن طريق تعيين `batched=True` لمعالجة عناصر متعددة من مجموعة البيانات في وقت واحد، وزيادة عدد العمليات مع `num_proc`. احذف أي أعمدة لا تحتاجها:
|
||||
|
||||
```py
|
||||
>>> tokenized_eli5 = eli5.map(
|
||||
... preprocess_function,
|
||||
... batched=True,
|
||||
... num_proc=4,
|
||||
... remove_columns=eli5["train"].column_names,
|
||||
... )
|
||||
```
|
||||
|
||||
تحتوي هذه المجموعة من البيانات على تسلسلات الرموز، ولكن بعضها أطول من الطول الأقصى للمدخلات للنموذج.
|
||||
|
||||
يمكنك الآن استخدام دالة ما قبل المعالجة ثانية لـ:
|
||||
|
||||
- تجميع كل التسلسلات.
|
||||
- تقسيم التسلسلات المجمّعة إلى أجزاء أقصر محددة، بحجم `block_size`، والتي يجب أن تكون أقصر من الطول الأقصى للمدخلات ومناسبة لذاكرة GPU.
|
||||
|
||||
```py
|
||||
>>> block_size = 128
|
||||
|
||||
>>> def group_texts(examples):
|
||||
... # ربط جميع النصوص.
|
||||
... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
|
||||
... total_length = len(concatenated_examples[list(examples.keys())[0]])
|
||||
... # نتجاهل الباقي الصغير، يمكننا إضافة الحشو إذا كان النموذج يدعمه بدلاً من هذا الإسقاط، يمكنك
|
||||
... # تخصيص هذا الجزء حسب احتياجاتك.
|
||||
... if total_length >= block_size:
|
||||
... total_length = (total_length // block_size) * block_size
|
||||
... # التقسيم إلى أجزاء بحجم block_size.
|
||||
... result = {
|
||||
... k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
|
||||
... for k, t in concatenated_examples.items()
|
||||
... }
|
||||
... result["labels"] = result["input_ids"].copy()
|
||||
... return result
|
||||
```
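قبل تطبيق الدالة على مجموعة البيانات كاملة، إليك مثال مصغّر مستقل (بمعرّفات رموز افتراضية وقيمة `block_size` مصغّرة للتوضيح فقط) يُظهر كيف تُدمج التسلسلات ثم تُقسّم إلى أجزاء متساوية الطول:

```py
>>> # قيم افتراضية للتوضيح فقط؛ نستخدم block_size صغيرًا ثم نعيده إلى 128 المستخدمة في بقية الدليل
>>> block_size = 4
>>> toy = {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8]], "attention_mask": [[1, 1, 1], [1, 1, 1, 1, 1]]}
>>> group_texts(toy)["input_ids"]
[[1, 2, 3, 4], [5, 6, 7, 8]]
>>> block_size = 128
```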
|
||||
|
||||
طبق دالة `group_texts` على كامل المجموعة من البيانات:
|
||||
|
||||
```py
|
||||
>>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4)
|
||||
```
|
||||
|
||||
الآن قم بإنشاء دفعة من الأمثلة باستخدام [`DataCollatorForLanguageModeling`]. من الأفضل أن تقوم بـ *الحشو الديناميكي* للجمل إلى الطول الأطول في الدفعة أثناء التجميع، بدلاً من حشو كامل المجموعة من البيانات إلى الطول الأقصى.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
استخدم رمز نهاية التسلسل كرمز للحشو، وعيّن `mlm=False`. سيؤدي هذا إلى استخدام المدخلات نفسها كتسميات مُزاحة إلى اليمين بعنصر واحد:
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorForLanguageModeling
|
||||
|
||||
>>> tokenizer.pad_token = tokenizer.eos_token
|
||||
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
|
||||
```
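لتوضيح فكرة الحشو الديناميكي، إليك مثال مصغّر بمعرّفات رموز افتراضية (ليست ناتجة عن المُجزِّئ) يُظهر أن التسلسل الأقصر يُحشى إلى أطول طول في الدفعة، وأن مواضع الحشو تُستبدل في `labels` بالقيمة -100 حتى تُستبعد من حساب الخسارة؛ القيم المعروضة للنواتج استرشادية:

```py
>>> # معرّفات رموز افتراضية للتوضيح فقط
>>> features = [{"input_ids": [10, 11, 12, 13]}, {"input_ids": [10, 11]}]
>>> batch = data_collator(features)
>>> batch["input_ids"].shape
torch.Size([2, 4])
>>> batch["labels"][1]
tensor([  10,   11, -100, -100])
```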
|
||||
|
||||
</pt>
|
||||
<tf>
|
||||
استخدم رمز نهاية التسلسل كرمز للحشو، وعيّن `mlm=False`. سيؤدي هذا إلى استخدام المدخلات نفسها كتسميات مُزاحة إلى اليمين بعنصر واحد:
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorForLanguageModeling
|
||||
|
||||
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf")
|
||||
```
|
||||
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## التدريب (Train)
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بتدريب نموذج باستخدام [`Trainer`], اطلع على [البرنامج التعليمي الأساسي](../training#train-with-pytorch-trainer)!
|
||||
|
||||
</Tip>
|
||||
|
||||
أنت جاهز الآن لبدء تدريب نموذجك! قم بتحميل DistilGPT2 باستخدام [`AutoModelForCausalLM`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForCausalLM, TrainingArguments, Trainer
|
||||
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
|
||||
```
|
||||
|
||||
في هذه المرحلة، تبقى ثلاث خطوات فقط:
|
||||
|
||||
1. حدد معلمات التدريب الخاصة بك في [`TrainingArguments`]. المعامل الوحيد المطلوب هو `output_dir` الذي يحدد أين سيتم حفظ نموذجك. ستقوم بدفع هذا النموذج إلى Hub بتحديد `push_to_hub=True` (يجب أن تكون مسجلاً الدخول إلى Hugging Face لتحميل نموذجك).
|
||||
2. قم بتمرير معاملات التدريب إلى [`Trainer`] إلى جانب النموذج، والمجموعات من البيانات، ومجمّع البيانات.
|
||||
3. قم باستدعاء [`~Trainer.train`] لتدريب نموذجك.
|
||||
|
||||
```py
|
||||
>>> training_args = TrainingArguments(
|
||||
... output_dir="my_awesome_eli5_clm-model",
|
||||
... eval_strategy="epoch",
|
||||
... learning_rate=2e-5,
|
||||
... weight_decay=0.01,
|
||||
... push_to_hub=True,
|
||||
... )
|
||||
|
||||
>>> trainer = Trainer(
|
||||
... model=model,
|
||||
... args=training_args,
|
||||
... train_dataset=lm_dataset["train"],
|
||||
... eval_dataset=lm_dataset["test"],
|
||||
... data_collator=data_collator,
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
|
||||
>>> trainer.train()
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، استخدم طريقة [`~transformers.Trainer.evaluate`] لتقييم نموذجك والحصول على درجة الحيرة (perplexity):
|
||||
|
||||
```py
|
||||
>>> import math
|
||||
|
||||
>>> eval_results = trainer.evaluate()
|
||||
>>> print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")
|
||||
Perplexity: 49.61
|
||||
```
|
||||
|
||||
ثم شارك نموذجك على Hub باستخدام طريقة [`~transformers.Trainer.push_to_hub`] حتى يتمكن الجميع من استخدام نموذجك:
|
||||
|
||||
```py
|
||||
>>> trainer.push_to_hub()
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بتدريب نموذج باستخدام Keras، اطلع على [البرنامج التعليمي الأساسي](../training#train-a-tensorflow-model-with-keras)!
|
||||
|
||||
</Tip>
|
||||
لتدريب نموذج في TensorFlow، ابدأ بإعداد دالة المحسن، وجدول معدل التعلم، وبعض معاملات التدريب:
|
||||
|
||||
```py
|
||||
>>> from transformers import create_optimizer, AdamWeightDecay
|
||||
|
||||
>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)
|
||||
```
|
||||
|
||||
ثم يمكنك تحميل DistilGPT2 باستخدام [`TFAutoModelForCausalLM`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForCausalLM
|
||||
|
||||
>>> model = TFAutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
|
||||
```
|
||||
|
||||
حول مجموعات بياناتك إلى تنسيق `tf.data.Dataset` باستخدام [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
|
||||
|
||||
```py
|
||||
>>> tf_train_set = model.prepare_tf_dataset(
|
||||
... lm_dataset["train"],
|
||||
... shuffle=True,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
|
||||
>>> tf_test_set = model.prepare_tf_dataset(
|
||||
... lm_dataset["test"],
|
||||
... shuffle=False,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
```
|
||||
|
||||
قم بتهيئة النموذج للتدريب باستخدام [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). لاحظ أن جميع نماذج Transformers لديها دالة خسارة ذات صلة بالمهمة الافتراضية، لذلك لا تحتاج إلى تحديد واحدة ما لم ترغب في ذلك:
|
||||
|
||||
```py
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> model.compile(optimizer=optimizer)  # لا حاجة لتمرير وسيطة الخسارة!
|
||||
```
|
||||
|
||||
آخر ما يتبقى إعداده قبل بدء التدريب هو توفير طريقة لدفع نموذجك إلى Hub. يمكن القيام بذلك عن طريق تحديد مكان دفع نموذجك والمُجزِّئ اللغوي في [`~transformers.PushToHubCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import PushToHubCallback
|
||||
|
||||
>>> callback = PushToHubCallback(
|
||||
... output_dir="my_awesome_eli5_clm-model",
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
```
|
||||
|
||||
أخيراً، أنت جاهز لبدء تدريب نموذجك! قم باستدعاء [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) مع مجموعات بيانات التدريب والتحقق من الصحة، وعدد العصور، والتعليقات الخاصة بك لتدريب النموذج:
|
||||
|
||||
```py
|
||||
>>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=[callback])
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، يتم تحميل نموذجك تلقائيًا إلى Hub حتى يتمكن الجميع من استخدامه!
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
<Tip>
|
||||
|
||||
للحصول على مثال أكثر تعمقًا حول كيفية تدريب نموذج للنمذجة اللغوية السببية، اطلع على الدفتر المقابل
|
||||
[دفتر PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)
|
||||
أو [دفتر TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
|
||||
|
||||
</Tip>
|
||||
|
||||
## الاستدلال (Inference)
|
||||
|
||||
رائع، الآن بعد أن قمت بتدريب نموذج، يمكنك استخدامه للاستدلال!
|
||||
|
||||
قم بابتكار سؤال تود توليد نص منه:
|
||||
|
||||
```py
|
||||
>>> prompt = "Somatic hypermutation allows the immune system to"
|
||||
```
|
||||
|
||||
أبسط طريقة لتجربة نموذجك المدرب للاستدلال هي استخدامه في [`pipeline`]. قم بتنفيذ `pipeline` لتوليد النص مع نموذجك، ومرر نصك إليه:
|
||||
|
||||
```py
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> generator = pipeline("text-generation", model="username/my_awesome_eli5_clm-model")
|
||||
>>> generator(prompt)
|
||||
[{'generated_text': "Somatic hypermutation allows the immune system to be able to effectively reverse the damage caused by an infection.\n\n\nThe damage caused by an infection is caused by the immune system's ability to perform its own self-correcting tasks."}]
|
||||
```
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
قم بتجزئة النص وأرجِع `input_ids` على شكل تنسورات PyTorch:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_clm-model")
|
||||
>>> inputs = tokenizer(prompt, return_tensors="pt").input_ids
|
||||
```
|
||||
|
||||
استخدم طريقة [`~generation.GenerationMixin.generate`] لتوليد النص.
|
||||
للمزيد من التفاصيل حول استراتيجيات توليد النص المختلفة والبارامترات للتحكم في التوليد، راجع صفحة [استراتيجيات توليد النص](../generation_strategies).
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForCausalLM
|
||||
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("username/my_awesome_eli5_clm-model")
|
||||
>>> outputs = model.generate(inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95)
|
||||
```
|
||||
|
||||
فك ترميز الرموز المولدة مرة أخرى إلى نص:
|
||||
|
||||
```py
|
||||
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
["Somatic hypermutation allows the immune system to react to drugs with the ability to adapt to a different environmental situation. In other words, a system of 'hypermutation' can help the immune system to adapt to a different environmental situation or in some cases even a single life. In contrast, researchers at the University of Massachusetts-Boston have found that 'hypermutation' is much stronger in mice than in humans but can be found in humans, and that it's not completely unknown to the immune system. A study on how the immune system"]
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
قم بتقسيم النص وإرجاع `input_ids` كـ TensorFlow tensors:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_clm-model")
|
||||
>>> inputs = tokenizer(prompt, return_tensors="tf").input_ids
|
||||
```
|
||||
|
||||
استخدم طريقة [`~transformers.generation_tf_utils.TFGenerationMixin.generate`] لتوليد النص. للمزيد من التفاصيل حول استراتيجيات توليد النص المختلفة والمعاملات المتحكمة في التوليد، راجع صفحة [استراتيجيات توليد النص](../generation_strategies).
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForCausalLM
|
||||
|
||||
>>> model = TFAutoModelForCausalLM.from_pretrained("username/my_awesome_eli5_clm-model")
|
||||
>>> outputs = model.generate(input_ids=inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95)
|
||||
```
|
||||
|
||||
فك ترميز الرموز المولدة مرة أخرى إلى نص:
|
||||
|
||||
```py
|
||||
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
['Somatic hypermutation allows the immune system to detect the presence of other viruses as they become more prevalent. Therefore, researchers have identified a high proportion of human viruses. The proportion of virus-associated viruses in our study increases with age. Therefore, we propose a simple algorithm to detect the presence of these new viruses in our samples as a sign of improved immunity. A first study based on this algorithm, which will be published in Science on Friday, aims to show that this finding could translate into the development of a better vaccine that is more effective for']
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
docs/source/ar/tasks/masked_language_modeling.md (new file, 442 lines)
@@ -0,0 +1,442 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# نمذجة اللغة المقنعة (Masked language modeling)
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
<Youtube id="mqElG5QJWUg"/>
|
||||
|
||||
تتنبأ نمذجة اللغة المقنعة برمز مقنع في تسلسل، ويمكن للنموذج الانتباه إلى الرموز بشكل ثنائي الاتجاه. هذا
|
||||
يعني أن النموذج لديه إمكانية الوصول الكاملة إلى الرموز الموجودة على اليسار واليمين. تعد نمذجة اللغة المقنعة ممتازة للمهام التي
|
||||
تتطلب فهمًا سياقيًا جيدًا لتسلسل كامل. BERT هو مثال على نموذج لغة مقنع.
|
||||
|
||||
سيوضح لك هذا الدليل كيفية:
|
||||
|
||||
1. ضبط نموذج [DistilRoBERTa](https://huggingface.co/distilbert/distilroberta-base) بدقة على المجموعة الفرعية [r/askscience](https://www.reddit.com/r/askscience/) من مجموعة بيانات [ELI5](https://huggingface.co/datasets/eli5).
2. استخدام نموذجك المدرب للاستدلال.
|
||||
|
||||
<Tip>
|
||||
|
||||
لمعرفة جميع البنى والنسخ المتوافقة مع هذه المهمة، نوصي بالتحقق من [صفحة المهمة](https://huggingface.co/tasks/fill-mask)
|
||||
|
||||
</Tip>
|
||||
|
||||
قبل أن تبدأ، تأكد من تثبيت جميع المكتبات الضرورية:
|
||||
|
||||
```bash
|
||||
pip install transformers datasets evaluate
|
||||
```
|
||||
|
||||
نحن نشجعك على تسجيل الدخول إلى حساب Hugging Face الخاص بك حتى تتمكن من تحميل ومشاركة نموذجك مع المجتمع. عندما تتم مطالبتك، أدخل رمزك لتسجيل الدخول:
|
||||
|
||||
```py
|
||||
>>> from huggingface_hub import notebook_login
|
||||
|
||||
>>> notebook_login()
|
||||
```
|
||||
|
||||
## تحميل مجموعة بيانات ELI5
|
||||
|
||||
ابدأ بتحميل أول 5000 مثال من مجموعة بيانات [ELI5-Category](https://huggingface.co/datasets/eli5_category) باستخدام مكتبة 🤗 Datasets. سيعطيك هذا فرصة للتجربة والتأكد من أن كل شيء يعمل قبل قضاء المزيد من الوقت في التدريب على مجموعة البيانات الكاملة.
|
||||
|
||||
```py
|
||||
>>> from datasets import load_dataset
|
||||
|
||||
>>> eli5 = load_dataset("eli5_category", split="train[:5000]")
|
||||
```
|
||||
|
||||
قم بتقسيم مجموعة البيانات `train` إلى مجموعتي تدريب واختبار باستخدام الدالة [`~datasets.Dataset.train_test_split`]:
|
||||
|
||||
```py
|
||||
>>> eli5 = eli5.train_test_split(test_size=0.2)
|
||||
```
|
||||
|
||||
ثم ألق نظرة على مثال:
|
||||
|
||||
```py
|
||||
>>> eli5["train"][0]
|
||||
{'q_id': '7h191n',
|
||||
'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?',
|
||||
'selftext': '',
|
||||
'category': 'Economics',
|
||||
'subreddit': 'explainlikeimfive',
|
||||
'answers': {'a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'],
|
||||
'text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.",
|
||||
'None yet. It has to be reconciled with a vastly different house bill and then passed again.',
|
||||
'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?',
|
||||
'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'],
|
||||
'score': [21, 19, 5, 3],
|
||||
'text_urls': [[],
|
||||
[],
|
||||
[],
|
||||
['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']]},
|
||||
'title_urls': ['url'],
|
||||
'selftext_urls': ['url']}
|
||||
```
|
||||
|
||||
على الرغم من أن هذا قد يبدو كثيرًا، إلا أنك مهتم حقًا بحقل `text`. ما هو رائع حول مهام نمذجة اللغة هو أنك لا تحتاج إلى تسميات (تُعرف أيضًا باسم المهمة غير الخاضعة للإشراف) لأن الكلمة التالية *هي* التسمية.
|
||||
|
||||
## معالجة مسبقة (Preprocess)
|
||||
|
||||
<Youtube id="8PmhEIXhBvI"/>
|
||||
|
||||
بالنسبة لنمذجة اللغة المقنعة، فإن الخطوة التالية هي تحميل مُجزِّئ DistilRoBERTa لمعالجة حقل `text` الفرعي:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilroberta-base")
|
||||
```
|
||||
|
||||
ستلاحظ من المثال أعلاه، أن حقل `text` موجود بالفعل داخل `answers`. هذا يعني أنك ستحتاج إلى استخراج حقل `text` الفرعي من بنيته المضمنة باستخدام الدالة [`flatten`](https://huggingface.co/docs/datasets/process#flatten):
|
||||
|
||||
```py
|
||||
>>> eli5 = eli5.flatten()
|
||||
>>> eli5["train"][0]
|
||||
{'q_id': '7h191n',
|
||||
'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?',
|
||||
'selftext': '',
|
||||
'category': 'Economics',
|
||||
'subreddit': 'explainlikeimfive',
|
||||
'answers.a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'],
|
||||
'answers.text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.",
|
||||
'None yet. It has to be reconciled with a vastly different house bill and then passed again.',
|
||||
'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?',
|
||||
'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'],
|
||||
'answers.score': [21, 19, 5, 3],
|
||||
'answers.text_urls': [[],
|
||||
[],
|
||||
[],
|
||||
['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']],
|
||||
'title_urls': ['url'],
|
||||
'selftext_urls': ['url']}
|
||||
```
|
||||
|
||||
كل حقل فرعي هو الآن عمود منفصل كما هو موضح بواسطة بادئة `answers`، وحقل `text` هو قائمة الآن. بدلاً من
|
||||
معالجة كل جملة بشكل منفصل، قم بتحويل القائمة إلى سلسلة حتى تتمكن من معالجتها بشكل مشترك.
|
||||
|
||||
هنا أول دالة معالجة مسبقة لربط قائمة السلاسل لكل مثال ومعالجة النتيجة:
|
||||
|
||||
```py
|
||||
>>> def preprocess_function(examples):
|
||||
... return tokenizer([" ".join(x) for x in examples["answers.text"]])
|
||||
```
|
||||
|
||||
لتطبيق دالة المعالجة المسبقة على مجموعة البيانات بأكملها، استخدم الدالة 🤗 Datasets [`~datasets.Dataset.map`]. يمكنك تسريع دالة `map` عن طريق تعيين `batched=True` لمعالجة عدة عناصر في وقت واحد، وزيادة عدد العمليات باستخدام `num_proc`. احذف أي أعمدة غير ضرورية:
|
||||
|
||||
```py
|
||||
>>> tokenized_eli5 = eli5.map(
|
||||
... preprocess_function,
|
||||
... batched=True,
|
||||
... num_proc=4,
|
||||
... remove_columns=eli5["train"].column_names,
|
||||
... )
|
||||
```
|
||||
|
||||
|
||||
تحتوي مجموعة البيانات هذه على تسلسلات رمزية، ولكن بعضها أطول من الطول الأقصى للمدخلات للنموذج.
|
||||
|
||||
يمكنك الآن استخدام دالة معالجة مسبقة ثانية لـ:
|
||||
- تجميع جميع التسلسلات
|
||||
- تقسيم التسلسلات المجمّعة إلى أجزاء أقصر محددة بـ `block_size`، والتي يجب أن تكون أقصر من الحد الأقصى لطول المدخلات ومناسبة لذاكرة GPU.
|
||||
|
||||
```py
|
||||
>>> block_size = 128
|
||||
|
||||
>>> def group_texts(examples):
|
||||
... # تجميع جميع النصوص.
|
||||
... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
|
||||
... total_length = len(concatenated_examples[list(examples.keys())[0]])
|
||||
... # نتجاهل الجزء المتبقي الصغير، يمكننا إضافة الحشو إذا كان النموذج يدعمه بدلاً من هذا الإسقاط، يمكنك
|
||||
... # تخصيص هذا الجزء حسب احتياجاتك.
|
||||
... if total_length >= block_size:
|
||||
... total_length = (total_length // block_size) * block_size
|
||||
... # تقسيمها إلى أجزاء بحجم block_size.
|
||||
... result = {
|
||||
... k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
|
||||
... for k, t in concatenated_examples.items()
|
||||
... }
|
||||
... return result
|
||||
```
|
||||
|
||||
طبق دالة `group_texts` على مجموعة البيانات بأكملها:
|
||||
|
||||
```py
|
||||
>>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4)
|
||||
```
|
||||
|
||||
الآن، قم بإنشاء دفعة من الأمثلة باستخدام [`DataCollatorForLanguageModeling`]. من الأكثر كفاءة أن تقوم بـ *الحشو الديناميكي* ليصل طولها إلى أطول جملة في الدفعة أثناء التجميع، بدلاً من حشو مجموعة البيانات بأكملها إلى الطول الأقصى.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
استخدم رمز نهاية التسلسل كرمز الحشو وحدد `mlm_probability` لحجب الرموز عشوائياً كل مرة تكرر فيها البيانات:
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorForLanguageModeling
|
||||
|
||||
>>> tokenizer.pad_token = tokenizer.eos_token
|
||||
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
|
||||
استخدم رمز نهاية التسلسل كرمز الحشو وحدد `mlm_probability` لحجب الرموز عشوائياً كل مرة تكرر فيها البيانات:
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorForLanguageModeling
|
||||
|
||||
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15, return_tensors="tf")
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## التدريب (Train)
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بضبط نموذج بدقة باستخدام [`Trainer`]، فألق نظرة على الدليل الأساسي [هنا](../training#train-with-pytorch-trainer)!
|
||||
|
||||
</Tip>
|
||||
|
||||
أنت مستعد الآن لبدء تدريب نموذجك! قم بتحميل DistilRoBERTa باستخدام [`AutoModelForMaskedLM`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForMaskedLM
|
||||
|
||||
>>> model = AutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base")
|
||||
```
|
||||
|
||||
في هذه المرحلة، تبقى ثلاث خطوات فقط:
|
||||
|
||||
1. حدد معلمات التدريب الخاصة بك في [`TrainingArguments`]. المعلمة الوحيدة المطلوبة هي `output_dir` والتي تحدد مكان حفظ نموذجك. ستقوم بدفع هذا النموذج إلى Hub عن طريق تعيين `push_to_hub=True` (يجب أن تكون مسجلاً الدخول إلى Hugging Face لتحميل نموذجك).
|
||||
2. قم بتمرير معلمات التدريب إلى [`Trainer`] مع النموذج، ومجموعات البيانات، ومجمّع البيانات.
|
||||
3. قم باستدعاء [`~Trainer.train`] لتعديل نموذجك.
|
||||
|
||||
```py
|
||||
>>> from transformers import TrainingArguments, Trainer

>>> training_args = TrainingArguments(
|
||||
... output_dir="my_awesome_eli5_mlm_model",
|
||||
... eval_strategy="epoch",
|
||||
... learning_rate=2e-5,
|
||||
... num_train_epochs=3,
|
||||
... weight_decay=0.01,
|
||||
... push_to_hub=True,
|
||||
... )
|
||||
|
||||
>>> trainer = Trainer(
|
||||
... model=model,
|
||||
... args=training_args,
|
||||
... train_dataset=lm_dataset["train"],
|
||||
... eval_dataset=lm_dataset["test"],
|
||||
... data_collator=data_collator,
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
|
||||
>>> trainer.train()
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، استخدم طريقة [`~transformers.Trainer.evaluate`] لتقييم النموذج والحصول على مقياس
|
||||
الحيرة:
|
||||
|
||||
```py
|
||||
>>> import math
|
||||
|
||||
>>> eval_results = trainer.evaluate()
|
||||
>>> print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")
|
||||
Perplexity: 8.76
|
||||
```
|
||||
|
||||
ثم شارك نموذجك على Hub باستخدام طريقة [`~transformers.Trainer.push_to_hub`] حتى يتمكن الجميع من استخدام نموذجك:
|
||||
|
||||
```py
|
||||
>>> trainer.push_to_hub()
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بتعديل نموذج باستخدام Keras، ألق نظرة على الدليل الأساسي [هنا](../training#train-a-tensorflow-model-with-keras)!
|
||||
|
||||
</Tip>
|
||||
لضبط نموذج بدقة في TensorFlow، ابدأ بإعداد دالة المحسن، وجدول معدل التعلم، وبعض معلمات التدريب:
|
||||
|
||||
```py
|
||||
>>> from transformers import create_optimizer, AdamWeightDecay
|
||||
|
||||
>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)
|
||||
```
|
||||
|
||||
ثم يمكنك تحميل DistilRoBERTa باستخدام [`TFAutoModelForMaskedLM`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForMaskedLM
|
||||
|
||||
>>> model = TFAutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base")
|
||||
```
|
||||
|
||||
قم بتحويل مجموعات بياناتك إلى تنسيق `tf.data.Dataset` باستخدام [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
|
||||
|
||||
```py
|
||||
>>> tf_train_set = model.prepare_tf_dataset(
|
||||
... lm_dataset["train"],
|
||||
... shuffle=True,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
|
||||
>>> tf_test_set = model.prepare_tf_dataset(
|
||||
... lm_dataset["test"],
|
||||
... shuffle=False,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
```
|
||||
|
||||
قم بتهيئة النموذج للتدريب باستخدام [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). لاحظ أن نماذج Transformers لديها جميعها دالة خسارة افتراضية ذات صلة بالمهمة، لذلك لا تحتاج إلى تحديد واحدة ما لم تكن تريد ذلك:
|
||||
|
||||
```py
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> model.compile(optimizer=optimizer)  # لا حاجة لتمرير وسيطة الخسارة!
|
||||
```
|
||||
|
||||
آخر ما يتبقى إعداده قبل بدء التدريب هو توفير طريقة لدفع نموذجك إلى Hub. يمكن القيام بذلك عن طريق تحديد مكان دفع نموذجك والمُجزِّئ اللغوي في [`~transformers.PushToHubCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import PushToHubCallback
|
||||
|
||||
>>> callback = PushToHubCallback(
|
||||
... output_dir="my_awesome_eli5_mlm_model",
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
```
|
||||
|
||||
أخيراً، أنت مستعد لبدء تدريب نموذجك! قم باستدعاء [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) مع مجموعات بيانات التدريب والتحقق، وعدد العصور، والتعليقات الخاصة بك لتعديل النموذج:
|
||||
|
||||
```py
|
||||
>>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=[callback])
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، يتم تحميل نموذجك تلقائياً إلى Hub حتى يتمكن الجميع من استخدامه!
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
<Tip>
|
||||
|
||||
لمثال أكثر تفصيلاً حول كيفية تعديل نموذج للنمذجة اللغوية المقنعة، ألق نظرة على الدفتر المقابل
|
||||
[دفتر PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)
|
||||
أو [دفتر TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
|
||||
|
||||
</Tip>
|
||||
|
||||
## الاستدلال
|
||||
|
||||
رائع، الآن بعد أن قمت بتعديل نموذج، يمكنك استخدامه للاستدلال!
|
||||
|
||||
جهّز بعض النصوص التي تريد أن يملأ النموذج الفراغات فيها، واستخدم الرمز الخاص `<mask>` للإشارة إلى الفراغ:
|
||||
|
||||
```py
|
||||
>>> text = "The Milky Way is a <mask> galaxy."
|
||||
```
|
||||
|
||||
أبسط طريقة لتجربة نموذجك المعدل للاستدلال هي استخدامه في [`pipeline`]. قم بإنشاء كائن `pipeline` لملء الفراغ مع نموذجك، ومرر نصك إليه. إذا أردت، يمكنك استخدام معلمة `top_k` لتحديد عدد التنبؤات التي تريد إرجاعها:
|
||||
|
||||
```py
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> mask_filler = pipeline("fill-mask", "username/my_awesome_eli5_mlm_model")
|
||||
>>> mask_filler(text, top_k=3)
|
||||
[{'score': 0.5150994658470154,
|
||||
'token': 21300,
|
||||
'token_str': ' spiral',
|
||||
'sequence': 'The Milky Way is a spiral galaxy.'},
|
||||
{'score': 0.07087188959121704,
|
||||
'token': 2232,
|
||||
'token_str': ' massive',
|
||||
'sequence': 'The Milky Way is a massive galaxy.'},
|
||||
{'score': 0.06434620916843414,
|
||||
'token': 650,
|
||||
'token_str': ' small',
|
||||
'sequence': 'The Milky Way is a small galaxy.'}]
|
||||
```
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
قم بتجزئة النص وإرجاع `input_ids` كمتجهات PyTorch. ستحتاج أيضًا إلى تحديد موضع رمز `<mask>`:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_mlm_model")
|
||||
>>> import torch

>>> inputs = tokenizer(text, return_tensors="pt")
>>> mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1]
|
||||
```
|
||||
|
||||
قم بتمرير المدخلات إلى النموذج وإرجاع `logits` للرمز المقنع:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForMaskedLM
|
||||
|
||||
>>> model = AutoModelForMaskedLM.from_pretrained("username/my_awesome_eli5_mlm_model")
|
||||
>>> logits = model(**inputs).logits
|
||||
>>> mask_token_logits = logits[0, mask_token_index, :]
|
||||
```
|
||||
|
||||
ثم قم بإرجاع الرموز الثلاثة المقنعة ذات الاحتمالية الأعلى وطباعتها:
|
||||
|
||||
```py
|
||||
>>> top_3_tokens = torch.topk(mask_token_logits, 3, dim=1).indices[0].tolist()
|
||||
|
||||
>>> for token in top_3_tokens:
|
||||
... print(text.replace(tokenizer.mask_token, tokenizer.decode([token])))
|
||||
The Milky Way is a spiral galaxy.
|
||||
The Milky Way is a massive galaxy.
|
||||
The Milky Way is a small galaxy.
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
قم بتقسيم النص إلى رموز وإرجاع `input_ids` كـ TensorFlow tensors. ستحتاج أيضًا إلى تحديد موضع رمز `<mask>`:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_mlm_model")
|
||||
>>> import tensorflow as tf

>>> inputs = tokenizer(text, return_tensors="tf")
>>> mask_token_index = tf.where(inputs["input_ids"] == tokenizer.mask_token_id)[0, 1]
|
||||
```
|
||||
|
||||
قم بتمرير المدخلات إلى النموذج وإرجاع `logits` للرمز المقنع:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForMaskedLM
|
||||
|
||||
>>> model = TFAutoModelForMaskedLM.from_pretrained("username/my_awesome_eli5_mlm_model")
|
||||
>>> logits = model(**inputs).logits
|
||||
>>> mask_token_logits = logits[0, mask_token_index, :]
|
||||
```
|
||||
|
||||
ثم قم بإرجاع الرموز الثلاثة المقنعة ذات الاحتمالية الأعلى وطباعتها:
|
||||
|
||||
```py
|
||||
>>> top_3_tokens = tf.math.top_k(mask_token_logits, 3).indices.numpy()
|
||||
|
||||
>>> for token in top_3_tokens:
|
||||
... print(text.replace(tokenizer.mask_token, tokenizer.decode([token])))
|
||||
The Milky Way is a spiral galaxy.
|
||||
The Milky Way is a massive galaxy.
|
||||
The Milky Way is a small galaxy.
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
docs/source/ar/tasks/sequence_classification.md (new file, 387 lines)
@@ -0,0 +1,387 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# تصنيف النص(Text classification)
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
<Youtube id="leNG9fN9FQU"/>
|
||||
|
||||
تصنيف النص هو مهمة NLP شائعة حيث يُعيّن تصنيفًا أو فئة للنص. تستخدم بعض أكبر الشركات تصنيف النصوص في الإنتاج لمجموعة واسعة من التطبيقات العملية. أحد أكثر أشكال تصنيف النص شيوعًا هو تحليل المشاعر، والذي يقوم بتعيين تسمية مثل 🙂 إيجابية، 🙁 سلبية، أو 😐 محايدة لتسلسل نصي.
|
||||
|
||||
سيوضح لك هذا الدليل كيفية:
|
||||
|
||||
1. ضبط [DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased) على مجموعة بيانات [IMDb](https://huggingface.co/datasets/imdb) لتحديد ما إذا كانت مراجعة الفيلم إيجابية أو سلبية.
|
||||
2. استخدام نموذج الضبط الدقيق للتنبؤ.
|
||||
|
||||
<Tip>
|
||||
|
||||
لرؤية جميع البنى ونقاط التحقق المتوافقة مع هذه المهمة، نوصي بالتحقق من [صفحة المهمة](https://huggingface.co/tasks/text-classification).
|
||||
|
||||
</Tip>
|
||||
|
||||
قبل أن تبدأ، تأكد من تثبيت جميع المكتبات الضرورية:
|
||||
|
||||
```bash
|
||||
pip install transformers datasets evaluate accelerate
|
||||
```
|
||||
|
||||
نحن نشجعك على تسجيل الدخول إلى حساب Hugging Face الخاص بك حتى تتمكن من تحميل ومشاركة نموذجك مع المجتمع. عند المطالبة، أدخل رمزك لتسجيل الدخول:
|
||||
|
||||
```py
|
||||
>>> from huggingface_hub import notebook_login
|
||||
|
||||
>>> notebook_login()
|
||||
```
|
||||
|
||||
## تحميل مجموعة بيانات IMDb
|
||||
|
||||
ابدأ بتحميل مجموعة بيانات IMDb من مكتبة 🤗 Datasets:
|
||||
|
||||
```py
|
||||
>>> from datasets import load_dataset
|
||||
|
||||
>>> imdb = load_dataset("imdb")
|
||||
```
|
||||
|
||||
ثم ألق نظرة على مثال:
|
||||
|
||||
```py
|
||||
>>> imdb["test"][0]
|
||||
{
|
||||
"label": 0,
|
||||
"text": "I love sci-fi and am willing to put up with a lot. Sci-fi movies/TV are usually underfunded, under-appreciated and misunderstood. I tried to like this, I really did, but it is to good TV sci-fi as Babylon 5 is to Star Trek (the original). Silly prosthetics, cheap cardboard sets, stilted dialogues, CG that doesn't match the background, and painfully one-dimensional characters cannot be overcome with a 'sci-fi' setting. (I'm sure there are those of you out there who think Babylon 5 is good sci-fi TV. It's not. It's clichéd and uninspiring.) While US viewers might like emotion and character development, sci-fi is a genre that does not take itself seriously (cf. Star Trek). It may treat important issues, yet not as a serious philosophy. It's really difficult to care about the characters here as they are not simply foolish, just missing a spark of life. Their actions and reactions are wooden and predictable, often painful to watch. The makers of Earth KNOW it's rubbish as they have to always say \"Gene Roddenberry's Earth...\" otherwise people would not continue watching. Roddenberry's ashes must be turning in their orbit as this dull, cheap, poorly edited (watching it without advert breaks really brings this home) trudging Trabant of a show lumbers into space. Spoiler. So, kill off a main character. And then bring him back as another actor. Jeeez! Dallas all over again.",
|
||||
}
|
||||
```
|
||||
|
||||
هناك حقولان في هذه المجموعة من البيانات:
|
||||
|
||||
- `text`: نص مراجعة الفيلم.
|
||||
- `label`: قيمة إما `0` لمراجعة سلبية أو `1` لمراجعة إيجابية.
|
||||
|
||||
## المعالجة المسبقة(Preprocess)
|
||||
|
||||
الخطوة التالية هي تحميل مُجزِّئ DistilBERT لمعالجة حقل `text`:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
أنشئ دالة لتجزئة حقل `text` واقتطاع السلاسل النصية بحيث لا يتجاوز طولها الحد الأقصى لمدخلات DistilBERT:
|
||||
|
||||
```py
|
||||
>>> def preprocess_function(examples):
|
||||
... return tokenizer(examples["text"], truncation=True)
|
||||
```
|
||||
|
||||
لتطبيق دالة التهيئة على مجموعة البيانات بأكملها، استخدم دالة 🤗 Datasets [`~datasets.Dataset.map`] . يمكنك تسريع `map` باستخدام `batched=True` لمعالجة دفعات من البيانات:
|
||||
|
||||
```py
|
||||
>>> tokenized_imdb = imdb.map(preprocess_function, batched=True)
|
||||
```
|
||||
|
||||
الآن قم بإنشاء دفعة من الأمثلة باستخدام [`DataCollatorWithPadding`]. الأكثر كفاءة هو استخدام الحشو الديناميكي لجعل الجمل متساوية في الطول داخل كل دفعة، بدلًا من حشو كامل البيانات إلى الحد الأقصى للطول.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorWithPadding
|
||||
|
||||
>>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorWithPadding
|
||||
|
||||
>>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf")
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## التقييم(Evaluate)
|
||||
|
||||
يُعدّ تضمين مقياس أثناء التدريب مفيدًا لتقييم أداء النموذج. يمكنك تحميل طريقة تقييم بسرعة باستخدام مكتبة 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) . بالنسبة لهذه المهمة، قم بتحميل مقياس [الدقة](https://huggingface.co/spaces/evaluate-metric/accuracy) (راجع جولة 🤗 Evaluate [السريعة](https://huggingface.co/docs/evaluate/a_quick_tour) لمعرفة المزيد حول كيفية تحميل وحساب مقياس):
|
||||
|
||||
```py
|
||||
>>> import evaluate
|
||||
|
||||
>>> accuracy = evaluate.load("accuracy")
|
||||
```
|
||||
|
||||
ثم أنشئ دالة تقوم بتمرير تنبؤاتك وتصنيفاتك إلى [`~evaluate.EvaluationModule.compute`] لحساب الدقة:
|
||||
|
||||
```py
|
||||
>>> import numpy as np
|
||||
|
||||
>>> def compute_metrics(eval_pred):
|
||||
... predictions, labels = eval_pred
|
||||
... predictions = np.argmax(predictions, axis=1)
|
||||
... return accuracy.compute(predictions=predictions, references=labels)
|
||||
```
|
||||
|
||||
دالة `compute_metrics` جاهزة الآن، وستعود إليها عند إعداد التدريب.
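للتأكد من سلوك الدالة، يمكنك تجربتها مباشرة على قيم افتراضية صغيرة (logits وتسميات مختلقة للتوضيح فقط، تعتمد على استيراد `numpy` ودالة `compute_metrics` المعرّفين أعلاه) قبل استخدامها في التدريب:

```py
>>> # قيم افتراضية للتوضيح: ثلاث عينات، توقع النموذج صحيح في اثنتين منها
>>> dummy_logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
>>> dummy_labels = np.array([1, 0, 0])
>>> compute_metrics((dummy_logits, dummy_labels))
{'accuracy': 0.6666666666666666}
```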
|
||||
|
||||
## التدريب(Train)
|
||||
|
||||
قبل أن تبدأ في تدريب نموذجك، قم بإنشاء خريطة من المعرفات المتوقعة إلى تسمياتها باستخدام `id2label` و `label2id`:
|
||||
|
||||
```py
|
||||
>>> id2label = {0: "NEGATIVE", 1: "POSITIVE"}
|
||||
>>> label2id = {"NEGATIVE": 0, "POSITIVE": 1}
|
||||
```
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بضبط نموذج دقيق باستخدام [`Trainer`], فالق نظرة على البرنامج التعليمي الأساسي [هنا](../training#train-with-pytorch-trainer)!
|
||||
|
||||
</Tip>
|
||||
|
||||
أنت مستعد الآن لبدء تدريب نموذجك! قم بتحميل DistilBERT باستخدام [`AutoModelForSequenceClassification`] مع عدد التصنيفات المتوقعة وتعيينات التسميات:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer
|
||||
|
||||
>>> model = AutoModelForSequenceClassification.from_pretrained(
|
||||
... "distilbert/distilbert-base-uncased", num_labels=2, id2label=id2label, label2id=label2id
|
||||
... )
|
||||
```
|
||||
|
||||
في هذه المرحلة، هناك ثلاث خطوات فقط متبقية:
|
||||
|
||||
1. حدد مُعامِلات التدريب في [`TrainingArguments`]. المُعامل المطلوب الوحيد هو `output_dir`، لتحديد مكان حفظ النموذج. يمكنك رفع النموذج إلى Hub بتعيين `push_to_hub=True` (يجب تسجيل الدخول إلى Hugging Face لرفع النموذج). سيقوم `Trainer` بتقييم الدقة وحفظ نقاط التحقق في نهاية كل حقبة.
|
||||
2. مرر مُعامِلات التدريب إلى `Trainer` مع النموذج، ومجموعات البيانات، والمُجزِّئ اللغوي، ومُجمِّع البيانات، ودالة `compute_metrics`.
|
||||
3. استدعِ [`~Trainer.train`] لضبط النموذج.
|
||||
|
||||
```py
|
||||
>>> training_args = TrainingArguments(
|
||||
... output_dir="my_awesome_model",
|
||||
... learning_rate=2e-5,
|
||||
... per_device_train_batch_size=16,
|
||||
... per_device_eval_batch_size=16,
|
||||
... num_train_epochs=2,
|
||||
... weight_decay=0.01,
|
||||
... eval_strategy="epoch",
|
||||
... save_strategy="epoch",
|
||||
... load_best_model_at_end=True,
|
||||
... push_to_hub=True,
|
||||
... )
|
||||
|
||||
>>> trainer = Trainer(
|
||||
... model=model,
|
||||
... args=training_args,
|
||||
... train_dataset=tokenized_imdb["train"],
|
||||
... eval_dataset=tokenized_imdb["test"],
|
||||
... processing_class=tokenizer,
|
||||
... data_collator=data_collator,
|
||||
... compute_metrics=compute_metrics,
|
||||
... )
|
||||
|
||||
>>> trainer.train()
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
يستخدم [`Trainer`] الحشو الديناميكي افتراضيًا عند تمرير `tokenizer` إليه. في هذه الحالة، لا تحتاج لتحديد مُجمِّع البيانات صراحةً.
|
||||
|
||||
</Tip>
|
||||
|
||||
بعد اكتمال التدريب، شارك نموذجك على Hub باستخدام الطريقة [`~transformers.Trainer.push_to_hub`] ليستخدمه الجميع:
|
||||
|
||||
```py
|
||||
>>> trainer.push_to_hub()
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بضبط نموذج باستخدام Keras، قم بالاطلاع على البرنامج التعليمي الأساسي [هنا](../training#train-a-tensorflow-model-with-keras)!
|
||||
|
||||
</Tip>
|
||||
لضبط نموذج في TensorFlow، ابدأ بإعداد دالة المحسن، وجدول معدل التعلم، وبعض معلمات التدريب:
|
||||
|
||||
```py
|
||||
>>> from transformers import create_optimizer
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> batch_size = 16
|
||||
>>> num_epochs = 5
|
||||
>>> batches_per_epoch = len(tokenized_imdb["train"]) // batch_size
|
||||
>>> total_train_steps = int(batches_per_epoch * num_epochs)
|
||||
>>> optimizer, schedule = create_optimizer(init_lr=2e-5, num_warmup_steps=0, num_train_steps=total_train_steps)
|
||||
```
|
||||
|
||||
ثم يمكنك تحميل DistilBERT مع [`TFAutoModelForSequenceClassification`] بالإضافة إلى عدد التصنيفات المتوقعة، وتعيينات التسميات:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForSequenceClassification
|
||||
|
||||
>>> model = TFAutoModelForSequenceClassification.from_pretrained(
|
||||
... "distilbert/distilbert-base-uncased", num_labels=2, id2label=id2label, label2id=label2id
|
||||
... )
|
||||
```
|
||||
|
||||
قم بتحويل مجموعات بياناتك إلى تنسيق `tf.data.Dataset` باستخدام [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
|
||||
|
||||
```py
|
||||
>>> tf_train_set = model.prepare_tf_dataset(
|
||||
... tokenized_imdb["train"],
|
||||
... shuffle=True,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
|
||||
>>> tf_validation_set = model.prepare_tf_dataset(
|
||||
... tokenized_imdb["test"],
|
||||
... shuffle=False,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
```
|
||||
|
||||
قم بتهيئة النموذج للتدريب باستخدام [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). لاحظ أن جميع نماذج Transformers لديها دالة خسارة ذات صلة بالمهمة بشكل افتراضي، لذلك لا تحتاج إلى تحديد واحدة ما لم ترغب في ذلك:
|
||||
|
||||
```py
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> model.compile(optimizer=optimizer) # No loss argument!
|
||||
```
|
||||
|
||||
آخر أمرين يجب إعدادهما قبل بدء التدريب هما حساب الدقة من التوقعات، وتوفير طريقة لدفع نموذجك إلى Hub. يتم ذلك باستخدام [Keras callbacks](../main_classes/keras_callbacks).
|
||||
|
||||
قم بتمرير دالة `compute_metrics` الخاصة بك إلى [`~transformers.KerasMetricCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import KerasMetricCallback
|
||||
|
||||
>>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set)
|
||||
```
|
||||
|
||||
حدد مكان دفع نموذجك والمجزئ اللغوي في [`~transformers.PushToHubCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import PushToHubCallback
|
||||
|
||||
>>> push_to_hub_callback = PushToHubCallback(
|
||||
... output_dir="my_awesome_model",
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
```
|
||||
|
||||
ثم اجمع الاستدعاءات معًا:
|
||||
|
||||
```py
|
||||
>>> callbacks = [metric_callback, push_to_hub_callback]
|
||||
```
|
||||
|
||||
أخيرًا، أنت مستعد لبدء تدريب نموذجك! قم باستدعاء [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) مع مجموعات بيانات التدريب والتحقق، وعدد الحقبات، واستدعاءاتك لضبط النموذج:
|
||||
|
||||
```py
|
||||
>>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3, callbacks=callbacks)
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، يتم تحميل نموذجك تلقائيًا إلى Hub حتى يتمكن الجميع من استخدامه!
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
<Tip>
|
||||
|
||||
للحصول على مثال أكثر عمقًا حول كيفية ضبط نموذج لتصنيف النصوص، قم بالاطلاع على الدفتر المقابل
|
||||
[دفتر PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)
|
||||
أو [دفتر TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb).
|
||||
|
||||
</Tip>
|
||||
|
||||
## الاستدلال(Inference)
|
||||
|
||||
رائع، الآن بعد أن قمت بضبط نموذج، يمكنك استخدامه للاستدلال!
|
||||
|
||||
احصل على بعض النصوص التي ترغب في إجراء الاستدلال عليها:
|
||||
|
||||
```py
|
||||
>>> text = "This was a masterpiece. Not completely faithful to the books, but enthralling from beginning to end. Might be my favorite of the three."
|
||||
```
|
||||
|
||||
أسهل طريقة لتجربة النموذج المضبوط للاستدلال هي استخدامه ضمن [`pipeline`]. قم بإنشاء `pipeline` لتحليل المشاعر مع نموذجك، ومرر نصك إليه:
|
||||
|
||||
```py
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> classifier = pipeline("sentiment-analysis", model="stevhliu/my_awesome_model")
|
||||
>>> classifier(text)
|
||||
[{'label': 'POSITIVE', 'score': 0.9994940757751465}]
|
||||
```
|
||||
|
||||
يمكنك أيضًا تكرار نتائج `pipeline` يدويًا إذا أردت:
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
قم بتجزئة النص وأرجع مُوتّرات PyTorch:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_model")
|
||||
>>> inputs = tokenizer(text, return_tensors="pt")
|
||||
```
|
||||
|
||||
مرر المدخلات إلى النموذج واسترجع `logits`:
|
||||
|
||||
```py
|
||||
>>> import torch
>>> from transformers import AutoModelForSequenceClassification
|
||||
|
||||
>>> model = AutoModelForSequenceClassification.from_pretrained("stevhliu/my_awesome_model")
|
||||
>>> with torch.no_grad():
|
||||
... logits = model(**inputs).logits
|
||||
```
|
||||
|
||||
استخرج الفئة ذات الاحتمالية الأعلى، واستخدم `id2label` لتحويلها إلى تصنيف نصي:
|
||||
|
||||
```py
|
||||
>>> predicted_class_id = logits.argmax().item()
|
||||
>>> model.config.id2label[predicted_class_id]
|
||||
'POSITIVE'
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
قم بتجزئة النص وأرجع مُوتّرات TensorFlow:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_model")
|
||||
>>> inputs = tokenizer(text, return_tensors="tf")
|
||||
```
|
||||
|
||||
قم بتمرير مدخلاتك إلى النموذج وإرجاع `logits`:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForSequenceClassification
|
||||
|
||||
>>> model = TFAutoModelForSequenceClassification.from_pretrained("stevhliu/my_awesome_model")
|
||||
>>> logits = model(**inputs).logits
|
||||
```
|
||||
|
||||
استخرج الفئة ذات الاحتمالية الأعلى، واستخدم `id2label` لتحويلها إلى تصنيف نصي:
|
||||
|
||||
```py
|
||||
>>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0])
|
||||
>>> model.config.id2label[predicted_class_id]
|
||||
'POSITIVE'
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
550
docs/source/ar/tasks/token_classification.md
Normal file
@ -0,0 +1,550 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# تصنيف الرموز(Token classification)
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
<Youtube id="wVHdVlPScxA"/>
|
||||
|
||||
يهدف تصنيف الرموز إلى إعطاء تسمية لكل رمز على حدة في الجملة. من أكثر مهام تصنيف الرموز شيوعًا هو التعرف على الكيانات المسماة (NER). يحاول NER تحديد تسمية لكل كيان في الجملة، مثل شخص، أو مكان، أو منظمة.
|
||||
|
||||
سيوضح لك هذا الدليل كيفية:
|
||||
|
||||
1. ضبط [DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased) على مجموعة بيانات [WNUT 17](https://huggingface.co/datasets/wnut_17) للكشف عن كيانات جديدة.
|
||||
2. استخدام نموذجك المضبوط بدقة للاستدلال.
|
||||
|
||||
<Tip>
|
||||
|
||||
للاطلاع على جميع البنى ونقاط التحقق المتوافقة مع هذه المهمة، نوصي بالرجوع إلى [صفحة المهمة](https://huggingface.co/tasks/token-classification).
|
||||
|
||||
</Tip>
|
||||
|
||||
قبل أن تبدأ، تأكد من تثبيت جميع المكتبات الضرورية:
|
||||
|
||||
```bash
|
||||
pip install transformers datasets evaluate seqeval
|
||||
```
|
||||
|
||||
نحن نشجعك على تسجيل الدخول إلى حساب HuggingFace الخاص بك حتى تتمكن من تحميل ومشاركة نموذجك مع المجتمع. عندما يُطلب منك، أدخل رمزك لتسجيل الدخول:
|
||||
|
||||
```py
|
||||
>>> from huggingface_hub import notebook_login
|
||||
|
||||
>>> notebook_login()
|
||||
```
|
||||
|
||||
## تحميل مجموعة بيانات WNUT 17
|
||||
|
||||
ابدأ بتحميل مجموعة بيانات WNUT 17 من مكتبة 🤗 Datasets:
|
||||
|
||||
```py
|
||||
>>> from datasets import load_dataset
|
||||
|
||||
>>> wnut = load_dataset("wnut_17")
|
||||
```
|
||||
|
||||
ثم ألق نظرة على مثال:
|
||||
|
||||
```py
|
||||
>>> wnut["train"][0]
|
||||
{'id': '0',
|
||||
'ner_tags': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 8, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
'tokens': ['@paulwalk', 'It', "'s", 'the', 'view', 'from', 'where', 'I', "'m", 'living', 'for', 'two', 'weeks', '.', 'Empire', 'State', 'Building', '=', 'ESB', '.', 'Pretty', 'bad', 'storm', 'here', 'last', 'evening', '.']
|
||||
}
|
||||
```
|
||||
|
||||
يمثل كل رقم في `ner_tags` كياناً. حوّل الأرقام إلى أسماء التصنيفات لمعرفة ماهية الكيانات:
|
||||
|
||||
```py
|
||||
>>> label_list = wnut["train"].features[f"ner_tags"].feature.names
|
||||
>>> label_list
|
||||
[
|
||||
"O",
|
||||
"B-corporation",
|
||||
"I-corporation",
|
||||
"B-creative-work",
|
||||
"I-creative-work",
|
||||
"B-group",
|
||||
"I-group",
|
||||
"B-location",
|
||||
"I-location",
|
||||
"B-person",
|
||||
"I-person",
|
||||
"B-product",
|
||||
"I-product",
|
||||
]
|
||||
```
|
||||
|
||||
يشير الحرف الذي يسبق كل `ner_tag` إلى موضع الرمز للكيان:
|
||||
|
||||
- `B-` يشير إلى بداية الكيان.
|
||||
- `I-` يشير إلى أن الرمز يقع ضمن نفس الكيان (على سبيل المثال، الرمز `State` هو جزء من كيان مثل `Empire State Building`).
|
||||
- `O` يشير إلى أن الرمز لا يمثل أي كيان.
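
على سبيل المثال، يمكنك ربط رموز المثال الأول أعلاه بتسمياتها لرؤية هذا المخطط عمليًا (مقتطف توضيحي بسيط يفترض أنك حمّلت `wnut` و`label_list` كما هو موضح أعلاه):

```py
>>> example = wnut["train"][0]
>>> for token, tag_id in zip(example["tokens"], example["ner_tags"]):
...     if label_list[tag_id] != "O":  # اعرض رموز الكيانات فقط
...         print(token, "->", label_list[tag_id])
Empire -> B-location
State -> I-location
Building -> I-location
ESB -> B-location
```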
|
||||
|
||||
## المعالجة المسبقة(Preprocess)
|
||||
|
||||
<Youtube id="iY2AZYdZAr0"/>
|
||||
|
||||
الخطوة التالية هي تحميل مُجزِّئ النصوص DistilBERT للمعالجة المسبقة لحقل `tokens`:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
كما يظهر في حقل `tokens` في المثال أعلاه، يبدو أن المدخل مُجزأ بالفعل. لكنه في الواقع لم يُجزأ بعد، ويتعيّن عليك ضبط `is_split_into_words=True` لتقسيم الكلمات إلى كلمات فرعية. على سبيل المثال:
|
||||
|
||||
```py
|
||||
>>> example = wnut["train"][0]
|
||||
>>> tokenized_input = tokenizer(example["tokens"], is_split_into_words=True)
|
||||
>>> tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"])
|
||||
>>> tokens
|
||||
['[CLS]', '@', 'paul', '##walk', 'it', "'", 's', 'the', 'view', 'from', 'where', 'i', "'", 'm', 'living', 'for', 'two', 'weeks', '.', 'empire', 'state', 'building', '=', 'es', '##b', '.', 'pretty', 'bad', 'storm', 'here', 'last', 'evening', '.', '[SEP]']
|
||||
```
|
||||
|
||||
ومع ذلك، يضيف هذا بعض الرموز الخاصة `[CLS]` و`[SEP]`، كما أن تقسيم الكلمات إلى كلمات فرعية يُنشئ عدم تطابق بين المُدخلات والتسميات؛ فقد تُقسَّم كلمة واحدة تقابل تسمية واحدة إلى كلمتين فرعيتين أو أكثر. ستحتاج إلى إعادة محاذاة الرموز والتسميات عن طريق:
|
||||
|
||||
1. ربط كل رمز بالكلمة الأصلية باستخدام الخاصية [`word_ids`](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.BatchEncoding.word_ids).
|
||||
2. تعيين التسمية `-100` للرموز الخاصة `[CLS]` و`[SEP]` بحيث يتم تجاهلها بواسطة دالة الخسارة PyTorch (انظر [CrossEntropyLoss](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html)).
|
||||
3. تسمية الرمز الأول فقط لكلمة معينة. قم بتعيين `-100` لأجزاء الكلمة الأخرى.
|
||||
|
||||
إليك كيفية إنشاء دالة لإعادة محاذاة الرموز والتسميات، وقص التسلسلات بحيث لا تتجاوز الحد الأقصى لطول مُدخلات DistilBERT:
|
||||
|
||||
```py
|
||||
>>> def tokenize_and_align_labels(examples):
|
||||
... tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True)
|
||||
|
||||
... labels = []
|
||||
... for i, label in enumerate(examples[f"ner_tags"]):
|
||||
... word_ids = tokenized_inputs.word_ids(batch_index=i) # تعيين الرموز إلى كلماتهم المقابلة.
|
||||
... previous_word_idx = None
|
||||
... label_ids = []
|
||||
... for word_idx in word_ids: # تعيين الرموز الخاصة إلى -100.
|
||||
... if word_idx is None:
|
||||
... label_ids.append(-100)
|
||||
... elif word_idx != previous_word_idx: # تسمية الرمز الأول فقط لكلمة معينة.
|
||||
... label_ids.append(label[word_idx])
|
||||
... else:
|
||||
... label_ids.append(-100)
|
||||
... previous_word_idx = word_idx
|
||||
... labels.append(label_ids)
|
||||
|
||||
... tokenized_inputs["labels"] = labels
|
||||
... return tokenized_inputs
|
||||
```
|
||||
|
||||
لتطبيق هذه العملية على كامل مجموعة البيانات، استخدم الدالة [`~datasets.Dataset.map`] لمجموعة بيانات 🤗. يمكنك تسريع الدالة `map` عن طريق تعيين `batched=True` لمعالجة عناصر متعددة من مجموعة البيانات في وقت واحد:
|
||||
|
||||
```py
|
||||
>>> tokenized_wnut = wnut.map(tokenize_and_align_labels, batched=True)
|
||||
```
|
||||
|
||||
الآن قم بإنشاء دفعة من الأمثلة باستخدام [`DataCollatorForTokenClassification`]. من الأفضل استخدام *الحشو الديناميكي* للجمل إلى أطول طول في الدفعة أثناء التجميع، بدلاً من حشو مجموعة البيانات بالكامل إلى الطول الأقصى.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```py
|
||||
>>> from transformers import DataCollatorForTokenClassification
|
||||
|
||||
>>> data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```py
|
||||
>>> from transformers import DataCollatorForTokenClassification
|
||||
|
||||
>>> data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors="tf")
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## التقييم(Evaluate)
|
||||
|
||||
يُعدّ تضمين مقياس أثناء التدريب مفيدًا في تقييم أداء نموذجك. يمكنك تحميل طريقة تقييم بسرعة مع مكتبة 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index). لهذه المهمة، قم بتحميل إطار [seqeval](https://huggingface.co/spaces/evaluate-metric/seqeval) (انظر جولة 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) لمعرفة المزيد حول كيفية تحميل وحساب مقياس). يُخرج seqeval عدة نتائج: الضبط (precision)، والاستذكار (recall)، ومقياس F1، والدقة (accuracy).
|
||||
|
||||
```py
|
||||
>>> import evaluate
|
||||
|
||||
>>> seqeval = evaluate.load("seqeval")
|
||||
```
|
||||
|
||||
احصل على تسميات الكيانات المسماة (NER) أولاً، ثم أنشئ دالة تُمرر تنبؤاتك وتسمياتك الصحيحة إلى [`~evaluate.EvaluationModule.compute`] لحساب النتائج:
|
||||
|
||||
```py
|
||||
>>> import numpy as np
|
||||
|
||||
>>> labels = [label_list[i] for i in example[f"ner_tags"]]
|
||||
|
||||
>>> def compute_metrics(p):
|
||||
... predictions, labels = p
|
||||
... predictions = np.argmax(predictions, axis=2)
|
||||
|
||||
... true_predictions = [
|
||||
... [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
|
||||
... for prediction, label in zip(predictions, labels)
|
||||
... ]
|
||||
... true_labels = [
|
||||
... [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
|
||||
... for prediction, label in zip(predictions, labels)
|
||||
... ]
|
||||
|
||||
... results = seqeval.compute(predictions=true_predictions, references=true_labels)
|
||||
... return {
|
||||
... "precision": results["overall_precision"],
|
||||
... "recall": results["overall_recall"],
|
||||
... "f1": results["overall_f1"],
|
||||
... "accuracy": results["overall_accuracy"],
|
||||
... }
|
||||
```
|
||||
|
||||
دالة `compute_metrics` جاهزة للاستخدام، وستحتاج إليها عند إعداد التدريب.
|
||||
|
||||
## التدريب(Train)
|
||||
|
||||
قبل تدريب النموذج، جهّز خريطة تربط بين المعرّفات المتوقعة وتسمياتها باستخدام `id2label` و `label2id`:
|
||||
|
||||
```py
|
||||
>>> id2label = {
|
||||
... 0: "O",
|
||||
... 1: "B-corporation",
|
||||
... 2: "I-corporation",
|
||||
... 3: "B-creative-work",
|
||||
... 4: "I-creative-work",
|
||||
... 5: "B-group",
|
||||
... 6: "I-group",
|
||||
... 7: "B-location",
|
||||
... 8: "I-location",
|
||||
... 9: "B-person",
|
||||
... 10: "I-person",
|
||||
... 11: "B-product",
|
||||
... 12: "I-product",
|
||||
... }
|
||||
>>> label2id = {
|
||||
... "O": 0,
|
||||
... "B-corporation": 1,
|
||||
... "I-corporation": 2,
|
||||
... "B-creative-work": 3,
|
||||
... "I-creative-work": 4,
|
||||
... "B-group": 5,
|
||||
... "I-group": 6,
|
||||
... "B-location": 7,
|
||||
... "I-location": 8,
|
||||
... "B-person": 9,
|
||||
... "I-person": 10,
|
||||
... "B-product": 11,
|
||||
... "I-product": 12,
|
||||
... }
|
||||
```
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بتعديل نموذج باستخدام [`Trainer`], ألق نظرة على الدليل التعليمي الأساسي [هنا](../training#train-with-pytorch-trainer)!
|
||||
|
||||
</Tip>
|
||||
|
||||
أنت مستعد الآن لبدء تدريب نموذجك! قم بتحميل DistilBERT مع [`AutoModelForTokenClassification`] إلى جانب عدد التصنيفات المتوقعة، وخريطة التسميات:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer
|
||||
|
||||
>>> model = AutoModelForTokenClassification.from_pretrained(
|
||||
... "distilbert/distilbert-base-uncased", num_labels=13, id2label=id2label, label2id=label2id
|
||||
... )
|
||||
```
|
||||
|
||||
في هذه المرحلة، هناك ثلاث خطوات فقط متبقية:
|
||||
|
||||
1. حدد معلمات التدريب الخاصة بك في [`TrainingArguments`]. المعامل الوحيد المطلوب هو `output_dir` الذي يحدد مكان حفظ نموذجك. ستقوم بدفع هذا النموذج إلى Hub عن طريق تعيين `push_to_hub=True` (يجب أن تكون مسجلاً الدخول إلى Hugging Face لتحميل نموذجك). في نهاية كل حقبة، سيقوم [`Trainer`] بتقييم درجات seqeval وحفظ نقطة التحقق التدريبية.
|
||||
2. قم بتمرير معاملات التدريب إلى [`Trainer`] إلى جانب النموذج، ومجموعة البيانات، والمُجزِّئ اللغوي، و`data collator`، ودالة `compute_metrics`.
|
||||
3. استدعِ [`~Trainer.train`] لتدريب نموذجك.
|
||||
|
||||
```py
|
||||
>>> training_args = TrainingArguments(
|
||||
... output_dir="my_awesome_wnut_model",
|
||||
... learning_rate=2e-5,
|
||||
... per_device_train_batch_size=16,
|
||||
... per_device_eval_batch_size=16,
|
||||
... num_train_epochs=2,
|
||||
... weight_decay=0.01,
|
||||
... eval_strategy="epoch",
|
||||
... save_strategy="epoch",
|
||||
... load_best_model_at_end=True,
|
||||
... push_to_hub=True,
|
||||
... )
|
||||
|
||||
>>> trainer = Trainer(
|
||||
... model=model,
|
||||
... args=training_args,
|
||||
... train_dataset=tokenized_wnut["train"],
|
||||
... eval_dataset=tokenized_wnut["test"],
|
||||
... processing_class=tokenizer,
|
||||
... data_collator=data_collator,
|
||||
... compute_metrics=compute_metrics,
|
||||
... )
|
||||
|
||||
>>> trainer.train()
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، شارك نموذجك على Hub باستخدام طريقة [`~transformers.Trainer.push_to_hub`] حتى يتمكن الجميع من استخدام نموذجك:
|
||||
|
||||
```py
|
||||
>>> trainer.push_to_hub()
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بتعديل نموذج باستخدام Keras، ألق نظرة على الدليل التعليمي الأساسي [هنا](../training#train-a-tensorflow-model-with-keras)!
|
||||
|
||||
</Tip>
|
||||
للتعديل على نموذج في TensorFlow، ابدأ بإعداد دالة محسن، وجدول معدل التعلم، وبعض معلمات التدريب:
|
||||
|
||||
```py
|
||||
>>> from transformers import create_optimizer
|
||||
|
||||
>>> batch_size = 16
|
||||
>>> num_train_epochs = 3
|
||||
>>> num_train_steps = (len(tokenized_wnut["train"]) // batch_size) * num_train_epochs
|
||||
>>> optimizer, lr_schedule = create_optimizer(
|
||||
... init_lr=2e-5,
|
||||
... num_train_steps=num_train_steps,
|
||||
... weight_decay_rate=0.01,
|
||||
... num_warmup_steps=0,
|
||||
... )
|
||||
```
|
||||
|
||||
ثم يمكنك تحميل DistilBERT مع [`TFAutoModelForTokenClassification`] إلى جانب عدد التسميات المتوقعة، وتخطيطات التسميات:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForTokenClassification
|
||||
|
||||
>>> model = TFAutoModelForTokenClassification.from_pretrained(
|
||||
... "distilbert/distilbert-base-uncased", num_labels=13, id2label=id2label, label2id=label2id
|
||||
... )
|
||||
```
|
||||
|
||||
قم بتحويل مجموعات بياناتك إلى تنسيق `tf.data.Dataset` مع [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
|
||||
|
||||
```py
|
||||
>>> tf_train_set = model.prepare_tf_dataset(
|
||||
... tokenized_wnut["train"],
|
||||
... shuffle=True,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
|
||||
>>> tf_validation_set = model.prepare_tf_dataset(
|
||||
... tokenized_wnut["validation"],
|
||||
... shuffle=False,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
```
|
||||
|
||||
هيّئ النموذج للتدريب باستخدام [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). لاحظ أن نماذج Transformers تتضمن دالة خسارة افتراضية مرتبطة بالمهمة، لذلك لا تحتاج إلى تحديد واحدة إلا إذا كنت ترغب في ذلك:
|
||||
|
||||
```py
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> model.compile(optimizer=optimizer) # No loss argument!
|
||||
```
|
||||
|
||||
آخر أمرين يجب إعدادهما قبل بدء التدريب هما حساب درجات seqeval من التنبؤات، وتوفير طريقة لدفع نموذجك إلى Hub. يتم ذلك باستخدام [Keras callbacks](../main_classes/keras_callbacks).
|
||||
|
||||
مرر دالة `compute_metrics` الخاصة بك إلى [`~transformers.KerasMetricCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import KerasMetricCallback
|
||||
|
||||
>>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set)
|
||||
```
|
||||
|
||||
حدد مكان دفع نموذجك والمحلل اللغوي في [`~transformers.PushToHubCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import PushToHubCallback
|
||||
|
||||
>>> push_to_hub_callback = PushToHubCallback(
|
||||
... output_dir="my_awesome_wnut_model",
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
```
|
||||
|
||||
ثم جمّع callbacks الخاصة بك معًا:
|
||||
|
||||
```py
|
||||
>>> callbacks = [metric_callback, push_to_hub_callback]
|
||||
```
|
||||
|
||||
أخيرًا، أنت جاهز الآن لبدء تدريب نموذجك! قم باستدعاء [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) مع بيانات التدريب والتحقق، وعدد الحقبات، وcallbacks لتعديل النموذج:
|
||||
|
||||
```py
|
||||
>>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3, callbacks=callbacks)
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، يتم تحميل نموذجك تلقائيًا إلى Hub حتى يتمكن الجميع من استخدامه!
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
<Tip>
|
||||
|
||||
للحصول على مثال أكثر تفصيلاً حول كيفية تعديل نموذج لتصنيف الرموز، ألق نظرة على الدفتر المقابل
|
||||
[دفتر PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)
|
||||
أو [دفتر TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb).
|
||||
|
||||
</Tip>
|
||||
|
||||
## الاستدلال(Inference)
|
||||
|
||||
رائع، الآن بعد أن قمت بتعديل نموذج، يمكنك استخدامه للاستدلال!
|
||||
|
||||
احصل على بعض النصوص التي تريد تشغيل الاستدلال عليها:
|
||||
|
||||
```py
|
||||
>>> text = "The Golden State Warriors are an American professional basketball team based in San Francisco."
|
||||
```
|
||||
|
||||
أبسط طريقة لتجربة نموذجك المضبوط للاستدلال هي استخدامه ضمن [`pipeline`]. أنشئ `pipeline` للتعرف على الكيانات المسماة باستخدام نموذجك، ومرر نصك إليه:
|
||||
|
||||
```py
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> classifier = pipeline("ner", model="stevhliu/my_awesome_wnut_model")
|
||||
>>> classifier(text)
|
||||
[{'entity': 'B-location',
|
||||
'score': 0.42658573,
|
||||
'index': 2,
|
||||
'word': 'golden',
|
||||
'start': 4,
|
||||
'end': 10},
|
||||
{'entity': 'I-location',
|
||||
'score': 0.35856336,
|
||||
'index': 3,
|
||||
'word': 'state',
|
||||
'start': 11,
|
||||
'end': 16},
|
||||
{'entity': 'B-group',
|
||||
'score': 0.3064001,
|
||||
'index': 4,
|
||||
'word': 'warriors',
|
||||
'start': 17,
|
||||
'end': 25},
|
||||
{'entity': 'B-location',
|
||||
'score': 0.65523505,
|
||||
'index': 13,
|
||||
'word': 'san',
|
||||
'start': 80,
|
||||
'end': 83},
|
||||
{'entity': 'B-location',
|
||||
'score': 0.4668663,
|
||||
'index': 14,
|
||||
'word': 'francisco',
|
||||
'start': 84,
|
||||
'end': 93}]
|
||||
```
|
||||
|
||||
يمكنك أيضًا تكرار نتائج `pipeline` يدويًا إذا أردت:
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
قسّم النص إلى رموز وأرجع المُوتّرات بتنسيق PyTorch:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_wnut_model")
|
||||
>>> inputs = tokenizer(text, return_tensors="pt")
|
||||
```
|
||||
|
||||
مرر مدخلاتك إلى النموذج واحصل على `logits`:
|
||||
|
||||
```py
|
||||
>>> import torch
>>> from transformers import AutoModelForTokenClassification
|
||||
|
||||
>>> model = AutoModelForTokenClassification.from_pretrained("stevhliu/my_awesome_wnut_model")
|
||||
>>> with torch.no_grad():
|
||||
... logits = model(**inputs).logits
|
||||
```
|
||||
|
||||
استخرج الفئة ذات الاحتمالية الأعلى، واستخدم جدول `id2label` الخاص بالنموذج لتحويلها إلى تسمية نصية:
|
||||
|
||||
```py
|
||||
>>> predictions = torch.argmax(logits, dim=2)
|
||||
>>> predicted_token_class = [model.config.id2label[t.item()] for t in predictions[0]]
|
||||
>>> predicted_token_class
|
||||
['O',
|
||||
'O',
|
||||
'B-location',
|
||||
'I-location',
|
||||
'B-group',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'B-location',
|
||||
'B-location',
|
||||
'O',
|
||||
'O']
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
قسّم النص إلى رموز وأرجع المُوتّرات بتنسيق TensorFlow:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_wnut_model")
|
||||
>>> inputs = tokenizer(text, return_tensors="tf")
|
||||
```
|
||||
|
||||
مرر مدخلاتك إلى النموذج واحصل على `logits`:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForTokenClassification
|
||||
|
||||
>>> model = TFAutoModelForTokenClassification.from_pretrained("stevhliu/my_awesome_wnut_model")
|
||||
>>> logits = model(**inputs).logits
|
||||
```
|
||||
|
||||
استخرج الفئة ذات الاحتمالية الأعلى، واستخدم جدول `id2label` الخاص بالنموذج لتحويلها إلى تسمية نصية:
|
||||
|
||||
```py
|
||||
>>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1)
|
||||
>>> predicted_token_class = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()]
|
||||
>>> predicted_token_class
|
||||
['O',
|
||||
'O',
|
||||
'B-location',
|
||||
'I-location',
|
||||
'B-group',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'B-location',
|
||||
'B-location',
|
||||
'O',
|
||||
'O']
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
@ -139,8 +139,6 @@
|
||||
title: Export to TFLite
|
||||
- local: torchscript
|
||||
title: Export to TorchScript
|
||||
- local: benchmarks
|
||||
title: Benchmarks
|
||||
- local: notebooks
|
||||
title: Notebooks with examples
|
||||
- local: community
|
||||
@ -408,8 +406,6 @@
|
||||
title: Falcon3
|
||||
- local: model_doc/falcon_mamba
|
||||
title: FalconMamba
|
||||
- local: model_doc/fastspeech2_conformer
|
||||
title: FastSpeech2Conformer
|
||||
- local: model_doc/flan-t5
|
||||
title: FLAN-T5
|
||||
- local: model_doc/flan-ul2
|
||||
@ -452,6 +448,10 @@
|
||||
title: Granite
|
||||
- local: model_doc/granitemoe
|
||||
title: GraniteMoe
|
||||
- local: model_doc/granitevision
|
||||
title: GraniteVision
|
||||
- local: model_doc/helium
|
||||
title: Helium
|
||||
- local: model_doc/herbert
|
||||
title: HerBERT
|
||||
- local: model_doc/ibert
|
||||
@ -505,7 +505,7 @@
|
||||
- local: model_doc/mobilebert
|
||||
title: MobileBERT
|
||||
- local: model_doc/modernbert
|
||||
title: ModernBERT
|
||||
title: ModernBert
|
||||
- local: model_doc/mpnet
|
||||
title: MPNet
|
||||
- local: model_doc/mpt
|
||||
@ -626,6 +626,8 @@
|
||||
title: YOSO
|
||||
- local: model_doc/zamba
|
||||
title: Zamba
|
||||
- local: model_doc/zamba2
|
||||
title: Zamba2
|
||||
title: Text models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
@ -709,6 +711,8 @@
|
||||
title: SegFormer
|
||||
- local: model_doc/seggpt
|
||||
title: SegGpt
|
||||
- local: model_doc/superglue
|
||||
title: SuperGlue
|
||||
- local: model_doc/superpoint
|
||||
title: SuperPoint
|
||||
- local: model_doc/swiftformer
|
||||
@ -760,6 +764,8 @@
|
||||
title: dac
|
||||
- local: model_doc/encodec
|
||||
title: EnCodec
|
||||
- local: model_doc/fastspeech2_conformer
|
||||
title: FastSpeech2Conformer
|
||||
- local: model_doc/hubert
|
||||
title: Hubert
|
||||
- local: model_doc/mctct
|
||||
@ -768,6 +774,8 @@
|
||||
title: Mimi
|
||||
- local: model_doc/mms
|
||||
title: MMS
|
||||
- local: model_doc/moonshine
|
||||
title: Moonshine
|
||||
- local: model_doc/moshi
|
||||
title: Moshi
|
||||
- local: model_doc/musicgen
|
||||
@ -858,6 +866,8 @@
|
||||
title: DePlot
|
||||
- local: model_doc/donut
|
||||
title: Donut
|
||||
- local: model_doc/emu3
|
||||
title: Emu3
|
||||
- local: model_doc/flava
|
||||
title: FLAVA
|
||||
- local: model_doc/git
|
||||
@ -922,6 +932,8 @@
|
||||
title: Pix2Struct
|
||||
- local: model_doc/pixtral
|
||||
title: Pixtral
|
||||
- local: model_doc/qwen2_5_vl
|
||||
title: Qwen2.5-VL
|
||||
- local: model_doc/qwen2_audio
|
||||
title: Qwen2Audio
|
||||
- local: model_doc/qwen2_vl
|
||||
|
@ -162,7 +162,7 @@ agent.run(
|
||||
improved_prompt could be "A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background"
|
||||
|
||||
Now that I have improved the prompt, I can use the image generator tool to generate an image based on this prompt.
|
||||
>>> Agent is executing the code below:
|
||||
=== Agent is executing the code below:
|
||||
image = image_generator(prompt="A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background")
|
||||
final_answer(image)
|
||||
```
|
||||
|
@ -1,387 +0,0 @@
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Benchmarks
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Hugging Face's Benchmarking tools are deprecated and it is advised to use external Benchmarking libraries to measure the speed
|
||||
and memory complexity of Transformer models.
|
||||
|
||||
</Tip>
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
Let's take a look at how 🤗 Transformers models can be benchmarked, best practices, and already available benchmarks.
|
||||
|
||||
A notebook explaining in more detail how to benchmark 🤗 Transformers models can be found [here](https://github.com/huggingface/notebooks/tree/main/examples/benchmark.ipynb).
|
||||
|
||||
## How to benchmark 🤗 Transformers models
|
||||
|
||||
The classes [`PyTorchBenchmark`] and [`TensorFlowBenchmark`] allow you to flexibly benchmark 🤗 Transformers models. The benchmark classes allow us to measure the _peak memory usage_ and _required time_ for both _inference_ and _training_.
|
||||
|
||||
<Tip>
|
||||
|
||||
Here, _inference_ is defined by a single forward pass, and _training_ is defined by a single forward pass and
|
||||
backward pass.
|
||||
|
||||
</Tip>
|
||||
|
||||
The benchmark classes [`PyTorchBenchmark`] and [`TensorFlowBenchmark`] expect an object of type [`PyTorchBenchmarkArguments`] and
|
||||
[`TensorFlowBenchmarkArguments`], respectively, for instantiation. [`PyTorchBenchmarkArguments`] and [`TensorFlowBenchmarkArguments`] are data classes and contain all relevant configurations for their corresponding benchmark class. In the following example, it is shown how a BERT model of type _bert-base-uncased_ can be benchmarked.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```py
|
||||
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
|
||||
|
||||
>>> args = PyTorchBenchmarkArguments(models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512])
|
||||
>>> benchmark = PyTorchBenchmark(args)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```py
|
||||
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
|
||||
|
||||
>>> args = TensorFlowBenchmarkArguments(
|
||||
... models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> benchmark = TensorFlowBenchmark(args)
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
Here, three arguments are given to the benchmark argument data classes, namely `models`, `batch_sizes`, and
|
||||
`sequence_lengths`. The argument `models` is required and expects a `list` of model identifiers from the
|
||||
[model hub](https://huggingface.co/models). The `list` arguments `batch_sizes` and `sequence_lengths` define
|
||||
the size of the `input_ids` on which the model is benchmarked. There are many more parameters that can be configured
|
||||
via the benchmark argument data classes. For more detail on these one can either directly consult the files
|
||||
`src/transformers/benchmark/benchmark_args_utils.py`, `src/transformers/benchmark/benchmark_args.py` (for PyTorch)
|
||||
and `src/transformers/benchmark/benchmark_args_tf.py` (for Tensorflow). Alternatively, running the following shell
|
||||
commands from root will print out a descriptive list of all configurable parameters for PyTorch and Tensorflow
|
||||
respectively.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```bash
|
||||
python examples/pytorch/benchmarking/run_benchmark.py --help
|
||||
```
|
||||
|
||||
An instantiated benchmark object can then simply be run by calling `benchmark.run()`.
|
||||
|
||||
```py
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 0.006
|
||||
google-bert/bert-base-uncased 8 32 0.006
|
||||
google-bert/bert-base-uncased 8 128 0.018
|
||||
google-bert/bert-base-uncased 8 512 0.088
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 1227
|
||||
google-bert/bert-base-uncased 8 32 1281
|
||||
google-bert/bert-base-uncased 8 128 1307
|
||||
google-bert/bert-base-uncased 8 512 1539
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: PyTorch
|
||||
- use_torchscript: False
|
||||
- framework_version: 1.4.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 08:58:43.371351
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```bash
|
||||
python examples/tensorflow/benchmarking/run_benchmark_tf.py --help
|
||||
```
|
||||
|
||||
An instantiated benchmark object can then simply be run by calling `benchmark.run()`.
|
||||
|
||||
```py
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 0.005
|
||||
google-bert/bert-base-uncased 8 32 0.008
|
||||
google-bert/bert-base-uncased 8 128 0.022
|
||||
google-bert/bert-base-uncased 8 512 0.105
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 1330
|
||||
google-bert/bert-base-uncased 8 32 1330
|
||||
google-bert/bert-base-uncased 8 128 1330
|
||||
google-bert/bert-base-uncased 8 512 1770
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: Tensorflow
|
||||
- use_xla: False
|
||||
- framework_version: 2.2.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:26:35.617317
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
By default, the _time_ and the _required memory_ for _inference_ are benchmarked. In the example output above the first
|
||||
two sections show the result corresponding to _inference time_ and _inference memory_. In addition, all relevant
|
||||
information about the computing environment, _e.g._ the GPU type, the system, the library versions, etc... are printed
|
||||
out in the third section under _ENVIRONMENT INFORMATION_. This information can optionally be saved in a _.csv_ file
|
||||
when adding the argument `save_to_csv=True` to [`PyTorchBenchmarkArguments`] and
|
||||
[`TensorFlowBenchmarkArguments`] respectively. In this case, every section is saved in a separate
|
||||
_.csv_ file. The path to each _.csv_ file can optionally be defined via the argument data classes.
|
||||
|
||||
Instead of benchmarking pre-trained models via their model identifier, _e.g._ `google-bert/bert-base-uncased`, the user can
|
||||
alternatively benchmark an arbitrary configuration of any available model class. In this case, a `list` of
|
||||
configurations must be inserted with the benchmark args as follows.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```py
|
||||
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments, BertConfig
|
||||
|
||||
>>> args = PyTorchBenchmarkArguments(
|
||||
... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> config_base = BertConfig()
|
||||
>>> config_384_hid = BertConfig(hidden_size=384)
|
||||
>>> config_6_lay = BertConfig(num_hidden_layers=6)
|
||||
|
||||
>>> benchmark = PyTorchBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
|
||||
>>> benchmark.run()
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 8 0.006
|
||||
bert-base 8 32 0.006
|
||||
bert-base 8 128 0.018
|
||||
bert-base 8 512 0.088
|
||||
bert-384-hid 8 8 0.006
|
||||
bert-384-hid 8 32 0.006
|
||||
bert-384-hid 8 128 0.011
|
||||
bert-384-hid 8 512 0.054
|
||||
bert-6-lay 8 8 0.003
|
||||
bert-6-lay 8 32 0.004
|
||||
bert-6-lay 8 128 0.009
|
||||
bert-6-lay 8 512 0.044
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 8 1277
|
||||
bert-base 8 32 1281
|
||||
bert-base 8 128 1307
|
||||
bert-base 8 512 1539
|
||||
bert-384-hid 8 8 1005
|
||||
bert-384-hid 8 32 1027
|
||||
bert-384-hid 8 128 1035
|
||||
bert-384-hid 8 512 1255
|
||||
bert-6-lay 8 8 1097
|
||||
bert-6-lay 8 32 1101
|
||||
bert-6-lay 8 128 1127
|
||||
bert-6-lay 8 512 1359
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: PyTorch
|
||||
- use_torchscript: False
|
||||
- framework_version: 1.4.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:35:25.143267
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```py
|
||||
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments, BertConfig
|
||||
|
||||
>>> args = TensorFlowBenchmarkArguments(
|
||||
... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> config_base = BertConfig()
|
||||
>>> config_384_hid = BertConfig(hidden_size=384)
|
||||
>>> config_6_lay = BertConfig(num_hidden_layers=6)
|
||||
|
||||
>>> benchmark = TensorFlowBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
|
||||
>>> benchmark.run()
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 8 0.005
|
||||
bert-base 8 32 0.008
|
||||
bert-base 8 128 0.022
|
||||
bert-base 8 512 0.106
|
||||
bert-384-hid 8 8 0.005
|
||||
bert-384-hid 8 32 0.007
|
||||
bert-384-hid 8 128 0.018
|
||||
bert-384-hid 8 512 0.064
|
||||
bert-6-lay 8 8 0.002
|
||||
bert-6-lay 8 32 0.003
|
||||
bert-6-lay 8 128 0.0011
|
||||
bert-6-lay 8 512 0.074
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 8 1330
|
||||
bert-base 8 32 1330
|
||||
bert-base 8 128 1330
|
||||
bert-base 8 512 1770
|
||||
bert-384-hid 8 8 1330
|
||||
bert-384-hid 8 32 1330
|
||||
bert-384-hid 8 128 1330
|
||||
bert-384-hid 8 512 1540
|
||||
bert-6-lay 8 8 1330
|
||||
bert-6-lay 8 32 1330
|
||||
bert-6-lay 8 128 1330
|
||||
bert-6-lay 8 512 1540
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: Tensorflow
|
||||
- use_xla: False
|
||||
- framework_version: 2.2.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:38:15.487125
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
Again, _inference time_ and _required memory_ for _inference_ are measured, but this time for customized configurations
|
||||
of the `BertModel` class. This feature can especially be helpful when deciding for which configuration the model
|
||||
should be trained.
|
||||
|
||||
|
||||
## Benchmark best practices
|
||||
|
||||
This section lists a couple of best practices one should be aware of when benchmarking a model.
|
||||
|
||||
- Currently, only single device benchmarking is supported. When benchmarking on GPU, it is recommended that the user
|
||||
specifies on which device the code should be run by setting the `CUDA_VISIBLE_DEVICES` environment variable in the
|
||||
shell, _e.g._ `export CUDA_VISIBLE_DEVICES=0` before running the code.
|
||||
- The option `no_multi_processing` should only be set to `True` for testing and debugging. To ensure accurate
|
||||
memory measurement it is recommended to run each memory benchmark in a separate process by making sure
|
||||
`no_multi_processing` is set to `True`.
|
||||
- One should always state the environment information when sharing the results of a model benchmark. Results can vary
|
||||
heavily between different GPU devices, library versions, etc., as a consequence, benchmark results on their own are not very
|
||||
useful for the community.
|
||||
|
||||
|
||||
## Sharing your benchmark
|
||||
|
||||
Previously, all available core models (10 at the time) were benchmarked for _inference time_, across many different
|
||||
settings: using PyTorch, with and without TorchScript, using TensorFlow, with and without XLA. All of those tests were
|
||||
done across CPUs (except for TensorFlow XLA) and GPUs.
|
||||
|
||||
The approach is detailed in the [following blogpost](https://medium.com/huggingface/benchmarking-transformers-pytorch-and-tensorflow-e2917fb891c2) and the results are
|
||||
available [here](https://docs.google.com/spreadsheets/d/1sryqufw2D0XlUH4sq3e9Wnxu5EAQkaohzrJbd5HdQ_w/edit?usp=sharing).
|
||||
|
||||
With the new _benchmark_ tools, it is easier than ever to share your benchmark results with the community:
|
||||
|
||||
- [PyTorch Benchmarking Results](https://github.com/huggingface/transformers/tree/main/examples/pytorch/benchmarking/README.md).
|
||||
- [TensorFlow Benchmarking Results](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/benchmarking/README.md).
|
@ -23,8 +23,8 @@ of text (as is the case with a standard language model), the model instead conti
|
||||
of one or more **messages**, each of which includes a **role**, like "user" or "assistant", as well as message text.
|
||||
|
||||
Much like tokenization, different models expect very different input formats for chat. This is the reason we added
|
||||
**chat templates** as a feature. Chat templates are part of the tokenizer. They specify how to convert conversations,
|
||||
represented as lists of messages, into a single tokenizable string in the format that the model expects.
|
||||
**chat templates** as a feature. Chat templates are part of the tokenizer for text-only LLMs or processor for multimodal LLMs. They specify how to convert conversations,
|
||||
represented as lists of messages, into a single tokenizable string in the format that the model expects.
|
||||
|
||||
Let's make this concrete with a quick example using the `mistralai/Mistral-7B-Instruct-v0.1` model:
|
||||
|
||||
@ -39,11 +39,11 @@ Let's make this concrete with a quick example using the `mistralai/Mistral-7B-In
|
||||
... ]
|
||||
|
||||
>>> tokenizer.apply_chat_template(chat, tokenize=False)
|
||||
"<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]"
|
||||
"<s> [INST] Hello, how are you? [/INST] I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]"
|
||||
```
|
||||
|
||||
Notice how the tokenizer has added the control tokens [INST] and [/INST] to indicate the start and end of
|
||||
user messages (but not assistant messages!), and the entire chat is condensed into a single string.
|
||||
Notice how the tokenizer has added the control tokens [INST] and [/INST] to indicate the start and end of
|
||||
user messages (but not assistant messages!), and the entire chat is condensed into a single string.
|
||||
If we use `tokenize=True`, which is the default setting, that string will also be tokenized for us.
|
||||
|
||||
Now, try the same code, but swap in the `HuggingFaceH4/zephyr-7b-beta` model instead, and you should get:
|
||||
@ -59,17 +59,26 @@ I'd like to show off how chat templating works!</s>
|
||||
|
||||
Both Zephyr and Mistral-Instruct were fine-tuned from the same base model, `Mistral-7B-v0.1`. However, they were trained
|
||||
with totally different chat formats. Without chat templates, you would have to write manual formatting code for each
|
||||
model, and it's very easy to make minor errors that hurt performance! Chat templates handle the details of formatting
|
||||
model, and it's very easy to make minor errors that hurt performance! Chat templates handle the details of formatting
|
||||
for you, allowing you to write universal code that works for any model.
|
||||
|
||||
<Tip>
|
||||
|
||||
Chat templates are a critical component of our [chat CLI](quicktour#chat-with-text-generation-models).
|
||||
You can apply the learnings of this guide there as well.
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
## How do I use chat templates?
|
||||
|
||||
As you can see in the example above, chat templates are easy to use. Simply build a list of messages, with `role`
|
||||
and `content` keys, and then pass it to the [`~PreTrainedTokenizer.apply_chat_template`] method. Once you do that,
|
||||
and `content` keys, and then pass it to the [`~PreTrainedTokenizer.apply_chat_template`] or [`~ProcessorMixin.apply_chat_template`] method
|
||||
depending on what type of model you are using. Once you do that,
|
||||
you'll get output that's ready to go! When using chat templates as input for model generation, it's also a good idea
|
||||
to use `add_generation_prompt=True` to add a [generation prompt](#what-are-generation-prompts).
|
||||
to use `add_generation_prompt=True` to add a [generation prompt](#what-are-generation-prompts).
|
||||
|
||||
## Usage with text-only LLMs
|
||||
Here's an example of preparing input for `model.generate()`, using `Zephyr` again:
|
||||
|
||||
```python
|
||||
@ -89,19 +98,19 @@ messages = [
|
||||
tokenized_chat = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt")
|
||||
print(tokenizer.decode(tokenized_chat[0]))
|
||||
```
|
||||
This will yield a string in the input format that Zephyr expects.
|
||||
This will yield a string in the input format that Zephyr expects.
|
||||
```text
|
||||
<|system|>
|
||||
You are a friendly chatbot who always responds in the style of a pirate</s>
|
||||
You are a friendly chatbot who always responds in the style of a pirate</s>
|
||||
<|user|>
|
||||
How many helicopters can a human eat in one sitting?</s>
|
||||
How many helicopters can a human eat in one sitting?</s>
|
||||
<|assistant|>
|
||||
```
|
||||
|
||||
Now that our input is formatted correctly for Zephyr, we can use the model to generate a response to the user's question:
|
||||
|
||||
```python
|
||||
outputs = model.generate(tokenized_chat, max_new_tokens=128)
|
||||
outputs = model.generate(tokenized_chat, max_new_tokens=128)
|
||||
print(tokenizer.decode(outputs[0]))
|
||||
```
|
||||
|
||||
@ -109,20 +118,58 @@ This will yield:
|
||||
|
||||
```text
|
||||
<|system|>
|
||||
You are a friendly chatbot who always responds in the style of a pirate</s>
|
||||
You are a friendly chatbot who always responds in the style of a pirate</s>
|
||||
<|user|>
|
||||
How many helicopters can a human eat in one sitting?</s>
|
||||
How many helicopters can a human eat in one sitting?</s>
|
||||
<|assistant|>
|
||||
Matey, I'm afraid I must inform ye that humans cannot eat helicopters. Helicopters are not food, they are flying machines. Food is meant to be eaten, like a hearty plate o' grog, a savory bowl o' stew, or a delicious loaf o' bread. But helicopters, they be for transportin' and movin' around, not for eatin'. So, I'd say none, me hearties. None at all.
|
||||
```
|
||||
|
||||
## Usage with multimodal LLMs
|
||||
|
||||
For multimodal LLMs such as [LLaVA](https://huggingface.co/llava-hf), the prompts can be formatted in a similar way. The only difference is that you also need to pass input images/videos along with the text. Each `"content"`
|
||||
has to be a list containing either a text or an image/video.
|
||||
|
||||
Here's an example of preparing input for the `LLaVA` model:
|
||||
|
||||
```python
|
||||
from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration
|
||||
|
||||
model_id = "llava-hf/llava-onevision-qwen2-0.5b-ov-hf"
|
||||
model = LlavaOnevisionForConditionalGeneration.from_pretrained(model_id) # You may want to use bfloat16 and/or move to GPU here
|
||||
processor = AutoProcessor.from_pretrained(model_id)
|
||||
|
||||
messages = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": [{"type": "text", "text": "You are a friendly chatbot who always responds in the style of a pirate"}],
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
|
||||
{"type": "text", "text": "What are these?"},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
processed_chat = processor.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_dict=True, return_tensors="pt")
|
||||
print(processor.batch_decode(processed_chat["input_ids"][:, :30]))
|
||||
```
|
||||
This yields a string in LLaVA's expected input format with many `<image>` tokens at the end.
|
||||
The `<image>` tokens are placeholders and each one will be replaced by image embeddings when the model is run in the forward call. The `processed_chat` can be further passed into [`~GenerationMixin.generate`] to generate text.
|
||||
```text
|
||||
'<|im_start|>system
|
||||
You are a friendly chatbot who always responds in the style of a pirate<|im_end|><|im_start|>user <image><image><image><image><image><image><image><image>'
|
||||
```
|
||||
|
||||
Arr, 'twas easy after all!
|
||||
|
||||
## Is there an automated pipeline for chat?
|
||||
|
||||
Yes, there is! Our text generation pipelines support chat inputs, which makes it easy to use chat models. In the past,
|
||||
we used to use a dedicated "ConversationalPipeline" class, but this has now been deprecated and its functionality
|
||||
has been merged into the [`TextGenerationPipeline`]. Let's try the `Zephyr` example again, but this time using
|
||||
has been merged into the [`TextGenerationPipeline`]. Let's try the `Zephyr` example again, but this time using
|
||||
a pipeline:
|
||||
|
||||
```python
|
||||
@ -187,9 +234,9 @@ Can I ask a question?<|im_end|>
|
||||
```
|
||||
|
||||
Note that this time, we've added the tokens that indicate the start of a bot response. This ensures that when the model
|
||||
generates text it will write a bot response instead of doing something unexpected, like continuing the user's
|
||||
message. Remember, chat models are still just language models - they're trained to continue text, and chat is just a
|
||||
special kind of text to them! You need to guide them with appropriate control tokens, so they know what they're
|
||||
generates text it will write a bot response instead of doing something unexpected, like continuing the user's
|
||||
message. Remember, chat models are still just language models - they're trained to continue text, and chat is just a
|
||||
special kind of text to them! You need to guide them with appropriate control tokens, so they know what they're
|
||||
supposed to be doing.
|
||||
|
||||
Not all models require generation prompts. Some models, like LLaMA, don't have any
|
||||
@ -201,7 +248,7 @@ effect that `add_generation_prompt` has will depend on the template being used.
|
||||
When passing a list of messages to `apply_chat_template` or `TextGenerationPipeline`, you can choose
|
||||
to format the chat so the model will continue the final message in the chat instead of starting a new one. This is done
|
||||
by removing any end-of-sequence tokens that indicate the end of the final message, so that the model will simply
|
||||
extend the final message when it begins to generate text. This is useful for "prefilling" the model's response.
|
||||
extend the final message when it begins to generate text. This is useful for "prefilling" the model's response.
|
||||
|
||||
Here's an example:
|
||||
|
||||
@ -226,9 +273,9 @@ get an error if you try!
|
||||
<Tip>
|
||||
|
||||
The default behaviour of `TextGenerationPipeline` is to set `add_generation_prompt=True` so that it starts a new
|
||||
message. However, if the final message in the input chat has the "assistant" role, it will assume that this message is
|
||||
a prefill and switch to `continue_final_message=True` instead, because most models do not support multiple
|
||||
consecutive assistant messages. You can override this behaviour by explicitly passing the `continue_final_message`
|
||||
message. However, if the final message in the input chat has the "assistant" role, it will assume that this message is
|
||||
a prefill and switch to `continue_final_message=True` instead, because most models do not support multiple
|
||||
consecutive assistant messages. You can override this behaviour by explicitly passing the `continue_final_message`
|
||||
argument when calling the pipeline.
|
||||
|
||||
</Tip>
|
||||
@ -237,8 +284,8 @@ argument when calling the pipeline.
|
||||
|
||||
Yes! This is a good way to ensure that the chat template matches the tokens the model sees during training.
|
||||
We recommend that you apply the chat template as a preprocessing step for your dataset. After this, you
|
||||
can simply continue like any other language model training task. When training, you should usually set
|
||||
`add_generation_prompt=False`, because the added tokens to prompt an assistant response will not be helpful during
|
||||
can simply continue like any other language model training task. When training, you should usually set
|
||||
`add_generation_prompt=False`, because the added tokens to prompt an assistant response will not be helpful during
|
||||
training. Let's see an example:
|
||||
|
||||
```python
|
||||
@ -272,8 +319,8 @@ From here, just continue training like you would with a standard language modell
|
||||
|
||||
<Tip>
|
||||
|
||||
By default, some tokenizers add special tokens like `<bos>` and `<eos>` to text they tokenize. Chat templates should
|
||||
already include all the special tokens they need, and so additional special tokens will often be incorrect or
|
||||
By default, some tokenizers add special tokens like `<bos>` and `<eos>` to text they tokenize. Chat templates should
|
||||
already include all the special tokens they need, and so additional special tokens will often be incorrect or
|
||||
duplicated, which will hurt model performance.
|
||||
|
||||
Therefore, if you format text with `apply_chat_template(tokenize=False)`, you should set the argument
|
||||
@ -286,7 +333,7 @@ Therefore, if you format text with `apply_chat_template(tokenize=False)`, you sh
|
||||
The only argument that `apply_chat_template` requires is `messages`. However, you can pass any keyword
|
||||
argument to `apply_chat_template` and it will be accessible inside the template. This gives you a lot of freedom to use
|
||||
chat templates for many things. There are no restrictions on the names or the format of these arguments - you can pass
|
||||
strings, lists, dicts or whatever else you want.
|
||||
strings, lists, dicts or whatever else you want.
|
||||
|
||||
That said, there are some common use-cases for these extra arguments,
|
||||
such as passing tools for function calling, or documents for retrieval-augmented generation. In these common cases,
|
||||
@ -309,7 +356,7 @@ def current_time():
|
||||
def multiply(a: float, b: float):
|
||||
"""
|
||||
A function that multiplies two numbers
|
||||
|
||||
|
||||
Args:
|
||||
a: The first number to multiply
|
||||
b: The second number to multiply
|
||||
@ -329,8 +376,8 @@ correctly as tools. Specifically, you should follow these rules:
|
||||
|
||||
- The function should have a descriptive name
|
||||
- Every argument must have a type hint
|
||||
- The function must have a docstring in the standard Google style (in other words, an initial function description
|
||||
followed by an `Args:` block that describes the arguments, unless the function does not have any arguments.
|
||||
- The function must have a docstring in the standard Google style (in other words, an initial function description
|
||||
followed by an `Args:` block that describes the arguments, unless the function does not have any arguments.
|
||||
- Do not include types in the `Args:` block. In other words, write `a: The first number to multiply`, not
|
||||
`a (int): The first number to multiply`. Type hints should go in the function header instead.
|
||||
- The function can have a return type and a `Returns:` block in the docstring. However, these are optional
|
||||
@ -372,7 +419,7 @@ Next, let's define a list of tools:
|
||||
def get_current_temperature(location: str, unit: str) -> float:
|
||||
"""
|
||||
Get the current temperature at a location.
|
||||
|
||||
|
||||
Args:
|
||||
location: The location to get the temperature for, in the format "City, Country"
|
||||
unit: The unit to return the temperature in. (choices: ["celsius", "fahrenheit"])
|
||||
@ -384,7 +431,7 @@ def get_current_temperature(location: str, unit: str) -> float:
|
||||
def get_current_wind_speed(location: str) -> float:
|
||||
"""
|
||||
Get the current wind speed in km/h at a given location.
|
||||
|
||||
|
||||
Args:
|
||||
location: The location to get the temperature for, in the format "City, Country"
|
||||
Returns:
|
||||
@ -429,8 +476,8 @@ the temperature in France should certainly be displayed in Celsius.
|
||||
|
||||
The output format above is specific to the `Hermes-2-Pro` model we're using in this example. Other models may emit different
|
||||
tool call formats, and you may need to do some manual parsing at this step. For example, `Llama-3.1` models will emit
|
||||
slightly different JSON, with `parameters` instead of `arguments`. Regardless of the format the model outputs, you
|
||||
should add the tool call to the conversation in the format below, with `tool_calls`, `function` and `arguments` keys.
|
||||
slightly different JSON, with `parameters` instead of `arguments`. Regardless of the format the model outputs, you
|
||||
should add the tool call to the conversation in the format below, with `tool_calls`, `function` and `arguments` keys.
|
||||
|
||||
</Tip>
|
||||
|
||||
@ -449,7 +496,7 @@ a dict, but in the OpenAI API it's a JSON string. Passing a string may cause err
|
||||
</Tip>
|
||||
|
||||
Now that we've added the tool call to the conversation, we can call the function and append the result to the
|
||||
conversation. Since we're just using a dummy function for this example that always returns 22.0, we can just append
|
||||
conversation. Since we're just using a dummy function for this example that always returns 22.0, we can just append
|
||||
that result directly.
|
||||
|
||||
```python
|
||||
@ -460,7 +507,7 @@ messages.append({"role": "tool", "name": "get_current_temperature", "content": "
|
||||
|
||||
Some model architectures, notably Mistral/Mixtral, also require a `tool_call_id` here, which should be
|
||||
9 randomly-generated alphanumeric characters, and assigned to the `id` key of the tool call
|
||||
dictionary. The same key should also be assigned to the `tool_call_id` key of the tool response dictionary below, so
|
||||
dictionary. The same key should also be assigned to the `tool_call_id` key of the tool response dictionary below, so
|
||||
that tool calls can be matched to tool responses. So, for Mistral/Mixtral models, the code above would be:
|
||||
|
||||
```python
|
||||
@ -492,13 +539,13 @@ And we get:
|
||||
The current temperature in Paris, France is 22.0 ° Celsius.<|im_end|>
|
||||
```
|
||||
|
||||
Although this was a simple demo with dummy tools and a single call, the same technique works with
|
||||
Although this was a simple demo with dummy tools and a single call, the same technique works with
|
||||
multiple real tools and longer conversations. This can be a powerful way to extend the capabilities of conversational
|
||||
agents with real-time information, computational tools like calculators, or access to large databases.
|
||||
|
||||
### Understanding tool schemas
|
||||
|
||||
Each function you pass to the `tools` argument of `apply_chat_template` is converted into a
|
||||
Each function you pass to the `tools` argument of `apply_chat_template` is converted into a
|
||||
[JSON schema](https://json-schema.org/learn/getting-started-step-by-step). These schemas
|
||||
are then passed to the model chat template. In other words, tool-use models do not see your functions directly, and they
|
||||
never see the actual code inside them. What they care about is the function **definitions** and the **arguments** they
|
||||
@ -507,7 +554,7 @@ to read their outputs, detect if they have requested to use a tool, pass their a
|
||||
return the response in the chat.
|
||||
|
||||
Generating JSON schemas to pass to the template should be automatic and invisible as long as your functions
|
||||
follow the specification above, but if you encounter problems, or you simply want more control over the conversion,
|
||||
follow the specification above, but if you encounter problems, or you simply want more control over the conversion,
|
||||
you can handle the conversion manually. Here is an example of a manual schema conversion.
|
||||
|
||||
```python
|
||||
@ -516,7 +563,7 @@ from transformers.utils import get_json_schema
|
||||
def multiply(a: float, b: float):
|
||||
"""
|
||||
A function that multiplies two numbers
|
||||
|
||||
|
||||
Args:
|
||||
a: The first number to multiply
|
||||
b: The second number to multiply
|
||||
@ -531,33 +578,33 @@ This will yield:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "function",
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "multiply",
|
||||
"description": "A function that multiplies two numbers",
|
||||
"name": "multiply",
|
||||
"description": "A function that multiplies two numbers",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"a": {
|
||||
"type": "number",
|
||||
"type": "number",
|
||||
"description": "The first number to multiply"
|
||||
},
|
||||
},
|
||||
"b": {
|
||||
"type": "number",
|
||||
"description": "The second number to multiply"
|
||||
}
|
||||
},
|
||||
},
|
||||
"required": ["a", "b"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
If you wish, you can edit these schemas, or even write them from scratch yourself without using `get_json_schema` at
|
||||
all. JSON schemas can be passed directly to the `tools` argument of
|
||||
If you wish, you can edit these schemas, or even write them from scratch yourself without using `get_json_schema` at
|
||||
all. JSON schemas can be passed directly to the `tools` argument of
|
||||
`apply_chat_template` - this gives you a lot of power to define precise schemas for more complex functions. Be careful,
|
||||
though - the more complex your schemas, the more likely the model is to get confused when dealing with them! We
|
||||
recommend simple function signatures where possible, keeping arguments (and especially complex, nested arguments)
|
||||
though - the more complex your schemas, the more likely the model is to get confused when dealing with them! We
|
||||
recommend simple function signatures where possible, keeping arguments (and especially complex, nested arguments)
|
||||
to a minimum.
|
||||
|
||||
Here is an example of defining schemas by hand, and passing them directly to `apply_chat_template`:
|
||||
@ -565,7 +612,7 @@ Here is an example of defining schemas by hand, and passing them directly to `ap
|
||||
```python
|
||||
# A simple function that takes no arguments
|
||||
current_time = {
|
||||
"type": "function",
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "current_time",
|
||||
"description": "Get the current local time as a string.",
|
||||
@ -581,18 +628,18 @@ multiply = {
|
||||
'type': 'function',
|
||||
'function': {
|
||||
'name': 'multiply',
|
||||
'description': 'A function that multiplies two numbers',
|
||||
'description': 'A function that multiplies two numbers',
|
||||
'parameters': {
|
||||
'type': 'object',
|
||||
'type': 'object',
|
||||
'properties': {
|
||||
'a': {
|
||||
'type': 'number',
|
||||
'description': 'The first number to multiply'
|
||||
},
|
||||
},
|
||||
'b': {
|
||||
'type': 'number', 'description': 'The second number to multiply'
|
||||
}
|
||||
},
|
||||
},
|
||||
'required': ['a', 'b']
|
||||
}
|
||||
}
|
||||
@ -607,7 +654,7 @@ model_input = tokenizer.apply_chat_template(
|
||||
## Advanced: Retrieval-augmented generation
|
||||
|
||||
"Retrieval-augmented generation" or "RAG" LLMs can search a corpus of documents for information before responding
|
||||
to a query. This allows models to vastly expand their knowledge base beyond their limited context size. Our
|
||||
to a query. This allows models to vastly expand their knowledge base beyond their limited context size. Our
|
||||
recommendation for RAG models is that their template
|
||||
should accept a `documents` argument. This should be a list of documents, where each "document"
|
||||
is a single dict with `title` and `contents` keys, both of which are strings. Because this format is much simpler
|
||||
@ -632,7 +679,7 @@ conversation = [
|
||||
# Define documents for retrieval-based generation
|
||||
documents = [
|
||||
{
|
||||
"title": "The Moon: Our Age-Old Foe",
|
||||
"title": "The Moon: Our Age-Old Foe",
|
||||
"text": "Man has always dreamed of destroying the moon. In this essay, I shall..."
|
||||
},
|
||||
{
|
||||
@ -650,7 +697,7 @@ input_ids = tokenizer.apply_chat_template(
|
||||
add_generation_prompt=True,
|
||||
return_tensors="pt").to(device)
|
||||
|
||||
# Generate a response
|
||||
# Generate a response
|
||||
gen_tokens = model.generate(
|
||||
input_ids,
|
||||
max_new_tokens=100,
|
||||
@ -710,8 +757,8 @@ Effectively, the template does three things:
|
||||
an assistant response.
|
||||
|
||||
This is a pretty simple template but Jinja gives you a lot of flexibility to do more complex things! Let's see a Jinja
|
||||
template that can format inputs similarly to the way LLaMA formats them (note that the real LLaMA template includes
|
||||
handling for default system messages and slightly different system message handling in general - don't use this one
|
||||
template that can format inputs similarly to the way LLaMA formats them (note that the real LLaMA template includes
|
||||
handling for default system messages and slightly different system message handling in general - don't use this one
|
||||
in your actual code!)
|
||||
|
||||
```
|
||||
@ -734,7 +781,7 @@ distinguishable to the model because of the tokens they're wrapped in.
|
||||
|
||||
### How do I create a chat template?
|
||||
|
||||
Simple, just write a jinja template and set `tokenizer.chat_template`. You may find it easier to start with an
|
||||
Simple, just write a jinja template and set `tokenizer.chat_template`. You may find it easier to start with an
|
||||
existing template from another model and simply edit it for your needs! For example, we could take the LLaMA template
|
||||
above and add "[ASST]" and "[/ASST]" to assistant messages:
|
||||
|
||||
@ -762,13 +809,13 @@ tokenizer.chat_template = template # Set the new template
|
||||
tokenizer.push_to_hub("model_name") # Upload your new template to the Hub!
|
||||
```
|
||||
|
||||
The method [`~PreTrainedTokenizer.apply_chat_template`] which uses your chat template is called by the [`TextGenerationPipeline`] class, so
|
||||
The method [`~PreTrainedTokenizer.apply_chat_template`] which uses your chat template is called by the [`TextGenerationPipeline`] class, so
|
||||
once you set the correct chat template, your model will automatically become compatible with [`TextGenerationPipeline`].
|
||||
|
||||
<Tip>
|
||||
If you're fine-tuning a model for chat, in addition to setting a chat template, you should probably add any new chat
|
||||
control tokens as special tokens in the tokenizer. Special tokens are never split,
|
||||
ensuring that your control tokens are always handled as single tokens rather than being tokenized in pieces. You
|
||||
control tokens as special tokens in the tokenizer. Special tokens are never split,
|
||||
ensuring that your control tokens are always handled as single tokens rather than being tokenized in pieces. You
|
||||
should also set the tokenizer's `eos_token` attribute to the token that marks the end of assistant generations in your
|
||||
template. This will ensure that text generation tools can correctly figure out when to stop generating text.
|
||||
</Tip>
|
||||
@ -796,13 +843,13 @@ trying to put it all in a single template where possible!
|
||||
|
||||
When setting the template for a model that's already been trained for chat, you should ensure that the template
|
||||
exactly matches the message formatting that the model saw during training, or else you will probably experience
|
||||
performance degradation. This is true even if you're training the model further - you will probably get the best
|
||||
performance degradation. This is true even if you're training the model further - you will probably get the best
|
||||
performance if you keep the chat tokens constant. This is very analogous to tokenization - you generally get the
|
||||
best performance for inference or fine-tuning when you precisely match the tokenization used during training.
|
||||
|
||||
If you're training a model from scratch, or fine-tuning a base language model for chat, on the other hand,
|
||||
you have a lot of freedom to choose an appropriate template! LLMs are smart enough to learn to handle lots of different
|
||||
input formats. One popular choice is the `ChatML` format, and this is a good, flexible choice for many use-cases.
|
||||
input formats. One popular choice is the `ChatML` format, and this is a good, flexible choice for many use-cases.
|
||||
It looks like this:
|
||||
|
||||
```
|
||||
@ -848,7 +895,7 @@ Once the attribute is set, that's it, you're done! `tokenizer.apply_chat_templat
|
||||
model, which means it is also automatically supported in places like `TextGenerationPipeline`!
|
||||
|
||||
By ensuring that models have this attribute, we can make sure that the whole community gets to use the full power of
|
||||
open-source models. Formatting mismatches have been haunting the field and silently harming performance for too long -
|
||||
open-source models. Formatting mismatches have been haunting the field and silently harming performance for too long -
|
||||
it's time to put an end to them!
|
||||
|
||||
## Advanced: Template writing tips
|
||||
@ -856,17 +903,17 @@ it's time to put an end to them!
|
||||
<Tip>
|
||||
|
||||
The easiest way to get started with writing Jinja templates is to take a look at some existing ones. You can use
|
||||
`print(tokenizer.chat_template)` for any chat model to see what template it's using. In general, models that support tool use have
|
||||
`print(tokenizer.chat_template)` for any chat model to see what template it's using. In general, models that support tool use have
|
||||
much more complex templates than other models - so when you're just getting started, they're probably a bad example
|
||||
to learn from! You can also take a look at the
|
||||
to learn from! You can also take a look at the
|
||||
[Jinja documentation](https://jinja.palletsprojects.com/en/3.1.x/templates/#synopsis) for details
|
||||
of general Jinja formatting and syntax.
|
||||
|
||||
</Tip>
|
||||
|
||||
Jinja templates in `transformers` are identical to Jinja templates elsewhere. The main thing to know is that
|
||||
the conversation history will be accessible inside your template as a variable called `messages`.
|
||||
You will be able to access `messages` in your template just like you can in Python, which means you can loop over
|
||||
Jinja templates in `transformers` are identical to Jinja templates elsewhere. The main thing to know is that
|
||||
the conversation history will be accessible inside your template as a variable called `messages`.
|
||||
You will be able to access `messages` in your template just like you can in Python, which means you can loop over
|
||||
it with `{% for message in messages %}` or access individual messages with `{{ messages[0] }}`, for example.
|
||||
|
||||
You can also use the following tips to write clean, efficient Jinja templates:
|
||||
@ -896,7 +943,7 @@ and indentation may end up being included in the output, which is probably not w
|
||||
|
||||
### Special variables
|
||||
|
||||
Inside your template, you will have access several special variables. The most important of these is `messages`,
|
||||
Inside your template, you will have access several special variables. The most important of these is `messages`,
|
||||
which contains the chat history as a list of message dicts. However, there are several others. Not every
|
||||
variable will be used in every template. The most common other variables are:
|
||||
|
||||
@ -930,7 +977,7 @@ There are multiple implementations of Jinja in various languages. They generally
|
||||
but a key difference is that when you're writing a template in Python you can use Python methods, such as
|
||||
`.lower()` on strings or `.items()` on dicts. This will break if someone tries to use your template on a non-Python
|
||||
implementation of Jinja. Non-Python implementations are particularly common in deployment environments, where JS
|
||||
and Rust are very popular.
|
||||
and Rust are very popular.
|
||||
|
||||
Don't panic, though! There are a few easy changes you can make to your templates to ensure they're compatible across
|
||||
all implementations of Jinja:
|
||||
@ -962,21 +1009,21 @@ Here is an example of a template that formats messages ChatML-style, with genera
|
||||
```
|
||||
|
||||
The exact content of the assistant header will depend on your specific model, but it should always be **the string
|
||||
that represents the start of an assistant message**, so that if the user applies your template with
|
||||
that represents the start of an assistant message**, so that if the user applies your template with
|
||||
`add_generation_prompt=True` and then generates text, the model will write an assistant response. Also note that some
|
||||
models do not need a generation prompt, because assistant messages always begin immediately after user messages.
|
||||
models do not need a generation prompt, because assistant messages always begin immediately after user messages.
|
||||
This is particularly common for LLaMA and Mistral models, where assistant messages begin immediately after the `[/INST]`
|
||||
token that ends user messages. In these cases, the template can ignore the `add_generation_prompt` flag.
|
||||
|
||||
Generation prompts are important! If your model requires a generation prompt but it is not set in the template, then
|
||||
model generations will likely be severely degraded, or the model may display unusual behaviour like continuing
|
||||
the final user message!
|
||||
model generations will likely be severely degraded, or the model may display unusual behaviour like continuing
|
||||
the final user message!
|
||||
|
||||
### Writing and debugging larger templates
|
||||
|
||||
When this feature was introduced, most templates were quite small, the Jinja equivalent of a "one-liner" script.
|
||||
When this feature was introduced, most templates were quite small, the Jinja equivalent of a "one-liner" script.
|
||||
However, with new models and features like tool-use and RAG, some templates can be 100 lines long or more. When
|
||||
writing templates like these, it's a good idea to write them in a separate file, using a text editor. You can easily
|
||||
writing templates like these, it's a good idea to write them in a separate file, using a text editor. You can easily
|
||||
extract a chat template to a file:
|
||||
|
||||
```python
|
||||
@ -995,7 +1042,7 @@ identify the source of issues.
|
||||
|
||||
### Writing templates for tools
|
||||
|
||||
Although chat templates do not enforce a specific API for tools (or for anything, really), we recommend
|
||||
Although chat templates do not enforce a specific API for tools (or for anything, really), we recommend
|
||||
template authors try to stick to a standard API where possible. The whole point of chat templates is to allow code
|
||||
to be transferable across models, so deviating from the standard tools API means users will have to write
|
||||
custom code to use tools with your model. Sometimes it's unavoidable, but often with clever templating you can
|
||||
@ -1005,30 +1052,30 @@ Below, we'll list the elements of the standard API, and give tips on writing tem
|
||||
|
||||
#### Tool definitions
|
||||
|
||||
Your template should expect that the variable `tools` will either be null (if no tools are passed), or is a list
|
||||
Your template should expect that the variable `tools` will either be null (if no tools are passed), or is a list
|
||||
of JSON schema dicts. Our chat template methods allow users to pass tools as either JSON schema or Python functions, but when
|
||||
functions are passed, we automatically generate JSON schema and pass that to your template. As a result, the
|
||||
functions are passed, we automatically generate JSON schema and pass that to your template. As a result, the
|
||||
`tools` variable that your template receives will always be a list of JSON schema. Here is
|
||||
a sample tool JSON schema:
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "function",
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "multiply",
|
||||
"description": "A function that multiplies two numbers",
|
||||
"name": "multiply",
|
||||
"description": "A function that multiplies two numbers",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"a": {
|
||||
"type": "number",
|
||||
"type": "number",
|
||||
"description": "The first number to multiply"
|
||||
},
|
||||
},
|
||||
"b": {
|
||||
"type": "number",
|
||||
"description": "The second number to multiply"
|
||||
}
|
||||
},
|
||||
},
|
||||
"required": ["a", "b"]
|
||||
}
|
||||
}
|
||||
@ -1052,13 +1099,13 @@ specific format - your model will probably need different formatting!
|
||||
|
||||
The specific tokens and tool descriptions your template renders should of course be chosen to match the ones your model
|
||||
was trained with. There is no requirement that your **model** understands JSON schema input, only that your template can translate
|
||||
JSON schema into your model's format. For example, [Command-R](https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024)
|
||||
was trained with tools defined using Python function headers, but the Command-R tool template accepts JSON schema,
|
||||
JSON schema into your model's format. For example, [Command-R](https://huggingface.co/CohereForAI/c4ai-command-r-plus-08-2024)
|
||||
was trained with tools defined using Python function headers, but the Command-R tool template accepts JSON schema,
|
||||
converts types internally and renders the input tools as Python headers. You can do a lot with templates!
|
||||
|
||||
#### Tool calls
|
||||
|
||||
Tool calls, if present, will be a list attached to a message with the "assistant" role. Note that `tool_calls` is
|
||||
Tool calls, if present, will be a list attached to a message with the "assistant" role. Note that `tool_calls` is
|
||||
always a list, even though most tool-calling models only support single tool calls at a time, which means
|
||||
the list will usually only have a single element. Here is a sample message dict containing a tool call:
|
||||
|
||||
|
@ -41,6 +41,13 @@ This guide describes:
|
||||
* common decoding strategies and their main parameters
|
||||
* saving and sharing custom generation configurations with your fine-tuned model on 🤗 Hub
|
||||
|
||||
<Tip>
|
||||
|
||||
`generate()` is a critical component of our [chat CLI](quicktour#chat-with-text-generation-models).
|
||||
You can apply the learnings of this guide there as well.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Default text generation configuration
|
||||
|
||||
A decoding strategy for a model is defined in its generation configuration. When using pre-trained models for inference
|
||||
@ -224,7 +231,7 @@ to check if the text is machine-generated (outputs `True` for machine-generated
|
||||
>>> detector = WatermarkDetector(model_config=model.config, device="cpu", watermarking_config=watermarking_config)
|
||||
>>> detection_out = detector(out, return_dict=True)
|
||||
>>> detection_out.prediction
|
||||
array([True, True])
|
||||
array([ True, True])
|
||||
```
|
||||
|
||||
|
||||
@ -262,7 +269,7 @@ dimension you can act upon, in addition to selecting a decoding strategy. Popula
|
||||
>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
|
||||
>>> outputs = model.generate(**inputs)
|
||||
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
['I look forward to seeing you all again!\n\n\n\n\n\n\n\n\n\n\n']
|
||||
['I look forward to seeing you all again!\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n']
|
||||
```
|
||||
|
||||
### Contrastive search
|
||||
@ -438,7 +445,7 @@ To enable assisted decoding, set the `assistant_model` argument with a model.
|
||||
>>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
|
||||
>>> outputs = model.generate(**inputs, assistant_model=assistant_model)
|
||||
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a']
|
||||
['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a glass of wine.']
|
||||
```
|
||||
|
||||
<Tip>
|
||||
@ -454,7 +461,7 @@ If you're using a `pipeline` object, all you need to do is to pass the assistant
|
||||
... model="meta-llama/Llama-3.1-8B",
|
||||
... assistant_model="meta-llama/Llama-3.2-1B", # This extra line is all that's needed, also works with UAD
|
||||
... torch_dtype=torch.bfloat16
|
||||
>>> )
|
||||
... )
|
||||
>>> pipe_output = pipe("Once upon a time, ", max_new_tokens=50, do_sample=False)
|
||||
>>> pipe_output[0]["generated_text"]
|
||||
'Once upon a time, 3D printing was a niche technology that was only'
|
||||
@ -481,7 +488,7 @@ just like in multinomial sampling. However, in assisted decoding, reducing the t
|
||||
>>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
|
||||
>>> outputs = model.generate(**inputs, assistant_model=assistant_model, do_sample=True, temperature=0.5)
|
||||
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
['Alice and Bob, a couple of friends of mine, who are both in the same office as']
|
||||
['Alice and Bob are two people who are very different, but they are both very good at what they do. Alice']
|
||||
```
|
||||
|
||||
We recommend to install `scikit-learn` library to enhance the candidate generation strategy and achieve additional speedup.
|
||||
@ -511,7 +518,7 @@ to ensure the new tokens include the correct prompt suffix.
|
||||
>>> assistant_model = AutoModelForCausalLM.from_pretrained(assistant_checkpoint)
|
||||
>>> outputs = model.generate(**inputs, assistant_model=assistant_model, tokenizer=tokenizer, assistant_tokenizer=assistant_tokenizer)
|
||||
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a']
|
||||
['Alice and Bob are playing a game. Alice has a set of $n$ integers $a_1, a']
|
||||
```
|
||||
|
||||
#### Prompt Lookup
|
||||
@ -540,7 +547,7 @@ If the model you're using was trained to do early exit, you can pass
|
||||
>>> model = AutoModelForCausalLM.from_pretrained(checkpoint)
|
||||
>>> outputs = model.generate(**inputs, assistant_early_exit=4, do_sample=False, max_new_tokens=20)
|
||||
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
['Alice and Bob are sitting in a bar. Alice is drinking a beer and Bob is drinking a']
|
||||
['Alice and Bob are playing a game. Alice has a set of $n$ integers $a_1, a']
|
||||
```
|
||||
|
||||
### DoLa Decoding
|
||||
@ -564,10 +571,9 @@ See the following examples for DoLa decoding with the 32-layer LLaMA-7B model.
|
||||
>>> import torch
|
||||
>>> from accelerate.test_utils.testing import get_backend
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b", torch_dtype=torch.float16)
|
||||
>>> device, _, _ = get_backend() # automatically detects the underlying device type (CUDA, CPU, XPU, MPS, etc.)
|
||||
>>> model.to(device)
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("huggyllama/llama-7b", torch_dtype=torch.float16).to(device)
|
||||
>>> set_seed(42)
|
||||
|
||||
>>> text = "On what date was the Declaration of Independence officially signed?"
|
||||
@ -586,7 +592,7 @@ See the following examples for DoLa decoding with the 32-layer LLaMA-7B model.
|
||||
# DoLa decoding with contrasting specific layers (layers 28 and 30)
|
||||
>>> dola_custom_output = model.generate(**inputs, do_sample=False, max_new_tokens=50, dola_layers=[28,30], repetition_penalty=1.2)
|
||||
>>> tokenizer.batch_decode(dola_custom_output[:, inputs.input_ids.shape[-1]:], skip_special_tokens=True)
|
||||
['\nIt was officially signed on 2 August 1776, when 56 members of the Second Continental Congress, representing the original 13 American colonies, voted unanimously for the resolution for independence. The 2']
|
||||
['\nIn 1891, when he was 54 years old, John Jacob Astor founded his empire. He opened a one-man business and spent the next 27 years working 10-hour days. When']
|
||||
```
|
||||
|
||||
#### Understanding the `dola_layers` argument
|
||||
|
@ -137,6 +137,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [EfficientFormer](model_doc/efficientformer) | ✅ | ✅ | ❌ |
|
||||
| [EfficientNet](model_doc/efficientnet) | ✅ | ❌ | ❌ |
|
||||
| [ELECTRA](model_doc/electra) | ✅ | ✅ | ✅ |
|
||||
| [Emu3](model_doc/emu3) | ✅ | ❌ | ❌ |
|
||||
| [EnCodec](model_doc/encodec) | ✅ | ❌ | ❌ |
|
||||
| [Encoder decoder](model_doc/encoder-decoder) | ✅ | ✅ | ✅ |
|
||||
| [ERNIE](model_doc/ernie) | ✅ | ❌ | ❌ |
|
||||
@ -172,6 +173,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [Graphormer](model_doc/graphormer) | ✅ | ❌ | ❌ |
|
||||
| [Grounding DINO](model_doc/grounding-dino) | ✅ | ❌ | ❌ |
|
||||
| [GroupViT](model_doc/groupvit) | ✅ | ✅ | ❌ |
|
||||
| [Helium](model_doc/helium) | ✅ | ❌ | ❌ |
|
||||
| [HerBERT](model_doc/herbert) | ✅ | ✅ | ✅ |
|
||||
| [Hiera](model_doc/hiera) | ✅ | ❌ | ❌ |
|
||||
| [Hubert](model_doc/hubert) | ✅ | ✅ | ❌ |
|
||||
@ -235,6 +237,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [MobileViT](model_doc/mobilevit) | ✅ | ✅ | ❌ |
|
||||
| [MobileViTV2](model_doc/mobilevitv2) | ✅ | ❌ | ❌ |
|
||||
| [ModernBERT](model_doc/modernbert) | ✅ | ❌ | ❌ |
|
||||
| [Moonshine](model_doc/moonshine) | ✅ | ❌ | ❌ |
|
||||
| [Moshi](model_doc/moshi) | ✅ | ❌ | ❌ |
|
||||
| [MPNet](model_doc/mpnet) | ✅ | ✅ | ❌ |
|
||||
| [MPT](model_doc/mpt) | ✅ | ❌ | ❌ |
|
||||
@ -282,6 +285,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [PVTv2](model_doc/pvt_v2) | ✅ | ❌ | ❌ |
|
||||
| [QDQBert](model_doc/qdqbert) | ✅ | ❌ | ❌ |
|
||||
| [Qwen2](model_doc/qwen2) | ✅ | ❌ | ❌ |
|
||||
| [Qwen2_5_VL](model_doc/qwen2_5_vl) | ✅ | ❌ | ❌ |
|
||||
| [Qwen2Audio](model_doc/qwen2_audio) | ✅ | ❌ | ❌ |
|
||||
| [Qwen2MoE](model_doc/qwen2_moe) | ✅ | ❌ | ❌ |
|
||||
| [Qwen2VL](model_doc/qwen2_vl) | ✅ | ❌ | ❌ |
|
||||
@ -315,6 +319,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [SqueezeBERT](model_doc/squeezebert) | ✅ | ❌ | ❌ |
|
||||
| [StableLm](model_doc/stablelm) | ✅ | ❌ | ❌ |
|
||||
| [Starcoder2](model_doc/starcoder2) | ✅ | ❌ | ❌ |
|
||||
| [SuperGlue](model_doc/superglue) | ✅ | ❌ | ❌ |
|
||||
| [SuperPoint](model_doc/superpoint) | ✅ | ❌ | ❌ |
|
||||
| [SwiftFormer](model_doc/swiftformer) | ✅ | ✅ | ❌ |
|
||||
| [Swin Transformer](model_doc/swin) | ✅ | ✅ | ❌ |
|
||||
@ -356,8 +361,8 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [ViTMAE](model_doc/vit_mae) | ✅ | ✅ | ❌ |
|
||||
| [ViTMatte](model_doc/vitmatte) | ✅ | ❌ | ❌ |
|
||||
| [ViTMSN](model_doc/vit_msn) | ✅ | ❌ | ❌ |
|
||||
| [VitPose](model_doc/vitpose) | ✅ | ❌ | ❌ |
|
||||
| [VitPoseBackbone](model_doc/vitpose_backbone) | ✅ | ❌ | ❌ |
|
||||
| [ViTPose](model_doc/vitpose) | ✅ | ❌ | ❌ |
|
||||
| [ViTPoseBackbone](model_doc/vitpose_backbone) | ✅ | ❌ | ❌ |
|
||||
| [VITS](model_doc/vits) | ✅ | ❌ | ❌ |
|
||||
| [ViViT](model_doc/vivit) | ✅ | ❌ | ❌ |
|
||||
| [Wav2Vec2](model_doc/wav2vec2) | ✅ | ✅ | ✅ |
|
||||
@ -380,6 +385,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [YOLOS](model_doc/yolos) | ✅ | ❌ | ❌ |
|
||||
| [YOSO](model_doc/yoso) | ✅ | ❌ | ❌ |
|
||||
| [Zamba](model_doc/zamba) | ✅ | ❌ | ❌ |
|
||||
| [Zamba2](model_doc/zamba2) | ✅ | ❌ | ❌ |
|
||||
| [ZoeDepth](model_doc/zoedepth) | ✅ | ❌ | ❌ |
|
||||
|
||||
<!-- End table-->
|
||||
|
@ -32,29 +32,40 @@ Install 🤗 Transformers for whichever deep learning library you're working wit
|
||||
|
||||
You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, take a look at this [guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). A virtual environment makes it easier to manage different projects, and avoid compatibility issues between dependencies.
|
||||
|
||||
Start by creating a virtual environment in your project directory:
|
||||
Create a virtual environment with [uv](https://docs.astral.sh/uv/) (refer to [Installation](https://docs.astral.sh/uv/getting-started/installation/) for installation instructions), a fast Rust-based Python package and project manager.
|
||||
|
||||
```bash
|
||||
python -m venv .env
|
||||
uv venv my-env
|
||||
source my-env/bin/activate
|
||||
```
|
||||
|
||||
Activate the virtual environment. On Linux and MacOs:
|
||||
Now you're ready to install 🤗 Transformers with pip or uv.
|
||||
|
||||
<hfoptions id="install">
|
||||
<hfoption id="uv">
|
||||
|
||||
```bash
|
||||
source .env/bin/activate
|
||||
```
|
||||
Activate Virtual environment on Windows
|
||||
|
||||
```bash
|
||||
.env/Scripts/activate
|
||||
uv pip install transformers
|
||||
```
|
||||
|
||||
Now you're ready to install 🤗 Transformers with the following command:
|
||||
</hfoption>
|
||||
<hfoption id="pip">
|
||||
|
||||
```bash
|
||||
pip install transformers
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
For GPU acceleration, install the appropriate CUDA drivers for [PyTorch](https://pytorch.org/get-started/locally) and TensorFlow(https://www.tensorflow.org/install/pip).
|
||||
|
||||
Run the command below to check if your system detects an NVIDIA GPU.
|
||||
|
||||
```bash
|
||||
nvidia-smi
|
||||
```
|
||||
|
||||
For CPU-support only, you can conveniently install 🤗 Transformers and a deep learning library in one line. For example, install 🤗 Transformers and PyTorch with:
|
||||
|
||||
```bash
|
||||
@ -254,3 +265,36 @@ Once your file is downloaded and locally cached, specify it's local path to load
|
||||
See the [How to download files from the Hub](https://huggingface.co/docs/hub/how-to-downstream) section for more details on downloading files stored on the Hub.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
See below for some of the more common installation issues and how to resolve them.
|
||||
|
||||
### Unsupported Python version
|
||||
|
||||
Ensure you are using Python 3.9 or later. Run the command below to check your Python version.
|
||||
|
||||
```
|
||||
python --version
|
||||
```
|
||||
|
||||
### Missing dependencies
|
||||
|
||||
Install all required dependencies by running the following command. Ensure you’re in the project directory before executing the command.
|
||||
|
||||
```
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
### Windows-specific
|
||||
|
||||
If you encounter issues on Windows, you may need to activate Developer Mode. Navigate to Windows Settings > For Developers > Developer Mode.
|
||||
|
||||
Alternatively, create and activate a virtual environment as shown below.
|
||||
|
||||
```
|
||||
python -m venv env
|
||||
.\env\Scripts\activate
|
||||
```
|
||||
|
||||
|
||||
|
@ -56,7 +56,7 @@ More concretely, key-value cache acts as a memory bank for these generative mode
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
|
||||
|
||||
>>> model_id = "meta-llama/Llama-2-7b-chat-hf"
|
||||
>>> model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
|
||||
>>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="cuda:0")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(model_id)
|
||||
|
||||
@ -82,7 +82,13 @@ More concretely, key-value cache acts as a memory bank for these generative mode
|
||||
... cache_position = cache_position[-1:] + 1 # add one more position for the next token
|
||||
|
||||
>>> print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])
|
||||
"[INST] Hello, what's your name. [/INST] Hello! My name is LLaMA,"
|
||||
```
|
||||
```txt
|
||||
<|user|>
|
||||
Hello, what's your name.
|
||||
<|assistant|>
|
||||
My name is Sarah.
|
||||
<|
|
||||
```
|
||||
|
||||
</details>
|
||||
@ -132,17 +138,13 @@ Cache quantization can be detrimental in terms of latency if the context length
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16).to("cuda:0")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.float16).to("cuda:0")
|
||||
>>> inputs = tokenizer("I like rock music because", return_tensors="pt").to(model.device)
|
||||
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="quantized", cache_config={"nbits": 4, "backend": "quanto"})
|
||||
>>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])
|
||||
I like rock music because it's loud and energetic. It's a great way to express myself and rel
|
||||
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20)
|
||||
>>> print(tokenizer.batch_decode(out, skip_special_tokens=True)[0])
|
||||
I like rock music because it's loud and energetic. I like to listen to it when I'm feeling
|
||||
I like rock music because it's a great way to express myself. I like the way it makes me feel, the
|
||||
```
|
||||
|
||||
### Offloaded Cache
|
||||
@ -231,14 +233,14 @@ For more examples with Static Cache and JIT compilation, take a look at [StaticC
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.float16, device_map="auto")
|
||||
>>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
|
||||
|
||||
>>> # simply pass the cache implementation="static"
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="static")
|
||||
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
|
||||
"Hello, my name is [Your Name], and I am a [Your Profession] with [Number of Years] of"
|
||||
"Hello, my name is [Your Name] and I am a [Your Position] at [Your Company]. I am writing"
|
||||
```
|
||||
|
||||
|
||||
@ -256,7 +258,7 @@ This will use the [`~OffloadedStaticCache`] implementation instead.
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto")
|
||||
>>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
|
||||
|
||||
>>> # simply pass the cache implementation="static"
|
||||
>>> # simply pass the cache implementation="offloaded_static"
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="offloaded_static")
|
||||
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
|
||||
"Hello, my name is [Your Name], and I am a [Your Profession] with [Number of Years] of"
|
||||
@ -275,14 +277,14 @@ Note that you can use this cache only for models that support sliding window, e.
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, SinkCache
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", torch_dtype=torch.float16).to("cuda:0")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("teknium/OpenHermes-2.5-Mistral-7B")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("teknium/OpenHermes-2.5-Mistral-7B", torch_dtype=torch.float16).to("cuda:0")
|
||||
>>> inputs = tokenizer("Yesterday I was on a rock concert and.", return_tensors="pt").to(model.device)
|
||||
|
||||
>>> # can be used by passing in cache implementation
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=30, cache_implementation="sliding_window")
|
||||
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
|
||||
"Yesterday I was on a rock concert and. I was so excited to see my favorite band. I was so excited that I was jumping up and down and screaming. I was so excited that I"
|
||||
"Yesterday I was on a rock concert and. I was so excited to see my favorite band perform live. I was so happy that I could hardly contain myself. I was jumping up and down and"
|
||||
```
|
||||
|
||||
### Sink Cache
|
||||
@ -295,8 +297,8 @@ Unlike other cache classes, this one can't be used directly by indicating a `cac
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, SinkCache
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16).to("cuda:0")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.float16).to("cuda:0")
|
||||
>>> inputs = tokenizer("This is a long story about unicorns, fairies and magic.", return_tensors="pt").to(model.device)
|
||||
|
||||
>>> # get our cache, specify number of sink tokens and window size
|
||||
@ -304,7 +306,7 @@ Unlike other cache classes, this one can't be used directly by indicating a `cac
|
||||
>>> past_key_values = SinkCache(window_length=256, num_sink_tokens=4)
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=30, past_key_values=past_key_values)
|
||||
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
|
||||
"This is a long story about unicorns, fairies and magic. It is a fantasy world where unicorns and fairies live together in harmony. The story follows a young girl named Lily"
|
||||
"This is a long story about unicorns, fairies and magic. It is a story about a young girl named Lily who discovers that she has the power to control the elements. She learns that she can"
|
||||
```
|
||||
|
||||
### Encoder-Decoder Cache
|
||||
@ -332,15 +334,15 @@ In case you are using Sink Cache, you have to crop your inputs to that maximum l
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer,AutoModelForCausalLM
|
||||
>>> from transformers.cache_utils import (
|
||||
>>> DynamicCache,
|
||||
>>> SinkCache,
|
||||
>>> StaticCache,
|
||||
>>> SlidingWindowCache,
|
||||
>>> QuantoQuantizedCache,
|
||||
>>> QuantizedCacheConfig,
|
||||
>>> )
|
||||
... DynamicCache,
|
||||
... SinkCache,
|
||||
... StaticCache,
|
||||
... SlidingWindowCache,
|
||||
... QuantoQuantizedCache,
|
||||
... QuantizedCacheConfig,
|
||||
... )
|
||||
|
||||
>>> model_id = "meta-llama/Llama-2-7b-chat-hf"
|
||||
>>> model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
|
||||
>>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map='auto')
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(model_id)
|
||||
|
||||
@ -363,7 +365,7 @@ In case you are using Sink Cache, you have to crop your inputs to that maximum l
|
||||
... messages.append({"role": "assistant", "content": completion})
|
||||
|
||||
print(messages)
|
||||
[{'role': 'user', 'content': "Hello, what's your name?"}, {'role': 'assistant', 'content': " Hello! My name is LLaMA, I'm a large language model trained by a team of researcher at Meta AI. 😊"}, {'role': 'user', 'content': 'Btw, yesterday I was on a rock concert.'}, {'role': 'assistant', 'content': ' Oh, cool! That sounds like a lot of fun! 🎉 Did you enjoy the concert? What was the band like? 🤔'}]
|
||||
[{'role': 'user', 'content': "Hello, what's your name?"}, {'role': 'assistant', 'content': "Hello, I'm AI."}, {'role': 'user', 'content': 'Btw, yesterday I was on a rock concert.'}, {'role': 'assistant', 'content': "I'm sorry to hear that you were on a rock concert yesterday. It sounds like a fun experience, but I'm not capable of experiencing music or concerts. However, I can provide you with some information about rock music and its history. Rock music emerged in the 1950s and 1960s in the United States and Britain, and it quickly gained popularity around the world. Some of the most famous rock bands of all time include The Beatles, The Rolling Stones, Led Zeppelin, and Pink Floyd. Rock music has a distinct sound and style, with elements of blues, country, and folk music. It often features guitar solos, heavy bass lines, and drums. Rock music has had a significant impact on popular culture, influencing genres such as punk rock, heavy metal, and alternative rock."}]
|
||||
```
|
||||
|
||||
|
||||
@ -376,7 +378,7 @@ Sometimes you would want to first fill-in cache object with key/values for certa
|
||||
>>> import torch
|
||||
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache, StaticCache
|
||||
|
||||
>>> model_id = "meta-llama/Llama-2-7b-chat-hf"
|
||||
>>> model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
|
||||
>>> model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="cuda")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(model_id)
|
||||
|
||||
@ -400,7 +402,7 @@ Sometimes you would want to first fill-in cache object with key/values for certa
|
||||
... responses.append(response)
|
||||
|
||||
>>> print(responses)
|
||||
['<s> You are a helpful assistant. Help me to write a blogpost about travelling.\n\nTitle: The Ultimate Guide to Travelling: Tips, Tricks, and', '<s> You are a helpful assistant. What is the capital of France?\n\nYes, the capital of France is Paris.</s>']
|
||||
['<s> You are a helpful assistant. Help me to write a blogpost about travelling. I am excited to share my experiences with you. I have been traveling for the past', '<s> You are a helpful assistant. What is the capital of France? \n\nAnswer: Paris is the capital of France.</s>']
|
||||
```
|
||||
|
||||
|
||||
@ -414,8 +416,8 @@ this legacy format, you can seamlessly convert it to a `DynamicCache` and back.
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto")
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.float16, device_map="auto")
|
||||
>>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
|
||||
|
||||
>>> # `return_dict_in_generate=True` is required to return the cache. `return_legacy_cache` forces the returned cache
|
||||
|
@ -23,6 +23,12 @@ LLMs, or Large Language Models, are the key component behind text generation. In
|
||||
|
||||
Autoregressive generation is the inference-time procedure of iteratively calling a model with its own generated outputs, given a few initial inputs. In 🤗 Transformers, this is handled by the [`~generation.GenerationMixin.generate`] method, which is available to all models with generative capabilities.
|
||||
|
||||
<Tip>
|
||||
|
||||
If you want to jump straight to chatting with a model, [try our chat CLI](quicktour#chat-with-text-generation-models).
|
||||
|
||||
</Tip>
|
||||
|
||||
This tutorial will show you how to:
|
||||
|
||||
* Generate text with an LLM
|
||||
|
179
docs/source/en/model_doc/emu3.md
Normal file
179
docs/source/en/model_doc/emu3.md
Normal file
@ -0,0 +1,179 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Emu3

## Overview

The Emu3 model was proposed in [Emu3: Next-Token Prediction is All You Need](https://arxiv.org/abs/2409.18869) by Xinlong Wang, Xiaosong Zhang, Zhengxiong Luo, Quan Sun, Yufeng Cui, Jinsheng Wang, Fan Zhang, Yueze Wang, Zhen Li, Qiying Yu, Yingli Zhao, Yulong Ao, Xuebin Min, Tao Li, Boya Wu, Bo Zhao, Bowen Zhang, Liangdong Wang, Guang Liu, Zheqi He, Xi Yang, Jingjing Liu, Yonghua Lin, Tiejun Huang, Zhongyuan Wang.

Emu3 is a multimodal LLM that uses vector quantization to tokenize images into discrete tokens. Discretized image tokens are later fused with text token ids for image and text generation. The model can additionally generate images by predicting image token ids.

The abstract from the paper is the following:

*While next-token prediction is considered a promising path towards artificial general intelligence, it has struggled to excel in multimodal tasks, which are still dominated by diffusion models (e.g., Stable Diffusion) and compositional approaches (e.g., CLIP combined with LLMs). In this paper, we introduce Emu3, a new suite of state-of-the-art multimodal models trained solely with next-token prediction. By tokenizing images, text, and videos into a discrete space, we train a single transformer from scratch on a mixture of multimodal sequences. Emu3 outperforms several well-established task-specific models in both generation and perception tasks, surpassing flagship models such as SDXL and LLaVA-1.6, while eliminating the need for diffusion or compositional architectures. Emu3 is also capable of generating high-fidelity video via predicting the next token in a video sequence. We simplify complex multimodal model designs by converging on a singular focus: tokens, unlocking great potential for scaling both during training and inference. Our results demonstrate that next-token prediction is a promising path towards building general multimodal intelligence beyond language. We open-source key techniques and models to support further research in this direction.*

Tips:

- We advise users to set `processor.tokenizer.padding_side = "left"` before batched generation as it leads to more accurate results.

- Note that the model has been trained with a specific prompt format for chatting. Use `processor.apply_chat_template(my_conversation_dict)` to correctly format your prompts, as sketched below.

- Emu3 has two different checkpoints for image-generation and text-generation; make sure to use the correct checkpoint when loading the model. To generate an image, it is advised to use `prefix_constraints` so that the generated tokens are sampled only from possible image tokens. See more below for usage examples.

> [!TIP]
> Emu3 implementation in Transformers uses a special image token to indicate where to merge image embeddings. The special image token isn't new and uses one of the reserved tokens: `<|extra_0|>`. You have to add `<image>` to your prompt in the place where the image should be embedded for correct generation.
This model was contributed by [RaushanTurganbay](https://huggingface.co/RaushanTurganbay).
The original code can be found [here](https://github.com/baaivision/Emu3).

## Usage example

### Text generation inference

Here's how to load the model and perform inference in half-precision (`torch.bfloat16`) to generate textual output from text or text and image inputs:

```python
from transformers import Emu3Processor, Emu3ForConditionalGeneration
import torch
from PIL import Image
import requests

processor = Emu3Processor.from_pretrained("BAAI/Emu3-Chat-hf")
model = Emu3ForConditionalGeneration.from_pretrained("BAAI/Emu3-Chat-hf", torch_dtype=torch.bfloat16, device_map="cuda")

# prepare image and text prompt
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw)
prompt = "What do you see in this image?<image>"

inputs = processor(images=image, text=prompt, return_tensors="pt").to(model.device, dtype=torch.bfloat16)

# autoregressively complete prompt
output = model.generate(**inputs, max_new_tokens=50)
print(processor.decode(output[0], skip_special_tokens=True))
```

### Image generation inference

Emu3 can also generate images from textual input. Here is how you can do it:

```python
processor = Emu3Processor.from_pretrained("BAAI/Emu3-Gen-hf")
model = Emu3ForConditionalGeneration.from_pretrained("BAAI/Emu3-Gen-hf", torch_dtype="bfloat16", device_map="auto", attn_implementation="flash_attention_2")

inputs = processor(
    text=["a portrait of young girl. masterpiece, film grained, best quality.", "a dog running under the rain"],
    padding=True,
    return_tensors="pt",
    return_for_image_generation=True,
)
inputs = inputs.to(device="cuda:0", dtype=torch.bfloat16)

neg_prompt = "lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry."
neg_inputs = processor(text=[neg_prompt] * 2, return_tensors="pt").to(device="cuda:0")

image_sizes = inputs.pop("image_sizes")
HEIGHT, WIDTH = image_sizes[0]
VISUAL_TOKENS = model.vocabulary_mapping.image_tokens

def prefix_allowed_tokens_fn(batch_id, input_ids):
    height, width = HEIGHT, WIDTH
    visual_tokens = VISUAL_TOKENS
    image_wrapper_token_id = torch.tensor([processor.tokenizer.image_wrapper_token_id], device=model.device)
    eoi_token_id = torch.tensor([processor.tokenizer.eoi_token_id], device=model.device)
    eos_token_id = torch.tensor([processor.tokenizer.eos_token_id], device=model.device)
    pad_token_id = torch.tensor([processor.tokenizer.pad_token_id], device=model.device)
    eof_token_id = torch.tensor([processor.tokenizer.eof_token_id], device=model.device)
    eol_token_id = processor.tokenizer.encode("<|extra_200|>", return_tensors="pt")[0]

    position = torch.nonzero(input_ids == image_wrapper_token_id, as_tuple=True)[0][0]
    offset = input_ids.shape[0] - position
    if offset % (width + 1) == 0:
        return (eol_token_id, )
    elif offset == (width + 1) * height + 1:
        return (eof_token_id, )
    elif offset == (width + 1) * height + 2:
        return (eoi_token_id, )
    elif offset == (width + 1) * height + 3:
        return (eos_token_id, )
    elif offset > (width + 1) * height + 3:
        return (pad_token_id, )
    else:
        return visual_tokens

out = model.generate(
    **inputs,
    max_new_tokens=50_000, # make sure to have enough tokens for one image
    prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
    return_dict_in_generate=True,
    negative_prompt_ids=neg_inputs.input_ids, # indicate for Classifier-Free Guidance
    negative_prompt_attention_mask=neg_inputs.attention_mask,
)

image = model.decode_image_tokens(out.sequences[:, inputs.input_ids.shape[1]: ], height=HEIGHT, width=WIDTH)
images = processor.postprocess(list(image.float()), return_tensors="PIL.Image.Image")  # internally we convert to np but it's not supported in bf16 precision
for i, image in enumerate(images['pixel_values']):
    image.save(f"result{i}.png")
```

## Emu3Config

[[autodoc]] Emu3Config

## Emu3VQVAEConfig

[[autodoc]] Emu3VQVAEConfig

## Emu3TextConfig

[[autodoc]] Emu3TextConfig

## Emu3Processor

[[autodoc]] Emu3Processor

## Emu3ImageProcessor

[[autodoc]] Emu3ImageProcessor
- preprocess

## Emu3VQVAE

[[autodoc]] Emu3VQVAE
- forward

## Emu3TextModel

[[autodoc]] Emu3TextModel
- forward

## Emu3ForCausalLM

[[autodoc]] Emu3ForCausalLM
- forward

## Emu3ForConditionalGeneration

[[autodoc]] Emu3ForConditionalGeneration
- forward
@ -56,7 +56,7 @@ In the following, we demonstrate how to use `glm-4-9b-chat` for the inference. N

>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> device = "cuda" # the device to load the model onto

>>> model = AutoModelForCausalLM.from_pretrained("THUDM/glm-4-9b-chat", device_map="auto")
>>> model = AutoModelForCausalLM.from_pretrained("THUDM/glm-4-9b-chat", device_map="auto", trust_remote_code=True)
>>> tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4-9b-chat")

>>> prompt = "Give me a short introduction to large language model."
90
docs/source/en/model_doc/granitevision.md
Normal file
@ -0,0 +1,90 @@
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Granite Vision

## Overview

The Granite Vision model is a variant of [LLaVA-NeXT](llava_next), leveraging a [Granite](granite) language model alongside a [SigLIP](siglip) visual encoder. It utilizes multiple concatenated vision hidden states as its image features, similar to [VipLlava](vipllava). It also uses a larger set of image grid pinpoints than the original LLaVA-NeXT models to support additional aspect ratios.

Tips:
- This model is loaded into Transformers as an instance of LLaVA-NeXT. The usage and tips from [LLaVA-NeXT](llava_next) apply to this model as well.

- You can apply the chat template on the tokenizer / processor in the same way as well. Example chat format:
```bash
"<|user|>\nWhat’s shown in this image?\n<|assistant|>\nThis image shows a red stop sign.<|end_of_text|><|user|>\nDescribe the image in more details.\n<|assistant|>\n"
```

Sample inference:
```python
from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration
from PIL import Image
import requests

# Note: These docs were written prior to the public model release,
# and this path is subject to change.
# Please see https://huggingface.co/ibm-granite for the current model list.
model_path = "ibm-granite/granite-3.1-2b-instruct-vision"
processor = LlavaNextProcessor.from_pretrained(model_path)

model = LlavaNextForConditionalGeneration.from_pretrained(model_path).to("cuda")

# prepare image and text prompt, using the appropriate prompt template
url = "https://github.com/haotian-liu/LLaVA/blob/1a91fc274d7c35a9b50b3cb29c4247ae5837ce39/images/llava_v1_5_radar.jpg?raw=true"

conversation = [
    {
        "role": "user",
        "content": [
            {"type": "image", "url": url},
            {"type": "text", "text": "What is shown in this image?"},
        ],
    },
]
inputs = processor.apply_chat_template(
    conversation,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt"
).to("cuda")

# autoregressively complete prompt
output = model.generate(**inputs, max_new_tokens=100)

print(processor.decode(output[0], skip_special_tokens=True))
```

This model was contributed by [Alexander Brooks](https://huggingface.co/abrooks9944).

## LlavaNextConfig

[[autodoc]] LlavaNextConfig

## LlavaNextImageProcessor

[[autodoc]] LlavaNextImageProcessor
- preprocess

## LlavaNextProcessor

[[autodoc]] LlavaNextProcessor

## LlavaNextForConditionalGeneration

[[autodoc]] LlavaNextForConditionalGeneration
- forward
@ -56,9 +56,9 @@ Here's how to use the model for zero-shot object detection:

>>> image_url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(image_url, stream=True).raw)
>>> # Check for cats and remote controls
>>> text = "a cat. a remote control."
>>> text_labels = [["a cat", "a remote control"]]

>>> inputs = processor(images=image, text=text, return_tensors="pt").to(device)
>>> inputs = processor(images=image, text=text_labels, return_tensors="pt").to(device)
>>> with torch.no_grad():
...     outputs = model(**inputs)

@ -69,12 +69,14 @@ Here's how to use the model for zero-shot object detection:

...     text_threshold=0.3,
...     target_sizes=[image.size[::-1]]
... )
>>> print(results)
[{'boxes': tensor([[344.6959,  23.1090, 637.1833, 374.2751],
        [ 12.2666,  51.9145, 316.8582, 472.4392],
        [ 38.5742,  70.0015, 176.7838, 118.1806]], device='cuda:0'),
  'labels': ['a cat', 'a cat', 'a remote control'],
  'scores': tensor([0.4785, 0.4381, 0.4776], device='cuda:0')}]

# Retrieve the first image result
>>> result = results[0]
>>> for box, score, labels in zip(result["boxes"], result["scores"], result["labels"]):
...     box = [round(x, 2) for x in box.tolist()]
...     print(f"Detected {labels} with confidence {round(score.item(), 3)} at location {box}")
Detected a cat with confidence 0.468 at location [344.78, 22.9, 637.3, 373.62]
Detected a cat with confidence 0.426 at location [11.74, 51.55, 316.51, 473.22]
```

## Grounded SAM

158
docs/source/en/model_doc/helium.md
Normal file
@ -0,0 +1,158 @@
<!--Copyright 2024 Kyutai and The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Helium

## Overview

Helium was proposed in [Announcing Helium-1 Preview](https://kyutai.org/2025/01/13/helium.html) by the Kyutai Team.

Helium-1 preview is a lightweight language model with 2B parameters, targeting edge and mobile devices.
It supports the following languages: English, French, German, Italian, Portuguese, Spanish.

- **Developed by:** Kyutai
- **Model type:** Large Language Model
- **Language(s) (NLP):** English, French, German, Italian, Portuguese, Spanish
- **License:** CC-BY 4.0

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

The model was evaluated on MMLU, TriviaQA, NaturalQuestions, ARC Easy & Challenge, Open Book QA, Common Sense QA,
Physical Interaction QA, Social Interaction QA, HellaSwag, WinoGrande, Multilingual Knowledge QA, FLORES 200.

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

We report accuracy on MMLU, ARC, OBQA, CSQA, PIQA, SIQA, HellaSwag, WinoGrande.
We report exact match on TriviaQA, NQ and MKQA.
We report BLEU on FLORES.

### English Results

| Benchmark | Helium-1 Preview | HF SmolLM2 (1.7B) | Gemma-2 (2.6B) | Llama-3.2 (3B) | Qwen2.5 (1.5B) |
|--------------|--------|--------|--------|--------|--------|
| MMLU | 51.2 | 50.4 | 53.1 | 56.6 | 61.0 |
| NQ | 17.3 | 15.1 | 17.7 | 22.0 | 13.1 |
| TQA | 47.9 | 45.4 | 49.9 | 53.6 | 35.9 |
| ARC E | 80.9 | 81.8 | 81.1 | 84.6 | 89.7 |
| ARC C | 62.7 | 64.7 | 66.0 | 69.0 | 77.2 |
| OBQA | 63.8 | 61.4 | 64.6 | 68.4 | 73.8 |
| CSQA | 65.6 | 59.0 | 64.4 | 65.4 | 72.4 |
| PIQA | 77.4 | 77.7 | 79.8 | 78.9 | 76.0 |
| SIQA | 64.4 | 57.5 | 61.9 | 63.8 | 68.7 |
| HS | 69.7 | 73.2 | 74.7 | 76.9 | 67.5 |
| WG | 66.5 | 65.6 | 71.2 | 72.0 | 64.8 |
| Average | 60.7 | 59.3 | 62.2 | 64.7 | 63.6 |

#### Multilingual Results

| Language | Benchmark | Helium-1 Preview | HF SmolLM2 (1.7B) | Gemma-2 (2.6B) | Llama-3.2 (3B) | Qwen2.5 (1.5B) |
|-----|--------------|--------|--------|--------|--------|--------|
| German | MMLU | 45.6 | 35.3 | 45.0 | 47.5 | 49.5 |
| | ARC C | 56.7 | 38.4 | 54.7 | 58.3 | 60.2 |
| | HS | 53.5 | 33.9 | 53.4 | 53.7 | 42.8 |
| | MKQA | 16.1 | 7.1 | 18.9 | 20.2 | 10.4 |
| Spanish | MMLU | 46.5 | 38.9 | 46.2 | 49.6 | 52.8 |
| | ARC C | 58.3 | 43.2 | 58.8 | 60.0 | 68.1 |
| | HS | 58.6 | 40.8 | 60.5 | 61.1 | 51.4 |
| | MKQA | 16.0 | 7.9 | 18.5 | 20.6 | 10.6 |

## Technical Specifications

### Model Architecture and Objective

| Hyperparameter | Value |
|--------------|--------|
| Layers | 24 |
| Heads | 20 |
| Model dimension | 2560 |
| MLP dimension | 7040 |
| Context size | 4096 |
| Theta RoPE | 100,000 |
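For reference, a sketch of how the table above could map onto a Transformers configuration. This assumes `HeliumConfig` exposes the standard Llama-style argument names (`hidden_size`, `num_hidden_layers`, ...), which is an assumption; check the checkpoint's `config.json` for the authoritative values.

```python
from transformers import HeliumConfig, HeliumForCausalLM

# Hypothetical mapping of the hyperparameters above onto standard config arguments.
config = HeliumConfig(
    num_hidden_layers=24,           # Layers
    num_attention_heads=20,         # Heads
    hidden_size=2560,               # Model dimension
    intermediate_size=7040,         # MLP dimension
    max_position_embeddings=4096,   # Context size
    rope_theta=100_000.0,           # Theta RoPE
)
model = HeliumForCausalLM(config)   # randomly initialized; use from_pretrained for real weights
```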
Tips:

- This model was contributed by [Laurent Mazare](https://huggingface.co/lmz)

## Usage tips

`Helium` can be found on the [Huggingface Hub](https://huggingface.co/collections/kyutai/helium-1-preview)

In the following, we demonstrate how to use `helium-1-preview` for inference.

```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> device = "cuda" # the device to load the model onto

>>> model = AutoModelForCausalLM.from_pretrained("helium-1-preview", device_map="auto")
>>> tokenizer = AutoTokenizer.from_pretrained("helium-1-preview")

>>> prompt = "Give me a short introduction to large language model."

>>> messages = [{"role": "user", "content": prompt}]

>>> text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

>>> model_inputs = tokenizer([text], return_tensors="pt").to(device)

>>> generated_ids = model.generate(model_inputs.input_ids, max_new_tokens=512, do_sample=True)

>>> generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)]

>>> response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
```

## HeliumConfig

[[autodoc]] HeliumConfig

## HeliumModel

[[autodoc]] HeliumModel
- forward

## HeliumForCausalLM

[[autodoc]] HeliumForCausalLM
- forward

## HeliumForSequenceClassification

[[autodoc]] HeliumForSequenceClassification
- forward

## HeliumForTokenClassification

[[autodoc]] HeliumForTokenClassification
- forward
@ -162,6 +162,16 @@ For multiple turns conversation:

"USER: <image>\n<prompt1> ASSISTANT: <answer1></s>USER: <prompt2> ASSISTANT: <answer2></s>USER: <prompt3> ASSISTANT:"
```

## Note regarding reproducing original implementation

In order to match the logits of the [original implementation](https://github.com/haotian-liu/LLaVA/tree/main), one needs to additionally specify `do_pad=True` when instantiating `LlavaImageProcessor`:

```python
from transformers import LlavaImageProcessor

image_processor = LlavaImageProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf", do_pad=True)
```

### Using Flash Attention 2

Flash Attention 2 is an even faster, optimized version of the previous optimization, please refer to the [Flash Attention 2 section of performance docs](https://huggingface.co/docs/transformers/perf_infer_gpu_one).

@ -180,6 +190,11 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h

[[autodoc]] LlavaConfig

## LlavaImageProcessor

[[autodoc]] LlavaImageProcessor
- preprocess

## LlavaProcessor

[[autodoc]] LlavaProcessor

@ -81,7 +81,7 @@ text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=

# Note that the template simply formats your prompt, you still have to tokenize it and obtain pixel values for your images
print(text_prompt)
>>> "<|im_start|>user\n<image>What is shown in this image?<|im_end|>\n<|im_start|>assistant\nPage showing the list of options.<|im_end|>"
'<|im_start|>user\n<image>What is shown in this image?<|im_end|>\n<|im_start|>assistant\nPage showing the list of options.<|im_end|>'
```

This model was contributed by [RaushanTurganbay](https://huggingface.co/RaushanTurganbay).
56
docs/source/en/model_doc/moonshine.md
Normal file
@ -0,0 +1,56 @@
<!--Copyright 2025 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Moonshine

## Overview

The Moonshine model was proposed in [Moonshine: Speech Recognition for Live Transcription and Voice Commands](https://arxiv.org/abs/2410.15608) by Nat Jeffries, Evan King, Manjunath Kudlur, Guy Nicholson, James Wang, Pete Warden.

The abstract from the paper is the following:

*This paper introduces Moonshine, a family of speech recognition models optimized for live transcription and voice command processing. Moonshine is based on an encoder-decoder transformer architecture and employs Rotary Position Embedding (RoPE) instead of traditional absolute position embeddings. The model is trained on speech segments of various lengths, but without using zero-padding, leading to greater efficiency for the encoder during inference time. When benchmarked against OpenAI's Whisper tiny-en, Moonshine Tiny demonstrates a 5x reduction in compute requirements for transcribing a 10-second speech segment while incurring no increase in word error rates across standard evaluation datasets. These results highlight Moonshine's potential for real-time and resource-constrained applications.*

Tips:

- Moonshine improves upon Whisper's architecture:
  1. It uses SwiGLU activation instead of GELU in the decoder layers
  2. Most importantly, it replaces absolute position embeddings with Rotary Position Embeddings (RoPE). This allows Moonshine to handle audio inputs of any length, unlike Whisper which is restricted to fixed 30-second windows.

This model was contributed by [Eustache Le Bihan (eustlb)](https://huggingface.co/eustlb).
The original code can be found [here](https://github.com/usefulsensors/moonshine).
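The page does not yet include a usage snippet, so here is a minimal transcription sketch. The `UsefulSensors/moonshine-tiny` checkpoint name and the use of `AutoProcessor` are assumptions; adjust the identifiers to whichever released Moonshine checkpoint you actually use.

```python
>>> import torch
>>> from datasets import load_dataset
>>> from transformers import AutoProcessor, MoonshineForConditionalGeneration

>>> # checkpoint name is an assumption; see the Hub for the released Moonshine checkpoints
>>> processor = AutoProcessor.from_pretrained("UsefulSensors/moonshine-tiny")
>>> model = MoonshineForConditionalGeneration.from_pretrained("UsefulSensors/moonshine-tiny")

>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> audio = ds[0]["audio"]["array"]

>>> inputs = processor(audio, sampling_rate=16_000, return_tensors="pt")
>>> with torch.no_grad():
...     generated_ids = model.generate(**inputs, max_new_tokens=128)
>>> print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```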
## Resources

- [Automatic speech recognition task guide](../tasks/asr)

## MoonshineConfig

[[autodoc]] MoonshineConfig

## MoonshineModel

[[autodoc]] MoonshineModel
- forward
- _mask_input_features

## MoonshineForConditionalGeneration

[[autodoc]] MoonshineForConditionalGeneration
- forward
- generate

@ -110,9 +110,14 @@ To follow the example of the following image, `"Hello, I'm Moshi"` could be tran

>>> from datasets import load_dataset, Audio
>>> import torch, math
>>> from transformers import MoshiForConditionalGeneration, AutoFeatureExtractor, AutoTokenizer
>>> librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")

>>> librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("kyutai/moshiko-pytorch-bf16")
>>> tokenizer = AutoTokenizer.from_pretrained("kyutai/moshiko-pytorch-bf16")
>>> device = "cuda"
>>> dtype = torch.bfloat16

>>> # prepare user input audio
>>> librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate))
>>> audio_sample = librispeech_dummy[-1]["audio"]["array"]
@ -44,37 +44,40 @@ One unique property of OmDet-Turbo compared to other zero-shot object detection

Here's how to load the model and prepare the inputs to perform zero-shot object detection on a single image:

```python
import requests
from PIL import Image
>>> import torch
>>> import requests
>>> from PIL import Image

from transformers import AutoProcessor, OmDetTurboForObjectDetection
>>> from transformers import AutoProcessor, OmDetTurboForObjectDetection

processor = AutoProcessor.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
>>> processor = AutoProcessor.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")
>>> model = OmDetTurboForObjectDetection.from_pretrained("omlab/omdet-turbo-swin-tiny-hf")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
classes = ["cat", "remote"]
inputs = processor(image, text=classes, return_tensors="pt")
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> text_labels = ["cat", "remote"]
>>> inputs = processor(image, text=text_labels, return_tensors="pt")

outputs = model(**inputs)
>>> with torch.no_grad():
...     outputs = model(**inputs)

# convert outputs (bounding boxes and class logits)
results = processor.post_process_grounded_object_detection(
    outputs,
    classes=classes,
    target_sizes=[image.size[::-1]],
    score_threshold=0.3,
    nms_threshold=0.3,
)[0]
for score, class_name, box in zip(
    results["scores"], results["classes"], results["boxes"]
):
    box = [round(i, 1) for i in box.tolist()]
    print(
        f"Detected {class_name} with confidence "
        f"{round(score.item(), 2)} at location {box}"
    )
>>> # convert outputs (bounding boxes and class logits)
>>> results = processor.post_process_grounded_object_detection(
...     outputs,
...     target_sizes=[(image.height, image.width)],
...     text_labels=text_labels,
...     threshold=0.3,
...     nms_threshold=0.3,
... )
>>> result = results[0]
>>> boxes, scores, text_labels = result["boxes"], result["scores"], result["text_labels"]
>>> for box, score, text_label in zip(boxes, scores, text_labels):
...     box = [round(i, 2) for i in box.tolist()]
...     print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}")
Detected remote with confidence 0.768 at location [39.89, 70.35, 176.74, 118.04]
Detected cat with confidence 0.72 at location [11.6, 54.19, 314.8, 473.95]
Detected remote with confidence 0.563 at location [333.38, 75.77, 370.7, 187.03]
Detected cat with confidence 0.552 at location [345.15, 23.95, 639.75, 371.67]
```

### Multi image inference

@ -93,22 +96,22 @@ OmDet-Turbo can perform batched multi-image inference, with support for differen

>>> url1 = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image1 = Image.open(BytesIO(requests.get(url1).content)).convert("RGB")
>>> classes1 = ["cat", "remote"]
>>> task1 = "Detect {}.".format(", ".join(classes1))
>>> text_labels1 = ["cat", "remote"]
>>> task1 = "Detect {}.".format(", ".join(text_labels1))

>>> url2 = "http://images.cocodataset.org/train2017/000000257813.jpg"
>>> image2 = Image.open(BytesIO(requests.get(url2).content)).convert("RGB")
>>> classes2 = ["boat"]
>>> text_labels2 = ["boat"]
>>> task2 = "Detect everything that looks like a boat."

>>> url3 = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
>>> image3 = Image.open(BytesIO(requests.get(url3).content)).convert("RGB")
>>> classes3 = ["statue", "trees"]
>>> text_labels3 = ["statue", "trees"]
>>> task3 = "Focus on the foreground, detect statue and trees."

>>> inputs = processor(
...     images=[image1, image2, image3],
...     text=[classes1, classes2, classes3],
...     text=[text_labels1, text_labels2, text_labels3],
...     task=[task1, task2, task3],
...     return_tensors="pt",
... )

@ -119,19 +122,19 @@ OmDet-Turbo can perform batched multi-image inference, with support for differen

>>> # convert outputs (bounding boxes and class logits)
>>> results = processor.post_process_grounded_object_detection(
...     outputs,
...     classes=[classes1, classes2, classes3],
...     target_sizes=[image1.size[::-1], image2.size[::-1], image3.size[::-1]],
...     score_threshold=0.2,
...     text_labels=[text_labels1, text_labels2, text_labels3],
...     target_sizes=[(image.height, image.width) for image in [image1, image2, image3]],
...     threshold=0.2,
...     nms_threshold=0.3,
... )

>>> for i, result in enumerate(results):
...     for score, class_name, box in zip(
...         result["scores"], result["classes"], result["boxes"]
...     for score, text_label, box in zip(
...         result["scores"], result["text_labels"], result["boxes"]
...     ):
...         box = [round(i, 1) for i in box.tolist()]
...         print(
...             f"Detected {class_name} with confidence "
...             f"Detected {text_label} with confidence "
...             f"{round(score.item(), 2)} at location {box} in image {i}"
...         )
Detected remote with confidence 0.77 at location [39.9, 70.4, 176.7, 118.0] in image 0

@ -50,20 +50,22 @@ OWLv2 is, just like its predecessor [OWL-ViT](owlvit), a zero-shot text-conditio

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> texts = [["a photo of a cat", "a photo of a dog"]]
>>> inputs = processor(text=texts, images=image, return_tensors="pt")
>>> text_labels = [["a photo of a cat", "a photo of a dog"]]
>>> inputs = processor(text=text_labels, images=image, return_tensors="pt")
>>> outputs = model(**inputs)

>>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
>>> target_sizes = torch.Tensor([image.size[::-1]])
>>> # Convert outputs (bounding boxes and class logits) to Pascal VOC Format (xmin, ymin, xmax, ymax)
>>> results = processor.post_process_object_detection(outputs=outputs, target_sizes=target_sizes, threshold=0.1)
>>> i = 0  # Retrieve predictions for the first image for the corresponding text queries
>>> text = texts[i]
>>> boxes, scores, labels = results[i]["boxes"], results[i]["scores"], results[i]["labels"]
>>> for box, score, label in zip(boxes, scores, labels):
>>> target_sizes = torch.tensor([(image.height, image.width)])
>>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
>>> results = processor.post_process_grounded_object_detection(
...     outputs=outputs, target_sizes=target_sizes, threshold=0.1, text_labels=text_labels
... )
>>> # Retrieve predictions for the first image for the corresponding text queries
>>> result = results[0]
>>> boxes, scores, text_labels = result["boxes"], result["scores"], result["text_labels"]
>>> for box, score, text_label in zip(boxes, scores, text_labels):
...     box = [round(i, 2) for i in box.tolist()]
...     print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}")
...     print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}")
Detected a photo of a cat with confidence 0.614 at location [341.67, 23.39, 642.32, 371.35]
Detected a photo of a cat with confidence 0.665 at location [6.75, 51.96, 326.62, 473.13]
```

@ -103,6 +105,9 @@ Usage of OWLv2 is identical to [OWL-ViT](owlvit) with a new, updated image proce

## Owlv2Processor

[[autodoc]] Owlv2Processor
- __call__
- post_process_grounded_object_detection
- post_process_image_guided_detection

## Owlv2Model

@ -49,20 +49,22 @@ OWL-ViT is a zero-shot text-conditioned object detection model. OWL-ViT uses [CL

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> texts = [["a photo of a cat", "a photo of a dog"]]
>>> inputs = processor(text=texts, images=image, return_tensors="pt")
>>> text_labels = [["a photo of a cat", "a photo of a dog"]]
>>> inputs = processor(text=text_labels, images=image, return_tensors="pt")
>>> outputs = model(**inputs)

>>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2]
>>> target_sizes = torch.Tensor([image.size[::-1]])
>>> target_sizes = torch.tensor([(image.height, image.width)])
>>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax)
>>> results = processor.post_process_object_detection(outputs=outputs, target_sizes=target_sizes, threshold=0.1)
>>> i = 0  # Retrieve predictions for the first image for the corresponding text queries
>>> text = texts[i]
>>> boxes, scores, labels = results[i]["boxes"], results[i]["scores"], results[i]["labels"]
>>> for box, score, label in zip(boxes, scores, labels):
>>> results = processor.post_process_grounded_object_detection(
...     outputs=outputs, target_sizes=target_sizes, threshold=0.1, text_labels=text_labels
... )
>>> # Retrieve predictions for the first image for the corresponding text queries
>>> result = results[0]
>>> boxes, scores, text_labels = result["boxes"], result["scores"], result["text_labels"]
>>> for box, score, text_label in zip(boxes, scores, text_labels):
...     box = [round(i, 2) for i in box.tolist()]
...     print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}")
...     print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}")
Detected a photo of a cat with confidence 0.707 at location [324.97, 20.44, 640.58, 373.29]
Detected a photo of a cat with confidence 0.717 at location [1.46, 55.26, 315.55, 472.17]
```

@ -91,16 +93,12 @@ A demo notebook on using OWL-ViT for zero- and one-shot (image-guided) object de

- post_process_object_detection
- post_process_image_guided_detection

## OwlViTFeatureExtractor

[[autodoc]] OwlViTFeatureExtractor
- __call__
- post_process
- post_process_image_guided_detection

## OwlViTProcessor

[[autodoc]] OwlViTProcessor
- __call__
- post_process_grounded_object_detection
- post_process_image_guided_detection

## OwlViTModel

@ -57,10 +57,7 @@ Phi-3 has been integrated in the development version (4.40.0.dev) of `transforme

>>> outputs = model.generate(inputs, max_new_tokens=32)
>>> text = tokenizer.batch_decode(outputs)[0]
>>> print(text)
<s><|user|>
Can you provide ways to eat combinations of bananas and dragonfruits?<|end|>
<|assistant|>
Certainly! Bananas and dragonfruits can be combined in various delicious ways. Here are some ideas for eating combinations of bananas and
<|user|> Can you provide ways to eat combinations of bananas and dragonfruits?<|end|><|assistant|> Certainly! Bananas and dragonfruits can be combined in various delicious ways. Here are some creative ideas for incorporating both fruits
```

## Phi3Config

300
docs/source/en/model_doc/qwen2_5_vl.md
Normal file
@ -0,0 +1,300 @@
<!--Copyright 2025 The Qwen Team and The HuggingFace Inc. team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Qwen2.5-VL

## Overview

The [Qwen2.5-VL](https://qwenlm.github.io/blog/qwen2_5-vl/) model is an update to [Qwen2-VL](https://arxiv.org/abs/2409.12191) from Qwen team, Alibaba Group.

The abstract from this update is the following:

*Qwen2.5-VL marks a major step forward from Qwen2-VL, built upon the latest Qwen2.5 LLM. We've accelerated training and testing through the strategic implementation of window attention within the ViT. The ViT architecture itself has been refined with SwiGLU and RMSNorm, aligning it more closely with the LLM's structure. A key innovation is the expansion of native dynamic resolution to encompass the temporal dimension, in addition to spatial aspects. Furthermore, we've upgraded MRoPE, incorporating absolute time alignment on the time axis to allow the model to effectively capture temporal dynamics, regardless of frame rate, leading to superior video understanding.*

## Usage example

### Single Media inference

The model can accept both images and videos as input. Here's an example code for inference.

```python
from PIL import Image
import requests
import torch
from torchvision import io
from typing import Dict
from transformers.image_utils import load_images, load_video
from transformers import Qwen2_5_VLForConditionalGeneration, AutoTokenizer, AutoProcessor

# Load the model in half-precision on the available device(s)
model = Qwen2_5_VLForConditionalGeneration.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct", device_map="auto")
processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct")

# Image
url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"
image = Image.open(requests.get(url, stream=True).raw)

conversation = [
    {
        "role": "user",
        "content": [
            {
                "type": "image",
            },
            {
                "type": "text",
                "text": "Describe this image."
            }
        ]
    }
]

# Preprocess the inputs
text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
# Expected output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe this image.<|im_end|>\n<|im_start|>assistant\n'

inputs = processor(text=[text_prompt], images=[image], padding=True, return_tensors="pt")
inputs = inputs.to('cuda')

# Inference: Generation of the output
output_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
print(output_text)

# Video
video = load_video(video="/path/to/video.mp4")
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "video"},
            {"type": "text", "text": "What happened in the video?"},
        ],
    }
]

# Preprocess the inputs
text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
# Expected output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|video_pad|><|vision_end|>What happened in the video?<|im_end|>\n<|im_start|>assistant\n'

# Qwen2.5-VL modifies the time positional encoding (MRoPE) according to the video's frame rate (FPS).
# Therefore, the video's FPS information needs to be provided as input.
inputs = processor(text=[text_prompt], videos=[video], fps=[1.0], padding=True, return_tensors="pt")
inputs = inputs.to('cuda')

# Inference: Generation of the output
output_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
print(output_text)
```

### Batch Mixed Media Inference

The model can batch inputs composed of mixed samples of various types such as images, videos, and text. Here is an example.

```python
images = load_images([
    "/path/to/image1.jpg",
    "/path/to/image2.jpg",
    "/path/to/image3.jpg",
    "/path/to/image4.jpg",
    "/path/to/image5.jpg",
])
video = load_video(video="/path/to/video.mp4")

# Conversation for the first image
conversation1 = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Describe this image."}
        ]
    }
]

# Conversation with two images
conversation2 = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "image"},
            {"type": "text", "text": "What is written in the pictures?"}
        ]
    }
]

# Conversation with pure text
conversation3 = [
    {
        "role": "user",
        "content": "who are you?"
    }
]

# Conversation with mixed media
conversation4 = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "image"},
            {"type": "video"},
            {"type": "text", "text": "What are the common elements in these medias?"},
        ],
    }
]

conversations = [conversation1, conversation2, conversation3, conversation4]
# Preparation for batch inference
texts = [processor.apply_chat_template(msg, add_generation_prompt=True) for msg in conversations]
inputs = processor(
    text=texts,
    images=images,
    videos=[video],
    padding=True,
    return_tensors="pt",
)
inputs = inputs.to('cuda')

# Batch Inference
output_ids = model.generate(**inputs, max_new_tokens=128)
generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
print(output_text)
```

### Usage Tips

#### Image Resolution trade-off

The model supports a wide range of resolution inputs. By default, it uses the native resolution for input, but higher resolutions can enhance performance at the cost of more computation. Users can set the minimum and maximum number of pixels to achieve an optimal configuration for their needs.

```python
min_pixels = 224*224
max_pixels = 2048*2048
processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels)
```

In case of limited GPU RAM, one can reduce the resolution as follows:

```python
min_pixels = 256*28*28
max_pixels = 1024*28*28
processor = AutoProcessor.from_pretrained("Qwen/Qwen2.5-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels)
```
This ensures each image gets encoded using between 256 and 1024 visual tokens. The 28 comes from the fact that the model uses a patch size of 14 and a temporal patch size of 2 (14 x 2 = 28).

#### Multiple Image Inputs

By default, images and video content are directly included in the conversation. When handling multiple images, it's helpful to add labels to the images and videos for better reference. Users can control this behavior with the following settings:

```python
conversation = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Hello, how are you?"}
        ]
    },
    {
        "role": "assistant",
        "content": "I'm doing well, thank you for asking. How can I assist you today?"
    },
    {
        "role": "user",
        "content": [
            {"type": "text", "text": "Can you describe these images and video?"},
            {"type": "image"},
            {"type": "image"},
            {"type": "video"},
            {"type": "text", "text": "These are from my vacation."}
        ]
    },
    {
        "role": "assistant",
        "content": "I'd be happy to describe the images and video for you. Could you please provide more context about your vacation?"
    },
    {
        "role": "user",
        "content": "It was a trip to the mountains. Can you see the details in the images and video?"
    }
]

# default:
prompt_without_id = processor.apply_chat_template(conversation, add_generation_prompt=True)
# Expected output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Hello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing well, thank you for asking. How can I assist you today?<|im_end|>\n<|im_start|>user\nCan you describe these images and video?<|vision_start|><|image_pad|><|vision_end|><|vision_start|><|image_pad|><|vision_end|><|vision_start|><|video_pad|><|vision_end|>These are from my vacation.<|im_end|>\n<|im_start|>assistant\nI'd be happy to describe the images and video for you. Could you please provide more context about your vacation?<|im_end|>\n<|im_start|>user\nIt was a trip to the mountains. Can you see the details in the images and video?<|im_end|>\n<|im_start|>assistant\n'

# add ids
prompt_with_id = processor.apply_chat_template(conversation, add_generation_prompt=True, add_vision_id=True)
# Expected output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nPicture 1: <|vision_start|><|image_pad|><|vision_end|>Hello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing well, thank you for asking. How can I assist you today?<|im_end|>\n<|im_start|>user\nCan you describe these images and video?Picture 2: <|vision_start|><|image_pad|><|vision_end|>Picture 3: <|vision_start|><|image_pad|><|vision_end|>Video 1: <|vision_start|><|video_pad|><|vision_end|>These are from my vacation.<|im_end|>\n<|im_start|>assistant\nI'd be happy to describe the images and video for you. Could you please provide more context about your vacation?<|im_end|>\n<|im_start|>user\nIt was a trip to the mountains. Can you see the details in the images and video?<|im_end|>\n<|im_start|>assistant\n'
```

#### Flash-Attention 2 to speed up generation

First, make sure to install the latest version of Flash Attention 2:

```bash
pip install -U flash-attn --no-build-isolation
```

Also, you should have hardware that is compatible with FlashAttention 2. Read more about it in the official documentation of the [flash attention repository](https://github.com/Dao-AILab/flash-attention). FlashAttention-2 can only be used when a model is loaded in `torch.float16` or `torch.bfloat16`.

To load and run a model using FlashAttention-2, add `attn_implementation="flash_attention_2"` when loading the model:

```python
import torch
from transformers import Qwen2_5_VLForConditionalGeneration

model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
    "Qwen/Qwen2.5-VL-7B-Instruct",
    torch_dtype=torch.bfloat16,
    attn_implementation="flash_attention_2",
)
```

## Qwen2_5_VLConfig

[[autodoc]] Qwen2_5_VLConfig

## Qwen2_5_VLImageProcessor

[[autodoc]] Qwen2_5_VLImageProcessor
- preprocess

## Qwen2_5_VLProcessor

[[autodoc]] Qwen2_5_VLProcessor

## Qwen2_5_VLModel

[[autodoc]] Qwen2_5_VLModel
- forward

## Qwen2_5_VLForConditionalGeneration

[[autodoc]] Qwen2_5_VLForConditionalGeneration
- forward
@ -315,6 +315,11 @@ model = Qwen2VLForConditionalGeneration.from_pretrained(

[[autodoc]] Qwen2VLImageProcessor
- preprocess

## Qwen2VLImageProcessorFast

[[autodoc]] Qwen2VLImageProcessorFast
- preprocess

## Qwen2VLProcessor

[[autodoc]] Qwen2VLProcessor

@ -52,7 +52,7 @@ Here is how to use the processor to process text and audio:

```python
>>> # let's load an audio sample from an Arabic speech corpus
>>> from datasets import load_dataset
>>> dataset = load_dataset("arabic_speech_corpus", split="test", streaming=True)
>>> dataset = load_dataset("arabic_speech_corpus", split="test", streaming=True, trust_remote_code=True)
>>> audio_sample = next(iter(dataset))["audio"]

>>> # now, process it

@ -52,7 +52,7 @@ Here is how to use the processor to process text and audio:

```python
>>> # let's load an audio sample from an Arabic speech corpus
>>> from datasets import load_dataset
>>> dataset = load_dataset("arabic_speech_corpus", split="test", streaming=True)
>>> dataset = load_dataset("arabic_speech_corpus", split="test", streaming=True, trust_remote_code=True)
>>> audio_sample = next(iter(dataset))["audio"]

>>> # now, process it

@ -86,7 +86,7 @@ If you want to do the pre- and postprocessing yourself, here's how to do that:

>>> candidate_labels = ["2 cats", "2 dogs"]
# follows the pipeline prompt template to get same results
>>> texts = [f'This is a photo of {label}.' for label in candidate_labels]
>>> # important: we pass `padding=max_length` since the model was trained with this
# important: we pass `padding=max_length` since the model was trained with this
>>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt")

>>> with torch.no_grad():

@ -95,14 +95,14 @@ If you want to do the pre- and postprocessing yourself, here's how to do that:

>>> logits_per_image = outputs.logits_per_image
>>> probs = torch.sigmoid(logits_per_image) # these are the probabilities
>>> print(f"{probs[0][0]:.1%} that image 0 is '{candidate_labels[0]}'")
31.9% that image 0 is 'a photo of 2 cats'
19.8% that image 0 is '2 cats'
```

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with SigLIP.

- [Zero-shot image classification task guide](../tasks/zero_shot_image_classification_md)
- [Zero-shot image classification task guide](../tasks/zero_shot_image_classification)
- Demo notebooks for SigLIP can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/SigLIP). 🌎

If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.

@ -142,8 +142,7 @@ To load and run a model using Flash Attention 2, refer to the snippet below:

# follows the pipeline prompt template to get same results
>>> texts = [f'This is a photo of {label}.' for label in candidate_labels]
# important: we pass `padding=max_length` since the model was trained with this
>>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt")
>>> inputs.to(device)
>>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt").to(device)

>>> with torch.no_grad():
...     with torch.autocast(device):

@ -152,7 +151,7 @@ To load and run a model using Flash Attention 2, refer to the snippet below:

>>> logits_per_image = outputs.logits_per_image
>>> probs = torch.sigmoid(logits_per_image) # these are the probabilities
>>> print(f"{probs[0][0]:.1%} that image 0 is '{candidate_labels[0]}'")
51.3% that image 0 is 'This is a photo of 2 cats.'
19.8% that image 0 is '2 cats'
```

138
docs/source/en/model_doc/superglue.md
Normal file
@ -0,0 +1,138 @@
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the MIT License; you may not use this file except in compliance with
|
||||
the License.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
|
||||
-->
|
||||
|
||||
# SuperGlue
|
||||
|
||||
## Overview
|
||||
|
||||
The SuperGlue model was proposed in [SuperGlue: Learning Feature Matching with Graph Neural Networks](https://arxiv.org/abs/1911.11763) by Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz and Andrew Rabinovich.
|
||||
|
||||
This model consists of matching two sets of interest points detected in an image. Paired with the
|
||||
[SuperPoint model](https://huggingface.co/magic-leap-community/superpoint), it can be used to match two images and
|
||||
estimate the pose between them. This model is useful for tasks such as image matching, homography estimation, etc.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*This paper introduces SuperGlue, a neural network that matches two sets of local features by jointly finding correspondences
|
||||
and rejecting non-matchable points. Assignments are estimated by solving a differentiable optimal transport problem, whose costs
|
||||
are predicted by a graph neural network. We introduce a flexible context aggregation mechanism based on attention, enabling
|
||||
SuperGlue to reason about the underlying 3D scene and feature assignments jointly. Compared to traditional, hand-designed heuristics,
|
||||
our technique learns priors over geometric transformations and regularities of the 3D world through end-to-end training from image
|
||||
pairs. SuperGlue outperforms other learned approaches and achieves state-of-the-art results on the task of pose estimation in
|
||||
challenging real-world indoor and outdoor environments. The proposed method performs matching in real-time on a modern GPU and
|
||||
can be readily integrated into modern SfM or SLAM systems. The code and trained weights are publicly available at this [URL](https://github.com/magicleap/SuperGluePretrainedNetwork).*
|
||||
|
||||
## How to use
|
||||
|
||||
Here is a quick example of using the model. Since this model is an image matching model, it requires pairs of images to be matched.
|
||||
The raw outputs contain the list of keypoints detected by the keypoint detector as well as the list of matches with their corresponding
|
||||
matching scores.
|
||||
```python
|
||||
from transformers import AutoImageProcessor, AutoModel
|
||||
import torch
|
||||
from PIL import Image
|
||||
import requests
|
||||
|
||||
url_image1 = "https://raw.githubusercontent.com/magicleap/SuperGluePretrainedNetwork/refs/heads/master/assets/phototourism_sample_images/united_states_capitol_98169888_3347710852.jpg"
|
||||
image1 = Image.open(requests.get(url_image1, stream=True).raw)
|
||||
url_image2 = "https://raw.githubusercontent.com/magicleap/SuperGluePretrainedNetwork/refs/heads/master/assets/phototourism_sample_images/united_states_capitol_26757027_6717084061.jpg"
|
||||
image2 = Image.open(requests.get(url_image2, stream=True).raw)
|
||||
|
||||
images = [image1, image2]
|
||||
|
||||
processor = AutoImageProcessor.from_pretrained("magic-leap-community/superglue_outdoor")
|
||||
model = AutoModel.from_pretrained("magic-leap-community/superglue_outdoor")
|
||||
|
||||
inputs = processor(images, return_tensors="pt")
|
||||
with torch.no_grad():
|
||||
outputs = model(**inputs)
|
||||
```
|
||||
|
||||
You can use the `post_process_keypoint_matching` method from the `SuperGlueImageProcessor` to get the keypoints and matches in a more readable format:
|
||||
|
||||
```python
|
||||
image_sizes = [[(image.height, image.width) for image in images]]
|
||||
outputs = processor.post_process_keypoint_matching(outputs, image_sizes, threshold=0.2)
|
||||
for i, output in enumerate(outputs):
|
||||
print("For the image pair", i)
|
||||
for keypoint0, keypoint1, matching_score in zip(
|
||||
output["keypoints0"], output["keypoints1"], output["matching_scores"]
|
||||
):
|
||||
print(
|
||||
f"Keypoint at coordinate {keypoint0.numpy()} in the first image matches with keypoint at coordinate {keypoint1.numpy()} in the second image with a score of {matching_score}."
|
||||
)
|
||||
|
||||
```
|
||||
|
||||
From the outputs, you can visualize the matches between the two images using the following code:
|
||||
```python
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
|
||||
# Create side by side image
|
||||
merged_image = np.zeros((max(image1.height, image2.height), image1.width + image2.width, 3))
|
||||
merged_image[: image1.height, : image1.width] = np.array(image1) / 255.0
|
||||
merged_image[: image2.height, image1.width :] = np.array(image2) / 255.0
|
||||
plt.imshow(merged_image)
|
||||
plt.axis("off")
|
||||
|
||||
# Retrieve the keypoints and matches
|
||||
output = outputs[0]
|
||||
keypoints0 = output["keypoints0"]
|
||||
keypoints1 = output["keypoints1"]
|
||||
matching_scores = output["matching_scores"]
|
||||
keypoints0_x, keypoints0_y = keypoints0[:, 0].numpy(), keypoints0[:, 1].numpy()
|
||||
keypoints1_x, keypoints1_y = keypoints1[:, 0].numpy(), keypoints1[:, 1].numpy()
|
||||
|
||||
# Plot the matches
|
||||
for keypoint0_x, keypoint0_y, keypoint1_x, keypoint1_y, matching_score in zip(
|
||||
keypoints0_x, keypoints0_y, keypoints1_x, keypoints1_y, matching_scores
|
||||
):
|
||||
plt.plot(
|
||||
[keypoint0_x, keypoint1_x + image1.width],
|
||||
[keypoint0_y, keypoint1_y],
|
||||
color=plt.get_cmap("RdYlGn")(matching_score.item()),
|
||||
alpha=0.9,
|
||||
linewidth=0.5,
|
||||
)
|
||||
plt.scatter(keypoint0_x, keypoint0_y, c="black", s=2)
|
||||
plt.scatter(keypoint1_x + image1.width, keypoint1_y, c="black", s=2)
|
||||
|
||||
# Save the plot
|
||||
plt.savefig("matched_image.png", dpi=300, bbox_inches='tight')
|
||||
plt.close()
|
||||
```
|
||||
|
||||

|
||||
|
||||
This model was contributed by [stevenbucaille](https://huggingface.co/stevenbucaille).
|
||||
The original code can be found [here](https://github.com/magicleap/SuperGluePretrainedNetwork).
|
||||
|
||||
## SuperGlueConfig
|
||||
|
||||
[[autodoc]] SuperGlueConfig
|
||||
|
||||
## SuperGlueImageProcessor
|
||||
|
||||
[[autodoc]] SuperGlueImageProcessor
|
||||
|
||||
- preprocess
|
||||
|
||||
## SuperGlueForKeypointMatching
|
||||
|
||||
[[autodoc]] SuperGlueForKeypointMatching
|
||||
|
||||
- forward
|
||||
- post_process_keypoint_matching
|
@ -47,6 +47,17 @@ Helper class to enable loading timm models to be used with the transformers libr
|
||||
>>> top5_probabilities, top5_class_indices = torch.topk(logits.softmax(dim=1) * 100, k=5)
|
||||
```
|
||||
|
||||
## Resources:
|
||||
|
||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with TimmWrapper.
|
||||
|
||||
<PipelineTag pipeline="image-classification"/>
|
||||
|
||||
- [Collection of Example Notebook](https://github.com/ariG23498/timm-wrapper-examples) 🌎
|
||||
|
||||
> [!TIP]
|
||||
> For a more detailed overview please read the [official blog post](https://huggingface.co/blog/timm-transformers) on the timm integration.
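As a quick, hedged illustration of what the wrapper enables (the checkpoint name and image URL below are just example choices, not the only supported ones), a minimal classification sketch with the usual `Auto` classes looks like this:

```python
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, AutoModelForImageClassification

# Example timm checkpoint from the Hub; any timm classification checkpoint should work similarly.
checkpoint = "timm/resnet18.a1_in1k"
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained(checkpoint)
model = AutoModelForImageClassification.from_pretrained(checkpoint)

inputs = image_processor(image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Same post-processing as in the snippet above
top5_probabilities, top5_class_indices = torch.topk(logits.softmax(dim=1) * 100, k=5)
print(top5_probabilities, top5_class_indices)
```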
|
||||
|
||||
## TimmWrapperConfig
|
||||
|
||||
[[autodoc]] TimmWrapperConfig
|
||||
|
@ -10,24 +10,28 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express o
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# VitPose
|
||||
# ViTPose
|
||||
|
||||
## Overview
|
||||
|
||||
The VitPose model was proposed in [ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation](https://arxiv.org/abs/2204.12484) by Yufei Xu, Jing Zhang, Qiming Zhang, Dacheng Tao. VitPose employs a standard, non-hierarchical [Vision Transformer](https://arxiv.org/pdf/2010.11929v2) as backbone for the task of keypoint estimation. A simple decoder head is added on top to predict the heatmaps from a given image. Despite its simplicity, the model gets state-of-the-art results on the challenging MS COCO Keypoint Detection benchmark.
|
||||
The ViTPose model was proposed in [ViTPose: Simple Vision Transformer Baselines for Human Pose Estimation](https://arxiv.org/abs/2204.12484) by Yufei Xu, Jing Zhang, Qiming Zhang, Dacheng Tao. ViTPose employs a standard, non-hierarchical [Vision Transformer](vit) as backbone for the task of keypoint estimation. A simple decoder head is added on top to predict the heatmaps from a given image. Despite its simplicity, the model gets state-of-the-art results on the challenging MS COCO Keypoint Detection benchmark. The model was further improved in [ViTPose++: Vision Transformer for Generic Body Pose Estimation](https://arxiv.org/abs/2212.04246) where the authors employ
|
||||
a mixture-of-experts (MoE) module in the ViT backbone along with pre-training on more data, which further enhances the performance.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Although no specific domain knowledge is considered in the design, plain vision transformers have shown excellent performance in visual recognition tasks. However, little effort has been made to reveal the potential of such simple structures for pose estimation tasks. In this paper, we show the surprisingly good capabilities of plain vision transformers for pose estimation from various aspects, namely simplicity in model structure, scalability in model size, flexibility in training paradigm, and transferability of knowledge between models, through a simple baseline model called ViTPose. Specifically, ViTPose employs plain and non-hierarchical vision transformers as backbones to extract features for a given person instance and a lightweight decoder for pose estimation. It can be scaled up from 100M to 1B parameters by taking the advantages of the scalable model capacity and high parallelism of transformers, setting a new Pareto front between throughput and performance. Besides, ViTPose is very flexible regarding the attention type, input resolution, pre-training and finetuning strategy, as well as dealing with multiple pose tasks. We also empirically demonstrate that the knowledge of large ViTPose models can be easily transferred to small ones via a simple knowledge token. Experimental results show that our basic ViTPose model outperforms representative methods on the challenging MS COCO Keypoint Detection benchmark, while the largest model sets a new state-of-the-art.*
|
||||
|
||||

|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/vitpose-architecture.png"
|
||||
alt="drawing" width="600"/>
|
||||
|
||||
<small> ViTPose architecture. Taken from the <a href="https://arxiv.org/abs/2204.12484">original paper.</a> </small>
|
||||
|
||||
This model was contributed by [nielsr](https://huggingface.co/nielsr) and [sangbumchoi](https://github.com/SangbumChoi).
|
||||
The original code can be found [here](https://github.com/ViTAE-Transformer/ViTPose).
|
||||
|
||||
## Usage Tips
|
||||
|
||||
ViTPose is a so-called top-down keypoint detection model. This means that one first uses an object detector, like [RT-DETR](rt_detr.md), to detect people (or other instances) in an image. Next, ViTPose takes the cropped images as input and predicts the keypoints.
|
||||
ViTPose is a so-called top-down keypoint detection model. This means that one first uses an object detector, like [RT-DETR](rt_detr.md), to detect people (or other instances) in an image. Next, ViTPose takes the cropped images as input and predicts the keypoints for each of them.
|
||||
|
||||
```py
|
||||
import torch
|
||||
@ -36,11 +40,7 @@ import numpy as np
|
||||
|
||||
from PIL import Image
|
||||
|
||||
from transformers import (
|
||||
AutoProcessor,
|
||||
RTDetrForObjectDetection,
|
||||
VitPoseForPoseEstimation,
|
||||
)
|
||||
from transformers import AutoProcessor, RTDetrForObjectDetection, VitPoseForPoseEstimation
|
||||
|
||||
device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
|
||||
@ -51,7 +51,7 @@ image = Image.open(requests.get(url, stream=True).raw)
|
||||
# Stage 1. Detect humans on the image
|
||||
# ------------------------------------------------------------------------
|
||||
|
||||
# You can choose detector by your choice
|
||||
# You can choose any detector of your choice
|
||||
person_image_processor = AutoProcessor.from_pretrained("PekingU/rtdetr_r50vd_coco_o365")
|
||||
person_model = RTDetrForObjectDetection.from_pretrained("PekingU/rtdetr_r50vd_coco_o365", device_map=device)
|
||||
|
||||
@ -89,9 +89,50 @@ pose_results = image_processor.post_process_pose_estimation(outputs, boxes=[pers
|
||||
image_pose_result = pose_results[0] # results for first image
|
||||
```
|
||||
|
||||
### ViTPose++ models
|
||||
|
||||
### Visualization for supervision user
|
||||
```py
|
||||
The best [checkpoints](https://huggingface.co/collections/usyd-community/vitpose-677fcfd0a0b2b5c8f79c4335) are those of the [ViTPose++ paper](https://arxiv.org/abs/2212.04246). ViTPose++ models employ a so-called [Mixture-of-Experts (MoE)](https://huggingface.co/blog/moe) architecture for the ViT backbone, resulting in better performance.
|
||||
|
||||
The ViTPose+ checkpoints use 6 experts, hence 6 different dataset indices can be passed.
|
||||
An overview of the various dataset indices is provided below:
|
||||
|
||||
- 0: [COCO validation 2017](https://cocodataset.org/#overview) dataset, using an object detector that gets 56 AP on the "person" class
|
||||
- 1: [AiC](https://github.com/fabbrimatteo/AiC-Dataset) dataset
|
||||
- 2: [MPII](https://www.mpi-inf.mpg.de/departments/computer-vision-and-machine-learning/software-and-datasets/mpii-human-pose-dataset) dataset
|
||||
- 3: [AP-10K](https://github.com/AlexTheBad/AP-10K) dataset
|
||||
- 4: [APT-36K](https://github.com/pandorgan/APT-36K) dataset
|
||||
- 5: [COCO-WholeBody](https://github.com/jin-s13/COCO-WholeBody) dataset
|
||||
|
||||
Pass the `dataset_index` argument in the forward of the model to indicate which experts to use for each example in the batch. Example usage is shown below:
|
||||
|
||||
```python
|
||||
image_processor = AutoProcessor.from_pretrained("usyd-community/vitpose-plus-base")
|
||||
model = VitPoseForPoseEstimation.from_pretrained("usyd-community/vitpose-plus-base", device_map=device)
|
||||
|
||||
inputs = image_processor(image, boxes=[person_boxes], return_tensors="pt").to(device)
|
||||
|
||||
dataset_index = torch.tensor([0], device=device) # must be a tensor of shape (batch_size,)
|
||||
|
||||
with torch.no_grad():
|
||||
outputs = model(**inputs, dataset_index=dataset_index)
|
||||
```
|
||||
|
||||
|
||||
|
||||
|
||||
### Visualization
|
||||
|
||||
To visualize the various keypoints, one can leverage the `supervision` [library](https://github.com/roboflow/supervision) (requires `pip install supervision`):
|
||||
|
||||
```python
|
||||
import supervision as sv
|
||||
|
||||
xy = torch.stack([pose_result['keypoints'] for pose_result in image_pose_result]).cpu().numpy()
|
||||
@ -119,8 +160,9 @@ annotated_frame = vertex_annotator.annotate(
|
||||
)
|
||||
```
|
||||
|
||||
### Visualization for advanced user
|
||||
```py
|
||||
Alternatively, one can also visualize the keypoints using [OpenCV](https://opencv.org/) (requires `pip install opencv-python`):
|
||||
|
||||
```python
|
||||
import math
|
||||
import cv2
|
||||
|
||||
@ -223,26 +265,18 @@ pose_image
|
||||
```
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/vitpose-coco.jpg" alt="drawing" width="600"/>
|
||||
|
||||
### MoE backbone
|
||||
## Resources
|
||||
|
||||
To enable the MoE (Mixture of Experts) function in the backbone, the user has to provide an appropriate configuration, such as `num_experts`, and the input value `dataset_index` to the backbone model. However, it is not used in the default parameters. Below is a code snippet showing the usage of the MoE function.
|
||||
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ViTPose. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
|
||||
|
||||
```py
|
||||
>>> from transformers import VitPoseBackboneConfig, VitPoseBackbone
|
||||
>>> import torch
|
||||
|
||||
>>> config = VitPoseBackboneConfig(num_experts=3, out_indices=[-1])
|
||||
>>> model = VitPoseBackbone(config)
|
||||
|
||||
>>> pixel_values = torch.randn(3, 3, 256, 192)
|
||||
>>> dataset_index = torch.tensor([1, 2, 3])
|
||||
>>> outputs = model(pixel_values, dataset_index)
|
||||
```
|
||||
- A demo of ViTPose on images and video can be found [here](https://huggingface.co/spaces/hysts/ViTPose-transformers).
|
||||
- A notebook illustrating inference and visualization can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/ViTPose/Inference_with_ViTPose_for_human_pose_estimation.ipynb).
|
||||
|
||||
## VitPoseImageProcessor
|
||||
|
||||
[[autodoc]] VitPoseImageProcessor
|
||||
- preprocess
|
||||
- post_process_pose_estimation
|
||||
|
||||
## VitPoseConfig
|
||||
|
||||
|
93
docs/source/en/model_doc/zamba2.md
Normal file
@ -0,0 +1,93 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
# Zamba2
|
||||
|
||||
Zamba2 is a large language model (LLM) trained by Zyphra, and made available under an Apache 2.0 license. Please see the [Zyphra Hugging Face](https://huggingface.co/collections/zyphra/) repository for model weights.
|
||||
|
||||
This model was contributed by [pglo](https://huggingface.co/pglo).
|
||||
|
||||
|
||||
## Model details
|
||||
|
||||
Zamba2-1.2B, Zamba2-2.7B and Zamba2-7B are hybrid models combining state-space models (specifically [Mamba](https://github.com/state-spaces/mamba)) and transformer blocks, and were trained using next-token prediction. Zamba2 uses shared transformer layers after every 6 Mamba blocks. It uses the [Mistral v0.1 tokenizer](https://huggingface.co/mistralai/Mistral-7B-v0.1). We came to this architecture after a series of ablations at small scales. Zamba2-1.2B, Zamba2-2.7B and Zamba2-7B were pre-trained on 2T and 3T tokens, respectively.
|
||||
|
||||
<img src=https://github.com/user-attachments/assets/c2cff209-b901-483c-87aa-774b82a0769f width=30% height=40% />
|
||||
|
||||
## Quick start
|
||||
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Zamba2 requires you use `transformers` version 4.48.0 or higher:
|
||||
```bash
|
||||
pip install "transformers>=4.48.0"
|
||||
```
|
||||
|
||||
## Inference
|
||||
|
||||
```python
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
import torch
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("Zyphra/Zamba2-7B")
|
||||
model = AutoModelForCausalLM.from_pretrained("Zyphra/Zamba2-7B", device_map="cuda", torch_dtype=torch.bfloat16)
|
||||
|
||||
input_text = "What factors contributed to the fall of the Roman Empire?"
|
||||
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
||||
|
||||
outputs = model.generate(**input_ids, max_new_tokens=100)
|
||||
print(tokenizer.decode(outputs[0]))
|
||||
```
|
||||
|
||||
|
||||
## Model card
|
||||
|
||||
The model cards can be found at:
|
||||
* [Zamba2-1.2B](https://huggingface.co/Zyphra/Zamba2-1.2B)
|
||||
* [Zamba2-2.7B](https://huggingface.co/Zyphra/Zamba2-2.7B)
|
||||
* [Zamba2-7B](https://huggingface.co/Zyphra/Zamba2-7B)
|
||||
|
||||
|
||||
## Issues
|
||||
For issues with model output, or community discussion, please use the Hugging Face community [forum](https://huggingface.co/Zyphra/Zamba2-7B/discussions)
|
||||
|
||||
|
||||
## License
|
||||
|
||||
The model weights are open-sourced via an Apache 2.0 license.
|
||||
|
||||
|
||||
## Zamba2Config
|
||||
|
||||
[[autodoc]] Zamba2Config
|
||||
|
||||
|
||||
## Zamba2Model
|
||||
|
||||
[[autodoc]] Zamba2Model
|
||||
- forward
|
||||
|
||||
|
||||
## Zamba2ForCausalLM
|
||||
|
||||
[[autodoc]] Zamba2ForCausalLM
|
||||
- forward
|
||||
|
||||
|
||||
## Zamba2ForSequenceClassification
|
||||
|
||||
[[autodoc]] transformers.Zamba2ForSequenceClassification
|
||||
- forward
|
@ -70,7 +70,7 @@ Alternatively, one can also perform inference using the classes:
|
||||
>>> inputs = image_processor(images=image, return_tensors="pt")
|
||||
|
||||
>>> with torch.no_grad():
|
||||
... outputs = model(pixel_values)
|
||||
... outputs = model(inputs)
|
||||
|
||||
>>> # interpolate to original size and visualize the prediction
|
||||
>>> ## ZoeDepth dynamically pads the input image. Thus we pass the original image size as argument
|
||||
|
@ -59,8 +59,8 @@ inheritance.
|
||||
For example:
|
||||
- If a configuration class inherits from another and adds/deletes an argument, the generated file will either directly
|
||||
reference it (in case of addition) or completely remove it (in case of deletion).
|
||||
- If a class inherits from another, for example: class GemmaModel(LlamaModel):, dependencies are automatically
|
||||
inferred. All submodules will be automatically inferred from the superclass.
|
||||
- If a class inherits from another, for example: `class GemmaModel(LlamaModel):`, dependencies are automatically
|
||||
inferred. All submodules will be automatically added from the superclass.
|
||||
- If you define new functions in the `modular` and use them inside classes, the linter will automatically infer the
|
||||
|
||||
You should be able to write everything (the tokenizer, the image processor, the model, the config) in this `modular`
|
||||
@ -120,46 +120,362 @@ class RobertaForMaskedLM(BertForMaskedLM):
|
||||
self.model = RobertaModel(config)
|
||||
```
|
||||
|
||||
Note that if you do not use the dependency that you defined, you will have the following error:
|
||||
|
||||
```bash
|
||||
ValueError: You defined `RobertaEmbeddings` in the modular_roberta.py, it should be used
|
||||
when you define `BertModel`, as it is one of it's direct dependencies. Make sure
|
||||
you use it in the `__init__` function.
|
||||
```
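In other words, any class you define in the modular file must actually be wired into the model. A minimal sketch of what satisfies the linter in this case (the exact attributes are illustrative, not the library's verbatim example) is:

```py
from ..bert.modeling_bert import BertEmbeddings, BertModel


class RobertaEmbeddings(BertEmbeddings):
    def __init__(self, config):
        super().__init__(config)
        self.pad_token_id = config.pad_token_id


class RobertaModel(BertModel):
    def __init__(self, config):
        super().__init__(config)
        # The dependency defined above is actually used here, so the linter is satisfied
        self.embeddings = RobertaEmbeddings(config)
```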
|
||||
|
||||
Additionally, you may find a list of examples here:
|
||||
|
||||
## What it is not
|
||||
|
||||
It is not a replacement for the modeling code (yet?), and if your model is not based on anything else that ever existed, then you can add a `modeling` file as usual.
|
||||
It is not a replacement for the modeling code (yet?), and if your model is not based on anything else that ever existed, then you can add a `modeling` file as usual. Similarly, if you cannot easily inherit your `configuration` (or `tokenization` or `processing`) file from another model's similar file, you can add that filetype directly (even though defining it in the modular file would work, it would clutter it).
|
||||
|
||||
|
||||
## Real world example breakdown
|
||||
|
||||
As explained, modular allows you to use regular Python inheritance from any other model's code in the library, in order to define your own. For this reason, it will work better/be easier if you first browse the library a bit to find models close to yours, in order to inherit from them. For example, are you using a sliding window in the `Attention` class? Then start by checking models that are well known to use it, e.g. `Mistral`, or `Qwen2`! Are you using interleaved `RotaryEmbedding` modules? Check out `Cohere`, `Cohere2` and `Glm` models! Otherwise a very strong starting point is to check out `Llama`. And if you are doing a bit of all of that at once, then you can mix and match!
|
||||
|
||||
Here are some common properties that your model might be using, and corresponding modeling files to check as an example:
|
||||
- Mixture of expert: `SwitchTransformers` or `Mixtral`
|
||||
- Interleaved (and/or partial) rotary embedding: `Glm`, `Phi`
|
||||
- State space models:
|
||||
- Hybrid with attention: `Jamba` , `Bamba`, `Zamba`
|
||||
- Mamba2: `Mamba2`
|
||||
- Recurrent hidden states: `Gemma2`
|
||||
- Different sliding window attention/full attention patterns per layer: `Gemma2`, `Cohere2`
|
||||
- Clipping of QKV: `Olmo`
|
||||
- Normalization of QK: `Olmo2`, `Cohere`
|
||||
- Fused QKV (not recommended): `Phi3`
|
||||
|
||||
At Hugging Face, we feel that learning by example is usually (one of) the best ways, so we will now go over a typical modular file, and the different features our linter provides (and its limitations)! 🤗 Let's use a real world example with the Olmo2 model, which I feel provides a very good illustration of the modular mechanisms. The original file can be found [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/olmo2/modular_olmo2.py). For simplicity, we will go over it class by class, and repeat the modular's definition of each class. For reference, the modeling and configuration of Olmo (v1), from which we will inherit a lot, can be found [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/olmo/modeling_olmo.py) and [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/olmo/configuration_olmo.py) respectively. The final modeling of Olmo2 (generated by running our linter on the modular file we will describe below) can be found [here](https://github.com/huggingface/transformers/blob/main/src/transformers/models/olmo2/modeling_olmo2.py)
|
||||
|
||||
Let's break it down!
|
||||
|
||||
|
||||
### Config class
|
||||
|
||||
Here is the `Config` definition in modular:
|
||||
|
||||
```py
|
||||
from ..olmo.configuration_olmo import OlmoConfig
|
||||
|
||||
class Olmo2Config(OlmoConfig):
|
||||
r"""
|
||||
This is the configuration class to store the configuration of a [`Olmo2Model`].
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
vocab_size=50304,
|
||||
hidden_size=4096,
|
||||
intermediate_size=11008,
|
||||
num_hidden_layers=32,
|
||||
num_attention_heads=32,
|
||||
num_key_value_heads=None,
|
||||
hidden_act="silu",
|
||||
max_position_embeddings=2048,
|
||||
initializer_range=0.02,
|
||||
use_cache=True,
|
||||
pad_token_id=1,
|
||||
bos_token_id=None,
|
||||
eos_token_id=50279,
|
||||
tie_word_embeddings=False,
|
||||
rope_theta=10000.0,
|
||||
rope_scaling=None,
|
||||
attention_bias=False,
|
||||
attention_dropout=0.0,
|
||||
rms_norm_eps=1e-5,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(
|
||||
vocab_size=vocab_size,
|
||||
hidden_size=hidden_size,
|
||||
intermediate_size=intermediate_size,
|
||||
num_hidden_layers=num_hidden_layers,
|
||||
num_attention_heads=num_attention_heads,
|
||||
num_key_value_heads=num_key_value_heads,
|
||||
hidden_act=hidden_act,
|
||||
max_position_embeddings=max_position_embeddings,
|
||||
initializer_range=initializer_range,
|
||||
use_cache=use_cache,
|
||||
pad_token_id=pad_token_id,
|
||||
bos_token_id=bos_token_id,
|
||||
eos_token_id=eos_token_id,
|
||||
tie_word_embeddings=tie_word_embeddings,
|
||||
rope_theta=rope_theta,
|
||||
rope_scaling=rope_scaling,
|
||||
attention_bias=attention_bias,
|
||||
attention_dropout=attention_dropout,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
self.rms_norm_eps = rms_norm_eps
|
||||
del self.clip_qkv
|
||||
```
|
||||
|
||||
Here, we correctly identified that the `Config` in Olmo2 is similar to Olmo's, up to a few details:
|
||||
1. The default value of most arguments has changed
|
||||
2. We have a new argument, `rms_norm_eps`
|
||||
3. The argument `clip_qkv` is not used anymore
|
||||
|
||||
To solve points 1. and 2., simply overwriting the `__init__` function with the new default arguments and adding the new one is enough, as you would expect when you want to overwrite a method in Python! Of course you also need to assign the new attribute `rms_norm_eps` to `self` in the `__init__`'s body.
|
||||
For point 3., we use the special syntax `del self.clip_qkv`, which, as you can expect, removes the assignment of this attribute in the unravelled code (after the conversion by the linter).
|
||||
|
||||
Now, there is a subtlety here: as you can see, we used `super().__init__(...)`. Usually, in Python, it is simply used to call the parent's `__init__`. In modular terms, however, it has a _slightly_ different meaning. When we find a call such as `super().my_function(...)` in the modular file, the linter will take the body of the `my_function` function in the parent, and unravel it where the call to `super().my_function(...)` occurred. Then, the `del self.clip_qkv` statement will remove the reference to `self.clip_qkv` from the unravelled body. Thus `del self.xxx` can only work in tandem with `super().my_function(...)`, and should always be placed after it (but you can add whatever you want _before_ calling `super()`, and it will be placed, as you can expect, before the parent's body).
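To make that rule concrete, here is a toy before/after sketch (the class and attribute names are made up for the example, they are not taken from the library):

```py
from torch import nn

# In the parent's modeling file:
class ParentModel(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.clip_qkv = config.clip_qkv

# In the modular file:
class ChildModel(ParentModel):
    def __init__(self, config):
        super().__init__(config)            # the parent's body gets unravelled here
        self.rms_norm_eps = config.rms_norm_eps
        del self.clip_qkv                   # removes the `self.clip_qkv = ...` line from that body

# What the linter roughly generates in the child's modeling file:
class ChildModel(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.rms_norm_eps = config.rms_norm_eps
```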
|
||||
|
||||
### Norm class
|
||||
|
||||
Here is the `Norm` class:
|
||||
|
||||
```py
|
||||
from ..llama.modeling_llama import LlamaRMSNorm
|
||||
|
||||
class Olmo2RMSNorm(LlamaRMSNorm):
|
||||
pass
|
||||
```
|
||||
|
||||
What can we say here? It is pretty explicit, isn't it? We do not modify anything from the `LlamaRMSNorm` definition. Thus the linter will unravel exactly the content of the parent (`LlamaRMSNorm`). The only change will be that every reference to "llama" in the docstrings, type hints, and comments (basically everywhere) will be changed to a reference to "olmo2" for consistency!
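For reference, the class that ends up in the generated `modeling_olmo2.py` looks roughly like the usual Llama-style RMSNorm, renamed (a sketch below, not a verbatim quote of the generated file):

```py
import torch
from torch import nn


class Olmo2RMSNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        input_dtype = hidden_states.dtype
        hidden_states = hidden_states.to(torch.float32)
        # RMS normalization: scale by the reciprocal root-mean-square of the hidden states
        variance = hidden_states.pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        return self.weight * hidden_states.to(input_dtype)
```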
|
||||
|
||||
### Attention class
|
||||
|
||||
Here is the `Attention` class:
|
||||
|
||||
```py
|
||||
from ..llama.modeling_llama import eager_attention_forward
|
||||
from ..olmo.modeling_olmo import OlmoAttention, apply_rotary_pos_emb
|
||||
|
||||
|
||||
# Olmo2 attention is identical to OLMo attention except:
|
||||
# - Norm is applied to attention queries and keys.
|
||||
# - No qkv clipping.
|
||||
class Olmo2Attention(OlmoAttention):
|
||||
def __init__(self, config: Olmo2Config, layer_idx: Optional[int] = None):
|
||||
super().__init__(config, layer_idx=layer_idx)
|
||||
self.q_norm = Olmo2RMSNorm(config.num_attention_heads * self.head_dim, config.rms_norm_eps)
|
||||
self.k_norm = Olmo2RMSNorm(config.num_key_value_heads * self.head_dim, config.rms_norm_eps)
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states: torch.Tensor,
|
||||
position_embeddings: Tuple[torch.Tensor, torch.Tensor],
|
||||
attention_mask: Optional[torch.Tensor],
|
||||
past_key_value: Optional[Cache] = None,
|
||||
cache_position: Optional[torch.LongTensor] = None,
|
||||
**kwargs,
|
||||
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
||||
input_shape = hidden_states.shape[:-1]
|
||||
hidden_shape = (*input_shape, -1, self.head_dim)
|
||||
|
||||
query_states = self.q_norm(self.q_proj(hidden_states))
|
||||
key_states = self.k_norm(self.k_proj(hidden_states))
|
||||
value_states = self.v_proj(hidden_states)
|
||||
|
||||
query_states = query_states.view(hidden_shape).transpose(1, 2)
|
||||
key_states = key_states.view(hidden_shape).transpose(1, 2)
|
||||
value_states = value_states.view(hidden_shape).transpose(1, 2)
|
||||
|
||||
cos, sin = position_embeddings
|
||||
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
|
||||
|
||||
if past_key_value is not None:
|
||||
# sin and cos are specific to RoPE models; cache_position needed for the static cache
|
||||
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
|
||||
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
||||
|
||||
attention_interface: Callable = eager_attention_forward
|
||||
if self.config._attn_implementation != "eager":
|
||||
if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
|
||||
logger.warning_once(
|
||||
"`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
|
||||
'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
|
||||
)
|
||||
else:
|
||||
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
|
||||
|
||||
attn_output, attn_weights = attention_interface(
|
||||
self,
|
||||
query_states,
|
||||
key_states,
|
||||
value_states,
|
||||
attention_mask,
|
||||
dropout=0.0 if not self.training else self.attention_dropout,
|
||||
scaling=self.scaling,
|
||||
**kwargs,
|
||||
)
|
||||
|
||||
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
|
||||
attn_output = self.o_proj(attn_output)
|
||||
return attn_output, attn_weights
|
||||
```
|
||||
|
||||
Now, what's happening here? In the `__init__`, we call `super().__init__(...)`, thus copying the parent's definition, then add 2 new layers of the `Olmo2RMSNorm` we just added previously. Indeed, those were not present in the original `Olmo` (v1) model. So, now, we also have to overwrite the `forward` method to use these 2 new layers, right? Indeed, if you check carefully, the definition of `forward` is identical to `Olmo`'s, but we added a pass through the norm layers just after projecting with `q_proj` and `k_proj`. However, to help us, we directly imported the functions `eager_attention_forward` from llama, and `apply_rotary_pos_emb` from olmo. The linter will then automatically add these imported functions in the final `modeling_olmo2.py` file, by copying their definitions from the source (imported) files. And it will even add the `rotate_half` and `repeat_kv` functions (which are used inside `apply_rotary_pos_emb` and `eager_attention_forward` respectively) by figuring out the dependency automatically. Neat, right?
|
||||
Note that we had to redefine this class, because we did not find any model defining the `Attention` layer with the added `RMSNorm` layer anywhere else in the library! Otherwise, we would have simply inherited from this model instead as we did for the `RMSNorm`!
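For instance, the rotary helpers that the linter copies over look roughly like the standard Llama-style definitions below (a sketch for illustration, not a verbatim quote of the generated file):

```py
import torch


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors."""
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed
```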
|
||||
|
||||
### The DecoderLayer class
|
||||
|
||||
Here is the `DecoderLayer` class:
|
||||
|
||||
```py
|
||||
from ..olmo.modeling_olmo import OlmoDecoderLayer
|
||||
|
||||
# The OLMo2 layers are identical to those of the OLMo model except:
|
||||
# - RMSNorm is used instead of standard layer norm.
|
||||
# - Norm is applied after attention/feedforward rather than before.
|
||||
class Olmo2DecoderLayer(OlmoDecoderLayer):
|
||||
def __init__(self, config: Olmo2Config, layer_idx: int):
|
||||
super().__init__(config, layer_idx=layer_idx)
|
||||
self.post_attention_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
||||
self.post_feedforward_layernorm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
||||
self.self_attn = Olmo2Attention(config=config, layer_idx=layer_idx)
|
||||
del self.input_layernorm
|
||||
|
||||
def forward(
|
||||
self,
|
||||
hidden_states: torch.Tensor,
|
||||
attention_mask: Optional[torch.Tensor] = None,
|
||||
position_ids: Optional[torch.LongTensor] = None,
|
||||
past_key_value: Optional[Cache] = None,
|
||||
output_attentions: Optional[bool] = False,
|
||||
use_cache: Optional[bool] = False,
|
||||
cache_position: Optional[torch.LongTensor] = None,
|
||||
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
|
||||
**kwargs,
|
||||
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
||||
residual = hidden_states
|
||||
|
||||
# Self Attention
|
||||
hidden_states, self_attn_weights = self.self_attn(
|
||||
hidden_states=hidden_states,
|
||||
attention_mask=attention_mask,
|
||||
position_ids=position_ids,
|
||||
past_key_value=past_key_value,
|
||||
output_attentions=output_attentions,
|
||||
use_cache=use_cache,
|
||||
cache_position=cache_position,
|
||||
position_embeddings=position_embeddings,
|
||||
**kwargs,
|
||||
)
|
||||
hidden_states = self.post_attention_layernorm(hidden_states)
|
||||
hidden_states = residual + hidden_states
|
||||
|
||||
# Fully Connected
|
||||
residual = hidden_states
|
||||
hidden_states = self.mlp(hidden_states)
|
||||
hidden_states = self.post_feedforward_layernorm(hidden_states)
|
||||
hidden_states = residual + hidden_states
|
||||
|
||||
outputs = (hidden_states,)
|
||||
if output_attentions:
|
||||
outputs += (self_attn_weights,)
|
||||
|
||||
return outputs
|
||||
```
|
||||
|
||||
At this point, you should start to pick up what is happening for this class. We switched the type of norm in the `__init__` by overwriting `self.post_attention_layernorm` after the call to `super().__init__(...)`, thus going from a `LayerNorm` in the parent class to our `RMSNorm` in this class. Then we simply deleted the `self.input_layernorm` attribute and replaced it by `self.post_feedforward_layernorm`, because the name no longer made sense, as we apply the norm after the block in `Olmo2` instead of before it in `Olmo`. For this reason, we also need to overwrite the `forward` method, to reflect the logic change.
|
||||
|
||||
Note however that if we had only switched `self.post_attention_layernorm` and `self.input_layernorm` from `LayerNorm`s to `RMSNorm`s (without the name and logic change of `self.input_layernorm`), we would not have had to redefine the `forward` method!
|
||||
|
||||
### The Model class
|
||||
|
||||
```py
|
||||
from ..olmo.modeling_olmo import OlmoModel
|
||||
|
||||
# The OLMo2 model is identical to the OLMo model, except RMSNorm is used instead of
|
||||
# standard layer norm for the output norm.
|
||||
class Olmo2Model(OlmoModel):
|
||||
def __init__(self, config: Olmo2Config):
|
||||
super().__init__(config)
|
||||
self.norm = Olmo2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
||||
self.layers = nn.ModuleList(
|
||||
[Olmo2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
||||
)
|
||||
```
|
||||
|
||||
Here, this is exactly what I was pointing out before: we simply change the _type_ of the `self.norm` attribute (going from `LayerNorm` in `Olmo` to `RMSNorm` in `Olmo2`). Since this change does not affect the logic of the `forward` method (the name of the layer and where it is used is identical to the parent's), we do not even need to overwrite it! It will be unravelled automatically! Note that we redefined `self.layers` for the sake of being explicit, but this is not even strictly required here as the definition is similar to what is found in `Olmo` (v1).
|
||||
|
||||
### Finally... The ForCausalLM class
|
||||
|
||||
Finally, here is the definition of the `ForCausalLM`:
|
||||
|
||||
```py
|
||||
from ..olmo.modeling_olmo import OlmoForCausalLM
|
||||
|
||||
class Olmo2ForCausalLM(OlmoForCausalLM):
|
||||
pass
|
||||
```
|
||||
|
||||
As for the `RMSNorm`, it is exactly the same as the parent's in terms of logic, so we do not have anything to do; the linter will figure it all out by itself. Almost disappointing, no?
|
||||
|
||||
|
||||
<a id="dependencies"></a>
|
||||
### But... What about the MLP, RotaryEmbedding and PreTrainedModel classes?
|
||||
|
||||
Indeed, if you inspect the file [modeling_olmo2.py](https://github.com/huggingface/transformers/blob/main/src/transformers/models/olmo2/modeling_olmo2.py) which is created by running the linter on `modular_olmo2.py`, you will notice that it also creates `Olmo2MLP`, `Olmo2RotaryEmbedding`, and `Olmo2PreTrainedModel` classes, that we did not define explicitly in `modular_olmo2.py`.
|
||||
|
||||
Well, it is one of the main features of our modular linter. Similarly to how some functions were added automatically with the `Attention` class (without directly importing them), classes that are a dependency of one of the inherited classes and which are not explicitly defined in the modular file will be added automatically as part of the dependency tracing. For example, in `OlmoDecoderLayer`, there is an attribute defined as `self.mlp = OlmoMLP(config)`. Because we never explicitly redefined a class named `Olmo2MLP` in `modular_olmo2.py`, the linter automatically created a class `Olmo2MLP`, similar to `OlmoMLP`. This is exactly the same as if we had done:
|
||||
|
||||
```py
|
||||
from ..olmo.modeling_olmo import OlmoMLP
|
||||
|
||||
class Olmo2MLP(OlmoMLP):
|
||||
pass
|
||||
```
|
||||
|
||||
but we did not even bother, because we _know_ this class is supposed to be exactly the same, and we never needed it anywhere else in the `modular_olmo2.py` file. In contrast, the class `Olmo2RMSNorm` was needed to (re)define the norms both in the `Attention` and `DecoderLayer` classes. The same logic is true for the `Olmo2PreTrainedModel` and `Olmo2RotaryEmbedding` classes.
|
||||
|
||||
Note however that if not redefined, classes will be copied from the file in which an inherited module uses them first. So if you wanted e.g. `Olmo2MLP` to inherit from, say, `MistralMLP` instead of `OlmoMLP` (here it was `OlmoMLP` because it was first implicitly used in `Olmo2DecoderLayer`, which inherited from `OlmoDecoderLayer`), you would need to be explicit and do:
|
||||
|
||||
```py
|
||||
# switch to mistral definition
|
||||
from ..mistral.modeling_mistral import MistralMLP
|
||||
|
||||
class Olmo2MLP(MistralMLP):
|
||||
pass
|
||||
```
|
||||
|
||||
## Advanced usage
|
||||
|
||||
### Removing attributes and functions
|
||||
To remove attributes that are not used in your modular model, and that you don't want to see in the unravelled modeling:
|
||||
Now that you should have a good grasp of how modular works, let's see some more advanced use cases and features you can use.
|
||||
|
||||
```python
|
||||
class GemmaModel(LlamaModel): | class GemmaModel(PreTrainedModel):
|
||||
def __init__(self, config): | def __init__(self, config):
|
||||
super().__init__(self, eos_token) | super().__init__(config)
|
||||
del self.embed_tokens | self.padding_idx = config.pad_token_id
|
||||
| self.vocab_size = config.vocab_size
|
||||
|
|
||||
| self.layers = nn.ModuleList(
|
||||
| [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
||||
| )
|
||||
| self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
||||
| self.rotary_emb = LlamaRotaryEmbedding(config=config)
|
||||
| self.gradient_checkpointing = False
|
||||
|
|
||||
| # Initialize weights and apply final processing
|
||||
| self.post_init()
|
||||
### Removing attributes which are not just assignments
|
||||
|
||||
As we have seen before, after using `super().__init__()`, we can use `del self.attribute` to remove a specific attribute which was defined in the parent. What if this attribute is used elsewhere though? Meaning it was not just "defined to be stored", as in the config for example. Consider the following case:
|
||||
|
||||
```py
|
||||
class DummyModel(nn.Module):
|
||||
|
||||
def __init__(self, config: DummyConfig):
|
||||
super().__init__()
|
||||
self.attribute = config.attribute
|
||||
if self.attribute:
|
||||
# do more stuff with `self.attribute` here
|
||||
...
|
||||
```
|
||||
If you check the original `LlamaModel`, it has an `embed_tokens` attribute, which was removed here (as you would expect!)
|
||||
|
||||
Removing a function is pretty similar: you just need to write it with a `raise ValueError("")` to mimic the behaviour you actually want when you remove a parent function in Python.
|
||||
Then inheriting from this `DummyModel` and doing
|
||||
|
||||
```py
|
||||
class MyNewDummyModel(DummyModel):
|
||||
|
||||
def __init__(self, config: MyNewDummyConfig):
|
||||
super().__init__(config)
|
||||
del self.attribute
|
||||
```
|
||||
|
||||
is not supported, because it will only suppress the assignment, i.e. the line `self.attribute = config.attribute` will disappear, but the `if` statement will stay and reference the attribute. We tried to make it work by suppressing every mention of the attribute, however it is not a sound solution in the general case (it can lead to very surprising effects and remove other important parts) and is therefore not possible.
|
||||
|
||||
But what if I still want to inherit from `DummyModel`? How to properly do it? How to use `super().__init__()` without copy/pasting the parent then? This brings us to the next point:
|
||||
|
||||
### Avoiding super() special meaning
|
||||
|
||||
Say you still want to inherit from `DummyModel` (because it is convenient for some other methods) but you do want to remove the `self.attribute`. How to properly override the `__init__` method, while calling `super()` but without unravelling the parent's code? Well, then be explicit about which class's `super()` you are calling! If we want to call the `nn.Module`'s `super()` for example, we can do the following (unravelled code on the right):
|
||||
|
||||
```py
|
||||
class MyNewDummyModel(DummyModel, nn.Module): | class MyNewDummyModel(nn.Module):
|
||||
|
|
||||
def __init__(self, config: MyNewDummyConfig): | def __init__(self, config: MyNewDummyConfig):
|
||||
nn.Module.__init__(config) | super().__init__()
|
||||
self.foo = config.foo | self.foo = config.foo
|
||||
... | ...
|
||||
```
|
||||
|
||||
### Deleting unused methods
|
||||
|
||||
Removing a class method is pretty similar to removing an attribute: you just need to overwrite it with a `raise AttributeError("")` to mimic the behaviour you actually want when you remove a parent function in Python. For example, the following will remove the methods in the unravelled code:
|
||||
|
||||
```python
|
||||
class GemmaTokenizer(LlamaTokenizer):
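    # Sketch: the method names below are illustrative, not the library's verbatim example.
    # Overriding a parent method with a body that only raises `AttributeError` tells the
    # linter to drop that method from the unravelled code.
    def get_spm_processor(self):
        raise AttributeError("Not needed for Gemma")

    def unk_token_length(self):
        raise AttributeError("Not needed for Gemma")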
|
||||
@ -174,37 +490,172 @@ class GemmaTokenizer(LlamaTokenizer):
|
||||
|
||||
### Define new functions
|
||||
|
||||
If you define a new function in the `modular` file to be used inside a class, say
|
||||
Of course, if you define a new function in the `modular` file, and use it inside an inherited class, say
|
||||
|
||||
```python
|
||||
def my_new_function(*args, **kwargs):
|
||||
# Do something here
|
||||
pass
|
||||
|
||||
class GemmaModel(LlamaModel):
|
||||
class DummyModel(LlamaModel):
|
||||
def forward(*args, **kwargs):
|
||||
# Call the function
|
||||
example = my_new_function(*args, **kwargs)
|
||||
# continue here
|
||||
```
|
||||
|
||||
the `my_new_function` function (and, recursively, any other new functions called in its body) will be automatically copy-pasted
|
||||
in the file where it is used.
|
||||
the `my_new_function` function (and, recursively, any other functions called in its body) will be automatically added to the unravelled code even if it is not present in the parent's file (here Llama).
|
||||
|
||||
### Calling `super()`
|
||||
We recently shipped a few features that allow you to go from:
|
||||
```python
|
||||
class GemmaTokenizer(LlamaTokenizer, PretrainedTokenizerFast): | class GemmaModel(nn.Module):
|
||||
def __init__(self, eos_token="</s>"): | def __init__(self):
|
||||
eos_token = AddedToken(eos_token) | eos_token = AddedToken(eos_token)
|
||||
PretrainedTokenizerFast.__init__(self, eos_token) | super().__init__(eos_token)
|
||||
### Decorators
|
||||
|
||||
By default, if you inherit from a class and override a method which has 1 (or more) decorators in the parent's method, the decorators will be added as well in the unravelled code, _but only if you do not add any yourself_. Otherwise, it will of course use whatever decorators you redefined.
|
||||
|
||||
That is, imagine the following parent class:
|
||||
|
||||
```py
|
||||
class DummyModel(nn.Module):
|
||||
...
|
||||
|
||||
@decorator(...)
|
||||
def forward(...)
|
||||
# do stuff here
|
||||
```
|
||||
This is useful when you **don't** want to unravel the call to `super()`, and you want to differentiate which super init call you are doing!
|
||||
|
||||
### Special naming
|
||||
We now also support special cases like
|
||||
```python
|
||||
class GemmaVisionModel(CLIPModel):
|
||||
Then, if you simply override the method it will produce (modular on the left, unravelled code on the right):
|
||||
|
||||
```py
|
||||
class NewModel(DummyModel): | class NewModel(nn.Module):
|
||||
... | ...
|
||||
|
|
||||
def forward(...): | @decorator(...)
|
||||
... | def forward(...):
|
||||
| ...
|
||||
```
|
||||
|
||||
That is, it keeps the parent's decorators by default. However, if you do:
|
||||
|
||||
```py
|
||||
class NewModel(DummyModel): | class NewModel(nn.Module):
|
||||
... | ...
|
||||
|
|
||||
@my_new_decorator(...) | @my_new_decorator(...)
|
||||
def forward(...): | def forward(...):
|
||||
... | ...
|
||||
```
|
||||
|
||||
Then it keeps your own new decorator.
|
||||
|
||||
### The super_kwargs special case
|
||||
|
||||
In the above case about decorators, what if the `forward` method is really long, and I just want to switch the decorators? Do I really have to redefine it all and copy/paste the body just for the decorator? Fortunately, no. If you followed until this point, you know that you can use `super().forward(...)`, and it will unravel the parent's body automatically. But what if there are plenty of arguments in the function's signature, and we are very lazy? For that use-case, we introduced the special syntax `**super_kwargs` in the overridden method signature. It basically means: "unravel all the parent's signature arguments here". For example, a common signature in the `ForCausalLM` model is the following (copied from llama's modeling):
|
||||
|
||||
```py
|
||||
class LlamaForCausalLM(nn.Module):
|
||||
...
|
||||
|
||||
@add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
|
||||
@replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
|
||||
def forward(
|
||||
self,
|
||||
input_ids: torch.LongTensor = None,
|
||||
attention_mask: Optional[torch.Tensor] = None,
|
||||
position_ids: Optional[torch.LongTensor] = None,
|
||||
past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
|
||||
inputs_embeds: Optional[torch.FloatTensor] = None,
|
||||
labels: Optional[torch.LongTensor] = None,
|
||||
use_cache: Optional[bool] = None,
|
||||
output_attentions: Optional[bool] = None,
|
||||
output_hidden_states: Optional[bool] = None,
|
||||
return_dict: Optional[bool] = None,
|
||||
cache_position: Optional[torch.LongTensor] = None,
|
||||
num_logits_to_keep: int = 0,
|
||||
**kwargs: Unpack[KwargsForCausalLM],
|
||||
) -> Union[Tuple, CausalLMOutputWithPast]:
|
||||
...
|
||||
```
|
||||
|
||||
As you can see, this is a rather long and complicated signature. But if you do the following (as usual, modular on the left, unravelled code by the linter on the right):
|
||||
|
||||
```py
|
||||
class NewModelForCausalLM(LlamaForCausalLM): | class LlamaForCausalLM(nn.Module):
|
||||
... | ...
|
||||
|
|
||||
@my_new_decorator | @my_new_decorator
|
||||
def forward(self, **super_kwargs): | def forward(
|
||||
super().forward(**super_kwargs) | self,
|
||||
| input_ids: torch.LongTensor = None,
|
||||
| attention_mask: Optional[torch.Tensor] = None,
|
||||
| position_ids: Optional[torch.LongTensor] = None,
|
||||
| past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = |None,
|
||||
| inputs_embeds: Optional[torch.FloatTensor] = None,
|
||||
| labels: Optional[torch.LongTensor] = None,
|
||||
| use_cache: Optional[bool] = None,
|
||||
| output_attentions: Optional[bool] = None,
|
||||
| output_hidden_states: Optional[bool] = None,
|
||||
| return_dict: Optional[bool] = None,
|
||||
| cache_position: Optional[torch.LongTensor] = None,
|
||||
| num_logits_to_keep: int = 0,
|
||||
| **kwargs: Unpack[KwargsForCausalLM],
|
||||
| ) -> Union[Tuple, CausalLMOutputWithPast]:
|
||||
| ...
|
||||
```
|
||||
|
||||
and the `**super_kwargs` syntax unravelled all the arguments, while the `super().forward()` syntax unravelled the whole body! As you can see, this is a great combo when you just want to switch the decorators, as it is very easy to use, and makes it explicit that the only change you want to apply is the decorator.
|
||||
|
||||
However, we want to make it clear that the `**super_kwargs` syntax is not a replacement for being explicit when you redefine your methods: if you actually overwrite the method (i.e. you do not call `super().method()`), then we want you to explicitly write the signature as you would usually. This is only a shortcut when switching decorators, and a few other niche cases.
|
||||
|
||||
### The DOCSTRING variables
|
||||
|
||||
Usually, if an object is defined both in the modular file and in the modeling file from which we inherit, then the definition in the modular file takes precedence. However, this is not the case for assignments containing the pattern `DOCSTRING`. Indeed, we usually have variables defined as `MODEL_START_DOCSTRING` and `MODEL_INPUT_DOCSTRING` in the modeling files. These are just very big blocks of, well, docstrings... But they are (almost) always exactly the same up to the model name! And modular automatically rewrites the names everywhere! For this reason, assignments containing the pattern will _always_ use the definition found in the source file instead of the modular file. This is extremely handy if we need the variable reference somewhere (e.g. to redefine a decorator) but we do not want to clutter the modular file with 100 lines of docstrings which are always the same. It allows doing the following (taken from [modular_starcoder2.py](https://github.com/huggingface/transformers/blob/main/src/transformers/models/starcoder2/modular_starcoder2.py#L146)):
|
||||
|
||||
```py
|
||||
STARCODER2_INPUTS_DOCSTRING = None # will be automatically redefined
|
||||
|
||||
class Starcoder2Model(MistralModel):
|
||||
...
|
||||
|
||||
@add_start_docstrings_to_model_forward(STARCODER2_INPUTS_DOCSTRING)
|
||||
def forward(...)
|
||||
...
|
||||
```
|
||||
|
||||
and here, the linter will correctly take the same definition of the docstring as in `Mistral`, without having to clutter the modular file!
|
||||
|
||||
## Limitations
|
||||
|
||||
Now, let's go over some of the limitations of modular.
|
||||
|
||||
### Special naming (essentially for multimodal models)
|
||||
|
||||
Because our linter automatically renames everything when inheriting from a class (defining `class NewModelMLP(LlamaMLP)` will rename every mention of `Llama` to `NewModel`, and recursively for all dependencies grabbed), it has somewhat strict rules when it comes to naming. For consistency reasons, we require that you always use the same class name prefix when inheriting different classes from the same file. For example, doing:
|
||||
|
||||
```py
|
||||
class MyModelIncredibleMLP(LlamaMLP):
|
||||
...
|
||||
|
||||
class MyModelDecoderLayer(LlamaDecoderLayer):
|
||||
...
|
||||
```
|
||||
|
||||
is not recommended, first because it breaks standards in the library and we do not like it, and second because the linter will not know how to rename potential high-order dependencies (should we use `MyModelIncredible`, or `MyModel`?).
|
||||
|
||||
If there are no dependencies to grab implicitly however (see [this section](#dependencies) to understand implicit dependencies), local renaming (for a single class) will not be an issue and the linter will not complain. But make sure to explicitly redefine every other mention of the class with the new name pattern! For example in the example above, all mentions of `LlamaMLP` in other inherited modules should be explicitly replaced by mentions of `MyModelIncredibleMLP`, otherwise the linter may add a new and unwanted `MyModelMLP` class!
|
||||
|
||||
In any case, if an ambiguous case is detected, the linter will raise a warning such as:
|
||||
|
||||
```
|
||||
We detected multiple prefix names when inheriting from transformers.models.llama.modeling_llama: ('Emu3Text', 'Emu3'). We will only use the most used 'Emu3' prefix when grabbing args and dependencies. Make sure to subclass the intermediate classes with the prefix you want (if different from 'Emu3') or use a single prefix in all the modular (best).
|
||||
```
|
||||
|
||||
explaining what is happening, and which prefix is used by default for grabbing dependencies. As explained, if you see automatic dependencies appear with a prefix but you want another one, then explicitly rename these classes locally with a simple `pass` class, such as
|
||||
|
||||
```py
|
||||
class Emu3TextMLP(LlamaMLP):
|
||||
pass
|
||||
```
|
||||
where the name of your class `GemmaVision` is not the same as the modular `Gemma`. This is super useful for composite models.
|
||||
|
||||
Such warnings and renaming complications usually only arise when defining multimodal models, when you want to define e.g. the text part of your model from an existing model, but want to add the `Text` part to the class names to make it clear what they refer to in the multimodal setup.
|
||||
|
||||
### Automatic docstrings issue (mostly for Configs)
|
||||
|
||||
When inheriting a Config class and adding or deleting some attributes, it may be tempting to only redefine the new attributes in the docstring and hope that modular will do the rest. And similarly, when deleting an argument, to do nothing and hope that modular will remove it from the docstring by itself. However, due to current limitations of our linter, this is not yet supported. Thus, if you are in this case, you need to put the whole docstring (as it should appear in the end, with the correct arguments and default values) directly in the modular file under the class definition.

@ -49,6 +49,7 @@ FlashAttention-2 is currently supported for the following architectures:
* [Dbrx](https://huggingface.co/docs/transformers/model_doc/dbrx#transformers.DbrxModel)
* [DiffLlama](https://huggingface.co/docs/transformers/model_doc/diffllama#transformers.DiffLlamaModel)
* [DistilBert](https://huggingface.co/docs/transformers/model_doc/distilbert#transformers.DistilBertModel)
* [Emu3](https://huggingface.co/docs/transformers/model_doc/emu3)
* [Gemma](https://huggingface.co/docs/transformers/model_doc/gemma#transformers.GemmaModel)
* [Gemma2](https://huggingface.co/docs/transformers/model_doc/gemma2#transformers.Gemma2Model)
* [GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)
@ -68,6 +69,7 @@ FlashAttention-2 is currently supported for the following architectures:
* [Llava-NeXT](https://huggingface.co/docs/transformers/model_doc/llava_next)
* [Llava-NeXT-Video](https://huggingface.co/docs/transformers/model_doc/llava_next_video)
* [LLaVA-Onevision](https://huggingface.co/docs/transformers/model_doc/llava_onevision)
* [Moonshine](https://huggingface.co/docs/transformers/model_doc/moonshine#transformers.MoonshineModel)
* [Mimi](https://huggingface.co/docs/transformers/model_doc/mimi)
* [VipLlava](https://huggingface.co/docs/transformers/model_doc/vipllava)
* [VideoLlava](https://huggingface.co/docs/transformers/model_doc/video_llava)
@ -95,6 +97,7 @@ FlashAttention-2 is currently supported for the following architectures:
* [Qwen2Audio](https://huggingface.co/docs/transformers/model_doc/qwen2_audio#transformers.Qwen2AudioEncoder)
* [Qwen2MoE](https://huggingface.co/docs/transformers/model_doc/qwen2_moe#transformers.Qwen2MoeModel)
* [Qwen2VL](https://huggingface.co/docs/transformers/model_doc/qwen2_vl#transformers.Qwen2VLModel)
* [Qwen2.5VL](https://huggingface.co/docs/transformers/model_doc/qwen2_5_vl#transformers.Qwen2_5_VLModel)
* [RAG](https://huggingface.co/docs/transformers/model_doc/rag#transformers.RagModel)
* [SpeechEncoderDecoder](https://huggingface.co/docs/transformers/model_doc/speech_encoder_decoder#transformers.SpeechEncoderDecoderModel)
* [VisionEncoderDecoder](https://huggingface.co/docs/transformers/model_doc/vision_encoder_decoder#transformers.VisionEncoderDecoderModel)
@ -107,6 +110,8 @@ FlashAttention-2 is currently supported for the following architectures:
* [SigLIP](https://huggingface.co/docs/transformers/model_doc/siglip)
* [UniSpeech](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/unispeech#transformers.UniSpeechModel)
* [unispeech_sat](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/unispeech-sat#transformers.UniSpeechSatModel)
* [helium](https://huggingface.co/docs/transformers/main/en/model_doc/helium#transformers.HeliumModel)
* [Zamba2](https://huggingface.co/docs/transformers/model_doc/zamba2)

You can request to add FlashAttention-2 support for another model by opening a GitHub Issue or Pull Request.

@ -244,6 +249,7 @@ For now, Transformers supports SDPA inference and training for the following architectures:
* [DistilBert](https://huggingface.co/docs/transformers/model_doc/distilbert#transformers.DistilBertModel)
* [Dpr](https://huggingface.co/docs/transformers/model_doc/dpr#transformers.DprReader)
* [EncoderDecoder](https://huggingface.co/docs/transformers/model_doc/encoder_decoder#transformers.EncoderDecoderModel)
* [Emu3](https://huggingface.co/docs/transformers/model_doc/emu3)
* [Falcon](https://huggingface.co/docs/transformers/model_doc/falcon#transformers.FalconModel)
* [Gemma](https://huggingface.co/docs/transformers/model_doc/gemma#transformers.GemmaModel)
* [Gemma2](https://huggingface.co/docs/transformers/model_doc/gemma2#transformers.Gemma2Model)
@ -265,6 +271,7 @@ For now, Transformers supports SDPA inference and training for the following architectures:
* [Llava-NeXT-Video](https://huggingface.co/docs/transformers/model_doc/llava_next_video)
* [LLaVA-Onevision](https://huggingface.co/docs/transformers/model_doc/llava_onevision)
* [M2M100](https://huggingface.co/docs/transformers/model_doc/m2m_100#transformers.M2M100Model)
* [Moonshine](https://huggingface.co/docs/transformers/model_doc/moonshine#transformers.MoonshineModel)
* [Mimi](https://huggingface.co/docs/transformers/model_doc/mimi)
* [Mistral](https://huggingface.co/docs/transformers/model_doc/mistral#transformers.MistralModel)
* [Mllama](https://huggingface.co/docs/transformers/model_doc/mllama#transformers.MllamaForConditionalGeneration)
@ -283,8 +290,8 @@ For now, Transformers supports SDPA inference and training for the following architectures:
* [Phi3](https://huggingface.co/docs/transformers/model_doc/phi3#transformers.Phi3Model)
* [PhiMoE](https://huggingface.co/docs/transformers/model_doc/phimoe#transformers.PhimoeModel)
* [Idefics](https://huggingface.co/docs/transformers/model_doc/idefics#transformers.IdeficsModel)
* [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper#transformers.WhisperModel)
* [mBart](https://huggingface.co/docs/transformers/model_doc/mbart#transformers.MBartModel)
* [Moonshine](https://huggingface.co/docs/transformers/model_doc/moonshine#transformers.MoonshineModel)
* [Mistral](https://huggingface.co/docs/transformers/model_doc/mistral#transformers.MistralModel)
* [Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral#transformers.MixtralModel)
* [StableLm](https://huggingface.co/docs/transformers/model_doc/stablelm#transformers.StableLmModel)
@ -292,6 +299,7 @@ For now, Transformers supports SDPA inference and training for the following architectures:
* [Qwen2](https://huggingface.co/docs/transformers/model_doc/qwen2#transformers.Qwen2Model)
* [Qwen2Audio](https://huggingface.co/docs/transformers/model_doc/qwen2_audio#transformers.Qwen2AudioEncoder)
* [Qwen2MoE](https://huggingface.co/docs/transformers/model_doc/qwen2_moe#transformers.Qwen2MoeModel)
* [Qwen2.5VL](https://huggingface.co/docs/transformers/model_doc/qwen2_5_vl#transformers.Qwen2_5_VLModel)
* [RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta#transformers.RobertaModel)
* [Sew](https://huggingface.co/docs/transformers/main/en/model_doc/sew#transformers.SEWModel)
* [SigLIP](https://huggingface.co/docs/transformers/model_doc/siglip)
@ -320,6 +328,8 @@ For now, Transformers supports SDPA inference and training for the following architectures:
* [XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta#transformers.XLMRobertaModel)
* [XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLModel)
* [YOLOS](https://huggingface.co/docs/transformers/model_doc/yolos#transformers.YolosModel)
* [helium](https://huggingface.co/docs/transformers/main/en/model_doc/helium#transformers.HeliumModel)
* [Zamba2](https://huggingface.co/docs/transformers/model_doc/zamba2)

<Tip>

@ -22,15 +22,42 @@ Try GPTQ quantization with PEFT in this [notebook](https://colab.research.google

</Tip>

The [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) library implements the GPTQ algorithm, a post-training quantization technique where each row of the weight matrix is quantized independently to find a version of the weights that minimizes the error. These weights are quantized to int4, but they're restored to fp16 on the fly during inference. This can save your memory-usage by 4x because the int4 weights are dequantized in a fused kernel rather than a GPU's global memory, and you can also expect a speedup in inference because using a lower bitwidth takes less time to communicate.
Both [GPTQModel](https://github.com/ModelCloud/GPTQModel) and [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) libraries implement the GPTQ algorithm, a post-training quantization technique where each row of the weight matrix is quantized independently to find a version of the weights that minimizes error. These weights are quantized to int4, stored as int32 (int4 x 8) and dequantized (restored) to fp16 on the fly during inference. This can save memory by almost 4x because the int4 weights are often dequantized in a fused kernel. You can also expect a substantial speedup in inference due to lower bandwidth requirements for lower bitwidth.

Before you begin, make sure the following libraries are installed:
[GPTQModel](https://github.com/ModelCloud/GPTQModel) started as a maintained fork of AutoGPTQ but has since differentiated itself with the following major differences.

* Model support: GPTQModel continues to support all of the latest LLM models.
* Multimodal support: GPTQModel supports accurate quantization of Qwen 2-VL and Ovis 1.6-VL image-to-text models.
* Platform support: Linux, macOS (Apple Silicon), and Windows 11.
* Hardware support: NVIDIA CUDA, AMD ROCm, Apple Silicon M1/MPS/CPU, Intel/AMD CPU, and Intel Datacenter Max/Arc GPUs.
* Asymmetric support: Asymmetric quantization can potentially introduce lower quantization errors compared to symmetric quantization. However, it is not backward compatible with AutoGPTQ, and not all kernels, such as Marlin, support asymmetric quantization.
* IPEX kernel for Intel/AMD accelerated CPU and Intel GPU (Datacenter Max/Arc GPUs) support.
* Updated Marlin kernel from Neural Magic optimized for A100 (Ampere).
* Updated kernels with auto-padding for legacy model support and models with non-uniform in/out-features.
* Faster quantization, lower memory usage, and more accurate default quantization via GPTQModel quantization APIs.
* User and developer friendly APIs.


[AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) will likely be deprecated in the future due to the lack of continued support for new models and features.

Before you begin, make sure the following libraries are installed and updated to the latest release:

```bash
pip install auto-gptq
pip install --upgrade accelerate optimum transformers
```

Then install either GPTQModel or AutoGPTQ.

```bash
pip install gptqmodel --no-build-isolation
```

or

```bash
pip install auto-gptq --no-build-isolation
```

To quantize a model (currently only supported for text models), you need to create a [`GPTQConfig`] class and set the number of bits to quantize to, a dataset to calibrate the weights for quantization, and a tokenizer to prepare the dataset.
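
As a hedged sketch of that step (the checkpoint name and the `"c4"` calibration dataset below are illustrative choices, not requirements):

```py
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_id = "facebook/opt-125m"  # illustrative checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)

# 4-bit GPTQ quantization, calibrated on the "c4" dataset
gptq_config = GPTQConfig(bits=4, dataset="c4", tokenizer=tokenizer)
quantized_model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", quantization_config=gptq_config)
```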

```py
@ -92,9 +119,22 @@ from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto")
```

## Marlin

[Marlin](https://github.com/IST-DASLab/marlin) is a 4-bit only CUDA GPTQ kernel, highly optimized for the NVIDIA A100 GPU (Ampere) architecture. Loading, dequantization, and execution of post-dequantized weights are highly parallelized, offering a substantial inference improvement versus the original CUDA GPTQ kernel. Marlin is only available for quantized inference and does not support model quantization.

Marlin inference can be activated with the `backend` parameter in [`GPTQConfig`].

```py
from transformers import AutoModelForCausalLM, GPTQConfig

model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="auto", quantization_config=GPTQConfig(bits=4, backend="marlin"))
```

## ExLlama

[ExLlama](https://github.com/turboderp/exllama) is a Python/C++/CUDA implementation of the [Llama](model_doc/llama) model that is designed for faster inference with 4-bit GPTQ weights (check out these [benchmarks](https://github.com/huggingface/optimum/tree/main/tests/benchmark#gptq-benchmark)). The ExLlama kernel is activated by default when you create a [`GPTQConfig`] object. To boost inference speed even further, use the [ExLlamaV2](https://github.com/turboderp/exllamav2) kernels by configuring the `exllama_config` parameter:
[ExLlama](https://github.com/turboderp/exllama) is a CUDA implementation of the [Llama](model_doc/llama) model that is designed for faster inference with 4-bit GPTQ weights (check out these [benchmarks](https://github.com/huggingface/optimum/tree/main/tests/benchmark#gptq-benchmark)). The ExLlama kernel is activated by default when you create a [`GPTQConfig`] object. To boost inference speed even further, use the [ExLlamaV2](https://github.com/turboderp/exllamav2) kernels by configuring the `exllama_config` parameter:

```py
import torch
@ -110,11 +150,11 @@ Only 4-bit models are supported, and we recommend deactivating the ExLlama kerne

</Tip>

The ExLlama kernels are only supported when the entire model is on the GPU. If you're doing inference on a CPU with AutoGPTQ (version > 0.4.2), then you'll need to disable the ExLlama kernel. This overwrites the attributes related to the ExLlama kernels in the quantization config of the config.json file.
The ExLlama kernels are only supported when the entire model is on the GPU. If you're doing inference on a CPU with AutoGPTQ or GPTQModel, then you'll need to disable the ExLlama kernel. This overwrites the attributes related to the ExLlama kernels in the quantization config of the config.json file.

```py
import torch
from transformers import AutoModelForCausalLM, GPTQConfig
gptq_config = GPTQConfig(bits=4, use_exllama=False)
model = AutoModelForCausalLM.from_pretrained("{your_username}/opt-125m-gptq", device_map="cpu", quantization_config=gptq_config)
```
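
For completeness, switching to the ExLlamaV2 kernels mentioned above goes through `exllama_config` on [`GPTQConfig`]; a sketch, reusing the placeholder checkpoint name from the examples:

```py
from transformers import AutoModelForCausalLM, GPTQConfig

# select the ExLlamaV2 kernels for 4-bit GPTQ inference
gptq_config = GPTQConfig(bits=4, exllama_config={"version": 2})
model = AutoModelForCausalLM.from_pretrained(
    "{your_username}/opt-125m-gptq", device_map="auto", quantization_config=gptq_config
)
```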

@ -45,32 +45,50 @@ In short, supporting a wide range of quantization methods allows you to pick the

Use the table below to help you decide which quantization method to use.

| Quantization method | On the fly quantization | CPU | CUDA GPU | RoCm GPU (AMD) | Metal (Apple Silicon) | Intel GPU | torch.compile() support | Number of bits | Supports fine-tuning (through PEFT) | Serializable with 🤗 transformers | 🤗 transformers support | Link to library |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| [AQLM](./aqlm) | 🔴 | 🟢 | 🟢 | 🔴 | 🔴 | 🔴 | 🟢 | 1 / 2 | 🟢 | 🟢 | 🟢 | https://github.com/Vahe1994/AQLM |
| [AWQ](./awq) | 🔴 | 🟢 | 🟢 | 🟢 | 🔴 | 🟢 | ? | 4 | 🟢 | 🟢 | 🟢 | https://github.com/casper-hansen/AutoAWQ |
| [bitsandbytes](./bitsandbytes) | 🟢 | 🟡 * | 🟢 | 🟡 * | 🔴 ** | 🟡 * | 🔴 (soon!) | 4 / 8 | 🟢 | 🟢 | 🟢 | https://github.com/bitsandbytes-foundation/bitsandbytes |
| [compressed-tensors](./compressed_tensors) | 🔴 | 🟢 | 🟢 | 🟢 | 🔴 | 🔴 | 🔴 | 1 - 8 | 🟢 | 🟢 | 🟢 | https://github.com/neuralmagic/compressed-tensors |
| [EETQ](./eetq) | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 🔴 | ? | 8 | 🟢 | 🟢 | 🟢 | https://github.com/NetEase-FuXi/EETQ |
| GGUF / GGML (llama.cpp) | 🟢 | 🟢 | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 1 - 8 | 🔴 | [See GGUF section](../gguf) | [See GGUF section](../gguf) | https://github.com/ggerganov/llama.cpp |
| [GPTQ](./gptq) | 🔴 | 🔴 | 🟢 | 🟢 | 🔴 | 🔴 | 🔴 | 2 - 3 - 4 - 8 | 🟢 | 🟢 | 🟢 | https://github.com/AutoGPTQ/AutoGPTQ |
| [HIGGS](./higgs) | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 🔴 | 🟢 | 2 - 4 | 🔴 | 🟢 | 🟢 | https://github.com/HanGuo97/flute |
| [HQQ](./hqq) | 🟢 | 🟢 | 🟢 | 🔴 | 🔴 | 🔴 | 🟢 | 1 - 8 | 🟢 | 🔴 | 🟢 | https://github.com/mobiusml/hqq/ |
| [optimum-quanto](./quanto) | 🟢 | 🟢 | 🟢 | 🔴 | 🟢 | 🔴 | 🟢 | 2 / 4 / 8 | 🔴 | 🔴 | 🟢 | https://github.com/huggingface/optimum-quanto |
| [FBGEMM_FP8](./fbgemm_fp8.md) | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 🔴 | 🔴 | 8 | 🔴 | 🟢 | 🟢 | https://github.com/pytorch/FBGEMM |
| [torchao](./torchao.md) | 🟢 | | 🟢 | 🔴 | partial support (int4 weight only) | 🔴 | | 4 / 8 | | 🟢🔴 | 🟢 | https://github.com/pytorch/ao |
| [VPTQ](./vptq) | 🔴 | 🔴 | 🟢 | 🟡 | 🔴 | 🔴 | 🟢 | 1 - 8 | 🔴 | 🟢 | 🟢 | https://github.com/microsoft/VPTQ |

| Quantization Method | On the fly quantization | CPU | CUDA GPU | ROCm GPU | Metal (Apple Silicon) | Intel GPU | Torch compile() | Bits | PEFT Fine Tuning | Serializable with 🤗Transformers | 🤗Transformers Support | Link to library |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
| [AQLM](./aqlm.md) | 🔴 | 🟢 | 🟢 | 🔴 | 🔴 | 🔴 | 🟢 | 1/2 | 🟢 | 🟢 | 🟢 | https://github.com/Vahe1994/AQLM |
| [AWQ](./awq.md) | 🔴 | 🟢 | 🟢 | 🟢 | 🔴 | 🟢 | ? | 4 | 🟢 | 🟢 | 🟢 | https://github.com/casper-hansen/AutoAWQ |
| [bitsandbytes](./bitsandbytes.md) | 🟢 | 🟡 <sub>1</sub> | 🟢 | 🟡 <sub>1</sub> | 🔴 <sub>2</sub> | 🟡 <sub>1</sub> | 🔴 <sub>1</sub> | 4/8 | 🟢 | 🟢 | 🟢 | https://github.com/bitsandbytes-foundation/bitsandbytes |
| [compressed-tensors](./compressed_tensors.md) | 🔴 | 🟢 | 🟢 | 🟢 | 🔴 | 🔴 | 🔴 | 1/8 | 🟢 | 🟢 | 🟢 | https://github.com/neuralmagic/compressed-tensors |
| [EETQ](./eetq.md) | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 🔴 | ? | 8 | 🟢 | 🟢 | 🟢 | https://github.com/NetEase-FuXi/EETQ |
| [GGUF / GGML (llama.cpp)](../gguf.md) | 🟢 | 🟢 | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 1/8 | 🔴 | [See Notes](../gguf.md) | [See Notes](../gguf.md) | https://github.com/ggerganov/llama.cpp |
| [GPTQModel](./gptq.md) | 🔴 | 🟢 <sub>3</sub> | 🟢 | 🟢 | 🟢 | 🟢 <sub>4</sub> | 🔴 | 2/3/4/8 | 🟢 | 🟢 | 🟢 | https://github.com/ModelCloud/GPTQModel |
| [AutoGPTQ](./gptq.md) | 🔴 | 🔴 | 🟢 | 🟢 | 🔴 | 🔴 | 🔴 | 2/3/4/8 | 🟢 | 🟢 | 🟢 | https://github.com/AutoGPTQ/AutoGPTQ |
| [HIGGS](./higgs.md) | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 🔴 | 🟢 | 2/4 | 🔴 | 🟢 | 🟢 | https://github.com/HanGuo97/flute |
| [HQQ](./hqq.md) | 🟢 | 🟢 | 🟢 | 🔴 | 🔴 | 🔴 | 🟢 | 1/8 | 🟢 | 🔴 | 🟢 | https://github.com/mobiusml/hqq/ |
| [optimum-quanto](./quanto.md) | 🟢 | 🟢 | 🟢 | 🔴 | 🟢 | 🔴 | 🟢 | 2/4/8 | 🔴 | 🔴 | 🟢 | https://github.com/huggingface/optimum-quanto |
| [FBGEMM_FP8](./fbgemm_fp8.md) | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 🔴 | 🔴 | 8 | 🔴 | 🟢 | 🟢 | https://github.com/pytorch/FBGEMM |
| [torchao](./torchao.md) | 🟢 | | 🟢 | 🔴 | 🟡 <sub>5</sub> | 🔴 | | 4/8 | | 🟢🔴 | 🟢 | https://github.com/pytorch/ao |
| [VPTQ](./vptq.md) | 🔴 | 🔴 | 🟢 | 🟡 | 🔴 | 🔴 | 🟢 | 1/8 | 🔴 | 🟢 | 🟢 | https://github.com/microsoft/VPTQ |

<Tip>

\* bitsandbytes is being refactored to support multiple backends beyond CUDA. Currently, ROCm (AMD GPU) and Intel CPU implementations are mature, with Intel XPU in progress and Apple Silicon support expected by Q4/Q1. For installation instructions and the latest backend updates, visit [this link](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend).

We value your feedback to help identify bugs before the full release! Check out [these docs](https://huggingface.co/docs/bitsandbytes/main/en/non_cuda_backends) for more details and feedback links.

**1:** bitsandbytes is being refactored to support multiple backends beyond CUDA. Currently, ROCm (AMD GPU) and Intel CPU implementations are mature, with Intel XPU in progress and Apple Silicon support expected by Q4/Q1. For installation instructions and the latest backend updates, visit [this link](https://huggingface.co/docs/bitsandbytes/main/en/installation#multi-backend). Check out [these docs](https://huggingface.co/docs/bitsandbytes/main/en/non_cuda_backends) for more details and feedback links.

</Tip>

<Tip>

\** bitsandbytes is seeking contributors to help develop and lead the Apple Silicon backend. Interested? Contact them directly via their repo. Stipends may be available through sponsorships.
**2:** bitsandbytes is seeking contributors to help develop and lead the Apple Silicon backend. Interested? Contact them directly via their repo. Stipends may be available through sponsorships.

</Tip>

<Tip>

**3:** GPTQModel[CPU] supports 4-bit via IPEX on Intel/AMD and full bit range via Torch on Intel/AMD/Apple Silicon.

</Tip>

<Tip>

**4:** GPTQModel[Intel GPU] via IPEX only supports 4-bit for Intel Datacenter Max/Arc GPUs.

</Tip>

<Tip>

**5:** torchao only supports int4 weight on Metal (Apple Silicon).

</Tip>

</Tip>

@ -16,7 +16,8 @@ rendered properly in your Markdown viewer.
Before you begin, make sure the following libraries are installed with their latest version:

```bash
pip install --upgrade torch torchao
# Updating 🤗 Transformers to the latest version, as the example script below uses the new auto compilation
pip install --upgrade torch torchao transformers
```

By default, the weights are loaded in full precision (torch.float32) regardless of the actual data type the weights are stored in, such as torch.float16. Set `torch_dtype="auto"` to load the weights in the data type defined in a model's `config.json` file to automatically load the most memory-optimal data type.
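
As a hedged sketch of what such a load can look like with torchao (the checkpoint name and the int4 weight-only settings are illustrative, not prescriptive):

```py
from transformers import AutoModelForCausalLM, AutoTokenizer, TorchAoConfig

model_name = "meta-llama/Meta-Llama-3-8B"  # illustrative checkpoint

# int4 weight-only quantization with group size 128 ("int4wo-128")
quantization_config = TorchAoConfig("int4_weight_only", group_size=128)
quantized_model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",  # pick up the dtype recorded in the model's config.json
    device_map="auto",
    quantization_config=quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
```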

@ -35,12 +36,8 @@ tokenizer = AutoTokenizer.from_pretrained(model_name)
input_text = "What are we having for dinner?"
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")

# compile the quantized model to get speedup
import torchao
torchao.quantization.utils.recommended_inductor_config_setter()
quantized_model = torch.compile(quantized_model, mode="max-autotune")

output = quantized_model.generate(**input_ids, max_new_tokens=10)
# auto-compile the quantized model with `cache_implementation="static"` to get speedup
output = quantized_model.generate(**input_ids, max_new_tokens=10, cache_implementation="static")
print(tokenizer.decode(output[0], skip_special_tokens=True))

# benchmark the performance
@ -59,11 +56,11 @@ def benchmark_fn(f, *args, **kwargs):
    return f"{(t0.blocked_autorange().mean):.3f}"

MAX_NEW_TOKENS = 1000
print("int4wo-128 model:", benchmark_fn(quantized_model.generate, **input_ids, max_new_tokens=MAX_NEW_TOKENS))
print("int4wo-128 model:", benchmark_fn(quantized_model.generate, **input_ids, max_new_tokens=MAX_NEW_TOKENS, cache_implementation="static"))

bf16_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="cuda", torch_dtype=torch.bfloat16)
bf16_model = torch.compile(bf16_model, mode="max-autotune")
print("bf16 model:", benchmark_fn(bf16_model.generate, **input_ids, max_new_tokens=MAX_NEW_TOKENS))
output = bf16_model.generate(**input_ids, max_new_tokens=10, cache_implementation="static")  # auto-compile
print("bf16 model:", benchmark_fn(bf16_model.generate, **input_ids, max_new_tokens=MAX_NEW_TOKENS, cache_implementation="static"))

```
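
The `benchmark_fn` helper appears only partially in the hunk above; a minimal sketch consistent with the `blocked_autorange()` call, built on `torch.utils.benchmark` (the exact body in the docs may differ), is:

```py
import torch
from torch.utils import benchmark


def benchmark_fn(f, *args, **kwargs):
    # Time `f(*args, **kwargs)` with PyTorch's benchmark Timer and report the mean runtime in seconds
    t0 = benchmark.Timer(
        stmt="f(*args, **kwargs)",
        globals={"f": f, "args": args, "kwargs": kwargs},
        num_threads=torch.get_num_threads(),
    )
    return f"{(t0.blocked_autorange().mean):.3f}"
```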

@ -553,6 +553,32 @@ All models are a standard [`tf.keras.Model`](https://www.tensorflow.org/api_docs
>>> model.fit(tf_dataset)  # doctest: +SKIP
```

## Chat with text generation models

If you're working with a model that generates text as an output, you can also engage in a multi-turn conversation with it through the `transformers-cli chat` command. This is the fastest way to interact with a model, e.g. for a qualitative assessment (aka vibe check).

This CLI is implemented on top of our `AutoClass` abstraction, leveraging our [text generation](llm_tutorial.md) and [chat](chat_templating.md) tooling, and thus will be compatible with any 🤗 Transformers model. If you have the library [installed](installation.md), you can launch the chat session on your terminal with

```bash
transformers-cli chat --model_name_or_path Qwen/Qwen2.5-0.5B-Instruct
```

For a full list of options to launch the chat, type

```bash
transformers-cli chat -h
```

After the chat is launched, you will enter an interactive session with the model. There are special commands for this session as well, such as `clear` to reset the conversation. Type `help` at any moment to display all special chat commands, and `exit` to terminate the session.
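
If you prefer to stay in Python, a rough equivalent of the CLI using the high-level `pipeline` API is sketched below (same checkpoint as the CLI example; the prompt is just an example):

```py
from transformers import pipeline

chatbot = pipeline("text-generation", model="Qwen/Qwen2.5-0.5B-Instruct")
messages = [{"role": "user", "content": "Summarize what the Transformers library does in one sentence."}]

# The pipeline applies the model's chat template and appends the assistant reply to the conversation
response = chatbot(messages, max_new_tokens=64)
print(response[0]["generated_text"][-1]["content"])
```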

## What's next?

Now that you've completed the 🤗 Transformers quick tour, check out our guides and learn how to do more specific things like writing a custom model, fine-tuning a model for a task, and how to train a model with a script. If you're interested in learning more about 🤗 Transformers core concepts, grab a cup of coffee and take a look at our Conceptual Guides!

@ -305,10 +305,7 @@ There are two types of language modeling:
...     for pred in preds
... ]
>>> preds
[{'score': 0.2236,
  'token': 1761,
  'token_str': ' platform',
  'sequence': 'Hugging Face is a community-based open-source platform for machine learning.'}]
[{'score': 0.224, 'token': 3944, 'token_str': ' tool', 'sequence': 'Hugging Face is a community-based open-source tool for machine learning.'}]
```
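
For context, `preds` comes from a fill-mask pipeline whose setup is elided by the hunk; a hedged sketch of it (the checkpoint is an assumed example) is:

```py
from transformers import pipeline

fill_masker = pipeline(task="fill-mask", model="google-bert/bert-base-uncased")
preds = fill_masker("Hugging Face is a community-based open-source [MASK] for machine learning.")

# keep only the fields shown in the output above, rounding the score
preds = [
    {
        "score": round(pred["score"], 4),
        "token": pred["token"],
        "token_str": pred["token_str"],
        "sequence": pred["sequence"],
    }
    for pred in preds
]
```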

## Multimodal

@ -80,7 +80,7 @@ Run inference with decoder-only models with the `text-generation` pipeline:
>>> prompt = "Hello, I'm a language model"

>>> generator(prompt, max_length = 30)
[{'generated_text': "Hello, I'm a language model programmer so you can use some of my stuff. But you also need some sort of a C program to run."}]
[{'generated_text': "Hello, I'm a language model. Not a programming language at all: it's pretty simple.\n\nWhen I write a function, I mean"}]
```

To run inference with an encoder-decoder, use the `text2text-generation` pipeline:
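
The encoder-decoder example itself is elided by the next hunk; a sketch of what it typically looks like (the Flan-T5 checkpoint is an assumed example):

```py
from transformers import pipeline

text2text_generator = pipeline("text2text-generation", model="google/flan-t5-base")
text2text_generator("Translate from English to French: I'm very happy")
# returns a list of dicts with a 'generated_text' key
```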

@ -258,7 +258,7 @@ also be a suitable location for instructions. Typically, it's better to place th

>>> for seq in sequences:
...     print(f"{seq['generated_text']}")
Permaculture is an ecological design mimicking natural ecosystems to meet basic needs and prepare for climate change. It is based on traditional knowledge and scientific understanding.
"Permaculture is an ecological design method that mimics natural ecosystems' diversity, functionality, and resilience using modern technology and indigenous knowledge. It aims to help"
```

#### Question answering

@ -284,7 +284,7 @@ the leading word or phrase (`"Answer:"`) to nudge the model to start generating

>>> for seq in sequences:
...     print(f"Result: {seq['generated_text']}")
Result: Modern tools often used to make gazpacho include
"Result: Modern tools are used, such as immersion blenders"
```

#### Reasoning

@ -309,7 +309,7 @@ Let's try if we can make a model reason about a simple arithmetics task with a b
>>> for seq in sequences:
...     print(f"Result: {seq['generated_text']}")
Result:
There are a total of 5 groups, so there are 5 x 4=20 students in the class.
There are a total of 50 students in the class (5 groups x 4 students per group = 20 groups, and
```

Correct! Let's increase the complexity a little and see if we can still get away with a basic prompt:
@ -117,8 +117,6 @@
|
||||
title: TFLite へのエクスポート
|
||||
- local: torchscript
|
||||
title: トーチスクリプトへのエクスポート
|
||||
- local: benchmarks
|
||||
title: ベンチマーク
|
||||
- local: community
|
||||
title: コミュニティリソース
|
||||
- local: custom_tools
|
||||
|
@ -1,381 +0,0 @@
|
||||
<!--
|
||||
Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ このファイルはMarkdownですが、Hugging Faceのdoc-builder(MDXに類似)向けの特定の構文を含んでいるため、
|
||||
Markdownビューアでは正しく表示されないことに注意してください。
|
||||
-->
|
||||
|
||||
# Benchmarks
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Hugging Faceのベンチマークツールは非推奨であり、Transformerモデルの速度とメモリの複雑さを測定するために外部のベンチマークライブラリを使用することをお勧めします。
|
||||
|
||||
</Tip>
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
🤗 Transformersモデルをベンチマークし、ベストプラクティス、すでに利用可能なベンチマークについて見てみましょう。
|
||||
|
||||
🤗 Transformersモデルをベンチマークする方法について詳しく説明したノートブックは[こちら](https://github.com/huggingface/notebooks/tree/main/examples/benchmark.ipynb)で利用できます。
|
||||
|
||||
## How to benchmark 🤗 Transformers models
|
||||
|
||||
[`PyTorchBenchmark`]クラスと[`TensorFlowBenchmark`]クラスを使用すると、🤗 Transformersモデルを柔軟にベンチマークできます。
|
||||
ベンチマーククラスを使用すると、_ピークメモリ使用量_ および _必要な時間_ を _推論_ および _トレーニング_ の両方について測定できます。
|
||||
|
||||
<Tip>
|
||||
|
||||
ここでの _推論_ は、単一のフォワードパスによって定義され、 _トレーニング_ は単一のフォワードパスと
|
||||
バックワードパスによって定義されます。
|
||||
|
||||
</Tip>
|
||||
|
||||
ベンチマーククラス[`PyTorchBenchmark`]と[`TensorFlowBenchmark`]は、それぞれのベンチマーククラスに対する適切な設定を含む [`PyTorchBenchmarkArguments`] および [`TensorFlowBenchmarkArguments`] タイプのオブジェクトを必要とします。
|
||||
[`PyTorchBenchmarkArguments`] および [`TensorFlowBenchmarkArguments`] はデータクラスであり、それぞれのベンチマーククラスに対するすべての関連する設定を含んでいます。
|
||||
次の例では、タイプ _bert-base-cased_ のBERTモデルをベンチマークする方法が示されています。
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```py
|
||||
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
|
||||
|
||||
>>> args = PyTorchBenchmarkArguments(models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512])
|
||||
>>> benchmark = PyTorchBenchmark(args)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```py
|
||||
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
|
||||
|
||||
>>> args = TensorFlowBenchmarkArguments(
|
||||
... models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> benchmark = TensorFlowBenchmark(args)
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
|
||||
ここでは、ベンチマーク引数のデータクラスに対して、`models`、`batch_sizes`
|
||||
および`sequence_lengths`の3つの引数が指定されています。引数`models`は必須で、
|
||||
[モデルハブ](https://huggingface.co/models)からのモデル識別子の`リスト`を期待し
|
||||
ます。`batch_sizes`と`sequence_lengths`の2つの`リスト`引数は
|
||||
モデルのベンチマーク対象となる`input_ids`のサイズを定義します。
|
||||
ベンチマーク引数データクラスを介して設定できる他の多くのパラメータがあります。これらの詳細については、直接ファイル
|
||||
`src/transformers/benchmark/benchmark_args_utils.py`、
|
||||
`src/transformers/benchmark/benchmark_args.py`(PyTorch用)、および`src/transformers/benchmark/benchmark_args_tf.py`(Tensorflow用)
|
||||
を参照するか、次のシェルコマンドをルートから実行すると、PyTorchとTensorflowのそれぞれに対して設定可能なすべてのパラメータの記述的なリストが表示されます。
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```bash
|
||||
python examples/pytorch/benchmarking/run_benchmark.py --help
|
||||
```
|
||||
|
||||
インスタンス化されたベンチマークオブジェクトは、単に `benchmark.run()` を呼び出すことで実行できます。
|
||||
|
||||
|
||||
```py
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 0.006
|
||||
google-bert/bert-base-uncased 8 32 0.006
|
||||
google-bert/bert-base-uncased 8 128 0.018
|
||||
google-bert/bert-base-uncased 8 512 0.088
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 1227
|
||||
google-bert/bert-base-uncased 8 32 1281
|
||||
google-bert/bert-base-uncased 8 128 1307
|
||||
google-bert/bert-base-uncased 8 512 1539
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: PyTorch
|
||||
- use_torchscript: False
|
||||
- framework_version: 1.4.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 08:58:43.371351
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```bash
|
||||
python examples/tensorflow/benchmarking/run_benchmark_tf.py --help
|
||||
```
|
||||
|
||||
インスタンス化されたベンチマークオブジェクトは、単に `benchmark.run()` を呼び出すことで実行できます。
|
||||
|
||||
|
||||
|
||||
```py
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 0.005
|
||||
google-bert/bert-base-uncased 8 32 0.008
|
||||
google-bert/bert-base-uncased 8 128 0.022
|
||||
google-bert/bert-base-uncased 8 512 0.105
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 1330
|
||||
google-bert/bert-base-uncased 8 32 1330
|
||||
google-bert/bert-base-uncased 8 128 1330
|
||||
google-bert/bert-base-uncased 8 512 1770
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: Tensorflow
|
||||
- use_xla: False
|
||||
- framework_version: 2.2.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:26:35.617317
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
デフォルトでは、_推論時間_ と _必要なメモリ_ がベンチマークされます。
|
||||
上記の例の出力では、最初の2つのセクションが _推論時間_ と _推論メモリ_
|
||||
に対応する結果を示しています。さらに、計算環境に関するすべての関連情報、
|
||||
例えば GPU タイプ、システム、ライブラリのバージョンなどが、_ENVIRONMENT INFORMATION_ の下に表示されます。この情報は、[`PyTorchBenchmarkArguments`]
|
||||
および [`TensorFlowBenchmarkArguments`] に引数 `save_to_csv=True`
|
||||
を追加することで、オプションで _.csv_ ファイルに保存することができます。この場合、各セクションは別々の _.csv_ ファイルに保存されます。_.csv_
|
||||
ファイルへのパスは、データクラスの引数を使用してオプションで定義できます。
|
||||
|
||||
モデル識別子、例えば `google-bert/bert-base-uncased` を使用して事前学習済みモデルをベンチマークする代わりに、利用可能な任意のモデルクラスの任意の設定をベンチマークすることもできます。この場合、ベンチマーク引数と共に設定の `list` を挿入する必要があります。
|
||||
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```py
|
||||
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments, BertConfig
|
||||
|
||||
>>> args = PyTorchBenchmarkArguments(
|
||||
... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> config_base = BertConfig()
|
||||
>>> config_384_hid = BertConfig(hidden_size=384)
|
||||
>>> config_6_lay = BertConfig(num_hidden_layers=6)
|
||||
|
||||
>>> benchmark = PyTorchBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
|
||||
>>> benchmark.run()
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 128 0.006
|
||||
bert-base 8 512 0.006
|
||||
bert-base 8 128 0.018
|
||||
bert-base 8 512 0.088
|
||||
bert-384-hid 8 8 0.006
|
||||
bert-384-hid 8 32 0.006
|
||||
bert-384-hid 8 128 0.011
|
||||
bert-384-hid 8 512 0.054
|
||||
bert-6-lay 8 8 0.003
|
||||
bert-6-lay 8 32 0.004
|
||||
bert-6-lay 8 128 0.009
|
||||
bert-6-lay 8 512 0.044
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 8 1277
|
||||
bert-base 8 32 1281
|
||||
bert-base 8 128 1307
|
||||
bert-base 8 512 1539
|
||||
bert-384-hid 8 8 1005
|
||||
bert-384-hid 8 32 1027
|
||||
bert-384-hid 8 128 1035
|
||||
bert-384-hid 8 512 1255
|
||||
bert-6-lay 8 8 1097
|
||||
bert-6-lay 8 32 1101
|
||||
bert-6-lay 8 128 1127
|
||||
bert-6-lay 8 512 1359
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: PyTorch
|
||||
- use_torchscript: False
|
||||
- framework_version: 1.4.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:35:25.143267
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```py
|
||||
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments, BertConfig
|
||||
|
||||
>>> args = TensorFlowBenchmarkArguments(
|
||||
... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> config_base = BertConfig()
|
||||
>>> config_384_hid = BertConfig(hidden_size=384)
|
||||
>>> config_6_lay = BertConfig(num_hidden_layers=6)
|
||||
|
||||
>>> benchmark = TensorFlowBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
|
||||
>>> benchmark.run()
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 8 0.005
|
||||
bert-base 8 32 0.008
|
||||
bert-base 8 128 0.022
|
||||
bert-base 8 512 0.106
|
||||
bert-384-hid 8 8 0.005
|
||||
bert-384-hid 8 32 0.007
|
||||
bert-384-hid 8 128 0.018
|
||||
bert-384-hid 8 512 0.064
|
||||
bert-6-lay 8 8 0.002
|
||||
bert-6-lay 8 32 0.003
|
||||
bert-6-lay 8 128 0.0011
|
||||
bert-6-lay 8 512 0.074
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 8 1330
|
||||
bert-base 8 32 1330
|
||||
bert-base 8 128 1330
|
||||
bert-base 8 512 1770
|
||||
bert-384-hid 8 8 1330
|
||||
bert-384-hid 8 32 1330
|
||||
bert-384-hid 8 128 1330
|
||||
bert-384-hid 8 512 1540
|
||||
bert-6-lay 8 8 1330
|
||||
bert-6-lay 8 32 1330
|
||||
bert-6-lay 8 128 1330
|
||||
bert-6-lay 8 512 1540
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: Tensorflow
|
||||
- use_xla: False
|
||||
- framework_version: 2.2.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:38:15.487125
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
カスタマイズされたBertModelクラスの構成に対する推論時間と必要なメモリのベンチマーク
|
||||
|
||||
この機能は、モデルをトレーニングする際にどの構成を選択すべきかを決定する際に特に役立つことがあります。
|
||||
|
||||
## Benchmark best practices
|
||||
|
||||
このセクションでは、モデルをベンチマークする際に注意すべきいくつかのベストプラクティスをリストアップしています。
|
||||
|
||||
- 現在、単一デバイスのベンチマークしかサポートされていません。GPUでベンチマークを実行する場合、コードを実行するデバイスをユーザーが指定することを推奨します。
|
||||
これはシェルで`CUDA_VISIBLE_DEVICES`環境変数を設定することで行えます。例:`export CUDA_VISIBLE_DEVICES=0`を実行してからコードを実行します。
|
||||
- `no_multi_processing`オプションは、テストおよびデバッグ用にのみ`True`に設定すべきです。正確なメモリ計測を確保するために、各メモリベンチマークを別々のプロセスで実行することをお勧めします。これにより、`no_multi_processing`が`True`に設定されます。
|
||||
- モデルのベンチマーク結果を共有する際には、常に環境情報を記述するべきです。異なるGPUデバイス、ライブラリバージョンなどでベンチマーク結果が大きく異なる可能性があるため、ベンチマーク結果単体ではコミュニティにとってあまり有用ではありません。
|
||||
|
||||
## Sharing your benchmark
|
||||
|
||||
以前、すべての利用可能なコアモデル(当時10モデル)に対して、多くの異なる設定で推論時間のベンチマークが行われました:PyTorchを使用し、TorchScriptの有無、TensorFlowを使用し、XLAの有無などです。これらのテストはすべてCPUで行われました(TensorFlow XLAを除く)。
|
||||
|
||||
このアプローチの詳細については、[次のブログポスト](https://medium.com/huggingface/benchmarking-transformers-pytorch-and-tensorflow-e2917fb891c2)に詳しく説明されており、結果は[こちら](https://docs.google.com/spreadsheets/d/1sryqufw2D0XlUH4sq3e9Wnxu5EAQkaohzrJbd5HdQ_w/edit?usp=sharing)で利用できます。
|
||||
|
||||
新しいベンチマークツールを使用すると、コミュニティとベンチマーク結果を共有することがこれまで以上に簡単になります。
|
||||
|
||||
- [PyTorchベンチマーク結果](https://github.com/huggingface/transformers/tree/main/examples/pytorch/benchmarking/README.md)。
|
||||
- [TensorFlowベンチマーク結果](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/benchmarking/README.md)。
|
@ -218,9 +218,13 @@ that\'s a terrible feeling."']
|
||||
|
||||
貪欲探索とは異なり、ビームサーチデコーディングは各時間ステップでいくつかの仮説を保持し、最終的にシーケンス全体で最も確率が高い仮説を選択します。これにより、貪欲探索では無視されてしまう初期トークンの確率が低い高確率のシーケンスを特定する利点があります。
|
||||
|
||||
このデコーディング戦略を有効にするには、`num_beams`(追跡する仮説の数)を1よりも大きな値に指定します。
|
||||
<a href="https://huggingface.co/spaces/m-ric/beam_search_visualizer" class="flex flex-col justify-center">
|
||||
<img style="max-width: 90%; margin: auto;" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/beam_search.png"/>
|
||||
</a>
|
||||
|
||||
希望されるテキストの翻訳がお手伝いできて嬉しいです!もしさらなる質問やサポートが必要な場合は、お気軽にお知らせください。
|
||||
ビームサーチデコーディングの動作を[このインタラクティブデモ](https://huggingface.co/spaces/m-ric/beam_search_visualizer)で確認することができます。文章を入力し、パラメータをいじることでデコーディングビームがどのように変化するかを知ることができます。
|
||||
|
||||
このデコーディング戦略を有効にするには、`num_beams`(追跡する仮説の数)を1よりも大きな値に指定します。
|
||||
|
||||
```python
|
||||
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
@ -23,31 +23,28 @@ Lili Chen, Kevin Lu, Aravind Rajeswaran, Kimin Lee, Aditya Grover, Michael Laski
|
||||
|
||||
論文の要約は次のとおりです。
|
||||
|
||||
*強化学習(RL)をシーケンスモデリング問題として抽象化するフレームワークを紹介します。
|
||||
_強化学習(RL)をシーケンスモデリング問題として抽象化するフレームワークを紹介します。
|
||||
これにより、Transformer アーキテクチャのシンプルさとスケーラビリティ、および関連する進歩を活用できるようになります。
|
||||
GPT-x や BERT などの言語モデリングで。特に、Decision Transformer というアーキテクチャを紹介します。
|
||||
RL の問題を条件付きシーケンス モデリングとして投げかけます。値関数に適合する以前の RL アプローチとは異なり、
|
||||
ポリシー勾配を計算すると、Decision Transformer は因果的にマスクされたアルゴリズムを利用して最適なアクションを出力するだけです。
|
||||
変成器。望ましいリターン (報酬)、過去の状態、アクションに基づいて自己回帰モデルを条件付けすることにより、
|
||||
Decision Transformer モデルは、望ましいリターンを達成する将来のアクションを生成できます。そのシンプルさにも関わらず、
|
||||
Decision Transformer は、最先端のモデルフリーのオフライン RL ベースラインのパフォーマンスと同等、またはそれを超えています。
|
||||
Atari、OpenAI Gym、Key-to-Door タスク*
|
||||
GPT-x や BERT などの言語モデリングで。特に、Decision Transformer というアーキテクチャを紹介します。
|
||||
RL の問題を条件付きシーケンス モデリングとして投げかけます。値関数に適合する以前の RL アプローチとは異なり、
|
||||
ポリシー勾配を計算すると、Decision Transformer は因果的にマスクされたアルゴリズムを利用して最適なアクションを出力するだけです。
|
||||
変成器。望ましいリターン (報酬)、過去の状態、アクションに基づいて自己回帰モデルを条件付けすることにより、
|
||||
Decision Transformer モデルは、望ましいリターンを達成する将来のアクションを生成できます。そのシンプルさにも関わらず、
|
||||
Decision Transformer は、最先端のモデルフリーのオフライン RL ベースラインのパフォーマンスと同等、またはそれを超えています。
|
||||
Atari、OpenAI Gym、Key-to-Door タスク_
|
||||
|
||||
このバージョンのモデルは、状態がベクトルであるタスク用です。
|
||||
|
||||
このモデルは、[edbeeching](https://huggingface.co/edbeeching) によって提供されました。元のコードは [ここ](https://github.com/kzl/decion-transformer) にあります。
|
||||
このモデルは、[edbeeching](https://huggingface.co/edbeeching) によって提供されました。元のコードは [ここ](https://github.com/kzl/decision-transformer) にあります。
|
||||
|
||||
## DecisionTransformerConfig
|
||||
|
||||
[[autodoc]] DecisionTransformerConfig
|
||||
|
||||
|
||||
## DecisionTransformerGPT2Model
|
||||
|
||||
[[autodoc]] DecisionTransformerGPT2Model
|
||||
- forward
|
||||
[[autodoc]] DecisionTransformerGPT2Model - forward
|
||||
|
||||
## DecisionTransformerModel
|
||||
|
||||
[[autodoc]] DecisionTransformerModel
|
||||
- forward
|
||||
[[autodoc]] DecisionTransformerModel - forward
|
||||
|
@ -127,8 +127,6 @@
|
||||
title: TFLite로 내보내기
|
||||
- local: torchscript
|
||||
title: TorchScript로 내보내기
|
||||
- local: in_translation
|
||||
title: (번역중) Benchmarks
|
||||
- local: in_translation
|
||||
title: (번역중) Notebooks with examples
|
||||
- local: community
|
||||
@ -152,7 +150,7 @@
|
||||
- local: in_translation
|
||||
title: (번역중) AQLM
|
||||
- local: in_translation
|
||||
title: (번역중) VPTQ
|
||||
title: (번역중) VPTQ
|
||||
- local: quantization/quanto
|
||||
title: Quanto
|
||||
- local: quantization/eetq
|
||||
|
@ -95,8 +95,6 @@
|
||||
title: Eksport ke ONNX
|
||||
- local: torchscript
|
||||
title: Eksport ke TorchScript
|
||||
- local: benchmarks
|
||||
title: Penanda aras
|
||||
- local: Buku nota dengan contoh
|
||||
title: Notebooks with examples
|
||||
- local: Sumber komuniti
|
||||
|
@ -52,8 +52,6 @@
|
||||
title: 导出为 TFLite
|
||||
- local: torchscript
|
||||
title: 导出为 TorchScript
|
||||
- local: benchmarks
|
||||
title: 对模型进行基准测试
|
||||
- local: gguf
|
||||
title: 与 GGUF 格式的互操作性
|
||||
- local: tiktoken
|
||||
@ -166,7 +164,4 @@
|
||||
- local: internal/time_series_utils
|
||||
title: 时序数据工具
|
||||
title: 内部辅助工具
|
||||
title: 应用程序接口 (API)
|
||||
|
||||
|
||||
|
||||
title: 应用程序接口 (API)
|
||||
|
@ -1,377 +0,0 @@
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# 基准测试
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
小提示:Hugging Face的基准测试工具已经不再更新,建议使用外部基准测试库来衡量Transformer模
|
||||
型的速度和内存复杂度。
|
||||
|
||||
</Tip>
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
让我们来看看如何对🤗 Transformers模型进行基准测试,以及进行测试的推荐策略和已有的基准测试结果。
|
||||
|
||||
如果您需要更详细的回答,可以在[这里](https://github.com/huggingface/notebooks/tree/main/examples/benchmark.ipynb)找到更多关于基准测试的内容。
|
||||
|
||||
|
||||
## 如何对🤗 Transformers模型进行基准测试
|
||||
|
||||
使用[`PyTorchBenchmark`]和[`TensorFlowBenchmark`]类可以灵活地对🤗 Transformers模型进行基准测试。这些基准测试类可以衡量模型在**推理**和**训练**过程中所需的**峰值内存**和**时间**。
|
||||
|
||||
<Tip>
|
||||
|
||||
这里的**推理**指的是一次前向传播(forward pass),而训练则指一次前向传播和反向传播(backward pass)。
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
基准测试类 [`PyTorchBenchmark`] 和 [`TensorFlowBenchmark`] 需要分别传入 [`PyTorchBenchmarkArguments`] 和 [`TensorFlowBenchmarkArguments`] 类型的对象来进行实例化。这些类是数据类型,包含了所有相关的配置参数,用于其对应的基准测试类。
|
||||
|
||||
在下面的示例中,我们展示了如何对类型为 **bert-base-cased** 的BERT模型进行基准测试:
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```py
|
||||
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
|
||||
|
||||
>>> args = PyTorchBenchmarkArguments(models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512])
|
||||
>>> benchmark = PyTorchBenchmark(args)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```py
|
||||
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
|
||||
|
||||
>>> args = TensorFlowBenchmarkArguments(
|
||||
... models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> benchmark = TensorFlowBenchmark(args)
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
在这里,基准测试的参数数据类接受了三个主要的参数,即 `models`、`batch_sizes` 和`sequence_lengths`。其中,`models` 是必需的参数,它期望一个来自[模型库](https://huggingface.co/models)的模型标识符列表。`batch_sizes` 和 `sequence_lengths` 是列表类型的参数,定义了进行基准测试时 `input_ids` 的批量大小和序列长度。
|
||||
|
||||
这些是基准测试数据类中可以配置的一些主要参数。除此之外,基准测试数据类中还可以配置很多其他参数。如需要查看更详细的配置参数,可以直接查看以下文件:
|
||||
|
||||
* `src/transformers/benchmark/benchmark_args_utils.py`
|
||||
* `src/transformers/benchmark/benchmark_args.py`(针对 PyTorch)
|
||||
* `src/transformers/benchmark/benchmark_args_tf.py`(针对 TensorFlow)
|
||||
|
||||
另外,您还可以通过在根目录下运行以下命令,查看针对 PyTorch 和 TensorFlow 的所有可配置参数的描述列表:
|
||||
```bash
python examples/pytorch/benchmarking/run_benchmark.py --help
```
|
||||
这些命令将列出所有可以配置的参数,它们可以帮助您更加灵活地进行基准测试。
|
||||
|
||||
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
以下代码通过`PyTorchBenchmarkArguments`设置模型批处理大小和序列长度,然后调用`benchmark.run()`执行基准测试。
|
||||
|
||||
```py
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 0.006
|
||||
google-bert/bert-base-uncased 8 32 0.006
|
||||
google-bert/bert-base-uncased 8 128 0.018
|
||||
google-bert/bert-base-uncased 8 512 0.088
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 1227
|
||||
google-bert/bert-base-uncased 8 32 1281
|
||||
google-bert/bert-base-uncased 8 128 1307
|
||||
google-bert/bert-base-uncased 8 512 1539
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: PyTorch
|
||||
- use_torchscript: False
|
||||
- framework_version: 1.4.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 08:58:43.371351
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```bash
|
||||
python examples/tensorflow/benchmarking/run_benchmark_tf.py --help
|
||||
```
|
||||
|
||||
接下来,只需要调用 `benchmark.run()` 就能轻松运行已经实例化的基准测试对象。
|
||||
|
||||
```py
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 0.005
|
||||
google-bert/bert-base-uncased 8 32 0.008
|
||||
google-bert/bert-base-uncased 8 128 0.022
|
||||
google-bert/bert-base-uncased 8 512 0.105
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 1330
|
||||
google-bert/bert-base-uncased 8 32 1330
|
||||
google-bert/bert-base-uncased 8 128 1330
|
||||
google-bert/bert-base-uncased 8 512 1770
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: Tensorflow
|
||||
- use_xla: False
|
||||
- framework_version: 2.2.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:26:35.617317
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
|
||||
|
||||
在一般情况下,基准测试会测量推理(inference)的**时间**和**所需内存**。在上面的示例输出中,前两部分显示了与**推理时间**和**推理内存**对应的结果。与此同时,关于计算环境的所有相关信息(例如 GPU 类型、系统、库版本等)会在第三部分的**环境信息**中打印出来。你可以通过在 [`PyTorchBenchmarkArguments`] 和 [`TensorFlowBenchmarkArguments`] 中添加 `save_to_csv=True`参数,将这些信息保存到一个 .csv 文件中。在这种情况下,每一部分的信息会分别保存在不同的 .csv 文件中。每个 .csv 文件的路径也可以通过参数数据类进行定义。
|
||||
|
||||
|
||||
您可以选择不通过预训练模型的模型标识符(如 `google-bert/bert-base-uncased`)进行基准测试,而是对任何可用模型类的任意配置进行基准测试。在这种情况下,我们必须将一系列配置与基准测试参数一起传入,方法如下:
|
||||
|
||||
<frameworkcontent>
<pt>

```py
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments, BertConfig

>>> args = PyTorchBenchmarkArguments(
...     models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
... )
>>> config_base = BertConfig()
>>> config_384_hid = BertConfig(hidden_size=384)
>>> config_6_lay = BertConfig(num_hidden_layers=6)

>>> benchmark = PyTorchBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
>>> benchmark.run()
==================== INFERENCE - SPEED - RESULT ====================
--------------------------------------------------------------------------------
Model Name             Batch Size     Seq Length     Time in s
--------------------------------------------------------------------------------
bert-base                  8              8             0.006
bert-base                  8              32            0.006
bert-base                  8             128            0.018
bert-base                  8             512            0.088
bert-384-hid               8              8             0.006
bert-384-hid               8              32            0.006
bert-384-hid               8             128            0.011
bert-384-hid               8             512            0.054
bert-6-lay                 8              8             0.003
bert-6-lay                 8              32            0.004
bert-6-lay                 8             128            0.009
bert-6-lay                 8             512            0.044
--------------------------------------------------------------------------------

==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name             Batch Size     Seq Length    Memory in MB
--------------------------------------------------------------------------------
bert-base                  8              8             1277
bert-base                  8              32            1281
bert-base                  8             128            1307
bert-base                  8             512            1539
bert-384-hid               8              8             1005
bert-384-hid               8              32            1027
bert-384-hid               8             128            1035
bert-384-hid               8             512            1255
bert-6-lay                 8              8             1097
bert-6-lay                 8              32            1101
bert-6-lay                 8             128            1127
bert-6-lay                 8             512            1359
--------------------------------------------------------------------------------

==================== ENVIRONMENT INFORMATION ====================

- transformers_version: 2.11.0
- framework: PyTorch
- use_torchscript: False
- framework_version: 1.4.0
- python_version: 3.6.10
- system: Linux
- cpu: x86_64
- architecture: 64bit
- date: 2020-06-29
- time: 09:35:25.143267
- fp16: False
- use_multiprocessing: True
- only_pretrain_model: False
- cpu_ram_mb: 32088
- use_gpu: True
- num_gpus: 1
- gpu: TITAN RTX
- gpu_ram_mb: 24217
- gpu_power_watts: 280.0
- gpu_performance_state: 2
- use_tpu: False
```

</pt>
<tf>

```py
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments, BertConfig

>>> args = TensorFlowBenchmarkArguments(
...     models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
... )
>>> config_base = BertConfig()
>>> config_384_hid = BertConfig(hidden_size=384)
>>> config_6_lay = BertConfig(num_hidden_layers=6)

>>> benchmark = TensorFlowBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
>>> benchmark.run()
==================== INFERENCE - SPEED - RESULT ====================
--------------------------------------------------------------------------------
Model Name             Batch Size     Seq Length     Time in s
--------------------------------------------------------------------------------
bert-base                  8              8             0.005
bert-base                  8              32            0.008
bert-base                  8             128            0.022
bert-base                  8             512            0.106
bert-384-hid               8              8             0.005
bert-384-hid               8              32            0.007
bert-384-hid               8             128            0.018
bert-384-hid               8             512            0.064
bert-6-lay                 8              8             0.002
bert-6-lay                 8              32            0.003
bert-6-lay                 8             128            0.0011
bert-6-lay                 8             512            0.074
--------------------------------------------------------------------------------

==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name             Batch Size     Seq Length    Memory in MB
--------------------------------------------------------------------------------
bert-base                  8              8             1330
bert-base                  8              32            1330
bert-base                  8             128            1330
bert-base                  8             512            1770
bert-384-hid               8              8             1330
bert-384-hid               8              32            1330
bert-384-hid               8             128            1330
bert-384-hid               8             512            1540
bert-6-lay                 8              8             1330
bert-6-lay                 8              32            1330
bert-6-lay                 8             128            1330
bert-6-lay                 8             512            1540
--------------------------------------------------------------------------------

==================== ENVIRONMENT INFORMATION ====================

- transformers_version: 2.11.0
- framework: Tensorflow
- use_xla: False
- framework_version: 2.2.0
- python_version: 3.6.10
- system: Linux
- cpu: x86_64
- architecture: 64bit
- date: 2020-06-29
- time: 09:38:15.487125
- fp16: False
- use_multiprocessing: True
- only_pretrain_model: False
- cpu_ram_mb: 32088
- use_gpu: True
- num_gpus: 1
- gpu: TITAN RTX
- gpu_ram_mb: 24217
- gpu_power_watts: 280.0
- gpu_performance_state: 2
- use_tpu: False
```

</tf>
</frameworkcontent>

Again, **inference time** and **required inference memory** are measured, but this time for customized configurations of the `BertModel` class. This feature can be especially useful when deciding which configuration a model should be trained with.

## Benchmark best practices

This section lists a couple of recommended strategies to be aware of when benchmarking a model:

* Currently, only single-device benchmarking is supported. When benchmarking on GPU, it is recommended that you specify on which device the code should run by setting the `CUDA_VISIBLE_DEVICES` environment variable, e.g. by running `export CUDA_VISIBLE_DEVICES=0` before launching the code (see the sketch after this list).
* The option `no_multi_processing` should only be set to `True` for testing and debugging. To ensure accurate memory measurement, it is recommended to run each memory benchmark in a separate process, making sure `no_multi_processing` is set to `True`.
* You should always share the environment information when sharing model benchmark results. Results can vary heavily between GPU devices, library versions, etc., so benchmark numbers on their own are of limited use to the community.

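As an illustration only (a minimal sketch, not part of the original guide; it uses `os.environ` instead of the shell `export`, and only the argument names already shown earlier on this page):

```py
import os

# Pin the benchmark to a single GPU, equivalent to `export CUDA_VISIBLE_DEVICES=0`.
# This must be set before any CUDA context is created.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["google-bert/bert-base-uncased"],
    batch_sizes=[8],
    sequence_lengths=[8, 32, 128, 512],
)
benchmark = PyTorchBenchmark(args)
results = benchmark.run()
print(results)
```
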
## Sharing your benchmark results

Previously, all available core models (10 at the time) were benchmarked for **inference time** across a variety of settings: using PyTorch (with and without TorchScript) and using TensorFlow (with and without XLA). All of those tests were done on CPU (except for TensorFlow XLA) and on GPU.

The approach is described in detail in [this blog post](https://medium.com/huggingface/benchmarking-transformers-pytorch-and-tensorflow-e2917fb891c2), and the results are available [here](https://docs.google.com/spreadsheets/d/1sryqufw2D0XlUH4sq3e9Wnxu5EAQkaohzrJbd5HdQ_w/edit?usp=sharing).

With the new **benchmark** tools, sharing your benchmark results with the community is easier than ever:

- [PyTorch benchmarking results](https://github.com/huggingface/transformers/tree/main/examples/pytorch/benchmarking/README.md)
- [TensorFlow benchmarking results](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/benchmarking/README.md)

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
-->

# Language model training examples
# Language model training and inference examples

The following example showcases how to train a language model from scratch
using the JAX/Flax backend.

@ -542,3 +542,27 @@ python3 -m torch.distributed.launch --nproc_per_node ${NUM_GPUS} run_mlm.py \
    --report_to="tensorboard" \
    --save_strategy="no"
```

## Language model inference with bfloat16

The following example demonstrates performing inference with a language model using the JAX/Flax backend.

The example script run_bert_flax.py uses bert-base-uncased, and the model is loaded into `FlaxBertModel`.
The input data are randomly generated tokens, and the model is also jitted with JAX.
By default, it uses float32 precision for inference. To enable bfloat16, add the flag shown in the command below.

```bash
python3 run_bert_flax.py --precision bfloat16
```

> NOTE: For JAX versions v0.4.33 or later, users will need to set the environment variable below as a
> temporary workaround to use the bfloat16 datatype.
> This restriction is expected to be removed in a future version.

```bash
export XLA_FLAGS=--xla_cpu_use_thunk_runtime=false
```

bfloat16 gives better performance on GPUs and also on Intel CPUs (Sapphire Rapids or later) with Advanced Matrix Extensions (Intel AMX).
By changing the dtype of `FlaxBertModel` to `jax.numpy.bfloat16`, you get the performance benefits of the underlying hardware.

```python
import jax
from transformers import FlaxBertModel

# `config` is the BertConfig instance used elsewhere in the script
model = FlaxBertModel.from_pretrained("bert-base-uncased", config=config, dtype=jax.numpy.bfloat16)
```

Switching from float32 to bfloat16 can increase the speed of an AWS c7i.4xlarge instance with Intel Sapphire Rapids by more than 2x.

examples/flax/language-modeling/run_bert_flax.py (new file)
@ -0,0 +1,56 @@
#!/usr/bin/env python3
import time
from argparse import ArgumentParser

import jax
import numpy as np

from transformers import BertConfig, FlaxBertModel


parser = ArgumentParser()
parser.add_argument("--precision", type=str, choices=["float32", "bfloat16"], default="float32")
args = parser.parse_args()

dtype = jax.numpy.float32
if args.precision == "bfloat16":
    dtype = jax.numpy.bfloat16

VOCAB_SIZE = 30522
BS = 32
SEQ_LEN = 128


def get_input_data(batch_size=1, seq_length=384):
    shape = (batch_size, seq_length)
    input_ids = np.random.randint(1, VOCAB_SIZE, size=shape).astype(np.int32)
    token_type_ids = np.ones(shape).astype(np.int32)
    attention_mask = np.ones(shape).astype(np.int32)
    return {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}


inputs = get_input_data(BS, SEQ_LEN)
config = BertConfig.from_pretrained("bert-base-uncased", hidden_act="gelu_new")
model = FlaxBertModel.from_pretrained("bert-base-uncased", config=config, dtype=dtype)


@jax.jit
def func():
    outputs = model(**inputs)
    return outputs


(nwarmup, nbenchmark) = (5, 100)

# warmup
for _ in range(nwarmup):
    func()

# benchmark
start = time.time()
for _ in range(nbenchmark):
    func()
end = time.time()
print(end - start)
print(f"Throughput: {((nbenchmark * BS)/(end-start)):.3f} examples/sec")

@ -61,7 +61,7 @@ from transformers.utils import check_min_version, send_example_telemetry
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

Array = Any
Dataset = datasets.arrow_dataset.Dataset

@ -60,7 +60,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=2.14.0", "To fix: pip install -r examples/flax/speech-recognition/requirements.txt")


@ -56,7 +56,7 @@ from transformers.utils import check_min_version, send_example_telemetry

logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

Array = Any
Dataset = datasets.arrow_dataset.Dataset

@ -57,7 +57,7 @@ from transformers.utils.versions import require_version

logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")

examples/modular-transformers/modeling_add_function.py (new file)
@ -0,0 +1,66 @@
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from examples/modular-transformers/modular_add_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_add_function.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Note that zamba does not have the `apply_rotary_pos_emb` function!
from typing import Optional, Tuple

import torch
from torch import nn


def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


class TestAttention(nn.Module):
    """
    Multi-headed attention from 'Attention Is All You Need' paper. Modified to use sliding window attention: Longformer
    and "Generating Long Sequences with Sparse Transformers".

    Adapted from transformers.models.mistral.modeling_mistral.MistralAttention:
    The input dimension here is attention_hidden_size = 2 * hidden_size, and head_dim = attention_hidden_size // num_heads.
    The extra factor of 2 comes from the input being the concatenation of original_hidden_states with the output of the previous (mamba) layer
    (see fig. 2 in https://arxiv.org/pdf/2405.16712).
    Additionally, replaced
    attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) with
    attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim/2)
    """

    def __init__(self):
        pass

    def forward(self) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        _ = apply_rotary_pos_emb(1, 1, 1, 1)
@ -45,13 +45,8 @@ class DummyRMSNorm(nn.Module):


class DummyRotaryEmbedding(nn.Module):
    def __init__(
        self,
        config: DummyConfig,
        device=None,
    ):
    def __init__(self, config: DummyConfig, device=None):
        super().__init__()
        self.rope_kwargs = {}
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
@ -63,7 +58,7 @@ class DummyRotaryEmbedding(nn.Module):
        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

@ -75,13 +70,14 @@ class DummyRotaryEmbedding(nn.Module):
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(
                self.config, device, seq_len=seq_len, **self.rope_kwargs
            )
            inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            # This .to() is needed if the model has been moved to a device after being initialized (because
            # the buffer is automatically moved, but not the original copy)
            self.original_inv_freq = self.original_inv_freq.to(device)
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

@ -356,6 +352,7 @@ class DummyPreTrainedModel(PreTrainedModel):
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

@ -45,13 +45,8 @@ class Multimodal1TextRMSNorm(nn.Module):


class Multimodal1TextRotaryEmbedding(nn.Module):
    def __init__(
        self,
        config: Multimodal1TextConfig,
        device=None,
    ):
    def __init__(self, config: Multimodal1TextConfig, device=None):
        super().__init__()
        self.rope_kwargs = {}
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
@ -63,7 +58,7 @@ class Multimodal1TextRotaryEmbedding(nn.Module):
        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

@ -75,13 +70,14 @@ class Multimodal1TextRotaryEmbedding(nn.Module):
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(
                self.config, device, seq_len=seq_len, **self.rope_kwargs
            )
            inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            # This .to() is needed if the model has been moved to a device after being initialized (because
            # the buffer is automatically moved, but not the original copy)
            self.original_inv_freq = self.original_inv_freq.to(device)
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

@ -356,6 +352,7 @@ class Multimodal1TextPreTrainedModel(PreTrainedModel):
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

@ -61,13 +61,8 @@ class MyNewModel2MLP(nn.Module):


class MyNewModel2RotaryEmbedding(nn.Module):
    def __init__(
        self,
        config: MyNewModel2Config,
        device=None,
    ):
    def __init__(self, config: MyNewModel2Config, device=None):
        super().__init__()
        self.rope_kwargs = {}
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
@ -79,7 +74,7 @@ class MyNewModel2RotaryEmbedding(nn.Module):
        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

@ -91,13 +86,14 @@ class MyNewModel2RotaryEmbedding(nn.Module):
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(
                self.config, device, seq_len=seq_len, **self.rope_kwargs
            )
            inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            # This .to() is needed if the model has been moved to a device after being initialized (because
            # the buffer is automatically moved, but not the original copy)
            self.original_inv_freq = self.original_inv_freq.to(device)
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

@ -356,6 +352,7 @@ class MyNewModel2PreTrainedModel(PreTrainedModel):
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

@ -107,7 +107,6 @@ class NewTaskModelPreTrainedModel(PreTrainedModel):
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True
    _supports_cache_class = True
    _supports_flash_attn_2 = True
    _supports_sdpa = True

@ -249,9 +248,6 @@ class NewTaskModelForNewTask(NewTaskModelPreTrainedModel, GenerationMixin):
    def get_decoder(self):
        return self.language_model.get_decoder()

    def tie_weights(self):
        return self.language_model.tie_weights()

    def _update_causal_mask(
        self,
        attention_mask,

@ -45,13 +45,8 @@ class SuperRMSNorm(nn.Module):


class SuperRotaryEmbedding(nn.Module):
    def __init__(
        self,
        config: SuperConfig,
        device=None,
    ):
    def __init__(self, config: SuperConfig, device=None):
        super().__init__()
        self.rope_kwargs = {}
        # BC: "rope_type" was originally "type"
        if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
            self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
@ -63,7 +58,7 @@ class SuperRotaryEmbedding(nn.Module):
        self.config = config
        self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]

        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
        inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
        self.register_buffer("inv_freq", inv_freq, persistent=False)
        self.original_inv_freq = self.inv_freq

@ -75,13 +70,14 @@ class SuperRotaryEmbedding(nn.Module):
        """
        seq_len = torch.max(position_ids) + 1
        if seq_len > self.max_seq_len_cached:  # growth
            inv_freq, self.attention_scaling = self.rope_init_fn(
                self.config, device, seq_len=seq_len, **self.rope_kwargs
            )
            inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
            self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
            self.max_seq_len_cached = seq_len

        if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
            # This .to() is needed if the model has been moved to a device after being initialized (because
            # the buffer is automatically moved, but not the original copy)
            self.original_inv_freq = self.original_inv_freq.to(device)
            self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
            self.max_seq_len_cached = self.original_max_seq_len

@ -356,6 +352,7 @@ class SuperPreTrainedModel(PreTrainedModel):
    _skip_keys_device_placement = ["past_key_values"]
    _supports_flash_attn_2 = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_cache_class = True
    _supports_quantized_cache = True
    _supports_static_cache = True

examples/modular-transformers/modeling_switch_function.py (new file)
@ -0,0 +1,170 @@
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from examples/modular-transformers/modular_switch_function.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_switch_function.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Note that llama and cohere have different definitions for rotate_half
from typing import Callable, Optional, Tuple

import torch
from torch import nn

from ...cache_utils import Cache
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS
from ...processing_utils import Unpack
from ...utils import logging
from .configuration_switch_function import SwitchFunctionConfig


logger = logging.get_logger(__name__)


def rotate_half(x):
    # Split and rotate. Note that this function is different from e.g. Llama.
    x1 = x[..., ::2]
    x2 = x[..., 1::2]
    rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2)
    return rot_x


def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
    """Applies Rotary Position Embedding to the query and key tensors.

    Args:
        q (`torch.Tensor`): The query tensor.
        k (`torch.Tensor`): The key tensor.
        cos (`torch.Tensor`): The cosine part of the rotary embedding.
        sin (`torch.Tensor`): The sine part of the rotary embedding.
        position_ids (`torch.Tensor`, *optional*):
            Deprecated and unused.
        unsqueeze_dim (`int`, *optional*, defaults to 1):
            The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
            sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
            that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
            k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
            cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
            the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
    Returns:
        `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
    """
    cos = cos.unsqueeze(unsqueeze_dim)
    sin = sin.unsqueeze(unsqueeze_dim)
    q_embed = (q * cos) + (rotate_half(q) * sin)
    k_embed = (k * cos) + (rotate_half(k) * sin)
    return q_embed, k_embed


def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
    """
    This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
    num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
    """
    batch, num_key_value_heads, slen, head_dim = hidden_states.shape
    if n_rep == 1:
        return hidden_states
    hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
    return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    key_states = repeat_kv(key, module.num_key_value_groups)
    value_states = repeat_kv(value, module.num_key_value_groups)

    attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
    if attention_mask is not None:
        causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        attn_weights = attn_weights + causal_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
    attn_output = torch.matmul(attn_weights, value_states)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class SwitchFunctionAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config: SwitchFunctionConfig, layer_idx: int):
        super().__init__()
        self.config = config
        self.layer_idx = layer_idx
        self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
        self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
        self.scaling = self.head_dim**-0.5
        self.attention_dropout = config.attention_dropout
        self.is_causal = True

        self.q_proj = nn.Linear(
            config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
        )
        self.k_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.v_proj = nn.Linear(
            config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
        )
        self.o_proj = nn.Linear(
            config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[Cache] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states = self.q_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        key_states = self.k_proj(hidden_states).view(hidden_shape).transpose(1, 2)
        value_states = self.v_proj(hidden_states).view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)

        if past_key_value is not None:
            # sin and cos are specific to RoPE models; cache_position needed for the static cache
            cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
            key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)

        attention_interface: Callable = eager_attention_forward
        if self.config._attn_implementation != "eager":
            if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
                logger.warning_once(
                    "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
                    'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
                )
            else:
                attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.attention_dropout,
            scaling=self.scaling,
            **kwargs,
        )

        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return attn_output, attn_weights

examples/modular-transformers/modular_add_function.py (new file)
@ -0,0 +1,15 @@
# Note that zamba does not have the `apply_rotary_pos_emb` function!
from transformers.models.llama.modeling_llama import apply_rotary_pos_emb
from transformers.models.zamba.modeling_zamba import ZambaAttention


# When following ZambaAttention dependencies, the function `apply_rotary_pos_emb` is not present
# by default as it is absent from the class definition (and the file altogether).
# Note that this syntax should be able to add both `apply_rotary_pos_emb` as imported directly, but
# `rotate_half` as well as a dependency from the imported function!!
class TestAttention(ZambaAttention):
    def __init__(self):
        pass

    def forward(self):
        _ = apply_rotary_pos_emb(1, 1, 1, 1)

examples/modular-transformers/modular_switch_function.py (new file)
@ -0,0 +1,10 @@
# Note that llama and cohere have different definitions for rotate_half
from transformers.models.cohere.modeling_cohere import rotate_half  # noqa
from transformers.models.llama.modeling_llama import LlamaAttention


# When following LlamaAttention dependencies, we will grab the function `rotate_half` defined
# in `modeling_llama.py`. But here we imported it explicitly from Cohere, so it should use Cohere's
# definition instead
class SwitchFunctionAttention(LlamaAttention):
    pass

@ -45,7 +45,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")


@ -54,7 +54,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/contrastive-image-text/requirements.txt")


@ -57,7 +57,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")


@ -49,7 +49,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

logger = get_logger(__name__)


@ -43,7 +43,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")


@ -48,7 +48,7 @@ Any model supported by the AutoModelForMaskedImageModeling API can be used.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")


@ -53,7 +53,7 @@ Any model supported by the AutoModelForMaskedImageModeling API can be used.
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")


@ -46,7 +46,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/instance-segmentation/requirements.txt")


@ -52,7 +52,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/instance-segmentation/requirements.txt")


@ -55,7 +55,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")


@ -57,7 +57,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

logger = get_logger(__name__)


@ -58,7 +58,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")


@ -60,7 +60,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

logger = get_logger(__name__)


@ -54,7 +54,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")


@ -57,7 +57,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

logger = get_logger(__name__)
require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")


@ -47,7 +47,7 @@ from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=2.14.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")


@ -47,7 +47,7 @@ from transformers.utils import PaddingStrategy, check_min_version, send_example_


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

logger = logging.getLogger(__name__)


@ -56,7 +56,7 @@ from transformers.utils import PaddingStrategy, check_min_version, send_example_


# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

logger = get_logger(__name__)
# You should update this to your particular problem to have better documentation of `model_type`


@ -48,7 +48,7 @@ from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.48.0.dev0")
check_min_version("4.49.0.dev0")

require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/object-detection/requirements.txt")

@ -271,6 +271,10 @@ class DataTrainingArguments:
            )
        },
    )
    use_fast: Optional[bool] = field(
        default=True,
        metadata={"help": "Use a fast torchvision-base image processor if it is supported for a given model."},
    )


@dataclass
@ -427,6 +431,7 @@ def main():
        size={"max_height": data_args.image_square_size, "max_width": data_args.image_square_size},
        do_pad=True,
        pad_size={"height": data_args.image_square_size, "width": data_args.image_square_size},
        use_fast=data_args.use_fast,
        **common_pretrained_args,
    )
