Mirror of https://github.com/huggingface/transformers.git (synced 2025-10-20 17:13:56 +08:00)

Commit: Merge remote-tracking branch 'origin/main' into serve-quantization
@@ -29,6 +29,7 @@ COMMON_ENV_VARIABLES = {
     "RUN_PIPELINE_TESTS": False,
     # will be adjust in `CircleCIJob.to_dict`.
     "RUN_FLAKY": True,
+    "DISABLE_SAFETENSORS_CONVERSION": True,
 }
 # Disable the use of {"s": None} as the output is way too long, causing the navigation on CircleCI impractical
 COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "vvv": None, "rsfE":None}

@@ -185,6 +186,7 @@ class CircleCIJob:
             # During the CircleCI docker images build time, we might already (or not) download the data.
             # If it's done already, the files are inside the directory `/test_data/`.
             {"run": {"name": "fetch hub objects before pytest", "command": "cp -r /test_data/* . 2>/dev/null || true; python3 utils/fetch_hub_objects_for_ci.py"}},
+            {"run": {"name": "download and unzip hub cache", "command": 'curl -L -o huggingface-cache.tar.gz https://huggingface.co/datasets/hf-internal-testing/hf_hub_cache/resolve/main/huggingface-cache.tar.gz && apt-get install pigz && tar --use-compress-program="pigz -d -p 8" -xf huggingface-cache.tar.gz && mv -n hub/* /root/.cache/huggingface/hub/ && ls -la /root/.cache/huggingface/hub/'}},
             {"run": {
                 "name": "Run tests",
                 "command": f"({timeout_cmd} python3 -m pytest {marker_cmd} -n {self.pytest_num_workers} {junit_flags} {repeat_on_failure_flags} {' '.join(pytest_flags)} $(cat splitted_tests.txt) | tee tests_output.txt)"}
.github/ISSUE_TEMPLATE/bug-report.yml

@@ -48,19 +48,19 @@ body:
       - continuous batching: @remi-or @ArthurZucker @McPatate
       - pipelines: @Rocketknight1
       - tokenizers: @ArthurZucker and @itazap
-      - trainer: @zach-huggingface @SunMarc
+      - trainer: @SunMarc
       - attention: @vasqu @ArthurZucker @CyrilVallez
       - model loading (from pretrained, etc): @CyrilVallez
-      - distributed: @3outeille @ArthurZucker @S1ro1
+      - distributed: @3outeille @ArthurZucker
       - CIs: @ydshieh

       Integrations:

-      - deepspeed: HF Trainer/Accelerate: @SunMarc @zach-huggingface
       - ray/raytune: @richardliaw, @amogkam
       - Big Model Inference: @SunMarc
-      - quantization (bitsandbytes, autogpt): @SunMarc @MekkCyber
+      - quantization: @SunMarc @MekkCyber
       - kernels: @MekkCyber @drbh
+      - peft: @BenjaminBossan @githubnemo

       Devices/Backends:
.github/PULL_REQUEST_TEMPLATE.md

@@ -51,19 +51,19 @@ Library:
 - continuous batching: @remi-or @ArthurZucker @McPatate
 - pipelines: @Rocketknight1
 - tokenizers: @ArthurZucker and @itazap
-- trainer: @zach-huggingface @SunMarc
+- trainer: @SunMarc
 - attention: @vasqu @ArthurZucker @CyrilVallez
 - model loading (from pretrained, etc): @CyrilVallez
-- distributed: @3outeille @ArthurZucker @S1ro1
+- distributed: @3outeille @ArthurZucker
 - CIs: @ydshieh

 Integrations:

-- deepspeed: HF Trainer/Accelerate: @SunMarc @zach-huggingface
 - ray/raytune: @richardliaw, @amogkam
 - Big Model Inference: @SunMarc
-- quantization (bitsandbytes, autogpt): @SunMarc @MekkCyber
+- quantization: @SunMarc @MekkCyber
 - kernels: @MekkCyber @drbh
+- peft: @BenjaminBossan @githubnemo

 Devices/Backends:
.github/workflows/benchmark.yml

@@ -1,10 +1,7 @@
 name: Self-hosted runner (benchmark)

 on:
-  push:
-    branches: [main]
-  pull_request:
-    types: [ opened, labeled, reopened, synchronize ]
+  workflow_dispatch:

 concurrency:
   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
.github/workflows/benchmark_v2.yml

@@ -1,35 +1,7 @@
 name: Benchmark v2 Framework

 on:
-  workflow_call:
-    inputs:
-      runner:
-        description: 'GH Actions runner group to use'
-        required: true
-        type: string
-      container_image:
-        description: 'Docker image to use'
-        required: true
-        type: string
-      container_options:
-        description: 'Container options to use'
-        required: true
-        type: string
-      commit_sha:
-        description: 'Commit SHA to benchmark'
-        required: false
-        type: string
-        default: ''
-      run_id:
-        description: 'Custom run ID for organizing results (auto-generated if not provided)'
-        required: false
-        type: string
-        default: ''
-      benchmark_repo_id:
-        description: 'HuggingFace Dataset to upload results to (e.g., "org/benchmark-results")'
-        required: false
-        type: string
-        default: ''
+  workflow_dispatch:

 env:
   HF_HOME: /mnt/cache

@@ -82,4 +54,4 @@ jobs:
           --token '${{ secrets.TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN }}' \
           --log-level INFO
         env:
           HF_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
@@ -1,11 +1,7 @@
 name: Benchmark v2 Scheduled Runner - A10 Single-GPU

 on:
-  schedule:
-    # Run daily at 16:30 UTC
-    - cron: "30 16 * * *"
-  pull_request:
-    types: [ opened, labeled, reopened, synchronize ]
+  workflow_dispatch:

 jobs:
   benchmark-v2-default:

@@ -18,4 +14,4 @@ jobs:
       commit_sha: ${{ github.sha }}
       run_id: ${{ github.run_id }}
       benchmark_repo_id: hf-internal-testing/transformers-daily-benchmarks
     secrets: inherit
@@ -1,11 +1,7 @@
 name: Benchmark v2 Scheduled Runner - MI325 Single-GPU

 on:
-  schedule:
-    # Run daily at 16:30 UTC
-    - cron: "30 16 * * *"
-  pull_request:
-    types: [ opened, labeled, reopened, synchronize ]
+  workflow_dispatch:

 jobs:
   benchmark-v2-default:

@@ -18,4 +14,4 @@ jobs:
       commit_sha: ${{ github.sha }}
       run_id: ${{ github.run_id }}
       benchmark_repo_id: hf-internal-testing/transformers-daily-benchmarks
     secrets: inherit
.github/workflows/build-docker-images.yml

@@ -5,6 +5,7 @@ on:
     branches:
       - build_ci_docker_image*
   repository_dispatch:
+  workflow_dispatch:
   workflow_call:
     inputs:
       image_postfix:

@@ -221,7 +222,7 @@ jobs:
   latest-pytorch-amd:
     name: "Latest PyTorch (AMD) [dev]"
     runs-on:
-      group: aws-general-8-plus
+      group: aws-highcpu-32-priv
     steps:
       -
        name: Set up Docker Buildx
.github/workflows/build_documentation.yml

@@ -16,8 +16,20 @@ jobs:
       commit_sha: ${{ github.sha }}
       package: transformers
       notebook_folder: transformers_doc
-      languages: ar de en es fr hi it ja ko pt zh
+      languages: en
       custom_container: huggingface/transformers-doc-builder
     secrets:
       token: ${{ secrets.HUGGINGFACE_PUSH }}
       hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
+
+  build_other_lang:
+    uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
+    with:
+      commit_sha: ${{ github.sha }}
+      package: transformers
+      notebook_folder: transformers_doc
+      languages: ar de es fr hi it ja ko pt zh
+      custom_container: huggingface/transformers-doc-builder
+    secrets:
+      token: ${{ secrets.HUGGINGFACE_PUSH }}
+      hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
.github/workflows/check_failed_tests.yml

@@ -35,7 +35,6 @@ env:
   # For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access.
   # This token is created under the bot `hf-transformers-bot`.
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
-  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   TF_FORCE_GPU_ALLOW_GROWTH: true
   CUDA_VISIBLE_DEVICES: 0,1
.github/workflows/doctest_job.yml

@@ -16,7 +16,6 @@ env:
   RUN_SLOW: yes
   OMP_NUM_THREADS: 16
   MKL_NUM_THREADS: 16
-  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   TF_FORCE_GPU_ALLOW_GROWTH: true

 jobs:
.github/workflows/model_jobs.yml

@@ -38,7 +38,6 @@ env:
   # For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access.
   # This token is created under the bot `hf-transformers-bot`.
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
-  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   TF_FORCE_GPU_ALLOW_GROWTH: true
   CUDA_VISIBLE_DEVICES: 0,1
.github/workflows/model_jobs_intel_gaudi.yml

@@ -26,7 +26,6 @@ env:
   TRANSFORMERS_IS_CI: yes
   PT_ENABLE_INT64_SUPPORT: 1
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
-  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   HF_HOME: /mnt/cache/.cache/huggingface

 jobs:
@@ -14,7 +14,7 @@ permissions: {}
 jobs:
   get-pr-number:
     name: Get PR number
-    if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1", "SunMarc", "muellerzr", "eustlb", "MekkCyber", "manueldeprada", "vasqu", "ivarflakstad", "stevhliu", "ebezzam", "itazap"]'), github.actor) && (startsWith(github.event.comment.body, 'build-doc')) }}
+    if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1", "SunMarc", "eustlb", "MekkCyber", "vasqu", "ivarflakstad", "stevhliu", "ebezzam", "itazap"]'), github.actor) && (startsWith(github.event.comment.body, 'build-doc')) }}
     uses: ./.github/workflows/get-pr-number.yml

   get-pr-info:
.github/workflows/self-comment-ci.yml

@@ -20,7 +20,6 @@ env:
   # For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access.
   # This token is created under the bot `hf-transformers-bot`.
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
-  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   TF_FORCE_GPU_ALLOW_GROWTH: true
   CUDA_VISIBLE_DEVICES: 0,1

@@ -29,7 +28,7 @@ jobs:
     runs-on: ubuntu-22.04
     name: Get PR number
     # For security: only allow team members to run
-    if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1", "SunMarc", "muellerzr", "eustlb", "MekkCyber", "manueldeprada", "vasqu", "ivarflakstad", "stevhliu", "ebezzam", "remi-or", "itazap"]'), github.actor) && (startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow')) }}
+    if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1", "SunMarc", "eustlb", "MekkCyber", "vasqu", "ivarflakstad", "stevhliu", "ebezzam", "remi-or", "itazap"]'), github.actor) && (startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow')) }}
     outputs:
       PR_NUMBER: ${{ steps.set_pr_number.outputs.PR_NUMBER }}
     steps:
@@ -26,7 +26,6 @@ env:
   TRANSFORMERS_IS_CI: yes
   PT_ENABLE_INT64_SUPPORT: 1
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
-  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   HF_HOME: /mnt/cache/.cache/huggingface

 jobs:
.github/workflows/self-scheduled.yml

@@ -48,7 +48,6 @@ env:
   # For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access.
   # This token is created under the bot `hf-transformers-bot`.
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
-  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   TF_FORCE_GPU_ALLOW_GROWTH: true
   CUDA_VISIBLE_DEVICES: 0,1
   NUM_SLICES: 2
.github/workflows/ssh-runner.yml

@@ -20,7 +20,6 @@ env:
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
   RUN_SLOW: yes # For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access. # This token is created under the bot `hf-transformers-bot`.
-  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   TF_FORCE_GPU_ALLOW_GROWTH: true
   CUDA_VISIBLE_DEVICES: 0,1

@@ -33,14 +32,17 @@ jobs:
     steps:
       - name: Get runner to use
         shell: bash
+        env:
+          NUM_GPUS: ${{ github.event.inputs.num_gpus }}
+          RUNNER_TYPE: ${{ github.event.inputs.runner_type }}
         run: |
-          if [[ "${{ github.event.inputs.num_gpus }}" == "single" && "${{ github.event.inputs.runner_type }}" == "t4" ]]; then
+          if [[ "$NUM_GPUS" == "single" && "$RUNNER_TYPE" == "t4" ]]; then
            echo "RUNNER=aws-g4dn-4xlarge-cache" >> $GITHUB_ENV
-          elif [[ "${{ github.event.inputs.num_gpus }}" == "multi" && "${{ github.event.inputs.runner_type }}" == "t4" ]]; then
+          elif [[ "$NUM_GPUS" == "multi" && "$RUNNER_TYPE" == "t4" ]]; then
            echo "RUNNER=aws-g4dn-12xlarge-cache" >> $GITHUB_ENV
-          elif [[ "${{ github.event.inputs.num_gpus }}" == "single" && "${{ github.event.inputs.runner_type }}" == "a10" ]]; then
+          elif [[ "$NUM_GPUS" == "single" && "$RUNNER_TYPE" == "a10" ]]; then
            echo "RUNNER=aws-g5-4xlarge-cache" >> $GITHUB_ENV
-          elif [[ "${{ github.event.inputs.num_gpus }}" == "multi" && "${{ github.event.inputs.runner_type }}" == "a10" ]]; then
+          elif [[ "$NUM_GPUS" == "multi" && "$RUNNER_TYPE" == "a10" ]]; then
            echo "RUNNER=aws-g5-12xlarge-cache" >> $GITHUB_ENV
          else
            echo "RUNNER=" >> $GITHUB_ENV

@@ -85,9 +87,11 @@ jobs:
       - name: Store Slack infos
         #because the SSH can be enabled dynamically if the workflow failed, so we need to store slack infos to be able to retrieve them during the waitforssh step
         shell: bash
+        env:
+          GITHUB_ACTOR: ${{ github.actor }}
         run: |
-          echo "${{ github.actor }}"
-          github_actor=${{ github.actor }}
+          echo "$GITHUB_ACTOR"
+          github_actor=$GITHUB_ACTOR
          github_actor=${github_actor/'-'/'_'}
          echo "$github_actor"
          echo "github_actor=$github_actor" >> $GITHUB_ENV
@@ -153,7 +153,7 @@ You are not required to read the following guidelines before opening an issue. H
 cd examples/seq2seq
 torchrun --nproc_per_node=2 ./finetune_trainer.py \
 --model_name_or_path sshleifer/distill-mbart-en-ro-12-4 --data_dir wmt_en_ro \
---output_dir output_dir --overwrite_output_dir \
+--output_dir output_dir \
 --do_train --n_train 500 --num_train_epochs 1 \
 --per_device_train_batch_size 1 --freeze_embeds \
 --src_lang en_XX --tgt_lang ro_RO --task translation \
@@ -48,6 +48,7 @@ limitations under the License.
         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
+        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_it.md">Italiano</a> |
         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ar.md">العربية</a> |
         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ur.md">اردو</a> |

@@ -110,10 +111,10 @@ git clone https://github.com/huggingface/transformers.git
 cd transformers

 # pip
-pip install .[torch]
+pip install '.[torch]'

 # uv
-uv pip install .[torch]
+uv pip install '.[torch]'
 ```

 ## Quickstart
benchmark_v2/.gitignore

@@ -1 +1,2 @@
 benchmark_results/
+benchmark_results_profiles/
(deleted file)

@@ -1 +0,0 @@
-# Benchmark implementations directory
(deleted file, 165 lines removed: the previous LLaMA benchmark built on the old ModelBenchmark framework)

# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os
from typing import Any

import torch
from benchmark_framework import ModelBenchmark


os.environ["TOKENIZERS_PARALLELISM"] = "1"
torch.set_float32_matmul_precision("high")


class LLaMABenchmark(ModelBenchmark):
    """Simplified LLaMA model benchmark implementation using the ModelBenchmark base class."""

    def __init__(self, logger: logging.Logger):
        super().__init__(logger)
        self._default_prompt = "Why dogs are so cute?"  # Custom prompt for LLaMA

    def get_scenario_configs(self) -> list[dict[str, Any]]:
        """
        Get LLaMA-specific scenario configurations.

        Returns:
            List of scenario configuration dictionaries
        """
        return [
            # Eager variants
            {"variant": "eager", "compile_mode": None, "use_cache": True, "description": "Eager execution with cache"},
            # Compiled variants
            {
                "variant": "compiled",
                "compile_mode": "max-autotune",
                "use_cache": True,
                "description": "Compiled with max autotune",
            },
            # Kernelized variant (if available)
            {
                "variant": "kernelized",
                "compile_mode": "max-autotune",
                "use_cache": True,
                "description": "Kernelized execution",
            },
        ]

    def _is_kernelization_available(self) -> bool:
        """Check if kernelization is available for LLaMA."""
        try:
            from kernels import Mode, kernelize  # noqa: F401

            return True
        except ImportError:
            self.logger.debug("Kernelization not available: kernels module not found")
            return False

    def get_default_generation_config(self) -> dict[str, Any]:
        """Get LLaMA-specific generation configuration."""
        return {
            "do_sample": False,
            "top_p": 1.0,
            "temperature": 1.0,
            "repetition_penalty": 1.0,
            "max_new_tokens": None,  # Will be set per scenario
        }

    def get_model_init_kwargs(self, config) -> dict[str, Any]:
        """Get LLaMA-specific model initialization kwargs."""
        return {
            "torch_dtype": getattr(torch, config.torch_dtype),
            "attn_implementation": config.attn_implementation,
            "use_cache": True,
        }

    def get_default_torch_dtype(self) -> str:
        """Get default torch dtype for LLaMA."""
        return "float16"  # LLaMA works well with float16

    def get_default_device(self) -> str:
        """Get default device for LLaMA."""
        return "cuda"  # LLaMA prefers CUDA


def run_llama(logger, output_dir, **kwargs):
    """
    Run LLaMA benchmark with the given configuration.

    Args:
        logger: Logger instance
        output_dir: Output directory for results
        **kwargs: Additional configuration options

    Returns:
        Path to output file if successful
    """
    from benchmark_framework import BenchmarkRunner

    # Extract parameters with defaults
    model_id = kwargs.get("model_id", "meta-llama/Llama-2-7b-hf")
    warmup_iterations = kwargs.get("warmup_iterations", 3)
    measurement_iterations = kwargs.get("measurement_iterations", 5)
    num_tokens_to_generate = kwargs.get("num_tokens_to_generate", 100)
    include_sdpa_variants = kwargs.get("include_sdpa_variants", True)
    device = kwargs.get("device", "cuda")
    torch_dtype = kwargs.get("torch_dtype", "float16")
    batch_size = kwargs.get("batch_size", 1)
    commit_id = kwargs.get("commit_id")

    logger.info(f"Starting LLaMA benchmark for model: {model_id}")
    logger.info(
        f"Configuration: warmup={warmup_iterations}, measurement={measurement_iterations}, tokens={num_tokens_to_generate}"
    )

    try:
        # Create benchmark instance
        benchmark = LLaMABenchmark(logger)

        # Create scenarios
        scenarios = benchmark.create_scenarios(
            model_id=model_id,
            warmup_iterations=warmup_iterations,
            measurement_iterations=measurement_iterations,
            num_tokens_to_generate=num_tokens_to_generate,
            include_sdpa_variants=include_sdpa_variants,
            device=device,
            torch_dtype=torch_dtype,
            batch_size=batch_size,
        )

        logger.info(f"Created {len(scenarios)} benchmark scenarios")

        # Create runner and execute benchmarks
        runner = BenchmarkRunner(logger, output_dir)
        results = runner.run_benchmark(benchmark, scenarios, commit_id=commit_id)

        if not results:
            logger.warning("No successful benchmark results")
            return None

        # Save results
        model_name = model_id.split("/")[-1]  # Extract model name from ID
        output_file = runner.save_results(model_name, results)

        logger.info(f"LLaMA benchmark completed successfully. Results saved to: {output_file}")
        return output_file

    except Exception as e:
        logger.error(f"LLaMA benchmark failed: {e}")
        import traceback

        logger.debug(traceback.format_exc())
        raise
(One file's diff is suppressed because it is too large.)
benchmark_v2/framework/benchmark_config.py (new file, 218 lines)

import hashlib
import json
import logging
from typing import Any, Optional


KERNELIZATION_AVAILABLE = False
try:
    from kernels import Mode, kernelize  # noqa: F401

    KERNELIZATION_AVAILABLE = True
except ImportError:
    pass


logger = logging.getLogger(__name__)


class BenchmarkConfig:
    """Configuration for a single benchmark scenario."""

    def __init__(
        self,
        warmup_iterations: int = 5,
        measurement_iterations: int = 20,
        gpu_monitoring: bool = False,  # False by default because it slows down the benchmark by a lot
        batch_size: int = 1,
        sequence_length: int = 128,
        num_tokens_to_generate: int = 128,
        attn_implementation: str = "eager",
        sdpa_backend: Optional[str] = None,
        compile_mode: Optional[str] = None,
        compile_options: Optional[dict[str, Any]] = None,
        kernelize: bool = False,
        name: Optional[str] = None,
        skip_validity_check: bool = False,
    ) -> None:
        # Benchmark parameters
        self.warmup_iterations = warmup_iterations
        self.measurement_iterations = measurement_iterations
        self.gpu_monitoring = gpu_monitoring
        # Input parameters
        self.batch_size = batch_size
        self.sequence_length = sequence_length
        self.num_tokens_to_generate = num_tokens_to_generate
        # Generation parameters
        self.attn_implementation = attn_implementation
        self.sdpa_backend = sdpa_backend
        # Optimization parameters
        self.compile_mode = compile_mode
        self.compile_options = compile_options if compile_options is not None else {}
        self.kernelize = kernelize
        # Constant parameters
        self.dtype = "torch.bfloat16"
        self.device = "cuda"

        self.check_validity(skip_validity_check)
        self.name = name if name is not None else self.infer_name()

    def check_validity(self, skip_validity_check: bool = False) -> None:
        if skip_validity_check:
            return
        # Flash attention does not support compile mode, so we turn it off  # FIXME: it would be better to support it
        is_fa = self.attn_implementation == "flash_attention_2"
        is_fa |= self.attn_implementation == "sdpa" and self.sdpa_backend == "flash_attention"
        if is_fa:
            logger.warning("Flash attention does not support compile mode. Turning off compile mode.")
            self.compile_mode = None

    @property
    def hash(self) -> str:
        return hashlib.sha256(json.dumps(self.to_dict()).encode()).hexdigest()

    def infer_name(self, compact: bool = True) -> str:
        """Infer a human-readable name for the benchmark config, either compact or verbose."""
        if compact:
            iter_str = f"w{self.warmup_iterations}_i{self.measurement_iterations}"
            gpu_monitor_str = "monitored" if self.gpu_monitoring else "unmonitored"
            dimensions_str = f"b{self.batch_size}_s{self.sequence_length}_n{self.num_tokens_to_generate}"
            attn_code = self.attn_implementation
            attn_code += f"_{self.sdpa_backend}" if self.attn_implementation == "sdpa" else ""
            compile_str = f"compiled_{self.compile_mode}" if self.compile_mode is not None else "uncompiled"
            kernelize_str = "kernelized" if self.kernelize else "unkernelized"
            sep = "-"
        else:
            iter_str = f"{self.warmup_iterations} warmup, {self.measurement_iterations} iterations"
            gpu_monitor_str = ("with" if self.gpu_monitoring else "no") + " GPU monitoring"
            dimensions_str = f"batch size {self.batch_size}, sequence length {self.sequence_length}, {self.num_tokens_to_generate} generated tokens"
            attn_code = f"{self.attn_implementation} attention"
            attn_code += f" with {self.sdpa_backend} backend" if self.attn_implementation == "sdpa" else ""
            compile_str = "compiled" if self.compile_mode is not None else "not compiled"
            kernelize_str = "kernelized" if self.kernelize else "not kernelized"
            sep = ", "
        return sep.join([iter_str, gpu_monitor_str, dimensions_str, attn_code, compile_str, kernelize_str])

    def to_dict(self) -> dict[str, Any]:
        return {
            "name": self.name,
            "warmup_iterations": self.warmup_iterations,
            "measurement_iterations": self.measurement_iterations,
            "gpu_monitoring": self.gpu_monitoring,
            "batch_size": self.batch_size,
            "sequence_length": self.sequence_length,
            "num_tokens_to_generate": self.num_tokens_to_generate,
            "attn_implementation": self.attn_implementation,
            "sdpa_backend": self.sdpa_backend,
            "compile_mode": self.compile_mode,
            "compile_options": self.compile_options,
            "kernelize": self.kernelize,
        }

    @classmethod
    def from_dict(cls, data: dict[str, Any], skip_validity_check: bool = False) -> "BenchmarkConfig":
        return cls(
            warmup_iterations=data.get("warmup_iterations", 5),
            measurement_iterations=data.get("measurement_iterations", 20),
            gpu_monitoring=data.get("gpu_monitoring", False),
            batch_size=data.get("batch_size", 1),
            sequence_length=data.get("sequence_length", 128),
            num_tokens_to_generate=data.get("num_tokens_to_generate", 128),
            attn_implementation=data.get("attn_implementation", "eager"),
            sdpa_backend=data.get("sdpa_backend"),
            compile_mode=data.get("compile_mode"),
            compile_options=data.get("compile_options"),
            kernelize=data.get("kernelize", False),
            name=data.get("name"),
            skip_validity_check=skip_validity_check,
        )


def cross_generate_configs(
    attn_impl_and_sdpa_backend: list[tuple[str, Optional[str]]],
    compiled_mode: list[Optional[str]],
    kernelized: list[bool],
    warmup_iterations: int = 5,
    measurement_iterations: int = 20,
    batch_size: int = 1,
    sequence_length: int = 128,
    num_tokens_to_generate: int = 128,
    gpu_monitoring: bool = False,  # this slows down the benchmark by a lot so we disable it by default
) -> list[BenchmarkConfig]:
    # Create kwargs common to all configs
    kwargs = {
        "warmup_iterations": warmup_iterations,
        "measurement_iterations": measurement_iterations,
        "batch_size": batch_size,
        "sequence_length": sequence_length,
        "num_tokens_to_generate": num_tokens_to_generate,
        "gpu_monitoring": gpu_monitoring,
    }
    # Cross-generate all combinations of attn_implementation, compiled_mode, and kernelized
    configs = []
    for attn_implementation, sdpa_backend in list(dict.fromkeys(attn_impl_and_sdpa_backend)):
        for cm in list(dict.fromkeys(compiled_mode)):
            for kernelize_on in list(dict.fromkeys(kernelized)):
                config = BenchmarkConfig(
                    attn_implementation=attn_implementation,
                    sdpa_backend=sdpa_backend,
                    compile_mode=cm,
                    kernelize=kernelize_on,
                    **kwargs,
                )
                configs.append(config)
    return configs


def generate_all_configs(
    warmup_iterations: int = 5,
    measurement_iterations: int = 20,
    batch_size: int = 1,
    sequence_length: int = 128,
    num_tokens_to_generate: int = 128,
    gpu_monitoring: bool = False,
) -> list[BenchmarkConfig]:
    all_attn_implementations = [
        ("flash_attention_2", None),
        ("eager", None),
        ("sdpa", "math"),
        ("sdpa", "flash_attention"),
        ("flex_attention", None),
    ]
    return cross_generate_configs(
        attn_impl_and_sdpa_backend=all_attn_implementations,
        compiled_mode=[None, "default", "reduce-overhead", "max-autotune", "max-autotune-no-cudagraphs"],
        kernelized=[False, KERNELIZATION_AVAILABLE],
        warmup_iterations=warmup_iterations,
        measurement_iterations=measurement_iterations,
        batch_size=batch_size,
        sequence_length=sequence_length,
        num_tokens_to_generate=num_tokens_to_generate,
        gpu_monitoring=gpu_monitoring,
    )


def generate_default_configs(
    warmup_iterations: int = 5,
    measurement_iterations: int = 20,
    batch_size: int = 1,
    sequence_length: int = 128,
    num_tokens_to_generate: int = 128,
    gpu_monitoring: bool = False,
) -> list[BenchmarkConfig]:
    all_attn_implementations = [
        ("flash_attention_2", None),
        ("eager", None),
        ("sdpa", "math"),
        ("sdpa", "flash_attention"),  # note: this one can fail with compile because of attn mask
    ]
    return cross_generate_configs(
        attn_impl_and_sdpa_backend=all_attn_implementations,
        compiled_mode=[None, "max-autotune"],
        kernelized=[False, KERNELIZATION_AVAILABLE],
        warmup_iterations=warmup_iterations,
        measurement_iterations=measurement_iterations,
        batch_size=batch_size,
        sequence_length=sequence_length,
        num_tokens_to_generate=num_tokens_to_generate,
        gpu_monitoring=gpu_monitoring,
    )
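As a quick illustration of how the configuration helpers above compose, here is a minimal usage sketch. It only relies on the module listed above; the import path, the printed fields, and the parameter values are illustrative assumptions, not part of the commit.

```python
# Minimal sketch: build a small grid of benchmark configs and inspect them.
# Assumes benchmark_v2/framework/benchmark_config.py from this commit is importable as shown.
from framework.benchmark_config import BenchmarkConfig, cross_generate_configs

configs = cross_generate_configs(
    attn_impl_and_sdpa_backend=[("eager", None), ("sdpa", "math")],
    compiled_mode=[None, "max-autotune"],
    kernelized=[False],
    warmup_iterations=2,
    measurement_iterations=5,
    num_tokens_to_generate=64,
)

for cfg in configs:
    # `name` is auto-inferred (e.g. "w2_i5-unmonitored-b1_s128_n64-eager-uncompiled-unkernelized"),
    # and `hash` is a digest of `to_dict()`, which the runner uses to skip duplicate scenarios.
    print(cfg.name, cfg.hash[:8])

# Configs round-trip through plain dicts, e.g. for saving alongside results:
restored = BenchmarkConfig.from_dict(configs[0].to_dict(), skip_validity_check=True)
assert restored.hash == configs[0].hash
```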
benchmark_v2/framework/benchmark_runner.py (new file, 388 lines; the listing is truncated in this view)

import gc
import json
import logging
import os
import pathlib
import re
import time
from contextlib import nullcontext
from datetime import datetime
from queue import Queue
from typing import Any, Optional

import torch
from tqdm import trange

from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    CompileConfig,
    GenerationConfig,
    GenerationMixin,
)
from transformers.generation.streamers import BaseStreamer

from .benchmark_config import BenchmarkConfig
from .data_classes import BenchmarkMetadata, BenchmarkResult, GPURawMetrics, pretty_print_dict
from .hardware_metrics import GPUMonitor


try:
    from kernels import Mode, kernelize  # noqa: F401
except ImportError:
    kernelize = None
    Mode = None


DEFAULT_PROMPT = "\n".join([
    "The French Revolution was a period of political and societal change in France that began with the Estates General of 1789 and ended with the Coup of 18 Brumaire on 9 November 1799.",
    "Many of the revolution's ideas are considered fundamental principles of liberal democracy, and its values remain central to modern French political discourse.",
    "It was caused by a combination of social, political, and economic factors which the existing regime proved unable to manage.",
    "Financial crisis and widespread social distress led to the convocation of the Estates General in May 1789, its first meeting since 1614.",
    "The representatives of the Third Estate broke away and re-constituted themselves as a National Assembly in June.",
    "The Storming of the Bastille in Paris on 14 July led to a series of radical measures by the Assembly, including the abolition of feudalism, state control over the Catholic Church in France, and issuing the Declaration of the Rights of Man and of the Citizen.",
    "The next three years were dominated by a struggle for political control.",
    "King Louis XVI's attempted flight to Varennes in June 1791 further discredited the monarchy, and military defeats after the outbreak of the French Revolutionary Wars in April 1792 led to the insurrection of 10 August 1792.",
    "As a result, the monarchy was replaced by the French First Republic in September, followed by the execution of Louis XVI himself in January 1793.",
    "After another revolt in June 1793, the constitution was suspended, and political power passed from the National Convention to the Committee of Public Safety, dominated by radical Jacobins led by Maximilien Robespierre.",
    "About 16,000 people were sentenced by the Revolutionary Tribunal and executed in the Reign of Terror, which ended in July 1794 with the Thermidorian Reaction.",
    "Weakened by external threats and internal opposition, the Committee of Public Safety was replaced in November 1795 by the Directory.",
    "Its instability ended in the coup of 18 Brumaire and the establishment of the Consulate, with Napoleon Bonaparte as First Consul.",
])  # fmt: skip


def compact_json_numeric_arrays(data: dict):
    # Match arrays that contain only numbers (ints/floats), whitespace, commas, and newlines
    pattern = r"\[\s*\n\s*((?:\d+(?:\.\d+)?\s*,\s*)*\d+(?:\.\d+)?)\s*\n\s*\]"

    def replace_numeric_array(match):
        # Get the array content
        content = match.group(1)
        # Remove extra whitespace but keep commas
        compact_content = re.sub(r"\s+", " ", content).strip()
        return f"[{compact_content}]"

    return re.sub(pattern, replace_numeric_array, json.dumps(data, indent=4, default=str), flags=re.DOTALL)


def get_git_revision() -> str:
    base_path = pathlib.Path(__file__).parent.parent.parent
    git_dir = base_path / ".git"
    with (git_dir / "HEAD").open("r") as head:
        ref = head.readline().split(" ")[-1].strip()
    with (git_dir / ref).open("r") as git_hash:
        return git_hash.readline().strip()


def get_sdpa_backend(backend_name: Optional[str]) -> Optional[torch.nn.attention.SDPBackend]:
    """Get the SDPA backend enum from string name."""
    if backend_name is None:
        return None
    try:
        backend_map = {
            "math": torch.nn.attention.SDPBackend.MATH,
            "flash_attention": torch.nn.attention.SDPBackend.FLASH_ATTENTION,
            "efficient_attention": torch.nn.attention.SDPBackend.EFFICIENT_ATTENTION,
            "cudnn_attention": torch.nn.attention.SDPBackend.CUDNN_ATTENTION,
        }
        return backend_map.get(backend_name.lower())
    except AttributeError:
        # torch.nn.attention.SDPBackend not available in older torch versions
        return None


def flush_memory():
    """Flush GPU memory and run garbage collection."""
    gc.collect()
    # Dynamo resets
    torch._dynamo.reset()
    torch._dynamo.reset_code_caches()
    if hasattr(torch._inductor, "codecache"):
        # Clear FX graph cache
        if hasattr(torch._inductor.codecache, "FxGraphCache"):
            torch._inductor.codecache.FxGraphCache.clear()
        # Clear PyCodeCache
        if hasattr(torch._inductor.codecache, "PyCodeCache"):
            torch._inductor.codecache.PyCodeCache.cache_clear()
        # Clear TritonFuture cache (for async compilation)
        if hasattr(torch._inductor.codecache, "TritonFuture"):
            if hasattr(torch._inductor.codecache.TritonFuture, "_compile_cache"):
                torch._inductor.codecache.TritonFuture._compile_cache.clear()
    # Clear CUDA cache
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        torch.cuda.synchronize()
    gc.collect()


class BenchmarkStreamer(BaseStreamer):
    def __init__(self, **kwargs) -> None:
        self.timestamps = []
        self.text_queue = Queue()

    def put(self, value):
        """Receives tokens and logs the timestamp of the generation."""
        self.timestamps.append(time.perf_counter())

    def end(self):
        self.timestamps.append(time.perf_counter())

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value


class BenchmarkRunner:
    """Main benchmark runner that coordinates benchmark execution."""

    def __init__(
        self, logger: logging.Logger, output_dir: str = "benchmark_results", commit_id: Optional[str] = None
    ) -> None:
        # Those stay constant for the whole run
        self.logger = logger
        self.output_dir = output_dir
        self.commit_id = get_git_revision() if commit_id is None else commit_id
        os.makedirs(self.output_dir, exist_ok=True)
        self.profile_dir = None
        # Attributes that are reset for each model
        self._setup_for = ""
        # Attributes that are reset for each run
        self.model: Optional[GenerationMixin] = None

    def cleanup(self) -> None:
        del self.model
        self.model = None
        flush_memory()

    def setup_one_run(self, model_id: str, config: BenchmarkConfig) -> None:
        # Some attributes only need to be set once per model
        if self._setup_for != model_id:
            self.tokenizer = AutoTokenizer.from_pretrained(model_id)
            # We set the EOS token to the padding token for open-ended generation
            self.tokenizer.eos_token = self.tokenizer.pad_token
            self._setup_for = model_id

        # Prepare inputs
        self.inputs = self.tokenizer(
            [DEFAULT_PROMPT for _ in range(config.batch_size)],
            return_tensors="pt",
            max_length=config.sequence_length,
            truncation=True,
            return_attention_mask=True,
        ).to(config.device)
        self.inputs["use_cache"] = True

        # Prepare generation config
        gen_config = GenerationConfig(
            do_sample=False, top_p=1.0, temperature=1.0, max_new_tokens=config.num_tokens_to_generate
        )

        # Prepare compile config
        if config.compile_mode is not None:
            gen_config.compile_config = CompileConfig(mode=config.compile_mode, options=config.compile_options)
            gen_config.cache_implementation = "static"

        # Load model
        self.logger.debug(f"Loading model {model_id} on device {config.device}...")
        dtype = getattr(torch, config.dtype.removeprefix("torch."))
        self.model = AutoModelForCausalLM.from_pretrained(
            model_id, dtype=dtype, attn_implementation=config.attn_implementation, generation_config=gen_config
        )
        self.model = self.model.eval().to(config.device)

        # Kernelize the model if needed
        if config.kernelize:
            self.model = kernelize(self.model, mode=Mode.INFERENCE)

    def run_one_benchmark(self, model_id: str, config: BenchmarkConfig, num_tokens_to_profile: int = 0) -> None:
        sdpa_ctx = nullcontext()
        if config.attn_implementation == "sdpa":
            sdpa_backend = get_sdpa_backend(config.sdpa_backend)
            sdpa_ctx = torch.nn.attention.sdpa_kernel(sdpa_backend)

        with sdpa_ctx, torch.no_grad():
            self.logger.info(f"Running benchmark scenario: {config.name}")

            # Quick validation: try one measurement first to see if this scenario works
            flush_memory()
            e2e_latency, token_generation_times, decoded_output, gpu_metrics = self.time_generate(
                max_new_tokens=1, gpu_monitor=None
            )
            if e2e_latency < 0:
                self.logger.warning(f"Skipping config {config.name}: {e2e_latency = } (no GPU monitoring)")
                return None

            # Warmup runs
            self.logger.info(f"Warming up with {config.warmup_iterations} iterations...")
            for _ in trange(config.warmup_iterations):
                _ = self.time_generate(max_new_tokens=config.num_tokens_to_generate)
            self.logger.info("Warmup over.")

            # Measurement runs
            result = BenchmarkResult()
            self.logger.info(f"Benchmarking with {config.measurement_iterations} iterations.")
            for _ in trange(config.measurement_iterations):
                e2e_latency, token_generation_times, decoded_output, gpu_metrics = self.time_generate(
                    max_new_tokens=config.num_tokens_to_generate,
                    gpu_monitor=(GPUMonitor(logger=self.logger) if config.gpu_monitoring else None),
                )
                result.accumulate(e2e_latency, token_generation_times, decoded_output, gpu_metrics)
            self.logger.info("Benchmarking done. Cleaning up.")

            # Profile if needed
            if num_tokens_to_profile > 0:
                self.profile_generate(num_tokens_to_profile, config.name)

        return {
            "metadata": BenchmarkMetadata(model_id=model_id, commit_id=self.commit_id),
            "measurements": result,
            "config": config,
        }

    def time_generate(
        self,
        max_new_tokens: int,
        gpu_monitor: Optional[GPUMonitor] = None,
    ) -> tuple[float, list[float], str, Optional[GPURawMetrics]]:
        """Time the latency of a call to model.generate() with the given (inputs) and (max_new_tokens)."""
        # Prepare gpu monitoring if needed
        if gpu_monitor is not None:
            gpu_monitor.start()
        # Prepare streamer
        streamer = BenchmarkStreamer()
        # Generate and time
        wall_time_0 = time.perf_counter()
        outputs = self.model.generate(
            **self.inputs,
            max_new_tokens=max_new_tokens,
            streamer=streamer,
        )
        wall_time_1 = time.perf_counter()
        # Stop gpu monitoring if needed
        gpu_metrics = gpu_monitor.stop_and_collect() if gpu_monitor is not None else None
        # Check if generation had the right number of tokens
        input_tokens = self.inputs["input_ids"].size(-1)
        batch_size, output_tokens = outputs.shape
        new_tokens = output_tokens - input_tokens
        if new_tokens != max_new_tokens:
            raise RuntimeError(f"Generated {new_tokens} tokens, expected {max_new_tokens}")
        # Decode outputs
        decoded_output = self.tokenizer.decode(outputs[0, input_tokens:], skip_special_tokens=True)
        # Compute intermediate quantities
        e2e_latency = wall_time_1 - wall_time_0
        token_generation_times = [t - wall_time_0 for t in streamer.timestamps[1:]]
        return e2e_latency, token_generation_times, decoded_output, gpu_metrics

    def profile_generate(self, num_tokens_to_profile: int, config_name: str) -> None:
        """Profile the latency of a call to model.generate() with the given (inputs) and (max_new_tokens)."""
        profiler = torch.profiler.profile(
            activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA],
            record_shapes=True,
        )
        with profiler as prof:
            _ = self.model.generate(
                **self.inputs,
                max_new_tokens=num_tokens_to_profile,
            )
        if self.profile_dir is None:
            self.profile_dir = self.output_dir + "_profiles"
            os.makedirs(self.profile_dir, exist_ok=True)
        prof.export_chrome_trace(f"{self.profile_dir}/{config_name}.json")

    def run_benchmarks(
        self,
        model_id: str,
        benchmark_configs: list[BenchmarkConfig],
        num_tokens_to_profile: int = 0,
        pretty_print_summary: bool = True,
    ) -> dict[str, Any]:
        all_results = {}
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        start_time = time.perf_counter()

        n_configs = len(benchmark_configs)
        for i, config in enumerate(benchmark_configs):
            # Handle SDPA backend if not determined by the config (needs to be done before skipping duplicates)
            if config.attn_implementation == "sdpa" and config.sdpa_backend is None:
                default_backend = "flash_attention"  # FIXME: torch has a _cur_sdpa_kernel_backends but it fails
                self.logger.warning(f"No SDPA backend provided, using {default_backend} instead.")
                config.sdpa_backend = default_backend

            # Skip if already run
            if config.hash in all_results:
                self.logger.info(f"Skipping duplicate config {config.name} for model {model_id} ({i + 1}/{n_configs})")
                continue

            # Otherwise, run the benchmark
            self.setup_one_run(model_id, config)
            self.logger.info(
                f"Running benchmark of model {model_id} with scenario: {config.name} ({i + 1}/{n_configs})"
            )

            # Launch benchmark in a try/except block to avoid stopping the whole run if one benchmark fails
            try:
                results = self.run_one_benchmark(model_id, config, num_tokens_to_profile)
                if results is not None:
                    all_results[config.hash] = results

            except Exception as e:
                self.logger.error(f"Error running with scenario: {config.name}:\n{repr(e)}")
            # Cleanup model and save results
            self.cleanup()
            self.save_results(model_id, all_results, timestamp=timestamp)

        if pretty_print_summary:
            print()
            print("=" * 100)
            print(f"Finished benchmarks in {time.perf_counter() - start_time:.2f} seconds")
            print(f"Total number of benchmarks: {len(all_results)}")
            if len(all_results) > 0:
                print("First run metadata:")
                first_key = list(all_results.keys())[0]
                first_metadata = all_results[first_key]["metadata"].to_dict()
                hardware_info = first_metadata.pop("hardware_info")
                pretty_print_dict(first_metadata | hardware_info, tabs=1)
            for value in all_results.values():
                print("=" * 100)
                print(f"Config: {value['config'].infer_name(compact=False)}\n")
                value["measurements"].pprint(tabs=1)
            print("=" * 100)

        return all_results

    def save_results(self, model_name: str, results: dict, timestamp: str = "") -> str:
        """Save benchmark results to JSON file."""
        # Create model-specific subdirectory
        model_name = model_name.replace("/", "_")
        model_dir = os.path.join(self.output_dir, model_name)
        os.makedirs(model_dir, exist_ok=True)

        # Create filename with timestamp
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") if not timestamp else timestamp
        filename = f"{model_name}_benchmark_{timestamp}.json"
        filepath = os.path.join(model_dir, filename)

        # Convert results to dict
        converted_results = {}
        for cfg_hash in results.keys():
            converted_results[cfg_hash] = {
                "metadata": results[cfg_hash]["metadata"].to_dict(),
                "measurements": results[cfg_hash]["measurements"].to_dict(),
                "config": results[cfg_hash]["config"].to_dict(),
            }

        # Save to JSON file
        with open(filepath, "w") as f:
            f.write(compact_json_numeric_arrays(converted_results))

        self.logger.info(f"Results saved to {filepath}")
|
||||||
|
return filepath
|
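For orientation, here is a minimal sketch of how this runner is meant to be driven, using the `BenchmarkRunner` and `BenchmarkConfig` names that appear later in this diff; the exact `BenchmarkConfig` constructor keywords and the model id are assumptions for illustration only.

```python
# Hypothetical usage sketch of the benchmark_v2 runner; not part of the diff itself.
import logging

from framework.benchmark_config import BenchmarkConfig  # constructor kwargs assumed from this diff
from framework.benchmark_runner import BenchmarkRunner

logger = logging.getLogger("benchmark_v2")
runner = BenchmarkRunner(logger, "benchmark_results", None)  # (logger, output_dir, commit_id)

# One illustrative scenario: batch of 1, 128 prompt tokens, 64 generated tokens.
config = BenchmarkConfig(
    warmup_iterations=2,
    measurement_iterations=5,
    gpu_monitoring=False,
    batch_size=1,
    sequence_length=128,
    num_tokens_to_generate=64,
)
results = runner.run_benchmarks("meta-llama/Llama-2-7b-hf", [config], num_tokens_to_profile=0)
```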
benchmark_v2/framework/data_classes.py (new file, 152 lines)
@ -0,0 +1,152 @@
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Optional, Union

import numpy as np

from .hardware_metrics import GPURawMetrics, HardwareInfo


def compute_basic_statistics(measurements: list[float]) -> dict[str, float]:
    return {
        "avg": np.mean(measurements),
        "std": np.std(measurements),
        "min": np.min(measurements),
        "med": np.median(measurements),
        "max": np.max(measurements),
        "p95": np.percentile(measurements, 95),
    }


def add_unit_to_duration(stats: dict[str, float]) -> dict[str, str]:
    for key in list(stats.keys()):
        value = stats[key]
        if value > 3600:
            stats[key] = f"{(value / 3600):.2f}hr"
        elif value > 60:
            stats[key] = f"{(value / 60):.2f}min"
        elif value > 1:
            stats[key] = f"{value:.2f}s"
        elif value > 1e-3:
            stats[key] = f"{(value * 1e3):.2f}ms"
        elif value > 1e-6:
            stats[key] = f"{(value * 1e6):.2f}us"
        else:
            stats[key] = f"{(value * 1e9):.2f}ns"
    return stats


def equalize_lengths_and_collate(stats: list[dict[str, str]]) -> list[str]:
    keys = ["avg", "std", "min", "med", "max", "p95"]
    for key in keys:
        max_length = max(len(stat[key]) for stat in stats)
        for stat in stats:
            stat[key] = stat[key].ljust(max_length, " ")
    return [" ".join([f"{key}={stat[key]}" for key in keys]) for stat in stats]


def pretty_print_dict(data: dict[str, Any], tabs: int = 0) -> None:
    max_key_length = max([len(key) for key in data.keys()])
    for key, value in data.items():
        tabs_str = " " * tabs
        padded_key = key.ljust(max_key_length + 1, ".")
        print(f"{tabs_str}{padded_key}: {value}")


@dataclass
class BenchmarkMetadata:
    """Metadata collected for each benchmark run."""

    model_id: str
    timestamp: str
    commit_id: str
    hardware_info: HardwareInfo

    def __init__(self, model_id: str, commit_id: str):
        self.model_id = model_id
        self.timestamp = datetime.utcnow().isoformat()
        self.commit_id = commit_id
        self.hardware_info = HardwareInfo()

    def to_dict(self) -> dict[str, Any]:
        return {
            "timestamp": self.timestamp,
            "commit_id": self.commit_id,
            "hardware_info": self.hardware_info.to_dict(),
        }


class BenchmarkResult:
    """Result from a series of benchmark runs."""

    def __init__(self) -> None:
        self.e2e_latency = []
        self.token_generation_times = []  # time at which each token was generated (relative to start of the generation)
        self.decoded_outputs = []
        self.gpu_metrics = []

    def accumulate(
        self,
        e2e_latency: float,
        token_generation_times: list[float],
        decoded_output: str,
        gpu_metrics: Optional[GPURawMetrics],
    ) -> None:
        self.e2e_latency.append(e2e_latency)
        self.token_generation_times.append(token_generation_times)
        self.decoded_outputs.append(decoded_output)
        self.gpu_metrics.append(gpu_metrics)

    def to_dict(self) -> dict[str, Union[None, int, float]]:
        # Save GPU metrics as None if it contains only None values
        if all(gm is None for gm in self.gpu_metrics):
            gpu_metrics = None
        else:
            gpu_metrics = [gm.to_dict() for gm in self.gpu_metrics]
        return {
            "e2e_latency": self.e2e_latency,
            "token_generation_times": self.token_generation_times,
            "decoded_outputs": self.decoded_outputs,
            "gpu_metrics": gpu_metrics,
        }

    @classmethod
    def from_dict(cls, data: dict[str, Union[None, int, float]]) -> "BenchmarkResult":
        # Handle GPU metrics, which is saved as None if it contains only None values
        if data["gpu_metrics"] is None:
            gpu_metrics = [None for _ in range(len(data["e2e_latency"]))]
        else:
            gpu_metrics = [GPURawMetrics.from_dict(gm) for gm in data["gpu_metrics"]]
        # Create a new instance and accumulate the data
        new_instance = cls()
        for i in range(len(data["e2e_latency"])):
            new_instance.accumulate(
                e2e_latency=data["e2e_latency"][i],
                token_generation_times=data["token_generation_times"][i],
                decoded_output=data["decoded_outputs"][i],
                gpu_metrics=gpu_metrics[i],
            )
        return new_instance

    def get_measured_ttft(self) -> list[float]:
        return [dt[0] for dt in self.token_generation_times if len(dt) > 0]

    def get_measured_itl(self) -> list[float]:
        return [(dt[-1] - dt[0]) / (len(dt) - 1) for dt in self.token_generation_times if len(dt) > 1]

    def pprint(self, tabs: int = 0) -> None:
        collated_stats = equalize_lengths_and_collate(
            [
                add_unit_to_duration(compute_basic_statistics(self.e2e_latency)),
                add_unit_to_duration(compute_basic_statistics(self.get_measured_ttft())),
                add_unit_to_duration(compute_basic_statistics(self.get_measured_itl())),
            ]
        )
        pretty_print_dict(
            {
                "E2E Latency": collated_stats[0],
                "Time to First Token": collated_stats[1],
                "Inter-Token Latency": collated_stats[2],
            },
            tabs=tabs,
        )
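As a quick illustration of how the helpers above compose, here is a small sketch that feeds synthetic timings into a `BenchmarkResult` and prints the derived statistics; the numbers are made up and no GPU metrics are attached.

```python
# Sketch only: accumulate two fake runs and pretty-print E2E latency, TTFT and ITL statistics.
result = BenchmarkResult()
result.accumulate(
    e2e_latency=1.23,
    token_generation_times=[0.05, 0.10, 0.15, 0.21],  # seconds since generation start
    decoded_output="hello world",
    gpu_metrics=None,
)
result.accumulate(1.19, [0.04, 0.09, 0.14, 0.20], "hello again", None)

result.pprint(tabs=1)          # statistics with units attached via add_unit_to_duration
serialized = result.to_dict()  # JSON-friendly dict; gpu_metrics collapses to None here
restored = BenchmarkResult.from_dict(serialized)
```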
benchmark_v2/framework/hardware_metrics.py (new file, 172 lines)
@ -0,0 +1,172 @@
import json
import logging
import subprocess
import sys
import threading
import time
from dataclasses import dataclass
from enum import Enum
from logging import Logger
from typing import Optional, Union

import gpustat
import psutil
import torch


# Data class to hold the hardware information
def get_device_name_and_memory_total() -> tuple[str, float]:
    """Returns the name and memory total of GPU 0."""
    device_name = torch.cuda.get_device_properties(0).name
    device_memory_total = torch.cuda.get_device_properties(0).total_memory / 1024**3
    return device_name, device_memory_total


class HardwareInfo:
    """A class to hold information about the hardware."""

    def __init__(self) -> None:
        # Retrieve GPU stats
        try:
            self.gpu_name, self.gpu_memory_total_gb = get_device_name_and_memory_total()
        except Exception:
            self.gpu_name, self.gpu_memory_total_gb = None, None
        # Retrieve python, torch and CUDA version
        self.python_version = f"{sys.version.split()[0]}"
        self.torch_version = torch.__version__
        if hasattr(torch, "cuda") and torch.cuda.is_available():
            self.cuda_version = torch.version.cuda
        else:
            self.cuda_version = None
        # Retrieve general hardware information
        self.cpu_count = psutil.cpu_count()
        self.memory_total_mb = int(psutil.virtual_memory().total / (1024 * 1024))

    def to_dict(self) -> dict[str, Union[None, int, float, str]]:
        return {
            "gpu_name": self.gpu_name,
            "gpu_memory_total_gb": self.gpu_memory_total_gb,
            "python_version": self.python_version,
            "torch_version": self.torch_version,
        }


# Functions to get information about the GPU
def get_amd_gpu_stats() -> tuple[int, float]:
    """Returns the utilization (in percent) and memory used (in GB) of an AMD GPU."""
    rocm_smi_output = subprocess.check_output(["rocm-smi", "--json", "--showuse", "--showmeminfo", "VRAM"])
    gpu_stats = json.loads(rocm_smi_output.decode("utf-8"))
    gpu_stats = [
        (card_id, stats["GPU use (%)"], stats["VRAM Total Used Memory (B)"]) for card_id, stats in gpu_stats.items()
    ]
    gpu_stats.sort(key=lambda x: x[1], reverse=True)
    return int(gpu_stats[0][1]), float(gpu_stats[0][2]) / 1024**3


def get_nvidia_gpu_stats() -> tuple[int, float]:
    """Returns the utilization (in percent) and memory used (in GB) of an NVIDIA GPU."""
    gpu_stats = gpustat.GPUStatCollection.new_query()
    gpu_stats = gpu_stats[0]
    return int(gpu_stats["utilization.gpu"]), float(gpu_stats["memory.used"]) / 1024**3


class GPUStatsCollector:
    """A class to get statistics about the GPU. It serves as a wrapper that holds the GPU total memory and its name,
    which is used to call the right function to get the utilization and memory used."""

    def __init__(self) -> None:
        self.device_name, self.device_memory_total = get_device_name_and_memory_total()
        # Monkey patch the get_utilization_and_memory_used method based on the GPU type
        if "amd" in self.device_name.lower():
            self.get_utilization_and_memory_used = get_amd_gpu_stats
        elif "nvidia" in self.device_name.lower():
            self.get_utilization_and_memory_used = get_nvidia_gpu_stats
        else:
            raise RuntimeError(f"Unsupported GPU: {self.device_name}")

    def get_measurements(self) -> tuple[int, float]:
        """Get the utilization (in percent) and memory used (in GB) of the GPU."""
        raise NotImplementedError("This method is meant to be monkey patched during __init__")


# Simple data classes to hold the raw GPU metrics
class GPUMonitoringStatus(Enum):
    """Status of GPU monitoring."""

    SUCCESS = "success"
    FAILED = "failed"
    NO_GPUS_AVAILABLE = "no_gpus_available"
    NO_SAMPLES_COLLECTED = "no_samples_collected"


@dataclass
class GPURawMetrics:
    """Raw values for GPU utilization and memory used."""

    utilization: list[float]  # in percent
    memory_used: list[float]  # in GB
    timestamps: list[float]  # in seconds
    timestamp_0: float  # in seconds
    monitoring_status: GPUMonitoringStatus

    def to_dict(self) -> dict[str, Union[None, int, float, str]]:
        return {
            "utilization": self.utilization,
            "memory_used": self.memory_used,
            "timestamps": self.timestamps,
            "timestamp_0": self.timestamp_0,
            "monitoring_status": self.monitoring_status.value,
        }


# Main class, used to monitor the GPU utilization during benchmark execution
class GPUMonitor:
    """Monitor GPU utilization during benchmark execution."""

    def __init__(self, sample_interval_sec: float = 0.1, logger: Optional[Logger] = None):
        self.sample_interval_sec = sample_interval_sec
        self.logger = logger if logger is not None else logging.getLogger(__name__)

        self.num_available_gpus = torch.cuda.device_count()
        if self.num_available_gpus == 0:
            raise RuntimeError("No GPUs detected by torch.cuda.device_count().")
        self.gpu_stats_getter = GPUStatsCollector()

    def start(self):
        """Start monitoring GPU metrics."""
        # Clear the stop event to enable monitoring
        self.stop_event = threading.Event()
        self.gpu_utilization = []
        self.gpu_memory_used = []
        self.timestamps = []
        self.thread = threading.Thread(target=self._monitor_loop)
        self.thread.start()
        self.logger.debug("GPU monitoring started")

    def stop_and_collect(self) -> GPURawMetrics:
        """Stop monitoring and return collected metrics."""
        self.stop_event.set()
        self.thread.join()
        if self.gpu_utilization:
            timestamp_0 = self.timestamps[0]
            metrics = GPURawMetrics(
                utilization=self.gpu_utilization,
                memory_used=self.gpu_memory_used,
                timestamps=[t - timestamp_0 for t in self.timestamps],
                timestamp_0=timestamp_0,
                monitoring_status=GPUMonitoringStatus.SUCCESS,
            )
            self.logger.debug(f"GPU monitoring completed: {len(self.gpu_utilization)} samples collected")
        else:
            metrics = GPURawMetrics(monitoring_status=GPUMonitoringStatus.NO_SAMPLES_COLLECTED)
        return metrics

    def _monitor_loop(self):
        """Background monitoring loop using threading.Event for communication."""
        while not self.stop_event.is_set():
            utilization, memory_used = self.gpu_stats_getter.get_utilization_and_memory_used()
            self.gpu_utilization.append(utilization)
            self.gpu_memory_used.append(memory_used)
            self.timestamps.append(time.time())
            if self.stop_event.wait(timeout=self.sample_interval_sec):
                break
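A minimal sketch of the intended monitoring flow, assuming a CUDA-visible NVIDIA or AMD GPU; the matrix multiplications stand in for the `model.generate()` call used in the benchmark:

```python
# Sketch only: sample GPU utilization and memory while some GPU work runs, then collect the metrics.
import torch

monitor = GPUMonitor(sample_interval_sec=0.1)
monitor.start()

x = torch.randn(4096, 4096, device="cuda")  # placeholder workload
for _ in range(50):
    y = x @ x
torch.cuda.synchronize()

metrics = monitor.stop_and_collect()
print(metrics.monitoring_status, len(metrics.utilization))
```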
@ -19,477 +19,93 @@ in the ./benches directory, organizing outputs into model-specific subfolders.
"""

import argparse
-import importlib.util
-import json
import logging
-import os
+import random
import sys
import uuid
-from datetime import datetime
-from pathlib import Path
-from typing import Any, Optional
+from framework.benchmark_config import BenchmarkConfig, generate_all_configs
+from framework.benchmark_runner import BenchmarkRunner


-def setup_logging(log_level: str = "INFO", enable_file_logging: bool = False) -> logging.Logger:
-    """Setup logging configuration."""
-    numeric_level = getattr(logging, log_level.upper(), None)
-    if not isinstance(numeric_level, int):
-        raise ValueError(f"Invalid log level: {log_level}")
+if __name__ == "__main__":
+    # Parse arguments
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--output-dir", type=str, default="benchmark_results", help="Output dir for benchmark results")
+    parser.add_argument("--log-level", type=str, choices=["DEBUG", "INFO", "WARNING", "ERROR"], default="INFO")
+    parser.add_argument("--model-id", type=str, help="Specific model ID to benchmark (if supported by benchmarks)")

+    parser.add_argument("--warmup", type=int, default=5, help="Number of warmup iterations")
+    parser.add_argument("--iterations", type=int, default=20, help="Number of measurement iterations")

+    parser.add_argument("--batch-size", "-b", type=int, nargs="+", help="Batch size")
+    parser.add_argument("--sequence-length", "-s", type=int, nargs="+", help="Sequence length")
+    parser.add_argument("--num-tokens-to-generate", "-n", type=int, nargs="+", help="Number of tokens to generate")

+    parser.add_argument("--num-tokens-to-profile", "-p", type=int, default=0, help="Number of tokens to profile")

+    parser.add_argument("--commit-id", type=str, help="Git commit ID (if not provided, will auto-detect from git)")
+    args = parser.parse_args()

+    # Setup logging
+    benchmark_run_uuid = str(uuid.uuid4())[:8]
+    numeric_level = getattr(logging, args.log_level.upper())

    handlers = [logging.StreamHandler(sys.stdout)]

-    if enable_file_logging:
-        handlers.append(logging.FileHandler(f"benchmark_run_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"))

    logging.basicConfig(
        level=numeric_level, format="[%(levelname)s - %(asctime)s] %(name)s: %(message)s", handlers=handlers
    )

-    return logging.getLogger(__name__)
+    logger = logging.getLogger("benchmark_v2")


-def discover_benchmarks(benches_dir: str) -> list[dict[str, Any]]:
-    """
-    Discover all benchmark modules in the benches directory.
-
-    Returns:
-        List of dictionaries containing benchmark module info
-    """
-    benchmarks = []
-    benches_path = Path(benches_dir)
-
-    if not benches_path.exists():
-        raise FileNotFoundError(f"Benches directory not found: {benches_dir}")
-
-    for py_file in benches_path.glob("*.py"):
-        if py_file.name.startswith("__"):
-            continue
-
-        module_name = py_file.stem
-
-        try:
-            # Import the module
-            spec = importlib.util.spec_from_file_location(module_name, py_file)
-            module = importlib.util.module_from_spec(spec)
-            spec.loader.exec_module(module)
-
-            # Check if it has a benchmark runner function
-            if hasattr(module, f"run_{module_name}"):
-                benchmarks.append(
-                    {
-                        "name": module_name,
-                        "path": str(py_file),
-                        "module": module,
-                        "runner_function": getattr(module, f"run_{module_name}"),
-                    }
-                )
-            elif hasattr(module, "run_benchmark"):
-                benchmarks.append(
-                    {
-                        "name": module_name,
-                        "path": str(py_file),
-                        "module": module,
-                        "runner_function": getattr(module, "run_benchmark"),
-                    }
-                )
-            else:
-                logging.warning(f"No runner function found in {py_file}")
-
-        except Exception as e:
-            logging.error(f"Failed to import {py_file}: {e}")
-
-    return benchmarks
-
-
-def run_single_benchmark(
-    benchmark_info: dict[str, Any], output_dir: str, logger: logging.Logger, **kwargs
-) -> Optional[str]:
-    """
-    Run a single benchmark and return the output file path.
-
-    Args:
-        benchmark_info: Dictionary containing benchmark module info
-        output_dir: Base output directory
-        logger: Logger instance
-        **kwargs: Additional arguments to pass to the benchmark
-
-    Returns:
-        Path to the output file if successful, None otherwise
-    """
-    benchmark_name = benchmark_info["name"]
-    runner_func = benchmark_info["runner_function"]
-
-    logger.info(f"Running benchmark: {benchmark_name}")
-
-    try:
-        # Check function signature to determine what arguments to pass
-        import inspect
-
-        sig = inspect.signature(runner_func)
-
-        # Prepare arguments based on function signature
-        func_kwargs = {"logger": logger, "output_dir": output_dir}
-
-        # Add other kwargs if the function accepts them
-        for param_name in sig.parameters:
-            if param_name in kwargs:
-                func_kwargs[param_name] = kwargs[param_name]
-
-        # Filter kwargs to only include parameters the function accepts
-        # If function has **kwargs, include all provided kwargs
-        has_var_kwargs = any(param.kind == param.VAR_KEYWORD for param in sig.parameters.values())
-        if has_var_kwargs:
-            valid_kwargs = {**func_kwargs, **kwargs}
-        else:
-            valid_kwargs = {k: v for k, v in func_kwargs.items() if k in sig.parameters}
-
-        # Run the benchmark
-        result = runner_func(**valid_kwargs)
-
-        if isinstance(result, str):
-            # Function returned a file path
-            return result
-        else:
-            logger.info(f"Benchmark {benchmark_name} completed successfully")
-            return "completed"
-
-    except Exception as e:
-        logger.error(f"Benchmark {benchmark_name} failed: {e}")
-        import traceback
-
-        logger.debug(traceback.format_exc())
-        return None
-
-
-def generate_summary_report(
-    output_dir: str,
-    benchmark_results: dict[str, Any],
-    logger: logging.Logger,
-    benchmark_run_uuid: Optional[str] = None,
-) -> str:
-    """Generate a summary report of all benchmark runs."""
-    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
-    summary_file = os.path.join(output_dir, f"benchmark_summary_{timestamp}.json")
-
-    summary_data = {
-        "run_metadata": {
-            "timestamp": datetime.utcnow().isoformat(),
-            "benchmark_run_uuid": benchmark_run_uuid,
-            "total_benchmarks": len(benchmark_results),
-            "successful_benchmarks": len([r for r in benchmark_results.values() if r is not None]),
-            "failed_benchmarks": len([r for r in benchmark_results.values() if r is None]),
-        },
-        "benchmark_results": benchmark_results,
-        "output_directory": output_dir,
-    }
-
-    with open(summary_file, "w") as f:
-        json.dump(summary_data, f, indent=2, default=str)
-
-    logger.info(f"Summary report saved to: {summary_file}")
-    return summary_file
-
-
-def upload_results_to_hf_dataset(
-    output_dir: str,
-    summary_file: str,
-    dataset_name: str,
-    run_id: Optional[str] = None,
-    token: Optional[str] = None,
-    logger: Optional[logging.Logger] = None,
-) -> Optional[str]:
-    """
-    Upload benchmark results to a HuggingFace Dataset.
-    Based on upload_collated_report() from utils/collated_reports.py
-    Args:
-        output_dir: Local output directory containing results
-        summary_file: Path to the summary file
-        dataset_name: Name of the HuggingFace dataset to upload to
-        run_id: Unique run identifier (if None, will generate one)
-        token: HuggingFace token for authentication (if None, will use environment variables)
-        logger: Logger instance
-    Returns:
-        The run_id used for the upload, None if upload failed
-    """
-    if logger is None:
-        logger = logging.getLogger(__name__)
-
-    import os
-
-    from huggingface_hub import HfApi
-
-    api = HfApi()
-
-    if run_id is None:
-        github_run_number = os.getenv("GITHUB_RUN_NUMBER")
-        github_run_id = os.getenv("GITHUB_RUN_ID")
-        if github_run_number and github_run_id:
-            run_id = f"{github_run_number}-{github_run_id}"
-
-    date_folder = datetime.now().strftime("%Y-%m-%d")
-
-    github_event_name = os.getenv("GITHUB_EVENT_NAME")
-    if github_event_name != "schedule":
-        # Non-scheduled runs go under a runs subfolder
-        repo_path = f"{date_folder}/runs/{run_id}/benchmark_results"
-    else:
-        # Scheduled runs go directly under the date
-        repo_path = f"{date_folder}/{run_id}/benchmark_results"
-
-    logger.info(f"Uploading benchmark results to dataset '{dataset_name}' at path '{repo_path}'")
-
-    try:
-        # Upload all files in the output directory
-        from pathlib import Path
-
-        output_path = Path(output_dir)
-
-        for file_path in output_path.rglob("*"):
-            if file_path.is_file():
-                # Calculate relative path from output_dir
-                relative_path = file_path.relative_to(output_path)
-                path_in_repo = f"{repo_path}/{relative_path}"
-
-                logger.debug(f"Uploading {file_path} to {path_in_repo}")
-
-                api.upload_file(
-                    path_or_fileobj=str(file_path),
-                    path_in_repo=path_in_repo,
-                    repo_id=dataset_name,
-                    repo_type="dataset",
-                    token=token,
-                    commit_message=f"Upload benchmark results for run {run_id}",
-                )
-
-        logger.info(
-            f"Successfully uploaded results to: https://huggingface.co/datasets/{dataset_name}/tree/main/{repo_path}"
-        )
-
-        return run_id
-
-    except Exception as upload_error:
-        logger.error(f"Failed to upload results: {upload_error}")
-        import traceback
-
-        logger.debug(traceback.format_exc())
-        return None
-
-
-def main():
-    """Main entry point for the benchmarking script."""
-    # Generate a unique UUID for this benchmark run
-    benchmark_run_uuid = str(uuid.uuid4())[:8]
-
-    parser = argparse.ArgumentParser(
-        description="Run all benchmarks in the ./benches directory",
-        epilog="""
-Examples:
-  # Run all available benchmarks
-  python3 run_benchmarks.py
-
-  # Run with specific model and upload to HuggingFace Dataset
-  python3 run_benchmarks.py --model-id meta-llama/Llama-2-7b-hf --upload-to-hf username/benchmark-results
-
-  # Run with custom run ID and upload to HuggingFace Dataset
-  python3 run_benchmarks.py --run-id experiment_v1 --upload-to-hf org/benchmarks
-
-  # Run only specific benchmarks with file logging
-  python3 run_benchmarks.py --include llama --enable-file-logging
-        """,  # noqa: W293
-        formatter_class=argparse.RawDescriptionHelpFormatter,
-    )
-
-    parser.add_argument(
-        "--output-dir",
-        type=str,
-        default="benchmark_results",
-        help="Base output directory for benchmark results (default: benchmark_results)",
-    )
-
-    parser.add_argument(
-        "--benches-dir",
-        type=str,
-        default="./benches",
-        help="Directory containing benchmark implementations (default: ./benches)",
-    )
-
-    parser.add_argument(
-        "--log-level",
-        type=str,
-        choices=["DEBUG", "INFO", "WARNING", "ERROR"],
-        default="INFO",
-        help="Logging level (default: INFO)",
-    )
-
-    parser.add_argument("--model-id", type=str, help="Specific model ID to benchmark (if supported by benchmarks)")
-
-    parser.add_argument("--warmup-iterations", type=int, default=3, help="Number of warmup iterations (default: 3)")
-
-    parser.add_argument(
-        "--measurement-iterations", type=int, default=5, help="Number of measurement iterations (default: 5)"
-    )
-
-    parser.add_argument(
-        "--num-tokens-to-generate",
-        type=int,
-        default=100,
-        help="Number of tokens to generate in benchmarks (default: 100)",
-    )
-
-    parser.add_argument("--include", type=str, nargs="*", help="Only run benchmarks matching these names")
-
-    parser.add_argument("--exclude", type=str, nargs="*", help="Exclude benchmarks matching these names")
-
-    parser.add_argument("--enable-file-logging", action="store_true", help="Enable file logging (disabled by default)")
-
-    parser.add_argument(
-        "--commit-id", type=str, help="Git commit ID for metadata (if not provided, will auto-detect from git)"
-    )
-
-    parser.add_argument(
-        "--push-to-hub",
-        type=str,
-        help="Upload results to HuggingFace Dataset (provide dataset name, e.g., 'username/benchmark-results')",
-    )
-
-    parser.add_argument(
-        "--run-id", type=str, help="Custom run ID for organizing results (if not provided, will generate a unique ID)"
-    )
-
-    parser.add_argument(
-        "--token",
-        type=str,
-        help="HuggingFace token for dataset uploads (if not provided, will use HF_TOKEN environment variable)",
-    )
-
-    args = parser.parse_args()
-
-    # Setup logging
-    logger = setup_logging(args.log_level, args.enable_file_logging)
-
    logger.info("Starting benchmark discovery and execution")
    logger.info(f"Benchmark run UUID: {benchmark_run_uuid}")
    logger.info(f"Output directory: {args.output_dir}")
-    logger.info(f"Benches directory: {args.benches_dir}")

-    # Create output directory
-    os.makedirs(args.output_dir, exist_ok=True)
+    # Error out if one of the arguments is not provided
+    if len(args.batch_size) * len(args.sequence_length) * len(args.num_tokens_to_generate) == 0:
+        raise ValueError(
+            "At least one of the arguments --batch-size, --sequence-length, or --num-tokens-to-generate is required"
+        )

-    try:
-        # Discover benchmarks
-        benchmarks = discover_benchmarks(args.benches_dir)
-        logger.info(f"Discovered {len(benchmarks)} benchmark(s): {[b['name'] for b in benchmarks]}")
+    # If there is only one (batch_size, sequence_length, num_tokens_to_generate), we benchmark across configs
+    elif len(args.batch_size) * len(args.sequence_length) * len(args.num_tokens_to_generate) == 1:
+        benchmark_configs = generate_all_configs(
+            warmup_iterations=args.warmup,
+            measurement_iterations=args.iterations,
+            batch_size=args.batch_size[0],
+            sequence_length=args.sequence_length[0],
+            num_tokens_to_generate=args.num_tokens_to_generate[0],
+        )
+        random.shuffle(benchmark_configs)

-        if not benchmarks:
-            logger.warning("No benchmarks found!")
-            return 1
-
-        # Filter benchmarks based on include/exclude
-        filtered_benchmarks = benchmarks
-
-        if args.include:
-            filtered_benchmarks = [
-                b for b in filtered_benchmarks if any(pattern in b["name"] for pattern in args.include)
-            ]
-            logger.info(f"Filtered to include: {[b['name'] for b in filtered_benchmarks]}")
-
-        if args.exclude:
-            filtered_benchmarks = [
-                b for b in filtered_benchmarks if not any(pattern in b["name"] for pattern in args.exclude)
-            ]
-            logger.info(f"After exclusion: {[b['name'] for b in filtered_benchmarks]}")
-
-        if not filtered_benchmarks:
-            logger.warning("No benchmarks remaining after filtering!")
-            return 1
-
-        # Prepare common kwargs for benchmarks
-        benchmark_kwargs = {
-            "warmup_iterations": args.warmup_iterations,
-            "measurement_iterations": args.measurement_iterations,
-            "num_tokens_to_generate": args.num_tokens_to_generate,
+    # Otherwise, we benchmark across all combinations of dimensions
+    else:
+        kwargs = {
+            "warmup_iterations": args.warmup,
+            "measurement_iterations": args.iterations,
+            "gpu_monitoring": False,
+            "batch_size": args.batch_size[0],
+            "sequence_length": args.sequence_length[0],
+            "num_tokens_to_generate": args.num_tokens_to_generate[0],
+            "attn_implementation": "flex_attention",
+            "sdpa_backend": None,
+            "compile_mode": "default",
+            "kernelize": False,
        }
+        benchmark_configs = []
+        for num_tokens_to_generate in args.num_tokens_to_generate:
+            for sequence_length in args.sequence_length:
+                for batch_size in args.batch_size:
+                    kwargs["batch_size"] = batch_size
+                    kwargs["sequence_length"] = sequence_length
+                    kwargs["num_tokens_to_generate"] = num_tokens_to_generate
+                    benchmark_configs.append(BenchmarkConfig(**kwargs))

-        if args.model_id:
-            benchmark_kwargs["model_id"] = args.model_id
-
-        # Add commit_id if provided
-        if args.commit_id:
-            benchmark_kwargs["commit_id"] = args.commit_id
-
-        # Run benchmarks
+    runner = BenchmarkRunner(logger, args.output_dir, args.commit_id)
+    results = runner.run_benchmarks(
+        args.model_id,
+        benchmark_configs[:3],
+        args.num_tokens_to_profile,
+        pretty_print_summary=True,
+    )
+    # runner.save_results(args.model_id, results)
-        benchmark_results = {}
-        successful_count = 0
-
-        for benchmark_info in filtered_benchmarks:
-            result = run_single_benchmark(benchmark_info, args.output_dir, logger, **benchmark_kwargs)
-
-            benchmark_results[benchmark_info["name"]] = result
-
-            if result is not None:
-                successful_count += 1
-
-        # Generate summary report
-        summary_file = generate_summary_report(args.output_dir, benchmark_results, logger, benchmark_run_uuid)
-
-        # Upload results to HuggingFace Dataset if requested
-        upload_run_id = None
-        if args.push_to_hub:
-            logger.info("=" * 60)
-            logger.info("UPLOADING TO HUGGINGFACE DATASET")
-            logger.info("=" * 60)
-            # Use provided run_id or fallback to benchmark run UUID
-            effective_run_id = args.run_id or benchmark_run_uuid
-            upload_run_id = upload_results_to_hf_dataset(
-                output_dir=args.output_dir,
-                summary_file=summary_file,
-                dataset_name=args.push_to_hub,
-                run_id=effective_run_id,
-                token=args.token,
-                logger=logger,
-            )
-            if upload_run_id:
-                logger.info(f"Upload completed with run ID: {upload_run_id}")
-            else:
-                logger.warning("Upload failed - continuing with local results")
-
-        # Final summary
-        total_benchmarks = len(filtered_benchmarks)
-        failed_count = total_benchmarks - successful_count
-
-        logger.info("=" * 60)
-        logger.info("BENCHMARK RUN SUMMARY")
-        logger.info("=" * 60)
-        logger.info(f"Total benchmarks: {total_benchmarks}")
-        logger.info(f"Successful: {successful_count}")
-        logger.info(f"Failed: {failed_count}")
-        logger.info(f"Output directory: {args.output_dir}")
-        logger.info(f"Summary report: {summary_file}")
-
-        if args.push_to_hub:
-            if upload_run_id:
-                logger.info(f"HuggingFace Dataset: {args.push_to_hub}")
-                logger.info(f"Run ID: {upload_run_id}")
-                logger.info(
-                    f"View results: https://huggingface.co/datasets/{args.push_to_hub}/tree/main/{datetime.now().strftime('%Y-%m-%d')}/runs/{upload_run_id}"
-                )
-            else:
-                logger.warning("Upload to HuggingFace Dataset failed")
-
-        if failed_count > 0:
-            logger.warning(f"{failed_count} benchmark(s) failed. Check logs for details.")
-            return 1
-        else:
-            logger.info("All benchmarks completed successfully!")
-            return 0
-
-    except Exception as e:
-        logger.error(f"Benchmark run failed: {e}")
-        import traceback
-
-        logger.debug(traceback.format_exc())
-        return 1
-
-
-if __name__ == "__main__":
-    sys.exit(main())
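The rewritten entry point above takes the sweep dimensions on the command line; a typical invocation (illustrative values only, flags taken from the parser above) might look like the following, wrapped in `subprocess` so the snippet stays in Python:

```python
# Sketch only: launch run_benchmarks.py with one batch size, two sequence lengths and one
# generation length, which exercises the cross-product branch shown above.
import subprocess

subprocess.run(
    [
        "python3", "run_benchmarks.py",
        "--model-id", "meta-llama/Llama-2-7b-hf",
        "--batch-size", "1",
        "--sequence-length", "128", "2048",
        "--num-tokens-to-generate", "128",
        "--warmup", "2",
        "--iterations", "5",
    ],
    check=True,
)
```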
@ -54,7 +54,6 @@ NOT_DEVICE_TESTS = {
    "test_gradient_checkpointing_backward_compatibility",
    "test_gradient_checkpointing_enable_disable",
    "test_torch_save_load",
-    "test_initialization",
    "test_forward_signature",
    "test_model_get_set_embeddings",
    "test_model_main_input_name",
@ -90,6 +89,8 @@ def pytest_configure(config):
    config.addinivalue_line("markers", "torch_compile_test: mark test which tests torch compile functionality")
    config.addinivalue_line("markers", "torch_export_test: mark test which tests torch export functionality")

+    os.environ["DISABLE_SAFETENSORS_CONVERSION"] = "true"


def pytest_collection_modifyitems(items):
    for item in items:
@ -12,8 +12,6 @@ SHELL ["sh", "-lc"]
ARG PYTORCH='2.8.0'
# Example: `cu102`, `cu113`, etc.
ARG CUDA='cu126'
-# Disable kernel mapping for now until all tests pass
-ENV DISABLE_KERNEL_MAPPING=1

RUN apt update
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg git-lfs
@ -12,8 +12,6 @@ SHELL ["sh", "-lc"]
ARG PYTORCH='2.8.0'
# Example: `cu102`, `cu113`, etc.
ARG CUDA='cu126'
-# Disable kernel mapping for quantization tests
-ENV DISABLE_KERNEL_MAPPING=1

RUN apt update
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg
@ -82,6 +80,9 @@ RUN python3 -m pip uninstall -y flash-attn
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop

+# Add fp-quant for quantization testing
+RUN python3 -m pip install --no-cache-dir "fp-quant>=0.2.0"

# Low usage or incompatible lib, will enable later on

# # Add aqlm for quantization testing
@ -102,7 +103,3 @@ RUN cd transformers && python3 setup.py develop
# # TODO: create a new workflow to test them
# RUN python3 -m pip install --no-cache-dir flute-kernel==0.4.1
# RUN python3 -m pip install --no-cache-dir git+https://github.com/Dao-AILab/fast-hadamard-transform.git
-
-# Add fp-quant for quantization testing
-# Requires py3.11 but our CI runs on 3.9
-# RUN python3 -m pip install --no-cache-dir "fp-quant>=0.1.6"
@ -52,7 +52,7 @@
<figcaption class="mt-2 text-center text-sm text-gray-500">The image shows a diagram of the Swin model's stages.</figcaption>
</div>

-[`AutoBackbone`] lets you use pretrained models as backbones to get feature maps from different stages of the backbone. You should specify one of the following parameters in [`~PretrainedConfig.from_pretrained`]:
+[`AutoBackbone`] lets you use pretrained models as backbones to get feature maps from different stages of the backbone. You should specify one of the following parameters in [`~PreTrainedConfig.from_pretrained`]:

* `out_indices` is the index of the layer you'd like to get the feature map from
* `out_features` is the name of the layer you'd like to get the feature map from
@ -54,19 +54,19 @@ DistilBertConfig {

```

-The attributes of a pretrained model can be modified in the [`~PretrainedConfig.from_pretrained`] function:
+The attributes of a pretrained model can be modified in the [`~PreTrainedConfig.from_pretrained`] function:

```py
>>> my_config = DistilBertConfig.from_pretrained("distilbert/distilbert-base-uncased", activation="relu", attention_dropout=0.4)
```

-Once you are happy with your model configuration, you can save it with [`~PretrainedConfig.save_pretrained`]. Your configuration file is stored as a JSON file in the specified save directory:
+Once you are happy with your model configuration, you can save it with [`~PreTrainedConfig.save_pretrained`]. Your configuration file is stored as a JSON file in the specified save directory:

```py
>>> my_config.save_pretrained(save_directory="./your_model_save_path")
```

-To reuse the configuration file, load it with [`~PretrainedConfig.from_pretrained`]:
+To reuse the configuration file, load it with [`~PreTrainedConfig.from_pretrained`]:

```py
>>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json")
```
@ -20,11 +20,11 @@
In our example, we will tweak a few arguments of the ResNet class that we might want to adjust. Different configurations will give us the different possible types of ResNets. We then store those arguments after checking their validity.

```python
-from transformers import PretrainedConfig
+from transformers import PreTrainedConfig
from typing import List


-class ResnetConfig(PretrainedConfig):
+class ResnetConfig(PreTrainedConfig):
    model_type = "resnet"

    def __init__(
@ -58,11 +58,11 @@ class ResnetConfig(PretrainedConfig):
```
The three important things to remember when writing your own configuration are:

-- you have to inherit from `PretrainedConfig`,
+- you have to inherit from `PreTrainedConfig`,
-- the `__init__` of your `PretrainedConfig` must accept any extra kwargs,
+- the `__init__` of your `PreTrainedConfig` must accept any extra kwargs,
- those extra kwargs need to be passed to the superclass `__init__`.

-The inheritance is to make sure you get all the functionality from the 🤗 Transformers library, while the other two constraints come from the fact that a `PretrainedConfig` has more fields than the ones you are setting. When reloading a config with the `from_pretrained` method, those fields need to be accepted by your config and then sent to the superclass.
+The inheritance is to make sure you get all the functionality from the 🤗 Transformers library, while the other two constraints come from the fact that a `PreTrainedConfig` has more fields than the ones you are setting. When reloading a config with the `from_pretrained` method, those fields need to be accepted by your config and then sent to the superclass.

Defining a `model_type` for your configuration (here `model_type="resnet"`) is not mandatory, unless you want to
register your model with the auto classes (see the last section).
@ -82,7 +82,7 @@ resnet50d_config.save_pretrained("custom-resnet")
resnet50d_config = ResnetConfig.from_pretrained("custom-resnet")
```

-You can also use any other method of the [`PretrainedConfig`] class, like [`~PretrainedConfig.push_to_hub`] to directly upload your config to the Hub.
+You can also use any other method of the [`PreTrainedConfig`] class, like [`~PreTrainedConfig.push_to_hub`] to directly upload your config to the Hub.

## Writing a custom model

@ -60,10 +60,10 @@ pip install transformers bitsandbytes>=0.39.0 -q
First, you need to load the model.

```py
->>> from transformers import AutoModelForCausalLM
+>>> from transformers import AutoModelForCausalLM, BitsAndBytesConfig

>>> model = AutoModelForCausalLM.from_pretrained(
-...     "mistralai/Mistral-7B-v0.1", device_map="auto", load_in_4bit=True
+...     "mistralai/Mistral-7B-v0.1", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
... )
```

@ -113,12 +113,12 @@ pip install transformers bitsandbytes>=0.39.0 -q
There are many [generation strategies](generation_strategies), and sometimes the default values may not be appropriate for your use case. If your outputs aren't aligned with what you're expecting, we've created a list of the most common pitfalls and how to avoid them.

```py
->>> from transformers import AutoModelForCausalLM, AutoTokenizer
+>>> from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
>>> tokenizer.pad_token = tokenizer.eos_token  # Most LLMs don't have a pad token by default
>>> model = AutoModelForCausalLM.from_pretrained(
-...     "mistralai/Mistral-7B-v0.1", device_map="auto", load_in_4bit=True
+...     "mistralai/Mistral-7B-v0.1", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
... )
```

@ -192,7 +192,7 @@ LLMs are [decoder-only architectures](https://huggingface.co/l
```python
>>> tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
>>> model = AutoModelForCausalLM.from_pretrained(
-...     "HuggingFaceH4/zephyr-7b-alpha", device_map="auto", load_in_4bit=True
+...     "HuggingFaceH4/zephyr-7b-alpha", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
... )
>>> set_seed(0)
>>> prompt = """How many helicopters can a human eat in one sitting? Reply as a thug."""

@ -231,7 +231,7 @@ flush()
Let's see how much peak GPU memory 4-bit quantization saves. The model can be quantized to 4-bit with the same API as before - this time by passing `load_in_4bit=True` instead of `load_in_8bit=True`.

```python
-model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", load_in_4bit=True, pad_token_id=0)
+model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", quantization_config=BitsAndBytesConfig(load_in_4bit=True), pad_token_id=0)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

@ -329,174 +329,6 @@ $$ \textbf{O}_i \leftarrow s^a_{ij} * \textbf{O}_i + s^b_{ij} * \mathbf{V}_{j} \

Let's look at a practical example.

-Our OctoCoder model now gets a significantly longer input prompt which includes a so-called *system prompt*. System prompts are used to steer the LLM into a better assistant that is tailored to the users' task.
-In the following, we use a system prompt that will make OctoCoder a better coding assistant.
-
-```python
-system_prompt = """Below are a series of dialogues between various people and an AI technical assistant.
-The assistant tries to be helpful, polite, honest, sophisticated, emotionally aware, and humble but knowledgeable.
-The assistant is happy to help with code questions and will do their best to understand exactly what is needed.
-It also tries to avoid giving false or misleading information, and it caveats when it isn't entirely sure about the right answer.
-That said, the assistant is practical really does its best, and doesn't let caution get too much in the way of being useful.
-
-The Starcoder models are a series of 15.5B parameter models trained on 80+ programming languages from The Stack (v1.2) (excluding opt-out requests).
-The model uses Multi Query Attention, was trained using the Fill-in-the-Middle objective, and with 8,192 tokens context window for a trillion tokens of heavily deduplicated data.
-
------
-
-Question: Write a function that takes two lists and returns a list that has alternating elements from each input list.
-
-Answer: Sure. Here is a function that does that.
-
-def alternating(list1, list2):
-    results = []
-    for i in range(len(list1)):
-        results.append(list1[i])
-        results.append(list2[i])
-    return results
-
-Question: Can you write some test cases for this function?
-
-Answer: Sure, here are some tests.
-
-assert alternating([10, 20, 30], [1, 2, 3]) == [10, 1, 20, 2, 30, 3]
-assert alternating([True, False], [4, 5]) == [True, 4, False, 5]
-assert alternating([], []) == []
-
-Question: Modify the function so that it returns all input elements when the lists have uneven length. The elements from the longer list should be at the end.
-
-Answer: Here is the modified function.
-
-def alternating(list1, list2):
-    results = []
-    for i in range(min(len(list1), len(list2))):
-        results.append(list1[i])
-        results.append(list2[i])
-    if len(list1) > len(list2):
-        results.extend(list1[i+1:])
-    else:
-        results.extend(list2[i+1:])
-    return results
------
-"""
-```
-
-For demonstration purposes, we duplicate the system prompt ten times so that the input length is long enough to observe Flash Attention's memory savings.
-We append the original text prompt "Question: Please write a function in Python that converts bytes to gigabytes.
-
-```python
-long_prompt = 10 * system_prompt + prompt
-```
-
-We instantiate our model again in bfloat16 precision.
-
-```python
-model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", dtype=torch.bfloat16, device_map="auto")
-tokenizer = AutoTokenizer.from_pretrained("bigcode/octocoder")
-
-pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
-```
-
-Let's now run the model just like before *without Flash Attention* and measure the peak GPU memory requirement and inference time.
-
-```python
-import time
-
-start_time = time.time()
-result = pipe(long_prompt, max_new_tokens=60)[0]["generated_text"][len(long_prompt):]
-
-print(f"Generated in {time.time() - start_time} seconds.")
-result
-```
-
-**Output**:
-```
-Generated in 10.96854019165039 seconds.
-Sure. Here is a function to do that.
-
-def bytes_to_giga(bytes):
-    return bytes / 1024 / 1024 / 1024
-
-Answer: Sure. Here is a function to do that.
-
-def
-```
-
-We get the same output as before, but this time the model repeats the answer several times until it is cut off at 60 tokens. This is not surprising since we repeated the system prompt ten times for demonstration purposes, thereby prompting the model to repeat itself.
-
-**Note** The system prompt should not be repeated ten times in real-world applications - once is enough!
-
-Let's measure the peak GPU memory requirement.
-
-```python
-bytes_to_giga_bytes(torch.cuda.max_memory_allocated())
-```
-
-**Output**:
-```
-37.668193340301514
-```
-
-As we can see, the peak GPU memory requirement is now significantly higher than it was in the beginning, which is largely due to the longer input sequence. The generation also takes a little over a minute now.
-
-We call `flush()` to free GPU memory for our next experiment.
-
-```python
-flush()
-```
-
-For comparison, let's run the same function, but with Flash Attention enabled instead.
-To do so, we convert the model to [BetterTransformer](https://huggingface.co/docs/optimum/bettertransformer/overview), which enables PyTorch's [SDPA self-attention](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention), which in turn is able to use Flash Attention.
-
-```python
-model.to_bettertransformer()
-```
-
-Now we run the exact same code snippet as before, and under the hood Transformers will use Flash Attention.
-
-```py
-start_time = time.time()
-with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
-    result = pipe(long_prompt, max_new_tokens=60)[0]["generated_text"][len(long_prompt):]
-
-print(f"Generated in {time.time() - start_time} seconds.")
-result
-```
-
-**Output**:
-```
-Generated in 3.0211617946624756 seconds.
-Sure. Here is a function to do that.
-
-def bytes_to_giga(bytes):
-    return bytes / 1024 / 1024 / 1024
-
-Answer: Sure. Here is a function to do that.
-
-def
-```
-
-We get exactly the same result as before, but can observe a very significant speed-up thanks to Flash Attention.
-
-Let's measure the memory consumption one last time.
-
-```python
-bytes_to_giga_bytes(torch.cuda.max_memory_allocated())
-```
-
-**Output**:
-```
-32.617331981658936
-```
-
-And we're almost back to our original 29GB peak GPU memory.
-We can observe that we only use roughly 100MB more GPU memory when passing a very long input sequence with Flash Attention compared to passing a short input sequence as we did in the beginning.
-
-```py
-flush()
-```
-
-For more information on how to use Flash Attention, please have a look at [this doc page](https://huggingface.co/docs/transformers/en/perf_infer_gpu_one#flashattention-2).

## 3. Architectural Innovations

So far, we have looked into improving computational and memory efficiency by:
@ -640,7 +472,7 @@ for _ in range(5):
  next_token_id = torch.argmax(next_logits, dim=-1)

  print("shape of input_ids", next_token_id.shape)
-  print("length of key-value cache", len(past_key_values[0][0])) # past_key_values are of shape [num_layers, 0 for k, 1 for v, batch_size, length, hidden_dim]
+  print("length of key-value cache", past_key_values.get_seq_length()) # past_key_values are of shape [num_layers, 0 for k, 1 for v, batch_size, length, hidden_dim]
  generated_tokens.append(next_token_id.item())

generated_text = tokenizer.batch_decode(generated_tokens)
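For readers following along, the loop this hunk comes from manually feeds the growing cache back into the model; a minimal self-contained sketch of the same pattern under the new `Cache` API (model choice, prompt, and token count are illustrative):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache

tokenizer = AutoTokenizer.from_pretrained("bigcode/octocoder")
model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", dtype=torch.bfloat16, device_map="auto")

prompt = "Question: Please write a function in Python that transforms bytes to Giga bytes.\n\nAnswer:"
next_token_id = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)

past_key_values = DynamicCache()
generated_tokens = []

for _ in range(5):
    outputs = model(next_token_id, past_key_values=past_key_values, use_cache=True)
    past_key_values = outputs.past_key_values
    next_token_id = torch.argmax(outputs.logits[:, -1:], dim=-1)

    # The Cache object reports its own length; no tuple indexing needed.
    print("length of key-value cache", past_key_values.get_seq_length())
    generated_tokens.append(next_token_id.item())

print(tokenizer.batch_decode(generated_tokens))
```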
@ -93,7 +93,6 @@ python examples/pytorch/summarization/run_summarization.py \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
-    --overwrite_output_dir \
    --predict_with_generate
```

@ -117,7 +116,6 @@ torchrun \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
-    --overwrite_output_dir \
    --predict_with_generate
```

@ -140,7 +138,6 @@ python xla_spawn.py --num_cores 8 \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
-    --overwrite_output_dir \
    --predict_with_generate
```

@ -197,7 +194,6 @@ python examples/pytorch/summarization/run_summarization.py \
    --summary_column summary_column_name \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
-    --overwrite_output_dir \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --predict_with_generate

@ -225,7 +221,6 @@ python examples/pytorch/summarization/run_summarization.py \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
-    --overwrite_output_dir \
    --predict_with_generate
```

@ -239,8 +234,6 @@ examples/pytorch/summarization/run_summarization.py -h

Another helpful option to enable is resuming training from a previous checkpoint. This ensures you can pick up where you left off without starting over if your training gets interrupted. There are two methods to resume training from a checkpoint.

-The first method uses the `output_dir previous_output_dir` argument to resume training from the latest checkpoint stored in `output_dir`. In this case, you should remove `overwrite_output_dir`:

```bash
python examples/pytorch/summarization/run_summarization.py
    --model_name_or_path google-t5/t5-small \
@ -252,24 +245,6 @@ python examples/pytorch/summarization/run_summarization.py
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
-    --output_dir previous_output_dir \
-    --predict_with_generate
-```
-
-The second method uses the `resume_from_checkpoint path_to_specific_checkpoint` argument to resume training from a specific checkpoint folder.
-
-```bash
-python examples/pytorch/summarization/run_summarization.py
-    --model_name_or_path google-t5/t5-small \
-    --do_train \
-    --do_eval \
-    --dataset_name cnn_dailymail \
-    --dataset_config "3.0.0" \
-    --source_prefix "summarize: " \
-    --output_dir /tmp/tst-summarization \
-    --per_device_train_batch_size=4 \
-    --per_device_eval_batch_size=4 \
-    --overwrite_output_dir \
    --resume_from_checkpoint path_to_specific_checkpoint \
    --predict_with_generate
```

@ -301,6 +276,5 @@ python examples/pytorch/summarization/run_summarization.py
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
-    --overwrite_output_dir \
    --predict_with_generate
```
@ -611,7 +611,6 @@ accelerate launch \
    --learning_rate 5e-5 \
    --num_train_epochs 3 \
    --output_dir /tmp/$TASK_NAME/ \
-    --overwrite_output_dir
```

You can also specify the parameters from the `config_file.yaml` file directly on the command line:

@ -634,7 +633,6 @@ accelerate launch --num_processes=2 \
    --learning_rate 5e-5 \
    --num_train_epochs 3 \
    --output_dir /tmp/$TASK_NAME/ \
-    --overwrite_output_dir
```

Check out the [Launching your Accelerate scripts](https://huggingface.co/docs/accelerate/basic_tutorials/launch) tutorial to learn more about `accelerate_launch` and custom configurations.
@ -53,7 +53,7 @@ Lassen Sie uns daher ein wenig tiefer in das allgemeine Design der Bibliothek ei

### Overview of the models

To successfully add a model, it is important to understand the interaction between your model and its configuration,
-[`PreTrainedModel`] and [`PretrainedConfig`]. As an example, we will call the model
+[`PreTrainedModel`] and [`PreTrainedConfig`]. As an example, we will call the model
to be added to 🤗 Transformers `BrandNewBert`.

Let's take a look:

@ -81,10 +81,10 @@ model.config # model has access to its config
```

Similar to the model, the configuration inherits basic serialization and deserialization functionality from
-[`PretrainedConfig`]. Note that the configuration and the model are always serialized into two
+[`PreTrainedConfig`]. Note that the configuration and the model are always serialized into two
different formats - the model into a *pytorch_model.bin* file and the configuration into a *config.json* file. Calling
[`~PreTrainedModel.save_pretrained`] automatically calls
-[`~PretrainedConfig.save_pretrained`], so that both the model and the configuration are saved.
+[`~PreTrainedConfig.save_pretrained`], so that both the model and the configuration are saved.


### Code style
@ -78,10 +78,10 @@ Wenn Sie an der grundlegenden Verwendung von LLMs interessiert sind, ist unsere

First, you need to load the model.

```py
->>> from transformers import AutoModelForCausalLM
+>>> from transformers import AutoModelForCausalLM, BitsAndBytesConfig

>>> model = AutoModelForCausalLM.from_pretrained(
-...     "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True
+...     "openlm-research/open_llama_7b", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
... )
```

@ -119,12 +119,12 @@ Und das war's! Mit ein paar Zeilen Code können Sie sich die Macht eines LLM zun

There are many [generation strategies](generation_strategies), and sometimes the defaults may not be suitable for your use case. If your outputs don't match what you expect, we have created a list of the most common pitfalls and how to avoid them.

```py
->>> from transformers import AutoModelForCausalLM, AutoTokenizer
+>>> from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
>>> tokenizer.pad_token = tokenizer.eos_token  # Llama has no pad token by default
>>> model = AutoModelForCausalLM.from_pretrained(
-...     "openlm-research/open_llama_7b", device_map="auto", load_in_4bit=True
+...     "openlm-research/open_llama_7b", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
... )
```
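To complete the loading snippet above, generation with the quantized model follows the usual tokenize-generate-decode pattern; a minimal sketch (the prompt string is illustrative):

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("openlm-research/open_llama_7b")
>>> model_inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to(model.device)

>>> generated_ids = model.generate(**model_inputs, max_new_tokens=20)
>>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
```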
@ -98,7 +98,6 @@ python examples/pytorch/summarization/run_summarization.py \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
-    --overwrite_output_dir \
    --predict_with_generate
```

@ -122,7 +121,6 @@ torchrun \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
-    --overwrite_output_dir \
    --predict_with_generate
```

@ -144,7 +142,6 @@ python xla_spawn.py --num_cores 8 \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
-    --overwrite_output_dir \
    --predict_with_generate
```

@ -201,7 +198,6 @@ python examples/pytorch/summarization/run_summarization.py \
    --summary_column summary_column_name \
    --source_prefix "summarize: " \
    --output_dir /tmp/tst-summarization \
-    --overwrite_output_dir \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
    --predict_with_generate

@ -229,7 +225,6 @@ python examples/pytorch/summarization/run_summarization.py \
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
-    --overwrite_output_dir \
    --predict_with_generate
```

@ -243,8 +238,6 @@ examples/pytorch/summarization/run_summarization.py -h

Another helpful option you can enable is resuming training from a previous checkpoint. This way, if your training is interrupted, you can pick up where you left off without starting over. There are two methods to resume training from a checkpoint.

-The first method uses the `output_dir previous_output_dir` argument to resume training from the latest checkpoint stored in `output_dir`. In this case, you should remove `overwrite_output_dir`:

```bash
python examples/pytorch/summarization/run_summarization.py
    --model_name_or_path google-t5/t5-small \
@ -256,24 +249,6 @@ python examples/pytorch/summarization/run_summarization.py
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
-    --output_dir previous_output_dir \
-    --predict_with_generate
-```
-
-The second method uses the `resume_from_checkpoint path_to_specific_checkpoint` argument to resume training from a specific checkpoint folder.
-
-```bash
-python examples/pytorch/summarization/run_summarization.py
-    --model_name_or_path google-t5/t5-small \
-    --do_train \
-    --do_eval \
-    --dataset_name cnn_dailymail \
-    --dataset_config "3.0.0" \
-    --source_prefix "summarize: " \
-    --output_dir /tmp/tst-summarization \
-    --per_device_train_batch_size=4 \
-    --per_device_eval_batch_size=4 \
-    --overwrite_output_dir \
    --resume_from_checkpoint path_to_specific_checkpoint \
    --predict_with_generate
```

@ -305,6 +280,5 @@ python examples/pytorch/summarization/run_summarization.py
    --output_dir /tmp/tst-summarization \
    --per_device_train_batch_size=4 \
    --per_device_eval_batch_size=4 \
-    --overwrite_output_dir \
    --predict_with_generate
```
@ -216,6 +216,11 @@
    - local: quantization/contribute
      title: Contribute
    title: Quantization
+  - isExpanded: false
+    sections:
+    - local: kernel_doc/overview
+      title: Kernels in transformers
+    title: Kernels
  - isExpanded: false
    sections:
    - local: serialization
@ -368,6 +373,8 @@
      title: Image Processor
    - local: main_classes/video_processor
      title: Video Processor
+    - local: main_classes/kernels
+      title: Kernels
    title: Main Classes
- sections:
  - sections:
@ -555,6 +562,8 @@
      title: LED
    - local: model_doc/lfm2
      title: LFM2
+    - local: model_doc/lfm2_moe
+      title: LFM2Moe
    - local: model_doc/llama
      title: LLaMA
    - local: model_doc/llama2
@ -1017,6 +1026,8 @@
      title: CLIPSeg
    - local: model_doc/clvp
      title: CLVP
+    - local: model_doc/cwm
+      title: Code World Model (CWM)
    - local: model_doc/cohere2_vision
      title: Cohere2Vision
    - local: model_doc/colpali
@ -1177,6 +1188,8 @@
      title: TVP
    - local: model_doc/udop
      title: UDOP
+    - local: model_doc/video_llama_3
+      title: VideoLlama3
    - local: model_doc/video_llava
      title: VideoLlava
    - local: model_doc/vilt
@ -51,7 +51,7 @@ This section describes how the model and configuration classes interact and the

### Model and configuration

-All Transformers' models inherit from a base [`PreTrainedModel`] and [`PretrainedConfig`] class. The configuration is the model's blueprint.
+All Transformers' models inherit from a base [`PreTrainedModel`] and [`PreTrainedConfig`] class. The configuration is the model's blueprint.

There are never more than two levels of abstraction for any model, to keep the code readable. The example model here, BrandNewLlama, inherits from `BrandNewLlamaPreTrainedModel` and [`PreTrainedModel`]. It is important that a new model only depends on [`PreTrainedModel`] so that it can use the [`~PreTrainedModel.from_pretrained`] and [`~PreTrainedModel.save_pretrained`] methods.

@ -66,9 +66,9 @@ model = BrandNewLlamaModel.from_pretrained("username/brand_new_llama")
model.config
```

-[`PretrainedConfig`] provides the [`~PretrainedConfig.from_pretrained`] and [`~PretrainedConfig.save_pretrained`] methods.
+[`PreTrainedConfig`] provides the [`~PreTrainedConfig.from_pretrained`] and [`~PreTrainedConfig.save_pretrained`] methods.

-When you use [`PreTrainedModel.save_pretrained`], it automatically calls [`PretrainedConfig.save_pretrained`] so that both the model and configuration are saved together.
+When you use [`PreTrainedModel.save_pretrained`], it automatically calls [`PreTrainedConfig.save_pretrained`] so that both the model and configuration are saved together.

A model is saved to a `model.safetensors` file and a configuration is saved to a `config.json` file.
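For readers tracking the rename, a minimal sketch of the save/load round trip described above (the checkpoint name is the placeholder used in this guide, and the behavior is assumed to be unchanged apart from the class name):

```py
from transformers import AutoConfig, AutoModel

model = AutoModel.from_pretrained("username/brand_new_llama")

# Saving the model also serializes its configuration next to the weights.
model.save_pretrained("./brand_new_llama")  # writes model.safetensors and config.json

# The configuration can be reloaded on its own from the same directory.
config = AutoConfig.from_pretrained("./brand_new_llama")
print(config.model_type)
```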
@ -292,7 +292,7 @@ The `@auto_docstring` decorator automatically generates docstrings by:

8. Unrolling kwargs typed with the unpack operator. For specific methods (defined in `UNROLL_KWARGS_METHODS`) or classes (defined in `UNROLL_KWARGS_CLASSES`), the decorator processes `**kwargs` parameters that are typed with `Unpack[KwargsTypedDict]`. It extracts the documentation from the `TypedDict` and adds each parameter to the function's docstring.

-   Currently only supported for [`FastImageProcessorKwargs`].
+   Currently only supported for [`ImagesKwargs`].

## Best practices
@ -22,7 +22,7 @@ Higher-level computer visions tasks, such as object detection or image segmentat
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Backbone.png"/>
</div>

-Load a backbone with [`~PretrainedConfig.from_pretrained`] and use the `out_indices` parameter to determine which layer, given by the index, to extract a feature map from.
+Load a backbone with [`~PreTrainedConfig.from_pretrained`] and use the `out_indices` parameter to determine which layer, given by the index, to extract a feature map from.

```py
from transformers import AutoBackbone
@ -46,7 +46,7 @@ There are two ways to load a Transformers backbone, [`AutoBackbone`] and a model
<hfoptions id="backbone-classes">
<hfoption id="AutoBackbone">

-The [AutoClass](./model_doc/auto) API automatically loads a pretrained vision model with [`~PretrainedConfig.from_pretrained`] as a backbone if it's supported.
+The [AutoClass](./model_doc/auto) API automatically loads a pretrained vision model with [`~PreTrainedConfig.from_pretrained`] as a backbone if it's supported.

Set the `out_indices` parameter to the layer you'd like to get the feature map from. If you know the name of the layer, you could also use `out_features`. These parameters can be used interchangeably, but if you use both, make sure they refer to the same layer.
@ -41,13 +41,13 @@ $$

The query (`Q`), key (`K`), and value (`V`) matrices are projections from the input embeddings of shape `(b, h, T, d_head)`.

-For causal attention, the mask prevents the model from attending to future tokens. Once a token is processed, its representation never changes with respect to future tokens, which means \\( K_{\text{past}} \\) and \\( V_{\text{past}} \\) can be cached and reused to compute the last token's representation.
+For causal attention, the mask prevents the model from attending to future tokens. Once a token is processed, its representation never changes with respect to future tokens, which means $ K_{\text{past}} $ and $ V_{\text{past}} $ can be cached and reused to compute the last token's representation.

$$
\text{Attention}(q_t, [\underbrace{k_1, k_2, \dots, k_{t-1}}_{\text{cached}}, k_{t}], [\underbrace{v_1, v_2, \dots, v_{t-1}}_{\text{cached}}, v_{t}])
$$

-At inference time, you only need the last token's query to compute the representation \\( x_t \\) that predicts the next token \\( t+1 \\). At each step, the new key and value vectors are **stored** in the cache and **appended** to the past keys and values.
+At inference time, you only need the last token's query to compute the representation $ x_t $ that predicts the next token $ t+1 $. At each step, the new key and value vectors are **stored** in the cache and **appended** to the past keys and values.

$$
K_{\text{cache}} \leftarrow \text{concat}(K_{\text{past}}, k_t), \quad V_{\text{cache}} \leftarrow \text{concat}(V_{\text{past}}, v_t)
$$
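The update above is plain concatenation along the sequence dimension; a minimal tensor-level sketch (shapes are illustrative):

```python
import torch

batch, heads, seq_len, head_dim = 1, 8, 5, 64

# Cached keys/values from previously processed tokens.
k_past = torch.randn(batch, heads, seq_len, head_dim)
v_past = torch.randn(batch, heads, seq_len, head_dim)

# Key/value for the newly generated token.
k_t = torch.randn(batch, heads, 1, head_dim)
v_t = torch.randn(batch, heads, 1, head_dim)

# K_cache <- concat(K_past, k_t), V_cache <- concat(V_past, v_t)
k_cache = torch.cat([k_past, k_t], dim=-2)
v_cache = torch.cat([v_past, v_t], dim=-2)
print(k_cache.shape)  # torch.Size([1, 8, 6, 64])
```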
@ -59,7 +59,7 @@ Refer to the table below to compare how caching improves efficiency.

| without caching | with caching |
|---|---|
-| for each step, recompute all previous `K` and `V` | for each step, only compute current `K` and `V`
+| for each step, recompute all previous `K` and `V` | for each step, only compute current `K` and `V` |
| attention cost per step is **quadratic** with sequence length | attention cost per step is **linear** with sequence length (memory grows linearly, but compute/token remains low) |

## Cache class
@ -98,9 +98,10 @@ The example below demonstrates how to create a generation loop with [`DynamicCac
```py
import torch
-from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache, infer_device
+from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
+from accelerate import Accelerator

-device = f"{infer_device()}:0"
+device = Accelerator().device

model_id = "meta-llama/Llama-2-7b-chat-hf"
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map=device)
@ -143,9 +144,10 @@ The generation loop usually takes care of the cache position, but if you're writ
```py
import torch
-from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache, infer_device
+from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
+from accelerate import Accelerator

-device = f"{infer_device()}:0"
+device = Accelerator().device

model_id = "meta-llama/Llama-2-7b-chat-hf"
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map=device)
@ -156,31 +158,3 @@ inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, ret
generated_ids = model.generate(**inputs, use_cache=True, max_new_tokens=10)

```

-## Legacy cache format
-
-Before the [`Cache`] class, the cache used to be stored as a tuple of tuples of tensors. This format is dynamic because it grows as text is generated, similar to [`DynamicCache`].
-
-The legacy format is essentially the same data structure but organized differently.
-
-- It's a tuple of tuples, where each inner tuple contains the key and value tensors for a layer.
-- The tensors have the same shape `[batch_size, num_heads, seq_len, head_dim]`.
-- The format is less flexible and doesn't support features like quantization or offloading.
-
-If your project depends on this legacy format, we recommend converting to [`DynamicCache`] with [`~DynamicCache.from_legacy_cache`]. Note that the legacy cache format is deprecated and no longer used in `Transformers`. You can convert back to the tuple format with the [`DynamicCache.to_legacy_cache`] function, which is helpful if you have custom logic for manipulating a cache in a specific format.
-
-```py
-import torch
-from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
-
-tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
-model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", dtype=torch.float16, device_map="auto")
-inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
-
-# `return_dict_in_generate=True` is required to return the cache and `return_legacy_cache` forces the returned cache
-# in the legacy format
-generation_outputs = model.generate(**inputs, return_dict_in_generate=True, return_legacy_cache=True, max_new_tokens=5)
-
-cache = DynamicCache.from_legacy_cache(generation_outputs.past_key_values)
-legacy_format_cache = cache.to_legacy_cache()
-```
@ -25,12 +25,12 @@ This guide will show you how to customize a ResNet model, enable [AutoClass](./m

## Configuration

-A configuration, given by the base [`PretrainedConfig`] class, contains all the necessary information to build a model. This is where you'll configure the attributes of the custom ResNet model. Different attributes gives different ResNet model types.
+A configuration, given by the base [`PreTrainedConfig`] class, contains all the necessary information to build a model. This is where you'll configure the attributes of the custom ResNet model. Different attributes give different ResNet model types.

The main rules for customizing a configuration are:

-1. A custom configuration must subclass [`PretrainedConfig`]. This ensures a custom model has all the functionality of a Transformers' model such as [`~PretrainedConfig.from_pretrained`], [`~PretrainedConfig.save_pretrained`], and [`~PretrainedConfig.push_to_hub`].
+1. A custom configuration must subclass [`PreTrainedConfig`]. This ensures a custom model has all the functionality of a Transformers' model such as [`~PreTrainedConfig.from_pretrained`], [`~PreTrainedConfig.save_pretrained`], and [`~PreTrainedConfig.push_to_hub`].
-2. The [`PretrainedConfig`] `__init__` must accept any `kwargs` and they must be passed to the superclass `__init__`. [`PretrainedConfig`] has more fields than the ones set in your custom configuration, so when you load a configuration with [`~PretrainedConfig.from_pretrained`], those fields need to be accepted by your configuration and passed to the superclass.
+2. The [`PreTrainedConfig`] `__init__` must accept any `kwargs` and they must be passed to the superclass `__init__`. [`PreTrainedConfig`] has more fields than the ones set in your custom configuration, so when you load a configuration with [`~PreTrainedConfig.from_pretrained`], those fields need to be accepted by your configuration and passed to the superclass.

> [!TIP]
> It is useful to check the validity of some of the parameters. In the example below, a check is implemented to ensure `block_type` and `stem_type` belong to one of the predefined values.
@ -38,10 +38,10 @@ The main rules for customizing a configuration are:
> Add `model_type` to the configuration class to enable [AutoClass](./models#autoclass) support.

```py
-from transformers import PretrainedConfig
+from transformers import PreTrainedConfig
from typing import List

-class ResnetConfig(PretrainedConfig):
+class ResnetConfig(PreTrainedConfig):
    model_type = "resnet"

    def __init__(
@ -74,7 +74,7 @@ class ResnetConfig(PretrainedConfig):
        super().__init__(**kwargs)
```

-Save the configuration to a JSON file in your custom model folder, `custom-resnet`, with [`~PretrainedConfig.save_pretrained`].
+Save the configuration to a JSON file in your custom model folder, `custom-resnet`, with [`~PreTrainedConfig.save_pretrained`].

```py
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
@ -83,7 +83,7 @@ resnet50d_config.save_pretrained("custom-resnet")

## Model

-With the custom ResNet configuration, you can now create and customize the model. The model subclasses the base [`PreTrainedModel`] class. Like [`PretrainedConfig`], inheriting from [`PreTrainedModel`] and initializing the superclass with the configuration extends Transformers' functionalities such as saving and loading to the custom model.
+With the custom ResNet configuration, you can now create and customize the model. The model subclasses the base [`PreTrainedModel`] class. Like [`PreTrainedConfig`], inheriting from [`PreTrainedModel`] and initializing the superclass with the configuration extends Transformers' functionalities such as saving and loading to the custom model.

Transformers' models follow the convention of accepting a `config` object in the `__init__` method. This passes the entire `config` to the model sublayers, instead of breaking the `config` object into multiple arguments that are individually passed to the sublayers.

@ -235,7 +235,7 @@ from resnet_model.configuration_resnet import ResnetConfig
from resnet_model.modeling_resnet import ResnetModel, ResnetModelForImageClassification
```

-Copy the code from the model and configuration files. To make sure the AutoClass objects are saved with [`~PreTrainedModel.save_pretrained`], call the [`~PretrainedConfig.register_for_auto_class`] method. This modifies the configuration JSON file to include the AutoClass objects and mapping.
+Copy the code from the model and configuration files. To make sure the AutoClass objects are saved with [`~PreTrainedModel.save_pretrained`], call the [`~PreTrainedConfig.register_for_auto_class`] method. This modifies the configuration JSON file to include the AutoClass objects and mapping.

For a model, pick the appropriate `AutoModelFor` class based on the task.
@ -593,7 +593,7 @@ To deploy DeepSpeed on multiple GPUs, add `--num_gpus`. You don't need to add `-
deepspeed --num_gpus=2 examples/pytorch/translation/run_translation.py \
--deepspeed tests/deepspeed/ds_config_zero3.json \
--model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \
---output_dir output_dir --overwrite_output_dir --fp16 \
+--output_dir output_dir --fp16 \
--do_train --max_train_samples 500 --num_train_epochs 1 \
--dataset_name wmt16 --dataset_config "ro-en" \
--source_lang en --target_lang ro
@ -616,7 +616,7 @@ To deploy DeepSpeed on a single GPU, add `--num_gpus`. You don't need to add `--
deepspeed --num_gpus=1 examples/pytorch/translation/run_translation.py \
--deepspeed tests/deepspeed/ds_config_zero2.json \
--model_name_or_path google-t5/t5-small --per_device_train_batch_size 1 \
---output_dir output_dir --overwrite_output_dir --fp16 \
+--output_dir output_dir --fp16 \
--do_train --max_train_samples 500 --num_train_epochs 1 \
--dataset_name wmt16 --dataset_config "ro-en" \
--source_lang en --target_lang ro
@ -32,9 +32,10 @@ Greedy search works well for tasks with relatively short outputs where creativit

```py
import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from accelerate import Accelerator

-device = infer_device()
+device = Accelerator().device

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
inputs = tokenizer("Hugging Face is an open-source company", return_tensors="pt").to(device)
@ -54,9 +55,10 @@ Enable multinomial sampling with `do_sample=True` and `num_beams=1`.

```py
import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from accelerate import Accelerator

-device = infer_device()
+device = Accelerator().device

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
inputs = tokenizer("Hugging Face is an open-source company", return_tensors="pt").to(device)
@ -79,9 +81,10 @@ Enable beam search with the `num_beams` parameter (should be greater than 1 othe

```py
import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from accelerate import Accelerator

-device = infer_device()
+device = Accelerator().device

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
inputs = tokenizer("Hugging Face is an open-source company", return_tensors="pt").to(device)
@ -166,9 +169,10 @@ Enable prompt lookup decoding with the `prompt_lookup_num_tokens` parameter.

```py
import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from accelerate import Accelerator

-device = infer_device()
+device = Accelerator().device

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM-1.7B")
model = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/SmolLM-1.7B", dtype=torch.float16).to(device)
@ -15,15 +15,12 @@ rendered properly in your Markdown viewer.

# Hyperparameter search

-Hyperparameter search discovers an optimal set of hyperparameters that produces the best model performance. [`Trainer`] supports several hyperparameter search backends - [Optuna](https://optuna.readthedocs.io/en/stable/index.html), [SigOpt](https://docs.sigopt.com/), [Weights & Biases](https://docs.wandb.ai/), [Ray Tune](https://docs.ray.io/en/latest/tune/index.html) - through [`~Trainer.hyperparameter_search`] to optimize an objective or even multiple objectives.
+Hyperparameter search discovers an optimal set of hyperparameters that produces the best model performance. [`Trainer`] supports several hyperparameter search backends - [Optuna](https://optuna.readthedocs.io/en/stable/index.html), [Weights & Biases](https://docs.wandb.ai/), [Ray Tune](https://docs.ray.io/en/latest/tune/index.html) - through [`~Trainer.hyperparameter_search`] to optimize an objective or even multiple objectives.

This guide will go over how to set up a hyperparameter search for each of the backends.

-> [!WARNING]
-> [SigOpt](https://github.com/sigopt/sigopt-server) is in public archive mode and is no longer actively maintained. Try using Optuna, Weights & Biases or Ray Tune instead.

```bash
-pip install optuna/sigopt/wandb/ray[tune]
+pip install optuna/wandb/ray[tune]
```

To use [`~Trainer.hyperparameter_search`], you need to create a `model_init` function. This function includes basic model information (arguments and configuration) because it needs to be reinitialized for each search trial in the run.
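As a concrete illustration of the `model_init` requirement just described, a minimal sketch, assuming a sequence-classification fine-tune (the model name, label count, and dataset wiring are placeholders):

```py
from transformers import AutoModelForSequenceClassification, Trainer, TrainingArguments

def model_init(trial):
    # Re-create the model from scratch for every trial so each run starts
    # from the same pretrained weights.
    return AutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased", num_labels=2)

trainer = Trainer(
    model=None,              # the model is built by model_init for each trial
    model_init=model_init,
    args=TrainingArguments(output_dir="hp-search"),
    train_dataset=None,      # replace with your tokenized train dataset
    eval_dataset=None,       # replace with your tokenized eval dataset
)
```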
@ -109,31 +106,7 @@ best_trials = trainer.hyperparameter_search(
    n_trials=20,
    compute_objective=compute_objective,
)
-```
-
-</hfoption>
-<hfoption id="SigOpt">
-
-[SigOpt](https://docs.sigopt.com/ai-module-api-references/api_reference/objects/object_parameter) optimizes double, integer, and categorical parameters.
-
-```py
-def sigopt_hp_space(trial):
-    return [
-        {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double"},
-        {
-            "categorical_values": ["16", "32", "64", "128"],
-            "name": "per_device_train_batch_size",
-            "type": "categorical",
-        },
-    ]
-
-best_trials = trainer.hyperparameter_search(
-    direction=["minimize", "maximize"],
-    backend="sigopt",
-    hp_space=sigopt_hp_space,
-    n_trials=20,
-    compute_objective=compute_objective,
-)
```

</hfoption>
@ -166,4 +139,4 @@

## Distributed Data Parallel

-[`Trainer`] only supports hyperparameter search for distributed data parallel (DDP) on the Optuna and SigOpt backends. Only the rank-zero process is used to generate the search trial, and the resulting parameters are passed along to the other ranks.
+[`Trainer`] only supports hyperparameter search for distributed data parallel (DDP) on the Optuna backend. Only the rank-zero process is used to generate the search trial, and the resulting parameters are passed along to the other ranks.
@ -43,4 +43,3 @@ Most of those are only useful if you are studying the general code in the librar

## Other Utilities

[[autodoc]] utils._LazyModule
-[[autodoc]] pytorch_utils.infer_device
@ -153,6 +153,9 @@ generation.
[[autodoc]] TemperatureLogitsWarper
    - __call__

+[[autodoc]] TopHLogitsWarper
+    - __call__
+
[[autodoc]] TopKLogitsWarper
    - __call__

@ -193,28 +196,6 @@ A [`StoppingCriteria`] can be used to change when to stop generation (other than
[[autodoc]] EosTokenCriteria
    - __call__

-## Constraints
-
-A [`Constraint`] can be used to force the generation to include specific tokens or sequences in the output. Please note that this is exclusively available to our PyTorch implementations.
-
-[[autodoc]] Constraint
-
-[[autodoc]] PhrasalConstraint
-
-[[autodoc]] DisjunctiveConstraint
-
-[[autodoc]] ConstraintListState
-
-## BeamSearch
-
-[[autodoc]] BeamScorer
-    - process
-    - finalize
-
-[[autodoc]] ConstrainedBeamSearchScorer
-    - process
-    - finalize
-
## Streamers

[[autodoc]] TextStreamer
@ -270,19 +251,19 @@ A [`Constraint`] can be used to force the generation to include specific tokens
    - batch_select_indices

[[autodoc]] DynamicCache
-    - to_legacy_cache
-    - from_legacy_cache
+
+[[autodoc]] StaticCache

[[autodoc]] QuantizedCache

+[[autodoc]] EncoderDecoderCache
+
[[autodoc]] QuantoQuantizedCache

[[autodoc]] HQQQuantizedCache

[[autodoc]] OffloadedCache

-[[autodoc]] StaticCache
-
[[autodoc]] OffloadedStaticCache

[[autodoc]] HybridCache
@ -291,10 +272,6 @@ A [`Constraint`] can be used to force the generation to include specific tokens

[[autodoc]] SlidingWindowCache

-[[autodoc]] EncoderDecoderCache
-    - to_legacy_cache
-    - from_legacy_cache
-
## Watermark Utils

[[autodoc]] WatermarkingConfig
@@ -345,10 +345,94 @@ Skipped : 124/323 (38.4%)
- bit: Bit does not use inputs_embeds
- blip: Blip does not use inputs_embeds
- blip_2: Inputs_embeds is tested in individual model tests
- bridgetower:
- canine: CANINE does not have a get_input_embeddings() method.
- ...

📄 JSON saved to /home/pablo/git/transformers/scan_test_inputs_embeds.json

```

+## Modular model detector
+
+### Code similarity analyzer - for model adders
+
+This utility analyzes code similarities between model implementations to identify opportunities for modularization. It compares a new or existing modeling file against all models in the library using embedding-based and token-based similarity metrics.
+
+### Rationale
+
+When adding a new model to transformers, many components (attention layers, MLPs, outputs, etc.) may already exist in similar form in other models. Instead of implementing everything from scratch, model adders can identify which existing classes are similar and potentially reusable through modularization.
+
+The tool computes two similarity scores:
+- **Embedding score**: Uses semantic code embeddings (via `Qwen/Qwen3-Embedding-4B`) to detect functionally similar code even with different naming
+- **Jaccard score**: Measures token set overlap to identify structurally similar code patterns
+
+A score of 1.00 means the code is identical.
+
+### Usage
+
+From the root of the `transformers` repository:
+
+```bash
+python utils/modular_model_detector.py --modeling-file path/to/modeling_file.py
+```
+
+The tool will automatically download the pre-built index from the Hub (requires RAM/VRAM for the embedding model).
+
+**Example output:**
+
+```text
+Loading checkpoint shards: 100%|████████████████████| 2/2 [00:00<00:00, 33.62it/s]
+encoding 21 query definitions with Qwen/Qwen3-Embedding-4B (device=cuda, batch=16, max_length=4096)
+
+stuff.py::Beit3ImageTextMatchingOutput:
+  embedding:
+    blip_2::Blip2ImageTextMatchingModelOutput (0.9994)
+    chinese_clip::ChineseCLIPOutput (0.9818)
+    owlvit::OwlViTOutput (0.9818)
+  jaccard:
+    owlv2::Owlv2Output (0.9667)
+    metaclip_2::MetaClip2Output (0.9667)
+    altclip::AltCLIPOutput (0.9667)
+  intersection:
+    blip::BlipOutput
+    owlvit::OwlViTOutput
+
+stuff.py::Beit3MLP:
+  embedding:
+    efficientloftr::EfficientLoFTRMLP (0.9718)
+    seggpt::SegGptMlp (0.9650)
+  jaccard:
+    chinese_clip::ChineseCLIPTextSelfOutput (0.5294)
+    bert::BertSelfOutput (0.5294)
+  intersection:
+```
+
+The `intersection` field shows classes that appear in both top-5 results, indicating high confidence for modularization candidates.
+
+### Building a custom index
+
+To rebuild the index from your local codebase (useful after adding new models or using a different embedding model):
+
+```bash
+python utils/modular_model_detector.py --build
+```
+
+To push the rebuilt index to a Hub dataset:
+
+```bash
+python utils/modular_model_detector.py --build --push-new-index --hub-dataset your-org/your-dataset
+```
+
+### Options
+
+- `--modeling-file`: Path to the modeling file to analyze
+- `--build`: Build the code similarity index from all modeling files in `src/transformers/models/`
+- `--push-new-index`: After building, push the index to a Hub dataset (requires `--build`)
+- `--hub-dataset`: Hub dataset repository ID to pull/push the index (default: `hf-internal-testing/transformers_code_embeddings`)
+
+### Limitations
+
+This tool requires GPU/CPU resources to run the embedding model (`Qwen/Qwen3-Embedding-4B`). The pre-built index is downloaded from the Hub by default, which requires an internet connection on first use.
+
+Results are suggestions based on code similarity and should be manually reviewed before modularization. High similarity scores don't guarantee perfect compatibility.
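To make the Jaccard score described above concrete, here is a toy sketch of a token-set Jaccard similarity. It is only an illustration of the metric; the tokenization used by `utils/modular_model_detector.py` itself may differ.

```python
import re

def jaccard_similarity(code_a: str, code_b: str) -> float:
    # Token-set overlap: shared identifiers divided by all distinct identifiers.
    tokens_a = set(re.findall(r"[A-Za-z_]\w*", code_a))
    tokens_b = set(re.findall(r"[A-Za-z_]\w*", code_b))
    if not tokens_a and not tokens_b:
        return 1.0
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)

print(jaccard_similarity(
    "def forward(self, x): return self.fc(x)",
    "def forward(self, hidden): return self.fc(hidden)",
))
```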
@@ -46,10 +46,4 @@ Most of those are only useful if you are studying the code of the models in the

[[autodoc]] pytorch_utils.apply_chunking_to_forward

-[[autodoc]] pytorch_utils.find_pruneable_heads_and_indices
-
-[[autodoc]] pytorch_utils.prune_layer
-
-[[autodoc]] pytorch_utils.prune_conv1d_layer
-
[[autodoc]] pytorch_utils.prune_linear_layer
docs/source/en/kernel_doc/overview.md (new file)
@@ -0,0 +1,3 @@
+# Overview
+
+Kernels in transformers are used to optimize the performance of models with custom layers from the Hub, at very low effort.
@@ -124,11 +124,12 @@ The example below shows how you can fallback to an offloaded cache if you run ou

```py
import torch
-from transformers import AutoTokenizer, AutoModelForCausalLM, infer_device
+from transformers import AutoTokenizer, AutoModelForCausalLM
+from accelerate import Accelerator

def resilient_generate(model, *args, **kwargs):
    oom = False
-    device = infer_device()
+    device = Accelerator().device
    torch_device_module = getattr(torch, device, torch.cuda)
    try:
        return model.generate(*args, **kwargs)
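The hunk above cuts the helper off after the `try` block. Below is a self-contained sketch of the complete fallback, assuming a CUDA device and the `cache_implementation="offloaded"` generation option; the exception type may differ on other accelerators.

```py
import torch

def resilient_generate(model, *args, **kwargs):
    try:
        return model.generate(*args, **kwargs)
    except torch.cuda.OutOfMemoryError as e:
        # Free cached memory and retry once with the offloaded KV cache.
        print(e)
        print("Retrying with cache_implementation='offloaded'")
        torch.cuda.empty_cache()
        kwargs["cache_implementation"] = "offloaded"
        return model.generate(*args, **kwargs)
```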
@@ -114,7 +114,8 @@ print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
Another option for using [`StaticCache`] is to pass it to a model's forward pass using the same `past_key_values` argument. This allows you to write your own custom decoding function to decode the next token given the current token, position, and cache position of previously generated tokens.

```py
-from transformers import LlamaTokenizer, LlamaForCausalLM, StaticCache, logging, infer_device
+from transformers import LlamaTokenizer, LlamaForCausalLM, StaticCache, logging
+from accelerate import Accelerator
from transformers.testing_utils import CaptureLogger
import torch

@@ -124,7 +125,7 @@ prompts = [
]

NUM_TOKENS_TO_GENERATE = 40
-torch_device = infer_device()
+torch_device = Accelerator().device

tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", pad_token="</s>", padding_side="right")
model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="sequential")
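The custom decoding function mentioned above is not part of this hunk. A minimal sketch of what it can look like, calling the model's forward pass with the `past_key_values` and `cache_position` arguments and picking the next token greedily:

```py
import torch

def decode_one_token(model, cur_token, cache_position, past_key_values):
    # One forward step: only the current token is fed; the StaticCache passed via
    # `past_key_values` already holds the keys/values of previously generated tokens.
    logits = model(
        cur_token,
        cache_position=cache_position,
        past_key_values=past_key_values,
        return_dict=False,
        use_cache=True,
    )[0]
    return torch.argmax(logits[:, -1], dim=-1)[:, None]
```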
@@ -208,10 +209,11 @@ Enable speculative decoding by loading an assistant model and passing it to [`G
<hfoption id="greedy search">

```py
-from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from accelerate import Accelerator
import torch

-device = infer_device()
+device = Accelerator().device

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
inputs = tokenizer("Einstein's theory of relativity states", return_tensors="pt").to(device)
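The snippet above stops before the actual call. Continuing it, speculative decoding is enabled by loading a smaller draft model and passing it through the `assistant_model` argument; the assistant checkpoint below is an assumption chosen to share the main model's tokenizer.

```py
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b").to(device)
# The smaller model drafts candidate tokens that the main model then verifies in parallel.
assistant_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m").to(device)

outputs = model.generate(**inputs, assistant_model=assistant_model)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```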
@@ -229,10 +231,11 @@ tokenizer.batch_decode(outputs, skip_special_tokens=True)
For speculative sampling decoding, add the [do_sample](https://hf.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationConfig.do_sample) and [temperature](https://hf.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationConfig.temperature) parameters to [`~GenerationMixin.generate`].

```py
-from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device
+from transformers import AutoModelForCausalLM, AutoTokenizer
+from accelerate import Accelerator
import torch

-device = infer_device()
+device = Accelerator().device

tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
inputs = tokenizer("Einstein's theory of relativity states", return_tensors="pt").to(device)
|
|||||||
<hfoption id="greedy decoding">
|
<hfoption id="greedy decoding">
|
||||||
|
|
||||||
```py
|
```py
|
||||||
from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device
|
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||||
|
from accelerate import Accelerator
|
||||||
import torch
|
import torch
|
||||||
|
|
||||||
device = infer_device()
|
device = Accelerator().device
|
||||||
|
|
||||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
|
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
|
||||||
inputs = tokenizer("The second law of thermodynamics states", return_tensors="pt").to(device)
|
inputs = tokenizer("The second law of thermodynamics states", return_tensors="pt").to(device)
|
||||||
@ -278,10 +282,11 @@ print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
|||||||
For prompt lookup decoding with sampling, add the [do_sample](https://hf.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationConfig.do_sample) and [temperature](https://hf.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationConfig.temperature) parameters to [`~GenerationMixin.generate`].
|
For prompt lookup decoding with sampling, add the [do_sample](https://hf.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationConfig.do_sample) and [temperature](https://hf.co/docs/transformers/main/en/main_classes/text_generation#transformers.GenerationConfig.temperature) parameters to [`~GenerationMixin.generate`].
|
||||||
|
|
||||||
```py
|
```py
|
||||||
from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device
|
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||||
|
from accelerate import Accelerator
|
||||||
import torch
|
import torch
|
||||||
|
|
||||||
device = infer_device()
|
device = Accelerator().device
|
||||||
|
|
||||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
|
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
|
||||||
inputs = tokenizer("The second law of thermodynamics states", return_tensors="pt").to(device)
|
inputs = tokenizer("The second law of thermodynamics states", return_tensors="pt").to(device)
|
||||||
|
@@ -259,11 +259,11 @@ Some models and tasks expect a certain input prompt format, and if the format is
For example, a chat model expects the input as a [chat template](./chat_templating). Your prompt should include a `role` and `content` to indicate who is participating in the conversation. If you try to pass your prompt as a single string, the model doesn't always return the expected output.

```py
-from transformers import AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceH4/zephyr-7b-alpha")
model = AutoModelForCausalLM.from_pretrained(
-    "HuggingFaceH4/zephyr-7b-alpha", device_map="auto", load_in_4bit=True
+    "HuggingFaceH4/zephyr-7b-alpha", device_map="auto", quantization_config=BitsAndBytesConfig(load_in_4bit=True)
)
```
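For reference, the role/content format mentioned above is applied with the tokenizer's `apply_chat_template` method; a short sketch continuing the Zephyr example (message contents are placeholders):

```py
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Describe the city of Paris in one sentence."},
]
# apply_chat_template renders the roles and contents into the prompt format the model was trained with.
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
outputs = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```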
@@ -16,18 +16,18 @@ rendered properly in your Markdown viewer.
Large Language Models (LLMs) such as GPT3/4, [Falcon](https://huggingface.co/tiiuae/falcon-40b), and [Llama](https://huggingface.co/meta-llama/Llama-2-70b-hf) are rapidly advancing in their ability to tackle human-centric tasks, establishing themselves as essential tools in modern knowledge-based industries.
Deploying these models in real-world tasks remains challenging, however:

- To exhibit near-human text understanding and generation capabilities, LLMs currently need to be composed of billions of parameters (see [Kaplan et al](https://huggingface.co/papers/2001.08361), [Wei et. al](https://huggingface.co/papers/2206.07682)). This consequently amplifies the memory demands for inference.
- In many real-world tasks, LLMs need to be given extensive contextual information. This necessitates the model's capability to manage very long input sequences during inference.

The crux of these challenges lies in augmenting the computational and memory capabilities of LLMs, especially when handling expansive input sequences.

In this guide, we will go over the effective techniques for efficient LLM deployment:

1. **Lower Precision:** Research has shown that operating at reduced numerical precision, namely [8-bit and 4-bit](./main_classes/quantization), can achieve computational advantages without a considerable decline in model performance.

2. **Flash Attention:** Flash Attention is a variation of the attention algorithm that not only provides a more memory-efficient approach but also realizes increased efficiency due to optimized GPU memory utilization.

3. **Architectural Innovations:** Considering that LLMs are always deployed in the same way during inference, namely autoregressive text generation with a long input context, specialized model architectures have been proposed that allow for more efficient inference. The most important advancements in model architectures hereby are [Alibi](https://huggingface.co/papers/2108.12409), [Rotary embeddings](https://huggingface.co/papers/2104.09864), [Multi-Query Attention (MQA)](https://huggingface.co/papers/1911.02150) and [Grouped-Query-Attention (GQA)](https://huggingface.co/papers/2305.13245).

Throughout this guide, we will offer an analysis of auto-regressive generation from a tensor's perspective. We delve into the pros and cons of adopting lower precision, provide a comprehensive exploration of the latest attention algorithms, and discuss improved LLM architectures. While doing so, we run practical examples showcasing each of the feature improvements.
@@ -37,22 +37,22 @@ Memory requirements of LLMs can be best understood by seeing the LLM as a set of

At the time of writing this guide, LLMs consist of at least a couple billion parameters. Each parameter thereby is made of a decimal number, e.g. `4.5689`, which is usually stored in either [float32](https://en.wikipedia.org/wiki/Single-precision_floating-point_format), [bfloat16](https://en.wikipedia.org/wiki/Bfloat16_floating-point_format), or [float16](https://en.wikipedia.org/wiki/Half-precision_floating-point_format) format. This allows us to easily compute the memory requirement to load the LLM into memory:

-> *Loading the weights of a model having X billion parameters requires roughly 4 * X GB of VRAM in float32 precision*
+> *Loading the weights of a model having X billion parameters requires roughly 4 \* X GB of VRAM in float32 precision*

Nowadays, models are however rarely trained in full float32 precision, but usually in bfloat16 precision or, less frequently, in float16 precision. Therefore the rule of thumb becomes:

-> *Loading the weights of a model having X billion parameters requires roughly 2 * X GB of VRAM in bfloat16/float16 precision*
+> *Loading the weights of a model having X billion parameters requires roughly 2 \* X GB of VRAM in bfloat16/float16 precision*

For shorter text inputs (less than 1024 tokens), the memory requirement for inference is very much dominated by the memory requirement to load the weights. Therefore, for now, let's assume that the memory requirement for inference is equal to the memory requirement to load the model into the GPU VRAM.

To give some examples of how much VRAM it roughly takes to load a model in bfloat16:

- **GPT3** requires 2 \* 175 GB = **350 GB** VRAM
- [**Bloom**](https://huggingface.co/bigscience/bloom) requires 2 \* 176 GB = **352 GB** VRAM
- [**Llama-2-70b**](https://huggingface.co/meta-llama/Llama-2-70b-hf) requires 2 \* 70 GB = **140 GB** VRAM
- [**Falcon-40b**](https://huggingface.co/tiiuae/falcon-40b) requires 2 \* 40 GB = **80 GB** VRAM
- [**MPT-30b**](https://huggingface.co/mosaicml/mpt-30b) requires 2 \* 30 GB = **60 GB** VRAM
- [**bigcode/starcoder**](https://huggingface.co/bigcode/starcoder) requires 2 \* 15.5 GB = **31 GB** VRAM

As of writing this document, the largest GPU chips on the market are the A100 and H100, offering 80GB of VRAM. Most of the models listed before require more than 80GB just to be loaded and therefore necessarily require [tensor parallelism](https://huggingface.co/docs/transformers/perf_train_gpu_many#tensor-parallelism) and/or [pipeline parallelism](https://huggingface.co/docs/transformers/perf_train_gpu_many#naive-model-parallelism-vertical-and-pipeline-parallelism).

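As a quick sanity check of the rule of thumb above, here is a tiny helper that reproduces the numbers in the list (illustrative only):

```python
BYTES_PER_PARAM = {"float32": 4, "bfloat16": 2, "float16": 2, "int8": 1}

def weight_memory_gb(num_params_billion: float, dtype: str = "bfloat16") -> float:
    # Rule of thumb: VRAM for the weights alone is (parameters) * (bytes per parameter).
    return num_params_billion * BYTES_PER_PARAM[dtype]

for name, billions in [("GPT3", 175), ("Bloom", 176), ("Llama-2-70b", 70), ("Falcon-40b", 40), ("bigcode/starcoder", 15.5)]:
    print(f"{name}: ~{weight_memory_gb(billions):.0f} GB in bfloat16")
```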
@@ -169,11 +169,11 @@ All that matters is that the next token *logit* distribution stays roughly the s

There are various quantization techniques, which we won't discuss in detail here, but in general, all quantization techniques work as follows:

- 1. Quantize all weights to the target precision
- 2. Load the quantized weights, and pass the input sequence of vectors in bfloat16 precision
- 3. Dynamically dequantize weights to bfloat16 to perform the computation with their input vectors in bfloat16 precision

-In a nutshell, this means that *inputs-weight matrix* multiplications, with \\( X \\) being the *inputs*, \\( W \\) being a weight matrix and \\( Y \\) being the output:
+In a nutshell, this means that *inputs-weight matrix* multiplications, with $X$ being the *inputs*, $W$ being a weight matrix and $Y$ being the output:

$$ Y = X * W $$

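To make the three steps above concrete, here is a minimal sketch of weight-only 8-bit absmax quantization. It illustrates the general scheme only; it is not how the `bitsandbytes` kernels are actually implemented.

```python
import torch

def quantize_absmax_int8(w: torch.Tensor):
    # Step 1: map bfloat16 weights to int8 using a per-tensor scale.
    scale = w.abs().max() / 127
    return torch.round(w / scale).to(torch.int8), scale

def dequant_matmul(x: torch.Tensor, w_int8: torch.Tensor, scale: torch.Tensor):
    # Steps 2-3: keep the inputs in bfloat16 and dequantize the weights on the fly for Y = X * W.
    return x @ (w_int8.to(torch.bfloat16) * scale)

w = torch.randn(64, 64, dtype=torch.bfloat16)
x = torch.randn(8, 64, dtype=torch.bfloat16)
w_q, scale = quantize_absmax_int8(w)
print((x @ w - dequant_matmul(x, w_q, scale)).abs().max())  # small quantization error
```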
@@ -194,7 +194,7 @@ the [`bitsandbytes`](https://github.com/bitsandbytes-foundation/bitsandbytes) li
We can then load models in 8-bit quantization by simply adding a `load_in_8bit=True` flag to `from_pretrained`.

```python
-model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", load_in_8bit=True, pad_token_id=0)
+model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", quantization_config=BitsAndBytesConfig(load_in_8bit=True), pad_token_id=0)
```

Now, let's run our example again and measure the memory usage.
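Note that the updated line assumes `BitsAndBytesConfig` has been imported; a self-contained version of the load, with the model name taken from the surrounding example:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig, pipeline

model = AutoModelForCausalLM.from_pretrained(
    "bigcode/octocoder",
    quantization_config=BitsAndBytesConfig(load_in_8bit=True),
    pad_token_id=0,
)
tokenizer = AutoTokenizer.from_pretrained("bigcode/octocoder")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
```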
@@ -241,7 +241,7 @@ flush()
Let's see what peak GPU memory consumption 4-bit quantization gives. Quantizing the model to 4-bit can be done with the same API as before - this time by passing `load_in_4bit=True` instead of `load_in_8bit=True`.

```python
-model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", load_in_4bit=True, pad_token_id=0)
+model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", quantization_config=BitsAndBytesConfig(load_in_4bit=True), pad_token_id=0)

pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

@@ -271,7 +271,7 @@ Just 9.5GB! That's really not a lot for a >15 billion parameter model.

While we see very little degradation in accuracy for our model here, 4-bit quantization can in practice often lead to different results compared to 8-bit quantization or full `bfloat16` inference. It is up to the user to try it out.

-Also note that inference here was again a bit slower compared to 8-bit quantization which is due to the more aggressive quantization method used for 4-bit quantization leading to \\( \text{quantize} \\) and \\( \text{dequantize} \\) taking longer during inference.
+Also note that inference here was again a bit slower compared to 8-bit quantization which is due to the more aggressive quantization method used for 4-bit quantization leading to $\text{quantize}$ and $\text{dequantize}$ taking longer during inference.

```python
del model
@@ -300,279 +300,117 @@ Next, let's look into how we can improve computational and memory efficiency by
Today's top-performing LLMs share more or less the same fundamental architecture that consists of feed-forward layers, activation layers, layer normalization layers, and most crucially, self-attention layers.

Self-attention layers are central to Large Language Models (LLMs) in that they enable the model to understand the contextual relationships between input tokens.
-However, the peak GPU memory consumption for self-attention layers grows *quadratically* both in compute and memory complexity with number of input tokens (also called *sequence length*) that we denote in the following by \\( N \\) .
+However, the peak GPU memory consumption for self-attention layers grows *quadratically* both in compute and memory complexity with the number of input tokens (also called *sequence length*) that we denote in the following by $N$.
While this is not really noticeable for shorter input sequences (of up to 1000 input tokens), it becomes a serious problem for longer input sequences (at around 16000 input tokens).

-Let's take a closer look. The formula to compute the output \\( \mathbf{O} \\) of a self-attention layer for an input \\( \mathbf{X} \\) of length \\( N \\) is:
+Let's take a closer look. The formula to compute the output $\mathbf{O}$ of a self-attention layer for an input $\mathbf{X}$ of length $N$ is:

$$ \textbf{O} = \text{Attn}(\mathbf{X}) = \mathbf{V} \times \text{Softmax}(\mathbf{QK}^T) \text{ with } \mathbf{Q} = \mathbf{W}_q \mathbf{X}, \mathbf{V} = \mathbf{W}_v \mathbf{X}, \mathbf{K} = \mathbf{W}_k \mathbf{X} $$

-\\( \mathbf{X} = (\mathbf{x}_1, ... \mathbf{x}_{N}) \\) is thereby the input sequence to the attention layer. The projections \\( \mathbf{Q} \\) and \\( \mathbf{K} \\) will each consist of \\( N \\) vectors resulting in the \\( \mathbf{QK}^T \\) being of size \\( N^2 \\) .
+$\mathbf{X} = (\mathbf{x}_1, ... \mathbf{x}_{N})$ is thereby the input sequence to the attention layer. The projections $\mathbf{Q}$ and $\mathbf{K}$ will each consist of $N$ vectors resulting in the $\mathbf{QK}^T$ being of size $N^2$.

LLMs usually have multiple attention heads, thus doing multiple self-attention computations in parallel.
-Assuming, the LLM has 40 attention heads and runs in bfloat16 precision, we can calculate the memory requirement to store the \\( \mathbf{QK^T} \\) matrices to be \\( 40 * 2 * N^2 \\) bytes. For \\( N=1000 \\) only around 50 MB of VRAM are needed, however, for \\( N=16000 \\) we would need 19 GB of VRAM, and for \\( N=100,000 \\) we would need almost 1TB just to store the \\( \mathbf{QK}^T \\) matrices.
+Assuming the LLM has 40 attention heads and runs in bfloat16 precision, we can calculate the memory requirement to store the $\mathbf{QK^T}$ matrices to be $40 * 2 * N^2$ bytes. For $N=1000$ only around 50 MB of VRAM are needed, however, for $N=16000$ we would need 19 GB of VRAM, and for $N=100,000$ we would need almost 1TB just to store the $\mathbf{QK}^T$ matrices.

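A quick back-of-the-envelope check of the figures above (40 heads, 2 bytes per bfloat16 value, one $N \times N$ score matrix per head):

```python
def qk_memory_gb(seq_len: int, num_heads: int = 40, bytes_per_value: int = 2) -> float:
    # Memory for the QK^T score matrices alone: heads * bytes * N^2.
    return num_heads * bytes_per_value * seq_len**2 / 1024**3

for n in (1_000, 16_000, 100_000):
    print(f"N={n}: {qk_memory_gb(n):,.2f} GB")
# roughly 0.07 GB, 19 GB, and 745 GB -- matching the orders of magnitude quoted above
```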
Long story short, the default self-attention algorithm quickly becomes prohibitively memory-expensive for large input contexts.

As LLMs improve in text comprehension and generation, they are applied to increasingly complex tasks. While models once handled the translation or summarization of a few sentences, they now manage entire pages, demanding the capability to process extensive input lengths.

-How can we get rid of the exorbitant memory requirements for large input lengths? We need a new way to compute the self-attention mechanism that gets rid of the \\( QK^T \\) matrix. [Tri Dao et al.](https://huggingface.co/papers/2205.14135) developed exactly such a new algorithm and called it **Flash Attention**.
+How can we get rid of the exorbitant memory requirements for large input lengths? We need a new way to compute the self-attention mechanism that gets rid of the $\mathbf{QK}^T$ matrix. [Tri Dao et al.](https://huggingface.co/papers/2205.14135) developed exactly such a new algorithm and called it **Flash Attention**.

-In a nutshell, Flash Attention breaks the \\(\mathbf{V} \times \text{Softmax}(\mathbf{QK}^T\\)) computation apart and instead computes smaller chunks of the output by iterating over multiple softmax computation steps:
+In a nutshell, Flash Attention breaks the $\mathbf{V} \times \text{Softmax}(\mathbf{QK}^T)$ computation apart and instead computes smaller chunks of the output by iterating over multiple softmax computation steps:

$$ \textbf{O}_i \leftarrow s^a_{ij} * \textbf{O}_i + s^b_{ij} * \mathbf{V}_{j} \times \text{Softmax}(\mathbf{QK}^T_{i,j}) \text{ for multiple } i, j \text{ iterations} $$

-with \\( s^a_{ij} \\) and \\( s^b_{ij} \\) being some softmax normalization statistics that need to be recomputed for every \\( i \\) and \\( j \\) .
+with $s^a_{ij}$ and $s^b_{ij}$ being some softmax normalization statistics that need to be recomputed for every $i$ and $j$.

Please note that the whole Flash Attention is a bit more complex and is greatly simplified here as going in too much depth is out of scope for this guide. The reader is invited to take a look at the well-written [Flash Attention paper](https://huggingface.co/papers/2205.14135) for more details.

The main takeaway here is:

-> By keeping track of softmax normalization statistics and by using some smart mathematics, Flash Attention gives **numerical identical** outputs compared to the default self-attention layer at a memory cost that only increases linearly with \\( N \\) .
+> By keeping track of softmax normalization statistics and by using some smart mathematics, Flash Attention gives **numerically identical** outputs compared to the default self-attention layer at a memory cost that only increases linearly with $N$.

Looking at the formula, one would intuitively say that Flash Attention must be much slower compared to the default self-attention formula as more computation needs to be done. Indeed Flash Attention requires more FLOPs compared to normal attention as the softmax normalization statistics have to constantly be recomputed (see the [paper](https://huggingface.co/papers/2205.14135) for more details if interested).

> However, Flash Attention is much faster in inference compared to default attention which comes from its ability to significantly reduce the demands on the slower, high-bandwidth memory of the GPU (VRAM), focusing instead on the faster on-chip memory (SRAM).

-Essentially, Flash Attention makes sure that all intermediate write and read operations can be done using the fast *on-chip* SRAM memory instead of having to access the slower VRAM memory to compute the output vector \\( \mathbf{O} \\) .
+Essentially, Flash Attention makes sure that all intermediate write and read operations can be done using the fast *on-chip* SRAM memory instead of having to access the slower VRAM memory to compute the output vector $\mathbf{O}$.

In practice, there is currently absolutely no reason to **not** use Flash Attention if available. The algorithm gives mathematically the same outputs, and is both faster and more memory-efficient.

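The practical BetterTransformer-based walkthrough that used to follow here is removed by this diff. In current `transformers`, Flash Attention is instead requested at load time via `attn_implementation`; a hedged sketch (requires the `flash-attn` package and a supported GPU):

```python
import torch
from transformers import AutoModelForCausalLM

# "flash_attention_2" dispatches to FlashAttention-2 kernels; "sdpa" is a portable fallback
# that can also reach flash kernels through torch.nn.functional.scaled_dot_product_attention.
model = AutoModelForCausalLM.from_pretrained(
    "bigcode/octocoder",
    dtype=torch.bfloat16,
    device_map="auto",
    attn_implementation="flash_attention_2",
)
```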
-Let's look at a practical example.
-
-Our OctoCoder model now gets a significantly longer input prompt which includes a so-called *system prompt*. System prompts are used to steer the LLM into a better assistant that is tailored to the users' task.
-In the following, we use a system prompt that will make OctoCoder a better coding assistant.
-
-```python
-system_prompt = """Below are a series of dialogues between various people and an AI technical assistant.
-The assistant tries to be helpful, polite, honest, sophisticated, emotionally aware, and humble but knowledgeable.
-The assistant is happy to help with code questions and will do their best to understand exactly what is needed.
-It also tries to avoid giving false or misleading information, and it caveats when it isn't entirely sure about the right answer.
-That said, the assistant is practical really does its best, and doesn't let caution get too much in the way of being useful.
-
-The Starcoder models are a series of 15.5B parameter models trained on 80+ programming languages from The Stack (v1.2) (excluding opt-out requests).
-The model uses Multi Query Attention, was trained using the Fill-in-the-Middle objective, and with 8,192 tokens context window for a trillion tokens of heavily deduplicated data.
-
------
-
-Question: Write a function that takes two lists and returns a list that has alternating elements from each input list.
-
-Answer: Sure. Here is a function that does that.
-
-def alternating(list1, list2):
-    results = []
-    for i in range(len(list1)):
-        results.append(list1[i])
-        results.append(list2[i])
-    return results
-
-Question: Can you write some test cases for this function?
-
-Answer: Sure, here are some tests.
-
-assert alternating([10, 20, 30], [1, 2, 3]) == [10, 1, 20, 2, 30, 3]
-assert alternating([True, False], [4, 5]) == [True, 4, False, 5]
-assert alternating([], []) == []
-
-Question: Modify the function so that it returns all input elements when the lists have uneven length. The elements from the longer list should be at the end.
-
-Answer: Here is the modified function.
-
-def alternating(list1, list2):
-    results = []
-    for i in range(min(len(list1), len(list2))):
-        results.append(list1[i])
-        results.append(list2[i])
-    if len(list1) > len(list2):
-        results.extend(list1[i+1:])
-    else:
-        results.extend(list2[i+1:])
-    return results
-
------
-"""
-```
-
-For demonstration purposes, we duplicate the system prompt by ten so that the input length is long enough to observe Flash Attention's memory savings.
-We append the original text prompt `"Question: Please write a function in Python that transforms bytes to Giga bytes.\n\nAnswer: Here"`
-
-```python
-long_prompt = 10 * system_prompt + prompt
-```
-
-We instantiate our model again in bfloat16 precision.
-
-```python
-model = AutoModelForCausalLM.from_pretrained("bigcode/octocoder", dtype=torch.bfloat16, device_map="auto")
-tokenizer = AutoTokenizer.from_pretrained("bigcode/octocoder")
-
-pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
-```
-
-Let's now run the model just like before *without Flash Attention* and measure the peak GPU memory requirement and inference time.
-
-```python
-import time
-
-start_time = time.time()
-result = pipe(long_prompt, max_new_tokens=60)[0]["generated_text"][len(long_prompt):]
-
-print(f"Generated in {time.time() - start_time} seconds.")
-result
-```
-
-**Output**:
-
-```text
-Generated in 10.96854019165039 seconds.
-Sure. Here is a function that does that.\n\ndef bytes_to_giga(bytes):\n return bytes / 1024 / 1024 / 1024\n\nAnswer: Sure. Here is a function that does that.\n\ndef
-```
-
-We're getting the same output as before, however this time, the model repeats the answer multiple times until it's 60 tokens cut-off. This is not surprising as we've repeated the system prompt ten times for demonstration purposes and thus cued the model to repeat itself.
-
-**Note** that the system prompt should not be repeated ten times in real-world applications - one time is enough!
-
-Let's measure the peak GPU memory requirement.
-
-```python
-bytes_to_giga_bytes(torch.cuda.max_memory_allocated())
-```
-
-**Output**:
-
-```text
-37.668193340301514
-```
-
-As we can see the peak GPU memory requirement is now significantly higher than in the beginning, which is largely due to the longer input sequence. Also the generation takes a little over a minute now.
-
-We call `flush()` to free GPU memory for our next experiment.
-
-```python
-flush()
-```
-
-For comparison, let's run the same function, but enable Flash Attention instead.
-To do so, we convert the model to [BetterTransformer](https://huggingface.co/docs/optimum/bettertransformer/overview) and by doing so enabling PyTorch's [SDPA self-attention](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention) which in turn is able to use Flash Attention.
-
-```python
-model.to_bettertransformer()
-```
-
-Now we run the exact same code snippet as before and under the hood Transformers will make use of Flash Attention.
-
-```py
-start_time = time.time()
-with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
-    result = pipe(long_prompt, max_new_tokens=60)[0]["generated_text"][len(long_prompt):]
-
-print(f"Generated in {time.time() - start_time} seconds.")
-result
-```
-
-**Output**:
-
-```text
-Generated in 3.0211617946624756 seconds.
-Sure. Here is a function that does that.\n\ndef bytes_to_giga(bytes):\n return bytes / 1024 / 1024 / 1024\n\nAnswer: Sure. Here is a function that does that.\n\ndef
-```
-
-We're getting the exact same result as before, but can observe a very significant speed-up thanks to Flash Attention.
-
-Let's measure the memory consumption one last time.
-
-```python
-bytes_to_giga_bytes(torch.cuda.max_memory_allocated())
-```
-
-**Output**:
-
-```text
-32.617331981658936
-```
-
-And we're almost back to our original 29GB peak GPU memory from the beginning.
-
-We can observe that we only use roughly 100MB more GPU memory when passing a very long input sequence with Flash Attention compared to passing a short input sequence as done in the beginning.
-
-```py
-flush()
-```
-
-For more information on how to use Flash Attention, please have a look at [this doc page](https://huggingface.co/docs/transformers/en/perf_infer_gpu_one#flashattention-2).
-
## 3. Architectural Innovations

So far we have looked into improving computational and memory efficiency by:

- Casting the weights to a lower precision format
- Replacing the self-attention algorithm with a more memory- and compute-efficient version

-Let's now look into how we can change the architecture of an LLM so that it is most effective and efficient for task that require long text inputs, *e.g.*:
+Let's now look into how we can change the architecture of an LLM so that it is most effective and efficient for tasks that require long text inputs, *e.g.*:

- Retrieval augmented Question Answering,
- Summarization,
- Chat

Note that *chat* not only requires the LLM to handle long text inputs, but it also necessitates that the LLM is able to efficiently handle the back-and-forth dialogue between user and assistant (such as ChatGPT).

Once trained, the fundamental LLM architecture is difficult to change, so it is important to make considerations about the LLM's tasks beforehand and accordingly optimize the model's architecture.
There are two important components of the model architecture that quickly become memory and/or performance bottlenecks for large input sequences:

- The positional embeddings
- The key-value cache

Let's go over each component in more detail.

### 3.1 Improving positional embeddings of LLMs

Self-attention puts each token in relation to every other token.
-As an example, the \\( \text{Softmax}(\mathbf{QK}^T) \\) matrix of the text input sequence *"Hello", "I", "love", "you"* could look as follows:
+As an example, the $\text{Softmax}(\mathbf{QK}^T)$ matrix of the text input sequence *"Hello", "I", "love", "you"* could look as follows:

*(figure: example Softmax(QK^T) attention weights for the sequence "Hello", "I", "love", "you")*

Each word token is given a probability mass at which it attends all other word tokens and, therefore, is put into relation with all other word tokens. E.g. the word *"love"* attends to the word *"Hello"* with 5%, to *"I"* with 30%, and to itself with 65%.

An LLM based on self-attention, but without position embeddings, would have great difficulties in understanding the positions of the text inputs relative to each other.
-This is because the probability score computed by \\( \mathbf{QK}^T \\) relates each word token to each other word token in \\( O(1) \\) computations regardless of their relative positional distance to each other.
+This is because the probability score computed by $\mathbf{QK}^T$ relates each word token to each other word token in $O(1)$ computations regardless of their relative positional distance to each other.
Therefore, for the LLM without position embeddings each token appears to have the same distance to all other tokens, *e.g.* differentiating between *"Hello I love you"* and *"You love I hello"* would be very challenging.

For the LLM to understand sentence order, an additional *cue* is needed and is usually applied in the form of *positional encodings* (also called *positional embeddings*).
Positional encodings encode the position of each token into a numerical presentation that the LLM can leverage to better understand sentence order.

-The authors of the [*Attention Is All You Need*](https://huggingface.co/papers/1706.03762) paper introduced sinusoidal positional embeddings \\( \mathbf{P} = \mathbf{p}_1, \ldots, \mathbf{p}_N \\) .
+The authors of the [*Attention Is All You Need*](https://huggingface.co/papers/1706.03762) paper introduced sinusoidal positional embeddings $\mathbf{P} = \mathbf{p}_1, \ldots, \mathbf{p}_N$,
-where each vector \\( \mathbf{p}_i \\) is computed as a sinusoidal function of its position \\( i \\) .
+where each vector $\mathbf{p}_i$ is computed as a sinusoidal function of its position $i$.
-The positional encodings are then simply added to the input sequence vectors \\( \mathbf{\hat{X}} = \mathbf{\hat{x}}_1, \ldots, \mathbf{\hat{x}}_N \\) = \\( \mathbf{x}_1 + \mathbf{p}_1, \ldots, \mathbf{x}_N + \mathbf{p}_N \\) thereby cueing the model to better learn sentence order.
+The positional encodings are then simply added to the input sequence vectors $\mathbf{\hat{X}} = \mathbf{\hat{x}}_1, \ldots, \mathbf{\hat{x}}_N = \mathbf{x}_1 + \mathbf{p}_1, \ldots, \mathbf{x}_N + \mathbf{p}_N$, thereby cueing the model to better learn sentence order.

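For reference, a compact sketch of the sinusoidal embeddings described above (the standard formulation, with the dimension split and base 10000 from the original paper):

```python
import torch

def sinusoidal_positional_embeddings(num_positions: int, dim: int) -> torch.Tensor:
    # p_i alternates sin/cos at geometrically spaced frequencies.
    positions = torch.arange(num_positions, dtype=torch.float32).unsqueeze(1)
    freqs = torch.exp(torch.arange(0, dim, 2, dtype=torch.float32) * (-torch.log(torch.tensor(10000.0)) / dim))
    pe = torch.zeros(num_positions, dim)
    pe[:, 0::2] = torch.sin(positions * freqs)
    pe[:, 1::2] = torch.cos(positions * freqs)
    return pe

x = torch.randn(16, 512)                                # token embeddings X
x_hat = x + sinusoidal_positional_embeddings(16, 512)   # X_hat = X + P
```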
Instead of using fixed position embeddings, others (such as [Devlin et al.](https://huggingface.co/papers/1810.04805)) used learned positional encodings for which the positional embeddings
|
Instead of using fixed position embeddings, others (such as [Devlin et al.](https://huggingface.co/papers/1810.04805)) used learned positional encodings for which the positional embeddings
|
||||||
\\( \mathbf{P} \\) are learned during training.
|
$\mathbf{P}$ are learned during training.
|
||||||
|
|
||||||
Sinusoidal and learned position embeddings used to be the predominant methods to encode sentence order into LLMs, but a couple of problems related to these positional encodings were found:
|
Sinusoidal and learned position embeddings used to be the predominant methods to encode sentence order into LLMs, but a couple of problems related to these positional encodings were found:
|
||||||
|
|
||||||
1. Sinusoidal and learned position embeddings are both absolute positional embeddings, *i.e.* encoding a unique embedding for each position id: \\( 0, \ldots, N \\) . As shown by [Huang et al.](https://huggingface.co/papers/2009.13658) and [Su et al.](https://huggingface.co/papers/2104.09864), absolute positional embeddings lead to poor LLM performance for long text inputs. For long text inputs, it is advantageous if the model learns the relative positional distance input tokens have to each other instead of their absolute position.
|
1. Sinusoidal and learned position embeddings are both absolute positional embeddings, *i.e.* encoding a unique embedding for each position id: $0, \ldots, N$ . As shown by [Huang et al.](https://huggingface.co/papers/2009.13658) and [Su et al.](https://huggingface.co/papers/2104.09864), absolute positional embeddings lead to poor LLM performance for long text inputs. For long text inputs, it is advantageous if the model learns the relative positional distance input tokens have to each other instead of their absolute position.
|
||||||
2. When using learned position embeddings, the LLM has to be trained on a fixed input length \\( N \\), which makes it difficult to extrapolate to an input length longer than what it was trained on.
|
2. When using learned position embeddings, the LLM has to be trained on a fixed input length $N$, which makes it difficult to extrapolate to an input length longer than what it was trained on.
|
||||||
|
|
||||||
Recently, relative positional embeddings that can tackle the above mentioned problems have become more popular, most notably:
|
Recently, relative positional embeddings that can tackle the above mentioned problems have become more popular, most notably:
|
||||||
|
|
||||||
- [Rotary Position Embedding (RoPE)](https://huggingface.co/papers/2104.09864)
|
- [Rotary Position Embedding (RoPE)](https://huggingface.co/papers/2104.09864)
|
||||||
- [ALiBi](https://huggingface.co/papers/2108.12409)
|
- [ALiBi](https://huggingface.co/papers/2108.12409)
|
||||||
|
|
||||||
Both *RoPE* and *ALiBi* argue that it's best to cue the LLM about sentence order directly in the self-attention algorithm as it's there that word tokens are put into relation with each other. More specifically, sentence order should be cued by modifying the \\( \mathbf{QK}^T \\) computation.
|
Both *RoPE* and *ALiBi* argue that it's best to cue the LLM about sentence order directly in the self-attention algorithm as it's there that word tokens are put into relation with each other. More specifically, sentence order should be cued by modifying the $\mathbf{QK}^T$ computation.
|
||||||
|
|
||||||
Without going into too many details, *RoPE* notes that positional information can be encoded into query-key pairs, *e.g.* \\( \mathbf{q}_i \\) and \\( \mathbf{x}_j \\) by rotating each vector by an angle \\( \theta * i \\) and \\( \theta * j \\) respectively with \\( i, j \\) describing each vectors sentence position:
|
Without going into too many details, *RoPE* notes that positional information can be encoded into query-key pairs, *e.g.* $\mathbf{q}_i$ and $\mathbf{x}_j$ by rotating each vector by an angle $\theta * i$ and $\theta * j$ respectively with $i, j$ describing each vectors sentence position:
|
||||||
|
|
||||||
$$ \mathbf{\hat{q}}_i^T \mathbf{\hat{x}}_j = \mathbf{{q}}_i^T \mathbf{R}_{\theta, i -j} \mathbf{{x}}_j. $$
|
$$ \mathbf{\hat{q}}_i^T \mathbf{\hat{x}}_j = \mathbf{{q}}_i^T \mathbf{R}_{\theta, i -j} \mathbf{{x}}_j. $$
|
||||||
|
|
||||||
\\( \mathbf{R}_{\theta, i - j} \\) thereby represents a rotational matrix. \\( \theta \\) is *not* learned during training, but instead set to a pre-defined value that depends on the maximum input sequence length during training.
|
$\mathbf{R}_{\theta, i - j}$ thereby represents a rotational matrix. $\theta$ is *not* learned during training, but instead set to a pre-defined value that depends on the maximum input sequence length during training.
|
||||||
|
|
||||||
> By doing so, the probability score between \\( \mathbf{q}_i \\) and \\( \mathbf{x}_j \\) is only affected if \\( i \ne j \\) and solely depends on the relative distance \\( i - j \\) regardless of each vector's specific positions \\( i \\) and \\( j \\).
|
> By doing so, the probability score between $\mathbf{q}_i$ and $\mathbf{x}_j$ is only affected if $i \ne j$ and solely depends on the relative distance $i - j$ regardless of each vector's specific positions $i$ and $j$.
|
||||||
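This property can be checked with a toy two-dimensional example. The sketch below is not how RoPE is implemented in real models (which rotate pairs of channels at several frequencies), it merely verifies that the rotated dot product depends only on the relative distance $i - j$:

```python
import math
import torch

def rotate(x: torch.Tensor, pos: int, theta: float = 0.1) -> torch.Tensor:
    # rotate a 2-D vector by the position-dependent angle theta * pos
    c, s = math.cos(theta * pos), math.sin(theta * pos)
    return torch.tensor([[c, -s], [s, c]]) @ x

q = torch.tensor([1.0, 0.5])   # toy query vector
k = torch.tensor([0.3, -0.8])  # toy key vector

# same relative distance i - j = 2, very different absolute positions
score_near = rotate(q, pos=3) @ rotate(k, pos=1)
score_far = rotate(q, pos=103) @ rotate(k, pos=101)
print(torch.allclose(score_near, score_far))  # True
```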
|
|
||||||
*RoPE* is used in several of today's most important LLMs, such as:
|
*RoPE* is used in several of today's most important LLMs, such as:
|
||||||
|
|
||||||
- [**Falcon**](https://huggingface.co/tiiuae/falcon-40b)
|
- [**Falcon**](https://huggingface.co/tiiuae/falcon-40b)
|
||||||
- [**Llama**](https://huggingface.co/papers/2302.13971)
|
- [**Llama**](https://huggingface.co/papers/2302.13971)
|
||||||
- [**PaLM**](https://huggingface.co/papers/2204.02311)
|
- [**PaLM**](https://huggingface.co/papers/2204.02311)
|
||||||
|
|
||||||
As an alternative, *ALiBi* proposes a much simpler relative position encoding scheme. The relative distance that input tokens have to each other is added as a negative integer scaled by a pre-defined value `m` to each query-key entry of the \\( \mathbf{QK}^T \\) matrix right before the softmax computation.
|
As an alternative, *ALiBi* proposes a much simpler relative position encoding scheme. The relative distance that input tokens have to each other is added as a negative integer scaled by a pre-defined value `m` to each query-key entry of the $\mathbf{QK}^T$ matrix right before the softmax computation.
|
||||||
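A minimal sketch of this bias is shown below; the sequence length and the slope `m` are arbitrary toy values (in the ALiBi paper, each attention head gets its own slope from a geometric sequence):

```python
import torch

seq_len, m = 5, 0.25                       # toy sequence length and head slope
positions = torch.arange(seq_len)
distance = (positions[:, None] - positions[None, :]).clamp(min=0)  # i - j for j <= i
alibi_bias = -m * distance                 # linear penalty that grows with distance

scores = torch.randn(seq_len, seq_len)     # stand-in for QK^T / sqrt(d)
causal_mask = torch.tril(torch.ones(seq_len, seq_len, dtype=torch.bool))
scores = (scores + alibi_bias).masked_fill(~causal_mask, float("-inf"))
probs = scores.softmax(dim=-1)             # distant tokens receive lower probability
```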
|
|
||||||

|

|
||||||
|
|
||||||
@ -580,19 +418,20 @@ As shown in the [ALiBi](https://huggingface.co/papers/2108.12409) paper, this si
|
|||||||
|
|
||||||
*ALiBi* is used in several of today's most important LLMs, such as:
|
*ALiBi* is used in several of today's most important LLMs, such as:
|
||||||
|
|
||||||
- [**MPT**](https://huggingface.co/mosaicml/mpt-30b)
|
- [**MPT**](https://huggingface.co/mosaicml/mpt-30b)
|
||||||
- [**BLOOM**](https://huggingface.co/bigscience/bloom)
|
- [**BLOOM**](https://huggingface.co/bigscience/bloom)
|
||||||
|
|
||||||
Both *RoPE* and *ALiBi* position encodings can extrapolate to input lengths not seen during training whereas it has been shown that extrapolation works much better out-of-the-box for *ALiBi* as compared to *RoPE*.
|
Both *RoPE* and *ALiBi* position encodings can extrapolate to input lengths not seen during training whereas it has been shown that extrapolation works much better out-of-the-box for *ALiBi* as compared to *RoPE*.
|
||||||
For ALiBi, one simply increases the values of the lower triangular position matrix to match the length of the input sequence.
|
For ALiBi, one simply increases the values of the lower triangular position matrix to match the length of the input sequence.
|
||||||
For *RoPE*, keeping the same \\( \theta \\) that was used during training leads to poor results when passing text inputs much longer than those seen during training, *cf.* [Press et al.](https://huggingface.co/papers/2108.12409). However, the community has found a couple of effective tricks that adapt \\( \theta \\), thereby allowing *RoPE* position embeddings to work well for extrapolated text input sequences (see [here](https://github.com/huggingface/transformers/pull/24653)).
|
For *RoPE*, keeping the same $\theta$ that was used during training leads to poor results when passing text inputs much longer than those seen during training, *cf.* [Press et al.](https://huggingface.co/papers/2108.12409). However, the community has found a couple of effective tricks that adapt $\theta$, thereby allowing *RoPE* position embeddings to work well for extrapolated text input sequences (see [here](https://github.com/huggingface/transformers/pull/24653)).
|
||||||
|
|
||||||
> Both RoPE and ALiBi are relative positional embeddings that are *not* learned during training, but instead are based on the following intuitions:
|
> Both RoPE and ALiBi are relative positional embeddings that are *not* learned during training, but instead are based on the following intuitions:
|
||||||
- Positional cues about the text inputs should be given directly to the \\( QK^T \\) matrix of the self-attention layer
|
|
||||||
- The LLM should be incentivized to learn a constant *relative* distance positional encodings have to each other
|
|
||||||
- The further text input tokens are from each other, the lower the probability of their query-value probability. Both RoPE and ALiBi lower the query-key probability of tokens far away from each other. RoPE by decreasing their vector product by increasing the angle between the query-key vectors. ALiBi by adding large negative numbers to the vector product
|
|
||||||
|
|
||||||
In conclusion, LLMs that are intended to be deployed in tasks that require handling large text inputs are better trained with relative positional embeddings, such as RoPE and ALiBi. Also note that even if an LLM with RoPE and ALiBi has been trained only on a fixed length of say \\( N_1 = 2048 \\) it can still be used in practice with text inputs much larger than \\( N_1 \\), like \\( N_2 = 8192 > N_1 \\) by extrapolating the positional embeddings.
|
- Positional cues about the text inputs should be given directly to the $\mathbf{QK}^T$ matrix of the self-attention layer.
|
||||||
|
- The LLM should be incentivized to learn a constant *relative* distance positional encoding.
|
||||||
|
- The further text input tokens are from each other, the lower their query-key probability. Both RoPE and ALiBi lower the query-key probability of tokens far away from each other: RoPE by decreasing their vector product through a larger angle between the query-key vectors, ALiBi by adding large negative numbers to the vector product.
|
||||||
|
|
||||||
|
In conclusion, LLMs that are intended to be deployed in tasks that require handling large text inputs are better trained with relative positional embeddings, such as RoPE and ALiBi. Also note that even if an LLM with RoPE or ALiBi has been trained only on a fixed length of, say, $N_1 = 2048$, it can still be used in practice with text inputs much larger than $N_1$, like $N_2 = 8192 > N_1$, by extrapolating the positional embeddings.
|
||||||
|
|
||||||
### 3.2 The key-value cache
|
### 3.2 The key-value cache
|
||||||
|
|
||||||
@ -631,7 +470,7 @@ As we can see every time we increase the text input tokens by the just sampled t
|
|||||||
|
|
||||||
With very few exceptions, LLMs are trained using the [causal language modeling objective](https://huggingface.co/docs/transformers/tasks/language_modeling#causal-language-modeling) and therefore mask the upper triangular part of the attention score matrix - this is why in the two diagrams above the attention scores are left blank (*i.e.* have 0 probability). For a quick recap on causal language modeling you can refer to the [*Illustrated Self Attention blog*](https://jalammar.github.io/illustrated-gpt2/#part-2-illustrated-self-attention).
|
With very few exceptions, LLMs are trained using the [causal language modeling objective](https://huggingface.co/docs/transformers/tasks/language_modeling#causal-language-modeling) and therefore mask the upper triangular part of the attention score matrix - this is why in the two diagrams above the attention scores are left blank (*i.e.* have 0 probability). For a quick recap on causal language modeling you can refer to the [*Illustrated Self Attention blog*](https://jalammar.github.io/illustrated-gpt2/#part-2-illustrated-self-attention).
|
||||||
|
|
||||||
As a consequence, tokens *never* depend on previous tokens, more specifically the \\( \mathbf{q}_i \\) vector is never put in relation with any key, values vectors \\( \mathbf{k}_j, \mathbf{v}_j \\) if \\( j > i \\) . Instead \\( \mathbf{q}_i \\) only attends to previous key-value vectors \\( \mathbf{k}_{m < i}, \mathbf{v}_{m < i} \text{ , for } m \in \{0, \ldots i - 1\} \\). In order to reduce unnecessary computation, one can therefore cache each layer's key-value vectors for all previous timesteps.
|
As a consequence, tokens *never* depend on later tokens, more specifically the $\mathbf{q}_i$ vector is never put in relation with any key, values vectors $\mathbf{k}_j, \mathbf{v}_j$ if $j > i$ . Instead $\mathbf{q}_i$ only attends to previous key-value vectors $\mathbf{k}_{m < i}, \mathbf{v}_{m < i} \text{ , for } m \in \{0, \ldots i - 1\}$. In order to reduce unnecessary computation, one can therefore cache each layer's key-value vectors for all previous timesteps.
|
||||||
|
|
||||||
In the following, we will tell the LLM to make use of the key-value cache by retrieving and forwarding it for each forward pass.
|
In the following, we will tell the LLM to make use of the key-value cache by retrieving and forwarding it for each forward pass.
|
||||||
In Transformers, we can retrieve the key-value cache by passing the `use_cache` flag to the `forward` call and can then pass it with the current token.
|
In Transformers, we can retrieve the key-value cache by passing the `use_cache` flag to the `forward` call and can then pass it with the current token.
|
||||||
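A self-contained sketch of that loop is given below, using `gpt2` only as a small stand-in checkpoint rather than the model benchmarked in this guide:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

input_ids = tokenizer("The key-value cache", return_tensors="pt").input_ids
past_key_values = None  # empty cache on the first forward pass

for _ in range(5):
    output = model(input_ids=input_ids, past_key_values=past_key_values, use_cache=True)
    past_key_values = output.past_key_values          # grows by one position per step
    input_ids = output.logits[:, -1:].argmax(dim=-1)  # only the newly sampled token is fed back in
```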
@ -647,7 +486,7 @@ for _ in range(5):
|
|||||||
next_token_id = torch.argmax(next_logits, dim=-1)
|
next_token_id = torch.argmax(next_logits, dim=-1)
|
||||||
|
|
||||||
print("shape of input_ids", next_token_id.shape)
|
print("shape of input_ids", next_token_id.shape)
|
||||||
print("length of key-value cache", len(past_key_values[0][0])) # past_key_values are of shape [num_layers, 0 for k, 1 for v, batch_size, length, hidden_dim]
|
print("length of key-value cache", past_key_values.get_seq_length()) # past_key_values are of shape [num_layers, 0 for k, 1 for v, batch_size, length, hidden_dim]
|
||||||
generated_tokens.append(next_token_id.item())
|
generated_tokens.append(next_token_id.item())
|
||||||
|
|
||||||
generated_text = tokenizer.batch_decode(generated_tokens)
|
generated_text = tokenizer.batch_decode(generated_tokens)
|
||||||
@ -672,11 +511,12 @@ length of key-value cache 24
|
|||||||
|
|
||||||
As one can see, when using the key-value cache the text input tokens are *not* increased in length, but remain a single input vector. The length of the key-value cache on the other hand is increased by one at every decoding step.
|
As one can see, when using the key-value cache the text input tokens are *not* increased in length, but remain a single input vector. The length of the key-value cache on the other hand is increased by one at every decoding step.
|
||||||
|
|
||||||
> Making use of the key-value cache means that the \\( \mathbf{QK}^T \\) is essentially reduced to \\( \mathbf{q}_c\mathbf{K}^T \\) with \\( \mathbf{q}_c \\) being the query projection of the currently passed input token which is *always* just a single vector.
|
> Making use of the key-value cache means that the $\mathbf{QK}^T$ is essentially reduced to $\mathbf{q}_c\mathbf{K}^T$ with $\mathbf{q}_c$ being the query projection of the currently passed input token which is *always* just a single vector.
|
||||||
|
|
||||||
Using the key-value cache has two advantages:
|
Using the key-value cache has two advantages:
|
||||||
- Significant increase in computational efficiency as less computations are performed compared to computing the full \\( \mathbf{QK}^T \\) matrix. This leads to an increase in inference speed
|
|
||||||
- The maximum required memory is not increased quadratically with the number of generated tokens, but only increases linearly.
|
- Significant increase in computational efficiency as fewer computations are performed compared to computing the full $\mathbf{QK}^T$ matrix. This leads to an increase in inference speed
|
||||||
|
- The maximum required memory is not increased quadratically with the number of generated tokens, but only increases linearly.
|
||||||
|
|
||||||
> One should *always* make use of the key-value cache as it leads to identical results and a significant speed-up for longer input sequences. Transformers has the key-value cache enabled by default when making use of the text pipeline or the [`generate` method](https://huggingface.co/docs/transformers/main_classes/text_generation). We have an entire guide dedicated to caches [here](./kv_cache).
|
> One should *always* make use of the key-value cache as it leads to identical results and a significant speed-up for longer input sequences. Transformers has the key-value cache enabled by default when making use of the text pipeline or the [`generate` method](https://huggingface.co/docs/transformers/main_classes/text_generation). We have an entire guide dedicated to caches [here](./kv_cache).
|
||||||
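To sanity-check the claim that results are identical, one can compare greedy generations with the cache enabled and disabled; `gpt2` below is just a small stand-in checkpoint:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("The key-value cache", return_tensors="pt")

with_cache = model.generate(**inputs, max_new_tokens=20, do_sample=False, use_cache=True)
without_cache = model.generate(**inputs, max_new_tokens=20, do_sample=False, use_cache=False)
print(torch.equal(with_cache, without_cache))  # True - same tokens, the cached run is just faster
```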
|
|
||||||
@ -698,10 +538,12 @@ Assistant: Germany has ca. 81 million inhabitants
|
|||||||
```
|
```
|
||||||
|
|
||||||
In this chat, the LLM runs auto-regressive decoding twice:
|
In this chat, the LLM runs auto-regressive decoding twice:
|
||||||
|
|
||||||
1. The first time, the key-value cache is empty and the input prompt is `"User: How many people live in France?"` and the model auto-regressively generates the text `"Roughly 75 million people live in France"` while increasing the key-value cache at every decoding step.
|
1. The first time, the key-value cache is empty and the input prompt is `"User: How many people live in France?"` and the model auto-regressively generates the text `"Roughly 75 million people live in France"` while increasing the key-value cache at every decoding step.
|
||||||
2. The second time the input prompt is `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many in Germany?"`. Thanks to the cache, all key-value vectors for the first two sentences are already computed. Therefore the input prompt only consists of `"User: And how many in Germany?"`. While processing the shortened input prompt, its computed key-value vectors are concatenated to the key-value cache of the first decoding. The second Assistant's answer `"Germany has ca. 81 million inhabitants"` is then auto-regressively generated with the key-value cache consisting of encoded key-value vectors of `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many are in Germany?"`.
|
2. The second time the input prompt is `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many in Germany?"`. Thanks to the cache, all key-value vectors for the first two sentences are already computed. Therefore the input prompt only consists of `"User: And how many in Germany?"`. While processing the shortened input prompt, its computed key-value vectors are concatenated to the key-value cache of the first decoding. The second Assistant's answer `"Germany has ca. 81 million inhabitants"` is then auto-regressively generated with the key-value cache consisting of encoded key-value vectors of `"User: How many people live in France? \n Assistant: Roughly 75 million people live in France \n User: And how many are in Germany?"`.
|
||||||
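Sketched roughly, and assuming the cache-reuse pattern from the dedicated cache guide (with `gpt2` as a stand-in checkpoint and plain strings instead of a real chat template), step 2 could look like this:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, DynamicCache

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

cache = DynamicCache()  # filled in place by generate and reused on the next turn

turn_1 = "User: How many people live in France?\nAssistant:"
ids_1 = tokenizer(turn_1, return_tensors="pt").input_ids
out_1 = model.generate(ids_1, past_key_values=cache, max_new_tokens=20)

# only the new user message has to be encoded from scratch; the cache already holds
# the key-value vectors of the first question and answer
turn_2 = tokenizer.decode(out_1[0]) + "\nUser: And how many in Germany?\nAssistant:"
ids_2 = tokenizer(turn_2, return_tensors="pt").input_ids
out_2 = model.generate(ids_2, past_key_values=cache, max_new_tokens=20)
print(tokenizer.decode(out_2[0]))
```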
|
|
||||||
Two things should be noted here:
|
Two things should be noted here:
|
||||||
|
|
||||||
1. Keeping all the context is crucial for LLMs deployed in chat so that the LLM understands all the previous context of the conversation. E.g. for the example above the LLM needs to understand that the user refers to the population when asking `"And how many are in Germany"`.
|
1. Keeping all the context is crucial for LLMs deployed in chat so that the LLM understands all the previous context of the conversation. E.g. for the example above the LLM needs to understand that the user refers to the population when asking `"And how many are in Germany"`.
|
||||||
2. The key-value cache is extremely useful for chat as it allows us to continuously grow the encoded chat history instead of having to re-encode the chat history again from scratch (as e.g. would be the case when using an encoder-decoder architecture).
|
2. The key-value cache is extremely useful for chat as it allows us to continuously grow the encoded chat history instead of having to re-encode the chat history again from scratch (as e.g. would be the case when using an encoder-decoder architecture).
|
||||||
|
|
||||||
@ -737,7 +579,7 @@ def bytes_to_megabytes(bytes):
|
|||||||
Answer: The function takes a number of bytes as input and returns the number of
|
Answer: The function takes a number of bytes as input and returns the number of
|
||||||
```
|
```
|
||||||
|
|
||||||
Great, no additional time is spent recomputing the same key and values for the attention layer! There is however one catch. While the required peak memory for the \\( \mathbf{QK}^T \\) matrix is significantly reduced, holding the key-value cache in memory can become very memory expensive for long input sequences or multi-turn chat. Remember that the key-value cache needs to store the key-value vectors for all previous input vectors \\( \mathbf{x}_i \text{, for } i \in \{1, \ldots, c - 1\} \\) for all self-attention layers and for all attention heads.
|
Great, no additional time is spent recomputing the same key and values for the attention layer! There is however one catch. While the required peak memory for the $\mathbf{QK}^T$ matrix is significantly reduced, holding the key-value cache in memory can become very memory expensive for long input sequences or multi-turn chat. Remember that the key-value cache needs to store the key-value vectors for all previous input vectors $\mathbf{x}_i \text{, for } i \in \{1, \ldots, c - 1\}$ for all self-attention layers and for all attention heads.
|
||||||
|
|
||||||
Let's compute the number of float values that need to be stored in the key-value cache for the LLM `bigcode/octocoder` that we used before.
|
Let's compute the number of float values that need to be stored in the key-value cache for the LLM `bigcode/octocoder` that we used before.
|
||||||
The number of float values amounts to two times the sequence length times the number of attention heads times the attention head dimension and times the number of layers.
|
The number of float values amounts to two times the sequence length times the number of attention heads times the attention head dimension and times the number of layers.
|
||||||
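As a back-of-the-envelope sketch (the config values below are illustrative assumptions, not read from the checkpoint's actual config):

```python
seq_len = 16_000        # length of the input sequence
num_layers = 40         # assumed number of transformer layers
num_heads = 48          # assumed number of attention heads
head_dim = 128          # assumed attention head dimension
bytes_per_value = 2     # float16 / bfloat16

kv_floats = 2 * seq_len * num_heads * head_dim * num_layers   # factor 2: keys and values
kv_bytes = kv_floats * bytes_per_value
print(f"{kv_floats / 1e9:.1f}B float values, roughly {kv_bytes / 1024**3:.1f} GiB in half precision")
```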
@ -761,21 +603,21 @@ Researchers have proposed two methods that allow to significantly reduce the mem
|
|||||||
|
|
||||||
[Multi-Query-Attention](https://huggingface.co/papers/1911.02150) was proposed in Noam Shazeer's *Fast Transformer Decoding: One Write-Head is All You Need* paper. As the title says, Noam found out that instead of using `n_head` key-value projection weights, one can use a single key-value projection weight pair that is shared across all attention heads without the model's performance significantly degrading.
|
[Multi-Query-Attention](https://huggingface.co/papers/1911.02150) was proposed in Noam Shazeer's *Fast Transformer Decoding: One Write-Head is All You Need* paper. As the title says, Noam found out that instead of using `n_head` key-value projection weights, one can use a single key-value projection weight pair that is shared across all attention heads without the model's performance significantly degrading.
|
||||||
|
|
||||||
> By using a single key-value projection weight pair, the key-value vectors \\( \mathbf{k}_i, \mathbf{v}_i \\) have to be identical across all attention heads which in turn means that we only need to store 1 key-value projection pair in the cache instead of `n_head` ones.
|
> By using a single key-value projection weight pair, the key-value vectors $\mathbf{k}_i, \mathbf{v}_i$ have to be identical across all attention heads which in turn means that we only need to store 1 key-value projection pair in the cache instead of `n_head` ones.
|
||||||
|
|
||||||
As most LLMs use between 20 and 100 attention heads, MQA significantly reduces the memory consumption of the key-value cache. For the LLM used in this notebook we could therefore reduce the required memory consumption from 15 GB to less than 400 MB at an input sequence length of 16000.
|
As most LLMs use between 20 and 100 attention heads, MQA significantly reduces the memory consumption of the key-value cache. For the LLM used in this notebook we could therefore reduce the required memory consumption from 15 GB to less than 400 MB at an input sequence length of 16000.
|
||||||
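Using the same assumed configuration as in the cache-size sketch above, the saving can be made explicit:

```python
seq_len, num_layers, head_dim, bytes_per_value = 16_000, 40, 128, 2   # same assumptions as above

mha_kv_heads, mqa_kv_heads = 48, 1   # full multi-head KV cache vs. a single shared KV head
mha_bytes = 2 * seq_len * mha_kv_heads * head_dim * num_layers * bytes_per_value
mqa_bytes = 2 * seq_len * mqa_kv_heads * head_dim * num_layers * bytes_per_value
print(f"MHA cache: ~{mha_bytes / 1024**3:.1f} GiB, MQA cache: ~{mqa_bytes / 1024**2:.0f} MiB")
```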
|
|
||||||
In addition to memory savings, MQA also leads to improved computational efficiency as explained in the following.
|
In addition to memory savings, MQA also leads to improved computational efficiency as explained in the following.
|
||||||
In auto-regressive decoding, large key-value vectors need to be reloaded, concatenated with the current key-value vector pair to be then fed into the \\( \mathbf{q}_c\mathbf{K}^T \\) computation at every step. For auto-regressive decoding, the required memory bandwidth for the constant reloading can become a serious time bottleneck. By reducing the size of the key-value vectors less memory needs to be accessed, thus reducing the memory bandwidth bottleneck. For more detail, please have a look at [Noam's paper](https://huggingface.co/papers/1911.02150).
|
In auto-regressive decoding, large key-value vectors need to be reloaded, concatenated with the current key-value vector pair to be then fed into the $\mathbf{q}_c\mathbf{K}^T$ computation at every step. For auto-regressive decoding, the required memory bandwidth for the constant reloading can become a serious time bottleneck. By reducing the size of the key-value vectors less memory needs to be accessed, thus reducing the memory bandwidth bottleneck. For more detail, please have a look at [Noam's paper](https://huggingface.co/papers/1911.02150).
|
||||||
|
|
||||||
The important part to understand here is that reducing the number of key-value attention heads to 1 only makes sense if a key-value cache is used. The peak memory consumption of the model for a single forward pass without key-value cache stays unchanged as every attention head still has a unique query vector so that each attention head still has a different \\( \mathbf{QK}^T \\) matrix.
|
The important part to understand here is that reducing the number of key-value attention heads to 1 only makes sense if a key-value cache is used. The peak memory consumption of the model for a single forward pass without key-value cache stays unchanged as every attention head still has a unique query vector so that each attention head still has a different $\mathbf{QK}^T$ matrix.
|
||||||
|
|
||||||
MQA has seen wide adoption by the community and is now used by many of the most popular LLMs:
|
MQA has seen wide adoption by the community and is now used by many of the most popular LLMs:
|
||||||
|
|
||||||
- [**Falcon**](https://huggingface.co/tiiuae/falcon-40b)
|
- [**Falcon**](https://huggingface.co/tiiuae/falcon-40b)
|
||||||
- [**PaLM**](https://huggingface.co/papers/2204.02311)
|
- [**PaLM**](https://huggingface.co/papers/2204.02311)
|
||||||
- [**MPT**](https://huggingface.co/mosaicml/mpt-30b)
|
- [**MPT**](https://huggingface.co/mosaicml/mpt-30b)
|
||||||
- [**BLOOM**](https://huggingface.co/bigscience/bloom)
|
- [**BLOOM**](https://huggingface.co/bigscience/bloom)
|
||||||
|
|
||||||
Also, the checkpoint used in this notebook - `bigcode/octocoder` - makes use of MQA.
|
Also, the checkpoint used in this notebook - `bigcode/octocoder` - makes use of MQA.
|
||||||
|
|
||||||
|
@ -16,7 +16,7 @@ rendered properly in your Markdown viewer.
|
|||||||
|
|
||||||
# Configuration
|
# Configuration
|
||||||
|
|
||||||
The base class [`PretrainedConfig`] implements the common methods for loading/saving a configuration
|
The base class [`PreTrainedConfig`] implements the common methods for loading/saving a configuration
|
||||||
either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded
|
either from a local file or directory, or from a pretrained model configuration provided by the library (downloaded
|
||||||
from HuggingFace's AWS S3 repository).
|
from HuggingFace's AWS S3 repository).
|
||||||
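A minimal sketch of that load/save round trip (using `bert-base-uncased` simply as a familiar public checkpoint):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("bert-base-uncased")   # fetched from the Hub, or read from a local directory
print(config.hidden_size, config.num_attention_heads, config.num_hidden_layers)

config.save_pretrained("./my-config")                      # writes config.json to the directory
reloaded = AutoConfig.from_pretrained("./my-config")
```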
|
|
||||||
@ -24,8 +24,8 @@ Each derived config class implements model specific attributes. Common attribute
|
|||||||
`hidden_size`, `num_attention_heads`, and `num_hidden_layers`. Text models further implement:
|
`hidden_size`, `num_attention_heads`, and `num_hidden_layers`. Text models further implement:
|
||||||
`vocab_size`.
|
`vocab_size`.
|
||||||
|
|
||||||
## PretrainedConfig
|
## PreTrainedConfig
|
||||||
|
|
||||||
[[autodoc]] PretrainedConfig
|
[[autodoc]] PreTrainedConfig
|
||||||
- push_to_hub
|
- push_to_hub
|
||||||
- all
|
- all
|
||||||
|
7
docs/source/en/main_classes/kernels.md
Normal file
7
docs/source/en/main_classes/kernels.md
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
## Kernels
|
||||||
|
|
||||||
|
This page documents the kernels configuration utilities.
|
||||||
|
|
||||||
|
### KernelConfig
|
||||||
|
|
||||||
|
[[autodoc]] KernelConfig
|
@ -22,7 +22,6 @@ file or directory, or from a pretrained model configuration provided by the libr
|
|||||||
[`PreTrainedModel`] also implements a few methods which are common among all the models to:
|
[`PreTrainedModel`] also implements a few methods which are common among all the models to:
|
||||||
|
|
||||||
- resize the input token embeddings when new tokens are added to the vocabulary
|
- resize the input token embeddings when new tokens are added to the vocabulary
|
||||||
- prune the attention heads of the model.
|
|
||||||
|
|
||||||
The other methods that are common to each model are defined in [`~modeling_utils.ModuleUtilsMixin`] and [`~generation.GenerationMixin`].
|
The other methods that are common to each model are defined in [`~modeling_utils.ModuleUtilsMixin`] and [`~generation.GenerationMixin`].
|
||||||
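For instance, resizing the embeddings after adding new tokens (the first bullet above) could look like this minimal sketch, with `gpt2` used only as a familiar stand-in checkpoint:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

num_added = tokenizer.add_tokens(["<my_new_token>"])   # extend the vocabulary
model.resize_token_embeddings(len(tokenizer))          # grow the input (and tied output) embeddings to match
print(num_added, model.get_input_embeddings().weight.shape[0])
```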
|
|
||||||
@ -43,7 +42,3 @@ set this to `False`.
|
|||||||
## Pushing to the Hub
|
## Pushing to the Hub
|
||||||
|
|
||||||
[[autodoc]] utils.PushToHubMixin
|
[[autodoc]] utils.PushToHubMixin
|
||||||
|
|
||||||
## Sharded checkpoints
|
|
||||||
|
|
||||||
[[autodoc]] modeling_utils.load_sharded_checkpoint
|
|
||||||
|
@ -154,7 +154,6 @@ for label, score in zip(candidate_labels, probs):
|
|||||||
## AlignConfig
|
## AlignConfig
|
||||||
|
|
||||||
[[autodoc]] AlignConfig
|
[[autodoc]] AlignConfig
|
||||||
- from_text_vision_configs
|
|
||||||
|
|
||||||
## AlignTextConfig
|
## AlignTextConfig
|
||||||
|
|
||||||
|
@ -48,7 +48,7 @@ You will then be able to use the auto classes like you would usually do!
|
|||||||
|
|
||||||
<Tip warning={true}>
|
<Tip warning={true}>
|
||||||
|
|
||||||
If your `NewModelConfig` is a subclass of [`~transformers.PretrainedConfig`], make sure its
|
If your `NewModelConfig` is a subclass of [`~transformers.PreTrainedConfig`], make sure its
|
||||||
`model_type` attribute is set to the same key you use when registering the config (here `"new-model"`).
|
`model_type` attribute is set to the same key you use when registering the config (here `"new-model"`).
|
||||||
|
|
||||||
Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its
|
Likewise, if your `NewModel` is a subclass of [`PreTrainedModel`], make sure its
|
||||||
|
@ -43,10 +43,11 @@ Bark can be optimized with just a few extra lines of code, which **significantly
|
|||||||
You can speed up inference and reduce memory footprint by 50% simply by loading the model in half-precision.
|
You can speed up inference and reduce memory footprint by 50% simply by loading the model in half-precision.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from transformers import BarkModel, infer_device
|
from transformers import BarkModel
|
||||||
|
from accelerate import Accelerator
|
||||||
import torch
|
import torch
|
||||||
|
|
||||||
device = infer_device()
|
device = Accelerator().device
|
||||||
model = BarkModel.from_pretrained("suno/bark-small", dtype=torch.float16).to(device)
|
model = BarkModel.from_pretrained("suno/bark-small", dtype=torch.float16).to(device)
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -62,24 +63,13 @@ model.enable_cpu_offload()
|
|||||||
|
|
||||||
Note that 🤗 Accelerate must be installed before using this feature. [Here's how to install it.](https://huggingface.co/docs/accelerate/basic_tutorials/install)
|
Note that 🤗 Accelerate must be installed before using this feature. [Here's how to install it.](https://huggingface.co/docs/accelerate/basic_tutorials/install)
|
||||||
|
|
||||||
#### Using Better Transformer
|
|
||||||
|
|
||||||
Better Transformer is an 🤗 Optimum feature that performs kernel fusion under the hood. You can gain 20% to 30% in speed with zero performance degradation. It only requires one line of code to export the model to 🤗 Better Transformer:
|
|
||||||
|
|
||||||
```python
|
|
||||||
model = model.to_bettertransformer()
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that 🤗 Optimum must be installed before using this feature. [Here's how to install it.](https://huggingface.co/docs/optimum/installation)
|
|
||||||
|
|
||||||
#### Using Flash Attention 2
|
#### Using Flash Attention 2
|
||||||
|
|
||||||
Flash Attention 2 is an even faster, optimized version of the previous optimization.
|
Flash Attention 2 is an even faster, optimized version of the previous optimization.
|
||||||
|
|
||||||
##### Installation
|
##### Installation
|
||||||
|
|
||||||
First, check whether your hardware is compatible with Flash Attention 2. The latest list of compatible hardware can be found in the [official documentation](https://github.com/Dao-AILab/flash-attention#installation-and-features). If your hardware is not compatible with Flash Attention 2, you can still benefit from attention kernel optimisations through Better Transformer support covered [above](https://huggingface.co/docs/transformers/main/en/model_doc/bark#using-better-transformer).
|
First, check whether your hardware is compatible with Flash Attention 2. The latest list of compatible hardware can be found in the [official documentation](https://github.com/Dao-AILab/flash-attention#installation-and-features).
|
||||||
|
|
||||||
Next, [install](https://github.com/Dao-AILab/flash-attention#installation-and-features) the latest version of Flash Attention 2:
|
Next, [install](https://github.com/Dao-AILab/flash-attention#installation-and-features) the latest version of Flash Attention 2:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
@ -96,7 +86,7 @@ model = BarkModel.from_pretrained("suno/bark-small", dtype=torch.float16, attn_i
|
|||||||
|
|
||||||
##### Performance comparison
|
##### Performance comparison
|
||||||
|
|
||||||
The following diagram shows the latency for the native attention implementation (no optimisation) against Better Transformer and Flash Attention 2. In all cases, we generate 400 semantic tokens on a 40GB A100 GPU with PyTorch 2.1. Flash Attention 2 is also consistently faster than Better Transformer, and its performance improves even more as batch sizes increase:
|
The following diagram shows the latency for the native attention implementation (no optimisation) against Flash Attention 2. In all cases, we generate 400 semantic tokens on a 40GB A100 GPU with PyTorch 2.1:
|
||||||
|
|
||||||
<div style="text-align: center">
|
<div style="text-align: center">
|
||||||
<img src="https://huggingface.co/datasets/ylacombe/benchmark-comparison/resolve/main/Bark%20Optimization%20Benchmark.png">
|
<img src="https://huggingface.co/datasets/ylacombe/benchmark-comparison/resolve/main/Bark%20Optimization%20Benchmark.png">
|
||||||
@ -104,17 +94,16 @@ The following diagram shows the latency for the native attention implementation
|
|||||||
|
|
||||||
To put this into perspective, on an NVIDIA A100 and when generating 400 semantic tokens with a batch size of 16, you can get 17 times the [throughput](https://huggingface.co/blog/optimizing-bark#throughput) and still be 2 seconds faster than generating sentences one by one with the native model implementation. In other words, all the samples will be generated 17 times faster.
|
To put this into perspective, on an NVIDIA A100 and when generating 400 semantic tokens with a batch size of 16, you can get 17 times the [throughput](https://huggingface.co/blog/optimizing-bark#throughput) and still be 2 seconds faster than generating sentences one by one with the native model implementation. In other words, all the samples will be generated 17 times faster.
|
||||||
|
|
||||||
At batch size 8, on an NVIDIA A100, Flash Attention 2 is also 10% faster than Better Transformer, and at batch size 16, 25%.
|
|
||||||
|
|
||||||
#### Combining optimization techniques
|
#### Combining optimization techniques
|
||||||
|
|
||||||
You can combine optimization techniques, and use CPU offload, half-precision and Flash Attention 2 (or 🤗 Better Transformer) all at once.
|
You can combine optimization techniques, and use CPU offload, half-precision and Flash Attention 2 all at once.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from transformers import BarkModel, infer_device
|
from transformers import BarkModel
|
||||||
|
from accelerate import Accelerator
|
||||||
import torch
|
import torch
|
||||||
|
|
||||||
device = infer_device()
|
device = Accelerator().device
|
||||||
|
|
||||||
# load in fp16 and use Flash Attention 2
|
# load in fp16 and use Flash Attention 2
|
||||||
model = BarkModel.from_pretrained("suno/bark-small", dtype=torch.float16, attn_implementation="flash_attention_2").to(device)
|
model = BarkModel.from_pretrained("suno/bark-small", dtype=torch.float16, attn_implementation="flash_attention_2").to(device)
|
||||||
|
@ -60,7 +60,6 @@ If you're interested in submitting a resource to be included here, please feel f
|
|||||||
## Blip2Config
|
## Blip2Config
|
||||||
|
|
||||||
[[autodoc]] Blip2Config
|
[[autodoc]] Blip2Config
|
||||||
- from_vision_qformer_text_configs
|
|
||||||
|
|
||||||
## Blip2VisionConfig
|
## Blip2VisionConfig
|
||||||
|
|
||||||
|
@ -87,7 +87,6 @@ Refer to this [notebook](https://github.com/huggingface/notebooks/blob/main/exam
|
|||||||
## BlipConfig
|
## BlipConfig
|
||||||
|
|
||||||
[[autodoc]] BlipConfig
|
[[autodoc]] BlipConfig
|
||||||
- from_text_vision_configs
|
|
||||||
|
|
||||||
## BlipTextConfig
|
## BlipTextConfig
|
||||||
|
|
||||||
|
@ -25,7 +25,7 @@ rendered properly in your Markdown viewer.
|
|||||||
</div>
|
</div>
|
||||||
</div>
|
</div>
|
||||||
|
|
||||||
# Byte Lantet Transformer (BLT)
|
# Byte Latent Transformer (BLT)
|
||||||
|
|
||||||
## Overview
|
## Overview
|
||||||
|
|
||||||
|
@ -76,7 +76,6 @@ Currently, following scales of pretrained Chinese-CLIP models are available on
|
|||||||
## ChineseCLIPConfig
|
## ChineseCLIPConfig
|
||||||
|
|
||||||
[[autodoc]] ChineseCLIPConfig
|
[[autodoc]] ChineseCLIPConfig
|
||||||
- from_text_vision_configs
|
|
||||||
|
|
||||||
## ChineseCLIPTextConfig
|
## ChineseCLIPTextConfig
|
||||||
|
|
||||||
|
@ -63,7 +63,6 @@ print(f"Text embeddings: {text_features}")
|
|||||||
## ClapConfig
|
## ClapConfig
|
||||||
|
|
||||||
[[autodoc]] ClapConfig
|
[[autodoc]] ClapConfig
|
||||||
- from_text_audio_configs
|
|
||||||
|
|
||||||
## ClapTextConfig
|
## ClapTextConfig
|
||||||
|
|
||||||
|
@ -87,7 +87,6 @@ print(f"Most likely label: {most_likely_label} with probability: {probs[0][most_
|
|||||||
## CLIPConfig
|
## CLIPConfig
|
||||||
|
|
||||||
[[autodoc]] CLIPConfig
|
[[autodoc]] CLIPConfig
|
||||||
- from_text_vision_configs
|
|
||||||
|
|
||||||
## CLIPTextConfig
|
## CLIPTextConfig
|
||||||
|
|
||||||
|
@ -72,7 +72,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h
|
|||||||
## CLIPSegConfig
|
## CLIPSegConfig
|
||||||
|
|
||||||
[[autodoc]] CLIPSegConfig
|
[[autodoc]] CLIPSegConfig
|
||||||
- from_text_vision_configs
|
|
||||||
|
|
||||||
## CLIPSegTextConfig
|
## CLIPSegTextConfig
|
||||||
|
|
||||||
|
@ -73,7 +73,6 @@ Example :
|
|||||||
## ClvpConfig
|
## ClvpConfig
|
||||||
|
|
||||||
[[autodoc]] ClvpConfig
|
[[autodoc]] ClvpConfig
|
||||||
- from_sub_model_configs
|
|
||||||
|
|
||||||
## ClvpEncoderConfig
|
## ClvpEncoderConfig
|
||||||
|
|
||||||
|
@ -107,10 +107,11 @@ import requests
|
|||||||
import torch
|
import torch
|
||||||
from PIL import Image
|
from PIL import Image
|
||||||
|
|
||||||
from transformers import BitsAndBytesConfig, ColQwen2ForRetrieval, ColQwen2Processor, infer_device
|
from transformers import BitsAndBytesConfig, ColQwen2ForRetrieval, ColQwen2Processor
|
||||||
|
from accelerate import Accelerator
|
||||||
|
|
||||||
model_name = "vidore/colqwen2-v1.0-hf"
|
model_name = "vidore/colqwen2-v1.0-hf"
|
||||||
device = infer_device()
|
device = Accelerator().device
|
||||||
|
|
||||||
# 4-bit quantization configuration
|
# 4-bit quantization configuration
|
||||||
bnb_config = BitsAndBytesConfig(
|
bnb_config = BitsAndBytesConfig(
|
||||||
|
@ -38,10 +38,11 @@ CSM can be used to simply generate speech from a text prompt:
|
|||||||
|
|
||||||
```python
|
```python
|
||||||
import torch
|
import torch
|
||||||
from transformers import CsmForConditionalGeneration, AutoProcessor, infer_device
|
from transformers import CsmForConditionalGeneration, AutoProcessor
|
||||||
|
from accelerate import Accelerator
|
||||||
|
|
||||||
model_id = "sesame/csm-1b"
|
model_id = "sesame/csm-1b"
|
||||||
device = infer_device()
|
device = Accelerator().device
|
||||||
|
|
||||||
# load the model and the processor
|
# load the model and the processor
|
||||||
processor = AutoProcessor.from_pretrained(model_id)
|
processor = AutoProcessor.from_pretrained(model_id)
|
||||||
@ -72,11 +73,12 @@ CSM can be used to generate speech given a conversation, allowing consistency in
|
|||||||
|
|
||||||
```python
|
```python
|
||||||
import torch
|
import torch
|
||||||
from transformers import CsmForConditionalGeneration, AutoProcessor, infer_device
|
from transformers import CsmForConditionalGeneration, AutoProcessor
|
||||||
|
from accelerate import Accelerator
|
||||||
from datasets import load_dataset, Audio
|
from datasets import load_dataset, Audio
|
||||||
|
|
||||||
model_id = "sesame/csm-1b"
|
model_id = "sesame/csm-1b"
|
||||||
device = infer_device()
|
device = Accelerator().device
|
||||||
|
|
||||||
# load the model and the processor
|
# load the model and the processor
|
||||||
processor = AutoProcessor.from_pretrained(model_id)
|
processor = AutoProcessor.from_pretrained(model_id)
|
||||||
@ -117,11 +119,12 @@ CSM supports batched inference!
|
|||||||
|
|
||||||
```python
|
```python
|
||||||
import torch
|
import torch
|
||||||
from transformers import CsmForConditionalGeneration, AutoProcessor, infer_device
|
from transformers import CsmForConditionalGeneration, AutoProcessor
|
||||||
|
from accelerate import Accelerator
|
||||||
from datasets import load_dataset, Audio
|
from datasets import load_dataset, Audio
|
||||||
|
|
||||||
model_id = "sesame/csm-1b"
|
model_id = "sesame/csm-1b"
|
||||||
device = infer_device()
|
device = Accelerator().device
|
||||||
|
|
||||||
# load the model and the processor
|
# load the model and the processor
|
||||||
processor = AutoProcessor.from_pretrained(model_id)
|
processor = AutoProcessor.from_pretrained(model_id)
|
||||||
@ -306,11 +309,12 @@ print("="*50)
|
|||||||
CSM Transformers integration supports training!
|
CSM Transformers integration supports training!
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from transformers import CsmForConditionalGeneration, AutoProcessor, infer_device
|
from transformers import CsmForConditionalGeneration, AutoProcessor
|
||||||
|
from accelerate import Accelerator
|
||||||
from datasets import load_dataset, Audio
|
from datasets import load_dataset, Audio
|
||||||
|
|
||||||
model_id = "sesame/csm-1b"
|
model_id = "sesame/csm-1b"
|
||||||
device = infer_device()
|
device = Accelerator().device
|
||||||
|
|
||||||
# load the model and the processor
|
# load the model and the processor
|
||||||
processor = AutoProcessor.from_pretrained(model_id)
|
processor = AutoProcessor.from_pretrained(model_id)
|
||||||
|
186
docs/source/en/model_doc/cwm.md
Normal file
186
docs/source/en/model_doc/cwm.md
Normal file
@ -0,0 +1,186 @@
|
|||||||
|
<!-- Copyright 2025 the HuggingFace Team. All rights reserved.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.
|
||||||
|
|
||||||
|
-->
|
||||||
|
|
||||||
|
|
||||||
|
# Code World Model (CWM)
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
The Code World Model (CWM) model was proposed in [CWM: An Open-Weights LLM for Research on Code
|
||||||
|
Generation with World Models](https://ai.facebook.com/research/publications/cwm) by Meta FAIR CodeGen Team.
|
||||||
|
CWM is an LLM for code generation and reasoning about code that has, in particular, been trained
|
||||||
|
to better represent and reason about how code and commands affect the state of a program or system.
|
||||||
|
Specifically, we mid-trained CWM on a large number of observation-action trajectories from Python
|
||||||
|
execution traces and agentic interactions in containerized environments. We post-trained with
|
||||||
|
extensive multi-task RL in verifiable coding, math, and multi-turn software engineering environments.
|
||||||
|
|
||||||
|
The abstract from the paper is the following:
|
||||||
|
|
||||||
|
> *We release Code World Model (CWM), a 32-billion-parameter open-weights LLM, to advance research
|
||||||
|
on code generation with world models. To improve code understanding beyond what can be learned
|
||||||
|
from training on static code alone, we mid-train CWM on a large amount of observation-action
|
||||||
|
trajectories from Python interpreter and agentic Docker environments, and perform extensive multi-
|
||||||
|
task reasoning RL in verifiable coding, math, and multi-turn software engineering environments. With
|
||||||
|
CWM, we provide a strong testbed for researchers to explore the opportunities world modeling affords
|
||||||
|
for improving code generation with reasoning and planning in computational environments. We
|
||||||
|
present first steps of how world models can benefit agentic coding, enable step-by-step simulation of
|
||||||
|
Python code execution, and show early results of how reasoning can benefit from the latter. CWM is
|
||||||
|
a dense, decoder-only LLM trained with a context size of up to 131 k tokens. Independent of its world
|
||||||
|
modeling capabilities, CWM offers strong performance on general coding and math tasks: it reaches
|
||||||
|
pass@1 scores of 65.8 % on SWE-bench Verified (with test-time scaling), 68.9 % on LiveCodeBench,
|
||||||
|
96.6 % on Math-500, and 76.0 % on AIME 2024. To support further research on code world modeling,
|
||||||
|
we release model checkpoints after mid-training, SFT, and RL.*
|
||||||
|
|
||||||
|
Tips:
|
||||||
|
|
||||||
|
CWM requires a dedicated system prompt to function optimally during inference. Without proper prompt
|
||||||
|
configuration, CWM's output quality may be significantly degraded. The following serves as the default
|
||||||
|
system prompt for reasoning tasks. For agentic workflows, append the relevant tool specifications
|
||||||
|
after this base prompt. Check out the original code repository for more details.
|
||||||
|
```
|
||||||
|
You are a helpful AI assistant. You always reason before responding, using the following format:
|
||||||
|
|
||||||
|
<think>
|
||||||
|
your internal reasoning
|
||||||
|
</think>
|
||||||
|
your external response
|
||||||
|
```
|
||||||
|
|
||||||
|
This model was contributed by [Meta FAIR CodeGen Team](https://huggingface.co/facebook).
|
||||||
|
The original code can be found [here](https://github.com/facebookresearch/cwm).
|
||||||
|
|
||||||
|
## Usage examples
|
||||||
|
|
||||||
|
```python
|
||||||
|
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||||
|
|
||||||
|
model_name = 'facebook/cwm'
|
||||||
|
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||||
|
model = AutoModelForCausalLM.from_pretrained(
|
||||||
|
model_name,
|
||||||
|
dtype="auto",
|
||||||
|
device_map="auto"
|
||||||
|
)
|
||||||
|
|
||||||
|
system_prompt = """
|
||||||
|
You are a helpful AI assistant. You always reason before responding, using the following format:
|
||||||
|
|
||||||
|
<think>
|
||||||
|
your internal reasoning
|
||||||
|
</think>
|
||||||
|
your external response
|
||||||
|
""".strip()
|
||||||
|
|
||||||
|
messages = [
|
||||||
|
{"role": "system", "content": system_prompt},
|
||||||
|
{"role": "user", "content": "Write a haiku about recursion in programming."}
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
text = tokenizer.apply_chat_template(
|
||||||
|
messages,
|
||||||
|
tokenize=False,
|
||||||
|
add_generation_prompt=True,
|
||||||
|
enable_thinking=True, # Switches between thinking and non-thinking modes. Default is True.
|
||||||
|
preserve_previous_think=True, # Switches between keeping thinking blocks from previous messages or not. Default is True.
|
||||||
|
)
|
||||||
|
|
||||||
|
model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
|
||||||
|
|
||||||
|
generated_ids = model.generate(
|
||||||
|
**model_inputs,
|
||||||
|
max_new_tokens=1024
|
||||||
|
)
|
||||||
|
output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
|
||||||
|
print(tokenizer.decode(output_ids))
|
||||||
|
```
|
||||||
|
<details>
|
||||||
|
<summary>Produces the following output:</summary>
|
||||||
|
|
||||||
|
Okay, I need to write a haiku about recursion in programming. Let's start by recalling what recursion is. Recursion is a programming technique where a function calls itself in its own definition, often used to solve problems that can be broken down into smaller, similar sub-problems.
|
||||||
|
|
||||||
|
A haiku is a three-line poem with a syllable count of 5-7-5. So, I need to capture the essence of recursion in a concise and poetic way. Let's think about the key elements of recursion: the function calling itself, the base case that stops the recursion, and the idea of breaking down problems into smaller parts.
|
||||||
|
|
||||||
|
Maybe I can use imagery related to mirrors or reflections, since recursion often involves a function reflecting or repeating itself. Alternatively, I could use natural imagery like a spiral or a tree, which suggests self-similar patterns.
|
||||||
|
|
||||||
|
Let me try to come up with some ideas. For example, "Function calls itself" could be a starting point. But how to make that into a haiku? Maybe something like:
|
||||||
|
|
||||||
|
Function calls itself
|
||||||
|
Base case stops the endless loop
|
||||||
|
Spiral of code
|
||||||
|
|
||||||
|
Hmm, that's a start. But maybe I can make it more poetic. Let's think about the idea of a function calling itself as a kind of infinite loop, but with a base case that stops it. That's like a spiral that unwinds at the center.
|
||||||
|
|
||||||
|
Another angle: recursion is often used for problems that have a recursive structure, like tree traversals or factorial calculations. Maybe I can use a natural image to represent that.
|
||||||
|
|
||||||
|
How about:
|
||||||
|
|
||||||
|
Branches split again
|
||||||
|
Leaves unfold in endless dance
|
||||||
|
Roots hold the base case
|
||||||
|
|
||||||
|
That's a bit more poetic. The branches splitting represent the recursive calls, the leaves unfolding could represent the processing of each sub-problem, and the roots holding the base case are what stops the recursion.
|
||||||
|
|
||||||
|
Alternatively, maybe something simpler:
|
||||||
|
|
||||||
|
Mirror within mirror
|
||||||
|
Reflections infinite deep
|
||||||
|
Base case breaks the glass
|
||||||
|
|
||||||
|
This uses the mirror metaphor to represent the function calling itself, with the base case being the point where the recursion stops, breaking the infinite reflection.
|
||||||
|
|
||||||
|
I think I like the mirror idea better. Let me refine that:
|
||||||
|
|
||||||
|
Mirror within mirror
|
||||||
|
Infinite reflections dance
|
||||||
|
Base case shatters glass
|
||||||
|
|
||||||
|
Yes, that seems to capture the idea of recursion well. The mirror represents the function calling itself, the infinite reflections are the recursive calls, and the base case shattering the glass is what stops the recursion.
|
||||||
|
|
||||||
|
Let me check the syllable count:
|
||||||
|
|
||||||
|
Mirror within mirror (5 syllables: Mir-ror-with-in-mir-ror)
|
||||||
|
Infinite reflections dance (7 syllables: In-fi-nite-ref-lec-tions-dance)
|
||||||
|
Base case shatters glass (5 syllables: Base-case-shat-ters-glass)
|
||||||
|
|
||||||
|
Yes, that fits the 5-7-5 structure. I think that's a good haiku about recursion.
|
||||||
|
</think>
|
||||||
|
Mirror within mirror
|
||||||
|
Infinite reflections dance
|
||||||
|
Base case shatters glass<|eot_id|>
|
||||||
|
|
||||||
|
</details>
|
||||||
|
|
||||||
|
## CwmConfig
|
||||||
|
|
||||||
|
[[autodoc]] CwmConfig
|
||||||
|
|
||||||
|
## CwmPreTrainedModel
|
||||||
|
|
||||||
|
[[autodoc]] CwmPreTrainedModel
|
||||||
|
- forward
|
||||||
|
|
||||||
|
## CwmModel
|
||||||
|
|
||||||
|
[[autodoc]] CwmModel
|
||||||
|
- forward
|
||||||
|
|
||||||
|
## CwmForCausalLM
|
||||||
|
|
||||||
|
[[autodoc]] CwmForCausalLM
|
@ -46,9 +46,10 @@ The DepthPro model processes an input image by first downsampling it at multiple
|
|||||||
>>> import requests
|
>>> import requests
|
||||||
>>> from PIL import Image
|
>>> from PIL import Image
|
||||||
>>> import torch
|
>>> import torch
|
||||||
>>> from transformers import DepthProImageProcessorFast, DepthProForDepthEstimation, infer_device
|
>>> from transformers import DepthProImageProcessorFast, DepthProForDepthEstimation
|
||||||
|
>>> from accelerate import Accelerator
|
||||||
|
|
||||||
>>> device = infer_device()
|
>>> device = Accelerator().device
|
||||||
|
|
||||||
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
|
>>> url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
|
||||||
>>> image = Image.open(requests.get(url, stream=True).raw)
|
>>> image = Image.open(requests.get(url, stream=True).raw)
|
||||||
|
@ -42,9 +42,10 @@ tokens and decodes them back into audio.
|
|||||||
### Generation with Text
|
### Generation with Text
|
||||||
|
|
||||||
```python
|
```python
|
||||||
from transformers import AutoProcessor, DiaForConditionalGeneration, infer_device
|
from transformers import AutoProcessor, DiaForConditionalGeneration
|
||||||
|
from accelerate import Accelerator
|
||||||
|
|
||||||
torch_device = infer_device()
|
torch_device = Accelerator().device
|
||||||
model_checkpoint = "nari-labs/Dia-1.6B-0626"
|
model_checkpoint = "nari-labs/Dia-1.6B-0626"
|
||||||
|
|
||||||
text = ["[S1] Dia is an open weights text to dialogue model."]
|
text = ["[S1] Dia is an open weights text to dialogue model."]
|
||||||
@ -64,9 +65,10 @@ processor.save_audio(outputs, "example.wav")
|
|||||||
|
|
||||||
```python
|
```python
|
||||||
from datasets import load_dataset, Audio
|
from datasets import load_dataset, Audio
|
||||||
from transformers import AutoProcessor, DiaForConditionalGeneration, infer_device
|
from transformers import AutoProcessor, DiaForConditionalGeneration
|
||||||
|
from accelerate import Accelerator
|
||||||
|
|
||||||
torch_device = infer_device()
|
torch_device = Accelerator().device
|
||||||
model_checkpoint = "nari-labs/Dia-1.6B-0626"
|
model_checkpoint = "nari-labs/Dia-1.6B-0626"
|
||||||
|
|
||||||
ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
|
ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
|
||||||
@ -91,9 +93,10 @@ processor.save_audio(outputs, "example_with_audio.wav")
|
|||||||
|
|
||||||
```python
|
```python
|
||||||
from datasets import load_dataset, Audio
|
from datasets import load_dataset, Audio
|
||||||
from transformers import AutoProcessor, DiaForConditionalGeneration, infer_device
|
from transformers import AutoProcessor, DiaForConditionalGeneration
|
||||||
|
from accelerate import Accelerator
|
||||||
|
|
||||||
torch_device = infer_device()
|
torch_device = Accelerator().device
|
||||||
model_checkpoint = "nari-labs/Dia-1.6B-0626"
|
model_checkpoint = "nari-labs/Dia-1.6B-0626"
|
||||||
|
|
||||||
ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
|
ds = load_dataset("hf-internal-testing/dailytalk-dummy", split="train")
|
||||||
|
@ -178,3 +178,8 @@ print("Pooled output shape:", pooled_output.shape)
|
|||||||
|
|
||||||
[[autodoc]] DINOv3ViTImageProcessorFast
|
[[autodoc]] DINOv3ViTImageProcessorFast
|
||||||
- preprocess
|
- preprocess
|
||||||
|
|
||||||
|
## DINOv3ConvNextBackbone
|
||||||
|
|
||||||
|
[[autodoc]] DINOv3ConvNextBackbone
|
||||||
|
- forward
|
@ -61,10 +61,10 @@ pipeline(image=image, question="What time is the coffee break?")
|
|||||||
# pip install datasets
|
# pip install datasets
|
||||||
import torch
|
import torch
|
||||||
from datasets import load_dataset
|
from datasets import load_dataset
|
||||||
from transformers import AutoProcessor, AutoModelForVision2Seq
|
from transformers import AutoProcessor, AutoModelForImageTextToText
|
||||||
|
|
||||||
processor = AutoProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
|
processor = AutoProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
|
||||||
model = AutoModelForVision2Seq.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
|
model = AutoModelForImageTextToText.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
|
||||||
|
|
||||||
dataset = load_dataset("hf-internal-testing/example-documents", split="test")
|
dataset = load_dataset("hf-internal-testing/example-documents", split="test")
|
||||||
image = dataset[0]["image"]
|
image = dataset[0]["image"]
|
||||||
@ -92,11 +92,11 @@ The example below uses [torchao](../quantization/torchao) to only quantize the w
|
|||||||
# pip install datasets torchao
|
# pip install datasets torchao
|
||||||
import torch
|
import torch
|
||||||
from datasets import load_dataset
|
from datasets import load_dataset
|
||||||
from transformers import TorchAoConfig, AutoProcessor, AutoModelForVision2Seq
|
from transformers import TorchAoConfig, AutoProcessor, AutoModelForImageTextToText
|
||||||
|
|
||||||
quantization_config = TorchAoConfig("int4_weight_only", group_size=128)
|
quantization_config = TorchAoConfig("int4_weight_only", group_size=128)
|
||||||
processor = AutoProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
|
processor = AutoProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
|
||||||
model = AutoModelForVision2Seq.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa", quantization_config=quantization_config)
|
model = AutoModelForImageTextToText.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa", quantization_config=quantization_config)
|
||||||
|
|
||||||
dataset = load_dataset("hf-internal-testing/example-documents", split="test")
|
dataset = load_dataset("hf-internal-testing/example-documents", split="test")
|
||||||
image = dataset[0]["image"]
|
image = dataset[0]["image"]
|
||||||
@ -119,14 +119,15 @@ print(answer)
|
|||||||
|
|
||||||
```py
|
```py
|
||||||
>>> import re
|
>>> import re
|
||||||
>>> from transformers import DonutProcessor, VisionEncoderDecoderModel, infer_device
|
>>> from transformers import DonutProcessor, VisionEncoderDecoderModel
|
||||||
|
>>> from accelerate import Accelerator
|
||||||
>>> from datasets import load_dataset
|
>>> from datasets import load_dataset
|
||||||
>>> import torch
|
>>> import torch
|
||||||
|
|
||||||
>>> processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-rvlcdip")
|
>>> processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-rvlcdip")
|
||||||
>>> model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-rvlcdip")
|
>>> model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-rvlcdip")
|
||||||
|
|
||||||
>>> device = infer_device()
|
>>> device = Accelerator().device
|
||||||
>>> model.to(device) # doctest: +IGNORE_RESULT
|
>>> model.to(device) # doctest: +IGNORE_RESULT
|
||||||
|
|
||||||
>>> # load document image
|
>>> # load document image
|
||||||
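The `infer_device()` → `Accelerator().device` substitution above is the same pattern applied throughout the rest of this diff. A minimal sketch of what it does, assuming `accelerate` is installed: `Accelerator().device` resolves to the best available accelerator (CUDA, MPS, XPU, ...) and falls back to CPU.

```python
# Device selection via accelerate, as used by the updated doc snippets.
import torch
from accelerate import Accelerator

device = Accelerator().device  # e.g. cuda:0, mps, or cpu depending on the machine
x = torch.ones(2, 2, device=device)
print(x.device)
```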
@ -161,14 +162,15 @@ print(answer)
|
|||||||
|
|
||||||
```py
|
```py
|
||||||
>>> import re
|
>>> import re
|
||||||
>>> from transformers import DonutProcessor, VisionEncoderDecoderModel, infer_device
|
>>> from transformers import DonutProcessor, VisionEncoderDecoderModel
|
||||||
|
from accelerate import Accelerator
|
||||||
>>> from datasets import load_dataset
|
>>> from datasets import load_dataset
|
||||||
>>> import torch
|
>>> import torch
|
||||||
|
|
||||||
>>> processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2")
|
>>> processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2")
|
||||||
>>> model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2")
|
>>> model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-cord-v2")
|
||||||
|
|
||||||
>>> device = infer_device()
|
>>> device = Accelerator().device
|
||||||
>>> model.to(device) # doctest: +IGNORE_RESULT
|
>>> model.to(device) # doctest: +IGNORE_RESULT
|
||||||
|
|
||||||
>>> # load document image
|
>>> # load document image
|
||||||
|
@ -61,12 +61,13 @@ EdgeTAM can be used for automatic mask generation to segment all objects in an i
|
|||||||
You can segment objects by providing a single point click on the object you want to segment:
|
You can segment objects by providing a single point click on the object you want to segment:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
>>> from transformers import Sam2Processor, EdgeTamModel, infer_device
|
>>> from transformers import Sam2Processor, EdgeTamModel
|
||||||
|
from accelerate import Accelerator
|
||||||
>>> import torch
|
>>> import torch
|
||||||
>>> from PIL import Image
|
>>> from PIL import Image
|
||||||
>>> import requests
|
>>> import requests
|
||||||
|
|
||||||
>>> device = infer_device()
|
>>> device = Accelerator().device
|
||||||
|
|
||||||
>>> model = EdgeTamModel.from_pretrained("yonigozlan/edgetam-1").to(device)
|
>>> model = EdgeTamModel.from_pretrained("yonigozlan/edgetam-1").to(device)
|
||||||
>>> processor = Sam2Processor.from_pretrained("yonigozlan/edgetam-1")
|
>>> processor = Sam2Processor.from_pretrained("yonigozlan/edgetam-1")
|
||||||
@ -157,12 +158,13 @@ IoU scores: tensor([0.7616, 0.9465], device='cuda:0')
|
|||||||
Process multiple images simultaneously for improved efficiency:
|
Process multiple images simultaneously for improved efficiency:
|
||||||
|
|
||||||
```python
|
```python
|
||||||
>>> from transformers import Sam2Processor, EdgeTamModel, infer_device
|
>>> from transformers import Sam2Processor, EdgeTamModel
|
||||||
|
from accelerate import Accelerator
|
||||||
>>> import torch
|
>>> import torch
|
||||||
>>> from PIL import Image
|
>>> from PIL import Image
|
||||||
>>> import requests
|
>>> import requests
|
||||||
|
|
||||||
>>> device = infer_device()
|
>>> device = Accelerator().device
|
||||||
|
|
||||||
>>> model = EdgeTamModel.from_pretrained("yonigozlan/edgetam-1").to(device)
|
>>> model = EdgeTamModel.from_pretrained("yonigozlan/edgetam-1").to(device)
|
||||||
>>> processor = Sam2Processor.from_pretrained("yonigozlan/edgetam-1")
|
>>> processor = Sam2Processor.from_pretrained("yonigozlan/edgetam-1")
|
||||||
|
@ -51,10 +51,11 @@ EdgeTAM Video's key strength is its ability to track objects across video frames
|
|||||||
#### Basic Video Tracking
|
#### Basic Video Tracking
|
||||||
|
|
||||||
```python
|
```python
|
||||||
>>> from transformers import EdgeTamVideoModel, Sam2VideoProcessor, infer_device
|
>>> from transformers import EdgeTamVideoModel, Sam2VideoProcessor
|
||||||
|
from accelerate import Accelerator
|
||||||
>>> import torch
|
>>> import torch
|
||||||
|
|
||||||
>>> device = infer_device()
|
>>> device = Accelerator().device
|
||||||
>>> model = EdgeTamVideoModel.from_pretrained("yonigozlan/edgetam-video-1").to(device, dtype=torch.bfloat16)
|
>>> model = EdgeTamVideoModel.from_pretrained("yonigozlan/edgetam-video-1").to(device, dtype=torch.bfloat16)
|
||||||
>>> processor = Sam2VideoProcessor.from_pretrained("yonigozlan/edgetam-video-1")
|
>>> processor = Sam2VideoProcessor.from_pretrained("yonigozlan/edgetam-video-1")
|
||||||
|
|
||||||
|
@ -40,9 +40,9 @@ The original checkpoints can be found [here](https://github.com/google-research/
|
|||||||
The model is pretty heavy (~40GB in half precision) so if you just want to run the model, make sure you load your model in 8bit, and use `device_map="auto"` to make sure you don't have any OOM issue!
|
The model is pretty heavy (~40GB in half precision) so if you just want to run the model, make sure you load your model in 8bit, and use `device_map="auto"` to make sure you don't have any OOM issue!
|
||||||
|
|
||||||
```python
|
```python
|
||||||
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
|
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, BitsAndBytesConfig
|
||||||
|
|
||||||
>>> model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-ul2", load_in_8bit=True, device_map="auto")
|
>>> model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-ul2", quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto")
|
||||||
>>> tokenizer = AutoTokenizer.from_pretrained("google/flan-ul2")
|
>>> tokenizer = AutoTokenizer.from_pretrained("google/flan-ul2")
|
||||||
|
|
||||||
>>> inputs = tokenizer("A step by step recipe to make bolognese pasta:", return_tensors="pt")
|
>>> inputs = tokenizer("A step by step recipe to make bolognese pasta:", return_tensors="pt")
|
||||||
|
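The FLAN-UL2 hunk above replaces the deprecated `load_in_8bit=True` argument with an explicit `BitsAndBytesConfig`. A minimal sketch of the new form, assuming `bitsandbytes` and a CUDA GPU are available:

```python
# 8-bit loading now goes through BitsAndBytesConfig rather than
# passing load_in_8bit=True directly to from_pretrained.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_8bit=True)
model = AutoModelForSeq2SeqLM.from_pretrained(
    "google/flan-ul2",
    quantization_config=quantization_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("google/flan-ul2")

inputs = tokenizer("A step by step recipe to make bolognese pasta:", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=100)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```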
@@ -60,8 +60,9 @@ Tips:
 In the following, we demonstrate how to use `glm-4-9b-chat` for the inference. Note that we have used the ChatML format for dialog, in this demo we show how to leverage `apply_chat_template` for this purpose.
 
 ```python
->>> from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device
->>> device = infer_device() # the device to load the model onto
+>>> from transformers import AutoModelForCausalLM, AutoTokenizer
+from accelerate import Accelerator
+>>> device = Accelerator().device # the device to load the model onto
 
 >>> model = AutoModelForCausalLM.from_pretrained("THUDM/glm-4-9b-chat", device_map="auto", trust_remote_code=True)
 >>> tokenizer = AutoTokenizer.from_pretrained("THUDM/glm-4-9b-chat")
@@ -19,6 +19,27 @@ rendered properly in your Markdown viewer.
 
 ## Overview
 
+Both the **GLM-4.6** and **GLM-4.5** language models use this class. The implementation in transformers does not include an MTP layer.
+
+### GLM-4.6
+
+Compared with GLM-4.5, **GLM-4.6** brings several key improvements:
+
+* **Longer context window:** The context window has been expanded from 128K to 200K tokens, enabling the model to handle more complex agentic tasks.
+* **Superior coding performance:** The model achieves higher scores on code benchmarks and demonstrates better real-world performance in applications such as Claude Code, Cline, Roo Code, and Kilo Code, including improvements in generating visually polished front-end pages.
+* **Advanced reasoning:** GLM-4.6 shows a clear improvement in reasoning performance and supports tool use during inference, leading to stronger overall capability.
+* **More capable agents:** GLM-4.6 exhibits stronger performance in tool-using and search-based agents, and integrates more effectively within agent frameworks.
+* **Refined writing:** Better aligns with human preferences in style and readability, and performs more naturally in role-playing scenarios.
+
+We evaluated GLM-4.6 across eight public benchmarks covering agents, reasoning, and coding. Results show clear gains over GLM-4.5, with GLM-4.6 also holding competitive advantages over leading domestic and international models such as **DeepSeek-V3.1-Terminus** and **Claude Sonnet 4**.
+
+
+
+For more eval results, showcases, and technical details, please visit our [technical blog](https://z.ai/blog/glm-4.6).
+
+
+### GLM-4.5
+
 The [**GLM-4.5**](https://huggingface.co/papers/2508.06471) series models are foundation models designed for intelligent agents, MoE variants are documented here as Glm4Moe.
 
 GLM-4.5 has **355** billion total parameters with **32** billion active parameters, while GLM-4.5-Air adopts a more compact design with **106** billion total parameters and **12** billion active parameters. GLM-4.5 models unify reasoning, coding, and intelligent agent capabilities to meet the complex demands of intelligent agent applications.
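The overview added above is descriptive only. For orientation, a hypothetical loading sketch for a Glm4Moe chat checkpoint follows; the repository id is a placeholder rather than something taken from this diff, and the prompt and generation settings are illustrative.

```python
# Hypothetical sketch: loading a GLM-4.5/4.6 (Glm4Moe) chat checkpoint.
# The model_id below is a PLACEHOLDER, not confirmed by this diff; substitute a real checkpoint.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "<glm-4.6-checkpoint>"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.bfloat16, device_map="auto")

messages = [{"role": "user", "content": "Summarize the GLM-4.6 improvements in one sentence."}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
output_ids = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```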
@@ -132,10 +132,11 @@ Using GLM-4.1V with video input is similar to using it with image input.
 The model can process video data and generate text based on the content of the video.
 
 ```python
-from transformers import AutoProcessor, Glm4vForConditionalGeneration, infer_device
+from transformers import AutoProcessor, Glm4vForConditionalGeneration
+from accelerate import Accelerator
 import torch
 
-device = f"{infer_device()}:0"
+device = Accelerator().device
 
 processor = AutoProcessor.from_pretrained("THUDM/GLM-4.1V-9B-Thinking")
 model = Glm4vForConditionalGeneration.from_pretrained(
@@ -48,9 +48,10 @@ The original code can be found [here](https://github.com/Ucas-HaoranWei/GOT-OCR2
 
 ```python
 >>> import torch
->>> from transformers import AutoProcessor, AutoModelForImageTextToText, infer_device
+>>> from transformers import AutoProcessor, AutoModelForImageTextToText
+from accelerate import Accelerator
 
->>> device = infer_device()
+>>> device = Accelerator().device
 >>> model = AutoModelForImageTextToText.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", device_map=device)
 >>> processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", use_fast=True)
 
@@ -73,9 +74,10 @@ The original code can be found [here](https://github.com/Ucas-HaoranWei/GOT-OCR2
 
 ```python
 >>> import torch
->>> from transformers import AutoProcessor, AutoModelForImageTextToText, infer_device
+>>> from transformers import AutoProcessor, AutoModelForImageTextToText
+from accelerate import Accelerator
 
->>> device = infer_device()
+>>> device = Accelerator().device
 >>> model = AutoModelForImageTextToText.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", device_map=device)
 >>> processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", use_fast=True)
 
@@ -102,9 +104,10 @@ GOT-OCR2 can also generate formatted text, such as markdown or LaTeX. Here is an
 
 ```python
 >>> import torch
->>> from transformers import AutoProcessor, AutoModelForImageTextToText, infer_device
+>>> from transformers import AutoProcessor, AutoModelForImageTextToText
+from accelerate import Accelerator
 
->>> device = infer_device()
+>>> device = Accelerator().device
 >>> model = AutoModelForImageTextToText.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", device_map=device)
 >>> processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", use_fast=True)
 
@@ -130,9 +133,10 @@ Here is an example of how to process multiple pages at once:
 
 ```python
 >>> import torch
->>> from transformers import AutoProcessor, AutoModelForImageTextToText, infer_device
+>>> from transformers import AutoProcessor, AutoModelForImageTextToText
+from accelerate import Accelerator
 
->>> device = infer_device()
+>>> device = Accelerator().device
 >>> model = AutoModelForImageTextToText.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", device_map=device)
 >>> processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", use_fast=True)
 
@@ -159,9 +163,10 @@ Here is an example of how to process cropped patches:
 
 ```python
 >>> import torch
->>> from transformers import AutoProcessor, AutoModelForImageTextToText, infer_device
+>>> from transformers import AutoProcessor, AutoModelForImageTextToText
+from accelerate import Accelerator
 
->>> device = infer_device()
+>>> device = Accelerator().device
 >>> model = AutoModelForImageTextToText.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", dtype=torch.bfloat16, device_map=device)
 >>> processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", use_fast=True)
 
@@ -188,7 +193,7 @@ GOT supports interactive OCR, where the user can specify the region to be recogn
 >>> import torch
 >>> from transformers import AutoProcessor, AutoModelForImageTextToText
 
->>> device = infer_device()
+>>> device = Accelerator().device
 >>> model = AutoModelForImageTextToText.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", device_map=device)
 >>> processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", use_fast=True)
 
@@ -214,10 +219,11 @@ Here is an example of how to process sheet music:
 
 ```python
 >>> import torch
->>> from transformers import AutoProcessor, AutoModelForImageTextToText, infer_device
+>>> from transformers import AutoProcessor, AutoModelForImageTextToText
+from accelerate import Accelerator
 >>> import verovio
 
->>> device = infer_device()
+>>> device = Accelerator().device
 >>> model = AutoModelForImageTextToText.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", device_map=device)
 >>> processor = AutoProcessor.from_pretrained("stepfun-ai/GOT-OCR-2.0-hf", use_fast=True)
 
@@ -64,8 +64,9 @@ To load and run a model using Flash Attention 2, refer to the snippet below:
 
 ```python
 >>> import torch
->>> from transformers import AutoModelForCausalLM, AutoTokenizer, infer_device
->>> device = infer_device() # the device to load the model onto
+>>> from transformers import AutoModelForCausalLM, AutoTokenizer
+from accelerate import Accelerator
+>>> device = Accelerator().device # the device to load the model onto
 
 >>> model = AutoModelForCausalLM.from_pretrained("bigcode/gpt_bigcode-santacoder", dtype=torch.float16, attn_implementation="flash_attention_2")
 >>> tokenizer = AutoTokenizer.from_pretrained("bigcode/gpt_bigcode-santacoder")
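The GPT-BigCode hunk above stops right after the model and tokenizer are created. A short continuation sketch showing how the Flash Attention 2 model would then be used; the prompt is illustrative, and a CUDA GPU with `flash-attn` installed is assumed.

```python
import torch
from accelerate import Accelerator
from transformers import AutoModelForCausalLM, AutoTokenizer

device = Accelerator().device
model = AutoModelForCausalLM.from_pretrained(
    "bigcode/gpt_bigcode-santacoder",
    dtype=torch.float16,
    attn_implementation="flash_attention_2",
).to(device)
tokenizer = AutoTokenizer.from_pretrained("bigcode/gpt_bigcode-santacoder")

# Illustrative prompt; generate a short completion with the FA2-enabled model.
inputs = tokenizer("def fibonacci(n):", return_tensors="pt").to(device)
outputs = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```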
@@ -73,7 +73,7 @@ Flash Attention 2 is an faster, optimized version of the model.
 
 ### Installation
 
-First, check whether your hardware is compatible with Flash Attention 2. The latest list of compatible hardware can be found in the [official documentation](https://github.com/Dao-AILab/flash-attention#installation-and-features). If your hardware is not compatible with Flash Attention 2, you can still benefit from attention kernel optimisations through Better Transformer support covered [above](https://huggingface.co/docs/transformers/main/en/model_doc/bark#using-better-transformer).
+First, check whether your hardware is compatible with Flash Attention 2. The latest list of compatible hardware can be found in the [official documentation](https://github.com/Dao-AILab/flash-attention#installation-and-features).
 
 Next, [install](https://github.com/Dao-AILab/flash-attention#installation-and-features) the latest version of Flash Attention 2:
 
@@ -38,10 +38,11 @@ This model was contributed by [Stella Biderman](https://huggingface.co/stellaath
 which could be used to further minimize the RAM usage:
 
 ```python
->>> from transformers import GPTJForCausalLM, infer_device
+>>> from transformers import GPTJForCausalLM
+from accelerate import Accelerator
 >>> import torch
 
->>> device = infer_device()
+>>> device = Accelerator().device
 >>> model = GPTJForCausalLM.from_pretrained(
 ...     "EleutherAI/gpt-j-6B",
 ...     revision="float16",
@@ -93,10 +94,11 @@ model.
 ...or in float16 precision:
 
 ```python
->>> from transformers import GPTJForCausalLM, AutoTokenizer, infer_device
+>>> from transformers import GPTJForCausalLM, AutoTokenizer
+from accelerate import Accelerator
 >>> import torch
 
->>> device = infer_device()
+>>> device = Accelerator().device
 >>> model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", dtype=torch.float16).to(device)
 >>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
 
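As with the previous hunk, the GPT-J float16 example above ends before any text is generated. A continuation sketch under the same assumptions; the prompt and sampling settings are illustrative.

```python
import torch
from accelerate import Accelerator
from transformers import GPTJForCausalLM, AutoTokenizer

device = Accelerator().device
model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", dtype=torch.float16).to(device)
tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")

# Illustrative prompt and sampling settings.
inputs = tokenizer("In a shocking finding, scientists discovered", return_tensors="pt").to(device)
outputs = model.generate(**inputs, do_sample=True, temperature=0.9, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```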
@@ -42,10 +42,11 @@ fine-tune for translation or summarization.
 The `generate()` method can be used to generate text using GPTSAN-Japanese model.
 
 ```python
->>> from transformers import AutoModel, AutoTokenizer, infer_device
+>>> from transformers import AutoModel, AutoTokenizer
+from accelerate import Accelerator
 >>> import torch
 
->>> device = infer_device()
+>>> device = Accelerator().device
 >>> tokenizer = AutoTokenizer.from_pretrained("Tanrei/GPTSAN-japanese")
 >>> model = AutoModel.from_pretrained("Tanrei/GPTSAN-japanese").to(device)
 >>> x_tok = tokenizer("は、", prefix_text="織田信長", return_tensors="pt")
@@ -34,9 +34,10 @@ Tips:
 Sample inference:
 
 ```python
-from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration, infer_device
+from transformers import LlavaNextProcessor, LlavaNextForConditionalGeneration
+from accelerate import Accelerator
 
-device = infer_device()
+device = Accelerator().device
 
 model_path = "ibm-granite/granite-vision-3.1-2b-preview"
 processor = LlavaNextProcessor.from_pretrained(model_path)
@@ -50,10 +50,11 @@ Here's how to use the model for zero-shot object detection:
 
 >>> import torch
 >>> from PIL import Image
->>> from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection, infer_device
+>>> from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection
+from accelerate import Accelerator
 
 >>> model_id = "IDEA-Research/grounding-dino-tiny"
->>> device = infer_device()
+>>> device = Accelerator().device
 
 >>> processor = AutoProcessor.from_pretrained(model_id)
 >>> model = AutoModelForZeroShotObjectDetection.from_pretrained(model_id).to(device)
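For context, the Grounding DINO hunk above only covers loading. A sketch of the forward pass that typically follows, using the COCO image URL that appears elsewhere in this diff and an illustrative text query; the results would then go through the processor's grounded-object-detection post-processing.

```python
import requests
import torch
from PIL import Image
from accelerate import Accelerator
from transformers import AutoProcessor, AutoModelForZeroShotObjectDetection

device = Accelerator().device
model_id = "IDEA-Research/grounding-dino-tiny"
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForZeroShotObjectDetection.from_pretrained(model_id).to(device)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
text = "a cat. a remote control."  # illustrative query: lowercase phrases separated by periods

inputs = processor(images=image, text=text, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)
```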
@@ -46,7 +46,6 @@ A list of official Hugging Face and community (indicated by 🌎) resources to h
 ## GroupViTConfig
 
 [[autodoc]] GroupViTConfig
-    - from_text_vision_configs
 
 ## GroupViTTextConfig
 
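With `from_text_vision_configs` dropped from the documented API above, the composite config is presumably assembled by passing the sub-configs directly. A sketch of that pattern, not taken from this diff:

```python
# Sketch: building a GroupViTConfig from its sub-configs without the
# removed from_text_vision_configs helper.
from transformers import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig

text_config = GroupViTTextConfig()
vision_config = GroupViTVisionConfig()

config = GroupViTConfig(text_config=text_config.to_dict(), vision_config=vision_config.to_dict())
print(config.text_config.hidden_size, config.vision_config.hidden_size)
```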
@@ -57,10 +57,11 @@ Example of how to use the processor on chat messages:
 ```python
 import requests
 from PIL import Image
-from transformers import Idefics2Processor, Idefics2ForConditionalGeneration, infer_device
+from transformers import Idefics2Processor, Idefics2ForConditionalGeneration
+from accelerate import Accelerator
 import torch
 
-device = infer_device()
+device = Accelerator().device
 
 url_1 = "http://images.cocodataset.org/val2017/000000039769.jpg"
 url_2 = "http://images.cocodataset.org/val2017/000000219578.jpg"
@@ -99,7 +100,8 @@ print("Generated text:", generated_text)
 ```python
 import requests
 from PIL import Image
-from transformers import Idefics2Processor, Idefics2ForConditionalGeneration, infer_device
+from transformers import Idefics2Processor, Idefics2ForConditionalGeneration
+from accelerate import Accelerator
 import torch
 
 url_1 = "http://images.cocodataset.org/val2017/000000039769.jpg"
@@ -124,7 +126,7 @@ messages = [{
     ],
 }]
 
-device = infer_device()
+device = Accelerator().device
 
 processor = Idefics2Processor.from_pretrained("HuggingFaceM4/idefics2-8b")
 model = Idefics2ForConditionalGeneration.from_pretrained("HuggingFaceM4/idefics2-8b")
@@ -45,7 +45,6 @@ The attributes can be obtained from model config, as `model.config.num_query_tok
 ## InstructBlipConfig
 
 [[autodoc]] InstructBlipConfig
-    - from_vision_qformer_text_configs
 
 ## InstructBlipVisionConfig
 
@@ -45,7 +45,6 @@ The attributes can be obtained from model config, as `model.config.num_query_tok
 ## InstructBlipVideoConfig
 
 [[autodoc]] InstructBlipVideoConfig
-    - from_vision_qformer_text_configs
 
 ## InstructBlipVideoVisionConfig
 
@@ -45,7 +45,8 @@ import re
 import torch
 import requests
 from PIL import Image, ImageDraw
-from transformers import AutoProcessor, Kosmos2_5ForConditionalGeneration, infer_device
+from transformers import AutoProcessor, Kosmos2_5ForConditionalGeneration
+from accelerate import Accelerator
 
 repo = "microsoft/kosmos-2.5"
 device = "cuda:0"
@@ -84,7 +85,8 @@ import re
 import torch
 import requests
 from PIL import Image, ImageDraw
-from transformers import AutoProcessor, Kosmos2_5ForConditionalGeneration, infer_device
+from transformers import AutoProcessor, Kosmos2_5ForConditionalGeneration
+from accelerate import Accelerator
 
 repo = "microsoft/kosmos-2.5"
 device = "cuda:0"
@@ -34,10 +34,11 @@ rendered properly in your Markdown viewer.
 ```python
 import torch
 from datasets import load_dataset, Audio
-from transformers import infer_device, KyutaiSpeechToTextProcessor, KyutaiSpeechToTextForConditionalGeneration
+from transformers import KyutaiSpeechToTextProcessor, KyutaiSpeechToTextForConditionalGeneration
+from accelerate import Accelerator
 
 # 1. load the model and the processor
-torch_device = infer_device()
+torch_device = Accelerator().device
 model_id = "kyutai/stt-2.6b-en-trfs"
 
 processor = KyutaiSpeechToTextProcessor.from_pretrained(model_id)
@@ -67,10 +68,11 @@ print(processor.batch_decode(output_tokens, skip_special_tokens=True))
 ```python
 import torch
 from datasets import load_dataset, Audio
-from transformers import infer_device, KyutaiSpeechToTextProcessor, KyutaiSpeechToTextForConditionalGeneration
+from transformers import KyutaiSpeechToTextProcessor, KyutaiSpeechToTextForConditionalGeneration
+from accelerate import Accelerator
 
 # 1. load the model and the processor
-torch_device = infer_device()
+torch_device = Accelerator().device
 model_id = "kyutai/stt-2.6b-en-trfs"
 
 processor = KyutaiSpeechToTextProcessor.from_pretrained(model_id)
Some files were not shown because too many files have changed in this diff.