Mirror of https://github.com/huggingface/transformers.git (synced 2025-10-27 23:06:50 +08:00)

Compare commits: base-model...test_docke (1 commit, a9ca3e46b0)
@@ -13,7 +13,6 @@ jobs:
    check_circleci_user:
        docker:
            - image: python:3.10-slim
        resource_class: small
        parallelism: 1
        steps:
            - run: echo $CIRCLE_PROJECT_USERNAME
@@ -31,14 +30,6 @@ jobs:
        parallelism: 1
        steps:
            - checkout
            - run: if [[ "$CIRCLE_PULL_REQUEST" == "" && "$CIRCLE_BRANCH" != "main" && "$CIRCLE_BRANCH" != *-release ]]; then echo "Not a PR, not the main branch and not a release branch, skip test!"; circleci-agent step halt; fi
            - run: 'curl -L -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/pulls/${CIRCLE_PULL_REQUEST##*/} >> github.txt'
            - run: cat github.txt
            - run: (python3 -c 'import json; from datetime import datetime; fp = open("github.txt"); data = json.load(fp); fp.close(); f = "%Y-%m-%dT%H:%M:%SZ"; created = datetime.strptime(data["created_at"], f); updated = datetime.strptime(data["updated_at"], f); s = (updated - created).total_seconds(); print(int(s))' || true) > elapsed.txt
            - run: if [ "$(cat elapsed.txt)" == "" ]; then echo 60 > elapsed.txt; fi
            - run: cat elapsed.txt
            - run: if [ "$(cat elapsed.txt)" -lt "30" ]; then echo "PR is just opened, wait some actions from GitHub"; sleep 30; fi
            - run: 'if grep -q "\"draft\": true," github.txt; then echo "draft mode, skip test!"; circleci-agent step halt; fi'
            - run: uv pip install -U -e .
            - run: echo 'export "GIT_COMMIT_MESSAGE=$(git show -s --format=%s)"' >> "$BASH_ENV" && source "$BASH_ENV"
            - run: mkdir -p test_preparation
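The elapsed-time guard in the steps above is packed into a shell one-liner; for readability, the same check looks roughly like the sketch below (a standalone illustration, not part of the diff; it assumes the `github.txt` written by the preceding curl step contains the GitHub pull-request JSON):

import json
from datetime import datetime

# Parse the PR payload fetched from the GitHub API by the curl step above.
with open("github.txt") as fp:
    data = json.load(fp)

fmt = "%Y-%m-%dT%H:%M:%SZ"
created = datetime.strptime(data["created_at"], fmt)
updated = datetime.strptime(data["updated_at"], fmt)
elapsed = int((updated - created).total_seconds())

# A freshly opened PR (< 30s between creation and last update) gets a short
# grace period so GitHub has time to attach its checks before CI continues.
if elapsed < 30:
    print("PR is just opened, wait some actions from GitHub")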
@@ -179,6 +170,7 @@ jobs:
                path: ~/transformers/installed.txt
            - run: python utils/check_copies.py
            - run: python utils/check_modular_conversion.py
            - run: python utils/check_table.py
            - run: python utils/check_dummies.py
            - run: python utils/check_repo.py
            - run: python utils/check_inits.py
@@ -188,6 +180,7 @@ jobs:
            - run: make deps_table_check_updated
            - run: python utils/update_metadata.py --check-only
            - run: python utils/check_docstrings.py
            - run: python utils/check_support_list.py

workflows:
    version: 2

@@ -28,9 +28,11 @@ COMMON_ENV_VARIABLES = {
    "TRANSFORMERS_IS_CI": True,
    "PYTEST_TIMEOUT": 120,
    "RUN_PIPELINE_TESTS": False,
    "RUN_PT_TF_CROSS_TESTS": False,
    "RUN_PT_FLAX_CROSS_TESTS": False,
}
# Disable the use of {"s": None} as the output is way too long, causing the navigation on CircleCI impractical
COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "dist": "loadfile", "vvv": None, "rsfE":None}
COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "dist": "loadfile", "vvv": None, "rsf":None}
DEFAULT_DOCKER_IMAGE = [{"image": "cimg/python:3.8.12"}]


@@ -38,23 +40,9 @@ class EmptyJob:
    job_name = "empty"

    def to_dict(self):
        steps = [{"run": 'ls -la'}]
        if self.job_name == "collection_job":
            steps.extend(
                [
                    "checkout",
                    {"run": "pip install requests || true"},
                    {"run": """while [[ $(curl --location --request GET "https://circleci.com/api/v2/workflow/$CIRCLE_WORKFLOW_ID/job" --header "Circle-Token: $CCI_TOKEN"| jq -r '.items[]|select(.name != "collection_job")|.status' | grep -c "running") -gt 0 ]]; do sleep 5; done || true"""},
                    {"run": 'python utils/process_circleci_workflow_test_reports.py --workflow_id $CIRCLE_WORKFLOW_ID || true'},
                    {"store_artifacts": {"path": "outputs"}},
                    {"run": 'echo "All required jobs have now completed"'},
                ]
            )

        return {
            "docker": copy.deepcopy(DEFAULT_DOCKER_IMAGE),
            "resource_class": "small",
            "steps": steps,
            "steps":["checkout"],
        }

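The `collection_job` above waits for the rest of the workflow with a curl/jq shell loop against the CircleCI v2 API; a rough Python equivalent of that wait is sketched below (illustrative only; the endpoint, `CIRCLE_WORKFLOW_ID` and `CCI_TOKEN` come from the snippet above, and the helper name `wait_for_workflow` is made up for this example):

import os
import time

import requests

def wait_for_workflow(workflow_id: str, token: str) -> None:
    # Poll until every job in the workflow, except the collection job itself,
    # has stopped running, mirroring the shell loop above.
    url = f"https://circleci.com/api/v2/workflow/{workflow_id}/job"
    while True:
        items = requests.get(url, headers={"Circle-Token": token}).json().get("items", [])
        running = [j for j in items if j["name"] != "collection_job" and j["status"] == "running"]
        if not running:
            return
        time.sleep(5)

if __name__ == "__main__":
    wait_for_workflow(os.environ["CIRCLE_WORKFLOW_ID"], os.environ["CCI_TOKEN"])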
@@ -66,9 +54,9 @@ class CircleCIJob:
    install_steps: List[str] = None
    marker: Optional[str] = None
    parallelism: Optional[int] = 0
    pytest_num_workers: int = 8
    pytest_num_workers: int = 12
    pytest_options: Dict[str, Any] = None
    resource_class: Optional[str] = "xlarge"
    resource_class: Optional[str] = "2xlarge"
    tests_to_run: Optional[List[str]] = None
    num_test_files_per_worker: Optional[int] = 10
    # This should be only used for doctest job!
@@ -145,7 +133,7 @@ class CircleCIJob:
                "command": """dpkg-query --show --showformat='${Installed-Size}\t${Package}\n' | sort -rh | head -25 | sort -h | awk '{ package=$2; sub(".*/", "", package); printf("%.5f GB %s\n", $1/1024/1024, package)}' || true"""}
            },
            {"run": {"name": "Create `test-results` directory", "command": "mkdir test-results"}},
            {"run": {"name": "Get files to test", "command":f'curl -L -o {self.job_name}_test_list.txt <<pipeline.parameters.{self.job_name}_test_list>> --header "Circle-Token: $CIRCLE_TOKEN"' if self.name != "pr_documentation_tests" else 'echo "Skipped"'}},
            {"run": {"name": "Get files to test", "command":f'curl -L -o {self.job_name}_test_list.txt <<pipeline.parameters.{self.job_name}_test_list>>' if self.name != "pr_documentation_tests" else 'echo "Skipped"'}},
            {"run": {"name": "Split tests across parallel nodes: show current parallel tests",
                "command": f"TESTS=$(circleci tests split --split-by=timings {self.job_name}_test_list.txt) && echo $TESTS > splitted_tests.txt && echo $TESTS | tr ' ' '\n'" if self.parallelism else f"awk '{{printf \"%s \", $0}}' {self.job_name}_test_list.txt > splitted_tests.txt"
            }
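For reference, the split step above either shards the test list with `circleci tests split --split-by=timings` (when the job is parallel) or flattens the whole list onto one line with awk. A minimal Python sketch of both branches follows (the helper `split_tests` is hypothetical; file names match the snippet above):

import subprocess

def split_tests(job_name: str, parallelism: int) -> None:
    test_list = f"{job_name}_test_list.txt"
    if parallelism:
        # On CircleCI, this returns only the shard of test files assigned
        # to the current parallel node, balanced by historical timings.
        tests = subprocess.check_output(
            ["circleci", "tests", "split", "--split-by=timings", test_list], text=True
        ).split()
    else:
        # Fallback: run everything on a single node (the awk one-liner above).
        with open(test_list) as fh:
            tests = fh.read().split()
    with open("splitted_tests.txt", "w") as fh:
        fh.write(" ".join(tests))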
@@ -175,11 +163,29 @@ class CircleCIJob:


# JOBS
torch_and_tf_job = CircleCIJob(
    "torch_and_tf",
    docker_image=[{"image":"huggingface/transformers-torch-tf-light"}],
    additional_env={"RUN_PT_TF_CROSS_TESTS": True},
    marker="is_pt_tf_cross_test",
    pytest_options={"rA": None, "durations": 0},
)


torch_and_flax_job = CircleCIJob(
    "torch_and_flax",
    additional_env={"RUN_PT_FLAX_CROSS_TESTS": True},
    docker_image=[{"image":"huggingface/transformers-torch-jax-light"}],
    marker="is_pt_flax_cross_test",
    pytest_options={"rA": None, "durations": 0},
)

torch_job = CircleCIJob(
    "torch",
    docker_image=[{"image": "huggingface/transformers-torch-light"}],
    marker="not generate",
    parallelism=6,
    pytest_num_workers=8
)

generate_job = CircleCIJob(
@@ -187,24 +193,28 @@ generate_job = CircleCIJob(
    docker_image=[{"image": "huggingface/transformers-torch-light"}],
    marker="generate",
    parallelism=6,
    pytest_num_workers=8
)

tokenization_job = CircleCIJob(
    "tokenization",
    docker_image=[{"image": "huggingface/transformers-torch-light"}],
    parallelism=8,
    pytest_num_workers=16
)

processor_job = CircleCIJob(
    "processors",
    docker_image=[{"image": "huggingface/transformers-torch-light"}],
    parallelism=8,
    pytest_num_workers=6
)

tf_job = CircleCIJob(
    "tf",
    docker_image=[{"image":"huggingface/transformers-tf-light"}],
    parallelism=6,
    pytest_num_workers=16,
)


@@ -212,8 +222,7 @@ flax_job = CircleCIJob(
    "flax",
    docker_image=[{"image":"huggingface/transformers-jax-light"}],
    parallelism=6,
    pytest_num_workers=16,
    resource_class="2xlarge",
    pytest_num_workers=16
)


@@ -222,7 +231,7 @@ pipelines_torch_job = CircleCIJob(
    additional_env={"RUN_PIPELINE_TESTS": True},
    docker_image=[{"image":"huggingface/transformers-torch-light"}],
    marker="is_pipeline_test",
    parallelism=4,
    parallelism=4
)


@@ -231,7 +240,7 @@ pipelines_tf_job = CircleCIJob(
    additional_env={"RUN_PIPELINE_TESTS": True},
    docker_image=[{"image":"huggingface/transformers-tf-light"}],
    marker="is_pipeline_test",
    parallelism=4,
    parallelism=4
)


@@ -248,6 +257,7 @@ examples_torch_job = CircleCIJob(
    docker_image=[{"image":"huggingface/transformers-examples-torch"}],
    # TODO @ArthurZucker remove this once docker is easier to build
    install_steps=["uv venv && uv pip install . && uv pip install -r examples/pytorch/_tests_requirements.txt"],
    pytest_num_workers=8,
)


@@ -255,6 +265,7 @@ examples_tensorflow_job = CircleCIJob(
    "examples_tensorflow",
    additional_env={"OMP_NUM_THREADS": 8},
    docker_image=[{"image":"huggingface/transformers-examples-tf"}],
    pytest_num_workers=16,
)


@@ -269,7 +280,6 @@ hub_job = CircleCIJob(
    ],
    marker="is_staging_test",
    pytest_num_workers=2,
    resource_class="medium",
)


@@ -282,13 +292,13 @@ onnx_job = CircleCIJob(
    ],
    pytest_options={"k onnx": None},
    pytest_num_workers=1,
    resource_class="small",
)


exotic_models_job = CircleCIJob(
    "exotic_models",
    docker_image=[{"image":"huggingface/transformers-exotic-models"}],
    pytest_num_workers=12,
    parallelism=4,
    pytest_options={"durations": 100},
)
@@ -307,6 +317,7 @@ non_model_job = CircleCIJob(
    docker_image=[{"image": "huggingface/transformers-torch-light"}],
    marker="not generate",
    parallelism=6,
    pytest_num_workers=8,
)


@@ -334,14 +345,13 @@ doc_test_job = CircleCIJob(
    pytest_num_workers=1,
)

REGULAR_TESTS = [torch_job, tf_job, flax_job, hub_job, onnx_job, tokenization_job, processor_job, generate_job, non_model_job] # fmt: skip
REGULAR_TESTS = [torch_and_tf_job, torch_and_flax_job, torch_job, tf_job, flax_job, hub_job, onnx_job, tokenization_job, processor_job, generate_job, non_model_job] # fmt: skip
EXAMPLES_TESTS = [examples_torch_job, examples_tensorflow_job]
PIPELINE_TESTS = [pipelines_torch_job, pipelines_tf_job]
REPO_UTIL_TESTS = [repo_utils_job]
DOC_TESTS = [doc_test_job]
ALL_TESTS = REGULAR_TESTS + EXAMPLES_TESTS + PIPELINE_TESTS + REPO_UTIL_TESTS + DOC_TESTS + [custom_tokenizers_job] + [exotic_models_job] # fmt: skip


def create_circleci_config(folder=None):
    if folder is None:
        folder = os.getcwd()
@@ -351,13 +361,7 @@ def create_circleci_config(folder=None):

    if len(jobs) == 0:
        jobs = [EmptyJob()]
    else:
        print("Full list of job name inputs", {j.job_name + "_test_list":{"type":"string", "default":''} for j in jobs})
        # Add a job waiting all the test jobs and aggregate their test summary files at the end
        collection_job = EmptyJob()
        collection_job.job_name = "collection_job"
        jobs = [collection_job] + jobs

    config = {
        "version": "2.1",
        "parameters": {
@@ -367,14 +371,9 @@ def create_circleci_config(folder=None):
            **{j.job_name + "_test_list":{"type":"string", "default":''} for j in jobs},
            **{j.job_name + "_parallelism":{"type":"integer", "default":1} for j in jobs},
        },
        "jobs": {j.job_name: j.to_dict() for j in jobs}
        "jobs" : {j.job_name: j.to_dict() for j in jobs},
        "workflows": {"version": 2, "run_tests": {"jobs": [j.job_name for j in jobs]}}
    }
    if "CIRCLE_TOKEN" in os.environ:
        # For private forked repo. (e.g. new model addition)
        config["workflows"] = {"version": 2, "run_tests": {"jobs": [{j.job_name: {"context": ["TRANSFORMERS_CONTEXT"]}} for j in jobs]}}
    else:
        # For public repo. (e.g. `transformers`)
        config["workflows"] = {"version": 2, "run_tests": {"jobs": [j.job_name for j in jobs]}}
    with open(os.path.join(folder, "generated_config.yml"), "w") as f:
        f.write(yaml.dump(config, sort_keys=False, default_flow_style=False).replace("' << pipeline", " << pipeline").replace(">> '", " >>"))
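For orientation, `create_circleci_config` above ends up dumping a `config` dict shaped roughly like the hand-written example below (illustrative only, not real generated output; `example_config` and the single "torch" entry are invented for the sketch, while the keys and defaults follow the code above):

# Illustrative shape of the dict written to generated_config.yml for one job.
example_config = {
    "version": "2.1",
    "parameters": {
        # one pair of parameters per selected job, filled in by the test fetcher
        "torch_test_list": {"type": "string", "default": ""},
        "torch_parallelism": {"type": "integer", "default": 1},
    },
    "jobs": {
        # real entries come from CircleCIJob.to_dict() and carry the full step list
        "torch": {
            "docker": [{"image": "huggingface/transformers-torch-light"}],
            "resource_class": "2xlarge",
            "steps": ["checkout"],
        },
    },
    # public-repo variant; when CIRCLE_TOKEN is set, each job instead gets the
    # TRANSFORMERS_CONTEXT context attached (see the branch in the code above)
    "workflows": {"version": 2, "run_tests": {"jobs": ["torch"]}},
}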
.github/ISSUE_TEMPLATE/bug-report.yml (1 line changed)
@@ -106,7 +106,6 @@ body:
      label: Reproduction
      description: |
        Please provide a code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet.
        Please include relevant config information with your code, for example your Trainers, TRL, Peft, and DeepSpeed configs.
        If you have code snippets, error messages, stack traces please provide them here as well.
        Important! Use code tags to correctly format your code. See https://help.github.com/en/github/writing-on-github/creating-and-highlighting-code-blocks#syntax-highlighting
        Do not use screenshots, as they are hard to read and (more importantly) don't allow others to copy-and-paste your code.
.github/workflows/benchmark.yml (6 lines changed)
@@ -18,8 +18,7 @@ jobs:
    name: Benchmark
    strategy:
      matrix:
        # group: [aws-g5-4xlarge-cache, aws-p4d-24xlarge-plus] (A100 runner is not enabled)
        group: [aws-g5-4xlarge-cache]
        group: [aws-g5-4xlarge-cache, aws-p4d-24xlarge-plus]
    runs-on:
      group: ${{ matrix.group }}
    if: |
@@ -64,7 +63,7 @@ jobs:
          commit_id=$GITHUB_SHA
        fi
        commit_msg=$(git show -s --format=%s | cut -c1-70)
        python3 benchmark/benchmarks_entrypoint.py "$BRANCH_NAME" "$commit_id" "$commit_msg"
        python3 benchmark/llama.py "${{ github.head_ref || github.ref_name }}" "$commit_id" "$commit_msg"
      env:
        HF_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
        # Enable this to see debug logs
@@ -73,4 +72,3 @@ jobs:
        PGHOST: ${{ secrets.TRANSFORMERS_BENCHMARKS_PGHOST }}
        PGUSER: transformers_benchmarks
        PGPASSWORD: ${{ secrets.TRANSFORMERS_BENCHMARKS_PGPASSWORD }}
        BRANCH_NAME: ${{ github.head_ref || github.ref_name }}
.github/workflows/build-ci-docker-images.yml (2 lines changed)
@@ -26,7 +26,7 @@ jobs:

    strategy:
      matrix:
        file: ["quality", "consistency", "custom-tokenizers", "torch-light", "tf-light", "exotic-models", "torch-tf-light", "jax-light", "examples-torch", "examples-tf"]
        file: ["quality", "consistency", "custom-tokenizers", "torch-light", "tf-light", "exotic-models", "torch-tf-light", "torch-jax-light", "jax-light", "examples-torch", "examples-tf"]
    continue-on-error: true

    steps:
.github/workflows/build-docker-images.yml (642 lines changed)
@@ -3,7 +3,7 @@ name: Build docker images (scheduled)
on:
  push:
    branches:
      - build_ci_docker_image*
      - test_docker_run_quantization
  repository_dispatch:
  workflow_call:
    inputs:
@ -18,341 +18,341 @@ concurrency:
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
latest-docker:
|
||||
name: "Latest PyTorch + TensorFlow [dev]"
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./docker/transformers-all-latest-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-all-latest-gpu${{ inputs.image_postfix }}
|
||||
# Push CI images still need to be re-built daily
|
||||
-
|
||||
name: Build and push (for Push CI) in a daily basis
|
||||
# This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
|
||||
# The later case is useful for manual image building for debugging purpose. Use another tag in this case!
|
||||
if: inputs.image_postfix != '-push-ci'
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./docker/transformers-all-latest-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-all-latest-gpu-push-ci
|
||||
# latest-docker:
|
||||
# name: "Latest PyTorch + TensorFlow [dev]"
|
||||
# runs-on:
|
||||
# group: aws-general-8-plus
|
||||
# steps:
|
||||
# -
|
||||
# name: Set up Docker Buildx
|
||||
# uses: docker/setup-buildx-action@v3
|
||||
# -
|
||||
# name: Check out code
|
||||
# uses: actions/checkout@v4
|
||||
# -
|
||||
# name: Login to DockerHub
|
||||
# uses: docker/login-action@v3
|
||||
# with:
|
||||
# username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
# password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
# -
|
||||
# name: Build and push
|
||||
# uses: docker/build-push-action@v5
|
||||
# with:
|
||||
# context: ./docker/transformers-all-latest-gpu
|
||||
# build-args: |
|
||||
# REF=main
|
||||
# push: true
|
||||
# tags: huggingface/transformers-all-latest-gpu${{ inputs.image_postfix }}
|
||||
# # Push CI images still need to be re-built daily
|
||||
# -
|
||||
# name: Build and push (for Push CI) in a daily basis
|
||||
# # This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
|
||||
# # The later case is useful for manual image building for debugging purpose. Use another tag in this case!
|
||||
# if: inputs.image_postfix != '-push-ci'
|
||||
# uses: docker/build-push-action@v5
|
||||
# with:
|
||||
# context: ./docker/transformers-all-latest-gpu
|
||||
# build-args: |
|
||||
# REF=main
|
||||
# push: true
|
||||
# tags: huggingface/transformers-all-latest-gpu-push-ci
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the transformers-all-latest-gpu-push-ci docker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
# - name: Post to Slack
|
||||
# if: always()
|
||||
# uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
# with:
|
||||
# slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
# title: 🤗 Results of the transformers-all-latest-gpu-push-ci docker build
|
||||
# status: ${{ job.status }}
|
||||
# slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-torch-deepspeed-docker:
|
||||
name: "Latest PyTorch + DeepSpeed"
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./docker/transformers-pytorch-deepspeed-latest-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-deepspeed-latest-gpu${{ inputs.image_postfix }}
|
||||
# latest-torch-deepspeed-docker:
|
||||
# name: "Latest PyTorch + DeepSpeed"
|
||||
# runs-on:
|
||||
# group: aws-general-8-plus
|
||||
# steps:
|
||||
# -
|
||||
# name: Set up Docker Buildx
|
||||
# uses: docker/setup-buildx-action@v3
|
||||
# -
|
||||
# name: Check out code
|
||||
# uses: actions/checkout@v4
|
||||
# -
|
||||
# name: Login to DockerHub
|
||||
# uses: docker/login-action@v3
|
||||
# with:
|
||||
# username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
# password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
# -
|
||||
# name: Build and push
|
||||
# uses: docker/build-push-action@v5
|
||||
# with:
|
||||
# context: ./docker/transformers-pytorch-deepspeed-latest-gpu
|
||||
# build-args: |
|
||||
# REF=main
|
||||
# push: true
|
||||
# tags: huggingface/transformers-pytorch-deepspeed-latest-gpu${{ inputs.image_postfix }}
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER}}
|
||||
title: 🤗 Results of the transformers-pytorch-deepspeed-latest-gpu docker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
# - name: Post to Slack
|
||||
# if: always()
|
||||
# uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
# with:
|
||||
# slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER}}
|
||||
# title: 🤗 Results of the transformers-pytorch-deepspeed-latest-gpu docker build
|
||||
# status: ${{ job.status }}
|
||||
# slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
# Can't build 2 images in a single job `latest-torch-deepspeed-docker` (for `nvcr.io/nvidia`)
|
||||
latest-torch-deepspeed-docker-for-push-ci-daily-build:
|
||||
name: "Latest PyTorch + DeepSpeed (Push CI - Daily Build)"
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
# Push CI images still need to be re-built daily
|
||||
-
|
||||
name: Build and push (for Push CI) in a daily basis
|
||||
# This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
|
||||
# The later case is useful for manual image building for debugging purpose. Use another tag in this case!
|
||||
if: inputs.image_postfix != '-push-ci'
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./docker/transformers-pytorch-deepspeed-latest-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci
|
||||
# # Can't build 2 images in a single job `latest-torch-deepspeed-docker` (for `nvcr.io/nvidia`)
|
||||
# latest-torch-deepspeed-docker-for-push-ci-daily-build:
|
||||
# name: "Latest PyTorch + DeepSpeed (Push CI - Daily Build)"
|
||||
# runs-on:
|
||||
# group: aws-general-8-plus
|
||||
# steps:
|
||||
# -
|
||||
# name: Set up Docker Buildx
|
||||
# uses: docker/setup-buildx-action@v3
|
||||
# -
|
||||
# name: Check out code
|
||||
# uses: actions/checkout@v4
|
||||
# -
|
||||
# name: Login to DockerHub
|
||||
# uses: docker/login-action@v3
|
||||
# with:
|
||||
# username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
# password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
# # Push CI images still need to be re-built daily
|
||||
# -
|
||||
# name: Build and push (for Push CI) in a daily basis
|
||||
# # This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
|
||||
# # The later case is useful for manual image building for debugging purpose. Use another tag in this case!
|
||||
# if: inputs.image_postfix != '-push-ci'
|
||||
# uses: docker/build-push-action@v5
|
||||
# with:
|
||||
# context: ./docker/transformers-pytorch-deepspeed-latest-gpu
|
||||
# build-args: |
|
||||
# REF=main
|
||||
# push: true
|
||||
# tags: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the transformers-pytorch-deepspeed-latest-gpu-push-ci docker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
# - name: Post to Slack
|
||||
# if: always()
|
||||
# uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
# with:
|
||||
# slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
# title: 🤗 Results of the transformers-pytorch-deepspeed-latest-gpu-push-ci docker build
|
||||
# status: ${{ job.status }}
|
||||
# slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
doc-builder:
|
||||
name: "Doc builder"
|
||||
# Push CI doesn't need this image
|
||||
if: inputs.image_postfix != '-push-ci'
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./docker/transformers-doc-builder
|
||||
push: true
|
||||
tags: huggingface/transformers-doc-builder
|
||||
# doc-builder:
|
||||
# name: "Doc builder"
|
||||
# # Push CI doesn't need this image
|
||||
# if: inputs.image_postfix != '-push-ci'
|
||||
# runs-on:
|
||||
# group: aws-general-8-plus
|
||||
# steps:
|
||||
# -
|
||||
# name: Set up Docker Buildx
|
||||
# uses: docker/setup-buildx-action@v3
|
||||
# -
|
||||
# name: Check out code
|
||||
# uses: actions/checkout@v4
|
||||
# -
|
||||
# name: Login to DockerHub
|
||||
# uses: docker/login-action@v3
|
||||
# with:
|
||||
# username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
# password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
# -
|
||||
# name: Build and push
|
||||
# uses: docker/build-push-action@v5
|
||||
# with:
|
||||
# context: ./docker/transformers-doc-builder
|
||||
# push: true
|
||||
# tags: huggingface/transformers-doc-builder
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the huggingface/transformers-doc-builder docker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
# - name: Post to Slack
|
||||
# if: always()
|
||||
# uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
# with:
|
||||
# slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
# title: 🤗 Results of the huggingface/transformers-doc-builder docker build
|
||||
# status: ${{ job.status }}
|
||||
# slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-pytorch:
|
||||
name: "Latest PyTorch [dev]"
|
||||
# Push CI doesn't need this image
|
||||
if: inputs.image_postfix != '-push-ci'
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./docker/transformers-pytorch-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-gpu
|
||||
# latest-pytorch:
|
||||
# name: "Latest PyTorch [dev]"
|
||||
# # Push CI doesn't need this image
|
||||
# if: inputs.image_postfix != '-push-ci'
|
||||
# runs-on:
|
||||
# group: aws-general-8-plus
|
||||
# steps:
|
||||
# -
|
||||
# name: Set up Docker Buildx
|
||||
# uses: docker/setup-buildx-action@v3
|
||||
# -
|
||||
# name: Check out code
|
||||
# uses: actions/checkout@v4
|
||||
# -
|
||||
# name: Login to DockerHub
|
||||
# uses: docker/login-action@v3
|
||||
# with:
|
||||
# username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
# password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
# -
|
||||
# name: Build and push
|
||||
# uses: docker/build-push-action@v5
|
||||
# with:
|
||||
# context: ./docker/transformers-pytorch-gpu
|
||||
# build-args: |
|
||||
# REF=main
|
||||
# push: true
|
||||
# tags: huggingface/transformers-pytorch-gpu
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the huggingface/transformers-pytorch-gpudocker build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
# - name: Post to Slack
|
||||
# if: always()
|
||||
# uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
# with:
|
||||
# slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
# title: 🤗 Results of the huggingface/transformers-pytorch-gpudocker build
|
||||
# status: ${{ job.status }}
|
||||
# slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-pytorch-amd:
|
||||
name: "Latest PyTorch (AMD) [dev]"
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./docker/transformers-pytorch-amd-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-amd-gpu${{ inputs.image_postfix }}
|
||||
# Push CI images still need to be re-built daily
|
||||
-
|
||||
name: Build and push (for Push CI) in a daily basis
|
||||
# This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
|
||||
# The later case is useful for manual image building for debugging purpose. Use another tag in this case!
|
||||
if: inputs.image_postfix != '-push-ci'
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./docker/transformers-pytorch-amd-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-amd-gpu-push-ci
|
||||
# latest-pytorch-amd:
|
||||
# name: "Latest PyTorch (AMD) [dev]"
|
||||
# runs-on:
|
||||
# group: aws-general-8-plus
|
||||
# steps:
|
||||
# -
|
||||
# name: Set up Docker Buildx
|
||||
# uses: docker/setup-buildx-action@v3
|
||||
# -
|
||||
# name: Check out code
|
||||
# uses: actions/checkout@v4
|
||||
# -
|
||||
# name: Login to DockerHub
|
||||
# uses: docker/login-action@v3
|
||||
# with:
|
||||
# username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
# password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
# -
|
||||
# name: Build and push
|
||||
# uses: docker/build-push-action@v5
|
||||
# with:
|
||||
# context: ./docker/transformers-pytorch-amd-gpu
|
||||
# build-args: |
|
||||
# REF=main
|
||||
# push: true
|
||||
# tags: huggingface/transformers-pytorch-amd-gpu${{ inputs.image_postfix }}
|
||||
# # Push CI images still need to be re-built daily
|
||||
# -
|
||||
# name: Build and push (for Push CI) in a daily basis
|
||||
# # This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
|
||||
# # The later case is useful for manual image building for debugging purpose. Use another tag in this case!
|
||||
# if: inputs.image_postfix != '-push-ci'
|
||||
# uses: docker/build-push-action@v5
|
||||
# with:
|
||||
# context: ./docker/transformers-pytorch-amd-gpu
|
||||
# build-args: |
|
||||
# REF=main
|
||||
# push: true
|
||||
# tags: huggingface/transformers-pytorch-amd-gpu-push-ci
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the huggingface/transformers-pytorch-amd-gpu-push-ci build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
# - name: Post to Slack
|
||||
# if: always()
|
||||
# uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
# with:
|
||||
# slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
# title: 🤗 Results of the huggingface/transformers-pytorch-amd-gpu-push-ci build
|
||||
# status: ${{ job.status }}
|
||||
# slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-tensorflow:
|
||||
name: "Latest TensorFlow [dev]"
|
||||
# Push CI doesn't need this image
|
||||
if: inputs.image_postfix != '-push-ci'
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./docker/transformers-tensorflow-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-tensorflow-gpu
|
||||
# latest-tensorflow:
|
||||
# name: "Latest TensorFlow [dev]"
|
||||
# # Push CI doesn't need this image
|
||||
# if: inputs.image_postfix != '-push-ci'
|
||||
# runs-on:
|
||||
# group: aws-general-8-plus
|
||||
# steps:
|
||||
# -
|
||||
# name: Set up Docker Buildx
|
||||
# uses: docker/setup-buildx-action@v3
|
||||
# -
|
||||
# name: Check out code
|
||||
# uses: actions/checkout@v4
|
||||
# -
|
||||
# name: Login to DockerHub
|
||||
# uses: docker/login-action@v3
|
||||
# with:
|
||||
# username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
# password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
# -
|
||||
# name: Build and push
|
||||
# uses: docker/build-push-action@v5
|
||||
# with:
|
||||
# context: ./docker/transformers-tensorflow-gpu
|
||||
# build-args: |
|
||||
# REF=main
|
||||
# push: true
|
||||
# tags: huggingface/transformers-tensorflow-gpu
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the huggingface/transformers-tensorflow-gpu build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
# - name: Post to Slack
|
||||
# if: always()
|
||||
# uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
# with:
|
||||
# slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
# title: 🤗 Results of the huggingface/transformers-tensorflow-gpu build
|
||||
# status: ${{ job.status }}
|
||||
# slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-pytorch-deepspeed-amd:
|
||||
name: "PyTorch + DeepSpeed (AMD) [dev]"
|
||||
runs-on:
|
||||
group: aws-general-8-plus
|
||||
steps:
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
-
|
||||
name: Check out code
|
||||
uses: actions/checkout@v4
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./docker/transformers-pytorch-deepspeed-amd-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-deepspeed-amd-gpu${{ inputs.image_postfix }}
|
||||
# Push CI images still need to be re-built daily
|
||||
-
|
||||
name: Build and push (for Push CI) in a daily basis
|
||||
# This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
|
||||
# The later case is useful for manual image building for debugging purpose. Use another tag in this case!
|
||||
if: inputs.image_postfix != '-push-ci'
|
||||
uses: docker/build-push-action@v5
|
||||
with:
|
||||
context: ./docker/transformers-pytorch-deepspeed-amd-gpu
|
||||
build-args: |
|
||||
REF=main
|
||||
push: true
|
||||
tags: huggingface/transformers-pytorch-deepspeed-amd-gpu-push-ci
|
||||
# latest-pytorch-deepspeed-amd:
|
||||
# name: "PyTorch + DeepSpeed (AMD) [dev]"
|
||||
# runs-on:
|
||||
# group: aws-general-8-plus
|
||||
# steps:
|
||||
# -
|
||||
# name: Set up Docker Buildx
|
||||
# uses: docker/setup-buildx-action@v3
|
||||
# -
|
||||
# name: Check out code
|
||||
# uses: actions/checkout@v4
|
||||
# -
|
||||
# name: Login to DockerHub
|
||||
# uses: docker/login-action@v3
|
||||
# with:
|
||||
# username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
# password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
# -
|
||||
# name: Build and push
|
||||
# uses: docker/build-push-action@v5
|
||||
# with:
|
||||
# context: ./docker/transformers-pytorch-deepspeed-amd-gpu
|
||||
# build-args: |
|
||||
# REF=main
|
||||
# push: true
|
||||
# tags: huggingface/transformers-pytorch-deepspeed-amd-gpu${{ inputs.image_postfix }}
|
||||
# # Push CI images still need to be re-built daily
|
||||
# -
|
||||
# name: Build and push (for Push CI) in a daily basis
|
||||
# # This condition allows `schedule` events, or `push` events that trigger this workflow NOT via `workflow_call`.
|
||||
# # The later case is useful for manual image building for debugging purpose. Use another tag in this case!
|
||||
# if: inputs.image_postfix != '-push-ci'
|
||||
# uses: docker/build-push-action@v5
|
||||
# with:
|
||||
# context: ./docker/transformers-pytorch-deepspeed-amd-gpu
|
||||
# build-args: |
|
||||
# REF=main
|
||||
# push: true
|
||||
# tags: huggingface/transformers-pytorch-deepspeed-amd-gpu-push-ci
|
||||
|
||||
- name: Post to Slack
|
||||
if: always()
|
||||
uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
with:
|
||||
slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
title: 🤗 Results of the transformers-pytorch-deepspeed-amd-gpu build
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
# - name: Post to Slack
|
||||
# if: always()
|
||||
# uses: huggingface/hf-workflows/.github/actions/post-slack@main
|
||||
# with:
|
||||
# slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }}
|
||||
# title: 🤗 Results of the transformers-pytorch-deepspeed-amd-gpu build
|
||||
# status: ${{ job.status }}
|
||||
# slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
||||
latest-quantization-torch-docker:
|
||||
name: "Latest Pytorch + Quantization [dev]"
|
||||
|
||||
.github/workflows/change_pr_to_draft.yml (25 lines changed)
@@ -1,25 +0,0 @@
name: Change PR to draft

on:
  pull_request_target:
    types: [opened, reopened]

jobs:
  convert_pr_to_draft:
    runs-on: ubuntu-22.04
    name: Convert PR to draft
    permissions:
      pull-requests: write
      contents: write
    if: github.event.pull_request.draft == false
    steps:
      - name: Convert PR to draft
        shell: bash
        env:
          PR_NUMBER: ${{ github.event.number }}
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          REPO: ${{ github.repository }}
        run: |
          echo $PR_NUMBER
          gh pr ready $PR_NUMBER --repo $REPO --undo
          gh pr comment $PR_NUMBER --repo $REPO --body "Hi 👋, thank you for opening this pull request! The pull request is converted to draft by default. When it is ready for review, please click the \`Ready for review\` button (at the bottom of the PR page)."
@@ -22,6 +22,7 @@ env:
  HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
  TF_FORCE_GPU_ALLOW_GROWTH: true
  RUN_PT_TF_CROSS_TESTS: 1
  CUDA_VISIBLE_DEVICES: 0,1

.github/workflows/model_jobs.yml (1 line changed)
@@ -30,6 +30,7 @@ env:
  HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
  TF_FORCE_GPU_ALLOW_GROWTH: true
  RUN_PT_TF_CROSS_TESTS: 1
  CUDA_VISIBLE_DEVICES: 0,1

jobs:
.github/workflows/model_jobs_amd.yml (1 line changed)
@@ -30,6 +30,7 @@ env:
  HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
  TF_FORCE_GPU_ALLOW_GROWTH: true
  RUN_PT_TF_CROSS_TESTS: 1
  CUDA_VISIBLE_DEVICES: 0,1

jobs:
@ -1,68 +0,0 @@
|
||||
# Used to notify core maintainers about new model PR being merged
|
||||
name: New model PR merged notification
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- 'src/transformers/models/*/modeling_*'
|
||||
|
||||
jobs:
|
||||
notify_new_model:
|
||||
name: Notify new model
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- name: Check new model
|
||||
shell: bash
|
||||
run: |
|
||||
python -m pip install gitpython
|
||||
python -c 'from utils.pr_slow_ci_models import get_new_model; new_model = get_new_model(diff_with_last_commit=True); print(new_model)' | tee output.txt
|
||||
echo "NEW_MODEL=$(tail -n 1 output.txt)" >> $GITHUB_ENV
|
||||
echo "COMMIT_SHA=$(git log -1 --format=%H)" >> $GITHUB_ENV
|
||||
|
||||
- name: print commit sha
|
||||
if: ${{ env.NEW_MODEL != ''}}
|
||||
shell: bash
|
||||
run: |
|
||||
echo "$COMMIT_SHA"
|
||||
|
||||
- name: print new model
|
||||
if: ${{ env.NEW_MODEL != ''}}
|
||||
shell: bash
|
||||
run: |
|
||||
echo "$NEW_MODEL"
|
||||
|
||||
- name: Notify
|
||||
if: ${{ env.NEW_MODEL != ''}}
|
||||
uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001
|
||||
with:
|
||||
# Slack channel id, channel name, or user id to post message.
|
||||
# See also: https://api.slack.com/methods/chat.postMessage#channels
|
||||
channel-id: transformers-new-model-notification
|
||||
# For posting a rich message using Block Kit
|
||||
payload: |
|
||||
{
|
||||
"blocks": [
|
||||
{
|
||||
"type": "header",
|
||||
"text": {
|
||||
"type": "plain_text",
|
||||
"text": "New model!",
|
||||
"emoji": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": "<https://github.com/huggingface/transformers/commit/${{ env.COMMIT_SHA }}|New model: ${{ env.NEW_MODEL }}> GH_ArthurZucker, GH_lysandrejik, GH_ydshieh"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
env:
|
||||
SLACK_BOT_TOKEN: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
.github/workflows/push-important-models.yml (8 lines changed)
@@ -14,6 +14,7 @@ env:
  RUN_SLOW: yes # For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access. # This token is created under the bot `hf-transformers-bot`.
  SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
  TF_FORCE_GPU_ALLOW_GROWTH: true
  RUN_PT_TF_CROSS_TESTS: 1

jobs:
  get_modified_models:
@@ -133,3 +134,10 @@ jobs:
          slackChannel: ${{ secrets.SLACK_CIFEEDBACK_CHANNEL }}
          slackToken: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
          waitForSSH: true

  benchmark:
    name: Benchmark workflow
    needs: get_modified_models
    if: ${{ needs.get_modified_models.outputs.matrix != '[]' && needs.get_modified_models.outputs.matrix != '' && fromJson(needs.get_modified_models.outputs.matrix)[0] != null }}
    uses: ./.github/workflows/benchmark.yml
    secrets: inherit
.github/workflows/self-comment-ci.yml (416 lines changed)
@ -1,416 +0,0 @@
|
||||
name: PR comment GitHub CI
|
||||
|
||||
on:
|
||||
issue_comment:
|
||||
types:
|
||||
- created
|
||||
branches-ignore:
|
||||
- main
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.issue.number }}-${{ startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow') }}
|
||||
cancel-in-progress: true
|
||||
permissions: read-all
|
||||
|
||||
env:
|
||||
HF_HOME: /mnt/cache
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
OMP_NUM_THREADS: 8
|
||||
MKL_NUM_THREADS: 8
|
||||
RUN_SLOW: yes
|
||||
# For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access.
|
||||
# This token is created under the bot `hf-transformers-bot`.
|
||||
HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
|
||||
SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
|
||||
TF_FORCE_GPU_ALLOW_GROWTH: true
|
||||
CUDA_VISIBLE_DEVICES: 0,1
|
||||
|
||||
jobs:
|
||||
get-pr-number:
|
||||
runs-on: ubuntu-22.04
|
||||
name: Get PR number
|
||||
# For security: only allow team members to run
|
||||
if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "qubvel", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1", "SunMarc", "muellerzr"]'), github.actor) && (startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow')) }}
|
||||
outputs:
|
||||
PR_NUMBER: ${{ steps.set_pr_number.outputs.PR_NUMBER }}
|
||||
steps:
|
||||
- name: Get PR number
|
||||
shell: bash
|
||||
run: |
|
||||
if [[ "${{ github.event.issue.number }}" != "" && "${{ github.event.issue.pull_request }}" != "" ]]; then
|
||||
echo "PR_NUMBER=${{ github.event.issue.number }}" >> $GITHUB_ENV
|
||||
else
|
||||
echo "PR_NUMBER=" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Check PR number
|
||||
shell: bash
|
||||
run: |
|
||||
echo "${{ env.PR_NUMBER }}"
|
||||
|
||||
- name: Set PR number
|
||||
id: set_pr_number
|
||||
run: echo "PR_NUMBER=${{ env.PR_NUMBER }}" >> "$GITHUB_OUTPUT"
|
||||
|
||||
get-sha:
|
||||
runs-on: ubuntu-22.04
|
||||
needs: get-pr-number
|
||||
if: ${{ needs.get-pr-number.outputs.PR_NUMBER != ''}}
|
||||
outputs:
|
||||
PR_HEAD_SHA: ${{ steps.get_sha.outputs.PR_HEAD_SHA }}
|
||||
PR_MERGE_SHA: ${{ steps.get_sha.outputs.PR_MERGE_SHA }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: "0"
|
||||
ref: "refs/pull/${{needs.get-pr-number.outputs.PR_NUMBER}}/merge"
|
||||
|
||||
- name: Get SHA (and verify timestamps against the issue comment date)
|
||||
id: get_sha
|
||||
env:
|
||||
PR_NUMBER: ${{ needs.get-pr-number.outputs.PR_NUMBER }}
|
||||
COMMENT_DATE: ${{ github.event.comment.created_at }}
|
||||
run: |
|
||||
git fetch origin refs/pull/$PR_NUMBER/head:refs/remotes/pull/$PR_NUMBER/head
|
||||
git checkout refs/remotes/pull/$PR_NUMBER/head
|
||||
echo "PR_HEAD_SHA: $(git log -1 --format=%H)"
|
||||
echo "PR_HEAD_SHA=$(git log -1 --format=%H)" >> "$GITHUB_OUTPUT"
|
||||
git fetch origin refs/pull/$PR_NUMBER/merge:refs/remotes/pull/$PR_NUMBER/merge
|
||||
git checkout refs/remotes/pull/$PR_NUMBER/merge
|
||||
echo "PR_MERGE_SHA: $(git log -1 --format=%H)"
|
||||
echo "PR_MERGE_SHA=$(git log -1 --format=%H)" >> "$GITHUB_OUTPUT"
|
||||
PR_MERGE_COMMIT_TIMESTAMP=$(git log -1 --date=unix --format=%cd)
|
||||
echo "PR_MERGE_COMMIT_TIMESTAMP: $PR_MERGE_COMMIT_TIMESTAMP"
|
||||
COMMENT_TIMESTAMP=$(date -d "${COMMENT_DATE}" +"%s")
|
||||
echo "COMMENT_DATE: $COMMENT_DATE"
|
||||
echo "COMMENT_TIMESTAMP: $COMMENT_TIMESTAMP"
|
||||
if [ $COMMENT_TIMESTAMP -le $PR_MERGE_COMMIT_TIMESTAMP ]; then
|
||||
echo "Last commit on the pull request is newer than the issue comment triggering this run! Abort!";
|
||||
exit -1;
|
||||
fi
|
||||
|
||||
# use a python script to handle this complex logic
|
||||
# case 1: `run-slow` (auto. infer with limited number of models, but in particular, new model)
|
||||
# case 2: `run-slow model_1, model_2`
|
||||
get-tests:
|
||||
runs-on: ubuntu-22.04
|
||||
needs: [get-pr-number, get-sha]
|
||||
if: ${{ needs.get-pr-number.outputs.PR_NUMBER != ''}}
|
||||
outputs:
|
||||
models: ${{ steps.models_to_run.outputs.models }}
|
||||
quantizations: ${{ steps.models_to_run.outputs.quantizations }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: "0"
|
||||
ref: "refs/pull/${{needs.get-pr-number.outputs.PR_NUMBER}}/merge"
|
||||
|
||||
- name: Verify merge commit SHA
|
||||
env:
|
||||
VERIFIED_PR_MERGE_SHA: ${{ needs.get-sha.outputs.PR_MERGE_SHA }}
|
||||
run: |
|
||||
PR_MERGE_SHA=$(git log -1 --format=%H)
|
||||
if [ $PR_MERGE_SHA != $VERIFIED_PR_MERGE_SHA ]; then
|
||||
echo "The merged commit SHA is not the same as the verified one! Security issue detected, abort the workflow!";
|
||||
exit -1;
|
||||
fi
|
||||
|
||||
- name: Get models to test
|
||||
env:
|
||||
PR_COMMENT: ${{ github.event.comment.body }}
|
||||
run: |
|
||||
python -m pip install GitPython
|
||||
python utils/pr_slow_ci_models.py --message "$PR_COMMENT" | tee output.txt
|
||||
echo "models=$(tail -n 1 output.txt)" >> $GITHUB_ENV
|
||||
python utils/pr_slow_ci_models.py --message "$PR_COMMENT" --quantization | tee output2.txt
|
||||
echo "quantizations=$(tail -n 1 output2.txt)" >> $GITHUB_ENV
|
||||
|
||||
- name: Show models to test
|
||||
id: models_to_run
|
||||
run: |
|
||||
echo "${{ env.models }}"
|
||||
echo "models=${{ env.models }}" >> $GITHUB_ENV
|
||||
echo "models=${{ env.models }}" >> $GITHUB_OUTPUT
|
||||
echo "${{ env.quantizations }}"
|
||||
echo "quantizations=${{ env.quantizations }}" >> $GITHUB_OUTPUT
|
||||
|
||||
reply_to_comment:
|
||||
name: Reply to the comment
|
||||
if: ${{ needs.get-tests.outputs.models != '[]' || needs.get-tests.outputs.quantizations != '[]' }}
|
||||
needs: [get-pr-number, get-tests]
|
||||
permissions:
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Reply to the comment
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
MODELS: ${{ needs.get-tests.outputs.models }}
|
||||
BODY: "This comment contains run-slow, running the specified jobs:\n\nmodels: ${{ needs.get-tests.outputs.models }}\nquantizations: ${{ needs.get-tests.outputs.quantizations }}"
|
||||
run: |
|
||||
gh api \
|
||||
--method POST \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||
repos/${{ github.repository }}/issues/${{ needs.get-pr-number.outputs.PR_NUMBER }}/comments \
|
||||
-f "body=This comment contains run-slow, running the specified jobs: ${{ env.BODY }} ..."
|
||||
|
||||
create_run:
|
||||
name: Create run
|
||||
if: ${{ needs.get-tests.outputs.models != '[]' || needs.get-tests.outputs.quantizations != '[]' }}
|
||||
needs: [get-sha, get-tests, reply_to_comment]
|
||||
permissions:
|
||||
statuses: write
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Create Run
|
||||
id: create_run
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
# Create a commit status (pending) for a run of this workflow. The status has to be updated later in `update_run_status`.
|
||||
# See https://docs.github.com/en/rest/commits/statuses?apiVersion=2022-11-28#create-a-commit-status
|
||||
GITHUB_RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
run: |
|
||||
gh api \
|
||||
--method POST \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||
repos/${{ github.repository }}/statuses/${{ needs.get-sha.outputs.PR_HEAD_SHA }} \
|
||||
-f "target_url=$GITHUB_RUN_URL" -f "state=pending" -f "description=Slow CI job" -f "context=pytest/custom-tests"
|
||||
|
||||
run_models_gpu:
|
||||
name: Run all tests for the model
|
||||
if: ${{ needs.get-tests.outputs.models != '[]' }}
|
||||
needs: [get-pr-number, get-sha, get-tests, create_run]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
folders: ${{ fromJson(needs.get-tests.outputs.models) }}
|
||||
machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache]
|
||||
runs-on:
|
||||
group: '${{ matrix.machine_type }}'
|
||||
container:
|
||||
image: huggingface/transformers-all-latest-gpu
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Echo input and matrix info
|
||||
shell: bash
|
||||
run: |
|
||||
echo "${{ matrix.folders }}"
|
||||
|
||||
- name: Echo folder ${{ matrix.folders }}
|
||||
shell: bash
|
||||
# For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
|
||||
# set the artifact folder names (because the character `/` is not allowed).
|
||||
run: |
|
||||
echo "${{ matrix.folders }}"
|
||||
matrix_folders=${{ matrix.folders }}
|
||||
matrix_folders=${matrix_folders/'models/'/'models_'}
|
||||
echo "$matrix_folders"
|
||||
echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV
|
||||
|
||||
- name: Checkout to PR merge commit
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
git fetch origin refs/pull/${{ needs.get-pr-number.outputs.PR_NUMBER }}/merge:refs/remotes/pull/${{ needs.get-pr-number.outputs.PR_NUMBER }}/merge
|
||||
git checkout refs/remotes/pull/${{ needs.get-pr-number.outputs.PR_NUMBER }}/merge
|
||||
git log -1 --format=%H
|
||||
|
||||
- name: Verify merge commit SHA
|
||||
env:
|
||||
VERIFIED_PR_MERGE_SHA: ${{ needs.get-sha.outputs.PR_MERGE_SHA }}
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
PR_MERGE_SHA=$(git log -1 --format=%H)
|
||||
if [ $PR_MERGE_SHA != $VERIFIED_PR_MERGE_SHA ]; then
|
||||
echo "The merged commit SHA is not the same as the verified one! Security issue detected, abort the workflow!";
|
||||
exit -1;
|
||||
fi
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Set `machine_type` for report and artifact names
|
||||
working-directory: /transformers
|
||||
shell: bash
|
||||
run: |
|
||||
echo "${{ matrix.machine_type }}"
|
||||
if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then
|
||||
machine_type=single-gpu
|
||||
elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then
|
||||
machine_type=multi-gpu
|
||||
else
|
||||
machine_type=${{ matrix.machine_type }}
|
||||
fi
|
||||
echo "$machine_type"
|
||||
echo "machine_type=$machine_type" >> $GITHUB_ENV
|
||||
|
||||
- name: Environment
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 utils/print_env.py
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- name: Run all tests on GPU
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
export CUDA_VISIBLE_DEVICES="$(python3 utils/set_cuda_devices_for_ci.py --test_folder ${{ matrix.folders }})"
|
||||
echo $CUDA_VISIBLE_DEVICES
|
||||
python3 -m pytest -v -rsfE --make-reports=${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: cat /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/failures_short.txt
|
||||
|
||||
- name: Make sure report directory exists
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports
|
||||
echo "hello" > /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/hello.txt
|
||||
echo "${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports"
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ env.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ env.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports
|
||||
path: /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports
|
||||
|
||||
run_quantization_torch_gpu:
|
||||
name: Run all tests for a quantization
|
||||
if: ${{ needs.get-tests.outputs.quantizations != '[]' }}
|
||||
needs: [get-pr-number, get-sha, get-tests, create_run]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
folders: ${{ fromJson(needs.get-tests.outputs.quantizations) }}
|
||||
machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache]
|
||||
runs-on:
|
||||
group: '${{ matrix.machine_type }}'
|
||||
container:
|
||||
image: huggingface/transformers-quantization-latest-gpu
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Echo folder ${{ matrix.folders }}
|
||||
shell: bash
|
||||
run: |
|
||||
echo "${{ matrix.folders }}"
|
||||
matrix_folders=${{ matrix.folders }}
|
||||
matrix_folders=${matrix_folders/'quantization/'/'quantization_'}
|
||||
echo "$matrix_folders"
|
||||
echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV
|
||||
|
||||
- name: Checkout to PR merge commit
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
git fetch origin refs/pull/${{ needs.get-pr-number.outputs.PR_NUMBER }}/merge:refs/remotes/pull/${{ needs.get-pr-number.outputs.PR_NUMBER }}/merge
|
||||
git checkout refs/remotes/pull/${{ needs.get-pr-number.outputs.PR_NUMBER }}/merge
|
||||
git log -1 --format=%H
|
||||
|
||||
- name: Verify merge commit SHA
|
||||
env:
|
||||
VERIFIED_PR_MERGE_SHA: ${{ needs.get-sha.outputs.PR_MERGE_SHA }}
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
PR_MERGE_SHA=$(git log -1 --format=%H)
|
||||
if [ $PR_MERGE_SHA != $VERIFIED_PR_MERGE_SHA ]; then
|
||||
echo "The merged commit SHA is not the same as the verified one! Security issue detected, abort the workflow!";
|
||||
exit -1;
|
||||
fi
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Set `machine_type` for report and artifact names
|
||||
working-directory: /transformers
|
||||
shell: bash
|
||||
run: |
|
||||
echo "${{ matrix.machine_type }}"
|
||||
if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then
|
||||
machine_type=single-gpu
|
||||
elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then
|
||||
machine_type=multi-gpu
|
||||
else
|
||||
machine_type=${{ matrix.machine_type }}
|
||||
fi
|
||||
echo "$machine_type"
|
||||
echo "machine_type=$machine_type" >> $GITHUB_ENV
|
||||
|
||||
- name: Environment
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 utils/print_env.py
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- name: Run quantization tests on GPU
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 -m pytest -v --make-reports=${{ env.machine_type }}_run_quantization_torch_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: cat /transformers/reports/${{ env.machine_type }}_run_quantization_torch_gpu_${{ matrix.folders }}_test_reports/failures_short.txt
|
||||
|
||||
- name: Make sure report directory exists
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p /transformers/reports/${{ env.machine_type }}_run_quantization_gpu_${{ matrix.folders }}_test_reports
|
||||
echo "hello" > /transformers/reports/${{ env.machine_type }}_run_quantization_gpu_${{ matrix.folders }}_test_reports/hello.txt
|
||||
echo "${{ env.machine_type }}_run_quantization_gpu_${{ matrix.folders }}_test_reports"
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ env.machine_type }}_run_quantization_torch_gpu_${{ env.matrix_folders }}_test_reports"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ env.machine_type }}_run_quantization_torch_gpu_${{ env.matrix_folders }}_test_reports
|
||||
path: /transformers/reports/${{ env.machine_type }}_run_quantization_torch_gpu_${{ matrix.folders }}_test_reports
|
||||
|
||||
update_run_status:
|
||||
name: Update Check Run Status
|
||||
needs: [get-sha, create_run, run_models_gpu, run_quantization_torch_gpu]
|
||||
permissions:
|
||||
statuses: write
|
||||
if: ${{ always() && needs.create_run.result == 'success' }}
|
||||
runs-on: ubuntu-22.04
|
||||
env:
|
||||
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
GITHUB_RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||
STATUS_OK: ${{ contains(fromJSON('["skipped", "success"]'), needs.run_models_gpu.result) && contains(fromJSON('["skipped", "success"]'), needs.run_quantization_torch_gpu.result) }}
|
||||
steps:
|
||||
- name: Get `run_models_gpu` job status
|
||||
run: |
|
||||
echo "${{ needs.run_models_gpu.result }}"
|
||||
echo "${{ needs.run_quantization_torch_gpu.result }}"
|
||||
echo $STATUS_OK
|
||||
if [ "$STATUS_OK" = "true" ]; then
|
||||
echo "STATUS=success" >> $GITHUB_ENV
|
||||
else
|
||||
echo "STATUS=failure" >> $GITHUB_ENV
|
||||
fi
|
||||
|
||||
- name: Update PR commit statuses
|
||||
run: |
|
||||
echo "${{ needs.run_models_gpu.result }}"
|
||||
echo "${{ env.STATUS }}"
|
||||
gh api \
|
||||
--method POST \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
-H "X-GitHub-Api-Version: 2022-11-28" \
|
||||
repos/${{ github.repository }}/statuses/${{ needs.get-sha.outputs.PR_HEAD_SHA }} \
|
||||
-f "target_url=$GITHUB_RUN_URL" -f "state=${{ env.STATUS }}" -f "description=Slow CI job" -f "context=pytest/custom-tests"
|
||||
@ -21,6 +21,39 @@ jobs:
|
||||
echo "$(python3 -c 'print(int(${{ github.run_number }}) % 10)')"
|
||||
echo "run_number=$(python3 -c 'print(int(${{ github.run_number }}) % 10)')" >> $GITHUB_OUTPUT
|
||||
|
||||
run_past_ci_pytorch_1-13:
|
||||
name: PyTorch 1.13
|
||||
needs: get_number
|
||||
if: needs.get_number.outputs.run_number == 0 && (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
|
||||
uses: ./.github/workflows/self-past-caller.yml
|
||||
with:
|
||||
framework: pytorch
|
||||
version: "1.13"
|
||||
sha: ${{ github.sha }}
|
||||
secrets: inherit
|
||||
|
||||
run_past_ci_pytorch_1-12:
|
||||
name: PyTorch 1.12
|
||||
needs: get_number
|
||||
if: needs.get_number.outputs.run_number == 1 && (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
|
||||
uses: ./.github/workflows/self-past-caller.yml
|
||||
with:
|
||||
framework: pytorch
|
||||
version: "1.12"
|
||||
sha: ${{ github.sha }}
|
||||
secrets: inherit
|
||||
|
||||
run_past_ci_pytorch_1-11:
|
||||
name: PyTorch 1.11
|
||||
needs: get_number
|
||||
if: needs.get_number.outputs.run_number == 2 && (cancelled() != true) && ((github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_past_ci')))
|
||||
uses: ./.github/workflows/self-past-caller.yml
|
||||
with:
|
||||
framework: pytorch
|
||||
version: "1.11"
|
||||
sha: ${{ github.sha }}
|
||||
secrets: inherit
|
||||
|
||||
run_past_ci_tensorflow_2-11:
|
||||
name: TensorFlow 2.11
|
||||
needs: get_number
|
||||
|
||||
.github/workflows/self-pr-slow-ci.yml (new file, 151 lines)
@ -0,0 +1,151 @@
|
||||
name: PR slow CI
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- "src/transformers/models/*/modeling_*.py"
|
||||
- "tests/**/test_*.py"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
|
||||
cancel-in-progress: true
|
||||
|
||||
env:
|
||||
HF_HOME: /mnt/cache
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
OMP_NUM_THREADS: 8
|
||||
MKL_NUM_THREADS: 8
|
||||
RUN_SLOW: yes
|
||||
# For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access.
|
||||
# This token is created under the bot `hf-transformers-bot`.
|
||||
HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
|
||||
SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
|
||||
TF_FORCE_GPU_ALLOW_GROWTH: true
|
||||
RUN_PT_TF_CROSS_TESTS: 1
|
||||
CUDA_VISIBLE_DEVICES: 0,1
|
||||
|
||||
jobs:
|
||||
find_models_to_run:
|
||||
runs-on: ubuntu-22.04
|
||||
name: Find models to run slow tests
|
||||
# Triggered only if the required label `run-slow` is added
|
||||
if: ${{ contains(github.event.pull_request.labels.*.name, 'run-slow') }}
|
||||
outputs:
|
||||
models: ${{ steps.models_to_run.outputs.models }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: "0"
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
- name: Get commit message
|
||||
run: |
|
||||
echo "commit_message=$(git show -s --format=%s)" >> $GITHUB_ENV
|
||||
|
||||
- name: Get models to run slow tests
|
||||
run: |
|
||||
echo "${{ env.commit_message }}"
|
||||
python -m pip install GitPython
|
||||
python utils/pr_slow_ci_models.py --commit_message "${{ env.commit_message }}" | tee output.txt
|
||||
echo "models=$(tail -n 1 output.txt)" >> $GITHUB_ENV
|
||||
|
||||
- name: Models to run slow tests
|
||||
id: models_to_run
|
||||
run: |
|
||||
echo "${{ env.models }}"
|
||||
echo "models=${{ env.models }}" >> $GITHUB_OUTPUT
|
||||
|
||||
run_models_gpu:
|
||||
name: Run all tests for the model
|
||||
# Triggered only `find_models_to_run` is triggered (label `run-slow` is added) which gives the models to run
|
||||
# (either a new model PR or via a commit message)
|
||||
if: ${{ needs.find_models_to_run.outputs.models != '[]' }}
|
||||
needs: find_models_to_run
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
folders: ${{ fromJson(needs.find_models_to_run.outputs.models) }}
|
||||
machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache]
|
||||
runs-on:
|
||||
group: '${{ matrix.machine_type }}'
|
||||
container:
|
||||
image: huggingface/transformers-all-latest-gpu
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Echo input and matrix info
|
||||
shell: bash
|
||||
run: |
|
||||
echo "${{ matrix.folders }}"
|
||||
|
||||
- name: Echo folder ${{ matrix.folders }}
|
||||
shell: bash
|
||||
# For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to
|
||||
# set the artifact folder names (because the character `/` is not allowed).
|
||||
run: |
|
||||
echo "${{ matrix.folders }}"
|
||||
matrix_folders=${{ matrix.folders }}
|
||||
matrix_folders=${matrix_folders/'models/'/'models_'}
|
||||
echo "$matrix_folders"
|
||||
echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV
|
||||
|
||||
- name: Update clone
|
||||
working-directory: /transformers
|
||||
run: git fetch && git fetch origin pull/${{ github.event.pull_request.number }}/head:pull/${{ github.event.pull_request.number }}/merge && git checkout pull/${{ github.event.pull_request.number }}/merge
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e . && python3 -m pip install --upgrade torch torchaudio torchvision
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
run: |
|
||||
nvidia-smi
|
||||
|
||||
- name: Set `machine_type` for report and artifact names
|
||||
working-directory: /transformers
|
||||
shell: bash
|
||||
run: |
|
||||
echo "${{ matrix.machine_type }}"
|
||||
if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then
|
||||
machine_type=single-gpu
|
||||
elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then
|
||||
machine_type=multi-gpu
|
||||
else
|
||||
machine_type=${{ matrix.machine_type }}
|
||||
fi
|
||||
echo "$machine_type"
|
||||
echo "machine_type=$machine_type" >> $GITHUB_ENV
|
||||
|
||||
- name: Environment
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 utils/print_env.py
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- name: Run all tests on GPU
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
export CUDA_VISIBLE_DEVICES="$(python3 utils/set_cuda_devices_for_ci.py --test_folder ${{ matrix.folders }})"
|
||||
echo $CUDA_VISIBLE_DEVICES
|
||||
python3 -m pytest -v -rsfE --make-reports=${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: cat /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/failures_short.txt
|
||||
|
||||
- name: Make sure report directory exists
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports
|
||||
echo "hello" > /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/hello.txt
|
||||
echo "${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports"
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ env.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ env.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports
|
||||
path: /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports
|
||||
@ -1,10 +1,10 @@
|
||||
name: Self-hosted runner (AMD mi210 CI caller)
|
||||
|
||||
on:
|
||||
#workflow_run:
|
||||
# workflows: ["Self-hosted runner (push-caller)"]
|
||||
# branches: ["main"]
|
||||
# types: [completed]
|
||||
workflow_run:
|
||||
workflows: ["Self-hosted runner (push-caller)"]
|
||||
branches: ["main"]
|
||||
types: [completed]
|
||||
push:
|
||||
branches:
|
||||
- run_amd_push_ci_caller*
|
||||
|
||||
@ -1,10 +1,10 @@
|
||||
name: Self-hosted runner (AMD mi250 CI caller)
|
||||
|
||||
on:
|
||||
#workflow_run:
|
||||
# workflows: ["Self-hosted runner (push-caller)"]
|
||||
# branches: ["main"]
|
||||
# types: [completed]
|
||||
workflow_run:
|
||||
workflows: ["Self-hosted runner (push-caller)"]
|
||||
branches: ["main"]
|
||||
types: [completed]
|
||||
push:
|
||||
branches:
|
||||
- run_amd_push_ci_caller*
|
||||
|
||||
@ -1,10 +1,10 @@
|
||||
name: Self-hosted runner (AMD mi300 CI caller)
|
||||
|
||||
on:
|
||||
#workflow_run:
|
||||
# workflows: ["Self-hosted runner (push-caller)"]
|
||||
# branches: ["main"]
|
||||
# types: [completed]
|
||||
workflow_run:
|
||||
workflows: ["Self-hosted runner (push-caller)"]
|
||||
branches: ["main"]
|
||||
types: [completed]
|
||||
push:
|
||||
branches:
|
||||
- run_amd_push_ci_caller*
|
||||
|
||||
.github/workflows/self-push-amd.yml (1 changed line)
@ -14,6 +14,7 @@ env:
|
||||
MKL_NUM_THREADS: 8
|
||||
PYTEST_TIMEOUT: 60
|
||||
TF_FORCE_GPU_ALLOW_GROWTH: true
|
||||
RUN_PT_TF_CROSS_TESTS: 1
|
||||
HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
|
||||
|
||||
jobs:
|
||||
|
||||
.github/workflows/self-push.yml (1 changed line)
@ -24,6 +24,7 @@ env:
|
||||
MKL_NUM_THREADS: 8
|
||||
PYTEST_TIMEOUT: 60
|
||||
TF_FORCE_GPU_ALLOW_GROWTH: true
|
||||
RUN_PT_TF_CROSS_TESTS: 1
|
||||
CUDA_VISIBLE_DEVICES: 0,1
|
||||
|
||||
jobs:
|
||||
|
||||
@ -12,7 +12,7 @@ on:
|
||||
jobs:
|
||||
model-ci:
|
||||
name: Model CI
|
||||
uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
|
||||
uses: ./.github/workflows/self-scheduled-amd.yml
|
||||
with:
|
||||
job: run_models_gpu
|
||||
slack_report_channel: "#transformers-ci-daily-amd"
|
||||
@ -23,7 +23,7 @@ jobs:
|
||||
|
||||
torch-pipeline:
|
||||
name: Torch pipeline CI
|
||||
uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
|
||||
uses: ./.github/workflows/self-scheduled-amd.yml
|
||||
with:
|
||||
job: run_pipelines_torch_gpu
|
||||
slack_report_channel: "#transformers-ci-daily-amd"
|
||||
@ -34,7 +34,7 @@ jobs:
|
||||
|
||||
example-ci:
|
||||
name: Example CI
|
||||
uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
|
||||
uses: ./.github/workflows/self-scheduled-amd.yml
|
||||
with:
|
||||
job: run_examples_gpu
|
||||
slack_report_channel: "#transformers-ci-daily-amd"
|
||||
@ -45,7 +45,7 @@ jobs:
|
||||
|
||||
deepspeed-ci:
|
||||
name: DeepSpeed CI
|
||||
uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
|
||||
uses: ./.github/workflows/self-scheduled-amd.yml
|
||||
with:
|
||||
job: run_torch_cuda_extensions_gpu
|
||||
slack_report_channel: "#transformers-ci-daily-amd"
|
||||
|
||||
@ -12,10 +12,10 @@ on:
|
||||
jobs:
|
||||
model-ci:
|
||||
name: Model CI
|
||||
uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
|
||||
uses: ./.github/workflows/self-scheduled-amd.yml
|
||||
with:
|
||||
job: run_models_gpu
|
||||
slack_report_channel: "#amd-hf-ci"
|
||||
slack_report_channel: "#transformers-ci-daily-amd"
|
||||
runner: mi250
|
||||
docker: huggingface/transformers-pytorch-amd-gpu
|
||||
ci_event: Scheduled CI (AMD) - mi250
|
||||
@ -23,10 +23,10 @@ jobs:
|
||||
|
||||
torch-pipeline:
|
||||
name: Torch pipeline CI
|
||||
uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
|
||||
uses: ./.github/workflows/self-scheduled-amd.yml
|
||||
with:
|
||||
job: run_pipelines_torch_gpu
|
||||
slack_report_channel: "#amd-hf-ci"
|
||||
slack_report_channel: "#transformers-ci-daily-amd"
|
||||
runner: mi250
|
||||
docker: huggingface/transformers-pytorch-amd-gpu
|
||||
ci_event: Scheduled CI (AMD) - mi250
|
||||
@ -34,10 +34,10 @@ jobs:
|
||||
|
||||
example-ci:
|
||||
name: Example CI
|
||||
uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
|
||||
uses: ./.github/workflows/self-scheduled-amd.yml
|
||||
with:
|
||||
job: run_examples_gpu
|
||||
slack_report_channel: "#amd-hf-ci"
|
||||
slack_report_channel: "#transformers-ci-daily-amd"
|
||||
runner: mi250
|
||||
docker: huggingface/transformers-pytorch-amd-gpu
|
||||
ci_event: Scheduled CI (AMD) - mi250
|
||||
@ -45,10 +45,10 @@ jobs:
|
||||
|
||||
deepspeed-ci:
|
||||
name: DeepSpeed CI
|
||||
uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main
|
||||
uses: ./.github/workflows/self-scheduled-amd.yml
|
||||
with:
|
||||
job: run_torch_cuda_extensions_gpu
|
||||
slack_report_channel: "#amd-hf-ci"
|
||||
slack_report_channel: "#transformers-ci-daily-amd"
|
||||
runner: mi250
|
||||
docker: huggingface/transformers-pytorch-deepspeed-amd-gpu
|
||||
ci_event: Scheduled CI (AMD) - mi250
|
||||
|
||||
.github/workflows/self-scheduled-amd.yml (new file, 349 lines)
@ -0,0 +1,349 @@
|
||||
name: Self-hosted runner (scheduled-amd)
|
||||
|
||||
# Note: For the AMD CI, we rely on a caller workflow and on the workflow_call event to trigger the
|
||||
# CI in order to run it on both MI210 and MI250, without having to use matrix here which pushes
|
||||
# us towards the limit of allowed jobs on GitHub Actions.
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
inputs:
|
||||
job:
|
||||
required: true
|
||||
type: string
|
||||
slack_report_channel:
|
||||
required: true
|
||||
type: string
|
||||
runner:
|
||||
required: true
|
||||
type: string
|
||||
docker:
|
||||
required: true
|
||||
type: string
|
||||
ci_event:
|
||||
required: true
|
||||
type: string
|
||||
|
||||
env:
|
||||
HF_HOME: /mnt/cache
|
||||
TRANSFORMERS_IS_CI: yes
|
||||
OMP_NUM_THREADS: 8
|
||||
MKL_NUM_THREADS: 8
|
||||
RUN_SLOW: yes
|
||||
HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
|
||||
SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
|
||||
NUM_SLICES: 2
|
||||
|
||||
# Important note: each job (run_tests_single_gpu, run_tests_multi_gpu, run_examples_gpu, run_pipelines_torch_gpu) requires all the previous jobs before running.
|
||||
# This is done so that we avoid parallelizing the scheduled tests, to leave available
|
||||
# runners for the push CI that is running on the same machine.
|
||||
jobs:
|
||||
check_runner_status:
|
||||
name: Check Runner Status
|
||||
runs-on: ubuntu-22.04
|
||||
steps:
|
||||
- name: Checkout transformers
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 2
|
||||
|
||||
- name: Check Runner Status
|
||||
run: python utils/check_self_hosted_runner.py --target_runners hf-amd-mi210-ci-1gpu-1,hf-amd-mi250-ci-1gpu-1,hf-amd-mi300-ci-1gpu-1 --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
|
||||
|
||||
check_runners:
|
||||
name: Check Runners
|
||||
needs: check_runner_status
|
||||
strategy:
|
||||
matrix:
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', self-hosted, amd-gpu, '${{ inputs.runner }}']
|
||||
container:
|
||||
image: huggingface/transformers-pytorch-amd-gpu
|
||||
options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: ROCM-SMI
|
||||
run: |
|
||||
rocm-smi
|
||||
|
||||
- name: ROCM-INFO
|
||||
run: |
|
||||
rocminfo | grep "Agent" -A 14
|
||||
|
||||
- name: Show ROCR environment
|
||||
run: |
|
||||
echo "ROCR: $ROCR_VISIBLE_DEVICES"
|
||||
|
||||
setup:
|
||||
if: contains(fromJSON('["run_models_gpu"]'), inputs.job)
|
||||
name: Setup
|
||||
needs: check_runners
|
||||
strategy:
|
||||
matrix:
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', self-hosted, amd-gpu, '${{ inputs.runner }}']
|
||||
container:
|
||||
image: huggingface/transformers-pytorch-amd-gpu
|
||||
options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
outputs:
|
||||
folder_slices: ${{ steps.set-matrix.outputs.folder_slices }}
|
||||
slice_ids: ${{ steps.set-matrix.outputs.slice_ids }}
|
||||
steps:
|
||||
- name: Update clone
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
|
||||
- name: Cleanup
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
rm -rf tests/__pycache__
|
||||
rm -rf tests/models/__pycache__
|
||||
rm -rf reports
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- id: set-matrix
|
||||
name: Identify models to test
|
||||
working-directory: /transformers/tests
|
||||
run: |
|
||||
echo "folder_slices=$(python3 ../utils/split_model_tests.py --num_splits ${{ env.NUM_SLICES }})" >> $GITHUB_OUTPUT
|
||||
echo "slice_ids=$(python3 -c 'd = list(range(${{ env.NUM_SLICES }})); print(d)')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: ROCM-SMI
|
||||
run: |
|
||||
rocm-smi
|
||||
|
||||
- name: ROCM-INFO
|
||||
run: |
|
||||
rocminfo | grep "Agent" -A 14
|
||||
|
||||
- name: Show ROCR environment
|
||||
run: |
|
||||
echo "ROCR: $ROCR_VISIBLE_DEVICES"
|
||||
|
||||
- name: Environment
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 utils/print_env.py
|
||||
|
||||
run_models_gpu:
|
||||
if: ${{ inputs.job == 'run_models_gpu' }}
|
||||
name: Single GPU tests
|
||||
needs: setup
|
||||
strategy:
|
||||
max-parallel: 1 # For now, not to parallelize. Can change later if it works well.
|
||||
fail-fast: false
|
||||
matrix:
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
slice_id: ${{ fromJSON(needs.setup.outputs.slice_ids) }}
|
||||
uses: ./.github/workflows/model_jobs_amd.yml
|
||||
with:
|
||||
folder_slices: ${{ needs.setup.outputs.folder_slices }}
|
||||
machine_type: ${{ matrix.machine_type }}
|
||||
slice_id: ${{ matrix.slice_id }}
|
||||
runner: ${{ inputs.runner }}
|
||||
docker: ${{ inputs.docker }}
|
||||
secrets: inherit
|
||||
|
||||
run_pipelines_torch_gpu:
|
||||
if: ${{ inputs.job == 'run_pipelines_torch_gpu' }}
|
||||
name: PyTorch pipelines
|
||||
needs: check_runners
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', self-hosted, amd-gpu, '${{ inputs.runner }}']
|
||||
container:
|
||||
image: ${{ inputs.docker }}
|
||||
options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Update clone
|
||||
working-directory: /transformers
|
||||
run: git fetch && git checkout ${{ github.sha }}
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
|
||||
- name: ROCM-SMI
|
||||
run: |
|
||||
rocm-smi
|
||||
|
||||
- name: ROCM-INFO
|
||||
run: |
|
||||
rocminfo | grep "Agent" -A 14
|
||||
|
||||
- name: Show ROCR environment
|
||||
run: |
|
||||
echo "ROCR: $ROCR_VISIBLE_DEVICES"
|
||||
|
||||
- name: Environment
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 utils/print_env.py
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- name: Run all pipeline tests on GPU
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports tests/pipelines -m "not not_device_test"
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: cat /transformers/reports/${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports/failures_short.txt
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports
|
||||
path: /transformers/reports/${{ matrix.machine_type }}_run_pipelines_torch_gpu_test_reports
|
||||
|
||||
run_examples_gpu:
|
||||
if: ${{ inputs.job == 'run_examples_gpu' }}
|
||||
name: Examples directory
|
||||
needs: check_runners
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
machine_type: [single-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', self-hosted, amd-gpu, '${{ inputs.runner }}']
|
||||
container:
|
||||
image: ${{ inputs.docker }}
|
||||
options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Update clone
|
||||
working-directory: /transformers
|
||||
run: git fetch && git checkout ${{ github.sha }}
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
|
||||
- name: ROCM-SMI
|
||||
run: |
|
||||
rocm-smi
|
||||
|
||||
- name: ROCM-INFO
|
||||
run: |
|
||||
rocminfo | grep "Agent" -A 14
|
||||
|
||||
- name: Show ROCR environment
|
||||
run: |
|
||||
echo "ROCR: $ROCR_VISIBLE_DEVICES"
|
||||
|
||||
- name: Environment
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 utils/print_env.py
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- name: Run examples tests on GPU
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
pip install -r examples/pytorch/_tests_requirements.txt
|
||||
python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_run_examples_gpu_test_reports examples/pytorch -m "not not_device_test"
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: cat /transformers/reports/${{ matrix.machine_type }}_run_examples_gpu_test_reports/failures_short.txt
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_examples_gpu_test_reports"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.machine_type }}_run_examples_gpu_test_reports
|
||||
path: /transformers/reports/${{ matrix.machine_type }}_run_examples_gpu_test_reports
|
||||
|
||||
run_torch_cuda_extensions_gpu:
|
||||
if: ${{ inputs.job == 'run_torch_cuda_extensions_gpu' }}
|
||||
name: Torch ROCm deepspeed tests
|
||||
needs: check_runners
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
machine_type: [single-gpu, multi-gpu]
|
||||
runs-on: ['${{ matrix.machine_type }}', self-hosted, amd-gpu, '${{ inputs.runner }}']
|
||||
container:
|
||||
image: ${{ inputs.docker }}
|
||||
options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
steps:
|
||||
- name: Update clone
|
||||
working-directory: /transformers
|
||||
run: git fetch && git checkout ${{ github.sha }}
|
||||
|
||||
- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
|
||||
working-directory: /transformers
|
||||
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .
|
||||
|
||||
- name: ROCM-SMI
|
||||
run: |
|
||||
rocm-smi
|
||||
|
||||
- name: ROCM-INFO
|
||||
run: |
|
||||
rocminfo | grep "Agent" -A 14
|
||||
|
||||
- name: Show ROCR environment
|
||||
run: |
|
||||
echo "ROCR: $ROCR_VISIBLE_DEVICES"
|
||||
|
||||
- name: Environment
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 utils/print_env.py
|
||||
|
||||
- name: Show installed libraries and their versions
|
||||
working-directory: /transformers
|
||||
run: pip freeze
|
||||
|
||||
- name: Run all tests on GPU
|
||||
working-directory: /transformers
|
||||
run: python3 -m pytest -v --make-reports=${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports tests/deepspeed tests/extended -m "not not_device_test"
|
||||
|
||||
- name: Failure short reports
|
||||
if: ${{ failure() }}
|
||||
continue-on-error: true
|
||||
run: cat /transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports/failures_short.txt
|
||||
|
||||
- name: "Test suite reports artifacts: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports"
|
||||
if: ${{ always() }}
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
|
||||
path: /transformers/reports/${{ matrix.machine_type }}_run_torch_cuda_extensions_gpu_test_reports
|
||||
|
||||
send_results:
|
||||
name: Slack Report
|
||||
needs: [
|
||||
check_runner_status,
|
||||
check_runners,
|
||||
setup,
|
||||
run_models_gpu,
|
||||
run_pipelines_torch_gpu,
|
||||
run_examples_gpu,
|
||||
run_torch_cuda_extensions_gpu
|
||||
]
|
||||
if: ${{ always() }}
|
||||
uses: ./.github/workflows/slack-report.yml
|
||||
with:
|
||||
job: ${{ inputs.job }}
|
||||
# This would be `skipped` if `setup` is skipped.
|
||||
setup_status: ${{ needs.setup.result }}
|
||||
slack_report_channel: ${{ inputs.slack_report_channel }}
|
||||
# This would be an empty string if `setup` is skipped.
|
||||
folder_slices: ${{ needs.setup.outputs.folder_slices }}
|
||||
quantization_matrix: ${{ needs.setup.outputs.quantization_matrix }}
|
||||
ci_event: ${{ inputs.ci_event }}
|
||||
|
||||
secrets: inherit
|
||||
.github/workflows/self-scheduled.yml (3 changed lines)
@ -40,6 +40,7 @@ env:
|
||||
HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
|
||||
SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
|
||||
TF_FORCE_GPU_ALLOW_GROWTH: true
|
||||
RUN_PT_TF_CROSS_TESTS: 1
|
||||
CUDA_VISIBLE_DEVICES: 0,1
|
||||
NUM_SLICES: 2
|
||||
|
||||
@ -365,7 +366,7 @@ jobs:
|
||||
run: |
|
||||
python3 -m pip uninstall -y deepspeed
|
||||
rm -rf DeepSpeed
|
||||
git clone https://github.com/deepspeedai/DeepSpeed && cd DeepSpeed && rm -rf build
|
||||
git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build
|
||||
DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check
|
||||
|
||||
- name: NVIDIA-SMI
|
||||
|
||||
.github/workflows/ssh-runner.yml (1 changed line)
@ -23,6 +23,7 @@ env:
|
||||
SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
|
||||
TF_FORCE_GPU_ALLOW_GROWTH: true
|
||||
CUDA_VISIBLE_DEVICES: 0,1
|
||||
RUN_PT_TF_CROSS_TESTS: 1
|
||||
|
||||
jobs:
|
||||
get_runner:
|
||||
|
||||
.github/workflows/trufflehog.yml (2 changed lines)
@ -16,5 +16,3 @@ jobs:
|
||||
fetch-depth: 0
|
||||
- name: Secret Scanning
|
||||
uses: trufflesecurity/trufflehog@main
|
||||
with:
|
||||
extra_args: --results=verified,unknown
|
||||
|
||||
@ -343,6 +343,8 @@ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/t
|
||||
|
||||
Like the slow tests, there are other environment variables available which are not enabled by default during testing:
|
||||
- `RUN_CUSTOM_TOKENIZERS`: Enables tests for custom tokenizers.
|
||||
- `RUN_PT_FLAX_CROSS_TESTS`: Enables tests for PyTorch + Flax integration.
|
||||
- `RUN_PT_TF_CROSS_TESTS`: Enables tests for TensorFlow + PyTorch integration.
|
||||
|
||||
More environment variables and additional information can be found in the [testing_utils.py](https://github.com/huggingface/transformers/blob/main/src/transformers/testing_utils.py).
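A minimal invocation combining the flags above might look like the following; the test module path is only an illustration, and like `RUN_SLOW` these flags are off unless you set them:

```bash
# Enable the cross-framework integration tests for a single test module
# (the file path is an arbitrary example).
RUN_PT_FLAX_CROSS_TESTS=yes RUN_PT_TF_CROSS_TESTS=yes python -m pytest -sv tests/models/bert/test_modeling_bert.py
```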
|
||||
|
||||
|
||||
Makefile (3 changed lines)
@ -37,6 +37,7 @@ autogenerate_code: deps_table_update
|
||||
repo-consistency:
|
||||
python utils/check_copies.py
|
||||
python utils/check_modular_conversion.py
|
||||
python utils/check_table.py
|
||||
python utils/check_dummies.py
|
||||
python utils/check_repo.py
|
||||
python utils/check_inits.py
|
||||
@ -45,6 +46,7 @@ repo-consistency:
|
||||
python utils/check_doctest_list.py
|
||||
python utils/update_metadata.py --check-only
|
||||
python utils/check_docstrings.py
|
||||
python utils/check_support_list.py
|
||||
|
||||
# this target runs checks on all files
|
||||
|
||||
@ -80,6 +82,7 @@ fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency
|
||||
fix-copies:
|
||||
python utils/check_copies.py --fix_and_overwrite
|
||||
python utils/check_modular_conversion.py --fix_and_overwrite
|
||||
python utils/check_table.py --fix_and_overwrite
|
||||
python utils/check_dummies.py --fix_and_overwrite
|
||||
python utils/check_doctest_list.py --fix_and_overwrite
|
||||
python utils/check_docstrings.py --fix_and_overwrite
|
||||
|
||||
README.md (28 changed lines)
@ -249,43 +249,23 @@ The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/sta
|
||||
|
||||
### With pip
|
||||
|
||||
This repository is tested on Python 3.9+, Flax 0.4.1+, PyTorch 2.0+, and TensorFlow 2.6+.
|
||||
This repository is tested on Python 3.9+, Flax 0.4.1+, PyTorch 1.11+, and TensorFlow 2.6+.
|
||||
|
||||
You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).
|
||||
|
||||
First, create a virtual environment with the version of Python you're going to use and activate it.
|
||||
|
||||
**macOS/Linux**
|
||||
|
||||
```bash
python -m venv env
source env/bin/activate
```
|
||||
|
||||
**Windows**
|
||||
|
||||
```bash
python -m venv env
env\Scripts\activate
```
|
||||
|
||||
To use 🤗 Transformers, you must install at least one of Flax, PyTorch, or TensorFlow. Refer to the official installation guides for platform-specific commands:
|
||||
|
||||
[TensorFlow installation page](https://www.tensorflow.org/install/),
|
||||
[PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or [Flax](https://github.com/google/flax#quick-install) and [Jax](https://github.com/google/jax#installation)
|
||||
Then, you will need to install at least one of Flax, PyTorch, or TensorFlow.
|
||||
Please refer to [TensorFlow installation page](https://www.tensorflow.org/install/), [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or [Flax](https://github.com/google/flax#quick-install) and [Jax](https://github.com/google/jax#installation) installation pages regarding the specific installation command for your platform.
|
||||
|
||||
When one of those backends has been installed, 🤗 Transformers can be installed using pip as follows:
|
||||
|
||||
```
|
||||
```bash
|
||||
pip install transformers
|
||||
```
|
||||
|
||||
If you'd like to play with the examples or need the bleeding edge of the code and can't wait for a new release, you must [install the library from source](https://huggingface.co/docs/transformers/installation#installing-from-source).
|
||||
|
||||
```
|
||||
git clone https://github.com/huggingface/transformers.git
|
||||
cd transformers
|
||||
pip install .
|
||||
```
|
||||
|
||||
### With conda
|
||||
|
||||
🤗 Transformers can be installed using conda as follows:
|
||||
|
||||
@ -15,7 +15,7 @@ to add it.
|
||||
|
||||
Keywords: Open-source, LLaMa, GPT-J, instruction, assistant
|
||||
|
||||
## [recommenders](https://github.com/recommenders-team/recommenders)
|
||||
## [recommenders](https://github.com/microsoft/recommenders)
|
||||
|
||||
This repository contains examples and best practices for building recommendation systems, provided as Jupyter notebooks. It goes over several aspects required to build efficient recommendation systems: data preparation, modeling, evaluation, model selection & optimization, as well as operationalization
|
||||
|
||||
@ -29,7 +29,7 @@ Keywords: inpainting, SD, Stable Diffusion
|
||||
|
||||
## [flair](https://github.com/flairNLP/flair)
|
||||
|
||||
FLAIR is a powerful PyTorch NLP framework, covering several important tasks: NER, sentiment-analysis, part-of-speech tagging, text and document embeddings, among other things.
|
||||
FLAIR is a powerful PyTorch NLP framework, convering several important tasks: NER, sentiment-analysis, part-of-speech tagging, text and document embeddings, among other things.
|
||||
|
||||
Keywords: NLP, text embedding, document embedding, biomedical, NER, PoS, sentiment-analysis
|
||||
|
||||
@ -39,15 +39,15 @@ MindsDB is a low-code ML platform, which automates and integrates several ML fra
|
||||
|
||||
Keywords: Database, low-code, AI table
|
||||
|
||||
## [langchain](https://github.com/langchain-ai/langchain)
|
||||
## [langchain](https://github.com/hwchase17/langchain)
|
||||
|
||||
[langchain](https://github.com/langchain-ai/langchain) is aimed at assisting in the development of apps merging both LLMs and other sources of knowledge. The library allows chaining calls to applications, creating a sequence across many tools.
|
||||
[langchain](https://github.com/hwchase17/langchain) is aimed at assisting in the development of apps merging both LLMs and other sources of knowledge. The library allows chaining calls to applications, creating a sequence across many tools.
|
||||
|
||||
Keywords: LLMs, Large Language Models, Agents, Chains
|
||||
|
||||
## [LlamaIndex](https://github.com/run-llama/llama_index)
|
||||
## [LlamaIndex](https://github.com/jerryjliu/llama_index)
|
||||
|
||||
[LlamaIndex](https://github.com/run-llama/llama_index) is a project that provides a central interface to connect your LLM's with external data. It provides various kinds of indices and retrieval mechanisms to perform different LLM tasks and obtain knowledge-augmented results.
|
||||
[LlamaIndex](https://github.com/jerryjliu/llama_index) is a project that provides a central interface to connect your LLM's with external data. It provides various kinds of indices and retreival mechanisms to perform different LLM tasks and obtain knowledge-augmented results.
|
||||
|
||||
Keywords: LLMs, Large Language Models, Data Retrieval, Indices, Knowledge Augmentation
|
||||
|
||||
@ -146,9 +146,9 @@ Keywords: Framework, simplicity, NLP
|
||||
|
||||
Keywords: LLM, Agents, HF Hub
|
||||
|
||||
## [transformers.js](https://github.com/huggingface/transformers.js/)
|
||||
## [transformers.js](https://xenova.github.io/transformers.js/)
|
||||
|
||||
[transformers.js](https://github.com/huggingface/transformers.js/) is a JavaScript library targeted at running models from transformers directly within the browser.
|
||||
[transformers.js](https://xenova.github.io/transformers.js/) is a JavaScript library targeted at running models from transformers directly within the browser.
|
||||
|
||||
Keywords: Transformers, JavaScript, browser
|
||||
|
||||
@ -437,7 +437,7 @@ Keywords: DALL-E, Russian
|
||||
|
||||
Keywords: Knowledge Extraction, Knowledge Graphs
|
||||
|
||||
## [Nebuly](https://github.com/nebuly-ai/optimate)
|
||||
## [Nebuly](https://github.com/nebuly-ai/nebuly)
|
||||
|
||||
Nebuly is the next-generation platform to monitor and optimize your AI costs in one place. The platform connects to all your AI cost sources (compute, API providers, AI software licenses, etc) and centralizes them in one place to give you full visibility on a model basis. The platform also provides optimization recommendations and a co-pilot model that can guide during the optimization process. The platform builds on top of the open-source tools allowing you to optimize the different steps of your AI stack to squeeze out the best possible cost performances.
|
||||
|
||||
|
||||
@ -1,49 +0,0 @@
|
||||
# Benchmarks
|
||||
|
||||
You might want to add new benchmarks.
|
||||
|
||||
You will need to define a python function named `run_benchmark` in your python file and the file must be located in this `benchmark/` directory.
|
||||
|
||||
The expected function signature is the following:
|
||||
|
||||
```py
|
||||
def run_benchmark(logger: Logger, branch: str, commit_id: str, commit_msg: str, num_tokens_to_generate=100):
|
||||
```
|
||||
|
||||
## Writing metrics to the database
|
||||
|
||||
`MetricRecorder` is thread-safe, in the sense of the python [`Thread`](https://docs.python.org/3/library/threading.html#threading.Thread). This means you can start a background thread to do the readings on the device measurements while not blocking the main thread to execute the model measurements.
|
||||
|
||||
cf [`llama.py`](./llama.py) to see an example of this in practice.
|
||||
|
||||
```py
|
||||
from benchmarks_entrypoint import MetricsRecorder
|
||||
import psycopg2
|
||||
|
||||
def run_benchmark(logger: Logger, branch: str, commit_id: str, commit_msg: str, num_tokens_to_generate=100):
|
||||
metrics_recorder = MetricsRecorder(psycopg2.connect("dbname=metrics"), logger, branch, commit_id, commit_msg)
|
||||
benchmark_id = metrics_recorder.initialise_benchmark({"gpu_name": gpu_name, "model_id": model_id})
|
||||
# To collect device measurements
|
||||
metrics_recorder.collect_device_measurements(
|
||||
benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes
|
||||
)
|
||||
# To collect your model measurements
|
||||
metrics_recorder.collect_model_measurements(
|
||||
benchmark_id,
|
||||
{
|
||||
"model_load_time": model_load_time,
|
||||
"first_eager_forward_pass_time_secs": first_eager_fwd_pass_time,
|
||||
"second_eager_forward_pass_time_secs": second_eager_fwd_pass_time,
|
||||
"first_eager_generate_time_secs": first_eager_generate_time,
|
||||
"second_eager_generate_time_secs": second_eager_generate_time,
|
||||
"time_to_first_token_secs": time_to_first_token,
|
||||
"time_to_second_token_secs": time_to_second_token,
|
||||
"time_to_third_token_secs": time_to_third_token,
|
||||
"time_to_next_token_mean_secs": mean_time_to_next_token,
|
||||
"first_compile_generate_time_secs": first_compile_generate_time,
|
||||
"second_compile_generate_time_secs": second_compile_generate_time,
|
||||
"third_compile_generate_time_secs": third_compile_generate_time,
|
||||
"fourth_compile_generate_time_secs": fourth_compile_generate_time,
|
||||
},
|
||||
)
|
||||
```
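The background-thread pattern mentioned above might look roughly like this; it is a sketch only, and the one-second sampling interval, the placeholder readings and the stop event are assumptions rather than part of `MetricsRecorder`'s API:

```py
import threading
import time


def start_device_sampling(metrics_recorder, benchmark_id, stop_event, interval_secs=1.0):
    """Sample device metrics on a background thread while the main thread runs the model."""

    def _sample():
        while not stop_event.is_set():
            # Placeholder readings -- a real benchmark would query psutil / NVML here.
            cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes = 0.0, 0, 0.0, 0
            metrics_recorder.collect_device_measurements(
                benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes
            )
            time.sleep(interval_secs)

    thread = threading.Thread(target=_sample, daemon=True)
    thread.start()
    return thread


# Usage sketch: sample in the background, record model measurements on the main
# thread, then stop the sampler.
#   stop_event = threading.Event()
#   sampler = start_device_sampling(metrics_recorder, benchmark_id, stop_event)
#   ...collect_model_measurements(...) on the main thread...
#   stop_event.set(); sampler.join()
```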
|
||||
@ -1,144 +0,0 @@
|
||||
import argparse
|
||||
import importlib.util
|
||||
import logging
|
||||
import os
|
||||
from typing import Dict
|
||||
import psycopg2
|
||||
import sys
|
||||
|
||||
from psycopg2.extras import Json
|
||||
from psycopg2.extensions import register_adapter
|
||||
|
||||
|
||||
register_adapter(dict, Json)
|
||||
|
||||
|
||||
class ImportModuleException(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class MetricsRecorder:
|
||||
def __init__(self, connection, logger: logging.Logger, branch: str, commit_id: str, commit_msg: str):
|
||||
self.conn = connection
|
||||
self.conn.autocommit = True
|
||||
self.logger = logger
|
||||
self.branch = branch
|
||||
self.commit_id = commit_id
|
||||
self.commit_msg = commit_msg
|
||||
|
||||
def initialise_benchmark(self, metadata: Dict[str, str]) -> int:
|
||||
"""
|
||||
Creates a new benchmark, returns the benchmark id
|
||||
"""
|
||||
# gpu_name: str, model_id: str
|
||||
with self.conn.cursor() as cur:
|
||||
cur.execute(
|
||||
"INSERT INTO benchmarks (branch, commit_id, commit_message, metadata) VALUES (%s, %s, %s, %s) RETURNING benchmark_id",
|
||||
(self.branch, self.commit_id, self.commit_msg, metadata),
|
||||
)
|
||||
benchmark_id = cur.fetchone()[0]
|
||||
logger.debug(f"initialised benchmark #{benchmark_id}")
|
||||
return benchmark_id
|
||||
|
||||
def collect_device_measurements(self, benchmark_id: int, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes):
|
||||
"""
|
||||
Collect device metrics, such as CPU & GPU usage. These are "static", as in you cannot pass arbitrary arguments to the function.
|
||||
"""
|
||||
with self.conn.cursor() as cur:
|
||||
cur.execute(
|
||||
"INSERT INTO device_measurements (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes) VALUES (%s, %s, %s, %s, %s)",
|
||||
(benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes),
|
||||
)
|
||||
self.logger.debug(
|
||||
f"inserted device measurements for benchmark #{benchmark_id} [CPU util: {cpu_util}, mem MBs: {mem_megabytes}, GPU util: {gpu_util}, GPU mem MBs: {gpu_mem_megabytes}]"
|
||||
)
|
||||
|
||||
def collect_model_measurements(self, benchmark_id: int, measurements: Dict[str, float]):
|
||||
with self.conn.cursor() as cur:
|
||||
cur.execute(
|
||||
"""
|
||||
INSERT INTO model_measurements (
|
||||
benchmark_id,
|
||||
measurements
|
||||
) VALUES (%s, %s)
|
||||
""",
|
||||
(
|
||||
benchmark_id,
|
||||
measurements,
|
||||
),
|
||||
)
|
||||
self.logger.debug(f"inserted model measurements for benchmark #{benchmark_id}: {measurements}")
|
||||
|
||||
def close(self):
|
||||
self.conn.close()
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
logger.setLevel(logging.INFO)
|
||||
|
||||
handler = logging.StreamHandler(sys.stdout)
|
||||
handler.setLevel(logging.INFO)
|
||||
formatter = logging.Formatter("[%(levelname)s - %(asctime)s] %(message)s")
|
||||
handler.setFormatter(formatter)
|
||||
logger.addHandler(handler)
|
||||
|
||||
|
||||
def parse_arguments():
|
||||
"""
|
||||
Parse command line arguments for the benchmarking CLI.
|
||||
"""
|
||||
parser = argparse.ArgumentParser(description="CLI for benchmarking the huggingface/transformers.")
|
||||
|
||||
parser.add_argument(
|
||||
"branch",
|
||||
type=str,
|
||||
help="The branch name on which the benchmarking is performed.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"commit_id",
|
||||
type=str,
|
||||
help="The commit hash on which the benchmarking is performed.",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"commit_msg",
|
||||
type=str,
|
||||
help="The commit message associated with the commit, truncated to 70 characters.",
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
return args.branch, args.commit_id, args.commit_msg
|
||||
|
||||
|
||||
def import_from_path(module_name, file_path):
|
||||
try:
|
||||
spec = importlib.util.spec_from_file_location(module_name, file_path)
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
sys.modules[module_name] = module
|
||||
spec.loader.exec_module(module)
|
||||
return module
|
||||
except Exception as e:
|
||||
raise ImportModuleException(f"failed to load python module: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
benchmarks_folder_path = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
branch, commit_id, commit_msg = parse_arguments()
|
||||
|
||||
for entry in os.scandir(benchmarks_folder_path):
|
||||
try:
|
||||
if not entry.name.endswith(".py"):
|
||||
continue
|
||||
if entry.path == __file__:
|
||||
continue
|
||||
logger.debug(f"loading: {entry.name}")
|
||||
module = import_from_path(entry.name.split(".")[0], entry.path)
|
||||
logger.info(f"running benchmarks in: {entry.name}")
|
||||
module.run_benchmark(logger, branch, commit_id, commit_msg)
|
||||
except ImportModuleException as e:
|
||||
logger.error(e)
|
||||
except Exception as e:
|
||||
logger.error(f"error running benchmarks for {entry.name}: {e}")
|
||||
@ -1,10 +0,0 @@
|
||||
apiVersion: 1
|
||||
|
||||
providers:
|
||||
- name: 'Transformers Benchmarks'
|
||||
orgId: 1
|
||||
type: file
|
||||
updateIntervalSeconds: 10
|
||||
allowUiUpdates: true
|
||||
options:
|
||||
path: /etc/grafana/dashboards
|
||||
@ -30,7 +30,7 @@
|
||||
"title": "Go to data",
|
||||
"tooltip": "Go to data",
|
||||
"type": "link",
|
||||
"url": "http://transformers-benchmarks.hf.co/d/fdz33iyzln9c0a/transformers-benchmarks?orgId=1&from=${StartTime}&to=${EndTime}"
|
||||
"url": "http://transformers-benchmarks.huggingface.co/d/fdz33iyzln9c0a/transformers-benchmarks?orgId=1&from=${StartTime}&to=${EndTime}"
|
||||
}
|
||||
],
|
||||
"liveNow": true,
|
||||
@ -77,7 +77,7 @@
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 202
|
||||
"value": 196
|
||||
}
|
||||
]
|
||||
},
|
||||
@ -101,7 +101,7 @@
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 524
|
||||
"value": 581
|
||||
}
|
||||
]
|
||||
},
|
||||
@ -113,19 +113,7 @@
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 353
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"matcher": {
|
||||
"id": "byName",
|
||||
"options": "model_id"
|
||||
},
|
||||
"properties": [
|
||||
{
|
||||
"id": "custom.width",
|
||||
"value": 216
|
||||
"value": 379
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -155,14 +143,12 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"type": "grafana-postgresql-datasource"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT commit_id, commit_message, metadata->>'gpu_name' as gpu_name, metadata->>'model_id' as model_id, created_at AS date FROM benchmarks WHERE branch = '${branch}' AND metadata->>'gpu_name' = '${gpu_name}' ORDER BY benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"rawSql": "SELECT commit_id as commit_id, commit_message, gpu_name, created_at AS date FROM benchmarks WHERE branch = '${branch}' ORDER BY benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
@ -320,14 +306,13 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT CAST(m.measurements->'first_eager_forward_pass_time_secs' AS double precision) AS first_eager_forward_pass_time_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND b.metadata->>'gpu_name' = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"rawSql": "SELECT CAST(m.measurements->'first_eager_forward_pass_time_secs' AS double precision) AS first_eager_forward_pass_time_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND gpu_name = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
@ -446,14 +431,13 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT CAST(m.measurements->'second_eager_forward_pass_time_secs' AS double precision) AS second_eager_forward_pass_time_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND b.metadata->>'gpu_name' = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"rawSql": "SELECT CAST(m.measurements->'second_eager_forward_pass_time_secs' AS double precision) AS second_eager_forward_pass_time_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND gpu_name = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
@ -581,14 +565,13 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT CAST(m.measurements->'time_to_first_token_secs' AS double precision) AS time_to_first_token_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND b.metadata->>'gpu_name' = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"rawSql": "SELECT CAST(m.measurements->'time_to_first_token_secs' AS double precision) AS time_to_first_token_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND gpu_name = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
@ -703,14 +686,13 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT CAST(m.measurements->'time_to_second_token_secs' AS double precision) AS time_to_second_token_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND b.metadata->>'gpu_name' = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"rawSql": "SELECT CAST(m.measurements->'time_to_second_token_secs' AS double precision) AS time_to_second_token_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND gpu_name = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
@ -825,14 +807,13 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT CAST(m.measurements->'time_to_third_token_secs' AS double precision) AS time_to_third_token_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND b.metadata->>'gpu_name' = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"rawSql": "SELECT CAST(m.measurements->'time_to_third_token_secs' AS double precision) AS time_to_third_token_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND gpu_name = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
@ -947,14 +928,13 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT CAST(m.measurements->'time_to_next_token_mean_secs' AS double precision) AS time_to_next_token_mean_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND b.metadata->>'gpu_name' = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"rawSql": "SELECT CAST(m.measurements->'time_to_next_token_mean_secs' AS double precision) AS time_to_next_token_mean_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND gpu_name = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
@ -1082,14 +1062,13 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT CAST(m.measurements->'first_compile_generate_time_secs' AS double precision) AS first_compile_generate_time_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND b.metadata->>'gpu_name' = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"rawSql": "SELECT CAST(m.measurements->'first_compile_generate_time_secs' AS double precision) AS first_compile_generate_time_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND gpu_name = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
@ -1204,14 +1183,13 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT CAST(m.measurements->'second_compile_generate_time_secs' AS double precision) AS second_compile_generate_time_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND b.metadata->>'gpu_name' = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"rawSql": "SELECT CAST(m.measurements->'second_compile_generate_time_secs' AS double precision) AS second_compile_generate_time_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND gpu_name = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
@ -1326,14 +1304,13 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT CAST(m.measurements->'third_compile_generate_time_secs' AS double precision) AS third_compile_generate_time_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND b.metadata->>'gpu_name' = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"rawSql": "SELECT CAST(m.measurements->'third_compile_generate_time_secs' AS double precision) AS third_compile_generate_time_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND gpu_name = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
@ -1448,14 +1425,13 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
"rawQuery": true,
|
||||
"rawSql": "SELECT CAST(m.measurements->'fourth_compile_generate_time_secs' AS double precision) AS fourth_compile_generate_time_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND b.metadata->>'gpu_name' = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"rawSql": "SELECT CAST(m.measurements->'fourth_compile_generate_time_secs' AS double precision) AS fourth_compile_generate_time_secs, left(b.commit_id, 7), m.time FROM benchmarks as b JOIN model_measurements AS m ON b.benchmark_id = m.benchmark_id WHERE b.branch = '${branch}' AND gpu_name = '${gpu_name}' ORDER BY b.benchmark_id DESC LIMIT ${last_n_commits};",
|
||||
"refId": "A",
|
||||
"sql": {
|
||||
"columns": [
|
||||
@ -1504,7 +1480,11 @@
|
||||
"id": 15,
|
||||
"panels": [
|
||||
{
|
||||
"datasource": {},
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@ -1548,7 +1528,8 @@
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green"
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
@ -1582,9 +1563,8 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
@ -1685,7 +1665,11 @@
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {},
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@ -1729,7 +1713,8 @@
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green"
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
@ -1763,9 +1748,8 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
@ -1866,7 +1850,11 @@
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {},
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@ -1910,7 +1898,8 @@
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green"
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
@ -1944,9 +1933,8 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
@ -2047,7 +2035,11 @@
|
||||
"type": "timeseries"
|
||||
},
|
||||
{
|
||||
"datasource": {},
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
},
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"color": {
|
||||
@ -2091,7 +2083,8 @@
|
||||
"mode": "absolute",
|
||||
"steps": [
|
||||
{
|
||||
"color": "green"
|
||||
"color": "green",
|
||||
"value": null
|
||||
},
|
||||
{
|
||||
"color": "red",
|
||||
@ -2125,9 +2118,8 @@
|
||||
"targets": [
|
||||
{
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
"uid": "bdz2yss7sxo1sc"
|
||||
},
|
||||
"editorMode": "code",
|
||||
"format": "table",
|
||||
@ -2232,6 +2224,7 @@
|
||||
"type": "row"
|
||||
}
|
||||
],
|
||||
"refresh": "",
|
||||
"schemaVersion": 39,
|
||||
"tags": [],
|
||||
"templating": {
|
||||
@ -2243,7 +2236,6 @@
|
||||
"value": "main"
|
||||
},
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
},
|
||||
@ -2256,7 +2248,7 @@
|
||||
"name": "branch",
|
||||
"options": [],
|
||||
"query": "SELECT DISTINCT branch FROM benchmarks;",
|
||||
"refresh": 1,
|
||||
"refresh": 2,
|
||||
"regex": "",
|
||||
"skipUrlSync": false,
|
||||
"sort": 0,
|
||||
@ -2269,7 +2261,6 @@
|
||||
"value": "1729701492845"
|
||||
},
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
},
|
||||
@ -2290,11 +2281,10 @@
|
||||
{
|
||||
"current": {
|
||||
"selected": false,
|
||||
"text": "1730393397577",
|
||||
"value": "1730393397577"
|
||||
"text": "1730120430069",
|
||||
"value": "1730120430069"
|
||||
},
|
||||
"datasource": {
|
||||
"default": true,
|
||||
"type": "grafana-postgresql-datasource",
|
||||
"uid": "be28nkzirtb0gd"
|
||||
},
|
||||
@ -2322,16 +2312,15 @@
"type": "grafana-postgresql-datasource",
"uid": "be28nkzirtb0gd"
},
"definition": "SELECT DISTINCT metadata->>'gpu_name' FROM benchmarks;",
"description": "",
"definition": "SELECT DISTINCT gpu_name FROM benchmarks;",
"hide": 0,
"includeAll": false,
"label": "GPU",
"multi": false,
"name": "gpu_name",
"options": [],
"query": "SELECT DISTINCT metadata->>'gpu_name' FROM benchmarks;",
"refresh": 1,
"query": "SELECT DISTINCT gpu_name FROM benchmarks;",
"refresh": 2,
"regex": "",
"skipUrlSync": false,
"sort": 0,
@ -2339,7 +2328,7 @@
},
{
"current": {
"selected": true,
"selected": false,
"text": "10",
"value": "10"
},
@ -2370,6 +2359,6 @@
"timezone": "browser",
"title": "Transformers benchmarks",
"uid": "fdz33iyzln9c0a",
"version": 10,
"version": 4,
"weekStart": ""
}

@ -1,17 +0,0 @@
apiVersion: 1
datasources:
- name: grafana-postgresql-datasource
uid: be28nkzirtb0gd
type: postgres
url: $GRAFANA_POSTGRES_DATASOURCE_URL
user: $GRAFANA_POSTGRES_DATASOURCE_USER
secureJsonData:
password: $GRAFANA_POSTGRES_DATASOURCE_PWD
jsonData:
database: metrics
maxOpenConns: 100
maxIdleConns: 100
maxIdleConnsAuto: true
connMaxLifetime: 14400
postgresVersion: 1000
timescaledb: false
@ -3,7 +3,7 @@ CREATE TABLE IF NOT EXISTS benchmarks (
branch VARCHAR(255),
commit_id VARCHAR(72),
commit_message VARCHAR(70),
metadata jsonb,
gpu_name VARCHAR(255),
created_at timestamp without time zone NOT NULL DEFAULT (current_timestamp AT TIME ZONE 'UTC')
);

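The two schema variants above correspond to the dashboard change earlier in this diff: one side keeps the GPU name in a dedicated gpu_name column, the other stores it inside the metadata jsonb payload and reads it back with metadata->>'gpu_name'. A minimal sketch of the two query styles, assuming the local PostgreSQL database named "metrics" that the benchmark script connects to:

import psycopg2

conn = psycopg2.connect("dbname=metrics")
cur = conn.cursor()

# With the dedicated column (gpu_name VARCHAR(255)):
cur.execute("SELECT DISTINCT gpu_name FROM benchmarks;")
# With the jsonb payload (metadata jsonb), as the Grafana templating query does:
# cur.execute("SELECT DISTINCT metadata->>'gpu_name' FROM benchmarks;")

print([row[0] for row in cur.fetchall()])
conn.close()
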
@ -1,25 +1,71 @@
from logging import Logger
import argparse
import json
import logging
import os
import sys
from statistics import mean
from threading import Event, Thread
from time import perf_counter, sleep
from typing import Optional
from benchmarks_entrypoint import MetricsRecorder
import gpustat
import psutil
import psycopg2
import torch

from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, StaticCache
from psycopg2.extras import Json
from psycopg2.extensions import register_adapter


os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter("[%(levelname)s - %(asctime)s] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)

os.environ["TOKENIZERS_PARALLELISM"] = "1"
torch.set_float32_matmul_precision("high")
register_adapter(dict, Json)


def collect_metrics(benchmark_id, continue_metric_collection, metrics_recorder):
def parse_arguments():
"""
Parse command line arguments for the benchmarking CLI.
"""
parser = argparse.ArgumentParser(description="CLI for benchmarking the huggingface/transformers.")

parser.add_argument(
"branch",
type=str,
help="The branch name on which the benchmarking is performed.",
)

parser.add_argument(
"commit_id",
type=str,
help="The commit hash on which the benchmarking is performed.",
)

parser.add_argument(
"commit_msg",
type=str,
help="The commit message associated with the commit, truncated to 70 characters.",
)

args = parser.parse_args()

return args.branch, args.commit_id, args.commit_msg


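The three positional arguments parsed above are fed straight into run_benchmark at the bottom of the script. A small sketch of how they arrive, runnable inside the same module (the script filename and the example values are placeholders, not taken from this diff):

import sys

# Hypothetical command line:
#   python benchmark.py main abc1234 "Some commit message"
sys.argv = ["benchmark.py", "main", "abc1234", "Some commit message"]
branch, commit_id, commit_msg = parse_arguments()
print(branch, commit_id, commit_msg)
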
def collect_metrics(benchmark_id, continue_metric_collection):
p = psutil.Process(os.getpid())
conn = psycopg2.connect("dbname=metrics")
cur = conn.cursor()
while not continue_metric_collection.is_set():
with p.oneshot():
cpu_util = p.cpu_percent()
@ -27,41 +73,47 @@ def collect_metrics(benchmark_id, continue_metric_collection, metrics_recorder):
gpu_stats = gpustat.GPUStatCollection.new_query()
gpu_util = gpu_stats[0]["utilization.gpu"]
gpu_mem_megabytes = gpu_stats[0]["memory.used"]
metrics_recorder.collect_device_measurements(
benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes
cur.execute(
"INSERT INTO device_measurements (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes) VALUES (%s, %s, %s, %s, %s)",
(benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes),
)
sleep(0.01)
conn.commit()
conn.close()


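The MetricsRecorder used on the other side of this hunk is imported from benchmarks_entrypoint and its implementation is not shown in this diff. Judging from the raw psycopg2 loop above, its collect_device_measurements call plausibly wraps the same INSERT; a rough sketch under that assumption (class name and method body are guesses, not the actual helper):

class MetricsRecorderSketch:
    def __init__(self, connection):
        # connection is expected to be a psycopg2 connection to the metrics database
        self.connection = connection

    def collect_device_measurements(self, benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes):
        # Same statement as the explicit cur.execute(...) in the loop above.
        with self.connection.cursor() as cur:
            cur.execute(
                "INSERT INTO device_measurements (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes)"
                " VALUES (%s, %s, %s, %s, %s)",
                (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes),
            )
        self.connection.commit()
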
def run_benchmark(logger: Logger, branch: str, commit_id: str, commit_msg: str, num_tokens_to_generate=100):
def run_benchmark(branch: str, commit_id: str, commit_msg: str, num_tokens_to_generate=100):
continue_metric_collection = Event()
metrics_thread = None
model_id = "meta-llama/Llama-2-7b-hf"
metrics_recorder = MetricsRecorder(psycopg2.connect("dbname=metrics"), logger, branch, commit_id, commit_msg)
try:
gpu_stats = gpustat.GPUStatCollection.new_query()
gpu_name = gpu_stats[0]["name"]
benchmark_id = metrics_recorder.initialise_benchmark({"gpu_name": gpu_name, "model_id": model_id})
logger.info(f"running benchmark #{benchmark_id} on {gpu_name} for {model_id}")
metrics_thread = Thread(
target=collect_metrics,
args=[benchmark_id, continue_metric_collection, metrics_recorder],
conn = psycopg2.connect("dbname=metrics")
cur = conn.cursor()
cur.execute(
"INSERT INTO benchmarks (branch, commit_id, commit_message, gpu_name) VALUES (%s, %s, %s, %s) RETURNING benchmark_id",
(branch, commit_id, commit_msg, gpu_name),
)
conn.commit()
benchmark_id = cur.fetchone()[0]
logger.info(f"running benchmark #{benchmark_id} on {gpu_name}")
metrics_thread = Thread(target=collect_metrics, args=[benchmark_id, continue_metric_collection])
metrics_thread.start()
logger.info("started background thread to fetch device metrics")

os.environ["TOKENIZERS_PARALLELISM"] = "false" # silence warnings when compiling

device = "cuda"
ckpt = "meta-llama/Llama-2-7b-hf"

logger.info("downloading weights")
# This is to avoid counting download in model load time measurement
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)
model = AutoModelForCausalLM.from_pretrained(ckpt, torch_dtype=torch.float16)
gen_config = GenerationConfig(do_sample=False, top_p=1, temperature=1)
logger.info("loading model")
start = perf_counter()
model = AutoModelForCausalLM.from_pretrained(
model_id, torch_dtype=torch.float16, generation_config=gen_config
ckpt, torch_dtype=torch.float16, generation_config=gen_config
).eval()
model.to(device)
torch.cuda.synchronize()
@ -69,7 +121,7 @@ def run_benchmark(logger: Logger, branch: str, commit_id: str, commit_msg: str,
model_load_time = end - start
logger.info(f"loaded model in: {model_load_time}s")

tokenizer = AutoTokenizer.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(ckpt)

prompt = "Why dogs are so cute?"
inputs = tokenizer(prompt, return_tensors="pt").to(device)
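The model-load timing above is a plain perf_counter() bracket with a CUDA synchronize before the end of the measurement (the end = perf_counter() assignment itself presumably sits in an unchanged context line that the hunk does not show). The pattern in isolation, as a runnable sketch:

from time import perf_counter

import torch

start = perf_counter()
# ... load the model and move it to the GPU here ...
if torch.cuda.is_available():
    # flush any queued CUDA work so it is counted in the measurement
    torch.cuda.synchronize()
end = perf_counter()
model_load_time = end - start
print(f"loaded model in: {model_load_time}s")
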
@ -316,7 +368,14 @@ def run_benchmark(logger: Logger, branch: str, commit_id: str, commit_msg: str,
logger.info(f"completed second compile generation in: {fourth_compile_generate_time}s")
logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}")

metrics_recorder.collect_model_measurements(
cur.execute(
"""
INSERT INTO model_measurements (
benchmark_id,
measurements
) VALUES (%s, %s)
""",
(
benchmark_id,
{
"model_load_time": model_load_time,
@ -333,10 +392,17 @@ def run_benchmark(logger: Logger, branch: str, commit_id: str, commit_msg: str,
"third_compile_generate_time_secs": third_compile_generate_time,
"fourth_compile_generate_time_secs": fourth_compile_generate_time,
},
),
)
conn.commit()
conn.close()
except Exception as e:
logger.error(f"Caught exception: {e}")
continue_metric_collection.set()
if metrics_thread is not None:
metrics_thread.join()
metrics_recorder.close()


if __name__ == "__main__":
branch, commit_id, commit_msg = parse_arguments()
run_benchmark(branch, commit_id, commit_msg, num_tokens_to_generate=20)

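One detail worth noting in the model_measurements insert above: passing a plain Python dict as the jsonb measurements value only works because register_adapter(dict, Json) was called at module level. The explicit equivalent, assuming the same local "metrics" database and an existing benchmark row with id 1, would be:

import psycopg2
from psycopg2.extras import Json

conn = psycopg2.connect("dbname=metrics")
with conn.cursor() as cur:
    cur.execute(
        "INSERT INTO model_measurements (benchmark_id, measurements) VALUES (%s, %s)",
        (1, Json({"model_load_time": 12.3})),
    )
conn.commit()
conn.close()
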
@ -61,6 +61,7 @@ NOT_DEVICE_TESTS = {
"test_load_save_without_tied_weights",
"test_tied_weights_keys",
"test_model_weights_reload_no_missing_tied_weights",
"test_pt_tf_model_equivalence",
"test_mismatched_shapes_have_properly_initialized_weights",
"test_matched_shapes_have_loaded_weights_when_some_mismatched_shapes_exist",
"test_model_is_small",
@ -84,6 +85,12 @@ warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_configure(config):
config.addinivalue_line(
"markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
)
config.addinivalue_line(
"markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
)
config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")

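The pytest_configure hook above only registers the marker names; individual tests opt in by decorating themselves. A minimal sketch (the test itself is hypothetical, the marker name is one registered above):

import pytest


@pytest.mark.is_pipeline_test
def test_pipeline_smoke():
    assert True

Such a test can then be selected with pytest -m is_pipeline_test, or skipped with pytest -m "not is_pipeline_test".
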
@ -1,4 +1,4 @@
|
||||
FROM python:3.9-slim
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
USER root
|
||||
ARG REF=main
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM python:3.9-slim
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake wget xz-utils build-essential g++5 libprotobuf-dev protobuf-compiler
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM python:3.9-slim
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM python:3.9-slim
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
USER root
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM python:3.9-slim
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM python:3.9-slim
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM python:3.9-slim
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM python:3.9-slim
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM python:3.9-slim
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM python:3.9-slim
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM python:3.9-slim
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
FROM python:3.9-slim
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
USER root
|
||||
@ -7,5 +7,5 @@ ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
|
||||
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
|
||||
RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing,tiktoken,num2words]"
|
||||
RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing,tiktoken]"
|
||||
RUN pip uninstall -y transformers
|
||||
@ -1,4 +1,4 @@
|
||||
FROM python:3.9-slim
|
||||
FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
ARG REF=main
|
||||
RUN echo ${REF}
|
||||
|
||||
@ -9,7 +9,7 @@ SHELL ["sh", "-lc"]
|
||||
# The following `ARG` are mainly used to specify the versions explicitly & directly in this docker file, and not meant
|
||||
# to be used as arguments for docker build (so far).
|
||||
|
||||
ARG PYTORCH='2.6.0'
|
||||
ARG PYTORCH='2.5.1'
|
||||
# (not always a valid torch version)
|
||||
ARG INTEL_TORCH_EXT='2.3.0'
|
||||
# Example: `cu102`, `cu113`, etc.
|
||||
@ -65,9 +65,6 @@ RUN python3 -m pip install --no-cache-dir python-Levenshtein
|
||||
# For `FastSpeech2ConformerTokenizer` tokenizer
|
||||
RUN python3 -m pip install --no-cache-dir g2p-en
|
||||
|
||||
# For Some bitsandbytes tests
|
||||
RUN python3 -m pip install --no-cache-dir einops
|
||||
|
||||
# When installing in editable mode, `transformers` is not recognized as a package.
|
||||
# this line must be added in order for python to be aware of transformers.
|
||||
RUN cd transformers && python3 setup.py develop
|
||||
|
||||
@ -48,8 +48,8 @@ RUN python3 -m pip uninstall -y torch-tensorrt apex
|
||||
# Pre-build **nightly** release of DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout)
|
||||
RUN python3 -m pip uninstall -y deepspeed
|
||||
# This has to be run inside the GPU VMs running the tests. (So far, it fails here due to GPU checks during compilation.)
|
||||
# Issue: https://github.com/deepspeedai/DeepSpeed/issues/2010
|
||||
# RUN git clone https://github.com/deepspeedai/DeepSpeed && cd DeepSpeed && rm -rf build && \
|
||||
# Issue: https://github.com/microsoft/DeepSpeed/issues/2010
|
||||
# RUN git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build && \
|
||||
# DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
|
||||
|
||||
RUN python3 -m pip install -U "itsdangerous<2.1.0"
|
||||
|
||||
@ -1,18 +1,17 @@
|
||||
FROM rocm/dev-ubuntu-22.04:6.2.4
|
||||
FROM rocm/dev-ubuntu-22.04:6.0.2
|
||||
# rocm/pytorch has no version with 2.1.0
|
||||
LABEL maintainer="Hugging Face"
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
RUN apt update && \
|
||||
apt install -y --no-install-recommends git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-dev python3-pip python3-dev ffmpeg git-lfs && \
|
||||
apt install -y --no-install-recommends git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-dev python3-pip python3-dev ffmpeg && \
|
||||
apt clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN git lfs install
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip numpy
|
||||
|
||||
RUN python3 -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.2.4
|
||||
RUN python3 -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.0
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade importlib-metadata setuptools ninja git+https://github.com/facebookresearch/detectron2.git pytesseract "itsdangerous<2.1.0"
|
||||
|
||||
@ -31,5 +30,5 @@ RUN python3 -m pip uninstall -y tensorflow flax
|
||||
# this line must be added in order for python to be aware of transformers.
|
||||
RUN cd transformers && python3 setup.py develop
|
||||
|
||||
# Remove nvml and nvidia-ml-py as it is not compatible with ROCm. apex is not tested on NVIDIA either.
|
||||
RUN python3 -m pip uninstall py3nvml pynvml nvidia-ml-py apex -y
|
||||
# Remove nvml as it is not compatible with ROCm. apex is not tested on NVIDIA either.
|
||||
RUN python3 -m pip uninstall py3nvml pynvml apex -y
|
||||
|
||||
@ -1,11 +1,11 @@
|
||||
FROM rocm/dev-ubuntu-22.04:6.2.4
|
||||
FROM rocm/dev-ubuntu-22.04:5.6
|
||||
LABEL maintainer="Hugging Face"
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
ARG PYTORCH='2.6.0'
|
||||
ARG TORCH_VISION='0.21.0'
|
||||
ARG TORCH_AUDIO='2.6.0'
|
||||
ARG ROCM='6.2.4'
|
||||
ARG PYTORCH='2.1.1'
|
||||
ARG TORCH_VISION='0.16.1'
|
||||
ARG TORCH_AUDIO='2.1.1'
|
||||
ARG ROCM='5.6'
|
||||
|
||||
RUN apt update && \
|
||||
apt install -y --no-install-recommends \
|
||||
@ -16,11 +16,9 @@ RUN apt update && \
|
||||
python-is-python3 \
|
||||
rocrand-dev \
|
||||
rocthrust-dev \
|
||||
rocblas-dev \
|
||||
hipsolver-dev \
|
||||
hipsparse-dev \
|
||||
hipblas-dev \
|
||||
hipblaslt-dev && \
|
||||
rocblas-dev && \
|
||||
apt clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
@ -47,4 +45,4 @@ RUN cd transformers && python3 setup.py develop
|
||||
RUN python3 -c "from deepspeed.launcher.runner import main"
|
||||
|
||||
# Remove nvml as it is not compatible with ROCm
|
||||
RUN python3 -m pip uninstall py3nvml pynvml nvidia-ml-py apex -y
|
||||
RUN python3 -m pip uninstall py3nvml pynvml -y
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-23-11.html#rel-23-11
|
||||
FROM nvcr.io/nvidia/pytorch:23.11-py3
|
||||
FROM nvcr.io/nvidia/pytorch:23.04-py3
|
||||
LABEL maintainer="Hugging Face"
|
||||
|
||||
ARG DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
@ -34,8 +34,8 @@ RUN python3 -m pip uninstall -y torch-tensorrt apex
|
||||
# Pre-build **nightly** release of DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout)
|
||||
RUN python3 -m pip uninstall -y deepspeed
|
||||
# This has to be run inside the GPU VMs running the tests. (So far, it fails here due to GPU checks during compilation.)
|
||||
# Issue: https://github.com/deepspeedai/DeepSpeed/issues/2010
|
||||
# RUN git clone https://github.com/deepspeedai/DeepSpeed && cd DeepSpeed && rm -rf build && \
|
||||
# Issue: https://github.com/microsoft/DeepSpeed/issues/2010
|
||||
# RUN git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build && \
|
||||
# DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
|
||||
|
||||
## For `torchdynamo` tests
|
||||
|
||||
@ -11,7 +11,7 @@ ARG REF=main
|
||||
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
|
||||
|
||||
# If set to nothing, will install the latest version
|
||||
ARG PYTORCH='2.6.0'
|
||||
ARG PYTORCH='2.5.1'
|
||||
ARG TORCH_VISION=''
|
||||
ARG TORCH_AUDIO=''
|
||||
# Example: `cu102`, `cu113`, etc.
|
||||
|
||||
@ -36,26 +36,15 @@ RUN python3 -m pip install --no-cache-dir einops
|
||||
# Add bitsandbytes for mixed int8 testing
|
||||
RUN python3 -m pip install --no-cache-dir bitsandbytes
|
||||
|
||||
# Add auto-gptq for gtpq quantization testing, installed from source for pytorch==2.5.1 compatibility
|
||||
# TORCH_CUDA_ARCH_LIST="7.5+PTX" is added to make the package compile for Tesla T4 gpus available for the CI.
|
||||
RUN pip install gekko
|
||||
RUN git clone https://github.com/PanQiWei/AutoGPTQ.git && cd AutoGPTQ && TORCH_CUDA_ARCH_LIST="7.5+PTX" python3 setup.py install
|
||||
# Add auto-gptq for gtpq quantization testing
|
||||
RUN python3 -m pip install --no-cache-dir auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/
|
||||
|
||||
# Add optimum for gptq quantization testing
|
||||
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/optimum@main#egg=optimum
|
||||
|
||||
# Add PEFT
|
||||
RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/peft@main#egg=peft
|
||||
|
||||
# Add aqlm for quantization testing
|
||||
RUN python3 -m pip install --no-cache-dir aqlm[gpu]==1.0.2
|
||||
|
||||
# Add vptq for quantization testing
|
||||
RUN python3 -m pip install --no-cache-dir vptq
|
||||
|
||||
# Add spqr for quantization testing
|
||||
RUN python3 -m pip install --no-cache-dir spqr_quant[gpu]
|
||||
|
||||
# Add hqq for quantization testing
|
||||
RUN python3 -m pip install --no-cache-dir hqq
|
||||
|
||||
@ -63,8 +52,8 @@ RUN python3 -m pip install --no-cache-dir hqq
|
||||
RUN python3 -m pip install --no-cache-dir gguf
|
||||
|
||||
# Add autoawq for quantization testing
|
||||
# >=v0.2.7 needed for compatibility with transformers > 4.46
|
||||
RUN python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.7.post2/autoawq-0.2.7.post2-py3-none-any.whl
|
||||
# >=v0.2.3 needed for compatibility with torch 2.2.1
|
||||
RUN python3 -m pip install --no-cache-dir https://github.com/casper-hansen/AutoAWQ/releases/download/v0.2.3/autoawq-0.2.3+cu118-cp310-cp310-linux_x86_64.whl
|
||||
|
||||
# Add quanto for quantization testing
|
||||
RUN python3 -m pip install --no-cache-dir optimum-quanto
|
||||
@ -72,13 +61,6 @@ RUN python3 -m pip install --no-cache-dir optimum-quanto
|
||||
# Add eetq for quantization testing
|
||||
RUN python3 -m pip install git+https://github.com/NetEase-FuXi/EETQ.git
|
||||
|
||||
# Add flute-kernel and fast_hadamard_transform for quantization testing
|
||||
RUN python3 -m pip install --no-cache-dir flute-kernel==0.3.0 -i https://flute-ai.github.io/whl/cu118
|
||||
RUN python3 -m pip install --no-cache-dir fast_hadamard_transform==1.0.4.post1
|
||||
|
||||
# Add compressed-tensors for quantization testing
|
||||
RUN python3 -m pip install --no-cache-dir compressed-tensors
|
||||
|
||||
# When installing in editable mode, `transformers` is not recognized as a package.
|
||||
# this line must be added in order for python to be aware of transformers.
|
||||
RUN cd transformers && python3 setup.py develop
|
||||
|
||||
@ -30,26 +30,26 @@
|
||||
- local: conversations
|
||||
title: الدردشة مع المحولات
|
||||
title: البرامج التعليمية
|
||||
- sections:
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: tasks/sequence_classification
|
||||
title: تصنيف النصوص
|
||||
- local: tasks/token_classification
|
||||
title: تصنيف الرموز
|
||||
- local: tasks/question_answering
|
||||
title: الإجابة على الأسئلة
|
||||
- local: tasks/language_modeling
|
||||
title: نمذجة اللغة السببية
|
||||
- local: tasks/masked_language_modeling
|
||||
title: نمذجة اللغة المقنعة
|
||||
- local: tasks/translation
|
||||
title: الترجمة
|
||||
- local: tasks/summarization
|
||||
title: التلخيص
|
||||
- local: tasks/multiple_choice
|
||||
title: الاختيار المتعدد
|
||||
title: معالجة اللغات الطبيعية
|
||||
# - sections:
|
||||
# - isExpanded: false
|
||||
# sections:
|
||||
# - local: tasks/sequence_classification
|
||||
# title: تصنيف النصوص
|
||||
# - local: tasks/token_classification
|
||||
# title: تصنيف الرموز
|
||||
# - local: tasks/question_answering
|
||||
# title: الإجابة على الأسئلة
|
||||
# - local: tasks/language_modeling
|
||||
# title: نمذجة اللغة السببية
|
||||
# - local: tasks/masked_language_modeling
|
||||
# title: نمذجة اللغة المقنعة
|
||||
# - local: tasks/translation
|
||||
# title: الترجمة
|
||||
# - local: tasks/summarization
|
||||
# title: التلخيص
|
||||
# - local: tasks/multiple_choice
|
||||
# title: الاختيار المتعدد
|
||||
# title: معالجة اللغات الطبيعية
|
||||
# - isExpanded: false
|
||||
# sections:
|
||||
# - local: tasks/audio_classification
|
||||
@ -107,7 +107,7 @@
|
||||
# - local: tasks/prompting
|
||||
# title: دليل إرشادي لمحفزات النماذج اللغوية الكبيرة
|
||||
# title: الإرشاد
|
||||
title: أدلة المهام
|
||||
# title: أدلة المهام
|
||||
- sections:
|
||||
- local: fast_tokenizers
|
||||
title: استخدم مجزئيات النصوص السريعة من 🤗 Tokenizers
|
||||
@ -129,20 +129,16 @@
|
||||
title: التصدير إلى TFLite
|
||||
- local: torchscript
|
||||
title: التصدير إلى TorchScript
|
||||
- local: notebooks
|
||||
title: دفاتر الملاحظات مع الأمثلة
|
||||
- local: community
|
||||
title: موارد المجتمع
|
||||
# - local: benchmarks
|
||||
# title: المعايير
|
||||
# - local: notebooks
|
||||
# title: دفاتر الملاحظات مع الأمثلة
|
||||
# - local: community
|
||||
# title: موارد المجتمع
|
||||
- local: troubleshooting
|
||||
title: استكشاف الأخطاء وإصلاحها
|
||||
- local: gguf
|
||||
title: التوافق مع ملفات GGUF
|
||||
- local: tiktoken
|
||||
title: التوافق مع ملفات TikToken
|
||||
- local: modular_transformers
|
||||
title: الوحدات النمطية في `transformers`
|
||||
- local: how_to_hack_models
|
||||
title: اختراق النموذج (الكتابة فوق فئة لاستخدامك)
|
||||
title: أدلة المطورين
|
||||
# - sections:
|
||||
# - local: quantization/overview
|
||||
@ -155,8 +151,6 @@
|
||||
# title: AWQ
|
||||
# - local: quantization/aqlm
|
||||
# title: AQLM
|
||||
# - local: quantization/vptq
|
||||
# title: VPTQ
|
||||
# - local: quantization/quanto
|
||||
# title: Quanto
|
||||
# - local: quantization/eetq
|
||||
|
||||
@ -195,7 +195,7 @@ You have access to the following tools:
To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences.

At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task, then the tools that you want to use.
Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with '/End code' sequence.
Then in the 'Code:' sequence, you shold write the code in simple Python. The code sequence must end with '/End code' sequence.
During each intermediate step, you can use 'print()' to save whatever important information you will then need.
These print outputs will then be available in the 'Observation:' field, for using this information as input for the next step.

@ -205,7 +205,7 @@ Here are a few examples using notional tools:
---
{examples}

Above example were using notional tools that might not exist for you. You only have access to those tools:
Above example were using notional tools that might not exist for you. You only have acces to those tools:
<<tool_names>>
You also can perform computations in the python code you generate.


@ -1,66 +0,0 @@
|
||||
# مجتمع المطورين
|
||||
|
||||
هذه الصفحة تجمع الموارد حول 🤗 Transformers التي طورها المجتمع.
|
||||
|
||||
## موارد المجتمع:
|
||||
|
||||
| المصدر | الوصف | المؤلف |
|
||||
|:----------|:-------------|------:|
|
||||
| [Hugging Face Transformers Glossary Flashcards](https://www.darigovresearch.com/huggingface-transformers-glossary-flashcards) | مجموعة من البطاقات التعليمية القائمة على [Transformers Docs Glossary](glossary) والتي تم وضعها في شكل يمكن تعلمه/مراجعته بسهولة باستخدام [Anki](https://apps.ankiweb.net/) وهو تطبيق مفتوح المصدر متعدد المنصات مصمم خصيصًا للاحتفاظ بالمعرفة على المدى الطويل. شاهد هذا [فيديو تمهيدي حول كيفية استخدام البطاقات التعليمية](https://www.youtube.com/watch?v=Dji_7PILrw). | [Darigov Research](https://www.darigovresearch.com/) |
|
||||
|
||||
## دفاتر ملاحظات المجتمع:
|
||||
|
||||
| الدفتر | الوصف | المؤلف | |
|
||||
|:----------|:-------------|:-------------|------:|
|
||||
| [Fine-tune a pre-trained Transformer to generate lyrics](https://github.com/AlekseyKorshuk/huggingartists) | كيفية توليد كلمات الأغاني على غرار فنانك المفضل من خلال ضبط نموذج GPT-2 | [Aleksey Korshuk](https://github.com/AlekseyKorshuk) | [](https://colab.research.google.com/github/AlekseyKorshuk/huggingartists/blob/master/huggingartists-demo.ipynb) |
|
||||
| [Train T5 in Tensorflow 2](https://github.com/snapthat/TF-T5-text-to-text) | كيفية تدريب T5 لأي مهمة باستخدام Tensorflow 2. يوضح هذا الدفتر مهمة السؤال والجواب المنفذة في Tensorflow 2 باستخدام SQUAD | [Muhammad Harris](https://github.com/HarrisDePerceptron) |[](https://colab.research.google.com/github/snapthat/TF-T5-text-to-text/blob/master/snapthatT5/notebooks/TF-T5-Datasets%20Training.ipynb) |
|
||||
| [Train T5 on TPU](https://github.com/patil-suraj/exploring-T5/blob/master/T5_on_TPU.ipynb) | كيفية تدريب T5 على SQUAD مع Transformers و Nlp | [Suraj Patil](https://github.com/patil-suraj) |[](https://colab.research.google.com/github/patil-suraj/exploring-T5/blob/master/T5_on_TPU.ipynb#scrollTo=QLGiFCDqvuil) |
|
||||
| [Fine-tune T5 for Classification and Multiple Choice](https://github.com/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) | كيفية ضبط نموذج T5 للتصنيف والمهام متعددة الخيارات باستخدام تنسيق النص إلى نص مع PyTorch Lightning | [Suraj Patil](https://github.com/patil-suraj) | [](https://colab.research.google.com/github/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) |
|
||||
| [Fine-tune DialoGPT on New Datasets and Languages](https://github.com/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb) | كيفية ضبط نموذج DialoGPT على مجموعة بيانات جديدة لروبوتات الدردشة المحادثية المفتوحة | [Nathan Cooper](https://github.com/ncoop57) | [](https://colab.research.google.com/github/ncoop57/i-am-a-nerd/blob/master/_notebooks/2020-05-12-chatbot-part-1.ipynb) |
|
||||
| [Long Sequence Modeling with Reformer](https://github.com/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb) | كيفية التدريب على تسلسلات طويلة تصل إلى 500,000 رمز باستخدام Reformer | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/PyTorch_Reformer.ipynb) |
|
||||
| [Fine-tune BART for Summarization](https://github.com/ohmeow/ohmeow_website/blob/master/posts/2021-05-25-mbart-sequence-classification-with-blurr.ipynb) | كيفية ضبط نموذج BART للتلخيص باستخدام fastai باستخدام blurr | [Wayde Gilliam](https://ohmeow.com/) | [](https://colab.research.google.com/github/ohmeow/ohmeow_website/blob/master/posts/2021-05-25-mbart-sequence-classification-with-blurr.ipynb) |
|
||||
| [Fine-tune a pre-trained Transformer on anyone's tweets](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) | كيفية توليد تغريدات على غرار حساب Twitter المفضل لديك من خلال ضبط نموذج GPT-2 | [Boris Dayma](https://github.com/borisdayma) | [](https://colab.research.google.com/github/borisdayma/huggingtweets/blob/master/huggingtweets-demo.ipynb) |
|
||||
| [Optimize 🤗 Hugging Face models with Weights & Biases](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb) | دليل كامل لعرض تكامل W&B مع Hugging Face | [Boris Dayma](https://github.com/borisdayma) | [](https://colab.research.google.com/github/wandb/examples/blob/master/colabs/huggingface/Optimize_Hugging_Face_models_with_Weights_%26_Biases.ipynb) |
|
||||
| [Pretrain Longformer](https://github.com/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb) | كيفية بناء نسخة "طويلة" من النماذج المسبقة التدريب الموجودة | [Iz Beltagy](https://beltagy.net) | [](https://colab.research.google.com/github/allenai/longformer/blob/master/scripts/convert_model_to_long.ipynb) |
|
||||
| [Fine-tune Longformer for QA](https://github.com/patil-suraj/Notebooks/blob/master/longformer_qa_training.ipynb) | كيفية ضبط نموذج Longformer لمهمة QA | [Suraj Patil](https://github.com/patil-suraj) | [](https://colab.research.google.com/github/patil-suraj/Notebooks/blob/master/longformer_qa_training.ipynb) |
|
||||
| [Evaluate Model with 🤗nlp](https://github.com/patrickvonplaten/notebooks/blob/master/How_to_evaluate_Longformer_on_TriviaQA_using_NLP.ipynb) | كيفية تقييم نموذج Longformer على TriviaQA مع `nlp` | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/drive/1m7eTGlPmLRgoPkkA7rkhQdZ9ydpmsdLE?usp=sharing) |
|
||||
| [Fine-tune T5 for Sentiment Span Extraction](https://github.com/enzoampil/t5-intro/blob/master/t5_qa_training_pytorch_span_extraction.ipynb) | كيفية ضبط نموذج T5 لاستخراج المشاعر باستخدام تنسيق النص إلى نص مع PyTorch Lightning | [Lorenzo Ampil](https://github.com/enzoampil) | [](https://colab.research.google.com/github/enzoampil/t5-intro/blob/master/t5_qa_training_pytorch_span_extraction.ipynb) |
|
||||
| [Fine-tune DistilBert for Multiclass Classification](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_multiclass_classification.ipynb) | كيفية ضبط نموذج DistilBert للتصنيف متعدد الفئات باستخدام PyTorch | [Abhishek Kumar Mishra](https://github.com/abhimishra91) | [](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multiclass_classification.ipynb)|
|
||||
|[Fine-tune BERT for Multi-label Classification](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb)|كيفية ضبط نموذج BERT للتصنيف متعدد التصنيفات باستخدام PyTorch|[Abhishek Kumar Mishra](https://github.com/abhimishra91) |[](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb)|
|
||||
|[Fine-tune T5 for Summarization](https://github.com/abhimishra91/transformers-tutorials/blob/master/transformers_summarization_wandb.ipynb)|كيفية ضبط نموذج T5 للتلخيص في PyTorch وتتبع التجارب باستخدام WandB|[Abhishek Kumar Mishra](https://github.com/abhimishra91) |[](https://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_summarization_wandb.ipynb)|
|
||||
|[Speed up Fine-Tuning in Transformers with Dynamic Padding / Bucketing](https://github.com/ELS-RD/transformers-notebook/blob/master/Divide_Hugging_Face_Transformers_training_time_by_2_or_more.ipynb)|كيفية تسريع الضبط الدقيق بعامل 2 باستخدام الضبط الديناميكي/التقسيم|[Michael Benesty](https://github.com/pommedeterresautee) |[](https://colab.research.google.com/drive/1CBfRU1zbfu7-ijiOqAAQUA-RJaxfcJoO?usp=sharing)|
|
||||
|[Pretrain Reformer for Masked Language Modeling](https://github.com/patrickvonplaten/notebooks/blob/master/Reformer_For_Masked_LM.ipynb)| كيفية تدريب نموذج Reformer مع طبقات الانتباه ثنائية الاتجاه | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/drive/1tzzh0i8PgDQGV3SMFUGxM7_gGae3K-uW?usp=sharing)|
|
||||
|[Expand and Fine Tune Sci-BERT](https://github.com/lordtt13/word-embeddings/blob/master/COVID-19%20Research%20Data/COVID-SciBERT.ipynb)| كيفية زيادة مفردات نموذج SciBERT المسبق التدريب من AllenAI على مجموعة بيانات CORD وإنشاء خط أنابيب لها. | [Tanmay Thakur](https://github.com/lordtt13) | [](https://colab.research.google.com/drive/1rqAR40goxbAfez1xvF3hBJphSCsvXmh8)|
|
||||
|[Fine Tune BlenderBotSmall for Summarization using the Trainer API](https://github.com/lordtt13/transformers-experiments/blob/master/Custom%20Tasks/fine-tune-blenderbot_small-for-summarization.ipynb)| كيفية ضبط نموذج BlenderBotSmall للتلخيص على مجموعة بيانات مخصصة، باستخدام واجهة برمجة التطبيقات Trainer. | [Tanmay Thakur](https://github.com/lordtt13) | [](https://colab.research.google.com/drive/19Wmupuls7mykSGyRN_Qo6lPQhgp56ymq?usp=sharing)|
|
||||
|[Fine-tune Electra and interpret with Integrated Gradients](https://github.com/elsanns/xai-nlp-notebooks/blob/master/electra_fine_tune_interpret_captum_ig.ipynb) | كيفية ضبط نموذج Electra للتحليل العاطفي وتفسير التنبؤات باستخدام Captum Integrated Gradients | [Eliza Szczechla](https://elsanns.github.io) | [](https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/electra_fine_tune_interpret_captum_ig.ipynb)|
|
||||
|[fine-tune a non-English GPT-2 Model with Trainer class](https://github.com/philschmid/fine-tune-GPT-2/blob/master/Fine_tune_a_non_English_GPT_2_Model_with_Huggingface.ipynb) | كيفية ضبط نموذج GPT-2 غير الإنجليزي باستخدام فئة Trainer | [Philipp Schmid](https://www.philschmid.de) | [](https://colab.research.google.com/github/philschmid/fine-tune-GPT-2/blob/master/Fine_tune_a_non_English_GPT_2_Model_with_Huggingface.ipynb)|
|
||||
|[Fine-tune a DistilBERT Model for Multi Label Classification task](https://github.com/DhavalTaunk08/Transformers_scripts/blob/master/Transformers_multilabel_distilbert.ipynb) | كيفية ضبط نموذج DistilBERT لمهمة التصنيف متعدد التصنيفات | [Dhaval Taunk](https://github.com/DhavalTaunk08) | [](https://colab.research.google.com/github/DhavalTaunk08/Transformers_scripts/blob/master/Transformers_multilabel_distilbert.ipynb)|
|
||||
|[Fine-tune ALBERT for sentence-pair classification](https://github.com/NadirEM/nlp-notebooks/blob/master/Fine_tune_ALBERT_sentence_pair_classification.ipynb) | كيفية ضبط نموذج ALBERT أو أي نموذج آخر قائم على BERT لمهمة التصنيف المزدوج للجمل | [Nadir El Manouzi](https://github.com/NadirEM) | [](https://colab.research.google.com/github/NadirEM/nlp-notebooks/blob/master/Fine_tune_ALBERT_sentence_pair_classification.ipynb)|
|
||||
|[Fine-tune Roberta for sentiment analysis](https://github.com/DhavalTaunk08/NLP_scripts/blob/master/sentiment_analysis_using_roberta.ipynb) | كيفية ضبط نموذج Roberta للتحليل العاطفي | [Dhaval Taunk](https://github.com/DhavalTaunk08) | [](https://colab.research.google.com/github/DhavalTaunk08/NLP_scripts/blob/master/sentiment_analysis_using_roberta.ipynb)|
|
||||
|[Evaluating Question Generation Models](https://github.com/flexudy-pipe/qugeev) | ما مدى دقة الإجابات على الأسئلة التي يولدها نموذجك التحويلي seq2seq؟ | [Pascal Zoleko](https://github.com/zolekode) | [](https://colab.research.google.com/drive/1bpsSqCQU-iw_5nNoRm_crPq6FRuJthq_?usp=sharing)|
|
||||
|[Classify text with DistilBERT and Tensorflow](https://github.com/peterbayerle/huggingface_notebook/blob/main/distilbert_tf.ipynb) | كيفية ضبط نموذج DistilBERT للتصنيف النصي في TensorFlow | [Peter Bayerle](https://github.com/peterbayerle) | [](https://colab.research.google.com/github/peterbayerle/huggingface_notebook/blob/main/distilbert_tf.ipynb)|
|
||||
|[Leverage BERT for Encoder-Decoder Summarization on CNN/Dailymail](https://github.com/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb) | كيفية البدء السريع لنموذج *EncoderDecoderModel* مع نقطة تفتيش *google-bert/bert-base-uncased* للتلخيص على CNN/Dailymail | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb)|
|
||||
|[Leverage RoBERTa for Encoder-Decoder Summarization on BBC XSum](https://github.com/patrickvonplaten/notebooks/blob/master/RoBERTaShared_for_BBC_XSum.ipynb) | كيفية البدء السريع لنموذج *EncoderDecoderModel* المشترك مع نقطة تفتيش *FacebookAI/roberta-base* للتلخيص على BBC/XSum | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/RoBERTaShared_for_BBC_XSum.ipynb)|
|
||||
|[Fine-tune TAPAS on Sequential Question Answering (SQA)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb) | كيفية ضبط نموذج *TapasForQuestionAnswering* مع نقطة تفتيش *tapas-base* على مجموعة بيانات Sequential Question Answering (SQA) | [Niels Rogge](https://github.com/nielsrogge) | [](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Fine_tuning_TapasForQuestionAnswering_on_SQA.ipynb)|
|
||||
|[Evaluate TAPAS on Table Fact Checking (TabFact)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Evaluating_TAPAS_on_the_Tabfact_test_set.ipynb) | كيفية تقييم نموذج *TapasForSequenceClassification* المضبوط مسبقًا مع نقطة تفتيش *tapas-base-finetuned-tabfact* باستخدام مزيج من مكتبتي 🤗 datasets و 🤗 transformers | [Niels Rogge](https://github.com/nielsrogge) | [](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/TAPAS/Evaluating_TAPAS_on_the_Tabfact_test_set.ipynb)|
|
||||
|[Fine-tuning mBART for translation](https://colab.research.google.com/github/vasudevgupta7/huggingface-tutorials/blob/main/translation_training.ipynb) | كيفية ضبط نموذج mBART باستخدام Seq2SeqTrainer للترجمة من الهندية إلى الإنجليزية | [Vasudev Gupta](https://github.com/vasudevgupta7) | [](https://colab.research.google.com/github/vasudevgupta7/huggingface-tutorials/blob/main/translation_training.ipynb)|
|
||||
|[Fine-tune LayoutLM on FUNSD (a form understanding dataset)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb) | كيفية ضبط نموذج *LayoutLMForTokenClassification* على مجموعة بيانات FUNSD لاستخراج المعلومات من المستندات الممسوحة ضوئيًا | [Niels Rogge](https://github.com/nielsrogge) | [](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForTokenClassification_on_FUNSD.ipynb)|
|
||||
|[Fine-Tune DistilGPT2 and Generate Text](https://colab.research.google.com/github/tripathiaakash/DistilGPT2-Tutorial/blob/main/distilgpt2_fine_tuning.ipynb) | كيفية ضبط نموذج DistilGPT2 وتوليد النص | [Aakash Tripathi](https://github.com/tripathiaakash) | [](https://colab.research.google.com/github/tripathiaakash/DistilGPT2-Tutorial/blob/main/distilgpt2_fine_tuning.ipynb)|
|
||||
|[Fine-Tune LED on up to 8K tokens](https://github.com/patrickvonplaten/notebooks/blob/master/Fine_tune_Longformer_Encoder_Decoder_(LED)_for_Summarization_on_pubmed.ipynb) | كيفية ضبط نموذج LED على pubmed للتلخيص طويل المدى | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_tune_Longformer_Encoder_Decoder_(LED)_for_Summarization_on_pubmed.ipynb)|
|
||||
|[Evaluate LED on Arxiv](https://github.com/patrickvonplaten/notebooks/blob/master/LED_on_Arxiv.ipynb) | كيفية تقييم نموذج LED للتلخيص طويل المدى بشكل فعال | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/LED_on_Arxiv.ipynb)|
|
||||
|[Fine-tune LayoutLM on RVL-CDIP (a document image classification dataset)](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForSequenceClassification_on_RVL_CDIP.ipynb) | كيفية ضبط نموذج *LayoutLMForSequenceClassification* على مجموعة بيانات RVL-CDIP لتصنيف المستندات الممسوحة ضوئيًا | [Niels Rogge](https://github.com/nielsrogge) | [](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/LayoutLM/Fine_tuning_LayoutLMForSequenceClassification_on_RVL_CDIP.ipynb)|
|
||||
|[Wav2Vec2 CTC decoding with GPT2 adjustment](https://github.com/voidful/huggingface_notebook/blob/main/xlsr_gpt.ipynb) | كيفية فك تشفير تسلسل CTC مع تعديل نموذج اللغة | [Eric Lam](https://github.com/voidful) | [](https://colab.research.google.com/drive/1e_zQHYbO2YKEaUgzb1ww1WwiAyydAj?usp=sharing)|
|
||||
|[Fine-tune BART for summarization in two languages with Trainer class](https://github.com/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb) | كيفية ضبط نموذج BART للتلخيص بلغتين باستخدام فئة Trainer | [Eliza Szczechla](https://github.com/elsanns) | [](https://colab.research.google.com/github/elsanns/xai-nlp-notebooks/blob/master/fine_tune_bart_summarization_two_langs.ipynb)|
|
||||
|[Evaluate Big Bird on Trivia QA](https://github.com/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb) | كيفية تقييم نموذج BigBird للأسئلة والأجوبة على وثائق طويلة على Trivia QA | [Patrick von Platen](https://github.com/patrickvonplaten) | [](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Evaluating_Big_Bird_on_TriviaQA.ipynb)|
|
||||
| [Create video captions using Wav2Vec2](https://github.com/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb) | كيفية إنشاء تعليقات توضيحية على YouTube من أي فيديو من خلال تفريغ الصوت باستخدام Wav2Vec | [Niklas Muennighoff](https://github.com/Muennighoff) |[](https://colab.research.google.com/github/Muennighoff/ytclipcc/blob/main/wav2vec_youtube_captions.ipynb) |
|
||||
| [Fine-tune the Vision Transformer on CIFAR-10 using PyTorch Lightning](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_PyTorch_Lightning.ipynb) | كيفية ضبط نموذج Vision Transformer (ViT) على CIFAR-10 باستخدام مكتبات HuggingFace Transformers و Datasets و PyTorch Lightning | [Niels Rogge](https://github.com/nielsrogge) |[](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_PyTorch_Lightning.ipynb) |
|
||||
| [Fine-tune the Vision Transformer on CIFAR-10 using the 🤗 Trainer](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb) | كيفية ضبط نموذج Vision Transformer (ViT) على CIFAR-10 باستخدام مكتبات HuggingFace Transformers و Datasets و 🤗 Trainer | [Niels Rogge](https://github.com/nielsrogge) |[](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb) |
|
||||
| [Evaluate LUKE on Open Entity, an entity typing dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) | كيفية تقييم نموذج *LukeForEntityClassification* على مجموعة بيانات Open Entity | [Ikuya Yamada](https://github.com/ikuyamada) |[](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_open_entity.ipynb) |
|
||||
| [Evaluate LUKE on TACRED, a relation extraction dataset](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) | كيفية تقييم نموذج *LukeForEntityPairClassification* على مجموعة بيانات TACRED | [Ikuya Yamada](https://github.com/ikuyamada) |[](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_tacred.ipynb) |
|
||||
| [Evaluate LUKE on CoNLL-2003, an important NER benchmark](https://github.com/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) | كيفية تقييم نموذج *LukeForEntitySpanClassification* على مجموعة بيانات CoNLL-2003 | [Ikuya Yamada](https://github.com/ikuyamada) |[](https://colab.research.google.com/github/studio-ousia/luke/blob/master/notebooks/huggingface_conll_2003.ipynb) |
|
||||
| [Evaluate BigBird-Pegasus on PubMed dataset](https://github.com/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb) | كيفية تقييم نموذج *BigBirdPegasusForConditionalGeneration* على مجموعة بيانات PubMed | [Vasudev Gupta](https://github.com/vasudevgupta7) | [](https://colab.research.google.com/github/vasudevgupta7/bigbird/blob/main/notebooks/bigbird_pegasus_evaluation.ipynb) |
|
||||
| [Speech Emotion Classification with Wav2Vec2](https://github.com/m3hrdadfi/soxan/blob/main/notebooks/Emotion_recognition_in_Greek_speech_using_Wav2Vec2.ipynb) | كيفية استخدام نموذج Wav2Vec2 المسبق التدريب لتصنيف المشاعر على مجموعة بيانات MEGA | [Mehrdad Farahani](https://github.com/m3hrdadfi) | [](https://colab.research.google.com/github/m3hrdadfi/soxan/blob/main/notebooks/Emotion_recognition_in_Greek_speech_using_Wav2Vec2.ipynb) |
|
||||
| [Detect objects in an image with DETR](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DETR/DETR_minimal_example_(with_DetrFeatureExtractor).ipynb) | كيفية استخدام نموذج *DetrForObjectDetection* المدرب للكشف عن الأجسام في صورة وتصوير الانتباه | [Niels Rogge](https://github.com/NielsRogge) | [](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/DETR/DETR_minimal_example_(with_DetrFeatureExtractor).ipynb) |
|
||||
| [Fine-tune DETR on a custom object detection dataset](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/DETR/Fine_tuning_DetrForObjectDetection_on_custom_dataset_(balloon).ipynb) | كيفية ضبط نموذج *DetrForObjectDetection* على مجموعة بيانات الكشف عن الأجسام المخصصة | [Niels Rogge](https://github.com/NielsRogge) | [](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/DETR/Fine_tuning_DetrForObjectDetection_on_custom_dataset_(balloon).ipynb) |
|
||||
| [Finetune T5 for Named Entity Recognition](https://github.com/ToluClassics/Notebooks/blob/main/T5_Ner_Finetuning.ipynb) | كيفية ضبط نموذج *T5* على مهمة التعرف على الكيانات المسماة | [Ogundepo Odunayo](https://github.com/ToluClassics) | [](https://colab.research.google.com/drive/1obr78FY_cBmWY5ODViCmzdY6O1KB65Vc?usp=sharing) |
|
||||
| [Fine-Tuning Open-Source LLM using QLoRA with MLflow and PEFT](https://github.com/mlflow/mlflow/blob/master/docs/source/llms/transformers/tutorials/fine-tuning/transformers-peft.ipynb) | كيفية استخدام [QLoRA](https://github.com/artidoro/qlora) و [PEFT](https://huggingface.co/docs/peft/en/index) لضبط نموذج LLM بطريقة فعالة من حيث الذاكرة، مع استخدام [MLflow](https://mlflow.org/docs/latest/llms/transformers/index.html) لإدارة تتبع التجارب | [Yuki Watanabe](https://github.com/B-Step62) | [](https://colab.research.google.com/github/mlflow/mlflow/blob/master/docs/source/llms/transformers/tutorials/fine-tuning/transformers-peft.ipynb) |
|
||||
@ -1,163 +0,0 @@
|
||||
# كيفية تعديل أي نموذج من نماذج Transformers
|
||||
|
||||
توفر مكتبة [🤗 Transformers](https://github.com/huggingface/transformers) مجموعة من النماذج المسبقة التدريب والأدوات لمعالجة اللغات الطبيعية، والرؤية، وما إلى ذلك. على الرغم من أن هذه النماذج تغطي مجموعة واسعة من التطبيقات، فقد تواجه حالات استخدام لا تدعمها المكتبة بشكل افتراضي. يُمكن للتخصيص أن يفتح إمكانيات جديدة، مثل إضافة طبقات جديدة، أو تعديل البنية المعمارية، أو تحسين آليات الانتباه. سيُوضح لك هذا الدليل كيفية تعديل نماذج Transformers الموجودة لتلبية احتياجاتك المحددة. الشيء الرائع هو أنك لست بحاجة إلى الخروج من إطار عمل Transformers لإجراء هذه التغييرات؛ يمكنك تعديل النماذج مباشرةً في Transformers والاستفادة من الميزات مثل [واجهة برمجة التطبيقات Trainer](https://huggingface.co/docs/transformers/main/en/main_classes/trainer)، و[PreTrainedModel](https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.PreTrainedModel)، والضبط الدقيق الفعال باستخدام أدوات مثل [PEFT](https://huggingface.co/docs/peft/index).
|
||||
|
||||
سنرشدك في هذا الدليل لكيفية تخصيص نماذج Transformers الموجودة لتلبية متطلباتك، دون فقدان مزايا الإطار. ستتعلم كيفية:
|
||||
|
||||
- تعديل بنية نموذج ما من خلال تغيير آلية الانتباه الخاصة به.
|
||||
- تطبيق تقنيات مثل Low-Rank Adaptation (LoRA) على مكونات نموذج محددة.
|
||||
|
||||
نحن نشجعك على المساهمة باختراقاتك الخاصة ومشاركتها هنا مع المجتمع!
|
||||
|
||||
## مثال: تعديل آلية الانتباه في نموذج Segment Anything (SAM)
|
||||
|
||||
نموذج **Segment Anything (SAM)** هو نموذج رائد في مجال تجزئة الصور. في تنفيذه الافتراضي، يستخدم SAM إسقاطًا مجمعًا للاستعلام والمفتاح والقيمة (`qkv`) في آلية الانتباه الخاصة به. ومع ذلك، قد ترغب في ضبط مكونات محددة فقط من آلية الانتباه، مثل إسقاطات الاستعلام (`q`) والقيمة (`v`)، لتقليل عدد المعلمات القابلة للتدريب والموارد الحسابية المطلوبة.
|
||||
|
||||
### الدافع
|
||||
|
||||
من خلال تقسيم الإسقاط المجمع `qkv` إلى إسقاطات منفصلة `q` و `k` و `v`، يمكنك تطبيق تقنيات مثل **LoRA** (Low-Rank Adaptation) على إسقاطي `q` و `v` فقط. يسمح لك هذا بما يلي:
|
||||
|
||||
- ضبط عدد أقل من المعلمات، مما يقلل من العبء الحسابي.
|
||||
- تحقيق أداء أفضل من خلال التركيز على مكونات محددة.
|
||||
- تجربة استراتيجيات تعديل مختلفة في آلية الانتباه.
|
||||
|
||||
### التنفيذ
|
||||
|
||||
#### **الخطوة 1: إنشاء فئة انتباه مخصصة**

أولاً، قم بإنشاء فئة فرعية من فئة `SamVisionAttention` الأصلية وعدّلها لتضم إسقاطات `q` و`k` و`v` منفصلة.
|
||||
|
||||
```python
import torch
import torch.nn as nn
from transformers.models.sam.modeling_sam import SamVisionAttention


class SamVisionAttentionSplit(SamVisionAttention, nn.Module):
    def __init__(self, config, window_size):
        super().__init__(config, window_size)
        del self.qkv
        # إسقاطات منفصلة q و k و v
        self.q = nn.Linear(config.hidden_size, config.hidden_size, bias=config.qkv_bias)
        self.k = nn.Linear(config.hidden_size, config.hidden_size, bias=config.qkv_bias)
        self.v = nn.Linear(config.hidden_size, config.hidden_size, bias=config.qkv_bias)
        self._register_load_state_dict_pre_hook(self.split_q_k_v_load_hook)

    def split_q_k_v_load_hook(self, state_dict, prefix, *args):
        keys_to_delete = []
        for key in list(state_dict.keys()):
            if "qkv." in key:
                # تقسيم q و k و v من الإسقاط المجمع
                q, k, v = state_dict[key].chunk(3, dim=0)
                # استبدال الإسقاط المجمع بالإسقاطات الفردية q و k و v
                state_dict[key.replace("qkv.", "q.")] = q
                state_dict[key.replace("qkv.", "k.")] = k
                state_dict[key.replace("qkv.", "v.")] = v
                # وضع علامة على مفتاح qkv القديم للحذف
                keys_to_delete.append(key)

        # حذف مفاتيح qkv القديمة
        for key in keys_to_delete:
            del state_dict[key]

    def forward(self, hidden_states: torch.Tensor, output_attentions=False) -> torch.Tensor:
        batch_size, height, width, _ = hidden_states.shape
        qkv_shapes = (batch_size * self.num_attention_heads, height * width, -1)
        query = self.q(hidden_states).reshape((batch_size, height * width, self.num_attention_heads, -1)).permute(0, 2, 1, 3).reshape(qkv_shapes)
        key = self.k(hidden_states).reshape((batch_size, height * width, self.num_attention_heads, -1)).permute(0, 2, 1, 3).reshape(qkv_shapes)
        value = self.v(hidden_states).reshape((batch_size, height * width, self.num_attention_heads, -1)).permute(0, 2, 1, 3).reshape(qkv_shapes)

        attn_weights = (query * self.scale) @ key.transpose(-2, -1)

        if self.use_rel_pos:
            attn_weights = self.add_decomposed_rel_pos(
                attn_weights, query, self.rel_pos_h, self.rel_pos_w, (height, width), (height, width)
            )

        attn_weights = torch.nn.functional.softmax(attn_weights, dtype=torch.float32, dim=-1).to(query.dtype)
        attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = (attn_probs @ value).reshape(batch_size, self.num_attention_heads, height, width, -1)
        attn_output = attn_output.permute(0, 2, 3, 1, 4).reshape(batch_size, height, width, -1)
        attn_output = self.proj(attn_output)

        if output_attentions:
            outputs = (attn_output, attn_weights)
        else:
            outputs = (attn_output, None)
        return outputs
```
|
||||
|
||||
**الشرح:**
|
||||
|
||||
- **الإسقاطات المنفصلة:** يتم إزالة الإسقاط المُجمع `qkv`، وإنشاء إسقاطات خطية منفصلة `q` و `k` و `v`.
|
||||
- **دالة استدعاء تحميل الأوزان:** تقوم الدالة `split_q_k_v_load_hook` بتقسيم أوزان `qkv` المسبقة التدريب إلى أوزان `q` و`k` و`v` منفصلة عند تحميل النموذج. يضمن هذا التوافق مع أي نموذج مسبق التدريب.
|
||||
- **التنفيذ الأمامي:** يتم حساب الاستعلامات والمفاتيح والقيم بشكل منفصل، وتستمر آلية الانتباه كالمعتاد.
|
||||
|
||||
#### **الخطوة 2: استبدال فئة الانتباه الأصلية**
|
||||
|
||||
استبدل فئة `SamVisionAttention` الأصلية بفئتك المخصصة بحيث يستخدم النموذج آلية الانتباه المعدلة.
|
||||
|
||||
```python
from transformers import SamModel
from transformers.models.sam import modeling_sam

# استبدال فئة الانتباه في وحدة modeling_sam
modeling_sam.SamVisionAttention = SamVisionAttentionSplit

# تحميل نموذج SAM المسبق التدريب
model = SamModel.from_pretrained("facebook/sam-vit-base")
```
|
||||
|
||||
**الشرح:**
|
||||
|
||||
- **استبدال الفئة:** من خلال تعيين فئتك المخصصة إلى `modeling_sam.SamVisionAttention`، فإن أي حالات من فئة `SamVisionAttention` في النموذج ستستخدم النسخة المعدلة. وبالتالي، عند استدعاء `SamModel`، سيتم استخدام `SamVisionAttentionSplit` المحددة حديثًا.
|
||||
- **تحميل النموذج:** يتم تحميل النموذج باستخدام `from_pretrained`، ويتم دمج آلية الانتباه المخصصة.
|
||||
|
||||
#### **الخطوة 3: تطبيق LoRA على إسقاطات محددة**
|
||||
|
||||
مع وجود إسقاطات `q` و `k` و `v` منفصلة، يمكنك الآن تطبيق LoRA على مكونات محددة، مثل إسقاطات `q` و `v`.
|
||||
|
||||
```python
from peft import LoraConfig, get_peft_model

config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["q", "v"],  # تطبيق LoRA على إسقاطي q و v
    lora_dropout=0.1,
    task_type="mask-generation",
)

# تطبيق LoRA على النموذج
model = get_peft_model(model, config)
```
|
||||
|
||||
**الشرح:**
|
||||
|
||||
- **تكوين LoRA:** تحدد `LoraConfig` المرتبة `r`، وعامل القياس `lora_alpha`، والوحدات المستهدفة (`"q"` و `"v"`)، ومعدل التخلي، ونوع المهمة.
|
||||
- **تطبيق LoRA:** تقوم دالة `get_peft_model` بتطبيق LoRA على الوحدات المحددة في النموذج.
|
||||
- **تقليل المعلمات:** من خلال التركيز على `q` و `v`، فإنك تقلل عدد المعلمات القابلة للتدريب، مما يؤدي إلى تسريع التدريب وتقليل استخدام الذاكرة.
|
||||
|
||||
#### **الخطوة 4: التحقق من عدد المعلمات القابلة للتدريب**
|
||||
|
||||
من السهل التحقق من عدد المعلمات القابلة للتدريب ومعرفة تأثير تعديلك.
|
||||
|
||||
```python
model.print_trainable_parameters()
```
|
||||
|
||||
**الناتج المتوقع:**
|
||||
|
||||
```
trainable params: 608,256 || all params: 94,343,728 || trainable%: 0.6447
trainable params: 912,384 || all params: 94,647,856 || trainable%: 0.9640  # مع k
```
|
||||
|
||||
## المساهمة بإبداعاتك الخاصة
|
||||
|
||||
يمكن لتعديل النماذج المسبقة التدريب أن يفتح آفاقًا جديدة للبحث والتطبيق. من خلال فهم وتعديل الآليات الداخلية للنماذج مثل SAM، يمكنك تخصيصها لتلبية احتياجاتك المحددة، وتحسين الأداء، وتجربة أفكار جديدة.
|
||||
|
||||
إذا قمت بتطوير تعديلاتك الخاصة لنماذج Transformers وترغب في مشاركتها، ففكر في المساهمة في هذه الوثيقة.
|
||||
|
||||
- **إنشاء طلب سحب (Pull Request):** شارك تغييراتك وتحسيناتك في التعليمات البرمجية مباشرة في المستودع.
|
||||
- **كتابة التوثيق:** قدم تفسيرات وأمثلة واضحة لتعديلاتك.
|
||||
- **التفاعل مع المجتمع:** ناقش أفكارك واحصل على تعليقات من المطورين والباحثين الآخرين من خلال فتح مشكلة.
|
||||
@ -144,7 +144,7 @@ conda install conda-forge::transformers
|
||||
|
||||
تُحمّل النماذج المُسبقة التدريب وتُخزّن مؤقتًا في: `~/.cache/huggingface/hub`. هذا هو المجلد الافتراضي الذي يُحدده متغير البيئة `TRANSFORMERS_CACHE`. على Windows، يكون دليل ذاكرة التخزين المؤقت الافتراضي هو `C:\Users\username\.cache\huggingface\hub`. يمكنك تغيير متغيرات البيئة shell الموضحة أدناه - حسب الأولوية - لتحديد دليل ذاكرة تخزين مؤقت مختلف:
|
||||
|
||||
1. متغير البيئة (افتراضي): `HF_HUB_CACHE` أو `TRANSFORMERS_CACHE`.
|
||||
1. متغير البيئة (افتراضي): `HUGGINGFACE_HUB_CACHE` أو `TRANSFORMERS_CACHE`.
|
||||
2. متغير البيئة: `HF_HOME`.
|
||||
3. متغير البيئة: `XDG_CACHE_HOME` + `/huggingface`.
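
على سبيل المثال، يمكن تعيين دليل التخزين المؤقت برمجيًا قبل استيراد المكتبة (مقطع توضيحي مبسّط؛ المسار المستخدم هنا افتراضي لغرض الشرح فقط):

```python
import os

# يجب تعيين متغير البيئة قبل استيراد transformers حتى يؤخذ في الاعتبار (المسار هنا افتراضي للتوضيح)
os.environ["HF_HOME"] = "/path/to/my/cache"

from transformers import AutoModel

# سيُنزَّل النموذج ويُخزَّن مؤقتًا داخل الدليل المحدد أعلاه (ضمن المجلد الفرعي hub)
model = AutoModel.from_pretrained("distilbert/distilbert-base-uncased")
```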
|
||||
|
||||
|
||||
@ -1,184 +0,0 @@
|
||||
# المحولات النمطية
|
||||
|
||||
مكتبة `transformers` هي إطار عمل ذو فلسفة محددة؛ وقد عُرِّفت هذه الفلسفة في [الدليل المفاهيمي](./philosophy).
|
||||
|
||||
جوهر هذه الفلسفة يتمثل في مبدأ [نموذج واحد، ملف واحد](https://huggingface.co/blog/transformers-design-philosophy)
|
||||
في المكتبة. والجانب السلبي لهذا المبدأ هو تقييده للوراثة والاستيراد بين مكونات الملفات.
|
||||
|
||||
نتيجة لذلك، تتكرر مكونات النموذج عبر العديد من الملفات. يحتوي `transformers` على عدد كبير من طبقات الانتباه يقارب عدد النماذج، والكثير منها متطابق. ويتسبب هذا في تباعد التنفيذات المستقلة عن بعضها عند تطبيق الإصلاحات والتغييرات على أجزاء محددة من التعليمات البرمجية.
|
||||
|
||||
ولمعالجة ذلك، اعتمدنا مفهوم "النسخ" في المكتبة. فبإضافة تعليق يُشير إلى أن التعليمات البرمجية هي نسخة من أخرى، نضمن من خلال أنظمة CI والأوامر المحلية عدم تباعد النسخ. لكن هذه العملية، رغم بساطتها، تُسبب إرهاقاً. كما أنها تزيد العبء على المساهمين، وهو ما نهدف إلى تجاوزه.
|
||||
|
||||
غالباً ما تتطلب مساهمات النماذج إضافة تعليمات برمجية (حوالي 1000 سطر)، ومعالج (حوالي 500 سطر)، واختبارات، ووثائق، إلخ. ونادراً ما تقل مساهمات النماذج عن 3000-5000 سطر من التعليمات البرمجية، معظمها أكواد نمطية. هذا يرفع مستوى المساهمات،
|
||||
|
||||
ونهدف مع المحولات النمطية إلى خفض هذا المستوى إلى حدّ مقبول.
|
||||
|
||||
## ما هو؟
|
||||
|
||||
تقدم المحولات النمطية مفهوم ملف "نمطي" لمجلد نموذج. يقبل هذا الملف النمطي تعليمات برمجية
|
||||
غير مقبولة عادة في ملفات النمذجة/المعالجة، حيث يسمح بالاستيراد من نماذج مجاورة وكذلك
|
||||
الوراثة من الفئات إلى فئات أخرى.
|
||||
|
||||
يعرّف هذا الملف النمطي النماذج والمعالجات وفئة التكوين التي سيتم تعريفها في وحداتهم
|
||||
المتعلقة.
|
||||
|
||||
وأخيرًا، تقدم هذه الميزة أداة `linter` جديدة ستعمل على "تفكيك" الملف النمطي إلى بنية دليل تتبع مبدأ "نموذج واحد، ملف واحد". سيتم إنشاء هذه الملفات تلقائيًا في كل مرة يُشغَّل فيها البرنامج النصي؛ مما يقصر المساهمة المطلوبة على الملف النمطي وحده، وبالتالي على التغييرات بين النموذج المُساهَم به والنماذج الأخرى فقط.
|
||||
|
||||
سيقوم مستخدمو النموذج في النهاية باستيراد واستخدام واجهة الملف الواحد، لذا لا يتوقع حدوث أي تغيير هنا. من خلال القيام بذلك،
|
||||
نأمل في الجمع بين أفضل ما في العالمين: تمكين المساهمات البسيطة مع الالتزام بفلسفتنا.
|
||||
|
||||
لذلك، هذا بديل لعلامات `# Copied from`، ويمكن توقع انتقال النماذج المساهمة سابقًا إلى
|
||||
تنسيق المحولات النمطية الجديد في الأشهر المقبلة.
|
||||
|
||||
### التفاصيل
|
||||
|
||||
تُبسط أداة "linter" الوراثة، مُنشئةً جميع الملفات المفردة من الملف النمطي، مع الحفاظ على شفافيتها أمام مستخدمي Python. حاليًا، تُبسط الأداة مستوىً واحدًا من الوراثة
|
||||
|
||||
على سبيل المثال:
|
||||
- إذا ورثت فئة التكوين من فئة أخرى وأضافت/حذفت معامل، فسيتم إما الإشارة إلى الملف المولد مباشرةً
|
||||
(في حالة الإضافة) أو إزالته تمامًا (في حالة الحذف).
|
||||
- إذا ورثت فئة من فئة أخرى، على سبيل المثال `class GemmaModel(LlamaModel):`، فستُستنتج التبعيات تلقائيًا، وستُستنتج جميع الوحدات الفرعية من الفئة الأصلية.
|
||||
- إذا قمت بتعريف وظائف جديدة في الملف `modular` واستخدمتها داخل الفئات، فستستنتج أداة linter ذلك تلقائيًا
|
||||
|
||||
يجب أن تكون قادرًا على كتابة كل شيء (المجزىء اللغوي، ومُعالِج الصور، والنموذج، والتكوين) في الملف `modular`، وسيتم إنشاء الملفات المُقابلة تلقائيًا.
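
وكمثال توضيحي مبسّط على وراثة فئة التكوين مع إضافة معامل جديد (اسما الفئة والمعامل هنا افتراضيان لغرض الشرح فقط):

```python
# مقتطف افتراضي من ملف modular_my_model.py لتوضيح الفكرة فقط
from ..llama.configuration_llama import LlamaConfig


class MyModelConfig(LlamaConfig):
    model_type = "my_model"

    def __init__(self, new_arg=42, **super_kwargs):
        # يُضاف المعامل الجديد إلى ملف التكوين المولد، وتُمرَّر بقية المعاملات إلى الفئة الأصلية
        super().__init__(**super_kwargs)
        self.new_arg = new_arg
```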
|
||||
|
||||
### التطبيق
|
||||
|
||||
[TODO] نقدم اختبارًا جديدًا، للتأكد من أن المحتوى المولد يتطابق مع ما هو موجود في `modular_xxxx.py`
|
||||
|
||||
### الأمثلة
|
||||
|
||||
هنا مثال سريع باستخدام BERT و RoBERTa. النموذجان مرتبطان ارتباطًا وثيقًا: يختلف تنفيذهما النمذجي فقط في طبقة التضمينات.
|
||||
|
||||
بدلاً من إعادة تعريف النموذج بالكامل، إليك كيف يبدو ملف `modular_roberta.py` لفئات النمذجة والتكوين (لأغراض المثال، يتم تجاهل المجزىء اللغوي في هذا الوقت حيث أنه مختلف جدًا).
|
||||
|
||||
```python
from torch import nn

from ..bert.configuration_bert import BertConfig
from ..bert.modeling_bert import (
    BertModel,
    BertEmbeddings,
    BertForMaskedLM,
)


# تكوين RoBERTa مطابق لتكوين BERT
class RobertaConfig(BertConfig):
    model_type = 'roberta'


# نعيد تعريف الإضافات هنا لتسليط الضوء على اختلاف معرف الحشو، ونعيد تعريف الإضافات الموضعية
class RobertaEmbeddings(BertEmbeddings):
    def __init__(self, config):
        super().__init__(config)

        self.padding_idx = config.pad_token_id
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
        )


# نموذج RoBERTa مطابق لنموذج BERT، باستثناء طبقة الإضافات.
# نعيد تعريف الإضافات أعلاه، لذا لا حاجة هنا إلى عمل إضافي
class RobertaModel(BertModel):
    def __init__(self, config):
        super().__init__(config)
        self.embeddings = RobertaEmbeddings(config)


# الرؤوس الآن تحتاج فقط إلى إعادة تعريف النموذج داخلها ليكون `RobertaModel` الصحيح
class RobertaForMaskedLM(BertForMaskedLM):
    def __init__(self, config):
        super().__init__(config)
        self.model = RobertaModel(config)
```
|
||||
|
||||
لاحظ أنه إذا لم تستخدم الاعتماد الذي حددته، فستحصل على الخطأ التالي:
|
||||
|
||||
```bash
ValueError: You defined `RobertaEmbeddings` in the modular_roberta.py, it should be used
            when you define `BertModel`, as it is one of it's direct dependencies. Make sure
            you use it in the `__init__` function.
```
|
||||
|
||||
بالإضافة إلى ذلك، قد تجد قائمة بالأمثلة هنا:
|
||||
|
||||
## ما هو ليس كذلك
|
||||
|
||||
ليس بديلاً لتعليمات برمجة النمذجة (بعد؟)، وإذا لم يكن نموذجك يعتمد على أي شيء آخر موجود من قبل، فيمكنك إضافة ملف `نمذجة` كالعادة.
|
||||
|
||||
|
||||
## الاستخدام المتقدم
|
||||
|
||||
### إزالة السمات والوظائف
|
||||
لإزالة السمات التي لا تستخدم في نموذجك النمطي، والتي لا تريد رؤيتها في النمذجة المفككة:
|
||||
|
||||
```python
class GemmaModel(LlamaModel):             |       class GemmaModel(PreTrainedModel):
    def __init__(self, config):           |         def __init__(self, config):
        super().__init__(config)          |           super().__init__(config)
        del self.embed_tokens             |           self.padding_idx = config.pad_token_id
                                          |           self.vocab_size = config.vocab_size
                                          |
                                          |           self.layers = nn.ModuleList(
                                          |               [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
                                          |           )
                                          |           self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
                                          |           self.rotary_emb = LlamaRotaryEmbedding(config=config)
                                          |           self.gradient_checkpointing = False
                                          |
                                          |           # Initialize weights and apply final processing
                                          |           self.post_init()
```
|
||||
إذا قمت بالتحقق من `LlamaModel` الأصلي، فستجد `embed_tokens` الذي تمت إزالته هنا (كما هو متوقع!)
|
||||
|
||||
إزالة وظيفة أمرٌ مشابه؛ تحتاج فقط إلى كتابتها بحيث تثير استثناءً (مثل `raise AttributeError("")`) لمحاكاة السلوك الفعلي عند إزالة وظيفة أصلية في بايثون.
|
||||
|
||||
```python
class GemmaTokenizer(LlamaTokenizer):
    ...

    def get_spm_processor(self):
        raise AttributeError("Not needed for Gemma")

    def unk_token_length(self):
        raise AttributeError("Not needed for Gemma")
```
|
||||
|
||||
### تعريف وظائف جديدة
|
||||
|
||||
إذا قمت بتعريف وظيفة جديدة في الملف `modular` لاستخدامها داخل فئة، على سبيل المثال
|
||||
|
||||
```python
def my_new_function(*args, **kwargs):
    # Do something here
    pass


class GemmaModel(LlamaModel):
    def forward(self, *args, **kwargs):
        # Call the function
        example = my_new_function(*args, **kwargs)
        # continue here
```
|
||||
|
||||
سيتم نسخ وظيفة `my_new_function` (وبشكل متكرر، أي وظائف أخرى جديدة يتم استدعاؤها في جسمها) تلقائيًا
|
||||
في الملف الذي يتم استخدامه.
|
||||
|
||||
### استدعاء `super()`
|
||||
قمنا مؤخرًا بإطلاق بعض الميزات التي تسمح لك بالانتقال من:
|
||||
```python
class GemmaTokenizer(LlamaTokenizer, PretrainedTokenizerFast):     |      class GemmaModel(nn.Module):
    def __init__(self, eos_token="</s>"):                          |        def __init__(self):
        eos_token = AddedToken(eos_token)                          |          eos_token = AddedToken(eos_token)
        PretrainedTokenizerFast.__init__(self, eos_token)          |          super().__init__(eos_token)
```
|
||||
هذا مفيد عندما لا تريد تفكيك استدعاء `super()`، وتريد التمييز بين أي استدعاء super init تقوم به!
|
||||
|
||||
### التسمية الخاصة
|
||||
ندعم الآن أيضًا حالات خاصة مثل
|
||||
```python
class GemmaVisionModel(CLIPModel):
    pass
```
|
||||
حيث اسم فئة `GemmaVision` الخاصة بك ليس هو نفسه `Gemma` النمطي. هذا مفيد للغاية للنماذج المركبة.
|
||||
@ -1,140 +0,0 @@
|
||||
# دفاتر ملاحظات 🤗 Transformers
|
||||
|
||||
يمكنك أن تجد هنا قائمة بدفاتر الملاحظات الرسمية التي تقدمها Hugging Face.
|
||||
|
||||
كما نود أن ندرج هنا محتوى مثيرًا للاهتمام تم إنشاؤه بواسطة المجتمع.
|
||||
إذا كتبت دفتر ملاحظات يستفيد من 🤗 Transformers وتود إدراجه هنا، فيُرجى فتح طلب سحب حتى يمكن تضمينه ضمن دفاتر ملاحظات المجتمع.
|
||||
|
||||
|
||||
## دفاتر ملاحظات Hugging Face 🤗
|
||||
|
||||
### دفاتر ملاحظات التوثيق
|
||||
|
||||
يمكنك فتح أي صفحة من صفحات التوثيق كدفتر ملاحظات في Colab (يوجد زر مباشرة على تلك الصفحات) ولكنها مدرجة هنا أيضًا إذا كنت بحاجة إليها:
|
||||
|
||||
| دفتر الملاحظات | الوصف | | |
|
||||
|:----------|:-------------|:-------------|------:|
|
||||
| [جولة سريعة في المكتبة](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/quicktour.ipynb) | عرض لمختلف واجهات برمجة التطبيقات في Transformers |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/quicktour.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/en/transformers_doc/quicktour.ipynb)|
|
||||
| [ملخص المهام](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/task_summary.ipynb) | كيفية تشغيل نماذج مكتبة Transformers مهمة تلو الأخرى |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/task_summary.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/task_summary.ipynb)|
|
||||
| [معالجة البيانات مسبقًا](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/preprocessing.ipynb) | كيفية استخدام محلل لغوي لمعالجة بياناتك مسبقًا |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/preprocessing.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/preprocessing.ipynb)|
|
||||
| [الضبط الدقيق لنموذج مُدرَّب مسبقًا](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/training.ipynb) | كيفية استخدام المدرب لضبط نموذج مُدرَّب مسبقًا بدقة |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/training.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/training.ipynb)|
|
||||
| [ملخص للمحللات اللغوية](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/tokenizer_summary.ipynb) | الاختلافات بين خوارزمية المحلل اللغوي |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/tokenizer_summary.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/tokenizer_summary.ipynb)|
|
||||
| [النماذج متعددة اللغات](https://github.com/huggingface/notebooks/blob/main/transformers_doc/en/multilingual.ipynb) | كيفية استخدام النماذج متعددة اللغات للمكتبة |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/transformers_doc/en/multilingual.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/transformers_doc/en/multilingual.ipynb)|
|
||||
|
||||
|
||||
### أمثلة PyTorch
|
||||
|
||||
#### معالجة اللغة الطبيعية[[pytorch-nlp]]
|
||||
|
||||
| دفتر الملاحظات | الوصف | | |
|
||||
|:----------|:-------------|:-------------|------:|
|
||||
| [تدريب محللك اللغوي](https://github.com/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb) | كيفية تدريب واستخدام محللك اللغوي الخاص بك |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)|
|
||||
| [تدريب نموذج لغتك](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch.ipynb) | كيفية البدء بسهولة في استخدام المحولات |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على تصنيف النص](https://github.com/huggingface/notebooks/blob/main/examples/text_classification.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على أي مهمة GLUE. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على النمذجة اللغوية](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على مهمة LM سببية أو مقنعة. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على تصنيف الرموز المميزة](https://github.com/huggingface/notebooks/blob/main/examples/token_classification.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على مهمة تصنيف الرموز المميزة (NER، PoS). | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على الإجابة على الأسئلة](https://github.com/huggingface/notebooks/blob/main/examples/question_answering.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على SQUAD. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على الاختيار من متعدد](https://github.com/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على SWAG. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على الترجمة](https://github.com/huggingface/notebooks/blob/main/examples/translation.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على WMT. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/translation.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على التلخيص](https://github.com/huggingface/notebooks/blob/main/examples/summarization.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على XSUM. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/summarization.ipynb)|
|
||||
| [كيفية تدريب نموذج لغة من البداية](https://github.com/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)| تسليط الضوء على جميع الخطوات لتدريب نموذج Transformer بشكل فعال على بيانات مخصصة | [](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/01_how_to_train.ipynb)|
|
||||
| [كيفية إنشاء نص](https://github.com/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)| كيفية استخدام أساليب فك التشفير المختلفة لإنشاء اللغة باستخدام المحولات | [](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/02_how_to_generate.ipynb)|
|
||||
| [كيفية إنشاء نص (مع قيود)](https://github.com/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)| كيفية توجيه إنشاء اللغة باستخدام القيود التي يوفرها المستخدم | [](https://colab.research.google.com/github/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/blog/blob/main/notebooks/53_constrained_beam_search.ipynb)|
|
||||
| [Reformer](https://github.com/huggingface/blog/blob/main/notebooks/03_reformer.ipynb)| كيف يدفع Reformer حدود النمذجة اللغوية | [](https://colab.research.google.com/github/patrickvonplaten/blog/blob/main/notebooks/03_reformer.ipynb)| [](https://studiolab.sagemaker.aws/import/github/patrickvonplaten/blog/blob/main/notebooks/03_reformer.ipynb)|
|
||||
|
||||
#### رؤية الكمبيوتر[[pytorch-cv]]
|
||||
|
||||
| دفتر الملاحظات | الوصف | | |
|
||||
|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------:|
|
||||
| [كيفية ضبط نموذج بدقة على تصنيف الصور (Torchvision)](https://github.com/huggingface/notebooks/blob/main/examples/image_classification.ipynb) | يوضح كيفية معالجة البيانات مسبقًا باستخدام Torchvision وضبط أي نموذج رؤية مُدرَّب مسبقًا بدقة على تصنيف الصور | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على تصنيف الصور (Albumentations)](https://github.com/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb) | يوضح كيفية معالجة البيانات مسبقًا باستخدام Albumentations وضبط أي نموذج رؤية مُدرَّب مسبقًا بدقة على تصنيف الصور | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/image_classification_albumentations.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على تصنيف الصور (Kornia)](https://github.com/huggingface/notebooks/blob/main/examples/image_classification_kornia.ipynb) | يوضح كيفية معالجة البيانات مسبقًا باستخدام Kornia وضبط أي نموذج رؤية مُدرَّب مسبقًا بدقة على تصنيف الصور | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification_kornia.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/image_classification_kornia.ipynb)|
|
||||
| [كيفية إجراء الكشف عن الأشياء بدون لقطات مع OWL-ViT](https://github.com/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb) | يوضح كيفية إجراء الكشف عن الأشياء بدون لقطات على الصور باستخدام استعلامات نصية | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb)|
|
||||
| [كيفية ضبط نموذج وصف الصور بدقة](https://github.com/huggingface/notebooks/blob/main/examples/image_captioning_blip.ipynb) | يوضح كيفية ضبط BLIP بدقة لوصف الصور على مجموعة بيانات مخصصة | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_captioning_blip.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/image_captioning_blip.ipynb)|
|
||||
| [كيفية بناء نظام تشابه الصور مع Transformers](https://github.com/huggingface/notebooks/blob/main/examples/image_similarity.ipynb) | يوضح كيفية بناء نظام تشابه الصور | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_similarity.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/image_similarity.ipynb)|
|
||||
| [كيفية ضبط نموذج SegFormer بدقة على التجزئة الدلالية](https://github.com/huggingface/notebooks/blob/main/examples/semantic_segmentation.ipynb) | يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج SegFormer مُدرَّب مسبقًا بدقة على التجزئة الدلالية | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/semantic_segmentation.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/semantic_segmentation.ipynb)|
|
||||
| [كيفية ضبط نموذج VideoMAE بدقة على تصنيف الفيديو](https://github.com/huggingface/notebooks/blob/main/examples/video_classification.ipynb) | يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج VideoMAE مُدرَّب مسبقًا بدقة على تصنيف الفيديو | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/video_classification.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/video_classification.ipynb)|
|
||||
|
||||
|
||||
#### الصوت[[pytorch-audio]]
|
||||
|
||||
| دفتر الملاحظات | الوصف | | |
|
||||
|:----------|:-------------|:-------------|------:|
|
||||
| [كيفية ضبط نموذج التعرف على الكلام باللغة الإنجليزية بدقة](https://github.com/huggingface/notebooks/blob/main/examples/speech_recognition.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج كلام مُدرَّب مسبقًا بدقة على TIMIT | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/speech_recognition.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/speech_recognition.ipynb)|
|
||||
| [كيفية ضبط نموذج التعرف على الكلام بأي لغة بدقة](https://github.com/huggingface/notebooks/blob/main/examples/multi_lingual_speech_recognition.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج كلام مُدرَّب مسبقًا متعدد اللغات بدقة على Common Voice | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multi_lingual_speech_recognition.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/multi_lingual_speech_recognition.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على تصنيف الصوت](https://github.com/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج كلام مُدرَّب مسبقًا بدقة على Keyword Spotting | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb)|
|
||||
|
||||
|
||||
#### التسلسلات البيولوجية[[pytorch-bio]]
|
||||
|
||||
| دفتر الملاحظات | الوصف | | |
|
||||
|:----------|:----------------------------------------------------------------------------------------|:-------------|------:|
|
||||
| [كيفية ضبط نموذج بروتين مُدرَّب مسبقًا بدقة](https://github.com/huggingface/notebooks/blob/main/examples/protein_language_modeling.ipynb) | شاهد كيفية ترميز البروتينات وضبط نموذج "لغة" بروتين مُدرَّب مسبقًا كبير بدقة | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/protein_language_modeling.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/protein_language_modeling.ipynb) |
|
||||
| [كيفية إنشاء طيات بروتينية](https://github.com/huggingface/notebooks/blob/main/examples/protein_folding.ipynb) | شاهد كيفية الانتقال من تسلسل البروتين إلى نموذج بروتين كامل وملف PDB | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/protein_folding.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/protein_folding.ipynb) |
|
||||
| [كيفية ضبط نموذج محول النيوكليوتيدات بدقة](https://github.com/huggingface/notebooks/blob/main/examples/nucleotide_transformer_dna_sequence_modelling.ipynb) | شاهد كيفية ترميز الحمض النووي وضبط نموذج "لغة" الحمض النووي مُدرَّب مسبقًا كبير بدقة | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/nucleotide_transformer_dna_sequence_modelling.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/nucleotide_transformer_dna_sequence_modelling.ipynb) |
|
||||
| [ضبط نموذج محول النيوكليوتيدات بدقة باستخدام LoRA](https://github.com/huggingface/notebooks/blob/main/examples/nucleotide_transformer_dna_sequence_modelling_with_peft.ipynb) | تدريب نماذج DNA أكبر بكثير بطريقة فعالة من حيث الذاكرة | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/nucleotide_transformer_dna_sequence_modelling_with_peft.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/nucleotide_transformer_dna_sequence_modelling_with_peft.ipynb) |
|
||||
|
||||
|
||||
#### طرائق أخرى[[pytorch-other]]
|
||||
|
||||
| دفتر الملاحظات | الوصف | | |
|
||||
|:----------|:----------------------------------------------------------------------------------------|:-------------|------:|
|
||||
| [التنبؤ الاحتمالي بالسلاسل الزمنية](https://github.com/huggingface/notebooks/blob/main/examples/time-series-transformers.ipynb) | شاهد كيفية تدريب Time Series Transformer على مجموعة بيانات مخصصة | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/time-series-transformers.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/time-series-transformers.ipynb) |
|
||||
|
||||
#### دفاتر ملاحظات الأدوات المساعدة [[pytorch-utility]]
|
||||
|
||||
| دفتر الملاحظات | الوصف | | |
|
||||
|:----------|:-------------|:-------------|------:|
|
||||
| [كيفية تصدير النموذج إلى ONNX](https://github.com/huggingface/notebooks/blob/main/examples/onnx-export.ipynb)| تسليط الضوء على كيفية التصدير وتشغيل أعباء عمل الاستدلال من خلال ONNX | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/onnx-export.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/onnx-export.ipynb)|
|
||||
| [كيفية استخدام المعايير](https://github.com/huggingface/notebooks/blob/main/examples/benchmark.ipynb)| كيفية قياس أداء النماذج باستخدام المحولات | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/benchmark.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/benchmark.ipynb)|
|
||||
|
||||
### أمثلة TensorFlow
|
||||
|
||||
#### معالجة اللغة الطبيعية[[tensorflow-nlp]]
|
||||
|
||||
| دفتر الملاحظات | الوصف | | |
|
||||
|:----------|:-------------|:-------------|------:|
|
||||
| [تدريب محللك اللغوي](https://github.com/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb) | كيفية تدريب واستخدام محللك اللغوي الخاص بك |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/tokenizer_training.ipynb)|
|
||||
| [تدريب نموذج لغتك](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch-tf.ipynb) | كيفية البدء بسهولة في استخدام المحولات |[](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling_from_scratch-tf.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على تصنيف النص](https://github.com/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على أي مهمة GLUE. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على النمذجة اللغوية](https://github.com/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على مهمة LM سببية أو مقنعة. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على تصنيف الرموز المميزة](https://github.com/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على مهمة تصنيف الرموز المميزة (NER، PoS). | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على الإجابة على الأسئلة](https://github.com/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على SQUAD. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على الاختيار من متعدد](https://github.com/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على SWAG. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على الترجمة](https://github.com/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على WMT. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على التلخيص](https://github.com/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج مُدرَّب مسبقًا بدقة على XSUM. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/summarization-tf.ipynb)|
|
||||
|
||||
#### رؤية الكمبيوتر[[tensorflow-cv]]
|
||||
|
||||
| دفتر الملاحظات | الوصف | | |
|
||||
|:---------------------------------------------------------------------------------------------------------------------------------------------------------|:----------------------------------------------------------------------------------------------------|:-------------|------:|
|
||||
| [كيفية ضبط نموذج بدقة على تصنيف الصور](https://github.com/huggingface/notebooks/blob/main/examples/image_classification-tf.ipynb) | يوضح كيفية معالجة البيانات مسبقًا وضبط أي نموذج رؤية مُدرَّب مسبقًا بدقة على تصنيف الصور | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/image_classification-tf.ipynb)|
|
||||
| [كيفية ضبط نموذج SegFormer بدقة على التجزئة الدلالية](https://github.com/huggingface/notebooks/blob/main/examples/semantic_segmentation-tf.ipynb) | يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج SegFormer مُدرَّب مسبقًا بدقة على التجزئة الدلالية | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/semantic_segmentation-tf.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/semantic_segmentation-tf.ipynb)|
|
||||
|
||||
#### التسلسلات البيولوجية[[tensorflow-bio]]
|
||||
|
||||
| دفتر الملاحظات | الوصف | | |
|
||||
|:----------|:-------------|:-------------|------:|
|
||||
| [كيفية ضبط نموذج بروتين مُدرَّب مسبقًا بدقة](https://github.com/huggingface/notebooks/blob/main/examples/protein_language_modeling-tf.ipynb) | شاهد كيفية ترميز البروتينات وضبط نموذج "لغة" بروتين مُدرَّب مسبقًا كبير بدقة | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/protein_language_modeling-tf.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/protein_language_modeling-tf.ipynb) |
|
||||
|
||||
#### دفاتر ملاحظات الأدوات المساعدة [[tensorflow-utility]]
|
||||
|
||||
| دفتر الملاحظات | الوصف | | |
|
||||
|:----------|:-------------|:-------------|------:|
|
||||
| [كيفية تدريب نماذج TF/Keras على TPU](https://github.com/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb) | شاهد كيفية التدريب بسرعة عالية على أجهزة TPU من Google | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb) | [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/tpu_training-tf.ipynb) |
|
||||
|
||||
### دفاتر ملاحظات Optimum
|
||||
|
||||
🤗 [Optimum](https://github.com/huggingface/optimum) هو امتداد لـ 🤗 Transformers، يوفر مجموعة من أدوات تحسين الأداء التي تمكن من تحقيق أقصى قدر من الكفاءة لتدريب وتشغيل النماذج على الأجهزة المستهدفة.
|
||||
|
||||
| دفتر الملاحظات | الوصف | | |
|
||||
|:----------|:-------------|:-------------|------:|
|
||||
| [كيفية تكميم نموذج باستخدام ONNX Runtime لتصنيف النص](https://github.com/huggingface/notebooks/blob/main/examples/text_classification_quantization_ort.ipynb)| يوضح كيفية تطبيق التكميم الثابت والديناميكي على نموذج باستخدام [ONNX Runtime](https://github.com/microsoft/onnxruntime) لأي مهمة GLUE. | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_quantization_ort.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification_quantization_ort.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على تصنيف النص باستخدام ONNX Runtime](https://github.com/huggingface/notebooks/blob/main/examples/text_classification_ort.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج بدقة على أي مهمة GLUE باستخدام [ONNX Runtime](https://github.com/microsoft/onnxruntime). | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_ort.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/text_classification_ort.ipynb)|
|
||||
| [كيفية ضبط نموذج بدقة على التلخيص باستخدام ONNX Runtime](https://github.com/huggingface/notebooks/blob/main/examples/summarization_ort.ipynb)| يوضح كيفية معالجة البيانات مسبقًا وضبط نموذج بدقة على XSUM باستخدام [ONNX Runtime](https://github.com/microsoft/onnxruntime). | [](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/summarization_ort.ipynb)| [](https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/main/examples/summarization_ort.ipynb)|
|
||||
|
||||
|
||||
## دفاتر ملاحظات المجتمع:
|
||||
|
||||
تتوفر المزيد من دفاتر الملاحظات التي طورها المجتمع [هنا](https://hf.co/docs/transformers/community#community-notebooks).
|
||||
|
||||
@ -347,8 +347,8 @@ tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725],
|
||||
```py
|
||||
>>> from transformers import AutoModel
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
|
||||
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)
|
||||
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
@ -356,8 +356,8 @@ tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725],
|
||||
```py
|
||||
>>> from transformers import TFAutoModel
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)
|
||||
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
|
||||
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
@ -116,11 +116,11 @@ optimum-cli export onnx --model keras-io/transformers-qa distilbert_base_cased_s
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
لم يعد يتم دعم `transformers.onnx` يُرجى تصدير النماذج باستخدام 🤗 Optimum كما هو موضح أعلاه. سيتم إزالة هذا القسم في الإصدارات القادمة.
|
||||
لم يعد يتم دعم `tranformers.onnx` يُرجى تصدير النماذج باستخدام 🤗 Optimum كما هو موضح أعلاه. سيتم إزالة هذا القسم في الإصدارات القادمة.
|
||||
|
||||
</Tip>
|
||||
|
||||
لتصدير نموذج 🤗 Transformers إلى ONNX باستخدام `transformers.onnx`، ثبّت التبعيات الإضافية:
|
||||
لتصدير نموذج 🤗 Transformers إلى ONNX باستخدام `tranformers.onnx`، ثبّت التبعيات الإضافية:
|
||||
|
||||
```bash
|
||||
pip install transformers[onnx]
|
||||
|
||||
@ -1,422 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# نمذجة اللغة السببية (Causal language modeling)
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
هناك نوعان من نمذجة اللغة، السببية والمقنعة. يوضح هذا الدليل نمذجة اللغة السببية.
|
||||
تُستخدم نماذج اللغة السببية غالبًا لتوليد النص. يمكنك استخدام هذه النماذج للتطبيقات الإبداعية مثل
|
||||
ألعاب المغامرات النصية التفاعلية، أو مساعد برمجة ذكي مثل Copilot أو CodeParrot.
|
||||
|
||||
<Youtube id="Vpjb1lu0MDk"/>
|
||||
|
||||
تتنبأ نمذجة اللغة السببية بالرمز التالي في تسلسل من الرموز، ولا يمكن للنموذج سوى الاهتمام بالرموز على
|
||||
اليسار. هذا يعني أن النموذج لا يمكنه رؤية الرموز المستقبلية. GPT-2 هو مثال على نموذج اللغة السببية.
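
وكمثال توضيحي مبسّط (باستخدام نقطة التحقق `distilbert/distilgpt2` المتاحة على Hub)، يمكن تجربة توليد النص بنموذج لغة سببي جاهز عبر `pipeline`:

```python
from transformers import pipeline

# توليد نص بنموذج لغة سببي مُدرَّب مسبقًا: يتنبأ النموذج بالرموز التالية اعتمادًا على الرموز السابقة فقط
generator = pipeline("text-generation", model="distilbert/distilgpt2")

print(generator("Somatic hypermutation allows the immune system to", max_new_tokens=20))
```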
|
||||
|
||||
سيوضح لك هذا الدليل كيفية:
|
||||
|
||||
1. الضبط الدقيق لنموذج [DistilGPT2](https://huggingface.co/distilbert/distilgpt2) على المجموعة الفرعية [r/askscience](https://www.reddit.com/r/askscience/) من مجموعة بيانات [ELI5](https://huggingface.co/datasets/eli5).
|
||||
2. استخدام النموذج المدرب الخاص بك للاستدلال.
|
||||
|
||||
<Tip>
|
||||
|
||||
لرؤية جميع البنى ونقاط التحقق المتوافقة مع هذه المهمة، نوصي بمراجعة [صفحة المهمة](https://huggingface.co/tasks/text-generation)
|
||||
|
||||
</Tip>
|
||||
|
||||
قبل أن تبدأ، تأكد من تثبيت جميع المكتبات الضرورية:
|
||||
|
||||
```bash
|
||||
pip install transformers datasets evaluate
|
||||
```
|
||||
|
||||
نحن نشجعك على تسجيل الدخول إلى حساب Hugging Face الخاص بك حتى تتمكن من تحميل ومشاركة نموذجك مع المجتمع. عند المطالبة، أدخل رمزك لتسجيل الدخول:
|
||||
|
||||
```py
|
||||
>>> from huggingface_hub import notebook_login
|
||||
|
||||
>>> notebook_login()
|
||||
```
|
||||
|
||||
## تحميل مجموعة بيانات ELI5
|
||||
|
||||
ابدأ بتحميل أول 5000 مثال من مجموعة بيانات [ELI5-Category](https://huggingface.co/datasets/eli5_category) باستخدام مكتبة 🤗 Datasets. سيعطيك هذا فرصة للتجربة والتأكد من أن كل شيء يعمل قبل قضاء المزيد من الوقت في التدريب على مجموعة البيانات الكاملة.
|
||||
|
||||
```py
|
||||
>>> from datasets import load_dataset
|
||||
|
||||
>>> eli5 = load_dataset("eli5_category", split="train[:5000]")
|
||||
```
|
||||
|
||||
قم بتقسيم مجموعة بيانات `train` إلى مجموعتي تدريب واختبار باستخدام الدالة [`~datasets.Dataset.train_test_split`]:
|
||||
|
||||
```py
|
||||
>>> eli5 = eli5.train_test_split(test_size=0.2)
|
||||
```
|
||||
|
||||
ثم ألق نظرة على مثال:
|
||||
|
||||
```py
|
||||
>>> eli5["train"][0]
|
||||
{'q_id': '7h191n',
|
||||
'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?',
|
||||
'selftext': '',
|
||||
'category': 'Economics',
|
||||
'subreddit': 'explainlikeimfive',
|
||||
'answers': {'a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'],
|
||||
'text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.",
|
||||
'None yet. It has to be reconciled with a vastly different house bill and then passed again.',
|
||||
'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?',
|
||||
'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'],
|
||||
'score': [21, 19, 5, 3],
|
||||
'text_urls': [[],
|
||||
[],
|
||||
[],
|
||||
['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']]},
|
||||
'title_urls': ['url'],
|
||||
'selftext_urls': ['url']}
|
||||
```
|
||||
|
||||
على الرغم من أن هذا قد يبدو معقدًا، إلا أن ما يهمك فعلاً هو حقل `text`. والرائع في مهام نمذجة اللغة أنك لا تحتاج إلى تسميات (تُعرف أيضًا بالمهمة غير الخاضعة للإشراف)، لأن الكلمة التالية هي نفسها التسمية.
|
||||
|
||||
## معالجة مسبقة (Preprocess)
|
||||
|
||||
<Youtube id="ma1TrR7gE7I"/>
|
||||
|
||||
الخطوة التالية هي تحميل مُجزئ النصوص (tokenizer) الخاص بـ DistilGPT2 لمعالجة حقل `text` الفرعي:
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
|
||||
```
|
||||
|
||||
ستلاحظ من المثال أعلاه، الحقل `text` هو في الواقع متداخل داخل `answers`. هذا يعني أنك ستحتاج إلى
|
||||
استخراج حقل `text` الفرعي من بنيته المتداخلة باستخدام الدالة [`flatten`](https://huggingface.co/docs/datasets/process#flatten):
|
||||
|
||||
```py
|
||||
>>> eli5 = eli5.flatten()
|
||||
>>> eli5["train"][0]
|
||||
{'q_id': '7h191n',
|
||||
'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?',
|
||||
'selftext': '',
|
||||
'category': 'Economics',
|
||||
'subreddit': 'explainlikeimfive',
|
||||
'answers.a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'],
|
||||
'answers.text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.",
|
||||
'None yet. It has to be reconciled with a vastly different house bill and then passed again.',
|
||||
'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?',
|
||||
'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'],
|
||||
'answers.score': [21, 19, 5, 3],
|
||||
'answers.text_urls': [[],
|
||||
[],
|
||||
[],
|
||||
['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']],
|
||||
'title_urls': ['url'],
|
||||
'selftext_urls': ['url']}
|
||||
```
|
||||
|
||||
أصبح كل حقل فرعي الآن عمودًا منفصلًا مسبوقًا بالبادئة `answers`، وأصبح حقل `text` قائمة. بدلًا من تجزئة كل جملة على حدة، حوِّل القائمة إلى سلسلة نصية واحدة حتى تتمكن من تجزئتها دفعة واحدة.
|
||||
فيما يلي أول دالة معالجة مسبقة لدمج قائمة السلاسل النصية لكل مثال وتجزئة النتيجة:
|
||||
```py
|
||||
>>> def preprocess_function(examples):
|
||||
... return tokenizer([" ".join(x) for x in examples["answers.text"]])
|
||||
```
|
||||
|
||||
لتطبيق دالة المعالجة المسبقة هذه على مجموعة البيانات بأكملها، استخدم الدالة 🤗 Datasets [`~datasets.Dataset.map`]. يمكنك تسريع هذه العملية `map` عن طريق تعيين `batched=True` لمعالجة عناصر متعددة من مجموعة البيانات في وقت واحد، وزيادة عدد العمليات مع `num_proc`. احذف أي أعمدة لا تحتاجها:
|
||||
|
||||
```py
|
||||
>>> tokenized_eli5 = eli5.map(
|
||||
... preprocess_function,
|
||||
... batched=True,
|
||||
... num_proc=4,
|
||||
... remove_columns=eli5["train"].column_names,
|
||||
... )
|
||||
```
|
||||
|
||||
تحتوي مجموعة البيانات هذه الآن على تسلسلات من الرموز، لكن بعض هذه التسلسلات أطول من الحد الأقصى لطول مدخلات النموذج.
|
||||
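يمكنك التحقق من ذلك بنفسك إن أردت؛ المقتطف التالي مجرد توضيح اختياري (بافتراض أن `tokenizer` و `tokenized_eli5` معرّفان كما في الخطوات السابقة):

```py
>>> # الحد الأقصى لطول المدخلات الذي يقبله المُجزئ/النموذج
>>> print(tokenizer.model_max_length)

>>> # أطوال أول ثلاثة تسلسلات بعد التجزئة؛ قد يتجاوز بعضها هذا الحد
>>> print([len(ids) for ids in tokenized_eli5["train"][:3]["input_ids"]])
```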
يمكنك الآن استخدام دالة ما قبل المعالجة ثانية لـ:
|
||||
|
||||
- تجميع كل التسلسلات.
|
||||
- تقسيم التسلسلات المجمّعة إلى أجزاء أقصر محددة، بحجم `block_size`، والتي يجب أن تكون أقصر من الطول الأقصى للمدخلات ومناسبة لذاكرة GPU.
|
||||
|
||||
```py
|
||||
>>> block_size = 128
|
||||
|
||||
>>> def group_texts(examples):
|
||||
... # ربط جميع النصوص.
|
||||
... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
|
||||
... total_length = len(concatenated_examples[list(examples.keys())[0]])
|
||||
... # نتجاهل الباقي الصغير، يمكننا إضافة الحشو إذا كان النموذج يدعمه بدلاً من هذا الإسقاط، يمكنك
|
||||
... # تخصيص هذا الجزء حسب احتياجاتك.
|
||||
... if total_length >= block_size:
|
||||
... total_length = (total_length // block_size) * block_size
|
||||
... # التقسيم إلى أجزاء بحجم block_size.
|
||||
... result = {
|
||||
... k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
|
||||
... for k, t in concatenated_examples.items()
|
||||
... }
|
||||
... result["labels"] = result["input_ids"].copy()
|
||||
... return result
|
||||
```
|
||||
|
||||
طبق دالة `group_texts` على كامل المجموعة من البيانات:
|
||||
|
||||
```py
|
||||
>>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4)
|
||||
```
|
||||
|
||||
الآن قم بإنشاء دفعة من الأمثلة باستخدام [`DataCollatorForLanguageModeling`]. من الأفضل أن تقوم بـ *الحشو الديناميكي* للجمل إلى الطول الأطول في الدفعة أثناء التجميع، بدلاً من حشو كامل المجموعة من البيانات إلى الطول الأقصى.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
استخدم رمز نهاية التسلسل كرمز للحشو، وحدد `mlm_probability` لحجب الرموز بشكل عشوائي عند كل تكرار للبيانات:
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorForLanguageModeling
|
||||
|
||||
>>> tokenizer.pad_token = tokenizer.eos_token
|
||||
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
|
||||
```
|
||||
|
||||
</pt>
|
||||
<tf>
|
||||
استخدم رمز نهاية التسلسل كرمز للحشو، وحدد `mlm_probability` لحجب الرموز بشكل عشوائي عند كل تكرار للبيانات:
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorForLanguageModeling
|
||||
|
||||
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False, return_tensors="tf")
|
||||
```
|
||||
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## التدريب (Train)
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بتدريب نموذج باستخدام [`Trainer`], اطلع على [البرنامج التعليمي الأساسي](../training#train-with-pytorch-trainer)!
|
||||
|
||||
</Tip>
|
||||
|
||||
أنت جاهز الآن لبدء تدريب نموذجك! قم بتحميل DistilGPT2 باستخدام [`AutoModelForCausalLM`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForCausalLM, TrainingArguments, Trainer
|
||||
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
|
||||
```
|
||||
|
||||
في هذه المرحلة، تبقى ثلاث خطوات فقط:
|
||||
|
||||
1. حدد معلمات التدريب الخاصة بك في [`TrainingArguments`]. المعامل الوحيد المطلوب هو `output_dir` الذي يحدد أين سيتم حفظ نموذجك. ستقوم بدفع هذا النموذج إلى Hub بتحديد `push_to_hub=True` (يجب أن تكون مسجلاً الدخول إلى Hugging Face لتحميل نموذجك).
|
||||
2. قم بتمرير معاملات التدريب إلى [`Trainer`] إلى جانب النموذج ومجموعات البيانات ومُجمِّع البيانات.
3. قم باستدعاء [`~Trainer.train`] لتدريب نموذجك.
|
||||
|
||||
```py
|
||||
>>> training_args = TrainingArguments(
|
||||
... output_dir="my_awesome_eli5_clm-model",
|
||||
... eval_strategy="epoch",
|
||||
... learning_rate=2e-5,
|
||||
... weight_decay=0.01,
|
||||
... push_to_hub=True,
|
||||
... )
|
||||
|
||||
>>> trainer = Trainer(
|
||||
... model=model,
|
||||
... args=training_args,
|
||||
... train_dataset=lm_dataset["train"],
|
||||
... eval_dataset=lm_dataset["test"],
|
||||
... data_collator=data_collator,
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
|
||||
>>> trainer.train()
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، استخدم طريقة [`~transformers.Trainer.evaluate`] لتقييم نموذجك والحصول على درجة الحيرة (perplexity) الخاصة به:
|
||||
```py
|
||||
>>> import math
|
||||
|
||||
>>> eval_results = trainer.evaluate()
|
||||
>>> print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")
|
||||
Perplexity: 49.61
|
||||
```
|
||||
|
||||
ثم شارك نموذجك على Hub باستخدام طريقة [`~transformers.Trainer.push_to_hub`] حتى يتمكن الجميع من استخدام نموذجك:
|
||||
|
||||
```py
|
||||
>>> trainer.push_to_hub()
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بتدريب نموذج باستخدام Keras، اطلع على [البرنامج التعليمي الأساسي](../training#train-a-tensorflow-model-with-keras)!
|
||||
|
||||
</Tip>
|
||||
لتدريب نموذج في TensorFlow، ابدأ بإعداد دالة المحسن، وجدول معدل التعلم، وبعض معاملات التدريب:
|
||||
|
||||
```py
|
||||
>>> from transformers import create_optimizer, AdamWeightDecay
|
||||
|
||||
>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)
|
||||
```
|
||||
|
||||
ثم يمكنك تحميل DistilGPT2 باستخدام [`TFAutoModelForCausalLM`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForCausalLM
|
||||
|
||||
>>> model = TFAutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
|
||||
```
|
||||
|
||||
حول مجموعات بياناتك إلى تنسيق `tf.data.Dataset` باستخدام [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
|
||||
|
||||
```py
|
||||
>>> tf_train_set = model.prepare_tf_dataset(
|
||||
... lm_dataset["train"],
|
||||
... shuffle=True,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
|
||||
>>> tf_test_set = model.prepare_tf_dataset(
|
||||
... lm_dataset["test"],
|
||||
... shuffle=False,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
```
|
||||
|
||||
قم بتهيئة النموذج للتدريب باستخدام [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). لاحظ أن جميع نماذج Transformers لديها دالة خسارة ذات صلة بالمهمة الافتراضية، لذلك لا تحتاج إلى تحديد واحدة ما لم ترغب في ذلك:
|
||||
|
||||
```py
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> model.compile(optimizer=optimizer)  # لا حاجة لوسيطة الخسارة!
```
|
||||
|
||||
الخطوة الأخيرة قبل بدء التدريب هي إعداد طريقة لدفع نموذجك إلى Hub. يمكن القيام بذلك عن طريق تحديد مكان دفع نموذجك والمُجزئ في [`~transformers.PushToHubCallback`]:
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import PushToHubCallback
|
||||
|
||||
>>> callback = PushToHubCallback(
|
||||
... output_dir="my_awesome_eli5_clm-model",
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
```
|
||||
|
||||
أخيرًا، أنت جاهز لبدء تدريب نموذجك! قم باستدعاء [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) مع مجموعات بيانات التدريب والتحقق من الصحة، وعدد العصور، ودوال الاستدعاء (callbacks) الخاصة بك لتدريب النموذج:
|
||||
```py
|
||||
>>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=[callback])
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، يتم تحميل نموذجك تلقائيًا إلى Hub حتى يتمكن الجميع من استخدامه!
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
<Tip>
|
||||
|
||||
للحصول على مثال أكثر تعمقًا حول كيفية تدريب نموذج للنمذجة اللغوية السببية، اطلع على الدفتر المقابل
|
||||
[دفتر PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)
|
||||
أو [دفتر TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
|
||||
|
||||
</Tip>
|
||||
|
||||
## الاستدلال (Inference)
|
||||
|
||||
رائع، الآن بعد أن قمت بتدريب نموذج، يمكنك استخدامه للاستدلال!
|
||||
|
||||
اكتب نصًّا (prompt) تود توليد نص انطلاقًا منه:
|
||||
```py
|
||||
>>> prompt = "Somatic hypermutation allows the immune system to"
|
||||
```
|
||||
|
||||
أبسط طريقة لتجربة نموذجك المدرب للاستدلال هي استخدامه في [`pipeline`]. قم بتنفيذ `pipeline` لتوليد النص مع نموذجك، ومرر نصك إليه:
|
||||
|
||||
```py
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> generator = pipeline("text-generation", model="username/my_awesome_eli5_clm-model")
|
||||
>>> generator(prompt)
|
||||
[{'generated_text': "Somatic hypermutation allows the immune system to be able to effectively reverse the damage caused by an infection.\n\n\nThe damage caused by an infection is caused by the immune system's ability to perform its own self-correcting tasks."}]
|
||||
```
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
قم بتجزئة النص وأرجِع `input_ids` كموترات PyTorch:
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_clm-model")
|
||||
>>> inputs = tokenizer(prompt, return_tensors="pt").input_ids
|
||||
```
|
||||
|
||||
استخدم طريقة [`~generation.GenerationMixin.generate`] لتوليد النص.
|
||||
للمزيد من التفاصيل حول استراتيجيات توليد النص المختلفة والبارامترات للتحكم في التوليد، راجع صفحة [استراتيجيات توليد النص](../generation_strategies).
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForCausalLM
|
||||
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("username/my_awesome_eli5_clm-model")
|
||||
>>> outputs = model.generate(inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95)
|
||||
```
|
||||
|
||||
فك ترميز الرموز المولدة مرة أخرى إلى نص:
|
||||
|
||||
```py
|
||||
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
["Somatic hypermutation allows the immune system to react to drugs with the ability to adapt to a different environmental situation. In other words, a system of 'hypermutation' can help the immune system to adapt to a different environmental situation or in some cases even a single life. In contrast, researchers at the University of Massachusetts-Boston have found that 'hypermutation' is much stronger in mice than in humans but can be found in humans, and that it's not completely unknown to the immune system. A study on how the immune system"]
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
قم بتقسيم النص وإرجاع `input_ids` كـ TensorFlow tensors:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_clm-model")
|
||||
>>> inputs = tokenizer(prompt, return_tensors="tf").input_ids
|
||||
```
|
||||
|
||||
استخدم طريقة [`~transformers.generation_tf_utils.TFGenerationMixin.generate`] لتوليد النص. للمزيد من التفاصيل حول استراتيجيات توليد النص المختلفة والبارامترات للتحكم في التوليد، راجع صفحة [استراتيجيات توليد النص](../generation_strategies).
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForCausalLM
|
||||
|
||||
>>> model = TFAutoModelForCausalLM.from_pretrained("username/my_awesome_eli5_clm-model")
|
||||
>>> outputs = model.generate(input_ids=inputs, max_new_tokens=100, do_sample=True, top_k=50, top_p=0.95)
|
||||
```
|
||||
|
||||
فك ترميز الرموز المولدة مرة أخرى إلى نص:
|
||||
|
||||
```py
|
||||
>>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
['Somatic hypermutation allows the immune system to detect the presence of other viruses as they become more prevalent. Therefore, researchers have identified a high proportion of human viruses. The proportion of virus-associated viruses in our study increases with age. Therefore, we propose a simple algorithm to detect the presence of these new viruses in our samples as a sign of improved immunity. A first study based on this algorithm, which will be published in Science on Friday, aims to show that this finding could translate into the development of a better vaccine that is more effective for']
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
@ -1,442 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# نمذجة اللغة المقنعة (Masked language modeling)
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
<Youtube id="mqElG5QJWUg"/>
|
||||
|
||||
تتنبأ نمذجة اللغة المقنعة برمز مقنع في تسلسل، ويمكن للنموذج الانتباه إلى الرموز بشكل ثنائي الاتجاه. هذا
|
||||
يعني أن النموذج لديه إمكانية الوصول الكاملة إلى الرموز الموجودة على اليسار واليمين. تعد نمذجة اللغة المقنعة ممتازة للمهام التي
|
||||
تتطلب فهمًا سياقيًا جيدًا لتسلسل كامل. BERT هو مثال على نموذج لغة مقنع.
|
||||
|
||||
سيوضح لك هذا الدليل كيفية:
|
||||
|
||||
1. ضبط [DistilRoBERTa](https://huggingface.co/distilbert/distilroberta-base) بدقة على المجموعة الفرعية [r/askscience](https://www.reddit.com/r/askscience/) من مجموعة بيانات [ELI5](https://huggingface.co/datasets/eli5).
2. استخدام نموذجك المضبوط للاستدلال.
|
||||
<Tip>
|
||||
|
||||
لمعرفة جميع البنى والنسخ المتوافقة مع هذه المهمة، نوصي بالتحقق من [صفحة المهمة](https://huggingface.co/tasks/fill-mask)
|
||||
|
||||
</Tip>
|
||||
|
||||
قبل أن تبدأ، تأكد من تثبيت جميع المكتبات الضرورية:
|
||||
|
||||
```bash
|
||||
pip install transformers datasets evaluate
|
||||
```
|
||||
|
||||
نحن نشجعك على تسجيل الدخول إلى حساب Hugging Face الخاص بك حتى تتمكن من تحميل ومشاركة نموذجك مع المجتمع. عندما تتم مطالبتك، أدخل رمزك لتسجيل الدخول:
|
||||
|
||||
```py
|
||||
>>> from huggingface_hub import notebook_login
|
||||
|
||||
>>> notebook_login()
|
||||
```
|
||||
|
||||
## تحميل مجموعة بيانات ELI5
|
||||
|
||||
ابدأ بتحميل أول 5000 مثال من مجموعة بيانات [ELI5-Category](https://huggingface.co/datasets/eli5_category) باستخدام مكتبة 🤗 Datasets. سيعطيك هذا فرصة للتجربة والتأكد من أن كل شيء يعمل قبل قضاء المزيد من الوقت في التدريب على مجموعة البيانات الكاملة.
|
||||
|
||||
```py
|
||||
>>> from datasets import load_dataset
|
||||
|
||||
>>> eli5 = load_dataset("eli5_category", split="train[:5000]")
|
||||
```
|
||||
|
||||
قم بتقسيم مجموعة البيانات `train` إلى مجموعتي تدريب واختبار باستخدام الدالة [`~datasets.Dataset.train_test_split`]:
|
||||
|
||||
```py
|
||||
>>> eli5 = eli5.train_test_split(test_size=0.2)
|
||||
```
|
||||
|
||||
ثم ألق نظرة على مثال:
|
||||
|
||||
```py
|
||||
>>> eli5["train"][0]
|
||||
{'q_id': '7h191n',
|
||||
'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?',
|
||||
'selftext': '',
|
||||
'category': 'Economics',
|
||||
'subreddit': 'explainlikeimfive',
|
||||
'answers': {'a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'],
|
||||
'text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.",
|
||||
'None yet. It has to be reconciled with a vastly different house bill and then passed again.',
|
||||
'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?',
|
||||
'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'],
|
||||
'score': [21, 19, 5, 3],
|
||||
'text_urls': [[],
|
||||
[],
|
||||
[],
|
||||
['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']]},
|
||||
'title_urls': ['url'],
|
||||
'selftext_urls': ['url']}
|
||||
```
|
||||
|
||||
على الرغم من أن هذا قد يبدو كثيرًا، إلا أنك مهتم حقًا بحقل `text`. ما هو رائع حول مهام نمذجة اللغة هو أنك لا تحتاج إلى تسميات (تُعرف أيضًا باسم المهمة غير الخاضعة للإشراف) لأن الكلمة التالية *هي* التسمية.
|
||||
|
||||
## معالجة مسبقة (Preprocess)
|
||||
|
||||
<Youtube id="8PmhEIXhBvI"/>
|
||||
|
||||
بالنسبة لنمذجة اللغة المقنعة، فإن الخطوة التالية هي تحميل مُجزئ النصوص (tokenizer) الخاص بـ DistilRoBERTa لمعالجة حقل `text` الفرعي:
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilroberta-base")
|
||||
```
|
||||
|
||||
ستلاحظ من المثال أعلاه، أن حقل `text` موجود بالفعل داخل `answers`. هذا يعني أنك ستحتاج إلى استخراج حقل `text` الفرعي من بنيته المضمنة باستخدام الدالة [`flatten`](https://huggingface.co/docs/datasets/process#flatten):
|
||||
|
||||
```py
|
||||
>>> eli5 = eli5.flatten()
|
||||
>>> eli5["train"][0]
|
||||
{'q_id': '7h191n',
|
||||
'title': 'What does the tax bill that was passed today mean? How will it affect Americans in each tax bracket?',
|
||||
'selftext': '',
|
||||
'category': 'Economics',
|
||||
'subreddit': 'explainlikeimfive',
|
||||
'answers.a_id': ['dqnds8l', 'dqnd1jl', 'dqng3i1', 'dqnku5x'],
|
||||
'answers.text': ["The tax bill is 500 pages long and there were a lot of changes still going on right to the end. It's not just an adjustment to the income tax brackets, it's a whole bunch of changes. As such there is no good answer to your question. The big take aways are: - Big reduction in corporate income tax rate will make large companies very happy. - Pass through rate change will make certain styles of business (law firms, hedge funds) extremely happy - Income tax changes are moderate, and are set to expire (though it's the kind of thing that might just always get re-applied without being made permanent) - People in high tax states (California, New York) lose out, and many of them will end up with their taxes raised.",
|
||||
'None yet. It has to be reconciled with a vastly different house bill and then passed again.',
|
||||
'Also: does this apply to 2017 taxes? Or does it start with 2018 taxes?',
|
||||
'This article explains both the House and senate bills, including the proposed changes to your income taxes based on your income level. URL_0'],
|
||||
'answers.score': [21, 19, 5, 3],
|
||||
'answers.text_urls': [[],
|
||||
[],
|
||||
[],
|
||||
['https://www.investopedia.com/news/trumps-tax-reform-what-can-be-done/']],
|
||||
'title_urls': ['url'],
|
||||
'selftext_urls': ['url']}
|
||||
```
|
||||
|
||||
كل حقل فرعي هو الآن عمود منفصل كما هو موضح بواسطة بادئة `answers`، وحقل `text` هو قائمة الآن. بدلاً من
|
||||
معالجة كل جملة بشكل منفصل، قم بتحويل القائمة إلى سلسلة حتى تتمكن من معالجتها بشكل مشترك.
|
||||
|
||||
هنا أول دالة معالجة مسبقة لربط قائمة السلاسل لكل مثال ومعالجة النتيجة:
|
||||
|
||||
```py
|
||||
>>> def preprocess_function(examples):
|
||||
... return tokenizer([" ".join(x) for x in examples["answers.text"]])
|
||||
```
|
||||
|
||||
لتطبيق دالة المعالجة المسبقة على مجموعة البيانات بأكملها، استخدم الدالة 🤗 Datasets [`~datasets.Dataset.map`]. يمكنك تسريع دالة `map` عن طريق تعيين `batched=True` لمعالجة عدة عناصر في وقت واحد، وزيادة عدد العمليات باستخدام `num_proc`. احذف أي أعمدة غير ضرورية:
|
||||
|
||||
```py
|
||||
>>> tokenized_eli5 = eli5.map(
|
||||
... preprocess_function,
|
||||
... batched=True,
|
||||
... num_proc=4,
|
||||
... remove_columns=eli5["train"].column_names,
|
||||
... )
|
||||
```
|
||||
|
||||
|
||||
تحتوي مجموعة البيانات هذه على تسلسلات رمزية، ولكن بعضها أطول من الطول الأقصى للمدخلات للنموذج.
|
||||
|
||||
يمكنك الآن استخدام دالة معالجة مسبقة ثانية لـ:
|
||||
- تجميع جميع التسلسلات
|
||||
- تقسيم التسلسلات المجمّعة إلى أجزاء أقصر محددة بـ `block_size`، والتي يجب أن تكون أقصر من الحد الأقصى لطول المدخلات ومناسبة لذاكرة GPU.
|
||||
|
||||
```py
|
||||
>>> block_size = 128
|
||||
|
||||
>>> def group_texts(examples):
|
||||
... # تجميع جميع النصوص.
|
||||
... concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()}
|
||||
... total_length = len(concatenated_examples[list(examples.keys())[0]])
|
||||
... # نتجاهل الجزء المتبقي الصغير، يمكننا إضافة الحشو إذا كان النموذج يدعمه بدلاً من هذا الإسقاط، يمكنك
|
||||
... # تخصيص هذا الجزء حسب احتياجاتك.
|
||||
... if total_length >= block_size:
|
||||
... total_length = (total_length // block_size) * block_size
|
||||
... # تقسيمها إلى أجزاء بحجم block_size.
|
||||
... result = {
|
||||
... k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
|
||||
... for k, t in concatenated_examples.items()
|
||||
... }
|
||||
... return result
|
||||
```
|
||||
|
||||
طبق دالة `group_texts` على مجموعة البيانات بأكملها:
|
||||
|
||||
```py
|
||||
>>> lm_dataset = tokenized_eli5.map(group_texts, batched=True, num_proc=4)
|
||||
```
|
||||
|
||||
الآن، قم بإنشاء دفعة من الأمثلة باستخدام [`DataCollatorForLanguageModeling`]. من الأكثر كفاءة أن تقوم بـ *الحشو الديناميكي* ليصل طولها إلى أطول جملة في الدفعة أثناء التجميع، بدلاً من حشو مجموعة البيانات بأكملها إلى الطول الأقصى.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
استخدم رمز نهاية التسلسل كرمز الحشو وحدد `mlm_probability` لحجب الرموز عشوائياً كل مرة تكرر فيها البيانات:
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorForLanguageModeling
|
||||
|
||||
>>> tokenizer.pad_token = tokenizer.eos_token
|
||||
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
|
||||
استخدم رمز نهاية التسلسل كرمز الحشو وحدد `mlm_probability` لحجب الرموز عشوائياً كل مرة تكرر فيها البيانات:
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorForLanguageModeling
|
||||
|
||||
>>> data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15, return_tensors="tf")
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## التدريب (Train)
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بتعديل نموذج باستخدام [`Trainer`], ألق نظرة على الدليل الأساسي [هنا](../training#train-with-pytorch-trainer)!
|
||||
|
||||
</Tip>
|
||||
|
||||
أنت مستعد الآن لبدء تدريب نموذجك! قم بتحميل DistilRoBERTa باستخدام [`AutoModelForMaskedLM`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForMaskedLM
|
||||
|
||||
>>> model = AutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base")
|
||||
```
|
||||
|
||||
في هذه المرحلة، تبقى ثلاث خطوات فقط:
|
||||
|
||||
1. حدد معلمات التدريب الخاصة بك في [`TrainingArguments`]. المعلمة الوحيدة المطلوبة هي `output_dir` والتي تحدد مكان حفظ نموذجك. ستقوم بدفع هذا النموذج إلى Hub عن طريق تعيين `push_to_hub=True` (يجب أن تكون مسجلاً الدخول إلى Hugging Face لتحميل نموذجك).
|
||||
2. قم بتمرير معلمات التدريب إلى [`Trainer`] مع النموذج، ومجموعات البيانات، ومجمّع البيانات.
|
||||
3. قم باستدعاء [`~Trainer.train`] لتعديل نموذجك.
|
||||
|
||||
```py
|
||||
>>> training_args = TrainingArguments(
|
||||
... output_dir="my_awesome_eli5_mlm_model",
|
||||
... eval_strategy="epoch",
|
||||
... learning_rate=2e-5,
|
||||
... num_train_epochs=3,
|
||||
... weight_decay=0.01,
|
||||
... push_to_hub=True,
|
||||
... )
|
||||
|
||||
>>> trainer = Trainer(
|
||||
... model=model,
|
||||
... args=training_args,
|
||||
... train_dataset=lm_dataset["train"],
|
||||
... eval_dataset=lm_dataset["test"],
|
||||
... data_collator=data_collator,
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
|
||||
>>> trainer.train()
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، استخدم طريقة [`~transformers.Trainer.evaluate`] لتقييم النموذج والحصول على مقياس
|
||||
الحيرة:
|
||||
|
||||
```py
|
||||
>>> import math
|
||||
|
||||
>>> eval_results = trainer.evaluate()
|
||||
>>> print(f"Perplexity: {math.exp(eval_results['eval_loss']):.2f}")
|
||||
Perplexity: 8.76
|
||||
```
|
||||
|
||||
ثم شارك نموذجك على Hub باستخدام طريقة [`~transformers.Trainer.push_to_hub`] حتى يتمكن الجميع من استخدام نموذجك:
|
||||
|
||||
```py
|
||||
>>> trainer.push_to_hub()
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بتعديل نموذج باستخدام Keras، ألق نظرة على الدليل الأساسي [هنا](../training#train-a-tensorflow-model-with-keras)!
|
||||
|
||||
</Tip>
|
||||
لتعديل نموذج في TensorFlow، ابدأ بإعداد دالة محسن، وجدول معدل التعلم، وبعض معلمات التدريب:
|
||||
|
||||
```py
|
||||
>>> from transformers import create_optimizer, AdamWeightDecay
|
||||
|
||||
>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)
|
||||
```
|
||||
|
||||
ثم يمكنك تحميل DistilRoBERTa باستخدام [`TFAutoModelForMaskedLM`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForMaskedLM
|
||||
|
||||
>>> model = TFAutoModelForMaskedLM.from_pretrained("distilbert/distilroberta-base")
|
||||
```
|
||||
|
||||
قم بتحويل مجموعات بياناتك إلى تنسيق `tf.data.Dataset` باستخدام [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
|
||||
|
||||
```py
|
||||
>>> tf_train_set = model.prepare_tf_dataset(
|
||||
... lm_dataset["train"],
|
||||
... shuffle=True,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
|
||||
>>> tf_test_set = model.prepare_tf_dataset(
|
||||
... lm_dataset["test"],
|
||||
... shuffle=False,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
```
|
||||
|
||||
قم بتهيئة النموذج للتدريب باستخدام [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). لاحظ أن نماذج Transformers لديها جميعها دالة خسارة افتراضية ذات صلة بالمهمة، لذلك لا تحتاج إلى تحديد واحدة ما لم تكن تريد ذلك:
|
||||
|
||||
```py
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> model.compile(optimizer=optimizer)  # لا توجد وسيطة للخسارة!
```
|
||||
|
||||
الخطوة الأخيرة قبل بدء التدريب هي إعداد طريقة لدفع نموذجك إلى Hub. يمكن القيام بذلك عن طريق تحديد مكان دفع نموذجك والمُجزئ في [`~transformers.PushToHubCallback`]:
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import PushToHubCallback
|
||||
|
||||
>>> callback = PushToHubCallback(
|
||||
... output_dir="my_awesome_eli5_mlm_model",
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
```
|
||||
|
||||
أخيرًا، أنت مستعد لبدء تدريب نموذجك! قم باستدعاء [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) مع مجموعات بيانات التدريب والتحقق، وعدد العصور، ودوال الاستدعاء (callbacks) الخاصة بك لتعديل النموذج:
|
||||
```py
|
||||
>>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=[callback])
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، يتم تحميل نموذجك تلقائياً إلى Hub حتى يتمكن الجميع من استخدامه!
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
<Tip>
|
||||
|
||||
لمثال أكثر تفصيلاً حول كيفية تعديل نموذج للنمذجة اللغوية المقنعة، ألق نظرة على الدفتر المقابل
|
||||
[دفتر PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)
|
||||
أو [دفتر TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb).
|
||||
|
||||
</Tip>
|
||||
|
||||
## الاستدلال
|
||||
|
||||
رائع، الآن بعد أن قمت بتعديل نموذج، يمكنك استخدامه للاستدلال!
|
||||
|
||||
جهّز بعض النصوص التي تريد أن يملأ النموذج الفراغات فيها، واستخدم الرمز الخاص `<mask>` للإشارة إلى الفراغ:
|
||||
|
||||
```py
|
||||
>>> text = "The Milky Way is a <mask> galaxy."
|
||||
```
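ملاحظة توضيحية اختيارية: رمز القناع يختلف بين المُجزئات؛ بما أن هذا النموذج مبني على DistilRoBERTa فالرمز هو `<mask>`، ويمكنك دائمًا التحقق منه عبر الخاصية `mask_token` (بافتراض أن المُجزئ محمَّل كما في الأقسام التالية):

```py
>>> # رمز القناع الذي يتوقعه المُجزئ
>>> tokenizer.mask_token
'<mask>'
```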
|
||||
|
||||
أبسط طريقة لتجربة نموذجك المعدل للاستدلال هي استخدامه في [`pipeline`]. قم بإنشاء كائن `pipeline` لملء الفراغ مع نموذجك، ومرر نصك إليه. إذا أردت، يمكنك استخدام معلمة `top_k` لتحديد عدد التنبؤات التي تريد إرجاعها:
|
||||
|
||||
```py
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> mask_filler = pipeline("fill-mask", "username/my_awesome_eli5_mlm_model")
|
||||
>>> mask_filler(text, top_k=3)
|
||||
[{'score': 0.5150994658470154,
|
||||
'token': 21300,
|
||||
'token_str': ' spiral',
|
||||
'sequence': 'The Milky Way is a spiral galaxy.'},
|
||||
{'score': 0.07087188959121704,
|
||||
'token': 2232,
|
||||
'token_str': ' massive',
|
||||
'sequence': 'The Milky Way is a massive galaxy.'},
|
||||
{'score': 0.06434620916843414,
|
||||
'token': 650,
|
||||
'token_str': ' small',
|
||||
'sequence': 'The Milky Way is a small galaxy.'}]
|
||||
```
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
قم بتجزئة النص وإرجاع `input_ids` كمتجهات PyTorch. ستحتاج أيضًا إلى تحديد موضع رمز `<mask>`:
|
||||
|
||||
```py
|
||||
>>> import torch
>>> from transformers import AutoTokenizer
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_mlm_model")
|
||||
>>> inputs = tokenizer(text, return_tensors="pt")
|
||||
>>> mask_token_index = torch.where(inputs["input_ids"] == tokenizer.mask_token_id)[1]
|
||||
```
|
||||
|
||||
قم بتمرير المدخلات إلى النموذج وإرجاع `logits` للرمز المقنع:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForMaskedLM
|
||||
|
||||
>>> model = AutoModelForMaskedLM.from_pretrained("username/my_awesome_eli5_mlm_model")
|
||||
>>> logits = model(**inputs).logits
|
||||
>>> mask_token_logits = logits[0, mask_token_index, :]
|
||||
```
|
||||
|
||||
ثم قم بإرجاع الرموز الثلاثة المقنعة ذات الاحتمالية الأعلى وطباعتها:
|
||||
|
||||
```py
|
||||
>>> top_3_tokens = torch.topk(mask_token_logits, 3, dim=1).indices[0].tolist()
|
||||
|
||||
>>> for token in top_3_tokens:
|
||||
... print(text.replace(tokenizer.mask_token, tokenizer.decode([token])))
|
||||
The Milky Way is a spiral galaxy.
|
||||
The Milky Way is a massive galaxy.
|
||||
The Milky Way is a small galaxy.
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
قم بتقسيم النص إلى رموز وإرجاع `input_ids` كـ TensorFlow tensors. ستحتاج أيضًا إلى تحديد موضع رمز `<mask>`:
|
||||
|
||||
```py
|
||||
>>> import tensorflow as tf
>>> from transformers import AutoTokenizer
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_eli5_mlm_model")
|
||||
>>> inputs = tokenizer(text, return_tensors="tf")
|
||||
>>> mask_token_index = tf.where(inputs["input_ids"] == tokenizer.mask_token_id)[0, 1]
|
||||
```
|
||||
|
||||
قم بتمرير المدخلات إلى النموذج وإرجاع `logits` للرمز المقنع:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForMaskedLM
|
||||
|
||||
>>> model = TFAutoModelForMaskedLM.from_pretrained("username/my_awesome_eli5_mlm_model")
|
||||
>>> logits = model(**inputs).logits
|
||||
>>> mask_token_logits = logits[0, mask_token_index, :]
|
||||
```
|
||||
|
||||
ثم قم بإرجاع الرموز الثلاثة المقنعة ذات الاحتمالية الأعلى وطباعتها:
|
||||
|
||||
```py
|
||||
>>> top_3_tokens = tf.math.top_k(mask_token_logits, 3).indices.numpy()
|
||||
|
||||
>>> for token in top_3_tokens:
|
||||
... print(text.replace(tokenizer.mask_token, tokenizer.decode([token])))
|
||||
The Milky Way is a spiral galaxy.
|
||||
The Milky Way is a massive galaxy.
|
||||
The Milky Way is a small galaxy.
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
@ -1,452 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# الاختيار من متعدد (Multiple choice)
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
مهمة الاختيار من متعدد مشابهة لمهمة الإجابة على الأسئلة، ولكن مع توفير عدة إجابات محتملة مع سياق، ويُدرّب النموذج على تحديد الإجابة الصحيحة.
|
||||
|
||||
سيوضح لك هذا الدليل كيفية:
|
||||
|
||||
1. ضبط نموذج [BERT](https://huggingface.co/google-bert/bert-base-uncased) باستخدام الإعداد `regular` لمجموعة بيانات [SWAG](https://huggingface.co/datasets/swag) لاختيار الإجابة الأفضل من بين الخيارات المتعددة المتاحة مع السياق.
|
||||
2. استخدام النموذج المضبوط للاستدلال.
|
||||
|
||||
قبل البدء، تأكد من تثبيت جميع المكتبات الضرورية:
|
||||
|
||||
```bash
|
||||
pip install transformers datasets evaluate
|
||||
```
|
||||
|
||||
نشجعك على تسجيل الدخول إلى حساب Hugging Face الخاص بك حتى تتمكن من تحميل نموذجك ومشاركته مع المجتمع. عند المطالبة، أدخل الرمز المميز الخاص بك لتسجيل الدخول:
|
||||
|
||||
```py
|
||||
>>> from huggingface_hub import notebook_login
|
||||
|
||||
>>> notebook_login()
|
||||
```
|
||||
|
||||
## تحميل مجموعة بيانات SWAG
|
||||
|
||||
ابدأ بتحميل تهيئة `regular` لمجموعة بيانات SWAG من مكتبة 🤗 Datasets:
|
||||
|
||||
```py
|
||||
>>> from datasets import load_dataset
|
||||
|
||||
>>> swag = load_dataset("swag", "regular")
|
||||
```
|
||||
|
||||
ثم ألق نظرة على مثال:
|
||||
|
||||
```py
|
||||
>>> swag["train"][0]
|
||||
{'ending0': 'passes by walking down the street playing their instruments.',
|
||||
'ending1': 'has heard approaching them.',
|
||||
'ending2': "arrives and they're outside dancing and asleep.",
|
||||
'ending3': 'turns the lead singer watches the performance.',
|
||||
'fold-ind': '3416',
|
||||
'gold-source': 'gold',
|
||||
'label': 0,
|
||||
'sent1': 'Members of the procession walk down the street holding small horn brass instruments.',
|
||||
'sent2': 'A drum line',
|
||||
'startphrase': 'Members of the procession walk down the street holding small horn brass instruments. A drum line',
|
||||
'video-id': 'anetv_jkn6uvmqwh4'}
|
||||
```
|
||||
|
||||
على الرغم من أن الحقول تبدو كثيرة، إلا أنها في الواقع بسيطة جدًا (انظر المقتطف التوضيحي بعد القائمة):
|
||||
- `sent1` و `sent2`: يعرض هذان الحقلان بداية الجملة، وبدمجهما معًا، نحصل على حقل `startphrase`.
|
||||
- `ending`: يقترح نهاية محتملة للجملة، واحدة منها فقط هي الصحيحة.
|
||||
- `label`: يحدد نهاية الجملة الصحيحة.
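
فيما يلي مقتطف توضيحي قصير يبين كيفية تركيب الجمل الأربع المرشحة من هذه الحقول (بافتراض أن `swag` مُحمَّل كما في الخطوة السابقة):

```py
>>> example = swag["train"][0]

>>> # بداية الجملة: دمج sent1 و sent2 يعادل حقل startphrase
>>> start = example["sent1"] + " " + example["sent2"]

>>> # الجمل الأربع الكاملة المرشحة
>>> candidates = [start + " " + example[f"ending{i}"] for i in range(4)]

>>> # الجملة ذات النهاية الصحيحة وفق حقل label
>>> candidates[example["label"]]
```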
|
||||
|
||||
## المعالجة المسبقة (Preprocess)
|
||||
|
||||
الخطوة التالية هي استدعاء مُجزئ BERT لمعالجة بدايات الجمل والنهايات الأربع المحتملة:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
|
||||
```
|
||||
|
||||
تحتاج دالة المعالجة المسبقة التي تريد إنشاءها إلى:
|
||||
|
||||
1. إنشاء أربع نسخ من حقل `sent1` ودمج كل منها مع `sent2` لإعادة إنشاء كيفية بدء الجملة.
|
||||
2. دمج `sent2` مع كل من نهايات الجمل الأربع المحتملة.
|
||||
3. فرد (flatten) هاتين القائمتين لتتمكن من تجزئتهما، ثم إعادة تجميعهما بعد ذلك بحيث يكون لكل مثال حقول `input_ids` و `attention_mask` و `labels` مقابلة.
|
||||
|
||||
```py
|
||||
>>> ending_names = ["ending0", "ending1", "ending2", "ending3"]
|
||||
|
||||
>>> def preprocess_function(examples):
|
||||
... first_sentences = [[context] * 4 for context in examples["sent1"]]
|
||||
... question_headers = examples["sent2"]
|
||||
... second_sentences = [
|
||||
... [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
|
||||
... ]
|
||||
|
||||
... first_sentences = sum(first_sentences, [])
|
||||
... second_sentences = sum(second_sentences, [])
|
||||
|
||||
... tokenized_examples = tokenizer(first_sentences, second_sentences, truncation=True)
|
||||
... return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
|
||||
```
|
||||
|
||||
لتطبيق دالة المعالجة المسبقة على مجموعة البيانات بأكملها، استخدم طريقة [`~datasets.Dataset.map`] الخاصة بـ 🤗 Datasets. يمكنك تسريع دالة `map` عن طريق تعيين `batched=True` لمعالجة عناصر متعددة من مجموعة البيانات في وقت واحد:
|
||||
|
||||
```py
|
||||
tokenized_swag = swag.map(preprocess_function, batched=True)
|
||||
```
|
||||
|
||||
لا يحتوي 🤗 Transformers على مجمع بيانات للاختيار من متعدد، لذلك ستحتاج إلى تكييف [`DataCollatorWithPadding`] لإنشاء دفعة من الأمثلة. من الأكفأ إضافة حشو (padding) ديناميكي للجمل إلى أطول طول في دفعة أثناء التجميع، بدلاً من حشو مجموعة البيانات بأكملها إلى الحد الأقصى للطول.
|
||||
|
||||
يقوم `DataCollatorForMultipleChoice` بفرد (flatten) جميع مدخلات النموذج، ثم يطبّق الحشو، ثم يعيد النتائج إلى شكلها الأصلي:
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
```py
|
||||
>>> from dataclasses import dataclass
|
||||
>>> from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy
|
||||
>>> from typing import Optional, Union
|
||||
>>> import torch
|
||||
|
||||
>>> @dataclass
|
||||
... class DataCollatorForMultipleChoice:
|
||||
... """
|
||||
... Data collator that will dynamically pad the inputs for multiple choice received.
|
||||
... """
|
||||
|
||||
... tokenizer: PreTrainedTokenizerBase
|
||||
... padding: Union[bool, str, PaddingStrategy] = True
|
||||
... max_length: Optional[int] = None
|
||||
... pad_to_multiple_of: Optional[int] = None
|
||||
|
||||
... def __call__(self, features):
|
||||
... label_name = "label" if "label" in features[0].keys() else "labels"
|
||||
... labels = [feature.pop(label_name) for feature in features]
|
||||
... batch_size = len(features)
|
||||
... num_choices = len(features[0]["input_ids"])
|
||||
... flattened_features = [
|
||||
... [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
|
||||
... ]
|
||||
... flattened_features = sum(flattened_features, [])
|
||||
|
||||
... batch = self.tokenizer.pad(
|
||||
... flattened_features,
|
||||
... padding=self.padding,
|
||||
... max_length=self.max_length,
|
||||
... pad_to_multiple_of=self.pad_to_multiple_of,
|
||||
... return_tensors="pt",
|
||||
... )
|
||||
|
||||
... batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
|
||||
... batch["labels"] = torch.tensor(labels, dtype=torch.int64)
|
||||
... return batch
|
||||
```
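
للتحقق بسرعة من سلوك المجمّع (خطوة اختيارية للتوضيح، بافتراض أن `tokenized_swag` و `tokenizer` معرّفان كما سبق)، يمكنك تمرير مثالين إليه وفحص شكل الدفعة الناتجة؛ لاحظ أن البعد الأخير يساوي طول أطول تسلسل في الدفعة فقط، وهذا هو المقصود بالحشو الديناميكي:

```py
>>> # نحتفظ فقط بالحقول التي يتوقعها المجمّع
>>> keys = ["input_ids", "attention_mask", "label"]
>>> features = [{k: tokenized_swag["train"][i][k] for k in keys} for i in range(2)]

>>> collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
>>> batch = collator(features)
>>> batch["input_ids"].shape  # (حجم الدفعة، عدد الخيارات، أطول طول في الدفعة)
```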
|
||||
</pt>
|
||||
<tf>
|
||||
|
||||
```py
|
||||
>>> from dataclasses import dataclass
|
||||
>>> from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy
|
||||
>>> from typing import Optional, Union
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> @dataclass
|
||||
... class DataCollatorForMultipleChoice:
|
||||
... """
|
||||
... Data collator that will dynamically pad the inputs for multiple choice received.
|
||||
... """
|
||||
|
||||
... tokenizer: PreTrainedTokenizerBase
|
||||
... padding: Union[bool, str, PaddingStrategy] = True
|
||||
... max_length: Optional[int] = None
|
||||
... pad_to_multiple_of: Optional[int] = None
|
||||
|
||||
... def __call__(self, features):
|
||||
... label_name = "label" if "label" in features[0].keys() else "labels"
|
||||
... labels = [feature.pop(label_name) for feature in features]
|
||||
... batch_size = len(features)
|
||||
... num_choices = len(features[0]["input_ids"])
|
||||
... flattened_features = [
|
||||
... [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
|
||||
... ]
|
||||
... flattened_features = sum(flattened_features, [])
|
||||
|
||||
... batch = self.tokenizer.pad(
|
||||
... flattened_features,
|
||||
... padding=self.padding,
|
||||
... max_length=self.max_length,
|
||||
... pad_to_multiple_of=self.pad_to_multiple_of,
|
||||
... return_tensors="tf",
|
||||
... )
|
||||
|
||||
... batch = {k: tf.reshape(v, (batch_size, num_choices, -1)) for k, v in batch.items()}
|
||||
... batch["labels"] = tf.convert_to_tensor(labels, dtype=tf.int64)
|
||||
... return batch
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## التقييم (Evaluate)
|
||||
|
||||
يُفضل غالبًا تضمين مقياس أثناء التدريب لتقييم أداء نموذجك. يمكنك تحميل طريقة تقييم بسرعة باستخدام مكتبة 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index). لهذه المهمة، قم بتحميل مقياس [الدقة](https://huggingface.co/spaces/evaluate-metric/accuracy) (انظر إلى [الجولة السريعة](https://huggingface.co/docs/evaluate/a_quick_tour) لـ 🤗 Evaluate لمعرفة المزيد حول كيفية تحميل المقياس وحسابه):
|
||||
|
||||
```py
|
||||
>>> import evaluate
|
||||
|
||||
>>> accuracy = evaluate.load("accuracy")
|
||||
```
|
||||
|
||||
ثم أنشئ دالة لتمرير التنبؤات والتسميات إلى [`~evaluate.EvaluationModule.compute`] لحساب الدقة:
|
||||
|
||||
```py
|
||||
>>> import numpy as np
|
||||
|
||||
>>> def compute_metrics(eval_pred):
|
||||
... predictions, labels = eval_pred
|
||||
... predictions = np.argmax(predictions, axis=1)
|
||||
... return accuracy.compute(predictions=predictions, references=labels)
|
||||
```
|
||||
|
||||
دالتك `compute_metrics` جاهزة الآن، وستعود إليها عند إعداد تدريبك.
|
||||
|
||||
## التدريب (Train)
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن معتادًا على ضبط نموذج باستخدام [`Trainer`], فراجع الدرس الأساسي [هنا](../training#train-with-pytorch-trainer)!
|
||||
|
||||
</Tip>
|
||||
|
||||
أنت جاهز لبدء تدريب نموذجك الآن! قم بتحميل BERT باستخدام [`AutoModelForMultipleChoice`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForMultipleChoice, TrainingArguments, Trainer
|
||||
|
||||
>>> model = AutoModelForMultipleChoice.from_pretrained("google-bert/bert-base-uncased")
|
||||
```
|
||||
|
||||
في هذه المرحلة، تبقى ثلاث خطوات فقط:
|
||||
|
||||
1. حدد معلمات التدريب الخاصة بك في [`TrainingArguments`]. المعلمة الوحيدة المطلوبة هي `output_dir` التي تحدد مكان حفظ نموذجك. ستدفع هذا النموذج إلى Hub عن طريق تعيين `push_to_hub=True` (يجب عليك تسجيل الدخول إلى Hugging Face لتحميل نموذجك). في نهاية كل حقبة، سيقوم [`Trainer`] بتقييم الدقة وحفظ نقطة فحص التدريب.
|
||||
2. مرر معلمات التدريب إلى [`Trainer`] جنبًا إلى جنب مع النموذج ومجموعة البيانات والمُجزئ ومُجمِّع البيانات ودالة `compute_metrics`.
3. استدعي [`~Trainer.train`] لضبط نموذجك.
|
||||
|
||||
```py
|
||||
>>> training_args = TrainingArguments(
|
||||
... output_dir="my_awesome_swag_model",
|
||||
... eval_strategy="epoch",
|
||||
... save_strategy="epoch",
|
||||
... load_best_model_at_end=True,
|
||||
... learning_rate=5e-5,
|
||||
... per_device_train_batch_size=16,
|
||||
... per_device_eval_batch_size=16,
|
||||
... num_train_epochs=3,
|
||||
... weight_decay=0.01,
|
||||
... push_to_hub=True,
|
||||
... )
|
||||
|
||||
>>> trainer = Trainer(
|
||||
... model=model,
|
||||
... args=training_args,
|
||||
... train_dataset=tokenized_swag["train"],
|
||||
... eval_dataset=tokenized_swag["validation"],
|
||||
... processing_class=tokenizer,
|
||||
... data_collator=DataCollatorForMultipleChoice(tokenizer=tokenizer),
|
||||
... compute_metrics=compute_metrics,
|
||||
... )
|
||||
|
||||
>>> trainer.train()
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، شارك نموذجك مع Hub باستخدام طريقة [`~transformers.Trainer.push_to_hub`] حتى يتمكن الجميع من استخدام نموذجك:
|
||||
|
||||
```py
|
||||
>>> trainer.push_to_hub()
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن معتادًا على ضبط نموذج باستخدام Keras، فراجع الدرس الأساسي [هنا](../training#train-a-tensorflow-model-with-keras)!
|
||||
|
||||
</Tip>
|
||||
لضبط نموذج في TensorFlow، ابدأ بإعداد دالة مُحسِّن وجدول معدل التعلم وبعض معلمات التدريب:
|
||||
|
||||
```py
|
||||
>>> from transformers import create_optimizer
|
||||
|
||||
>>> batch_size = 16
|
||||
>>> num_train_epochs = 2
|
||||
>>> total_train_steps = (len(tokenized_swag["train"]) // batch_size) * num_train_epochs
|
||||
>>> optimizer, schedule = create_optimizer(init_lr=5e-5, num_warmup_steps=0, num_train_steps=total_train_steps)
|
||||
```
|
||||
|
||||
ثم يمكنك تحميل BERT باستخدام [`TFAutoModelForMultipleChoice`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForMultipleChoice
|
||||
|
||||
>>> model = TFAutoModelForMultipleChoice.from_pretrained("google-bert/bert-base-uncased")
|
||||
```
|
||||
|
||||
حوّل مجموعات البيانات الخاصة بك إلى تنسيق `tf.data.Dataset` باستخدام [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
|
||||
|
||||
```py
|
||||
>>> data_collator = DataCollatorForMultipleChoice(tokenizer=tokenizer)
|
||||
>>> tf_train_set = model.prepare_tf_dataset(
|
||||
... tokenized_swag["train"],
|
||||
... shuffle=True,
|
||||
... batch_size=batch_size,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
|
||||
>>> tf_validation_set = model.prepare_tf_dataset(
|
||||
... tokenized_swag["validation"],
|
||||
... shuffle=False,
|
||||
... batch_size=batch_size,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
```
|
||||
|
||||
قم بتهيئة النموذج للتدريب باستخدام [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). لاحظ أن جميع نماذج Transformers تحتوي على دالة خسارة مناسبة للمهمة بشكل افتراضي، لذلك لا تحتاج إلى تحديد واحدة ما لم ترغب في ذلك:
|
||||
|
||||
```py
|
||||
>>> model.compile(optimizer=optimizer) # لا توجد وسيطة خسارة!
|
||||
```
|
||||
|
||||
الخطوتان الأخيرتان قبل بدء التدريب هما: حساب دقة التنبؤات، وتوفير طريقة لرفع النموذج إلى Hub. ويمكن تحقيق ذلك باستخدام [استدعاءات Keras](../main_classes/keras_callbacks)
|
||||
|
||||
مرر دالتك `compute_metrics` إلى [`~transformers.KerasMetricCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import KerasMetricCallback
|
||||
|
||||
>>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set)
|
||||
```
|
||||
|
||||
حدد مكان دفع نموذجك ومعالجك في [`~transformers.PushToHubCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import PushToHubCallback
|
||||
|
||||
>>> push_to_hub_callback = PushToHubCallback(
|
||||
... output_dir="my_awesome_model",
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
```
|
||||
|
||||
ثم قم بتضمين الاستدعاءات معًا:
|
||||
|
||||
```py
|
||||
>>> callbacks = [metric_callback, push_to_hub_callback]
|
||||
```
|
||||
|
||||
أخيرًا، أنت جاهز لبدء تدريب نموذجك! استدعِ [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) مع مجموعات بيانات التدريب والتحقق من الصحة وعدد الحقب ودوال الاستدعاء (callbacks) لضبط النموذج:
|
||||
```py
|
||||
>>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=2, callbacks=callbacks)
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، يتم تحميل نموذجك تلقائيًا إلى Hub حتى يتمكن الجميع من استخدامه!
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
<Tip>
|
||||
|
||||
للحصول على مثال أكثر تعمقًا حول كيفية ضبط نموذج للاختيار من متعدد، ألق نظرة على [دفتر ملاحظات PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)
|
||||
أو [دفتر ملاحظات TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb) المقابل.
|
||||
|
||||
</Tip>
|
||||
|
||||
## الاستدلال (Inference)
|
||||
|
||||
رائع، الآن بعد أن قمت بضبط نموذج، يمكنك استخدامه للاستدلال!
|
||||
|
||||
قم بإنشاء نص واقتراح إجابتين محتملتين:
|
||||
|
||||
```py
|
||||
>>> prompt = "France has a bread law, Le Décret Pain, with strict rules on what is allowed in a traditional baguette."
|
||||
>>> candidate1 = "The law does not apply to croissants and brioche."
|
||||
>>> candidate2 = "The law applies to baguettes."
|
||||
```
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
قم بتجزئة كل زوج من المطالبة (prompt) والإجابة المرشحة وأرجِع موترات PyTorch. ستحتاج أيضًا إلى إنشاء بعض التسميات `labels`:
|
||||
```py
|
||||
>>> import torch
>>> from transformers import AutoTokenizer
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_swag_model")
|
||||
>>> inputs = tokenizer([[prompt, candidate1], [prompt, candidate2]], return_tensors="pt", padding=True)
|
||||
>>> labels = torch.tensor(0).unsqueeze(0)
|
||||
```
|
||||
|
||||
مرر مدخلاتك والتسميات إلى النموذج وأرجِع `logits`:
|
||||
```py
|
||||
>>> from transformers import AutoModelForMultipleChoice
|
||||
|
||||
>>> model = AutoModelForMultipleChoice.from_pretrained("username/my_awesome_swag_model")
|
||||
>>> outputs = model(**{k: v.unsqueeze(0) for k, v in inputs.items()}, labels=labels)
|
||||
>>> logits = outputs.logits
|
||||
```
|
||||
|
||||
استخرج الفئة ذات الاحتمالية الأكبر:
|
||||
|
||||
```py
|
||||
>>> predicted_class = logits.argmax().item()
|
||||
>>> predicted_class
|
||||
0
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
قم بتجزئة كل زوج من المطالبة (prompt) والإجابة المرشحة وأرجِع موترات TensorFlow:
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_swag_model")
|
||||
>>> inputs = tokenizer([[prompt, candidate1], [prompt, candidate2]], return_tensors="tf", padding=True)
|
||||
```
|
||||
|
||||
مرر مدخلاتك إلى النموذج وأعد القيم logits:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForMultipleChoice
|
||||
|
||||
>>> model = TFAutoModelForMultipleChoice.from_pretrained("username/my_awesome_swag_model")
|
||||
>>> inputs = {k: tf.expand_dims(v, 0) for k, v in inputs.items()}
|
||||
>>> outputs = model(inputs)
|
||||
>>> logits = outputs.logits
|
||||
```
|
||||
|
||||
استخرج الفئة ذات الاحتمالية الأكبر:
|
||||
|
||||
```py
|
||||
>>> predicted_class = int(tf.math.argmax(logits, axis=-1)[0])
|
||||
>>> predicted_class
|
||||
0
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
@ -1,432 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# الإجابة على الأسئلة (Question answering)
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
<Youtube id="ajPx5LwJD-I"/>
|
||||
|
||||
تُقدّم مهام الإجابة على الأسئلة إجابةً بناءً على سؤال. إذا سبق لك أن سألت مساعدًا افتراضيًا مثل Alexa أو Siri أو Google عن حالة الطقس، فأنت قد استخدمت نموذج للإجابة على الأسئلة من قبل. هناك نوعان شائعان لمهام الإجابة على الأسئلة:
|
||||
|
||||
- الاستخراجية: استخراج الإجابة من السياق المحدد.
|
||||
- التوليدية (Abstractive): توليد إجابة من السياق تجيب عن السؤال بشكل صحيح.
|
||||
سيوضح لك هذا الدليل كيفية:
|
||||
|
||||
1. ضبط [DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased) على مجموعة بيانات [SQuAD](https://huggingface.co/datasets/squad) للإجابة على الأسئلة الاستخراجية.
|
||||
2. استخدام النموذج المضبوط للاستدلال.
|
||||
|
||||
<Tip>
|
||||
|
||||
لمشاهدة جميع الهياكل والنسخ المتوافقة مع هذه المهمة، نوصي بالرجوع إلى [صفحة المهمة](https://huggingface.co/tasks/question-answering)
|
||||
|
||||
</Tip>
|
||||
|
||||
قبل البدء، تأكد من تثبيت جميع المكتبات الضرورية:
|
||||
|
||||
```bash
|
||||
pip install transformers datasets evaluate
|
||||
```
|
||||
|
||||
نشجعك على تسجيل الدخول إلى حساب Hugging Face الخاص بك حتى تتمكن من تحميل نموذجك ومشاركته مع المجتمع. عند المطالبة، أدخل الرمز المميز الخاص بك لتسجيل الدخول:
|
||||
|
||||
```py
|
||||
>>> from huggingface_hub import notebook_login
|
||||
|
||||
>>> notebook_login()
|
||||
```
|
||||
|
||||
## تحميل مجموعة بيانات SQuAD
|
||||
|
||||
ابدأ بتحميل جزء أصغر من مجموعة بيانات SQuAD من مكتبة 🤗 Datasets. سيتيح لك ذلك فرصة للتجربة والتحقق من عمل كل شيء بشكل صحيح قبل قضاء المزيد من الوقت في التدريب على مجموعة البيانات الكاملة.
|
||||
|
||||
```py
|
||||
>>> from datasets import load_dataset
|
||||
|
||||
>>> squad = load_dataset("squad", split="train[:5000]")
|
||||
```
|
||||
|
||||
قم بتقسيم جزء `train` من مجموعة البيانات إلى مجموعتي تدريب واختبار باستخدام طريقة [`~datasets.Dataset.train_test_split`]:
|
||||
```py
|
||||
>>> squad = squad.train_test_split(test_size=0.2)
|
||||
```
|
||||
|
||||
ثم ألق نظرة على مثال:
|
||||
|
||||
```py
|
||||
>>> squad["train"][0]
|
||||
{'answers': {'answer_start': [515], 'text': ['Saint Bernadette Soubirous']},
|
||||
'context': 'Architecturally, the school has a Catholic character. Atop the Main Building\'s gold dome is a golden statue of the Virgin Mary. Immediately in front of the Main Building and facing it, is a copper statue of Christ with arms upraised with the legend "Venite Ad Me Omnes". Next to the Main Building is the Basilica of the Sacred Heart. Immediately behind the basilica is the Grotto, a Marian place of prayer and reflection. It is a replica of the grotto at Lourdes, France where the Virgin Mary reputedly appeared to Saint Bernadette Soubirous in 1858. At the end of the main drive (and in a direct line that connects through 3 statues and the Gold Dome), is a simple, modern stone statue of Mary.',
|
||||
'id': '5733be284776f41900661182',
|
||||
'question': 'To whom did the Virgin Mary allegedly appear in 1858 in Lourdes France?',
|
||||
'title': 'University_of_Notre_Dame'
|
||||
}
|
||||
```
|
||||
|
||||
هناك العديد من الحقول المهمة هنا (انظر المقتطف التوضيحي بعد القائمة):
|
||||
- `answers`: موقع بداية الرمز المميز للإجابة ونص الإجابة.
|
||||
- `context`: معلومات أساسية يحتاج النموذج إلى استخراج الإجابة منها.
|
||||
- `question`: السؤال الذي يجب على النموذج الإجابة عليه.
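
مقتطف توضيحي قصير (بافتراض أن `squad` مُحمَّل كما سبق) يوضح أن `answer_start` هو فهرس الحرف الذي تبدأ عنده الإجابة داخل `context`:

```py
>>> example = squad["train"][0]
>>> start = example["answers"]["answer_start"][0]
>>> answer_text = example["answers"]["text"][0]

>>> # اقتطاع الإجابة من السياق انطلاقًا من answer_start يعيد نص الإجابة نفسه
>>> example["context"][start : start + len(answer_text)] == answer_text
True
```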
|
||||
|
||||
## المعالجة المسبقة (Preprocess)
|
||||
|
||||
<Youtube id="qgaM0weJHpA"/>
|
||||
|
||||
الخطوة التالية هي تحميل مُجزِّئ النصوص DistilBERT لمعالجة حقلي `question` و `context`:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
هناك بعض خطوات المعالجة المسبقة الخاصة بمهام الإجابة على الأسئلة التي يجب أن تكون على دراية بها:
|
||||
|
||||
1. قد تحتوي بعض الأمثلة في مجموعة البيانات على `context` طويل يتجاوز الحد الأقصى لطول مدخل النموذج. للتعامل مع النصوص الأطول، يتم اقتطاع `context` فقط عن طريق تعيين `truncation="only_second"`.
2. بعد ذلك، يتم تحديد مواضع بداية ونهاية الإجابة في `context` الأصلي عن طريق تعيين `return_offsets_mapping=True`.
3. باستخدام هذا التعيين، يمكن الآن تحديد رمزي بداية ونهاية الإجابة. استخدم طريقة [`~tokenizers.Encoding.sequence_ids`] لتحديد أي أجزاء الإزاحة تتوافق مع `question` وأيها تتوافق مع `context` (انظر المثال المختصر بعد هذه القائمة).
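
كمثال مختصر (الجملتان هنا مجرد افتراض للتوضيح)، تُعيد `sequence_ids` القيمة `None` للرموز الخاصة، و`0` لرموز السؤال، و`1` لرموز السياق:

```py
>>> encoding = tokenizer("Where is the Eiffel Tower?", "The Eiffel Tower is in Paris.")
>>> sequence_ids = encoding.sequence_ids(0)  # None للرموز الخاصة، 0 للسؤال، 1 للسياق
```
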
فيما يلي كيفية إنشاء دالة لقص وتعيين رموز البداية والنهاية لـ `answer` إلى `context`:
|
||||
|
||||
```py
|
||||
>>> def preprocess_function(examples):
|
||||
... questions = [q.strip() for q in examples["question"]]
|
||||
... inputs = tokenizer(
|
||||
... questions,
|
||||
... examples["context"],
|
||||
... max_length=384,
|
||||
... truncation="only_second",
|
||||
... return_offsets_mapping=True,
|
||||
... padding="max_length",
|
||||
... )
|
||||
|
||||
... offset_mapping = inputs.pop("offset_mapping")
|
||||
... answers = examples["answers"]
|
||||
... start_positions = []
|
||||
... end_positions = []
|
||||
|
||||
... for i, offset in enumerate(offset_mapping):
|
||||
... answer = answers[i]
|
||||
... start_char = answer["answer_start"][0]
|
||||
... end_char = answer["answer_start"][0] + len(answer["text"][0])
|
||||
... sequence_ids = inputs.sequence_ids(i)
|
||||
|
||||
... # Find the start and end of the context
|
||||
... idx = 0
|
||||
... while sequence_ids[idx] != 1:
|
||||
... idx += 1
|
||||
... context_start = idx
|
||||
... while sequence_ids[idx] == 1:
|
||||
... idx += 1
|
||||
... context_end = idx - 1
|
||||
|
||||
... # If the answer is not fully inside the context, label it (0, 0)
|
||||
... if offset[context_start][0] > end_char or offset[context_end][1] < start_char:
|
||||
... start_positions.append(0)
|
||||
... end_positions.append(0)
|
||||
... else:
|
||||
... # Otherwise it's the start and end token positions
|
||||
... idx = context_start
|
||||
... while idx <= context_end and offset[idx][0] <= start_char:
|
||||
... idx += 1
|
||||
... start_positions.append(idx - 1)
|
||||
|
||||
... idx = context_end
|
||||
... while idx >= context_start and offset[idx][1] >= end_char:
|
||||
... idx -= 1
|
||||
... end_positions.append(idx + 1)
|
||||
|
||||
... inputs["start_positions"] = start_positions
|
||||
... inputs["end_positions"] = end_positions
|
||||
... return inputs
|
||||
```
|
||||
|
||||
لتطبيق المعالجة المسبقة على كامل مجموعة البيانات، استخدم [`~datasets.Dataset.map`] من مكتبة 🤗 Datasets. يمكنك تسريع دالة `map` عن طريق تعيين `batched=True` لمعالجة عناصر متعددة من مجموعة البيانات دفعة واحدة. قم بإزالة أي أعمدة لا تحتاجها:
|
||||
|
||||
```py
|
||||
>>> tokenized_squad = squad.map(preprocess_function, batched=True, remove_columns=squad["train"].column_names)
|
||||
```
|
||||
|
||||
الآن قم بإنشاء دفعة من الأمثلة باستخدام [`DefaultDataCollator`]. بخلاف مجمّعات البيانات الأخرى في 🤗 Transformers، لا يطبق [`DefaultDataCollator`] أي معالجة مسبقة إضافية مثل الحشو.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
```py
|
||||
>>> from transformers import DefaultDataCollator
|
||||
|
||||
>>> data_collator = DefaultDataCollator()
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
|
||||
```py
|
||||
>>> from transformers import DefaultDataCollator
|
||||
|
||||
>>> data_collator = DefaultDataCollator(return_tensors="tf")
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## التدريب (Train)
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن معتادًا على ضبط نموذج باستخدام [`Trainer`], ألق نظرة على البرنامج التعليمي الأساسي [هنا](../training#train-with-pytorch-trainer)!
|
||||
|
||||
</Tip>
|
||||
|
||||
أنت جاهز لبدء تدريب نموذجك الآن! قم بتحميل DistilBERT باستخدام [`AutoModelForQuestionAnswering`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer
|
||||
|
||||
>>> model = AutoModelForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
في هذه المرحلة، تبقى ثلاث خطوات فقط:
|
||||
|
||||
1. حدد المعاملات الفائقة للتدريب في [`TrainingArguments`]. المعامل الوحيد المطلوب هو `output_dir` الذي يحدد مكان حفظ نموذجك. ستدفع هذا النموذج إلى Hub عن طريق تعيين `push_to_hub=True` (يجب عليك تسجيل الدخول إلى Hugging Face لتحميل نموذجك).
|
||||
2. مرر معاملات التدريب إلى [`Trainer`] جنبًا إلى جنب مع النموذج، ومجموعة البيانات، والمُحلّل النصي، ومُجمّع البيانات.
|
||||
3. استدعِ [`~Trainer.train`] لضبط النموذج.
|
||||
|
||||
```py
|
||||
>>> training_args = TrainingArguments(
|
||||
... output_dir="my_awesome_qa_model",
|
||||
... eval_strategy="epoch",
|
||||
... learning_rate=2e-5,
|
||||
... per_device_train_batch_size=16,
|
||||
... per_device_eval_batch_size=16,
|
||||
... num_train_epochs=3,
|
||||
... weight_decay=0.01,
|
||||
... push_to_hub=True,
|
||||
... )
|
||||
|
||||
>>> trainer = Trainer(
|
||||
... model=model,
|
||||
... args=training_args,
|
||||
... train_dataset=tokenized_squad["train"],
|
||||
... eval_dataset=tokenized_squad["test"],
|
||||
... processing_class=tokenizer,
|
||||
... data_collator=data_collator,
|
||||
... )
|
||||
|
||||
>>> trainer.train()
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، شارك نموذجك في Hub باستخدام الدالة [`~transformers.Trainer.push_to_hub`] حتى يتمكن الجميع من استخدام نموذجك:
|
||||
|
||||
```py
|
||||
>>> trainer.push_to_hub()
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن معتادًا على ضبط نموذج باستخدام Keras، فألق نظرة على البرنامج التعليمي الأساسي [هنا](../training#train-a-tensorflow-model-with-keras)!
|
||||
|
||||
</Tip>
|
||||
لضبط نموذج في TensorFlow، ابدأ بإعداد دالة مُحسِّن، وجدول معدل التعلم، وبعض المعاملات الفائقة للتدريب:
|
||||
|
||||
```py
|
||||
>>> from transformers import create_optimizer
|
||||
|
||||
>>> batch_size = 16
|
||||
>>> num_epochs = 2
|
||||
>>> total_train_steps = (len(tokenized_squad["train"]) // batch_size) * num_epochs
|
||||
>>> optimizer, schedule = create_optimizer(
|
||||
... init_lr=2e-5,
|
||||
... num_warmup_steps=0,
|
||||
... num_train_steps=total_train_steps,
|
||||
... )
|
||||
```
|
||||
|
||||
ثم يمكنك تحميل DistilBERT باستخدام [`TFAutoModelForQuestionAnswering`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForQuestionAnswering
|
||||
|
||||
>>> model = TFAutoModelForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
حوّل مجموعات البيانات الخاصة بك إلى تنسيق `tf.data.Dataset` باستخدام [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
|
||||
|
||||
```py
|
||||
>>> tf_train_set = model.prepare_tf_dataset(
|
||||
... tokenized_squad["train"],
|
||||
... shuffle=True,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
|
||||
>>> tf_validation_set = model.prepare_tf_dataset(
|
||||
... tokenized_squad["test"],
|
||||
... shuffle=False,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
```
|
||||
|
||||
قم بتكوين النموذج للتدريب باستخدام [`compile`](https://keras.io/api/models/model_training_apis/#compile-method):
|
||||
|
||||
```py
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> model.compile(optimizer=optimizer)
|
||||
```
|
||||
|
||||
آخر شيء يجب إعداده قبل بدء التدريب هو توفير طريقة لدفع نموذجك إلى Hub. يمكن القيام بذلك عن طريق تحديد مكان دفع نموذجك ومعالجك المعجمي في [`~transformers.PushToHubCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import PushToHubCallback
|
||||
|
||||
>>> callback = PushToHubCallback(
|
||||
... output_dir="my_awesome_qa_model",
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
```
|
||||
|
||||
أخيرًا، أنت جاهز لبدء تدريب نموذجك! استدعِ [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) مع مجموعتي بيانات التدريب والتحقق، وعدد الحقبات، ودالة الاستدعاء (callback) الخاصة بك لضبط النموذج:
|
||||
|
||||
```py
|
||||
>>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3, callbacks=[callback])
|
||||
```
|
||||
بمجرد اكتمال التدريب، يتم تحميل نموذجك تلقائيًا إلى Hub حتى يتمكن الجميع من استخدامه!
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
|
||||
<Tip>
|
||||
|
||||
للحصول على مثال أكثر تعمقًا حول كيفية ضبط نموذج للإجابة على الأسئلة، ألق نظرة على [دفتر ملاحظات PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb) المقابل
|
||||
أو [دفتر ملاحظات TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb).
|
||||
|
||||
</Tip>
|
||||
|
||||
## التقييم (Evaluate)
|
||||
|
||||
يتطلب تقييم مهمة الإجابة على الأسئلة قدرًا كبيرًا من المعالجة اللاحقة. لتوفير وقتك، يتخطى هذا الدليل خطوة التقييم. ومع ذلك، لا يزال [`Trainer`] يحسب خسارة التقييم أثناء التدريب، لذا فلن تكون في جهل تام بأداء نموذجك.
|
||||
|
||||
إذا كان لديك المزيد من الوقت وتهتم بكيفية تقييم نموذجك للإجابة على الأسئلة، فألق نظرة على فصل [الإجابة على الأسئلة](https://huggingface.co/course/chapter7/7?fw=pt#post-processing) من دورة 🤗 Hugging Face!
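
كمثال توضيحي مبسّط (بافتراض أن التنبؤات والمراجع قد جُهّزت بهذه الصيغة بعد المعالجة اللاحقة)، يمكن استخدام مقياس `squad` من مكتبة 🤗 Evaluate لحساب درجتي التطابق التام (exact match) وF1:

```py
>>> import evaluate

>>> squad_metric = evaluate.load("squad")
>>> predictions = [{"id": "5733be284776f41900661182", "prediction_text": "Saint Bernadette Soubirous"}]
>>> references = [
...     {"id": "5733be284776f41900661182", "answers": {"text": ["Saint Bernadette Soubirous"], "answer_start": [515]}}
... ]
>>> squad_metric.compute(predictions=predictions, references=references)
{'exact_match': 100.0, 'f1': 100.0}
```
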
## الاستدلال (Inference)
|
||||
|
||||
رائع، الآن بعد أن قمت بضبط نموذج، يمكنك استخدامه للاستدلال!
|
||||
|
||||
حدد سؤالًا وسياقًا ليقوم النموذج بالتنبؤ بالإجابة عليه:
|
||||
|
||||
```py
|
||||
>>> question = "How many programming languages does BLOOM support?"
|
||||
>>> context = "BLOOM has 176 billion parameters and can generate text in 46 languages natural languages and 13 programming languages."
|
||||
```
|
||||
|
||||
أبسط طريقة لتجربة نموذجك المُدرَّب للاستدلال هي استخدامه في [`pipeline`]. قم بإنشاء كائن لـ `pipeline` للإجابة على الأسئلة باستخدام نموذجك، ومرِّر النص إليه:
|
||||
|
||||
```py
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> question_answerer = pipeline("question-answering", model="my_awesome_qa_model")
|
||||
>>> question_answerer(question=question, context=context)
|
||||
{'score': 0.2058267742395401,
 'start': 10,
 'end': 95,
 'answer': '176 billion parameters and can generate text in 46 languages natural languages and 13'}
|
||||
```
|
||||
|
||||
يمكنك أيضًا تكرار نتائج `pipeline` يدويًا إذا أردت:
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
قسّم النص وأرجع تنسورات PyTorch:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_qa_model")
|
||||
>>> inputs = tokenizer(question, context, return_tensors="pt")
|
||||
```
|
||||
|
||||
مرر مدخلاتك إلى النموذج وأرجع `logits`:
|
||||
|
||||
```py
|
||||
>>> import torch
|
||||
>>> from transformers import AutoModelForQuestionAnswering
|
||||
|
||||
>>> model = AutoModelForQuestionAnswering.from_pretrained("my_awesome_qa_model")
|
||||
>>> with torch.no_grad():
|
||||
... outputs = model(**inputs)
|
||||
```
|
||||
|
||||
احصل على أعلى احتمال من مخرجات النموذج لموضعي البداية والنهاية:
|
||||
|
||||
```py
|
||||
>>> answer_start_index = outputs.start_logits.argmax()
|
||||
>>> answer_end_index = outputs.end_logits.argmax()
|
||||
```
|
||||
|
||||
استخلاص الإجابة من الرموز المتوقعة:
|
||||
|
||||
```py
|
||||
>>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
|
||||
>>> tokenizer.decode(predict_answer_tokens)
|
||||
'176 billion parameters and can generate text in 46 languages natural languages and 13'
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
قسّم النص إلى رموز وأرجع تنسورات TensorFlow:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("my_awesome_qa_model")
|
||||
>>> inputs = tokenizer(question, context, return_tensors="tf")
|
||||
```
|
||||
|
||||
مرر مدخلاتك إلى النموذج وأعد `logits`:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForQuestionAnswering
|
||||
|
||||
>>> model = TFAutoModelForQuestionAnswering.from_pretrained("my_awesome_qa_model")
|
||||
>>> outputs = model(**inputs)
|
||||
```
|
||||
|
||||
احصل على أعلى احتمال من مخرجات النموذج لموضعي البداية والنهاية:
|
||||
|
||||
```py
|
||||
>>> answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
|
||||
>>> answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
|
||||
```
|
||||
|
||||
استخلاص الإجابة من الرموز المتوقعة:
|
||||
|
||||
```py
|
||||
>>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
|
||||
>>> tokenizer.decode(predict_answer_tokens)
|
||||
'176 billion parameters and can generate text in 46 languages natural languages and 13'
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
@ -1,387 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# تصنيف النص(Text classification)
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
<Youtube id="leNG9fN9FQU"/>
|
||||
|
||||
تصنيف النص هو مهمة NLP شائعة حيث يُعيّن تصنيفًا أو فئة للنص. تستخدم بعض أكبر الشركات تصنيف النصوص في الإنتاج لمجموعة واسعة من التطبيقات العملية. أحد أكثر أشكال تصنيف النص شيوعًا هو تحليل المشاعر، والذي يقوم بتعيين تسمية مثل 🙂 إيجابية، 🙁 سلبية، أو 😐 محايدة لتسلسل نصي.
|
||||
|
||||
سيوضح لك هذا الدليل كيفية:
|
||||
|
||||
1. ضبط [DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased) على مجموعة بيانات [IMDb](https://huggingface.co/datasets/imdb) لتحديد ما إذا كانت مراجعة الفيلم إيجابية أو سلبية.
|
||||
2. استخدام نموذج الضبط الدقيق للتنبؤ.
|
||||
|
||||
<Tip>
|
||||
|
||||
لرؤية جميع البنى ونقاط التحقق المتوافقة مع هذه المهمة، نوصي بالتحقق من [صفحة المهمة](https://huggingface.co/tasks/text-classification).
|
||||
|
||||
</Tip>
|
||||
|
||||
قبل أن تبدأ، تأكد من تثبيت جميع المكتبات الضرورية:
|
||||
|
||||
```bash
|
||||
pip install transformers datasets evaluate accelerate
|
||||
```
|
||||
|
||||
نحن نشجعك على تسجيل الدخول إلى حساب Hugging Face الخاص بك حتى تتمكن من تحميل ومشاركة نموذجك مع المجتمع. عند المطالبة، أدخل رمزك لتسجيل الدخول:
|
||||
|
||||
```py
|
||||
>>> from huggingface_hub import notebook_login
|
||||
|
||||
>>> notebook_login()
|
||||
```
|
||||
|
||||
## تحميل مجموعة بيانات IMDb
|
||||
|
||||
ابدأ بتحميل مجموعة بيانات IMDb من مكتبة 🤗 Datasets:
|
||||
|
||||
```py
|
||||
>>> from datasets import load_dataset
|
||||
|
||||
>>> imdb = load_dataset("imdb")
|
||||
```
|
||||
|
||||
ثم ألق نظرة على مثال:
|
||||
|
||||
```py
|
||||
>>> imdb["test"][0]
|
||||
{
|
||||
"label": 0,
|
||||
"text": "I love sci-fi and am willing to put up with a lot. Sci-fi movies/TV are usually underfunded, under-appreciated and misunderstood. I tried to like this, I really did, but it is to good TV sci-fi as Babylon 5 is to Star Trek (the original). Silly prosthetics, cheap cardboard sets, stilted dialogues, CG that doesn't match the background, and painfully one-dimensional characters cannot be overcome with a 'sci-fi' setting. (I'm sure there are those of you out there who think Babylon 5 is good sci-fi TV. It's not. It's clichéd and uninspiring.) While US viewers might like emotion and character development, sci-fi is a genre that does not take itself seriously (cf. Star Trek). It may treat important issues, yet not as a serious philosophy. It's really difficult to care about the characters here as they are not simply foolish, just missing a spark of life. Their actions and reactions are wooden and predictable, often painful to watch. The makers of Earth KNOW it's rubbish as they have to always say \"Gene Roddenberry's Earth...\" otherwise people would not continue watching. Roddenberry's ashes must be turning in their orbit as this dull, cheap, poorly edited (watching it without advert breaks really brings this home) trudging Trabant of a show lumbers into space. Spoiler. So, kill off a main character. And then bring him back as another actor. Jeeez! Dallas all over again.",
|
||||
}
|
||||
```
|
||||
|
||||
هناك حقولان في هذه المجموعة من البيانات:
|
||||
|
||||
- `text`: نص مراجعة الفيلم.
- `label`: قيمة إما `0` لمراجعة سلبية أو `1` لمراجعة إيجابية (انظر المثال بعد هذه القائمة).
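
على سبيل المثال، يمكنك الاطلاع على أسماء التصنيفات المقابلة لهاتين القيمتين مباشرةً من مجموعة البيانات (مقتطف توضيحي سريع):

```py
>>> imdb["train"].features["label"].names
['neg', 'pos']
```
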
## المعالجة المسبقة(Preprocess)
|
||||
|
||||
الخطوة التالية هي تحميل مُجزِّئ النصوص DistilBERT لمعالجة حقل `text`:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
أنشئ دالة لتجزئة حقل `text` واقتطاع السلاسل النصية بحيث لا يتجاوز طولها الحد الأقصى لمدخلات DistilBERT:
|
||||
|
||||
```py
|
||||
>>> def preprocess_function(examples):
|
||||
... return tokenizer(examples["text"], truncation=True)
|
||||
```
|
||||
|
||||
لتطبيق دالة المعالجة المسبقة على مجموعة البيانات بأكملها، استخدم دالة [`~datasets.Dataset.map`] من 🤗 Datasets. يمكنك تسريع `map` باستخدام `batched=True` لمعالجة دفعات من البيانات:
|
||||
|
||||
```py
|
||||
tokenized_imdb = imdb.map(preprocess_function, batched=True)
|
||||
```
|
||||
|
||||
الآن قم بإنشاء دفعة من الأمثلة باستخدام [`DataCollatorWithPadding`]. الأكثر كفاءة هو استخدام الحشو الديناميكي لجعل الجمل متساوية في الطول داخل كل دفعة، بدلًا من حشو كامل البيانات إلى الحد الأقصى للطول.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorWithPadding
|
||||
|
||||
>>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorWithPadding
|
||||
|
||||
>>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="tf")
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## التقييم(Evaluate)
|
||||
|
||||
يُعدّ تضمين مقياس أثناء التدريب مفيدًا لتقييم أداء النموذج. يمكنك تحميل طريقة تقييم بسرعة باستخدام مكتبة 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) . بالنسبة لهذه المهمة، قم بتحميل مقياس [الدقة](https://huggingface.co/spaces/evaluate-metric/accuracy) (راجع جولة 🤗 Evaluate [السريعة](https://huggingface.co/docs/evaluate/a_quick_tour) لمعرفة المزيد حول كيفية تحميل وحساب مقياس):
|
||||
|
||||
```py
|
||||
>>> import evaluate
|
||||
|
||||
>>> accuracy = evaluate.load("accuracy")
|
||||
```
|
||||
|
||||
ثم أنشئ دالة تقوم بتمرير تنبؤاتك وتصنيفاتك إلى [`~evaluate.EvaluationModule.compute`] لحساب الدقة:
|
||||
|
||||
```py
|
||||
>>> import numpy as np
|
||||
|
||||
>>> def compute_metrics(eval_pred):
|
||||
... predictions, labels = eval_pred
|
||||
... predictions = np.argmax(predictions, axis=1)
|
||||
... return accuracy.compute(predictions=predictions, references=labels)
|
||||
```
|
||||
|
||||
دالة `compute_metrics` جاهزة الآن، وستعود إليها عند إعداد التدريب.
|
||||
|
||||
## التدريب(Train)
|
||||
|
||||
قبل أن تبدأ في تدريب نموذجك، قم بإنشاء خريطة من المعرفات المتوقعة إلى تسمياتها باستخدام `id2label` و `label2id`:
|
||||
|
||||
```py
|
||||
>>> id2label = {0: "NEGATIVE", 1: "POSITIVE"}
|
||||
>>> label2id = {"NEGATIVE": 0, "POSITIVE": 1}
|
||||
```
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بضبط نموذج دقيق باستخدام [`Trainer`], فالق نظرة على البرنامج التعليمي الأساسي [هنا](../training#train-with-pytorch-trainer)!
|
||||
|
||||
</Tip>
|
||||
|
||||
أنت مستعد الآن لبدء تدريب نموذجك! قم بتحميل DistilBERT باستخدام [`AutoModelForSequenceClassification`] مع عدد التصنيفات المتوقعة وخريطة التسميات:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForSequenceClassification, TrainingArguments, Trainer
|
||||
|
||||
>>> model = AutoModelForSequenceClassification.from_pretrained(
|
||||
... "distilbert/distilbert-base-uncased", num_labels=2, id2label=id2label, label2id=label2id
|
||||
... )
|
||||
```
|
||||
|
||||
في هذه المرحلة، هناك ثلاث خطوات فقط متبقية:
|
||||
|
||||
1. حدد مُعامِلات التدريب في [`TrainingArguments`]. المُعامل المطلوب الوحيد هو `output_dir`، لتحديد مكان حفظ النموذج. يمكنك رفع النموذج إلى Hub بتعيين `push_to_hub=True` (يجب تسجيل الدخول إلى Hugging Face لرفع النموذج). سيقوم `Trainer` بتقييم الدقة وحفظ نقاط التحقق في نهاية كل حقبة.
|
||||
2. مرر مُعامِلات التدريب إلى `Trainer` مع النموذج، ومجموعة البيانات، والمحلل اللغوي، ومُجمِّع البيانات، ووظيفة `compute_metrics`.
|
||||
3. استدعِ [`~Trainer.train`] لضبط النموذج.
|
||||
|
||||
```py
|
||||
>>> training_args = TrainingArguments(
|
||||
... output_dir="my_awesome_model",
|
||||
... learning_rate=2e-5,
|
||||
... per_device_train_batch_size=16,
|
||||
... per_device_eval_batch_size=16,
|
||||
... num_train_epochs=2,
|
||||
... weight_decay=0.01,
|
||||
... eval_strategy="epoch",
|
||||
... save_strategy="epoch",
|
||||
... load_best_model_at_end=True,
|
||||
... push_to_hub=True,
|
||||
... )
|
||||
|
||||
>>> trainer = Trainer(
|
||||
... model=model,
|
||||
... args=training_args,
|
||||
... train_dataset=tokenized_imdb["train"],
|
||||
... eval_dataset=tokenized_imdb["test"],
|
||||
... processing_class=tokenizer,
|
||||
... data_collator=data_collator,
|
||||
... compute_metrics=compute_metrics,
|
||||
... )
|
||||
|
||||
>>> trainer.train()
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
يستخدم [`Trainer`] الحشو الديناميكي افتراضيًا عند تمرير `tokenizer` إليه. في هذه الحالة، لا تحتاج لتحديد مُجمِّع البيانات صراحةً.
|
||||
|
||||
</Tip>
|
||||
|
||||
بعد اكتمال التدريب، شارك نموذجك على Hub باستخدام الطريقة [`~transformers.Trainer.push_to_hub`] ليستخدمه الجميع:
|
||||
|
||||
```py
|
||||
>>> trainer.push_to_hub()
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بضبط نموذج باستخدام Keras، قم بالاطلاع على البرنامج التعليمي الأساسي [هنا](../training#train-a-tensorflow-model-with-keras)!
|
||||
|
||||
</Tip>
|
||||
لضبط نموذج في TensorFlow، ابدأ بإعداد دالة المحسن، وجدول معدل التعلم، وبعض معلمات التدريب:
|
||||
|
||||
```py
|
||||
>>> from transformers import create_optimizer
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> batch_size = 16
|
||||
>>> num_epochs = 5
|
||||
>>> batches_per_epoch = len(tokenized_imdb["train"]) // batch_size
|
||||
>>> total_train_steps = int(batches_per_epoch * num_epochs)
|
||||
>>> optimizer, schedule = create_optimizer(init_lr=2e-5, num_warmup_steps=0, num_train_steps=total_train_steps)
|
||||
```
|
||||
|
||||
ثم يمكنك تحميل DistilBERT مع [`TFAutoModelForSequenceClassification`] بالإضافة إلى عدد التصنيفات المتوقعة، وتعيينات التسميات:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForSequenceClassification
|
||||
|
||||
>>> model = TFAutoModelForSequenceClassification.from_pretrained(
|
||||
... "distilbert/distilbert-base-uncased", num_labels=2, id2label=id2label, label2id=label2id
|
||||
... )
|
||||
```
|
||||
|
||||
قم بتحويل مجموعات بياناتك إلى تنسيق `tf.data.Dataset` باستخدام [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
|
||||
|
||||
```py
|
||||
>>> tf_train_set = model.prepare_tf_dataset(
|
||||
... tokenized_imdb["train"],
|
||||
... shuffle=True,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
|
||||
>>> tf_validation_set = model.prepare_tf_dataset(
|
||||
... tokenized_imdb["test"],
|
||||
... shuffle=False,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
```
|
||||
|
||||
قم بتهيئة النموذج للتدريب باستخدام [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). لاحظ أن جميع نماذج Transformers لديها دالة خسارة ذات صلة بالمهمة بشكل افتراضي، لذلك لا تحتاج إلى تحديد واحدة ما لم ترغب في ذلك:
|
||||
|
||||
```py
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> model.compile(optimizer=optimizer) # No loss argument!
|
||||
```
|
||||
|
||||
آخر أمرين يجب إعدادهما قبل بدء التدريب هما حساب الدقة من التوقعات، وتوفير طريقة لدفع نموذجك إلى Hub. يتم ذلك باستخدام [Keras callbacks](../main_classes/keras_callbacks).
|
||||
|
||||
قم بتمرير دالة `compute_metrics` الخاصة بك إلى [`~transformers.KerasMetricCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import KerasMetricCallback
|
||||
|
||||
>>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set)
|
||||
```
|
||||
|
||||
حدد مكان دفع نموذجك والمجزئ اللغوي في [`~transformers.PushToHubCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import PushToHubCallback
|
||||
|
||||
>>> push_to_hub_callback = PushToHubCallback(
|
||||
... output_dir="my_awesome_model",
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
```
|
||||
|
||||
ثم اجمع الاستدعاءات معًا:
|
||||
|
||||
```py
|
||||
>>> callbacks = [metric_callback, push_to_hub_callback]
|
||||
```
|
||||
|
||||
أخيرًا، أنت مستعد لبدء تدريب نموذجك! قم باستدعاء [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) مع مجموعات بيانات التدريب والتحقق، وعدد الحقبات، واستدعاءاتك لضبط النموذج:
|
||||
|
||||
```py
|
||||
>>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3, callbacks=callbacks)
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، يتم تحميل نموذجك تلقائيًا إلى Hub حتى يتمكن الجميع من استخدامه!
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
<Tip>
|
||||
|
||||
للحصول على مثال أكثر عمقًا حول كيفية ضبط نموذج لتصنيف النصوص، قم بالاطلاع على الدفتر المقابل
|
||||
[دفتر PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)
|
||||
أو [دفتر TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb).
|
||||
|
||||
</Tip>
|
||||
|
||||
## الاستدلال(Inference)
|
||||
|
||||
رائع، الآن بعد أن قمت بضبط نموذج، يمكنك استخدامه للاستدلال!
|
||||
|
||||
احصل على بعض النصوص التي ترغب في إجراء الاستدلال عليها:
|
||||
|
||||
```py
|
||||
>>> text = "This was a masterpiece. Not completely faithful to the books, but enthralling from beginning to end. Might be my favorite of the three."
|
||||
```
|
||||
|
||||
أسهل طريقة لتجربة النموذج المضبوط للاستدلال هي استخدامه ضمن [`pipeline`]. قم بإنشاء `pipeline` لتحليل المشاعر مع نموذجك، ومرر نصك إليه:
|
||||
|
||||
```py
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> classifier = pipeline("sentiment-analysis", model="stevhliu/my_awesome_model")
|
||||
>>> classifier(text)
|
||||
[{'label': 'POSITIVE', 'score': 0.9994940757751465}]
|
||||
```
|
||||
|
||||
يمكنك أيضًا تكرار نتائج `pipeline` يدويًا إذا أردت:
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
قم بتجزئة النص وإرجاع تنسورات PyTorch:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_model")
|
||||
>>> inputs = tokenizer(text, return_tensors="pt")
|
||||
```
|
||||
|
||||
مرر المدخلات إلى النموذج واسترجع `logits`:
|
||||
|
||||
```py
|
||||
>>> import torch
>>> from transformers import AutoModelForSequenceClassification
|
||||
|
||||
>>> model = AutoModelForSequenceClassification.from_pretrained("stevhliu/my_awesome_model")
|
||||
>>> with torch.no_grad():
|
||||
... logits = model(**inputs).logits
|
||||
```
|
||||
|
||||
استخرج الفئة ذات الاحتمالية الأعلى، واستخدم `id2label` لتحويلها إلى تصنيف نصي:
|
||||
|
||||
```py
|
||||
>>> predicted_class_id = logits.argmax().item()
|
||||
>>> model.config.id2label[predicted_class_id]
|
||||
'POSITIVE'
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
قم بتجزئة النص وإرجاع تنسورات TensorFlow:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_model")
|
||||
>>> inputs = tokenizer(text, return_tensors="tf")
|
||||
```
|
||||
|
||||
قم بتمرير مدخلاتك إلى النموذج وإرجاع `logits`:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForSequenceClassification
|
||||
|
||||
>>> model = TFAutoModelForSequenceClassification.from_pretrained("stevhliu/my_awesome_model")
|
||||
>>> logits = model(**inputs).logits
|
||||
```
|
||||
|
||||
استخرج الفئة ذات الاحتمالية الأعلى، واستخدم `id2label` لتحويلها إلى تصنيف نصي:
|
||||
|
||||
```py
|
||||
>>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0])
|
||||
>>> model.config.id2label[predicted_class_id]
|
||||
'POSITIVE'
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
File diff suppressed because one or more lines are too long
@ -1,550 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# تصنيف الرموز(Token classification)
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
<Youtube id="wVHdVlPScxA"/>
|
||||
|
||||
يهدف تصنيف الرموز إلى إعطاء تسمية لكل رمز على حدة في الجملة. من أكثر مهام تصنيف الرموز شيوعًا هو التعرف على الكيانات المسماة (NER). يحاول NER تحديد تسمية لكل كيان في الجملة، مثل شخص، أو مكان، أو منظمة.
|
||||
|
||||
سيوضح لك هذا الدليل كيفية:
|
||||
|
||||
1. ضبط [DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased) على مجموعة بيانات [WNUT 17](https://huggingface.co/datasets/wnut_17) للكشف عن كيانات جديدة.
|
||||
2. استخدام نموذجك المضبوط بدقة للاستدلال.
|
||||
|
||||
<Tip>
|
||||
|
||||
للاطلاع على جميع البنى ونقاط التحقق المتوافقة مع هذه المهمة، نوصي بالرجوع إلى [صفحة المهمة](https://huggingface.co/tasks/token-classification).
|
||||
|
||||
</Tip>
|
||||
|
||||
قبل أن تبدأ، تأكد من تثبيت جميع المكتبات الضرورية:
|
||||
|
||||
```bash
|
||||
pip install transformers datasets evaluate seqeval
|
||||
```
|
||||
|
||||
نحن نشجعك على تسجيل الدخول إلى حساب HuggingFace الخاص بك حتى تتمكن من تحميل ومشاركة نموذجك مع المجتمع. عندما يُطلب منك، أدخل رمزك لتسجيل الدخول:
|
||||
|
||||
```py
|
||||
>>> from huggingface_hub import notebook_login
|
||||
|
||||
>>> notebook_login()
|
||||
```
|
||||
|
||||
## تحميل مجموعة بيانات WNUT 17
|
||||
|
||||
ابدأ بتحميل مجموعة بيانات WNUT 17 من مكتبة 🤗 Datasets:
|
||||
|
||||
```py
|
||||
>>> from datasets import load_dataset
|
||||
|
||||
>>> wnut = load_dataset("wnut_17")
|
||||
```
|
||||
|
||||
ثم ألق نظرة على مثال:
|
||||
|
||||
```py
|
||||
>>> wnut["train"][0]
|
||||
{'id': '0',
|
||||
'ner_tags': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 8, 8, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
'tokens': ['@paulwalk', 'It', "'s", 'the', 'view', 'from', 'where', 'I', "'m", 'living', 'for', 'two', 'weeks', '.', 'Empire', 'State', 'Building', '=', 'ESB', '.', 'Pretty', 'bad', 'storm', 'here', 'last', 'evening', '.']
|
||||
}
|
||||
```
|
||||
|
||||
يمثل كل رقم في `ner_tags` كياناً. حوّل الأرقام إلى أسماء التصنيفات لمعرفة ماهية الكيانات:
|
||||
|
||||
```py
|
||||
>>> label_list = wnut["train"].features[f"ner_tags"].feature.names
|
||||
>>> label_list
|
||||
[
|
||||
"O",
|
||||
"B-corporation",
|
||||
"I-corporation",
|
||||
"B-creative-work",
|
||||
"I-creative-work",
|
||||
"B-group",
|
||||
"I-group",
|
||||
"B-location",
|
||||
"I-location",
|
||||
"B-person",
|
||||
"I-person",
|
||||
"B-product",
|
||||
"I-product",
|
||||
]
|
||||
```
|
||||
|
||||
يشير الحرف الذي يسبق كل `ner_tag` إلى موضع الرمز داخل الكيان:

- `B-` يشير إلى بداية الكيان.
- `I-` يشير إلى أن الرمز يقع ضمن نفس الكيان (على سبيل المثال، الرمز `State` هو جزء من كيان مثل `Empire State Building`).
- `O` يشير إلى أن الرمز لا يمثل أي كيان (انظر المثال بعد هذه القائمة).
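
على سبيل المثال، يمكنك عرض رموز المثال الأول التي تحمل تصنيفًا غير `O` مع تسمياتها (مقتطف توضيحي يفترض أنك حمّلت `wnut` وعرّفت `label_list` كما هو موضح أعلاه):

```py
>>> example = wnut["train"][0]
>>> [(token, label_list[tag]) for token, tag in zip(example["tokens"], example["ner_tags"]) if label_list[tag] != "O"]
[('Empire', 'B-location'), ('State', 'I-location'), ('Building', 'I-location'), ('ESB', 'B-location')]
```
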
## المعالجة المسبقة(Preprocess)
|
||||
|
||||
<Youtube id="iY2AZYdZAr0"/>
|
||||
|
||||
الخطوة التالية هي تحميل مُجزِّئ النصوص DistilBERT للمعالجة المسبقة لحقل `tokens`:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
كما رأيت في حقل `tokens` المثال أعلاه، يبدو أن المدخل قد تم تحليله بالفعل. لكن المدخل لم يُجزأ بعد ويتعيّن عليك ضبط `is_split_into_words=True` لتقسيم الكلمات إلى كلمات فرعية. على سبيل المثال:
|
||||
|
||||
```py
|
||||
>>> example = wnut["train"][0]
|
||||
>>> tokenized_input = tokenizer(example["tokens"], is_split_into_words=True)
|
||||
>>> tokens = tokenizer.convert_ids_to_tokens(tokenized_input["input_ids"])
|
||||
>>> tokens
|
||||
['[CLS]', '@', 'paul', '##walk', 'it', "'", 's', 'the', 'view', 'from', 'where', 'i', "'", 'm', 'living', 'for', 'two', 'weeks', '.', 'empire', 'state', 'building', '=', 'es', '##b', '.', 'pretty', 'bad', 'storm', 'here', 'last', 'evening', '.', '[SEP]']
|
||||
```
|
||||
|
||||
ومع ذلك، يضيف هذا بعض الرموز الخاصة `[CLS]` و`[SEP]` وتقسيم الكلمات إلى أجزاء يُنشئ عدم تطابق بين المُدخلات والتسميات. قد يتم تقسيم كلمة واحدة تقابل تسمية واحدة الآن إلى كلمتين فرعيتين. ستحتاج إلى إعادة محاذاة الرموز والتسميات عن طريق:
|
||||
|
||||
1. ربط كل رمز بالكلمة الأصلية باستخدام الخاصية [`word_ids`](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.BatchEncoding.word_ids).
|
||||
2. تعيين التسمية `-100` للرموز الخاصة `[CLS]` و`[SEP]` بحيث يتم تجاهلها بواسطة دالة الخسارة PyTorch (انظر [CrossEntropyLoss](https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html)).
|
||||
3. تسمية الرمز الأول فقط لكلمة معينة. قم بتعيين `-100` لأجزاء الكلمة الأخرى.
|
||||
|
||||
إليك كيفية إنشاء دالة لإعادة محاذاة الرموز والتسميات، واقتطاع الجمل بحيث لا تتجاوز الحد الأقصى لطول مُدخلات DistilBERT:
|
||||
|
||||
```py
|
||||
>>> def tokenize_and_align_labels(examples):
|
||||
... tokenized_inputs = tokenizer(examples["tokens"], truncation=True, is_split_into_words=True)
|
||||
|
||||
... labels = []
|
||||
... for i, label in enumerate(examples[f"ner_tags"]):
|
||||
... word_ids = tokenized_inputs.word_ids(batch_index=i) # تعيين الرموز إلى كلماتهم المقابلة.
|
||||
... previous_word_idx = None
|
||||
... label_ids = []
|
||||
... for word_idx in word_ids: # تعيين الرموز الخاصة إلى -100.
|
||||
... if word_idx is None:
|
||||
... label_ids.append(-100)
|
||||
... elif word_idx != previous_word_idx: # تسمية الرمز الأول فقط لكلمة معينة.
|
||||
... label_ids.append(label[word_idx])
|
||||
... else:
|
||||
... label_ids.append(-100)
|
||||
... previous_word_idx = word_idx
|
||||
... labels.append(label_ids)
|
||||
|
||||
... tokenized_inputs["labels"] = labels
|
||||
... return tokenized_inputs
|
||||
```
|
||||
|
||||
لتطبيق هذه العملية على كامل مجموعة البيانات، استخدم الدالة [`~datasets.Dataset.map`] لمجموعة بيانات 🤗. يمكنك تسريع الدالة `map` عن طريق تعيين `batched=True` لمعالجة عناصر متعددة من مجموعة البيانات في وقت واحد:
|
||||
|
||||
```py
|
||||
>>> tokenized_wnut = wnut.map(tokenize_and_align_labels, batched=True)
|
||||
```
|
||||
|
||||
الآن قم بإنشاء دفعة من الأمثلة باستخدام [`DataCollatorForTokenClassification`]. من الأفضل استخدام *الحشو الديناميكي* لجعل الجمل بطول أطول جملة في كل دفعة أثناء التجميع، بدلاً من حشو مجموعة البيانات بالكامل إلى الطول الأقصى.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```py
|
||||
>>> from transformers import DataCollatorForTokenClassification
|
||||
|
||||
>>> data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```py
|
||||
>>> from transformers import DataCollatorForTokenClassification
|
||||
|
||||
>>> data_collator = DataCollatorForTokenClassification(tokenizer=tokenizer, return_tensors="tf")
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## التقييم(Evaluate)
|
||||
|
||||
يُعدّ تضمين مقياس أثناء التدريب مفيدًا في تقييم أداء نموذجك. يمكنك تحميل طريقة تقييم بسرعة مع مكتبة 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index). لهذه المهمة، قم بتحميل إطار [seqeval](https://huggingface.co/spaces/evaluate-metric/seqeval) (انظر جولة 🤗 Evaluate [السريعة](https://huggingface.co/docs/evaluate/a_quick_tour) لمعرفة المزيد حول كيفية تحميل وحساب مقياس). يُخرج seqeval عدة نتائج: الضبط (precision)، والاستدعاء (recall)، ومقياس F1، والدقة (accuracy).
|
||||
|
||||
```py
|
||||
>>> import evaluate
|
||||
|
||||
>>> seqeval = evaluate.load("seqeval")
|
||||
```
|
||||
|
||||
احصل على تسميات الكيانات المسماة (NER) أولاً، ثم أنشئ دالة تُمرر تنبؤاتك وتسمياتك الصحيحة إلى [`~evaluate.EvaluationModule.compute`] لحساب النتائج:
|
||||
|
||||
```py
|
||||
>>> import numpy as np
|
||||
|
||||
>>> labels = [label_list[i] for i in example[f"ner_tags"]]
|
||||
|
||||
>>> def compute_metrics(p):
|
||||
... predictions, labels = p
|
||||
... predictions = np.argmax(predictions, axis=2)
|
||||
|
||||
... true_predictions = [
|
||||
... [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
|
||||
... for prediction, label in zip(predictions, labels)
|
||||
... ]
|
||||
... true_labels = [
|
||||
... [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
|
||||
... for prediction, label in zip(predictions, labels)
|
||||
... ]
|
||||
|
||||
... results = seqeval.compute(predictions=true_predictions, references=true_labels)
|
||||
... return {
|
||||
... "precision": results["overall_precision"],
|
||||
... "recall": results["overall_recall"],
|
||||
... "f1": results["overall_f1"],
|
||||
... "accuracy": results["overall_accuracy"],
|
||||
... }
|
||||
```
|
||||
|
||||
دالة `compute_metrics` جاهزة للاستخدام، وستحتاج إليها عند إعداد التدريب.
|
||||
|
||||
## التدريب(Train)
|
||||
|
||||
قبل تدريب النموذج، جهّز خريطة تربط بين المعرّفات المتوقعة وتسمياتها باستخدام `id2label` و `label2id`:
|
||||
|
||||
```py
|
||||
>>> id2label = {
|
||||
... 0: "O",
|
||||
... 1: "B-corporation",
|
||||
... 2: "I-corporation",
|
||||
... 3: "B-creative-work",
|
||||
... 4: "I-creative-work",
|
||||
... 5: "B-group",
|
||||
... 6: "I-group",
|
||||
... 7: "B-location",
|
||||
... 8: "I-location",
|
||||
... 9: "B-person",
|
||||
... 10: "I-person",
|
||||
... 11: "B-product",
|
||||
... 12: "I-product",
|
||||
... }
|
||||
>>> label2id = {
|
||||
... "O": 0,
|
||||
... "B-corporation": 1,
|
||||
... "I-corporation": 2,
|
||||
... "B-creative-work": 3,
|
||||
... "I-creative-work": 4,
|
||||
... "B-group": 5,
|
||||
... "I-group": 6,
|
||||
... "B-location": 7,
|
||||
... "I-location": 8,
|
||||
... "B-person": 9,
|
||||
... "I-person": 10,
|
||||
... "B-product": 11,
|
||||
... "I-product": 12,
|
||||
... }
|
||||
```
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بتعديل نموذج باستخدام [`Trainer`], ألق نظرة على الدليل التعليمي الأساسي [هنا](../training#train-with-pytorch-trainer)!
|
||||
|
||||
</Tip>
|
||||
|
||||
أنت مستعد الآن لبدء تدريب نموذجك! قم بتحميل DistilBERT مع [`AutoModelForTokenClassification`] إلى جانب عدد التصنيفات المتوقعة، وخريطة التسميات:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForTokenClassification, TrainingArguments, Trainer
|
||||
|
||||
>>> model = AutoModelForTokenClassification.from_pretrained(
|
||||
... "distilbert/distilbert-base-uncased", num_labels=13, id2label=id2label, label2id=label2id
|
||||
... )
|
||||
```
|
||||
|
||||
في هذه المرحلة، هناك ثلاث خطوات فقط متبقية:
|
||||
|
||||
1. حدد معلمات التدريب الخاصة بك في [`TrainingArguments`]. المعامل الوحيد المطلوب هو `output_dir` الذي يحدد مكان حفظ نموذجك. ستقوم بدفع هذا النموذج إلى Hub عن طريق تعيين `push_to_hub=True` (يجب أن تكون مسجلاً الدخول إلى Hugging Face لتحميل نموذجك). في نهاية كل حقبة، سيقوم [`Trainer`] بتقييم درجات seqeval وحفظ نقطة تحقق التدريب.
|
||||
2. قم بتمرير معاملات التدريب إلى [`Trainer`] إلى جانب النموذج، ومجموعة البيانات، والمُجزِّئ اللغوي، و`data collator`، ودالة `compute_metrics`.
|
||||
3. استدعِ [`~Trainer.train`] لتدريب نموذجك.
|
||||
|
||||
```py
|
||||
>>> training_args = TrainingArguments(
|
||||
... output_dir="my_awesome_wnut_model",
|
||||
... learning_rate=2e-5,
|
||||
... per_device_train_batch_size=16,
|
||||
... per_device_eval_batch_size=16,
|
||||
... num_train_epochs=2,
|
||||
... weight_decay=0.01,
|
||||
... eval_strategy="epoch",
|
||||
... save_strategy="epoch",
|
||||
... load_best_model_at_end=True,
|
||||
... push_to_hub=True,
|
||||
... )
|
||||
|
||||
>>> trainer = Trainer(
|
||||
... model=model,
|
||||
... args=training_args,
|
||||
... train_dataset=tokenized_wnut["train"],
|
||||
... eval_dataset=tokenized_wnut["test"],
|
||||
... processing_class=tokenizer,
|
||||
... data_collator=data_collator,
|
||||
... compute_metrics=compute_metrics,
|
||||
... )
|
||||
|
||||
>>> trainer.train()
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، شارك نموذجك على Hub باستخدام طريقة [`~transformers.Trainer.push_to_hub`] حتى يتمكن الجميع من استخدام نموذجك:
|
||||
|
||||
```py
|
||||
>>> trainer.push_to_hub()
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
<Tip>
|
||||
|
||||
إذا لم تكن على دراية بتعديل نموذج باستخدام Keras، ألق نظرة على الدليل التعليمي الأساسي [هنا](../training#train-a-tensorflow-model-with-keras)!
|
||||
|
||||
</Tip>
|
||||
للتعديل على نموذج في TensorFlow، ابدأ بإعداد دالة محسن، وجدول معدل التعلم، وبعض معلمات التدريب:
|
||||
|
||||
```py
|
||||
>>> from transformers import create_optimizer
|
||||
|
||||
>>> batch_size = 16
|
||||
>>> num_train_epochs = 3
|
||||
>>> num_train_steps = (len(tokenized_wnut["train"]) // batch_size) * num_train_epochs
|
||||
>>> optimizer, lr_schedule = create_optimizer(
|
||||
... init_lr=2e-5,
|
||||
... num_train_steps=num_train_steps,
|
||||
... weight_decay_rate=0.01,
|
||||
... num_warmup_steps=0,
|
||||
... )
|
||||
```
|
||||
|
||||
ثم يمكنك تحميل DistilBERT مع [`TFAutoModelForTokenClassification`] إلى جانب عدد التسميات المتوقعة، وتخطيطات التسميات:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForTokenClassification
|
||||
|
||||
>>> model = TFAutoModelForTokenClassification.from_pretrained(
|
||||
... "distilbert/distilbert-base-uncased", num_labels=13, id2label=id2label, label2id=label2id
|
||||
... )
|
||||
```
|
||||
|
||||
قم بتحويل مجموعات بياناتك إلى تنسيق `tf.data.Dataset` مع [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
|
||||
|
||||
```py
|
||||
>>> tf_train_set = model.prepare_tf_dataset(
|
||||
... tokenized_wnut["train"],
|
||||
... shuffle=True,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
|
||||
>>> tf_validation_set = model.prepare_tf_dataset(
|
||||
... tokenized_wnut["validation"],
|
||||
... shuffle=False,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
```
|
||||
|
||||
هيّئ النموذج للتدريب باستخدام [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). لاحظ أن نماذج Transformers تتضمن دالة خسارة افتراضية مرتبطة بالمهمة، لذلك لا تحتاج إلى تحديد واحدة إلا إذا كنت ترغب في ذلك:
|
||||
|
||||
```py
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> model.compile(optimizer=optimizer) # No loss argument!
|
||||
```
|
||||
|
||||
آخر أمرين يجب إعدادهما قبل بدء التدريب هو حساب درجات seqeval من التنبؤات، وتوفير طريقة لدفع نموذجك إلى Hub. يتم ذلك باستخدام [Keras callbacks](../main_classes/keras_callbacks).
|
||||
|
||||
مرر دالة `compute_metrics` الخاصة بك إلى [`~transformers.KerasMetricCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import KerasMetricCallback
|
||||
|
||||
>>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_validation_set)
|
||||
```
|
||||
|
||||
حدد مكان دفع نموذجك والمحلل اللغوي في [`~transformers.PushToHubCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import PushToHubCallback
|
||||
|
||||
>>> push_to_hub_callback = PushToHubCallback(
|
||||
... output_dir="my_awesome_wnut_model",
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
```
|
||||
|
||||
ثم جمّع callbacks الخاصة بك معًا:
|
||||
|
||||
```py
|
||||
>>> callbacks = [metric_callback, push_to_hub_callback]
|
||||
```
|
||||
|
||||
أخيرًا، أنت جاهز الآن لبدء تدريب نموذجك! قم باستدعاء [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) مع بيانات التدريب والتحقق، وعدد الحقبات، وcallbacks لتعديل النموذج:
|
||||
|
||||
```py
|
||||
>>> model.fit(x=tf_train_set, validation_data=tf_validation_set, epochs=3, callbacks=callbacks)
|
||||
```
|
||||
|
||||
بمجرد اكتمال التدريب، يتم تحميل نموذجك تلقائيًا إلى Hub حتى يتمكن الجميع من استخدامه!
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
<Tip>
|
||||
|
||||
للحصول على مثال أكثر تفصيلاً حول كيفية تعديل نموذج لتصنيف الرموز، ألق نظرة على الدفتر المقابل
|
||||
[دفتر PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)
|
||||
أو [دفتر TensorFlow](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb).
|
||||
|
||||
</Tip>
|
||||
|
||||
## الاستدلال(Inference)
|
||||
|
||||
رائع، الآن بعد أن قمت بتعديل نموذج، يمكنك استخدامه للاستدلال!
|
||||
|
||||
احصل على بعض النصوص التي تريد تشغيل الاستدلال عليها:
|
||||
|
||||
```py
|
||||
>>> text = "The Golden State Warriors are an American professional basketball team based in San Francisco."
|
||||
```
|
||||
|
||||
أبسط طريقة لتجربة نموذجك المضبوط للاستدلال هي استخدامه في [`pipeline`]. أنشئ `pipeline` للتعرف على الكيانات المسماة باستخدام نموذجك، ومرر نصك إليه:
|
||||
|
||||
```py
|
||||
>>> from transformers import pipeline
|
||||
|
||||
>>> classifier = pipeline("ner", model="stevhliu/my_awesome_wnut_model")
|
||||
>>> classifier(text)
|
||||
[{'entity': 'B-location',
|
||||
'score': 0.42658573,
|
||||
'index': 2,
|
||||
'word': 'golden',
|
||||
'start': 4,
|
||||
'end': 10},
|
||||
{'entity': 'I-location',
|
||||
'score': 0.35856336,
|
||||
'index': 3,
|
||||
'word': 'state',
|
||||
'start': 11,
|
||||
'end': 16},
|
||||
{'entity': 'B-group',
|
||||
'score': 0.3064001,
|
||||
'index': 4,
|
||||
'word': 'warriors',
|
||||
'start': 17,
|
||||
'end': 25},
|
||||
{'entity': 'B-location',
|
||||
'score': 0.65523505,
|
||||
'index': 13,
|
||||
'word': 'san',
|
||||
'start': 80,
|
||||
'end': 83},
|
||||
{'entity': 'B-location',
|
||||
'score': 0.4668663,
|
||||
'index': 14,
|
||||
'word': 'francisco',
|
||||
'start': 84,
|
||||
'end': 93}]
|
||||
```
|
||||
|
||||
يمكنك أيضًا تكرار نتائج `pipeline` يدويًا إذا أردت:
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
قسّم النص إلى رموز وأرجع المُوتّرات بلغة PyTorch:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_wnut_model")
|
||||
>>> inputs = tokenizer(text, return_tensors="pt")
|
||||
```
|
||||
|
||||
مرر مدخلاتك إلى النموذج واحصل على `logits`:
|
||||
|
||||
```py
|
||||
>>> import torch
>>> from transformers import AutoModelForTokenClassification
|
||||
|
||||
>>> model = AutoModelForTokenClassification.from_pretrained("stevhliu/my_awesome_wnut_model")
|
||||
>>> with torch.no_grad():
|
||||
... logits = model(**inputs).logits
|
||||
```
|
||||
|
||||
استخرج الفئة ذات الاحتمالية الأعلى، واستخدم جدول `id2label` الخاصة بالنموذج لتحويلها إلى تسمية نصية:
|
||||
|
||||
```py
|
||||
>>> predictions = torch.argmax(logits, dim=2)
|
||||
>>> predicted_token_class = [model.config.id2label[t.item()] for t in predictions[0]]
|
||||
>>> predicted_token_class
|
||||
['O',
|
||||
'O',
|
||||
'B-location',
|
||||
'I-location',
|
||||
'B-group',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'B-location',
|
||||
'B-location',
|
||||
'O',
|
||||
'O']
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
قسّم النص إلى رموز وأرجع المُوتّرات بصيغة TensorFlow:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("stevhliu/my_awesome_wnut_model")
|
||||
>>> inputs = tokenizer(text, return_tensors="tf")
|
||||
```
|
||||
|
||||
مرر مدخلاتك إلى النموذج واحصل على `logits`:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForTokenClassification
|
||||
|
||||
>>> model = TFAutoModelForTokenClassification.from_pretrained("stevhliu/my_awesome_wnut_model")
|
||||
>>> logits = model(**inputs).logits
|
||||
```
|
||||
|
||||
استخرج الفئة ذات الاحتمالية الأعلى، واستخدم جدول `id2label` الخاصة بالنموذج لتحويلها إلى تسمية نصية:
|
||||
|
||||
```py
|
||||
>>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1)
|
||||
>>> predicted_token_class = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()]
|
||||
>>> predicted_token_class
|
||||
['O',
|
||||
'O',
|
||||
'B-location',
|
||||
'I-location',
|
||||
'B-group',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'O',
|
||||
'B-location',
|
||||
'B-location',
|
||||
'O',
|
||||
'O']
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
@ -1,407 +0,0 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# الترجمة(Translation)
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
<Youtube id="1JvfrvZgi6c"/>
|
||||
|
||||
الترجمة هي عملية تحويل سلسلة نصية من لغة إلى أخرى. وهي إحدى المهام التي يمكن صياغتها كمسألة تسلسل إلى تسلسل، وهو إطار عمل قوي لإنتاج مخرجات من مدخلات، مثل الترجمة أو التلخيص. تُستخدم أنظمة الترجمة عادةً للترجمة بين نصوص لغات مختلفة، ويمكن استخدامها أيضًا لترجمة الكلام أو لمهام تجمع بين النصوص والكلام، مثل تحويل النص إلى كلام أو تحويل الكلام إلى نص.
|
||||
|
||||
سيوضح لك هذا الدليل كيفية:
|
||||
|
||||
1. ضبط دقيق لنموذج [T5](https://huggingface.co/google-t5/t5-small) على المجموعة الفرعية الإنجليزية-الفرنسية من مجموعة بيانات [OPUS Books](https://huggingface.co/datasets/opus_books) لترجمة النص الإنجليزي إلى الفرنسية.
|
||||
2. استخدام النموذج المضبوط بدقة للاستدلال.
|
||||
|
||||
<Tip>
|
||||
|
||||
لمشاهدة جميع البنى والنسخ المتوافقة مع هذه المهمة، نوصي بالتحقق من [صفحة المهمة](https://huggingface.co/tasks/translation).
|
||||
|
||||
</Tip>
|
||||
|
||||
قبل البدء، تأكد من تثبيت جميع المكتبات الضرورية:
|
||||
|
||||
```bash
|
||||
pip install transformers datasets evaluate sacrebleu
|
||||
```
|
||||
|
||||
نشجعك على تسجيل الدخول إلى حساب Hugging Face الخاص بك حتى تتمكن من تحميل نموذجك ومشاركته مع المجتمع. عند الطلب، أدخل الرمز المميز الخاص بك لتسجيل الدخول:
|
||||
|
||||
```py
|
||||
>>> from huggingface_hub import notebook_login
|
||||
|
||||
>>> notebook_login()
|
||||
```
|
||||
|
||||
## تحميل مجموعة بيانات OPUS Books
|
||||
|
||||
ابدأ بتحميل المجموعة الفرعية الإنجليزية-الفرنسية من مجموعة بيانات [OPUS Books](https://huggingface.co/datasets/opus_books) من مكتبة 🤗 Datasets:
|
||||
|
||||
```py
|
||||
>>> from datasets import load_dataset
|
||||
|
||||
>>> books = load_dataset("opus_books", "en-fr")
|
||||
```
|
||||
|
||||
قسّم مجموعة البيانات إلى مجموعة تدريب ومجموعة اختبار باستخدام طريقة [`~datasets.Dataset.train_test_split`]:
|
||||
|
||||
```py
|
||||
>>> books = books["train"].train_test_split(test_size=0.2)
|
||||
```
|
||||
|
||||
ثم ألقِ نظرة على مثال:
|
||||
|
||||
```py
|
||||
>>> books["train"][0]
|
||||
{'id': '90560',
|
||||
'translation': {'en': 'But this lofty plateau measured only a few fathoms, and soon we reentered Our Element.',
|
||||
'fr': 'Mais ce plateau élevé ne mesurait que quelques toises, et bientôt nous fûmes rentrés dans notre élément.'}}
|
||||
```
|
||||
|
||||
`translation`: ترجمة إنجليزية وفرنسية للنص.
|
||||
|
||||
## المعالجة المسبقة(Preprocess)
|
||||
|
||||
<Youtube id="XAR8jnZZuUs"/>
|
||||
|
||||
الخطوة التالية هي تحميل مُجزئ T5 لمعالجة أزواج اللغة الإنجليزية-الفرنسية:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> checkpoint = "google-t5/t5-small"
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(checkpoint)
|
||||
```
|
||||
|
||||
يجب أن تقوم دالة المعالجة المسبقة التي تُريد إنشاءها بما يلي:
|
||||
|
||||
1. إضافة بادئة إلى المُدخل بمُوجه حتى يعرف T5 أن هذه مهمة ترجمة. تتطلب بعض النماذج القادرة على أداء مهام متعددة توجيهًا لمهام مُحددة.
|
||||
2. تعيين اللغة الهدف (الفرنسية) في معامل `text_target` لضمان معالجة المُجزئ للنص بشكل صحيح. إذا لم تُعيّن `text_target`، فسيُعالج المُجزئ النص على أنه إنجليزي.
|
||||
3. اقتطاع التسلسلات بحيث لا يزيد طولها عن الحد الأقصى الذي يحدده معامل `max_length`.
|
||||
|
||||
```py
|
||||
>>> source_lang = "en"
|
||||
>>> target_lang = "fr"
|
||||
>>> prefix = "translate English to French: "
|
||||
|
||||
>>> def preprocess_function(examples):
|
||||
... inputs = [prefix + example[source_lang] for example in examples["translation"]]
|
||||
... targets = [example[target_lang] for example in examples["translation"]]
|
||||
... model_inputs = tokenizer(inputs, text_target=targets, max_length=128, truncation=True)
|
||||
... return model_inputs
|
||||
```
|
||||
|
||||
لتطبيق دالة المعالجة المسبقة على مجموعة البيانات بأكملها، استخدم طريقة [`~datasets.Dataset.map`] من 🤗 Datasets. يمكنك تسريع دالة `map` عن طريق تعيين `batched=True` لمعالجة عناصر متعددة من مجموعة البيانات في وقت واحد:
|
||||
|
||||
```py
|
||||
>>> tokenized_books = books.map(preprocess_function, batched=True)
|
||||
```
|
||||
|
||||
الآن أنشئ دفعة من الأمثلة باستخدام [`DataCollatorForSeq2Seq`]. من الأكثر كفاءة استخدام *الحشو الديناميكي* لجعل الجمل بطول أطول جملة في الدفعة أثناء التجميع، بدلاً من حشو مجموعة البيانات بأكملها إلى الحد الأقصى للطول.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorForSeq2Seq
|
||||
|
||||
>>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
|
||||
```py
|
||||
>>> from transformers import DataCollatorForSeq2Seq
|
||||
|
||||
>>> data_collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=checkpoint, return_tensors="tf")
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## التقييم (Evaluate)
|
||||
|
||||
غالباً ما يكون تضمين مقياس أثناء التدريب مفيداً لتقييم أداء نموذجك. يمكنك تحميل طريقة تقييم بسرعة باستخدام مكتبة 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index). لهذه المهمة، حمّل مقياس [SacreBLEU](https://huggingface.co/spaces/evaluate-metric/sacrebleu) (راجع [الجولة السريعة](https://huggingface.co/docs/evaluate/a_quick_tour) لـ 🤗 Evaluate لمعرفة المزيد حول كيفية تحميل وحساب مقياس):
|
||||
|
||||
```py
|
||||
>>> import evaluate
|
||||
|
||||
>>> metric = evaluate.load("sacrebleu")
|
||||
```
|
||||
|
||||
ثم أنشئ دالة تُمرر تنبؤاتك وتسمياتك إلى [`~evaluate.EvaluationModule.compute`] لحساب درجة SacreBLEU:
|
||||
|
||||
```py
|
||||
>>> import numpy as np
|
||||
|
||||
>>> def postprocess_text(preds, labels):
|
||||
... preds = [pred.strip() for pred in preds]
|
||||
... labels = [[label.strip()] for label in labels]
|
||||
|
||||
... return preds, labels
|
||||
|
||||
>>> def compute_metrics(eval_preds):
|
||||
... preds, labels = eval_preds
|
||||
... if isinstance(preds, tuple):
|
||||
... preds = preds[0]
|
||||
... decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
|
||||
|
||||
... labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
|
||||
... decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
|
||||
|
||||
... decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
|
||||
|
||||
... result = metric.compute(predictions=decoded_preds, references=decoded_labels)
|
||||
... result = {"bleu": result["score"]}
|
||||
|
||||
... prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
|
||||
... result["gen_len"] = np.mean(prediction_lens)
|
||||
... result = {k: round(v, 4) for k, v in result.items()}
|
||||
... return result
|
||||
```
|
||||
|
||||
Your `compute_metrics` function is ready to go now, and you'll return to it when you set up your training.
|
||||
|
||||
## Train
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
<Tip>
|
||||
|
||||
If you aren't familiar with finetuning a model with the [`Trainer`], take a look at the basic tutorial [here](../training#train-with-pytorch-trainer)!
|
||||
|
||||
</Tip>
|
||||
|
||||
You're ready to start training your model now! Load T5 with [`AutoModelForSeq2SeqLM`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForSeq2SeqLM, Seq2SeqTrainingArguments, Seq2SeqTrainer
|
||||
|
||||
>>> model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)
|
||||
```
|
||||
|
||||
At this point, only three steps remain:

1. Define your training hyperparameters in [`Seq2SeqTrainingArguments`]. The only required parameter is `output_dir`, which specifies where to save your model. You'll push this model to the Hub by setting `push_to_hub=True` (you need to be signed in to Hugging Face to upload your model; a login snippet follows this list). At the end of each epoch, the [`Trainer`] will evaluate the SacreBLEU metric and save the training checkpoint.
2. Pass the training arguments to [`Seq2SeqTrainer`] along with the model, dataset, tokenizer, data collator, and `compute_metrics` function.
3. Call [`~Trainer.train`] to finetune your model.
|
||||
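If you aren't signed in yet, one way to do it, assuming you're working in a notebook, is with `notebook_login` from `huggingface_hub` (it prompts you for your access token):

```py
>>> from huggingface_hub import notebook_login

>>> notebook_login()
```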
|
||||
```py
|
||||
>>> training_args = Seq2SeqTrainingArguments(
|
||||
... output_dir="my_awesome_opus_books_model",
|
||||
... eval_strategy="epoch",
|
||||
... learning_rate=2e-5,
|
||||
... per_device_train_batch_size=16,
|
||||
... per_device_eval_batch_size=16,
|
||||
... weight_decay=0.01,
|
||||
... save_total_limit=3,
|
||||
... num_train_epochs=2,
|
||||
... predict_with_generate=True,
|
||||
... fp16=True, #change to bf16=True for XPU
|
||||
... push_to_hub=True,
|
||||
... )
|
||||
|
||||
>>> trainer = Seq2SeqTrainer(
|
||||
... model=model,
|
||||
... args=training_args,
|
||||
... train_dataset=tokenized_books["train"],
|
||||
... eval_dataset=tokenized_books["test"],
|
||||
... processing_class=tokenizer,
|
||||
... data_collator=data_collator,
|
||||
... compute_metrics=compute_metrics,
|
||||
... )
|
||||
|
||||
>>> trainer.train()
|
||||
```
|
||||
|
||||
Once training is completed, share your model to the Hub with the [`~transformers.Trainer.push_to_hub`] method so everyone can use your model:
|
||||
|
||||
```py
|
||||
>>> trainer.push_to_hub()
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
<Tip>
|
||||
|
||||
If you aren't familiar with finetuning a model with Keras, take a look at the basic tutorial [here](../training#train-a-tensorflow-model-with-keras)!
|
||||
|
||||
</Tip>
|
||||
To finetune a model in TensorFlow, start by setting up an optimizer function, learning rate schedule, and some training hyperparameters:
|
||||
|
||||
```py
|
||||
>>> from transformers import AdamWeightDecay
|
||||
|
||||
>>> optimizer = AdamWeightDecay(learning_rate=2e-5, weight_decay_rate=0.01)
|
||||
```
|
||||
|
||||
Then you can load T5 with [`TFAutoModelForSeq2SeqLM`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForSeq2SeqLM
|
||||
|
||||
>>> model = TFAutoModelForSeq2SeqLM.from_pretrained(checkpoint)
|
||||
```
|
||||
|
||||
Convert your datasets to the `tf.data.Dataset` format with [`~transformers.TFPreTrainedModel.prepare_tf_dataset`]:
|
||||
|
||||
```py
|
||||
>>> tf_train_set = model.prepare_tf_dataset(
|
||||
... tokenized_books["train"],
|
||||
... shuffle=True,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
|
||||
>>> tf_test_set = model.prepare_tf_dataset(
|
||||
... tokenized_books["test"],
|
||||
... shuffle=False,
|
||||
... batch_size=16,
|
||||
... collate_fn=data_collator,
|
||||
... )
|
||||
```
|
||||
|
||||
Configure the model for training with [`compile`](https://keras.io/api/models/model_training_apis/#compile-method). Note that Transformers models all have a default task-relevant loss function, so you don't need to specify one unless you want to:
|
||||
|
||||
```py
|
||||
>>> import tensorflow as tf
|
||||
|
||||
>>> model.compile(optimizer=optimizer) # No loss argument!
|
||||
```
|
||||
|
||||
The last two things to set up before you start training are computing the SacreBLEU metric from the predictions, and providing a way to push your model to the Hub. Both are done by using [Keras callbacks](../main_classes/keras_callbacks).
|
||||
|
||||
Pass your `compute_metrics` function to [`~transformers.KerasMetricCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import KerasMetricCallback
|
||||
|
||||
>>> metric_callback = KerasMetricCallback(metric_fn=compute_metrics, eval_dataset=tf_test_set)
|
||||
```
|
||||
|
||||
Specify where to push your model and tokenizer in the [`~transformers.PushToHubCallback`]:
|
||||
|
||||
```py
|
||||
>>> from transformers.keras_callbacks import PushToHubCallback
|
||||
|
||||
>>> push_to_hub_callback = PushToHubCallback(
|
||||
... output_dir="my_awesome_opus_books_model",
|
||||
... tokenizer=tokenizer,
|
||||
... )
|
||||
```
|
||||
|
||||
Then bundle your callbacks together:
|
||||
|
||||
```py
|
||||
>>> callbacks = [metric_callback, push_to_hub_callback]
|
||||
```
|
||||
|
||||
Finally, you're ready to start training your model! Call [`fit`](https://keras.io/api/models/model_training_apis/#fit-method) with your training and validation datasets, the number of epochs, and your callbacks to finetune the model:
|
||||
|
||||
```py
|
||||
>>> model.fit(x=tf_train_set, validation_data=tf_test_set, epochs=3, callbacks=callbacks)
|
||||
```
|
||||
|
||||
Once training is completed, your model is automatically uploaded to the Hub so everyone can use it!
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
<Tip>
|
||||
|
||||
For a more in-depth example of how to finetune a model for translation, take a look at the corresponding [PyTorch notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation.ipynb)
or [TensorFlow notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/translation-tf.ipynb).
|
||||
|
||||
</Tip>
|
||||
|
||||
## Inference
|
||||
|
||||
Great, now that you've finetuned a model, you can use it for inference!
|
||||
|
||||
Come up with some text you'd like to translate to another language. For T5, you need to prefix your input depending on the task you're working on. For translation from English to French, you should prefix your input as shown below:
|
||||
|
||||
```py
|
||||
>>> text = "translate English to French: Legumes share resources with nitrogen-fixing bacteria."
|
||||
```
|
||||
|
||||
The simplest way to try out your finetuned model for inference is to use it in a [`pipeline`]. Instantiate a `pipeline` for translation with your model, and pass your text to it:
|
||||
|
||||
```py
|
||||
>>> from transformers import pipeline
|
||||
|
||||
# Change `xx` to the language of the input and `yy` to the language of the desired output.
# Examples: "en" for English, "fr" for French, "de" for German, "es" for Spanish, "zh" for Chinese, etc; translation_en_to_fr translates English to French
# You can view all the lists of languages here - https://huggingface.co/languages
|
||||
>>> translator = pipeline("translation_xx_to_yy", model="username/my_awesome_opus_books_model")
|
||||
>>> translator(text)
|
||||
[{'translation_text': 'Legumes partagent des ressources avec des bactéries azotantes.'}]
|
||||
```
|
||||
|
||||
You can also manually replicate the results of the `pipeline` if you'd like:
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
Tokenize the text and return the `input_ids` as PyTorch tensors:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_opus_books_model")
|
||||
>>> inputs = tokenizer(text, return_tensors="pt").input_ids
|
||||
```
|
||||
|
||||
Use the [`~generation.GenerationMixin.generate`] method to create the translation. For more details about the different text generation strategies and parameters for controlling generation, check out the [Text Generation](../main_classes/text_generation) API.
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForSeq2SeqLM
|
||||
|
||||
>>> model = AutoModelForSeq2SeqLM.from_pretrained("username/my_awesome_opus_books_model")
|
||||
>>> outputs = model.generate(inputs, max_new_tokens=40, do_sample=True, top_k=30, top_p=0.95)
|
||||
```
|
||||
|
||||
Decode the generated token ids back into text:
|
||||
|
||||
```py
|
||||
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
|
||||
'Les lignées partagent des ressources avec des bactéries enfixant l'azote.'
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
Tokenize the text and return the `input_ids` as TensorFlow tensors:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("username/my_awesome_opus_books_model")
|
||||
>>> inputs = tokenizer(text, return_tensors="tf").input_ids
|
||||
```
|
||||
|
||||
Use the [`~transformers.generation_tf_utils.TFGenerationMixin.generate`] method to create the translation. For more details about the different text generation strategies and parameters for controlling generation, check out the [Text Generation](../main_classes/text_generation) API.
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForSeq2SeqLM
|
||||
|
||||
>>> model = TFAutoModelForSeq2SeqLM.from_pretrained("username/my_awesome_opus_books_model")
|
||||
>>> outputs = model.generate(inputs, max_new_tokens=40, do_sample=True, top_k=30, top_p=0.95)
|
||||
```
|
||||
|
||||
Decode the generated token ids back into text:
|
||||
|
||||
```py
|
||||
>>> tokenizer.decode(outputs[0], skip_special_tokens=True)
|
||||
'Les lugumes partagent les ressources avec des bactéries fixatrices d'azote.'
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
@ -1,41 +0,0 @@
|
||||
# Tiktoken and interaction with Transformers
|
||||
|
||||
Support for tiktoken model files is seamlessly integrated in 🤗 transformers when loading models
with `from_pretrained` and a tiktoken `tokenizer.model` file on the Hub, which is automatically converted into our [fast tokenizer](https://huggingface.co/docs/transformers/main/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast).
|
||||
|
||||
### Known models that were released with a `tiktoken.model`:
|
||||
- gpt2
|
||||
- llama3
|
||||
|
||||
## Example usage
|
||||
|
||||
In order to load `tiktoken` files in `transformers`, ensure that the `tokenizer.model` file is a tiktoken file and it will automatically be loaded when loading `from_pretrained`. Here is how one would load a tokenizer and a model, which
can be loaded from the exact same file:
|
||||
|
||||
```py
|
||||
from transformers import AutoTokenizer
|
||||
|
||||
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_id, subfolder="original")
|
||||
```
|
||||
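The snippet above only instantiates the tokenizer. As a minimal sketch, loading the model weights from the same repository alongside it (assuming you have access to this gated checkpoint) could look like the following:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
# the tiktoken-style tokenizer.model lives in the "original" subfolder of the repo
tokenizer = AutoTokenizer.from_pretrained(model_id, subfolder="original")
# the model weights are loaded from the same repository id
model = AutoModelForCausalLM.from_pretrained(model_id)
```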
## Create a tiktoken tokenizer
|
||||
|
||||
The `tokenizer.model` file contains no information about additional tokens or pattern strings. If these are important, convert the tokenizer to `tokenizer.json`, the appropriate format for [`PreTrainedTokenizerFast`].
|
||||
|
||||
Generate the `tokenizer.model` file with [tiktoken.get_encoding](https://github.com/openai/tiktoken/blob/63527649963def8c759b0f91f2eb69a40934e468/tiktoken/registry.py#L63) and then convert it to `tokenizer.json` with [`convert_tiktoken_to_fast`].
|
||||
|
||||
```py
|
||||
|
||||
from transformers.integrations.tiktoken import convert_tiktoken_to_fast
|
||||
from tiktoken import get_encoding
|
||||
|
||||
# You can load your custom encoding or the one provided by OpenAI
|
||||
encoding = get_encoding("gpt2")
|
||||
convert_tiktoken_to_fast(encoding, "config/save/dir")
|
||||
```
|
||||
|
||||
The resulting `tokenizer.json` file is saved to the specified directory and can be loaded with [`PreTrainedTokenizerFast`].
|
||||
|
||||
```py
|
||||
from transformers import PreTrainedTokenizerFast

tokenizer = PreTrainedTokenizerFast.from_pretrained("config/save/dir")
|
||||
```
|
||||
@ -673,29 +673,6 @@ tpu_use_sudo: false
|
||||
use_cpu: false
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Tensor Parallelism with PyTorch 2">
|
||||
|
||||
```yml
|
||||
compute_environment: LOCAL_MACHINE
|
||||
tp_config:
|
||||
tp_size: 4
|
||||
distributed_type: TP
|
||||
downcast_bf16: 'no'
|
||||
machine_rank: 0
|
||||
main_training_function: main
|
||||
mixed_precision: 'no'
|
||||
num_machines: 1
|
||||
num_processes: 4
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_env: []
|
||||
tpu_use_cluster: false
|
||||
tpu_use_sudo: false
|
||||
use_cpu: false
|
||||
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
The [`accelerate_launch`](https://huggingface.co/docs/accelerate/package_reference/cli#accelerate-launch) command is the recommended way to launch your training script on a distributed system with Accelerate and [`Trainer`], with the parameters specified in `config_file.yaml`. This file is saved to the Accelerate cache folder and automatically loaded when you run `accelerate_launch`.
|
||||
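As an illustration (the script name below is a placeholder, and the config path assumes you saved the file yourself rather than relying on the cached default), a launch could look like:

```bash
accelerate launch --config_file config_file.yaml train.py
```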
|
||||
@ -283,6 +283,8 @@ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/t
|
||||
As with the slow tests, there are other environment variables that are not set by default during testing (a usage sketch follows the list below):
|
||||
|
||||
* `RUN_CUSTOM_TOKENIZERS`: Enables tests for custom tokenizers.
|
||||
* `RUN_PT_FLAX_CROSS_TESTS`: Enables tests for the PyTorch + Flax integration.
|
||||
* `RUN_PT_TF_CROSS_TESTS`: Enables tests for the TensorFlow + PyTorch integration.
|
||||
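For example (the exact test selection below is only illustrative), one of these flags can be set inline for a single pytest run:

```bash
RUN_PT_TF_CROSS_TESTS=1 python -m pytest -n auto --dist=loadfile -s -v ./tests/
```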
|
||||
More environment variables and additional information can be found in [testing_utils.py](src/transformers/testing_utils.py).
|
||||
|
||||
|
||||
@ -149,7 +149,7 @@ conda install conda-forge::transformers
|
||||
|
||||
Pretrained models are downloaded and locally cached at `~/.cache/huggingface/hub`. This is the default directory given by the shell environment variable `TRANSFORMERS_CACHE`. On Windows, the default directory is `C:\Users\username\.cache\huggingface\hub`. You can change the shell environment variables shown below, in order of priority, to specify a different cache directory (see the export example after the list):
|
||||
|
||||
1. Shell environment variable (default): `HF_HUB_CACHE` or `TRANSFORMERS_CACHE`.
|
||||
1. Shell environment variable (default): `HUGGINGFACE_HUB_CACHE` or `TRANSFORMERS_CACHE`.
|
||||
2. Shell environment variable: `HF_HOME`.
|
||||
3. Shell environment variable: `XDG_CACHE_HOME` + `/huggingface`.
|
||||
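For instance (the path here is only an example), the cache location can be redirected for the current shell session like this:

```bash
export HF_HOME=/mnt/storage/huggingface
```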
|
||||
|
||||
@ -383,8 +383,8 @@ Ein besonders cooles 🤗 Transformers-Feature ist die Möglichkeit, ein Modell
|
||||
```py
|
||||
>>> from transformers import AutoModel
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
|
||||
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)
|
||||
>>> pt_model = AutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
@ -392,8 +392,8 @@ Ein besonders cooles 🤗 Transformers-Feature ist die Möglichkeit, ein Modell
|
||||
```py
|
||||
>>> from transformers import TFAutoModel
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory)
|
||||
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True)
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(pt_save_directory)
|
||||
>>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True)
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
@ -1,216 +1,41 @@
|
||||
- title: Get started
|
||||
sections:
|
||||
- sections:
|
||||
- local: index
|
||||
title: Transformers
|
||||
title: 🤗 Transformers
|
||||
- local: quicktour
|
||||
title: Quick tour
|
||||
- local: installation
|
||||
title: Installation
|
||||
- local: quicktour
|
||||
title: Quickstart
|
||||
- title: Base classes
|
||||
isExpanded: False
|
||||
sections:
|
||||
- title: Models
|
||||
sections:
|
||||
- local: models
|
||||
title: Loading models
|
||||
- local: custom_models
|
||||
title: Customizing models
|
||||
- local: how_to_hack_models
|
||||
title: Customizing model components
|
||||
- local: model_sharing
|
||||
title: Sharing
|
||||
- local: add_new_model
|
||||
title: Adding a new model to Transformers
|
||||
- local: modular_transformers
|
||||
title: Modular Transformers
|
||||
- local: task_summary
|
||||
title: What 🤗 Transformers can do
|
||||
- local: tasks_explained
|
||||
title: How 🤗 Transformers solve tasks
|
||||
- local: model_summary
|
||||
title: The Transformer model family
|
||||
- local: attention
|
||||
title: Attention mechanisms
|
||||
- title: Preprocessors
|
||||
sections:
|
||||
- local: fast_tokenizers
|
||||
title: Tokenizers
|
||||
- local: image_processors
|
||||
title: Image processors
|
||||
- local: backbones
|
||||
title: Backbones
|
||||
- local: feature_extractors
|
||||
title: Feature extractors
|
||||
- local: processors
|
||||
title: Processors
|
||||
- local: tokenizer_summary
|
||||
title: Summary of the tokenizers
|
||||
- local: pad_truncation
|
||||
title: Padding and truncation
|
||||
- title: Inference
|
||||
isExpanded: False
|
||||
sections:
|
||||
- title: Pipeline API
|
||||
sections:
|
||||
title: Adding a new model to `transformers`
|
||||
title: Get started
|
||||
- sections:
|
||||
- local: pipeline_tutorial
|
||||
title: Pipeline
|
||||
- local: pipeline_gradio
|
||||
title: Machine learning apps
|
||||
- local: pipeline_webserver
|
||||
title: Web server inference
|
||||
- local: add_new_pipeline
|
||||
title: Adding a new pipeline
|
||||
- title: LLMs
|
||||
sections:
|
||||
- local: llm_tutorial
|
||||
title: Text generation
|
||||
- local: generation_strategies
|
||||
title: Generation strategies
|
||||
- local: generation_features
|
||||
title: Generation features
|
||||
- local: tasks/prompting
|
||||
title: Prompt engineering
|
||||
- local: llm_optims
|
||||
title: Optimizing inference
|
||||
- local: kv_cache
|
||||
title: KV cache strategies
|
||||
- local: cache_explanation
|
||||
title: Caching
|
||||
- local: llm_tutorial_optimization
|
||||
title: Getting the most out of LLMs
|
||||
- local: perplexity
|
||||
title: Perplexity of fixed-length models
|
||||
- title: Chat with models
|
||||
sections:
|
||||
- local: conversations
|
||||
title: Chat basics
|
||||
- local: chat_templating
|
||||
title: Templates
|
||||
- local: chat_templating_multimodal
|
||||
title: Multimodal templates
|
||||
- local: chat_templating_writing
|
||||
title: Template writing
|
||||
- local: chat_extras
|
||||
title: Tools and RAG
|
||||
- title: Optimization
|
||||
sections:
|
||||
- local: perf_torch_compile
|
||||
title: torch.compile
|
||||
- local: perf_infer_gpu_one
|
||||
title: GPU
|
||||
- local: perf_infer_gpu_multi
|
||||
title: Distributed GPU inference
|
||||
- local: perf_infer_cpu
|
||||
title: CPU
|
||||
- local: tf_xla
|
||||
title: XLA
|
||||
- local: agents
|
||||
title: Agents
|
||||
- local: tools
|
||||
title: Tools
|
||||
- title: Training
|
||||
isExpanded: False
|
||||
sections:
|
||||
- title: Trainer API
|
||||
sections:
|
||||
- local: trainer
|
||||
title: Trainer
|
||||
title: Run inference with pipelines
|
||||
- local: autoclass_tutorial
|
||||
title: Write portable code with AutoClass
|
||||
- local: preprocessing
|
||||
title: Preprocess data
|
||||
- local: training
|
||||
title: Fine-tuning
|
||||
- local: optimizers
|
||||
title: Optimizers
|
||||
- local: hpo_train
|
||||
title: Hyperparameter search
|
||||
- title: Distributed training
|
||||
sections:
|
||||
- local: gpu_selection
|
||||
title: GPU selection
|
||||
title: Fine-tune a pretrained model
|
||||
- local: run_scripts
|
||||
title: Train with a script
|
||||
- local: accelerate
|
||||
title: Accelerate
|
||||
- local: fsdp
|
||||
title: FullyShardedDataParallel
|
||||
- local: deepspeed
|
||||
title: DeepSpeed
|
||||
- local: debugging
|
||||
title: Multi-GPU debugging
|
||||
- local: perf_train_cpu_many
|
||||
title: Distributed CPUs
|
||||
- local: perf_train_gpu_many
|
||||
title: Parallelism methods
|
||||
- title: Hardware
|
||||
sections:
|
||||
- local: perf_train_gpu_one
|
||||
title: GPU
|
||||
- local: perf_train_cpu
|
||||
title: CPU
|
||||
- local: perf_train_tpu_tf
|
||||
title: TPU
|
||||
- local: perf_train_special
|
||||
title: Apple Silicon
|
||||
- local: perf_hardware
|
||||
title: Build your own machine
|
||||
title: Set up distributed training with 🤗 Accelerate
|
||||
- local: peft
|
||||
title: PEFT
|
||||
- local: model_memory_anatomy
|
||||
title: Model training anatomy
|
||||
- title: Quantization
|
||||
isExpanded: False
|
||||
sections:
|
||||
- local: quantization/overview
|
||||
title: Overview
|
||||
- local: quantization/aqlm
|
||||
title: AQLM
|
||||
- local: quantization/awq
|
||||
title: AWQ
|
||||
- local: quantization/bitnet
|
||||
title: BitNet
|
||||
- local: quantization/bitsandbytes
|
||||
title: bitsandbytes
|
||||
- local: quantization/compressed_tensors
|
||||
title: compressed-tensors
|
||||
- local: quantization/eetq
|
||||
title: EETQ
|
||||
- local: quantization/fbgemm_fp8
|
||||
title: FBGEMM
|
||||
- local: quantization/finegrained_fp8
|
||||
title: Fine-grained FP8
|
||||
- local: gguf
|
||||
title: GGUF
|
||||
- local: quantization/gptq
|
||||
title: GPTQ
|
||||
- local: quantization/higgs
|
||||
title: HIGGS
|
||||
- local: quantization/hqq
|
||||
title: HQQ
|
||||
- local: quantization/optimum
|
||||
title: Optimum
|
||||
- local: quantization/quanto
|
||||
title: Quanto
|
||||
- local: quantization/torchao
|
||||
title: torchao
|
||||
- local: quantization/spqr
|
||||
title: SpQR
|
||||
- local: quantization/vptq
|
||||
title: VPTQ
|
||||
- local: quantization/contribute
|
||||
title: Contribute
|
||||
- title: Export to production
|
||||
isExpanded: False
|
||||
sections:
|
||||
- local: serialization
|
||||
title: ONNX
|
||||
- local: tflite
|
||||
title: LiteRT
|
||||
- local: executorch
|
||||
title: ExecuTorch
|
||||
- local: torchscript
|
||||
title: TorchScript
|
||||
- title: Resources
|
||||
isExpanded: False
|
||||
sections:
|
||||
- title: Task recipes
|
||||
sections:
|
||||
- title: Natural language processing
|
||||
title: Load and train adapters with 🤗 PEFT
|
||||
- local: model_sharing
|
||||
title: Share your model
|
||||
- local: agents
|
||||
title: Agents 101
|
||||
- local: agents_advanced
|
||||
title: Agents, supercharged - Multi-agents, External tools, and more
|
||||
- local: llm_tutorial
|
||||
title: Generation with LLMs
|
||||
- local: conversations
|
||||
title: Chatting with Transformers
|
||||
title: Tutorials
|
||||
- sections:
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: tasks/sequence_classification
|
||||
title: Text classification
|
||||
@ -228,13 +53,15 @@
|
||||
title: Summarization
|
||||
- local: tasks/multiple_choice
|
||||
title: Multiple choice
|
||||
- title: Audio
|
||||
title: Natural Language Processing
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: tasks/audio_classification
|
||||
title: Audio classification
|
||||
- local: tasks/asr
|
||||
title: Automatic speech recognition
|
||||
- title: Computer vision
|
||||
title: Audio
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: tasks/image_classification
|
||||
title: Image classification
|
||||
@ -257,10 +84,11 @@
|
||||
- local: tasks/mask_generation
|
||||
title: Mask Generation
|
||||
- local: tasks/keypoint_detection
|
||||
title: Keypoint detection
|
||||
title: Keypoint Detection
|
||||
- local: tasks/knowledge_distillation_for_image_classification
|
||||
title: Knowledge Distillation for Computer Vision
|
||||
- title: Multimodal
|
||||
title: Computer Vision
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: tasks/image_captioning
|
||||
title: Image captioning
|
||||
@ -270,38 +98,180 @@
|
||||
title: Visual Question Answering
|
||||
- local: tasks/text-to-speech
|
||||
title: Text to speech
|
||||
- local: tasks/idefics
|
||||
title: Image tasks with IDEFICS
|
||||
- local: tasks/image_text_to_text
|
||||
title: Image-text-to-text
|
||||
- local: tasks/video_text_to_text
|
||||
title: Video-text-to-text
|
||||
- local: run_scripts
|
||||
title: Training scripts
|
||||
- local: glossary
|
||||
title: Glossary
|
||||
- local: philosophy
|
||||
title: Philosophy
|
||||
title: Multimodal
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: generation_strategies
|
||||
title: Customize the generation strategy
|
||||
- local: kv_cache
|
||||
title: Best Practices for Generation with Cache
|
||||
title: Generation
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: tasks/idefics
|
||||
title: Image tasks with IDEFICS
|
||||
- local: tasks/prompting
|
||||
title: LLM prompting guide
|
||||
title: Prompting
|
||||
title: Task Guides
|
||||
- sections:
|
||||
- local: fast_tokenizers
|
||||
title: Use fast tokenizers from 🤗 Tokenizers
|
||||
- local: multilingual
|
||||
title: Run inference with multilingual models
|
||||
- local: create_a_model
|
||||
title: Use model-specific APIs
|
||||
- local: custom_models
|
||||
title: Share a custom model
|
||||
- local: chat_templating
|
||||
title: Chat templates
|
||||
- local: trainer
|
||||
title: Trainer
|
||||
- local: sagemaker
|
||||
title: Run training on Amazon SageMaker
|
||||
- local: serialization
|
||||
title: Export to ONNX
|
||||
- local: tflite
|
||||
title: Export to TFLite
|
||||
- local: torchscript
|
||||
title: Export to TorchScript
|
||||
- local: benchmarks
|
||||
title: Benchmarks
|
||||
- local: notebooks
|
||||
title: Notebooks with examples
|
||||
- local: community
|
||||
title: Community resources
|
||||
- local: troubleshooting
|
||||
title: Troubleshoot
|
||||
- title: Contribute
|
||||
isExpanded: False
|
||||
sections:
|
||||
- local: gguf
|
||||
title: Interoperability with GGUF files
|
||||
- local: tiktoken
|
||||
title: Interoperability with TikToken files
|
||||
- local: modular_transformers
|
||||
title: Modularity in `transformers`
|
||||
- local: how_to_hack_models
|
||||
title: Model Hacking (overwriting a class to your usage)
|
||||
title: Developer guides
|
||||
- sections:
|
||||
- local: quantization/overview
|
||||
title: Getting started
|
||||
- local: quantization/bitsandbytes
|
||||
title: bitsandbytes
|
||||
- local: quantization/gptq
|
||||
title: GPTQ
|
||||
- local: quantization/awq
|
||||
title: AWQ
|
||||
- local: quantization/aqlm
|
||||
title: AQLM
|
||||
- local: quantization/quanto
|
||||
title: Quanto
|
||||
- local: quantization/eetq
|
||||
title: EETQ
|
||||
- local: quantization/hqq
|
||||
title: HQQ
|
||||
- local: quantization/fbgemm_fp8
|
||||
title: FBGEMM_FP8
|
||||
- local: quantization/optimum
|
||||
title: Optimum
|
||||
- local: quantization/torchao
|
||||
title: TorchAO
|
||||
- local: quantization/bitnet
|
||||
title: BitNet
|
||||
- local: quantization/compressed_tensors
|
||||
title: compressed-tensors
|
||||
- local: quantization/contribute
|
||||
title: Contribute new quantization method
|
||||
title: Quantization Methods
|
||||
- sections:
|
||||
- local: performance
|
||||
title: Overview
|
||||
- local: llm_optims
|
||||
title: LLM inference optimization
|
||||
- sections:
|
||||
- local: perf_train_gpu_one
|
||||
title: Methods and tools for efficient training on a single GPU
|
||||
- local: perf_train_gpu_many
|
||||
title: Multiple GPUs and parallelism
|
||||
- local: fsdp
|
||||
title: Fully Sharded Data Parallel
|
||||
- local: deepspeed
|
||||
title: DeepSpeed
|
||||
- local: perf_train_cpu
|
||||
title: Efficient training on CPU
|
||||
- local: perf_train_cpu_many
|
||||
title: Distributed CPU training
|
||||
- local: perf_train_tpu_tf
|
||||
title: Training on TPU with TensorFlow
|
||||
- local: perf_train_special
|
||||
title: PyTorch training on Apple silicon
|
||||
- local: perf_hardware
|
||||
title: Custom hardware for training
|
||||
- local: hpo_train
|
||||
title: Hyperparameter Search using Trainer API
|
||||
title: Efficient training techniques
|
||||
- sections:
|
||||
- local: perf_infer_cpu
|
||||
title: CPU inference
|
||||
- local: perf_infer_gpu_one
|
||||
title: GPU inference
|
||||
- local: perf_infer_gpu_multi
|
||||
title: Multi-GPU inference
|
||||
title: Optimizing inference
|
||||
- local: big_models
|
||||
title: Instantiate a big model
|
||||
- local: debugging
|
||||
title: Debugging
|
||||
- local: tf_xla
|
||||
title: XLA Integration for TensorFlow Models
|
||||
- local: perf_torch_compile
|
||||
title: Optimize inference using `torch.compile()`
|
||||
title: Performance and scalability
|
||||
- sections:
|
||||
- local: contributing
|
||||
title: Contribute to Transformers
|
||||
title: How to contribute to 🤗 Transformers?
|
||||
- local: add_new_model
|
||||
title: How to add a model to 🤗 Transformers?
|
||||
- local: add_new_pipeline
|
||||
title: How to add a pipeline to 🤗 Transformers?
|
||||
- local: testing
|
||||
title: Transformers model tests
|
||||
title: Testing
|
||||
- local: pr_checks
|
||||
title: Pull request checks
|
||||
- title: API
|
||||
isExpanded: False
|
||||
sections:
|
||||
- title: Main classes
|
||||
sections:
|
||||
title: Checks on a Pull Request
|
||||
title: Contribute
|
||||
- sections:
|
||||
- local: philosophy
|
||||
title: Philosophy
|
||||
- local: glossary
|
||||
title: Glossary
|
||||
- local: task_summary
|
||||
title: What 🤗 Transformers can do
|
||||
- local: tasks_explained
|
||||
title: How 🤗 Transformers solve tasks
|
||||
- local: model_summary
|
||||
title: The Transformer model family
|
||||
- local: tokenizer_summary
|
||||
title: Summary of the tokenizers
|
||||
- local: attention
|
||||
title: Attention mechanisms
|
||||
- local: pad_truncation
|
||||
title: Padding and truncation
|
||||
- local: bertology
|
||||
title: BERTology
|
||||
- local: perplexity
|
||||
title: Perplexity of fixed-length models
|
||||
- local: pipeline_webserver
|
||||
title: Pipelines for webserver inference
|
||||
- local: model_memory_anatomy
|
||||
title: Model training anatomy
|
||||
- local: llm_tutorial_optimization
|
||||
title: Getting the most out of LLMs
|
||||
title: Conceptual guides
|
||||
- sections:
|
||||
- sections:
|
||||
- local: main_classes/agent
|
||||
title: Agents and Tools
|
||||
- local: model_doc/auto
|
||||
@ -328,8 +298,6 @@
|
||||
title: Optimization
|
||||
- local: main_classes/output
|
||||
title: Model outputs
|
||||
- local: main_classes/peft
|
||||
title: PEFT
|
||||
- local: main_classes/pipelines
|
||||
title: Pipelines
|
||||
- local: main_classes/processors
|
||||
@ -348,14 +316,12 @@
|
||||
title: Feature Extractor
|
||||
- local: main_classes/image_processor
|
||||
title: Image Processor
|
||||
- title: Models
|
||||
sections:
|
||||
- title: Text models
|
||||
title: Main Classes
|
||||
- sections:
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/albert
|
||||
title: ALBERT
|
||||
- local: model_doc/bamba
|
||||
title: Bamba
|
||||
- local: model_doc/bart
|
||||
title: BART
|
||||
- local: model_doc/barthez
|
||||
@ -396,8 +362,6 @@
|
||||
title: CodeLlama
|
||||
- local: model_doc/cohere
|
||||
title: Cohere
|
||||
- local: model_doc/cohere2
|
||||
title: Cohere2
|
||||
- local: model_doc/convbert
|
||||
title: ConvBERT
|
||||
- local: model_doc/cpm
|
||||
@ -414,8 +378,6 @@
|
||||
title: DeBERTa-v2
|
||||
- local: model_doc/dialogpt
|
||||
title: DialoGPT
|
||||
- local: model_doc/diffllama
|
||||
title: DiffLlama
|
||||
- local: model_doc/distilbert
|
||||
title: DistilBERT
|
||||
- local: model_doc/dpr
|
||||
@ -432,10 +394,10 @@
|
||||
title: ESM
|
||||
- local: model_doc/falcon
|
||||
title: Falcon
|
||||
- local: model_doc/falcon3
|
||||
title: Falcon3
|
||||
- local: model_doc/falcon_mamba
|
||||
title: FalconMamba
|
||||
- local: model_doc/fastspeech2_conformer
|
||||
title: FastSpeech2Conformer
|
||||
- local: model_doc/flan-t5
|
||||
title: FLAN-T5
|
||||
- local: model_doc/flan-ul2
|
||||
@ -478,12 +440,6 @@
|
||||
title: Granite
|
||||
- local: model_doc/granitemoe
|
||||
title: GraniteMoe
|
||||
- local: model_doc/granitemoeshared
|
||||
title: GraniteMoeShared
|
||||
- local: model_doc/granitevision
|
||||
title: GraniteVision
|
||||
- local: model_doc/helium
|
||||
title: Helium
|
||||
- local: model_doc/herbert
|
||||
title: HerBERT
|
||||
- local: model_doc/ibert
|
||||
@ -536,8 +492,6 @@
|
||||
title: mLUKE
|
||||
- local: model_doc/mobilebert
|
||||
title: MobileBERT
|
||||
- local: model_doc/modernbert
|
||||
title: ModernBert
|
||||
- local: model_doc/mpnet
|
||||
title: MPNet
|
||||
- local: model_doc/mpt
|
||||
@ -562,8 +516,8 @@
|
||||
title: Nyströmformer
|
||||
- local: model_doc/olmo
|
||||
title: OLMo
|
||||
- local: model_doc/olmo2
|
||||
title: OLMo2
|
||||
- local: model_doc/olmo_1124
|
||||
title: OLMo November 2024
|
||||
- local: model_doc/olmoe
|
||||
title: OLMoE
|
||||
- local: model_doc/open-llama
|
||||
@ -658,9 +612,8 @@
|
||||
title: YOSO
|
||||
- local: model_doc/zamba
|
||||
title: Zamba
|
||||
- local: model_doc/zamba2
|
||||
title: Zamba2
|
||||
- title: Vision models
|
||||
title: Text models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/beit
|
||||
title: BEiT
|
||||
@ -674,8 +627,6 @@
|
||||
title: ConvNeXTV2
|
||||
- local: model_doc/cvt
|
||||
title: CvT
|
||||
- local: model_doc/dab-detr
|
||||
title: DAB-DETR
|
||||
- local: model_doc/deformable_detr
|
||||
title: Deformable DETR
|
||||
- local: model_doc/deit
|
||||
@ -684,8 +635,6 @@
|
||||
title: Depth Anything
|
||||
- local: model_doc/depth_anything_v2
|
||||
title: Depth Anything V2
|
||||
- local: model_doc/depth_pro
|
||||
title: DepthPro
|
||||
- local: model_doc/deta
|
||||
title: DETA
|
||||
- local: model_doc/detr
|
||||
@ -694,8 +643,6 @@
|
||||
title: DiNAT
|
||||
- local: model_doc/dinov2
|
||||
title: DINOV2
|
||||
- local: model_doc/dinov2_with_registers
|
||||
title: DINOv2 with Registers
|
||||
- local: model_doc/dit
|
||||
title: DiT
|
||||
- local: model_doc/dpt
|
||||
@ -710,8 +657,6 @@
|
||||
title: GLPN
|
||||
- local: model_doc/hiera
|
||||
title: Hiera
|
||||
- local: model_doc/ijepa
|
||||
title: I-JEPA
|
||||
- local: model_doc/imagegpt
|
||||
title: ImageGPT
|
||||
- local: model_doc/levit
|
||||
@ -742,14 +687,10 @@
|
||||
title: ResNet
|
||||
- local: model_doc/rt_detr
|
||||
title: RT-DETR
|
||||
- local: model_doc/rt_detr_v2
|
||||
title: RT-DETRv2
|
||||
- local: model_doc/segformer
|
||||
title: SegFormer
|
||||
- local: model_doc/seggpt
|
||||
title: SegGpt
|
||||
- local: model_doc/superglue
|
||||
title: SuperGlue
|
||||
- local: model_doc/superpoint
|
||||
title: SuperPoint
|
||||
- local: model_doc/swiftformer
|
||||
@ -762,10 +703,6 @@
|
||||
title: Swin2SR
|
||||
- local: model_doc/table-transformer
|
||||
title: Table Transformer
|
||||
- local: model_doc/textnet
|
||||
title: TextNet
|
||||
- local: model_doc/timm_wrapper
|
||||
title: Timm Wrapper
|
||||
- local: model_doc/upernet
|
||||
title: UperNet
|
||||
- local: model_doc/van
|
||||
@ -782,13 +719,12 @@
|
||||
title: ViTMatte
|
||||
- local: model_doc/vit_msn
|
||||
title: ViTMSN
|
||||
- local: model_doc/vitpose
|
||||
title: ViTPose
|
||||
- local: model_doc/yolos
|
||||
title: YOLOS
|
||||
- local: model_doc/zoedepth
|
||||
title: ZoeDepth
|
||||
- title: Audio models
|
||||
title: Vision models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/audio-spectrogram-transformer
|
||||
title: Audio Spectrogram Transformer
|
||||
@ -800,8 +736,8 @@
|
||||
title: dac
|
||||
- local: model_doc/encodec
|
||||
title: EnCodec
|
||||
- local: model_doc/fastspeech2_conformer
|
||||
title: FastSpeech2Conformer
|
||||
- local: model_doc/hiera
|
||||
title: Hiera
|
||||
- local: model_doc/hubert
|
||||
title: Hubert
|
||||
- local: model_doc/mctct
|
||||
@ -810,8 +746,6 @@
|
||||
title: Mimi
|
||||
- local: model_doc/mms
|
||||
title: MMS
|
||||
- local: model_doc/moonshine
|
||||
title: Moonshine
|
||||
- local: model_doc/moshi
|
||||
title: Moshi
|
||||
- local: model_doc/musicgen
|
||||
@ -858,7 +792,8 @@
|
||||
title: XLS-R
|
||||
- local: model_doc/xlsr_wav2vec2
|
||||
title: XLSR-Wav2Vec2
|
||||
- title: Video models
|
||||
title: Audio models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/timesformer
|
||||
title: TimeSformer
|
||||
@ -866,16 +801,13 @@
|
||||
title: VideoMAE
|
||||
- local: model_doc/vivit
|
||||
title: ViViT
|
||||
- title: Multimodal models
|
||||
title: Video models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/align
|
||||
title: ALIGN
|
||||
- local: model_doc/altclip
|
||||
title: AltCLIP
|
||||
- local: model_doc/aria
|
||||
title: Aria
|
||||
- local: model_doc/aya_vision
|
||||
title: AyaVision
|
||||
- local: model_doc/blip
|
||||
title: BLIP
|
||||
- local: model_doc/blip-2
|
||||
@ -894,22 +826,16 @@
|
||||
title: CLIPSeg
|
||||
- local: model_doc/clvp
|
||||
title: CLVP
|
||||
- local: model_doc/colpali
|
||||
title: ColPali
|
||||
- local: model_doc/data2vec
|
||||
title: Data2Vec
|
||||
- local: model_doc/deplot
|
||||
title: DePlot
|
||||
- local: model_doc/donut
|
||||
title: Donut
|
||||
- local: model_doc/emu3
|
||||
title: Emu3
|
||||
- local: model_doc/flava
|
||||
title: FLAVA
|
||||
- local: model_doc/git
|
||||
title: GIT
|
||||
- local: model_doc/got_ocr2
|
||||
title: GOT-OCR2
|
||||
- local: model_doc/grounding-dino
|
||||
title: Grounding DINO
|
||||
- local: model_doc/groupvit
|
||||
@ -970,8 +896,6 @@
|
||||
title: Pix2Struct
|
||||
- local: model_doc/pixtral
|
||||
title: Pixtral
|
||||
- local: model_doc/qwen2_5_vl
|
||||
title: Qwen2.5-VL
|
||||
- local: model_doc/qwen2_audio
|
||||
title: Qwen2Audio
|
||||
- local: model_doc/qwen2_vl
|
||||
@ -980,10 +904,6 @@
|
||||
title: Segment Anything
|
||||
- local: model_doc/siglip
|
||||
title: SigLIP
|
||||
- local: model_doc/siglip2
|
||||
title: SigLIP2
|
||||
- local: model_doc/smolvlm
|
||||
title: SmolVLM
|
||||
- local: model_doc/speech-encoder-decoder
|
||||
title: Speech Encoder Decoder Models
|
||||
- local: model_doc/tapas
|
||||
@ -1010,13 +930,15 @@
|
||||
title: VisualBERT
|
||||
- local: model_doc/xclip
|
||||
title: X-CLIP
|
||||
- title: Reinforcement learning models
|
||||
title: Multimodal models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/decision_transformer
|
||||
title: Decision Transformer
|
||||
- local: model_doc/trajectory_transformer
|
||||
title: Trajectory Transformer
|
||||
- title: Time series models
|
||||
title: Reinforcement learning models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/autoformer
|
||||
title: Autoformer
|
||||
@ -1028,12 +950,14 @@
|
||||
title: PatchTST
|
||||
- local: model_doc/time_series_transformer
|
||||
title: Time Series Transformer
|
||||
- title: Graph models
|
||||
title: Time series models
|
||||
- isExpanded: false
|
||||
sections:
|
||||
- local: model_doc/graphormer
|
||||
title: Graphormer
|
||||
- title: Internal helpers
|
||||
sections:
|
||||
title: Graph models
|
||||
title: Models
|
||||
- sections:
|
||||
- local: internal/modeling_utils
|
||||
title: Custom Layers and Utilities
|
||||
- local: internal/pipelines_utils
|
||||
@ -1052,4 +976,5 @@
|
||||
title: General Utilities
|
||||
- local: internal/time_series_utils
|
||||
title: Utilities for Time Series
|
||||
|
||||
title: Internal Helpers
|
||||
title: API
|
||||
|
||||
@ -1,4 +1,4 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
@ -14,152 +14,123 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Accelerate
|
||||
# Distributed training with 🤗 Accelerate
|
||||
|
||||
[Accelerate](https://hf.co/docs/accelerate/index) is a library designed to simplify distributed training on any type of setup with PyTorch by uniting the most common frameworks ([Fully Sharded Data Parallel (FSDP)](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/) and [DeepSpeed](https://www.deepspeed.ai/)) for it into a single interface. [`Trainer`] is powered by Accelerate under the hood, enabling loading big models and distributed training.
|
||||
As models get bigger, parallelism has emerged as a strategy for training larger models on limited hardware and accelerating training speed by several orders of magnitude. At Hugging Face, we created the [🤗 Accelerate](https://huggingface.co/docs/accelerate) library to help users easily train a 🤗 Transformers model on any type of distributed setup, whether it is multiple GPU's on one machine or multiple GPU's across several machines. In this tutorial, learn how to customize your native PyTorch training loop to enable training in a distributed environment.
|
||||
|
||||
This guide will show you two ways to use Accelerate with Transformers, using FSDP as the backend. The first method demonstrates distributed training with [`Trainer`], and the second method demonstrates adapting a PyTorch training loop. For more detailed information about Accelerate, please refer to the [documentation](https://hf.co/docs/accelerate/index).
|
||||
## Setup
|
||||
|
||||
Get started by installing 🤗 Accelerate:
|
||||
|
||||
```bash
|
||||
pip install accelerate
|
||||
```
|
||||
|
||||
Start by running [accelerate config](https://hf.co/docs/accelerate/main/en/package_reference/cli#accelerate-config) in the command line to answer a series of prompts about your training system. This creates and saves a configuration file to help Accelerate correctly set up training based on your setup.
|
||||
|
||||
```bash
|
||||
accelerate config
|
||||
```
|
||||
|
||||
Depending on your setup and the answers you provide, an example configuration file for distributing training with FSDP on one machine with two GPUs may look like the following.
|
||||
|
||||
```yaml
|
||||
compute_environment: LOCAL_MACHINE
|
||||
debug: false
|
||||
distributed_type: FSDP
|
||||
downcast_bf16: 'no'
|
||||
fsdp_config:
|
||||
fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP
|
||||
fsdp_backward_prefetch_policy: BACKWARD_PRE
|
||||
fsdp_forward_prefetch: false
|
||||
fsdp_cpu_ram_efficient_loading: true
|
||||
fsdp_offload_params: false
|
||||
fsdp_sharding_strategy: FULL_SHARD
|
||||
fsdp_state_dict_type: SHARDED_STATE_DICT
|
||||
fsdp_sync_module_states: true
|
||||
fsdp_transformer_layer_cls_to_wrap: BertLayer
|
||||
fsdp_use_orig_params: true
|
||||
machine_rank: 0
|
||||
main_training_function: main
|
||||
mixed_precision: bf16
|
||||
num_machines: 1
|
||||
num_processes: 2
|
||||
rdzv_backend: static
|
||||
same_network: true
|
||||
tpu_env: []
|
||||
tpu_use_cluster: false
|
||||
tpu_use_sudo: false
|
||||
use_cpu: false
|
||||
```
|
||||
|
||||
## Trainer
|
||||
|
||||
Pass the path to the saved configuration file to [`TrainingArguments`], and from there, pass your [`TrainingArguments`] to [`Trainer`].
|
||||
Then import and create an [`~accelerate.Accelerator`] object. The [`~accelerate.Accelerator`] will automatically detect your type of distributed setup and initialize all the necessary components for training. You don't need to explicitly place your model on a device.
|
||||
|
||||
```py
|
||||
from transformers import TrainingArguments, Trainer
|
||||
>>> from accelerate import Accelerator
|
||||
|
||||
training_args = TrainingArguments(
|
||||
output_dir="your-model",
|
||||
learning_rate=2e-5,
|
||||
per_device_train_batch_size=16,
|
||||
per_device_eval_batch_size=16,
|
||||
num_train_epochs=2,
|
||||
fsdp_config="path/to/fsdp_config",
|
||||
    fsdp="full_shard",
|
||||
weight_decay=0.01,
|
||||
eval_strategy="epoch",
|
||||
save_strategy="epoch",
|
||||
load_best_model_at_end=True,
|
||||
push_to_hub=True,
|
||||
)
|
||||
|
||||
trainer = Trainer(
|
||||
model=model,
|
||||
args=training_args,
|
||||
train_dataset=dataset["train"],
|
||||
eval_dataset=dataset["test"],
|
||||
processing_class=tokenizer,
|
||||
data_collator=data_collator,
|
||||
compute_metrics=compute_metrics,
|
||||
)
|
||||
|
||||
trainer.train()
|
||||
>>> accelerator = Accelerator()
|
||||
```
|
||||
|
||||
## Native PyTorch
|
||||
## Prepare to accelerate
|
||||
|
||||
Accelerate can also be added to any PyTorch training loop to enable distributed training. The [`~accelerate.Accelerator`] is the main entry point for adapting your PyTorch code to work with Accelerate. It automatically detects your distributed training setup and initializes all the necessary components for training. You don't need to explicitly place your model on a device because [`~accelerate.Accelerator`] knows which device to move your model to.
|
||||
The next step is to pass all the relevant training objects to the [`~accelerate.Accelerator.prepare`] method. This includes your training and evaluation DataLoaders, a model and an optimizer:
|
||||
|
||||
```py
|
||||
from accelerate import Accelerator
|
||||
|
||||
accelerator = Accelerator()
|
||||
device = accelerator.device
|
||||
>>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(
|
||||
... train_dataloader, eval_dataloader, model, optimizer
|
||||
... )
|
||||
```
|
||||
|
||||
All PyTorch objects (model, optimizer, scheduler, dataloaders) should be passed to the [`~accelerate.Accelerator.prepare`] method now. This method moves your model to the appropriate device or devices, adapts the optimizer and scheduler to use [`~accelerate.optimizer.AcceleratedOptimizer`] and [`~accelerate.scheduler.AcceleratedScheduler`], and creates a new shardable dataloader.
|
||||
## Backward
|
||||
|
||||
The last addition is to replace the typical `loss.backward()` in your training loop with 🤗 Accelerate's [`~accelerate.Accelerator.backward`] method:
|
||||
|
||||
```py
|
||||
train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(
|
||||
train_dataloader, eval_dataloader, model, optimizer
|
||||
)
|
||||
>>> for epoch in range(num_epochs):
|
||||
... for batch in train_dataloader:
|
||||
... outputs = model(**batch)
|
||||
... loss = outputs.loss
|
||||
... accelerator.backward(loss)
|
||||
|
||||
... optimizer.step()
|
||||
... lr_scheduler.step()
|
||||
... optimizer.zero_grad()
|
||||
... progress_bar.update(1)
|
||||
```
|
||||
|
||||
Replace `loss.backward` in your training loop with Accelerate's [`~accelerate.Accelerator.backward`] method to scale the gradients and determine the appropriate `backward` method to use depending on your framework (for example, DeepSpeed or Megatron).
|
||||
As you can see in the following code, you only need to add four additional lines of code to your training loop to enable distributed training!
|
||||
|
||||
```py
|
||||
for epoch in range(num_epochs):
|
||||
```diff
|
||||
+ from accelerate import Accelerator
|
||||
from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler
|
||||
|
||||
+ accelerator = Accelerator()
|
||||
|
||||
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
|
||||
optimizer = AdamW(model.parameters(), lr=3e-5)
|
||||
|
||||
- device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
|
||||
- model.to(device)
|
||||
|
||||
+ train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(
|
||||
+ train_dataloader, eval_dataloader, model, optimizer
|
||||
+ )
|
||||
|
||||
num_epochs = 3
|
||||
num_training_steps = num_epochs * len(train_dataloader)
|
||||
lr_scheduler = get_scheduler(
|
||||
"linear",
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=0,
|
||||
num_training_steps=num_training_steps
|
||||
)
|
||||
|
||||
progress_bar = tqdm(range(num_training_steps))
|
||||
|
||||
model.train()
|
||||
for epoch in range(num_epochs):
|
||||
for batch in train_dataloader:
|
||||
- batch = {k: v.to(device) for k, v in batch.items()}
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
accelerator.backward(loss)
|
||||
- loss.backward()
|
||||
+ accelerator.backward(loss)
|
||||
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
progress_bar.update(1)
|
||||
```
|
||||
|
||||
Combine everything into a function and make it callable as a script.
|
||||
## Train
|
||||
|
||||
```py
|
||||
from accelerate import Accelerator
|
||||
Once you've added the relevant lines of code, launch your training in a script or a notebook like Colaboratory.
|
||||
|
||||
def main():
|
||||
accelerator = Accelerator()
|
||||
### Train with a script
|
||||
|
||||
model, optimizer, training_dataloader, scheduler = accelerator.prepare(
|
||||
model, optimizer, training_dataloader, scheduler
|
||||
)
|
||||
|
||||
for batch in training_dataloader:
|
||||
optimizer.zero_grad()
|
||||
inputs, targets = batch
|
||||
outputs = model(inputs)
|
||||
loss = loss_function(outputs, targets)
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
scheduler.step()
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
```
|
||||
|
||||
From the command line, call [accelerate launch](https://hf.co/docs/accelerate/main/en/package_reference/cli#accelerate-launch) to run your training script. Any additional arguments or parameters can be passed here as well.
|
||||
|
||||
To launch your training script on two GPUs, add the `--num_processes` argument.
|
||||
If you are running your training from a script, run the following command to create and save a configuration file:
|
||||
|
||||
```bash
|
||||
accelerate launch --num_processes=2 your_script.py
|
||||
accelerate config
|
||||
```
|
||||
|
||||
Refer to the [Launching Accelerate scripts](https://hf.co/docs/accelerate/main/en/basic_tutorials/launch) tutorial for more details.
|
||||
Then launch your training with:
|
||||
|
||||
```bash
|
||||
accelerate launch train.py
|
||||
```
|
||||
|
||||
### Train with a notebook
|
||||
|
||||
🤗 Accelerate can also run in a notebook if you're planning on using Colaboratory's TPUs. Wrap all the code responsible for training in a function, and pass it to [`~accelerate.notebook_launcher`]:
|
||||
|
||||
```py
|
||||
>>> from accelerate import notebook_launcher
|
||||
|
||||
>>> notebook_launcher(training_function)
|
||||
```
|
||||
|
||||
For more information about 🤗 Accelerate and its rich features, refer to the [documentation](https://huggingface.co/docs/accelerate).
|
||||
|
||||
File diff suppressed because it is too large
@ -1,4 +1,4 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
@ -13,66 +13,92 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Adding a new pipeline
|
||||
# How to create a custom pipeline?
|
||||
|
||||
Make [`Pipeline`] your own by subclassing it and implementing a few methods. Share the code with the community on the [Hub](https://hf.co) and register the pipeline with Transformers so that everyone can quickly and easily use it.
|
||||
In this guide, we will see how to create a custom pipeline and share it on the [Hub](https://hf.co/models) or add it to the
|
||||
🤗 Transformers library.
|
||||
|
||||
This guide will walk you through the process of adding a new pipeline to Transformers.
|
||||
First and foremost, you need to decide the raw entries the pipeline will be able to take. It can be strings, raw bytes,
|
||||
dictionaries or whatever seems to be the most likely desired input. Try to keep these inputs as pure Python as possible
|
||||
as it makes compatibility easier (even through other languages via JSON). Those will be the `inputs` of the
|
||||
pipeline (`preprocess`).
|
||||
|
||||
## Design choices
|
||||
Then define the `outputs`. Same policy as the `inputs`. The simpler, the better. Those will be the outputs of
|
||||
`postprocess` method.
|
||||
|
||||
At a minimum, you only need to provide [`Pipeline`] with an appropriate input for a task. This is also where you should begin when designing your pipeline.
|
||||
Start by inheriting the base class `Pipeline` with the 4 methods needed to implement `preprocess`,
|
||||
`_forward`, `postprocess`, and `_sanitize_parameters`.
|
||||
|
||||
Decide what input types [`Pipeline`] can accept. It can be strings, raw bytes, dictionaries, and so on. Try to keep the inputs in pure Python where possible because it's more compatible. Next, decide on the output [`Pipeline`] should return. Again, keeping the output in Python is the simplest and best option because it's easier to work with.
|
||||
|
||||
Keeping the inputs and outputs simple, and ideally JSON-serializable, makes it easier for users to run your [`Pipeline`] without needing to learn new object types. It's also common to support many different input types for even greater ease of use. For example, making an audio file acceptable from a filename, URL, or raw bytes gives the user more flexibility in how they provide the audio data.
|
||||
|
||||
## Create a pipeline
|
||||
|
||||
With an input and output decided, you can start implementing [`Pipeline`]. Your pipeline should inherit from the base [`Pipeline`] class and include 4 methods.
|
||||
|
||||
```py
|
||||
```python
|
||||
from transformers import Pipeline
|
||||
|
||||
|
||||
class MyPipeline(Pipeline):
|
||||
def _sanitize_parameters(self, **kwargs):
|
||||
preprocess_kwargs = {}
|
||||
if "maybe_arg" in kwargs:
|
||||
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
|
||||
return preprocess_kwargs, {}, {}
|
||||
|
||||
    def preprocess(self, inputs, args=2):
        ...

    def _forward(self, model_inputs):
        ...

    def postprocess(self, model_outputs):
        ...
|
||||
```
|
||||
|
||||
1. `preprocess` takes the inputs and transforms them into the appropriate input format for the model.
|
||||
|
||||
```py
|
||||
def preprocess(self, inputs, maybe_arg=2):
|
||||
def preprocess(self, inputs, maybe_arg=2):
|
||||
model_input = Tensor(inputs["input_ids"])
|
||||
return {"model_input": model_input}
|
||||
```
|
||||
|
||||
2. `_forward` shouldn't be called directly. `forward` is the preferred method because it includes safeguards to make sure everything works correctly on the expected device. Anything linked to the model belongs in `_forward` and everything else belongs in either `preprocess` or `postprocess`.
|
||||
|
||||
```py
|
||||
def _forward(self, model_inputs):
|
||||
def _forward(self, model_inputs):
|
||||
# model_inputs == {"model_input": model_input}
|
||||
outputs = self.model(**model_inputs)
|
||||
# Maybe {"logits": Tensor(...)}
|
||||
return outputs
|
||||
```
|
||||
|
||||
3. `postprocess` generates the final output from the model's output in `_forward`.
|
||||
|
||||
```py
|
||||
def postprocess(self, model_outputs, top_k=5):
|
||||
def postprocess(self, model_outputs):
|
||||
best_class = model_outputs["logits"].softmax(-1)
|
||||
return best_class
|
||||
```
|
||||
|
||||
4. `_sanitize_parameters` lets users pass additional parameters to [`Pipeline`]. This could be during initialization or when [`Pipeline`] is called. `_sanitize_parameters` returns 3 dicts of additional keyword arguments that are passed directly to `preprocess`, `_forward`, and `postprocess`. Don't add anything if a user didn't call the pipeline with extra parameters. This keeps the default arguments in the function definition which is always more natural.

This structure keeps CPU/GPU support relatively seamless, while allowing pre- and postprocessing to run on the CPU in different threads.

A classic example is a `top_k` argument in the postprocessing of classification tasks.

```python
>>> pipe = pipeline("my-new-task")
>>> pipe("This is a test")
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05},
{"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}]

>>> pipe("This is a test", top_k=2)
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}]
```

To achieve this, update the `postprocess` method with a default `top_k=5` parameter and edit `_sanitize_parameters` to allow this new parameter.

```python
def postprocess(self, model_outputs, top_k=5):
    best_class = model_outputs["logits"].softmax(-1)
    # Add logic to handle top_k
    return best_class
```

Then in `_sanitize_parameters`, check whether the user passed `top_k` and, if so, add it to `postprocess_kwargs`.

```py
def _sanitize_parameters(self, **kwargs):
    preprocess_kwargs = {}
    if "maybe_arg" in kwargs:
        preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]

    postprocess_kwargs = {}
    if "top_k" in kwargs:
        postprocess_kwargs["top_k"] = kwargs["top_k"]
    return preprocess_kwargs, {}, postprocess_kwargs
```

Now the pipeline can return the top most likely labels if a user chooses to.
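
As one possible way to fill in that `top_k` logic, here is a minimal sketch; it assumes the logits are a PyTorch tensor and that `self.model.config.id2label` maps class indices to labels, neither of which is spelled out in the original example.

```py
import torch


def postprocess(self, model_outputs, top_k=5):
    # Assumes `model_outputs["logits"]` is a PyTorch tensor of shape (batch, num_classes)
    probabilities = model_outputs["logits"].softmax(-1)[0]
    top_k = min(top_k, probabilities.shape[-1])
    scores, indices = torch.topk(probabilities, top_k)
    # Assumes the model config provides an id2label mapping
    return [
        {"label": self.model.config.id2label[i.item()], "score": s.item()}
        for s, i in zip(scores, indices)
    ]
```
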

```py
from transformers import pipeline

pipeline = pipeline("my-new-task")
# returns 3 most likely labels
pipeline("This is the best meal I've ever had", top_k=3)
# returns 5 most likely labels by default
pipeline("This is the best meal I've ever had")
```

## Register a pipeline

Register the new task your pipeline supports in the `PIPELINE_REGISTRY`. The registry defines:

- the machine learning framework the pipeline supports with either `pt_model` or `tf_model` (add both to ensure it works with either framework)
- a default model, which should come from a specific revision (branch or commit hash) where the model works as expected, with `default`
- the expected input with `type` (currently supported types are text, audio, image, and multimodal)

```py
from transformers.pipelines import PIPELINE_REGISTRY
from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification

PIPELINE_REGISTRY.register_pipeline(
    "new-task",
    pipeline_class=MyPipeline,
    pt_model=AutoModelForSequenceClassification,
    tf_model=TFAutoModelForSequenceClassification,
    default={"pt": ("user/awesome-model", "branch-name")},
    type="text",
)
```

## Share your pipeline

Share your pipeline with the community on the [Hub](https://hf.co) or you can add it directly to Transformers.

It's faster to upload your pipeline code to the Hub because it doesn't require a review from the Transformers team. Adding the pipeline to Transformers may be slower because it requires a review and you need to add tests to ensure your [`Pipeline`] works.

### Upload to the Hub

Add your pipeline code to the Hub in a Python file.

For example, a custom pipeline for sentence pair classification might look like the code below. The implementation works for both PyTorch and TensorFlow models.


```py
import numpy as np

from transformers import Pipeline


def softmax(outputs):
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
```

Save the code in a file named `pair_classification.py`, and import and register it as shown below.

```py
from pair_classification import PairClassificationPipeline
from transformers.pipelines import PIPELINE_REGISTRY
from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification

PIPELINE_REGISTRY.register_pipeline(
    "pair-classification",
    pipeline_class=PairClassificationPipeline,
    pt_model=AutoModelForSequenceClassification,
    tf_model=TFAutoModelForSequenceClassification,
)
```


The [register_pipeline](https://github.com/huggingface/transformers/blob/9feae5fb0164e89d4998e5776897c16f7330d3df/src/transformers/pipelines/base.py#L1387) function registers the pipeline details (task type, pipeline class, supported backends) to a model's `config.json` file.

```json
"custom_pipelines": {
    "pair-classification": {
        "impl": "pair_classification.PairClassificationPipeline",
        "pt": [
            "AutoModelForSequenceClassification"
        ],
        "tf": [
            "TFAutoModelForSequenceClassification"
        ]
    }
},
```


Call [`~Pipeline.push_to_hub`] to push the pipeline to the Hub. The Python file containing the code is copied to the Hub, and the pipeline's model and tokenizer are also saved and pushed to the Hub. Your pipeline should now be available on the Hub under your namespace. In the example below, the pipeline uses [sgugger/finetuned-bert-mrpc](https://hf.co/sgugger/finetuned-bert-mrpc), a model fine-tuned on the MRPC dataset to classify pairs of sentences as paraphrases or not.

```py
from transformers import pipeline

pipeline = pipeline(task="pair-classification", model="sgugger/finetuned-bert-mrpc")
pipeline.push_to_hub("pair-classification-pipeline")
```


To use the pipeline, add `trust_remote_code=True` when loading the pipeline.

```py
from transformers import pipeline

pipeline = pipeline(task="pair-classification", trust_remote_code=True)
```

### Add to Transformers

Adding a custom pipeline to Transformers requires adding tests to make sure everything works as expected, and requesting a review from the Transformers team.

Add your pipeline code as a new module to the [pipelines](https://github.com/huggingface/transformers/tree/main/src/transformers/pipelines) submodule, and add it to the list of tasks defined in [pipelines/__init__.py](https://github.com/huggingface/transformers/blob/main/src/transformers/pipelines/__init__.py).

Next, add a new test for the pipeline in [transformers/tests/pipelines](https://github.com/huggingface/transformers/tree/main/tests/pipelines), for example `tests/pipelines/test_pipelines_MY_PIPELINE.py`. You can look at the other tests for examples of how to test your pipeline.

The [run_pipeline_test](https://github.com/huggingface/transformers/blob/db70426854fe7850f2c5834d633aff637f14772e/tests/pipelines/test_pipelines_text_classification.py#L186) function should be very generic and run on small random models on every architecture defined in [model_mapping](https://github.com/huggingface/transformers/blob/db70426854fe7850f2c5834d633aff637f14772e/tests/pipelines/test_pipelines_text_classification.py#L48) and [tf_model_mapping](https://github.com/huggingface/transformers/blob/db70426854fe7850f2c5834d633aff637f14772e/tests/pipelines/test_pipelines_text_classification.py#L49). This is important for testing future compatibility: if someone adds a new model for `XXXForQuestionAnswering`, the pipeline test will attempt to run on it.

You'll also notice `ANY` is used throughout the [run_pipeline_test](https://github.com/huggingface/transformers/blob/db70426854fe7850f2c5834d633aff637f14772e/tests/pipelines/test_pipelines_text_classification.py#L186) function. The models are random, so you can't check the actual values. Using `ANY` allows the test to match the output of the pipeline type instead.

Finally, you should also implement the following 4 tests.


1. [test_small_model_pt](https://github.com/huggingface/transformers/blob/db70426854fe7850f2c5834d633aff637f14772e/tests/pipelines/test_pipelines_text_classification.py#L59) and [test_small_model_tf](https://github.com/huggingface/transformers/blob/db70426854fe7850f2c5834d633aff637f14772e/tests/pipelines/test_pipelines_text_classification.py#L150): use a small model for these pipelines to make sure they return the correct outputs. The results don't have to make sense, but the PyTorch and TensorFlow pipelines should return the same result.
2. [test_large_model_pt](https://github.com/huggingface/transformers/blob/db70426854fe7850f2c5834d633aff637f14772e/tests/pipelines/test_pipelines_zero_shot_image_classification.py#L187) and [test_large_model_tf](https://github.com/huggingface/transformers/blob/db70426854fe7850f2c5834d633aff637f14772e/tests/pipelines/test_pipelines_zero_shot_image_classification.py#L220) (optional): use a realistic model to make sure the pipeline returns meaningful results. These tests are slow and should be marked as such. Their goal is to showcase the pipeline and to make sure there is no drift in future releases.

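
As a rough illustration, a small-model test could look like the sketch below; the checkpoint name and the exact assertions are hypothetical, not taken from the actual Transformers test suite.

```py
import unittest

from transformers import pipeline


class PairClassificationPipelineTests(unittest.TestCase):
    def test_small_model_pt(self):
        # Hypothetical tiny checkpoint; any small random sequence-classification model would do.
        classifier = pipeline(
            task="pair-classification",
            model="hf-internal-testing/tiny-random-BertForSequenceClassification",
        )
        output = classifier("I like pizza", second_text="I enjoy pizza")
        # The model weights are random, so only the output structure is checked.
        self.assertEqual(sorted(output.keys()), ["label", "logits", "score"])
```
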

> [!WARNING]
> Agents and tools are being spun out into the standalone [smolagents](https://huggingface.co/docs/smolagents/index) library. These docs will be deprecated in the future!

# Agents

[[open-in-colab]]


Large language models (LLMs) can tackle a wide range of tasks, but they often struggle with basic tasks like logic, calculation, and search. One approach to overcome this weakness is to create an *agent*, a system where an LLM can execute more complex tasks through *planning* and using *tools*.

- Planning helps an LLM reason its way through a task by breaking it down into smaller subtasks. For example, [`CodeAgent`] plans a series of actions to take and then generates Python code to execute all the actions at once. Because it natively handles different input and output types for its tools, it is the recommended choice for multimodal tasks.

  Another planning method is self-reflection and refinement of previous actions to improve performance. The [`ReactJsonAgent`] is an example of this type of planning, and it's based on the [ReAct](https://hf.co/papers/2210.03629) framework. This agent plans and executes actions one at a time based on the feedback it receives from each action. A variant, [`ReactCodeAgent`], writes its tool calls as blobs of code instead of JSON, which works well for LLMs with strong coding performance.

- Tools give an LLM access to external functions or APIs that it can use to help it complete a task. For example, [gradio-tools](https://github.com/freddyaboulton/gradio-tools) gives an LLM access to any of the [Gradio](https://www.gradio.app/) apps available on Hugging Face [Spaces](https://hf.co/spaces). These apps can be used for a wide range of tasks such as image generation, video generation, audio transcription, and more.

To use agents in Transformers, make sure you have the extra `agents` dependencies installed.

```bash
pip install transformers[agents]
```

Create an agent instance (refer to the [Agents](./main_classes/agent#agents) API for supported agents in Transformers) and a list of tools available for it to use, then [`~ReactAgent.run`] the agent on your task. The example below demonstrates how a ReAct agent reasons through a task.

```py
from transformers import ReactCodeAgent

agent = ReactCodeAgent(tools=[])
agent.run(
    "How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need?",
)
```


```text
=====New task=====
How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need?
====Agent is executing the code below:
bert_blocks = search(query="number of blocks in BERT base encoder")
print("BERT blocks:", bert_blocks)
====
Print outputs:
BERT blocks: twelve encoder blocks

====Agent is executing the code below:
attention_layer = search(query="number of layers in Attention is All You Need")
print("Attention layers:", attention_layer)
====
Print outputs:
Attention layers: Encoder: The encoder is composed of a stack of N = 6 identical layers. Each layer has two sub-layers. The first is a multi-head self-attention mechanism, and the second is a simple, position- 2 Page 3 Figure 1: The Transformer - model architecture.

====Agent is executing the code below:
bert_blocks = 12
attention_layers = 6
diff = bert_blocks - attention_layers
print("Difference in blocks:", diff)
final_answer(diff)
====
Print outputs:
Difference in blocks: 6

Final answer: 6
```

<div class="flex justify-center">
    <img
        class="block dark:hidden"
        src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif"
    />
    <img
        class="hidden dark:block"
        src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Agent_ManimCE.gif"
    />
</div>


This guide walks you through how to initialize an agent in more detail. To initialize an agent, you need an LLM engine to power it, a system prompt, a toolbox from which the agent picks tools to execute, and a parser to extract the tool calls and arguments from the LLM output.

## LLM

An agent uses an LLM to plan and execute a task; it is the engine that powers the agent. To choose and build your own LLM engine, you need a method that:

1. the input uses the [chat template](./chat_templating) format, `List[Dict[str, str]]`, and it returns a string
2. the LLM stops generating outputs when it encounters the sequences in `stop_sequences`

Build your LLM engine by defining a `llm_engine` method which accepts a list of [messages](./chat_templating) and returns text. This callable also needs to accept a `stop` argument that indicates when to stop generating.


```python
from huggingface_hub import login, InferenceClient

login("<YOUR_HUGGINGFACEHUB_API_TOKEN>")

client = InferenceClient(model="meta-llama/Meta-Llama-3-70B-Instruct")


def llm_engine(messages, stop_sequences=["Task"]) -> str:
    response = client.chat_completion(messages, stop=stop_sequences, max_tokens=1000)
    answer = response.choices[0].message.content
    return answer
```


Next, initialize an engine to load a model. To run an agent locally, create a [`TransformersEngine`] to load a preinitialized [`Pipeline`].

However, you could also leverage Hugging Face's powerful inference infrastructure, [Inference API](https://hf.co/docs/api-inference/index) or [Inference Endpoints](https://hf.co/docs/inference-endpoints/index), to run your model. This is useful for loading larger models that are typically required for agentic behavior. In this case, load the [`HfApiEngine`] to run the agent; under the hood, it initializes a `huggingface_hub.InferenceClient`.

The agent requires a list of tools it can use to complete a task. If you aren't using any additional tools, pass an empty list. The default tools provided by Transformers are loaded automatically, but you can optionally set `add_base_tools=True` to explicitly enable them.

Now you can create an agent, like [`CodeAgent`], and run it.

<hfoptions id="engine">
<hfoption id="TransformersEngine">

Create a [`TransformersEngine`] with a preinitialized pipeline to run inference on your local machine.


```py
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, TransformersEngine, CodeAgent

tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B-Instruct")
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.1-8B-Instruct").to("cuda")
pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
llm_engine = TransformersEngine(pipeline)
agent = CodeAgent(tools=[], llm_engine=llm_engine)
agent.run(
    "What causes bread to rise?",
)
```

</hfoption>
<hfoption id="HfApiEngine">

```py
from transformers import CodeAgent, HfApiEngine

llm_engine = HfApiEngine(model="meta-llama/Meta-Llama-3-70B-Instruct")
agent = CodeAgent(tools=[], llm_engine=llm_engine)
agent.run(
    "Could you translate this sentence from French, say it out loud and return the audio.",
    sentence="Où est la boulangerie la plus proche?",
)
```

</hfoption>
</hfoptions>

You can even leave the `llm_engine` argument undefined when creating an agent, and an [`HfApiEngine`] will be created by default.

The agent supports [constrained generation](https://hf.co/docs/text-generation-inference/conceptual/guidance) for generating outputs according to a specific structure with the `grammar` parameter. The `grammar` parameter should be specified in the `llm_engine` method or you can set it when initializing an agent.
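
As a minimal sketch (not from the original docs), an `llm_engine` that forwards a grammar for constrained generation might look like this; it reuses the `client` from the earlier example, and the `response_format` argument and grammar format shown follow text-generation-inference's JSON guidance, which are assumptions here.

```py
def llm_engine(messages, stop_sequences=["Task"], grammar=None) -> str:
    # `grammar` is forwarded when the agent was initialized with one,
    # e.g. {"type": "json", "value": {...json schema...}} for TGI-style JSON guidance.
    response = client.chat_completion(
        messages, stop=stop_sequences, max_tokens=1000, response_format=grammar
    )
    return response.choices[0].message.content
```
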

Lastly, an agent accepts additional inputs such as text and audio. In the [`HfApiEngine`] example above, the agent accepted a sentence to translate. But you could also pass a path to a local or remote file for the agent to access. The example below demonstrates how to pass a path to an audio file.

```py
from transformers import ReactCodeAgent

agent = ReactCodeAgent(tools=[], llm_engine=llm_engine)
agent.run("Why doesn't he know many people in New York?", audio="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/recording.mp3")
```

## System prompt

A system prompt describes how an agent should behave, the tools it can use, and the expected output format.

Tools are defined by the `<<tool_descriptions>>` token which is dynamically replaced during runtime with the actual tool. The tool description is derived from the tool name, description, inputs, output type, and a Jinja2 template. Refer to the [Tools](./tools) guide for more information about how to describe tools.

The example below is the system prompt for [`ReactCodeAgent`] (slightly simplified).

```text
You will be given a task to solve as best you can.
You have access to the following tools:
<<tool_descriptions>>

To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences.

At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task, then the tools that you want to use.
Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with '/End code' sequence.
During each intermediate step, you can use 'print()' to save whatever important information you will then need.
These print outputs will then be available in the 'Observation:' field, for using this information as input for the next step.

Here are a few examples using notional tools:
---
{examples}

Above examples were using notional tools that might not exist for you. You only have access to those tools:
<<tool_names>>
You also can perform computations in the python code you generate.

Remember to make sure that variables you use are all defined.

Now Begin!
```


The system prompt can be tailored to the intended task. For example, you can add a better explanation of the output format, or you can overwrite the system prompt template entirely with your own custom system prompt as shown below.

> [!WARNING]
> If you're writing a custom system prompt, make sure to include `<<tool_descriptions>>` in the template so the agent is aware of the available tools.

```py
from transformers import ReactJsonAgent
from transformers.agents import PythonInterpreterTool

agent = ReactJsonAgent(tools=[PythonInterpreterTool()], system_prompt="{your_custom_prompt}")
```

## Code execution

For safety, only the tools you provide (and the default Transformers tools) and the `print` function are executed. The interpreter doesn't allow importing modules that aren't on a safe list.

To import modules that aren't on the list, add them as a list to the `additional_authorized_imports` parameter when initializing an agent.

```py
from transformers import ReactCodeAgent

agent = ReactCodeAgent(tools=[], additional_authorized_imports=['requests', 'bs4'])
agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?")
```

Code execution stops if a tool isn't on the safe list, it isn't authorized, or if the code generated by the agent returns a Python error.

> [!WARNING]
> The LLM can generate arbitrary code that will then be executed: do not add any unsafe imports!

## Tools

A tool is an atomic function to be used by an agent.

You can, for instance, check the [`PythonInterpreterTool`]: it has a name, a description, input descriptions, an output type, and a `__call__` method to perform the action.

When the agent is initialized, the tool attributes are used to generate a tool description which is baked into the agent's system prompt. This lets the agent know which tools it can use and why.

### Default toolbox

Transformers comes with a default toolbox for empowering agents, which you can add to your agent upon initialization with the argument `add_base_tools=True`:

- **Document question answering**: given a document (such as a PDF) in image format, answer a question on this document ([Donut](./model_doc/donut))
- **Image question answering**: given an image, answer a question on this image ([VILT](./model_doc/vilt))
- **Speech to text**: given an audio recording of a person talking, transcribe the speech into text ([Whisper](./model_doc/whisper))
- **Text to speech**: convert text to speech ([SpeechT5](./model_doc/speecht5))
- **Translation**: translates a given sentence from source language to target language
- **DuckDuckGo search**: performs a web search using DuckDuckGo
- **Python code interpreter**: runs the LLM-generated Python code in a secure environment. This tool is only added to [`ReactJsonAgent`] if you initialize it with `add_base_tools=True`, since a code-based agent can already natively execute Python code

You can manually use a tool by calling the [`load_tool`] function with a task to perform.

```python
from transformers import load_tool

tool = load_tool("text-to-speech")
audio = tool("This is a text to speech tool")
```

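
Similarly, you can try the default search tool on its own. The snippet below is a minimal sketch; the exact format of the returned results is an assumption.

```py
from transformers.agents import DuckDuckGoSearchTool

search_tool = DuckDuckGoSearchTool()
# Returns the search results as text
print(search_tool("Who is the current president of France?"))
```
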
### Create a new tool

You can create your own tool for use cases not covered by the default tools from Hugging Face. For example, let's create a tool that returns the most downloaded model for a given task from the Hub.

You'll start with the code below.

```python
from huggingface_hub import list_models

task = "text-classification"

model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
print(model.id)
```

This code can quickly be converted into a tool, just by wrapping it in a function and adding the `tool` decorator.

```py
from huggingface_hub import list_models

from transformers import tool


@tool
def model_download_tool(task: str) -> str:
    """
    This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub.
    It returns the name of the checkpoint.

    Args:
        task: The task for which to get the most downloaded model.
    """
    model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
    return model.id
```


The function needs:

- A clear name. The name usually describes what the tool does. Since the code returns the model with the most downloads for a task, let's name it `model_download_tool`.
- Type hints on both inputs and output.
- A description that includes an 'Args:' part where each argument is described (without a type indication this time, it will be pulled from the type hint).

All of these are automatically baked into the agent's system prompt upon initialization, so strive to make them as clear as possible!

> [!TIP]
> This definition format is the same as tool schemas used in `apply_chat_template`, the only difference is the added `tool` decorator: read more on our tool use API [here](https://huggingface.co/blog/unified-tool-use#passing-tools-to-a-chat-template).


Then you can directly initialize your agent:

```py
from transformers import CodeAgent

agent = CodeAgent(tools=[model_download_tool], llm_engine=llm_engine)
agent.run(
    "Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?"
)
```

You get the following:

```text
======== New task ========
Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?
==== Agent is executing the code below:
most_downloaded_model = model_download_tool(task="text-to-video")
print(f"The most downloaded model for the 'text-to-video' task is {most_downloaded_model}.")
====
```

And the output:

`"The most downloaded model for the 'text-to-video' task is ByteDance/AnimateDiff-Lightning."`

### Manage your agent's toolbox

If you have already initialized an agent, it is inconvenient to reinitialize it from scratch with a tool you want to use. With Transformers, you can manage an agent's toolbox by adding or replacing a tool.

Let's add the `model_download_tool` to an existing agent initialized with only the default toolbox.

```python
from transformers import CodeAgent

agent = CodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True)
agent.toolbox.add_tool(model_download_tool)
```

Now we can leverage both the new tool and the previous text-to-speech tool:

```python
agent.run(
    "Can you read out loud the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub and return the audio?"
)
```

| **Audio**                                                                                                                                              |
|------------------------------------------------------------------------------------------------------------------------------------------------------|
| <audio controls><source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/damo.wav" type="audio/wav"/> |

> [!WARNING]
> Beware when adding tools to an agent that already works well, because it can bias selection towards your tool or cause the agent to select a tool other than the one already defined.

Use the `agent.toolbox.update_tool()` method to replace an existing tool in the agent's toolbox. This is useful if your new tool is a one-to-one replacement of the existing tool, because the agent already knows how to perform that specific task. Just make sure the new tool follows the same API as the replaced tool, or adapt the system prompt template to ensure all examples using the replaced tool are updated.
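
For illustration, replacing a tool could look like the minimal sketch below; `custom_search_tool` is a hypothetical replacement that keeps the same name and call signature as the tool it replaces.

```py
# Assumes `custom_search_tool` exposes the same name, description, inputs and output type
# as the existing tool, so the examples baked into the system prompt stay valid.
agent.toolbox.update_tool(custom_search_tool)
```
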

### Use a collection of tools

You can leverage tool collections by using the `ToolCollection` object with the slug of the collection you want to use. Then pass them as a list to initialize your agent, and start using them!

```py
from transformers import ToolCollection, ReactCodeAgent

image_tool_collection = ToolCollection(collection_slug="huggingface-tools/diffusion-tools-6630bb19a942c2306a2cdb6f")
agent = ReactCodeAgent(tools=[*image_tool_collection.tools], add_base_tools=True)

agent.run("Please draw me a picture of rivers and lakes.")
```

To speed up the start, tools are loaded only if called by the agent. This gets you this image:

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png">

## Multi-agent

[Multi-agent](https://hf.co/papers/2308.08155) refers to multiple agents working together to solve a task. Performance is typically better because each agent is specialized for a particular subtask.

Multi-agents are created through a [`ManagedAgent`] class, where a *manager agent* oversees how other agents work together. The manager agent requires an agent, its name, and a description. These are added to the manager agent's system prompt, which lets it know how to call and use the managed agent.

The multi-agent example below creates a web search agent that is managed by another [`ReactCodeAgent`].

```py
from transformers.agents import ReactCodeAgent, HfApiEngine, DuckDuckGoSearchTool, ManagedAgent

llm_engine = HfApiEngine()
web_agent = ReactCodeAgent(tools=[DuckDuckGoSearchTool()], llm_engine=llm_engine)
managed_web_agent = ManagedAgent(
    agent=web_agent,
    name="web_search",
    description="Runs web searches for you. Give it your query as an argument."
)
manager_agent = ReactCodeAgent(
    tools=[], llm_engine=llm_engine, managed_agents=[managed_web_agent]
)
manager_agent.run("Who is the CEO of Hugging Face?")
```

## Gradio integration

[Gradio](https://www.gradio.app/) is a library for quickly creating and sharing machine learning apps. The [gradio.Chatbot](https://www.gradio.app/docs/gradio/chatbot) supports chatting with a Transformers agent with the [`stream_to_gradio`] function.

Load a tool and LLM with an agent, and then create a Gradio app. The key is to use [`stream_to_gradio`] to stream the agent's messages and display how it's reasoning through a task.

```py
import gradio as gr
from transformers import (
    load_tool,
    ReactCodeAgent,
    HfApiEngine,
    stream_to_gradio,
)

# Import tool from Hub
image_generation_tool = load_tool("m-ric/text-to-image")
llm_engine = HfApiEngine("meta-llama/Meta-Llama-3-70B-Instruct")

# Initialize the agent with the image generation tool
agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)


def interact_with_agent(task):
    messages = []
    messages.append(gr.ChatMessage(role="user", content=task))
    yield messages
    for msg in stream_to_gradio(agent, task):
        messages.append(msg)
        yield messages + [
            gr.ChatMessage(role="assistant", content="⏳ Task not finished yet!")
        ]
    yield messages


with gr.Blocks() as demo:
    text_input = gr.Textbox(lines=1, label="Chat Message", value="Make me a picture of the Statue of Liberty.")
    submit = gr.Button("Run illustrator agent!")
    chatbot = gr.Chatbot(
        label="Agent",
        type="messages",
        avatar_images=(
            None,
            "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png",
        ),
    )
    submit.click(interact_with_agent, [text_input], [chatbot])

if __name__ == "__main__":
    demo.launch()
```

## Troubleshoot

For a better idea of what is happening when you call an agent, it is always a good idea to check the system prompt template first.

```py
print(agent.system_prompt_template)
```

If the agent is behaving unexpectedly, remember to explain the task you want to perform as clearly as possible. Every [`~Agent.run`] is independent, and minor variations in your prompt may yield completely different results. You can also run an agent consecutively for different tasks; each time, the `agent.task` and `agent.logs` attributes are re-initialized.

To find out what happened after a run, check the following agent attributes.

- `agent.logs` stores the fine-grained logs of the agent. At every step of the agent's run, everything is stored in a dictionary and appended to `agent.logs`.
- `agent.write_inner_memory_from_logs()` creates an inner memory of the agent's logs for the LLM to view, as a list of chat messages. For example, it stores the system prompt and task in separate messages, and for each step it stores the LLM output as a message and the tool call output as another message. Not every detail from a step is transcripted by `write_inner_memory_from_logs`.
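
For instance, after a run you could inspect both attributes as in the minimal sketch below; the exact structure of the log entries and messages is not documented here, so they are simply printed.

```py
agent.run("Why doesn't he know many people in New York?")

# Fine-grained, step-by-step records of the run
for step_log in agent.logs:
    print(step_log)

# Higher-level view: the same run as a list of chat messages
for message in agent.write_inner_memory_from_logs():
    print(message)
```
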

## Resources

Learn more about ReAct agents in the [Open-source LLMs as LangChain Agents](https://hf.co/blog/open-source-llms-as-agents) blog post.


# Agents, supercharged - Multi-agents, External tools, and more

[[open-in-colab]]

### What is an agent?

> [!TIP]
> If you're new to `transformers.agents`, make sure to first read the main [agents documentation](./agents).

In this page we're going to highlight several advanced uses of `transformers.agents`.

## Multi-agents

Multi-agent has been introduced in Microsoft's framework [Autogen](https://huggingface.co/papers/2308.08155). It simply means having several agents working together to solve your task instead of only one. It empirically yields better performance on most benchmarks. The reason for this better performance is conceptually simple: for many tasks, rather than using a do-it-all system, you would prefer to specialize units on sub-tasks. Here, having agents with separate tool sets and memories allows you to achieve efficient specialization.

You can easily build hierarchical multi-agent systems with `transformers.agents`.

To do so, encapsulate the agent in a [`ManagedAgent`] object. This object needs the arguments `agent`, `name`, and `description`, which will then be embedded in the manager agent's system prompt to let it know how to call this managed agent, as we also do for tools.

Here's an example of making an agent that manages a specific web search agent using our [`DuckDuckGoSearchTool`]:

```py
from transformers.agents import ReactCodeAgent, HfApiEngine, DuckDuckGoSearchTool, ManagedAgent

llm_engine = HfApiEngine()
web_agent = ReactCodeAgent(tools=[DuckDuckGoSearchTool()], llm_engine=llm_engine)
managed_web_agent = ManagedAgent(
    agent=web_agent,
    name="web_search",
    description="Runs web searches for you. Give it your query as an argument."
)
manager_agent = ReactCodeAgent(
    tools=[], llm_engine=llm_engine, managed_agents=[managed_web_agent]
)
manager_agent.run("Who is the CEO of Hugging Face?")
```

> [!TIP]
> For an in-depth example of an efficient multi-agent implementation, see [how we pushed our multi-agent system to the top of the GAIA leaderboard](https://huggingface.co/blog/beating-gaia).

## Advanced tool usage

### Directly define a tool by subclassing Tool, and share it to the Hub

Let's take again the tool example from the main documentation, for which we had implemented a `tool` decorator.

If you need to add variation, like custom attributes for your tool, you can build your tool following the fine-grained method: building a class that inherits from the [`Tool`] superclass.

The custom tool needs:
- An attribute `name`, which corresponds to the name of the tool itself. The name usually describes what the tool does. Since the code returns the model with the most downloads for a task, let's name it `model_download_counter`.
- An attribute `description`, which is used to populate the agent's system prompt.
- An `inputs` attribute, which is a dictionary with keys `"type"` and `"description"`. It contains information that helps the Python interpreter make educated choices about the input.
- An `output_type` attribute, which specifies the output type.
- A `forward` method which contains the inference code to be executed.

The types for both `inputs` and `output_type` should be amongst [Pydantic formats](https://docs.pydantic.dev/latest/concepts/json_schema/#generating-json-schema).


```python
from transformers import Tool
from huggingface_hub import list_models


class HFModelDownloadsTool(Tool):
    name = "model_download_counter"
    description = (
        "This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. "
        "It returns the name of the checkpoint."
    )

    inputs = {
        "task": {
            "type": "string",
            "description": "the task category (such as text-classification, depth-estimation, etc.)",
        }
    }
    output_type = "string"

    def forward(self, task: str):
        model = next(iter(list_models(filter=task, sort="downloads", direction=-1)))
        return model.id
```

Now that the custom `HfModelDownloadsTool` class is ready, you can save it to a file named `model_downloads.py` and import it for use.


```python
from model_downloads import HFModelDownloadsTool

tool = HFModelDownloadsTool()
```

You can also share your custom tool to the Hub by calling [`~Tool.push_to_hub`] on the tool. Make sure you've created a repository for it on the Hub and are using a token with read access.

```python
tool.push_to_hub("{your_username}/hf-model-downloads")
```

Load the tool with the [`~Tool.load_tool`] function and pass it to the `tools` parameter in your agent.

```python
from transformers import load_tool, CodeAgent

model_download_tool = load_tool("m-ric/hf-model-downloads")
```

### Import a Space as a tool 🚀
|
||||
|
||||
你可以直接通过 [`Tool.from_space`] 方法将 Hub 上的 Space 导入为工具!
|
||||
You can directly import a Space from the Hub as a tool using the [`Tool.from_space`] method!
|
||||
|
||||
只需要提供 Space 在 Hub 上的 ID、名称和描述,帮助智能体理解工具的作用。在幕后,这将使用 [`gradio-client`](https://pypi.org/project/gradio-client/) 库来调用 Space。
|
||||
You only need to provide the id of the Space on the Hub, its name, and a description that will help you agent understand what the tool does. Under the hood, this will use [`gradio-client`](https://pypi.org/project/gradio-client/) library to call the Space.
|
||||
|
||||
例如,下面是从 Hub 导入 `FLUX.1-dev` Space 并用其生成图像的示例:
|
||||
For instance, let's import the [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) Space from the Hub and use it to generate an image.
|
||||
|
||||
```
|
||||
from transformers import Tool
|
||||
|
||||
image_generation_tool = Tool.from_space(
|
||||
"black-forest-labs/FLUX.1-dev",
|
||||
name="image_generator",
|
||||
description="Generate an image from a prompt")
|
||||
|
||||
image_generation_tool("A sunny beach")
|
||||
```

And voilà, here's your image! 🏖️

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/sunny_beach.webp">

Then you can use this tool just like any other tool. For example, let's improve the prompt `a rabbit wearing a space suit` and generate an image of it.

```python
from transformers import ReactCodeAgent

agent = ReactCodeAgent(tools=[image_generation_tool])

agent.run(
    "Improve this prompt, then generate an image of it.", prompt="A rabbit wearing a space suit"
)
```

```text
=== Agent thoughts:
improved_prompt could be "A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background"

Now that I have improved the prompt, I can use the image generator tool to generate an image based on this prompt.
>>> Agent is executing the code below:
image = image_generator(prompt="A bright blue space suit wearing rabbit, on the surface of the moon, under a bright orange sunset, with the Earth visible in the background")
final_answer(image)
```

<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit_spacesuit_flux.webp">

How cool is this? 🤩

### Use gradio-tools

[gradio-tools](https://github.com/freddyaboulton/gradio-tools) is a powerful library that allows using Hugging Face Spaces as tools. It supports many existing Spaces as well as custom Spaces.

Transformers supports `gradio_tools` with the [`Tool.from_gradio`] method. For example, let's use the [`StableDiffusionPromptGeneratorTool`](https://github.com/freddyaboulton/gradio-tools/blob/main/gradio_tools/tools/prompt_generator.py) from the `gradio-tools` toolkit to improve prompts and generate better images.

Import and instantiate the tool, then pass it to the `Tool.from_gradio` method:

```python
from gradio_tools import StableDiffusionPromptGeneratorTool

from transformers import Tool

gradio_prompt_generator_tool = StableDiffusionPromptGeneratorTool()
prompt_generator_tool = Tool.from_gradio(gradio_prompt_generator_tool)
```

> [!WARNING]
> gradio-tools requires *textual* inputs and outputs even when working with different modalities like image and audio objects. Image and audio inputs and outputs are currently incompatible.

### Use LangChain tools

We love LangChain and think it has a very compelling suite of tools.
To import a tool from LangChain, use the `from_langchain()` method.

Here is how you can use it to recreate the intro's search result using a LangChain web search tool.
This tool will need `pip install google-search-results` to work properly.

```python
from langchain.agents import load_tools
from transformers import Tool, ReactCodeAgent

# wrap LangChain's SerpAPI search tool (this is what the google-search-results package provides)
search_tool = Tool.from_langchain(load_tools(["serpapi"])[0])

agent = ReactCodeAgent(tools=[search_tool])

agent.run("How many more blocks (also denoted as layers) are in BERT base encoder compared to the encoder from the architecture proposed in Attention is All You Need?")
```

## Display your agent run in a cool Gradio interface

You can leverage `gradio.Chatbot` to display your agent's thoughts using `stream_to_gradio`. Here is an example:

```py
import gradio as gr
# (the rest of this example is truncated in the diff view; see the sketch below)
```
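
A fuller sketch of what this example might look like. The tool id, the agent class, and the exact behavior of `stream_to_gradio` (assumed here to be importable from `transformers` and to yield Gradio chat messages for each of the agent's steps) are illustrative assumptions; adapt them to your setup and Gradio version.

```py
import gradio as gr

from transformers import ReactCodeAgent, load_tool, stream_to_gradio

# Illustrative tool and agent; swap in your own tools and LLM engine as needed.
image_generation_tool = load_tool("m-ric/text-to-image")
agent = ReactCodeAgent(tools=[image_generation_tool])


def interact_with_agent(task, history):
    # Show the user's task, then stream the agent's thoughts and actions as chat messages.
    history = history + [gr.ChatMessage(role="user", content=task)]
    yield history
    for message in stream_to_gradio(agent, task):
        history = history + [message]
        yield history


with gr.Blocks() as demo:
    # requires a recent Gradio version supporting message-style chatbots
    chatbot = gr.Chatbot(type="messages", label="Agent run")
    task_box = gr.Textbox(label="Task for the agent")
    task_box.submit(interact_with_agent, [task_box, chatbot], chatbot)

demo.launch()
```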
docs/source/en/autoclass_tutorial.md (new file, 186 lines)
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Load pretrained instances with an AutoClass
|
||||
|
||||
With so many different Transformer architectures, it can be challenging to create one for your checkpoint. As a part of 🤗 Transformers core philosophy to make the library easy, simple and flexible to use, an `AutoClass` automatically infers and loads the correct architecture from a given checkpoint. The `from_pretrained()` method lets you quickly load a pretrained model for any architecture so you don't have to devote time and resources to train a model from scratch. Producing this type of checkpoint-agnostic code means if your code works for one checkpoint, it will work with another checkpoint - as long as it was trained for a similar task - even if the architecture is different.
|
||||
|
||||
<Tip>
|
||||
|
||||
Remember, architecture refers to the skeleton of the model and checkpoints are the weights for a given architecture. For example, [BERT](https://huggingface.co/google-bert/bert-base-uncased) is an architecture, while `google-bert/bert-base-uncased` is a checkpoint. Model is a general term that can mean either architecture or checkpoint.
|
||||
|
||||
</Tip>
|
||||
|
||||
In this tutorial, learn to:
|
||||
|
||||
* Load a pretrained tokenizer.
|
||||
* Load a pretrained image processor.
|
||||
* Load a pretrained feature extractor.
|
||||
* Load a pretrained processor.
|
||||
* Load a pretrained model.
|
||||
* Load a model as a backbone.
|
||||
|
||||
## AutoTokenizer
|
||||
|
||||
Nearly every NLP task begins with a tokenizer. A tokenizer converts your input into a format that can be processed by the model.
|
||||
|
||||
Load a tokenizer with [`AutoTokenizer.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoTokenizer
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
|
||||
```
|
||||
|
||||
Then tokenize your input as shown below:
|
||||
|
||||
```py
|
||||
>>> sequence = "In a hole in the ground there lived a hobbit."
|
||||
>>> print(tokenizer(sequence))
|
||||
{'input_ids': [101, 1999, 1037, 4920, 1999, 1996, 2598, 2045, 2973, 1037, 7570, 10322, 4183, 1012, 102],
|
||||
'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
|
||||
'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
|
||||
```
|
||||
|
||||
## AutoImageProcessor
|
||||
|
||||
For vision tasks, an image processor processes the image into the correct input format.
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoImageProcessor
|
||||
|
||||
>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
|
||||
```
|
||||
|
||||
## AutoBackbone
|
||||
|
||||
<div style="text-align: center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stages.png">
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">A Swin backbone with multiple stages for outputting a feature map.</figcaption>
|
||||
</div>
|
||||
|
||||
The [`AutoBackbone`] lets you use pretrained models as backbones to get feature maps from different stages of the backbone. You should specify one of the following parameters in [`~PretrainedConfig.from_pretrained`]:
|
||||
|
||||
* `out_indices` is the index of the layer you'd like to get the feature map from
|
||||
* `out_features` is the name of the layer you'd like to get the feature map from
|
||||
|
||||
These parameters can be used interchangeably, but if you use both, make sure they're aligned with each other! If you don't pass any of these parameters, the backbone returns the feature map from the last layer.
|
||||
|
||||
<div style="text-align: center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stage%201.png">
|
||||
<figcaption class="mt-2 text-center text-sm text-gray-500">A feature map from the first stage of the backbone. The patch partition refers to the model stem.</figcaption>
|
||||
</div>
|
||||
|
||||
For example, in the above diagram, to return the feature map from the first stage of the Swin backbone, you can set `out_indices=(1,)`:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoImageProcessor, AutoBackbone
|
||||
>>> import torch
|
||||
>>> from PIL import Image
|
||||
>>> import requests
|
||||
>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
>>> image = Image.open(requests.get(url, stream=True).raw)
|
||||
>>> processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
|
||||
>>> model = AutoBackbone.from_pretrained("microsoft/swin-tiny-patch4-window7-224", out_indices=(1,))
|
||||
|
||||
>>> inputs = processor(image, return_tensors="pt")
|
||||
>>> outputs = model(**inputs)
|
||||
>>> feature_maps = outputs.feature_maps
|
||||
```
|
||||
|
||||
Now you can access the `feature_maps` object from the first stage of the backbone:
|
||||
|
||||
```py
|
||||
>>> list(feature_maps[0].shape)
|
||||
[1, 96, 56, 56]
|
||||
```
|
||||
|
||||
## AutoFeatureExtractor
|
||||
|
||||
For audio tasks, a feature extractor processes the audio signal into the correct input format.
|
||||
|
||||
Load a feature extractor with [`AutoFeatureExtractor.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoFeatureExtractor
|
||||
|
||||
>>> feature_extractor = AutoFeatureExtractor.from_pretrained(
|
||||
... "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition"
|
||||
... )
|
||||
```
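
You can then call the feature extractor on raw audio. The dummy waveform below (one second of silence at 16kHz) is purely illustrative; pass your own array and its real sampling rate.

```py
>>> import numpy as np

>>> raw_audio = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16kHz, for illustration
>>> inputs = feature_extractor(raw_audio, sampling_rate=16000, return_tensors="pt")
```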
|
||||
|
||||
## AutoProcessor
|
||||
|
||||
Multimodal tasks require a processor that combines two types of preprocessing tools. For example, the [LayoutLMV2](model_doc/layoutlmv2) model requires an image processor to handle images and a tokenizer to handle text; a processor combines both of them.
|
||||
|
||||
Load a processor with [`AutoProcessor.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoProcessor
|
||||
|
||||
>>> processor = AutoProcessor.from_pretrained("microsoft/layoutlmv2-base-uncased")
|
||||
```
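
As a hedged sketch of how the processor is then used: LayoutLMv2's processor applies OCR by default, so this assumes a local `document.png` file and a working Tesseract installation.

```py
>>> from PIL import Image

>>> image = Image.open("document.png").convert("RGB")  # assumed local file
>>> encoding = processor(image, return_tensors="pt")
>>> list(encoding.keys())  # input_ids, bbox, image, attention_mask, ...
```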
|
||||
|
||||
## AutoModel
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
The `AutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`AutoModelForSequenceClassification.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForSequenceClassification
|
||||
|
||||
>>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
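
As a quick, hedged illustration of using the loaded model (the classification head is freshly initialized here, so the logits are only meaningful after fine-tuning):

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
>>> inputs = tokenizer("In a hole in the ground there lived a hobbit.", return_tensors="pt")
>>> outputs = model(**inputs)
>>> print(outputs.logits.shape)  # (batch_size, num_labels) -> torch.Size([1, 2]) with the default 2 labels
```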
|
||||
|
||||
Easily reuse the same checkpoint to load an architecture for a different task:
|
||||
|
||||
```py
|
||||
>>> from transformers import AutoModelForTokenClassification
|
||||
|
||||
>>> model = AutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
For PyTorch models, the `from_pretrained()` method uses `torch.load()` which internally uses `pickle` and is known to be insecure. In general, never load a model that could have come from an untrusted source, or that could have been tampered with. This security risk is partially mitigated for public models hosted on the Hugging Face Hub, which are [scanned for malware](https://huggingface.co/docs/hub/security-malware) at each commit. See the [Hub documentation](https://huggingface.co/docs/hub/security) for best practices like [signed commit verification](https://huggingface.co/docs/hub/security-gpg#signing-commits-with-gpg) with GPG.
|
||||
|
||||
TensorFlow and Flax checkpoints are not affected, and can be loaded within PyTorch architectures using the `from_tf` and `from_flax` kwargs for the `from_pretrained` method to circumvent this issue.
|
||||
|
||||
</Tip>
|
||||
|
||||
Generally, we recommend using the `AutoTokenizer` class and the `AutoModelFor` class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, image processor, feature extractor and processor to preprocess a dataset for fine-tuning.
|
||||
</pt>
|
||||
<tf>
|
||||
Finally, the `TFAutoModelFor` classes let you load a pretrained model for a given task (see [here](model_doc/auto) for a complete list of available tasks). For example, load a model for sequence classification with [`TFAutoModelForSequenceClassification.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForSequenceClassification
|
||||
|
||||
>>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Easily reuse the same checkpoint to load an architecture for a different task:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFAutoModelForTokenClassification
|
||||
|
||||
>>> model = TFAutoModelForTokenClassification.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Generally, we recommend using the `AutoTokenizer` class and the `TFAutoModelFor` class to load pretrained instances of models. This will ensure you load the correct architecture every time. In the next [tutorial](preprocessing), learn how to use your newly loaded tokenizer, image processor, feature extractor and processor to preprocess a dataset for fine-tuning.
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
(deleted file, 155 lines)
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Backbones
|
||||
|
||||
Higher-level computer vision tasks, such as object detection or image segmentation, use several models together to generate a prediction. A separate model is used for the *backbone*, neck, and head. The backbone extracts useful features from an input image into a feature map, the neck combines and processes the feature maps, and the head uses them to make a prediction.
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Backbone.png"/>
|
||||
</div>
|
||||
|
||||
Load a backbone with [`~PretrainedConfig.from_pretrained`] and use the `out_indices` parameter to determine which layer, given by the index, to extract a feature map from.
|
||||
|
||||
```py
|
||||
from transformers import AutoBackbone
|
||||
|
||||
model = AutoBackbone.from_pretrained("microsoft/swin-tiny-patch4-window7-224", out_indices=(1,))
|
||||
```
|
||||
|
||||
This guide describes the backbone class, backbones from the [timm](https://hf.co/docs/timm/index) library, and how to extract features with them.
|
||||
|
||||
## Backbone classes
|
||||
|
||||
There are two backbone classes.
|
||||
|
||||
- [`~transformers.utils.BackboneMixin`] allows you to load a backbone and includes functions for extracting the feature maps and indices.
|
||||
- [`~transformers.utils.BackboneConfigMixin`] allows you to set the feature map and indices of a backbone configuration.
|
||||
|
||||
Refer to the [Backbone](./main_classes/backbones) API documentation to check which models support a backbone.
|
||||
|
||||
There are two ways to load a Transformers backbone, [`AutoBackbone`] and a model-specific backbone class.
|
||||
|
||||
<hfoptions id="backbone-classes">
|
||||
<hfoption id="AutoBackbone">
|
||||
|
||||
The [AutoClass](./model_doc/auto) API automatically loads a pretrained vision model with [`~PretrainedConfig.from_pretrained`] as a backbone if it's supported.
|
||||
|
||||
Set the `out_indices` parameter to the layer you'd like to get the feature map from. If you know the name of the layer, you could also use `out_features`. These parameters can be used interchangeably, but if you use both, make sure they refer to the same layer.
|
||||
|
||||
When `out_indices` or `out_features` isn't used, the backbone returns the feature map from the last layer. The example code below uses `out_indices=(1,)` to get the feature map from the first layer.
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Swin%20Stage%201.png"/>
|
||||
</div>
|
||||
|
||||
```py
|
||||
from transformers import AutoImageProcessor, AutoBackbone
|
||||
|
||||
model = AutoBackbone.from_pretrained("microsoft/swin-tiny-patch4-window7-224", out_indices=(1,))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="model-specific backbone">
|
||||
|
||||
When you know a model supports a backbone, you can load the backbone and neck directly into the model's configuration. Pass the configuration to the model to initialize it for a task.
|
||||
|
||||
The example below loads a [ResNet](./model_doc/resnet) backbone and neck for use in a [MaskFormer](./model_doc/maskformer) instance segmentation head.
|
||||
|
||||
Set `backbone` to a pretrained model and `use_pretrained_backbone=True` to use pretrained weights instead of randomly initialized weights.
|
||||
|
||||
```py
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation
|
||||
|
||||
config = MaskFormerConfig(backbone="microsoft/resnet-50", use_pretrained_backbone=True)
|
||||
model = MaskFormerForInstanceSegmentation(config)
|
||||
```
|
||||
|
||||
Another option is to separately load the backbone configuration and then pass it to `backbone_config` in the model configuration.
|
||||
|
||||
```py
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, ResNetConfig
|
||||
|
||||
# instantiate backbone configuration
|
||||
backbone_config = ResNetConfig()
|
||||
# load backbone in model
|
||||
config = MaskFormerConfig(backbone_config=backbone_config)
|
||||
# attach backbone to model head
|
||||
model = MaskFormerForInstanceSegmentation(config)
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## timm backbones
|
||||
|
||||
[timm](https://hf.co/docs/timm/index) is a collection of vision models for training and inference. Transformers supports timm models as backbones with the [`TimmBackbone`] and [`TimmBackboneConfig`] classes.
|
||||
|
||||
Set `use_timm_backbone=True` to load a timm model as the backbone, and use `use_pretrained_backbone` to choose between pretrained and randomly initialized weights.
|
||||
|
||||
```py
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation
|
||||
|
||||
config = MaskFormerConfig(backbone="resnet50", use_timm_backbone=True, use_pretrained_backbone=True)
|
||||
model = MaskFormerForInstanceSegmentation(config)
|
||||
```
|
||||
|
||||
You could also explicitly call the [`TimmBackboneConfig`] class to load and create a pretrained timm backbone.
|
||||
|
||||
```py
|
||||
from transformers import TimmBackboneConfig
|
||||
|
||||
backbone_config = TimmBackboneConfig("resnet50", use_pretrained_backbone=True)
|
||||
```
|
||||
|
||||
Pass the backbone configuration to the model configuration and instantiate the model head, [`MaskFormerForInstanceSegmentation`], with the backbone.
|
||||
|
||||
```py
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation
|
||||
|
||||
config = MaskFormerConfig(backbone_config=backbone_config)
|
||||
model = MaskFormerForInstanceSegmentation(config)
|
||||
```
|
||||
|
||||
## Feature extraction
|
||||
|
||||
The backbone is used to extract image features. Pass an image through the backbone to get the feature maps.
|
||||
|
||||
Load and preprocess an image and pass it to the backbone. The example below extracts the feature maps from the first layer.
|
||||
|
||||
```py
|
||||
from transformers import AutoImageProcessor, AutoBackbone
|
||||
import torch
|
||||
from PIL import Image
|
||||
import requests
|
||||
|
||||
model = AutoBackbone.from_pretrained("microsoft/swin-tiny-patch4-window7-224", out_indices=(1,))
|
||||
processor = AutoImageProcessor.from_pretrained("microsoft/swin-tiny-patch4-window7-224")
|
||||
|
||||
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
|
||||
image = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
inputs = processor(image, return_tensors="pt")
|
||||
outputs = model(**inputs)
|
||||
```
|
||||
|
||||
The features are stored in and accessed from the output's `feature_maps` attribute.
|
||||
|
||||
```py
|
||||
feature_maps = outputs.feature_maps
|
||||
list(feature_maps[0].shape)
|
||||
[1, 96, 56, 56]
|
||||
```
|
||||
docs/source/en/benchmarks.md (new file, 387 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Benchmarks
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Hugging Face's benchmarking tools are deprecated and it is advised to use external benchmarking libraries to measure the speed
|
||||
and memory complexity of Transformer models.
|
||||
|
||||
</Tip>
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
Let's take a look at how 🤗 Transformers models can be benchmarked, best practices, and already available benchmarks.
|
||||
|
||||
A notebook explaining in more detail how to benchmark 🤗 Transformers models can be found [here](https://github.com/huggingface/notebooks/tree/main/examples/benchmark.ipynb).
|
||||
|
||||
## How to benchmark 🤗 Transformers models
|
||||
|
||||
The classes [`PyTorchBenchmark`] and [`TensorFlowBenchmark`] allow you to flexibly benchmark 🤗 Transformers models. The benchmark classes allow us to measure the _peak memory usage_ and _required time_ for both _inference_ and _training_.
|
||||
|
||||
<Tip>
|
||||
|
||||
Here, _inference_ is defined by a single forward pass, and _training_ is defined by a single forward pass and
|
||||
backward pass.
|
||||
|
||||
</Tip>
|
||||
|
||||
The benchmark classes [`PyTorchBenchmark`] and [`TensorFlowBenchmark`] expect an object of type [`PyTorchBenchmarkArguments`] and
|
||||
[`TensorFlowBenchmarkArguments`], respectively, for instantiation. [`PyTorchBenchmarkArguments`] and [`TensorFlowBenchmarkArguments`] are data classes and contain all relevant configurations for their corresponding benchmark class. In the following example, it is shown how a BERT model of type _bert-base-cased_ can be benchmarked.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```py
|
||||
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
|
||||
|
||||
>>> args = PyTorchBenchmarkArguments(models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512])
|
||||
>>> benchmark = PyTorchBenchmark(args)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```py
|
||||
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
|
||||
|
||||
>>> args = TensorFlowBenchmarkArguments(
|
||||
... models=["google-bert/bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> benchmark = TensorFlowBenchmark(args)
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
Here, three arguments are given to the benchmark argument data classes, namely `models`, `batch_sizes`, and
|
||||
`sequence_lengths`. The argument `models` is required and expects a `list` of model identifiers from the
|
||||
[model hub](https://huggingface.co/models). The `list` arguments `batch_sizes` and `sequence_lengths` define
|
||||
the size of the `input_ids` on which the model is benchmarked. There are many more parameters that can be configured
|
||||
via the benchmark argument data classes. For more detail on these one can either directly consult the files
|
||||
`src/transformers/benchmark/benchmark_args_utils.py`, `src/transformers/benchmark/benchmark_args.py` (for PyTorch)
|
||||
and `src/transformers/benchmark/benchmark_args_tf.py` (for Tensorflow). Alternatively, running the following shell
|
||||
commands from root will print out a descriptive list of all configurable parameters for PyTorch and Tensorflow
|
||||
respectively.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```bash
|
||||
python examples/pytorch/benchmarking/run_benchmark.py --help
|
||||
```
|
||||
|
||||
An instantiated benchmark object can then simply be run by calling `benchmark.run()`.
|
||||
|
||||
```py
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 0.006
|
||||
google-bert/bert-base-uncased 8 32 0.006
|
||||
google-bert/bert-base-uncased 8 128 0.018
|
||||
google-bert/bert-base-uncased 8 512 0.088
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 1227
|
||||
google-bert/bert-base-uncased 8 32 1281
|
||||
google-bert/bert-base-uncased 8 128 1307
|
||||
google-bert/bert-base-uncased 8 512 1539
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: PyTorch
|
||||
- use_torchscript: False
|
||||
- framework_version: 1.4.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 08:58:43.371351
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```bash
|
||||
python examples/tensorflow/benchmarking/run_benchmark_tf.py --help
|
||||
```
|
||||
|
||||
An instantiated benchmark object can then simply be run by calling `benchmark.run()`.
|
||||
|
||||
```py
|
||||
>>> results = benchmark.run()
|
||||
>>> print(results)
|
||||
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 0.005
|
||||
google-bert/bert-base-uncased 8 32 0.008
|
||||
google-bert/bert-base-uncased 8 128 0.022
|
||||
google-bert/bert-base-uncased 8 512 0.105
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
google-bert/bert-base-uncased 8 8 1330
|
||||
google-bert/bert-base-uncased 8 32 1330
|
||||
google-bert/bert-base-uncased 8 128 1330
|
||||
google-bert/bert-base-uncased 8 512 1770
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: Tensorflow
|
||||
- use_xla: False
|
||||
- framework_version: 2.2.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:26:35.617317
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
By default, the _time_ and the _required memory_ for _inference_ are benchmarked. In the example output above the first
|
||||
two sections show the result corresponding to _inference time_ and _inference memory_. In addition, all relevant
|
||||
information about the computing environment, _e.g._ the GPU type, the system, the library versions, etc... are printed
|
||||
out in the third section under _ENVIRONMENT INFORMATION_. This information can optionally be saved in a _.csv_ file
|
||||
when adding the argument `save_to_csv=True` to [`PyTorchBenchmarkArguments`] and
|
||||
[`TensorFlowBenchmarkArguments`] respectively. In this case, every section is saved in a separate
|
||||
_.csv_ file. The path to each _.csv_ file can optionally be defined via the argument data classes.
|
||||
|
||||
Instead of benchmarking pre-trained models via their model identifier, _e.g._ `google-bert/bert-base-uncased`, the user can
|
||||
alternatively benchmark an arbitrary configuration of any available model class. In this case, a `list` of
|
||||
configurations must be inserted with the benchmark args as follows.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
```py
|
||||
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments, BertConfig
|
||||
|
||||
>>> args = PyTorchBenchmarkArguments(
|
||||
... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> config_base = BertConfig()
|
||||
>>> config_384_hid = BertConfig(hidden_size=384)
|
||||
>>> config_6_lay = BertConfig(num_hidden_layers=6)
|
||||
|
||||
>>> benchmark = PyTorchBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
|
||||
>>> benchmark.run()
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base                  8               8             0.006
bert-base                  8              32             0.006
|
||||
bert-base 8 128 0.018
|
||||
bert-base 8 512 0.088
|
||||
bert-384-hid 8 8 0.006
|
||||
bert-384-hid 8 32 0.006
|
||||
bert-384-hid 8 128 0.011
|
||||
bert-384-hid 8 512 0.054
|
||||
bert-6-lay 8 8 0.003
|
||||
bert-6-lay 8 32 0.004
|
||||
bert-6-lay 8 128 0.009
|
||||
bert-6-lay 8 512 0.044
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 8 1277
|
||||
bert-base 8 32 1281
|
||||
bert-base 8 128 1307
|
||||
bert-base 8 512 1539
|
||||
bert-384-hid 8 8 1005
|
||||
bert-384-hid 8 32 1027
|
||||
bert-384-hid 8 128 1035
|
||||
bert-384-hid 8 512 1255
|
||||
bert-6-lay 8 8 1097
|
||||
bert-6-lay 8 32 1101
|
||||
bert-6-lay 8 128 1127
|
||||
bert-6-lay 8 512 1359
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: PyTorch
|
||||
- use_torchscript: False
|
||||
- framework_version: 1.4.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:35:25.143267
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
```py
|
||||
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments, BertConfig
|
||||
|
||||
>>> args = TensorFlowBenchmarkArguments(
|
||||
... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
|
||||
... )
|
||||
>>> config_base = BertConfig()
|
||||
>>> config_384_hid = BertConfig(hidden_size=384)
|
||||
>>> config_6_lay = BertConfig(num_hidden_layers=6)
|
||||
|
||||
>>> benchmark = TensorFlowBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
|
||||
>>> benchmark.run()
|
||||
==================== INFERENCE - SPEED - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Time in s
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 8 0.005
|
||||
bert-base 8 32 0.008
|
||||
bert-base 8 128 0.022
|
||||
bert-base 8 512 0.106
|
||||
bert-384-hid 8 8 0.005
|
||||
bert-384-hid 8 32 0.007
|
||||
bert-384-hid 8 128 0.018
|
||||
bert-384-hid 8 512 0.064
|
||||
bert-6-lay 8 8 0.002
|
||||
bert-6-lay 8 32 0.003
|
||||
bert-6-lay 8 128 0.0011
|
||||
bert-6-lay 8 512 0.074
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== INFERENCE - MEMORY - RESULT ====================
|
||||
--------------------------------------------------------------------------------
|
||||
Model Name Batch Size Seq Length Memory in MB
|
||||
--------------------------------------------------------------------------------
|
||||
bert-base 8 8 1330
|
||||
bert-base 8 32 1330
|
||||
bert-base 8 128 1330
|
||||
bert-base 8 512 1770
|
||||
bert-384-hid 8 8 1330
|
||||
bert-384-hid 8 32 1330
|
||||
bert-384-hid 8 128 1330
|
||||
bert-384-hid 8 512 1540
|
||||
bert-6-lay 8 8 1330
|
||||
bert-6-lay 8 32 1330
|
||||
bert-6-lay 8 128 1330
|
||||
bert-6-lay 8 512 1540
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
==================== ENVIRONMENT INFORMATION ====================
|
||||
|
||||
- transformers_version: 2.11.0
|
||||
- framework: Tensorflow
|
||||
- use_xla: False
|
||||
- framework_version: 2.2.0
|
||||
- python_version: 3.6.10
|
||||
- system: Linux
|
||||
- cpu: x86_64
|
||||
- architecture: 64bit
|
||||
- date: 2020-06-29
|
||||
- time: 09:38:15.487125
|
||||
- fp16: False
|
||||
- use_multiprocessing: True
|
||||
- only_pretrain_model: False
|
||||
- cpu_ram_mb: 32088
|
||||
- use_gpu: True
|
||||
- num_gpus: 1
|
||||
- gpu: TITAN RTX
|
||||
- gpu_ram_mb: 24217
|
||||
- gpu_power_watts: 280.0
|
||||
- gpu_performance_state: 2
|
||||
- use_tpu: False
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
Again, _inference time_ and _required memory_ for _inference_ are measured, but this time for customized configurations
|
||||
of the `BertModel` class. This feature can especially be helpful when deciding for which configuration the model
|
||||
should be trained.
|
||||
|
||||
|
||||
## Benchmark best practices
|
||||
|
||||
This section lists a couple of best practices one should be aware of when benchmarking a model.
|
||||
|
||||
- Currently, only single device benchmarking is supported. When benchmarking on GPU, it is recommended that the user
|
||||
specifies on which device the code should be run by setting the `CUDA_VISIBLE_DEVICES` environment variable in the
|
||||
shell, _e.g._ `export CUDA_VISIBLE_DEVICES=0` before running the code.
|
||||
- The option `no_multi_processing` should only be set to `True` for testing and debugging. To ensure accurate
memory measurement, it is recommended to run each memory benchmark in a separate process, which is the default
behavior when `no_multi_processing` is left at `False`.
|
||||
- One should always state the environment information when sharing the results of a model benchmark. Results can vary
|
||||
heavily between different GPU devices, library versions, etc., as a consequence, benchmark results on their own are not very
|
||||
useful for the community.
|
||||
|
||||
|
||||
## Sharing your benchmark
|
||||
|
||||
Previously, all available core models (10 at the time) were benchmarked for _inference time_ across many different
|
||||
settings: using PyTorch, with and without TorchScript, using TensorFlow, with and without XLA. All of those tests were
|
||||
done across CPUs (except for TensorFlow XLA) and GPUs.
|
||||
|
||||
The approach is detailed in the [following blogpost](https://medium.com/huggingface/benchmarking-transformers-pytorch-and-tensorflow-e2917fb891c2) and the results are
|
||||
available [here](https://docs.google.com/spreadsheets/d/1sryqufw2D0XlUH4sq3e9Wnxu5EAQkaohzrJbd5HdQ_w/edit?usp=sharing).
|
||||
|
||||
With the new _benchmark_ tools, it is easier than ever to share your benchmark results with the community:
|
||||
|
||||
- [PyTorch Benchmarking Results](https://github.com/huggingface/transformers/tree/main/examples/pytorch/benchmarking/README.md).
|
||||
- [TensorFlow Benchmarking Results](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/benchmarking/README.md).
|
||||
docs/source/en/bertology.md (new file, 41 lines)
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# BERTology
|
||||
|
||||
There is a growing field of study concerned with investigating the inner workings of large-scale transformers like BERT
|
||||
(that some call "BERTology"). Some good examples of this field are:
|
||||
|
||||
|
||||
- BERT Rediscovers the Classical NLP Pipeline by Ian Tenney, Dipanjan Das, Ellie Pavlick:
|
||||
https://arxiv.org/abs/1905.05950
|
||||
- Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: https://arxiv.org/abs/1905.10650
|
||||
- What Does BERT Look At? An Analysis of BERT's Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D.
|
||||
Manning: https://arxiv.org/abs/1906.04341
|
||||
- CAT-probing: A Metric-based Approach to Interpret How Pre-trained Models for Programming Language Attend Code Structure: https://arxiv.org/abs/2210.04633
|
||||
|
||||
In order to help this new field develop, we have included a few additional features in the BERT/GPT/GPT-2 models to
|
||||
help people access the inner representations, mainly adapted from the great work of Paul Michel
|
||||
(https://arxiv.org/abs/1905.10650):
|
||||
|
||||
|
||||
- accessing all the hidden-states of BERT/GPT/GPT-2,
|
||||
- accessing all the attention weights for each head of BERT/GPT/GPT-2,
|
||||
- retrieving heads' output values and gradients to be able to compute head importance scores and prune heads as explained
in https://arxiv.org/abs/1905.10650.
|
||||
|
||||
To help you understand and use these features, we have added a specific example script: [bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py) which extracts information from and prunes a model pre-trained on
GLUE.
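
For instance, a minimal sketch of accessing the hidden states and attention weights of a BERT checkpoint (the checkpoint name and input sentence below are only illustrative):

```python
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
model = AutoModel.from_pretrained(
    "google-bert/bert-base-uncased", output_hidden_states=True, output_attentions=True
)

inputs = tokenizer("BERTology studies the inner workings of BERT.", return_tensors="pt")
outputs = model(**inputs)

print(len(outputs.hidden_states))  # embedding output + one tensor per layer (13 for BERT base)
print(len(outputs.attentions))     # one attention tensor per layer, one map per head (12 layers for BERT base)
```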
|
||||
docs/source/en/big_models.md (new file, 215 lines)
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Instantiate a big model
|
||||
|
||||
A barrier to accessing very large pretrained models is the amount of memory required. When loading a pretrained PyTorch model, you usually:
|
||||
|
||||
1. Create a model with random weights.
|
||||
2. Load your pretrained weights.
|
||||
3. Put those pretrained weights in the model.
|
||||
|
||||
The first two steps both require a full version of the model in memory and if the model weighs several GBs, you may not have enough memory for two copies of it. This problem is amplified in distributed training environments because each process loads a pretrained model and stores two copies in memory.
|
||||
|
||||
> [!TIP]
|
||||
> The randomly created model is initialized with "empty" tensors, which take space in memory without filling it. The random values are whatever was in this chunk of memory at the time. To improve loading speed, the [`_fast_init`](https://github.com/huggingface/transformers/blob/c9f6e5e35156e068b227dd9b15521767f6afd4d2/src/transformers/modeling_utils.py#L2710) parameter is set to `True` by default to skip the random initialization for all weights that are correctly loaded.
|
||||
|
||||
This guide will show you how Transformers can help you load large pretrained models despite their memory requirements.
|
||||
|
||||
## Sharded checkpoints
|
||||
|
||||
From Transformers v4.18.0, a checkpoint larger than 10GB is automatically sharded by the [`~PreTrainedModel.save_pretrained`] method. It is split into several smaller partial checkpoints and creates an index file that maps parameter names to the files they're stored in.
|
||||
|
||||
The maximum shard size is controlled with the `max_shard_size` parameter, but by default it is 5GB, because it is easier to run on free-tier GPU instances without running out of memory.
|
||||
|
||||
For example, let's shard [BioMistral/BioMistral-7B](https://hf.co/BioMistral/BioMistral-7B).
|
||||
|
||||
```py
>>> import os
>>> import tempfile

>>> # `model` here is the BioMistral/BioMistral-7B checkpoint loaded with AutoModel.from_pretrained
>>> with tempfile.TemporaryDirectory() as tmp_dir:
...     model.save_pretrained(tmp_dir, max_shard_size="5GB")
...     print(sorted(os.listdir(tmp_dir)))
['config.json', 'generation_config.json', 'model-00001-of-00006.safetensors', 'model-00002-of-00006.safetensors', 'model-00003-of-00006.safetensors', 'model-00004-of-00006.safetensors', 'model-00005-of-00006.safetensors', 'model-00006-of-00006.safetensors', 'model.safetensors.index.json']
```
|
||||
|
||||
The sharded checkpoint is reloaded with the [`~PreTrainedModel.from_pretrained`] method.
|
||||
|
||||
```py
|
||||
>>> with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
... model.save_pretrained(tmp_dir, max_shard_size="5GB")
|
||||
... new_model = AutoModel.from_pretrained(tmp_dir)
|
||||
```
|
||||
|
||||
The main advantage of sharded checkpoints for big models is that each shard is loaded after the previous one, which caps the memory usage to only the model size and the largest shard size.
|
||||
|
||||
You could also directly load a sharded checkpoint inside a model without the [`~PreTrainedModel.from_pretrained`] method (similar to PyTorch's `load_state_dict()` method for a full checkpoint). In this case, use the [`~modeling_utils.load_sharded_checkpoint`] method.
|
||||
|
||||
```py
|
||||
>>> from transformers.modeling_utils import load_sharded_checkpoint
|
||||
|
||||
>>> with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
... model.save_pretrained(tmp_dir, max_shard_size="5GB")
|
||||
... load_sharded_checkpoint(model, tmp_dir)
|
||||
```
|
||||
|
||||
### Shard metadata
|
||||
|
||||
The index file determines which keys are in the checkpoint and where the corresponding weights are stored. This file is loaded like any other JSON file and you can get a dictionary from it.
|
||||
|
||||
```py
|
||||
>>> import json
|
||||
|
||||
>>> with tempfile.TemporaryDirectory() as tmp_dir:
|
||||
... model.save_pretrained(tmp_dir, max_shard_size="5GB")
|
||||
... with open(os.path.join(tmp_dir, "model.safetensors.index.json"), "r") as f:
|
||||
... index = json.load(f)
|
||||
|
||||
>>> print(index.keys())
|
||||
dict_keys(['metadata', 'weight_map'])
|
||||
```
|
||||
|
||||
The `metadata` key provides the total model size.
|
||||
|
||||
```py
|
||||
>>> index["metadata"]
|
||||
{'total_size': 28966928384}
|
||||
```
|
||||
|
||||
The `weight_map` key maps each parameter name (as it appears in a PyTorch model's `state_dict`) to the shard it's stored in.
|
||||
|
||||
```py
|
||||
>>> index["weight_map"]
|
||||
{'lm_head.weight': 'model-00006-of-00006.safetensors',
|
||||
'model.embed_tokens.weight': 'model-00001-of-00006.safetensors',
|
||||
'model.layers.0.input_layernorm.weight': 'model-00001-of-00006.safetensors',
|
||||
'model.layers.0.mlp.down_proj.weight': 'model-00001-of-00006.safetensors',
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
## Accelerate's Big Model Inference
|
||||
|
||||
> [!TIP]
|
||||
> Make sure you have Accelerate v0.9.0 or later and PyTorch v1.9.0 or later installed.
|
||||
|
||||
From Transformers v4.20.0, the [`~PreTrainedModel.from_pretrained`] method is supercharged with Accelerate's [Big Model Inference](https://hf.co/docs/accelerate/usage_guides/big_modeling) feature to efficiently handle really big models! Big Model Inference creates a *model skeleton* on PyTorch's [**meta**](https://pytorch.org/docs/main/meta.html) device. The randomly initialized parameters are only created when the pretrained weights are loaded. This way, you aren't keeping two copies of the model in memory at the same time (one for the randomly initialized model and one for the pretrained weights), and the maximum memory consumed is only the full model size.
|
||||
|
||||
To enable Big Model Inference in Transformers, set `low_cpu_mem_usage=True` in the [`~PreTrainedModel.from_pretrained`] method.
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM
|
||||
|
||||
gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", low_cpu_mem_usage=True)
|
||||
```
|
||||
|
||||
Accelerate automatically dispatches the model weights across all available devices, starting with the fastest device (GPU) first and then offloading to the slower devices (CPU and even hard drive). This is enabled by setting `device_map="auto"` in the [`~PreTrainedModel.from_pretrained`] method. When you pass the `device_map` parameter, `low_cpu_mem_usage` is automatically set to `True` so you don't need to specify it.
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM
|
||||
|
||||
# these loading methods are equivalent
|
||||
gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto")
|
||||
gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto", low_cpu_mem_usage=True)
|
||||
```
|
||||
|
||||
You can also write your own `device_map` by mapping each layer to a device. It should map all model parameters to a device, but you don't have to detail where all the submodules of a layer go if the entire layer is on the same device.
|
||||
|
||||
```python
|
||||
device_map = {"model.layers.1": 0, "model.layers.14": 1, "model.layers.31": "cpu", "lm_head": "disk"}
|
||||
```
|
||||
|
||||
Access the `hf_device_map` attribute to see how Accelerate split the model across devices.
|
||||
|
||||
```py
|
||||
gemma.hf_device_map
|
||||
```
|
||||
|
||||
```python out
|
||||
{'model.embed_tokens': 0,
|
||||
'model.layers.0': 0,
|
||||
'model.layers.1': 0,
|
||||
'model.layers.2': 0,
|
||||
'model.layers.3': 0,
|
||||
'model.layers.4': 0,
|
||||
'model.layers.5': 0,
|
||||
'model.layers.6': 0,
|
||||
'model.layers.7': 0,
|
||||
'model.layers.8': 0,
|
||||
'model.layers.9': 0,
|
||||
'model.layers.10': 0,
|
||||
'model.layers.11': 0,
|
||||
'model.layers.12': 0,
|
||||
'model.layers.13': 0,
|
||||
'model.layers.14': 'cpu',
|
||||
'model.layers.15': 'cpu',
|
||||
'model.layers.16': 'cpu',
|
||||
'model.layers.17': 'cpu',
|
||||
'model.layers.18': 'cpu',
|
||||
'model.layers.19': 'cpu',
|
||||
'model.layers.20': 'cpu',
|
||||
'model.layers.21': 'cpu',
|
||||
'model.layers.22': 'cpu',
|
||||
'model.layers.23': 'cpu',
|
||||
'model.layers.24': 'cpu',
|
||||
'model.layers.25': 'cpu',
|
||||
'model.layers.26': 'cpu',
|
||||
'model.layers.27': 'cpu',
|
||||
'model.layers.28': 'cpu',
|
||||
'model.layers.29': 'cpu',
|
||||
'model.layers.30': 'cpu',
|
||||
'model.layers.31': 'cpu',
|
||||
'model.norm': 'cpu',
|
||||
'lm_head': 'cpu'}
|
||||
```
|
||||
|
||||
## Model data type
|
||||
|
||||
PyTorch model weights are normally instantiated as torch.float32 and it can be an issue if you try to load a model as a different data type. For example, you'd need twice as much memory to load the weights in torch.float32 and then again to load them in your desired data type, like torch.float16.
|
||||
|
||||
> [!WARNING]
|
||||
> Due to how PyTorch is designed, the `torch_dtype` parameter only supports floating data types.
|
||||
|
||||
To avoid wasting memory like this, explicitly set the `torch_dtype` parameter to the desired data type or set `torch_dtype="auto"` to load the weights with the most optimal memory pattern (the data type is automatically derived from the model weights).
|
||||
|
||||
<hfoptions id="dtype">
|
||||
<hfoption id="specific dtype">
|
||||
|
||||
```py
import torch
from transformers import AutoModelForCausalLM

gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", torch_dtype=torch.float16)
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="auto dtype">
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM
|
||||
|
||||
gemma = AutoModelForCausalLM.from_pretrained("google/gemma-7b", torch_dtype="auto")
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
You can also set the data type to use for models instantiated from scratch.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from transformers import AutoConfig, AutoModel
|
||||
|
||||
my_config = AutoConfig.from_pretrained("google/gemma-2b", torch_dtype=torch.float16)
|
||||
model = AutoModel.from_config(my_config)
|
||||
```
|
||||
(deleted file, 96 lines)
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Caching
|
||||
|
||||
Imagine you’re having a conversation with someone, and instead of remembering what they previously said, they have to start from scratch every time you respond. This would be slow and inefficient, right?
|
||||
|
||||
You can extend this analogy to transformer models. Autoregressive model generation can be slow because it makes a prediction one token at a time. Each new prediction is dependent on all the previous context.
|
||||
|
||||
To predict the 1000th token, the model requires information from the previous 999 tokens. The information is represented as matrix multiplications across the token representations.
|
||||
|
||||
To predict the 1001st token, you need the same information from the previous 999 tokens in addition to any information from the 1000th token. This is a lot of matrix multiplications a model has to compute over and over for each token!
|
||||
|
||||
A key-value (KV) cache eliminates this inefficiency by storing kv pairs derived from the attention layers of previously processed tokens. The stored kv pairs are retrieved from the cache and reused for subsequent tokens, avoiding the need to recompute.
|
||||
|
||||
> [!WARNING]
|
||||
> Caching should only be used for **inference**. It may cause unexpected errors if it's enabled during training.
|
||||
|
||||
## Cache class
|
||||
|
||||
When you use Transformers' [`Cache`] class, the self-attention module performs several critical steps to integrate past and present information.
|
||||
|
||||
1. The attention module concatenates current kv pairs with past kv pairs stored in the cache. This creates attention weights with the shape `(new_tokens_length, past_kv_length + new_tokens_length)`. The current and past kv pairs are essentially combined to compute the attention scores, ensuring a model is aware of previous context and the current input.
|
||||
|
||||
2. When the `forward` method is called iteratively, it's crucial that the attention mask shape matches the combined length of the past and current kv pairs. The attention mask should have the shape `(batch_size, past_kv_length + new_tokens_length)`. This is typically handled internally in [`~GenerationMixin.generate`], but if you want to implement your own generation loop with [`Cache`], keep this in mind! The attention mask should hold the past and current token values.
|
||||
|
||||
3. It is also important to be aware of the `cache_position`. This is important if you want to reuse a prefilled [`Cache`] with the `forward` method because you have to pass a valid `cache_position` value. This indicates the input positions in a sequence. `cache_position` is unaffected by padding, and it always adds one more position for each token. For example, if a kv cache contains 10 tokens - regardless of pad tokens - the cache position for the next token should be `torch.tensor([10])`.
|
||||
|
||||
The example below demonstrates how to create a generation loop with [`DynamicCache`]. As discussed, the attention mask is a concatenation of past and current token values and `1` is added to the cache position for the next token.
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
|
||||
|
||||
model_id = "meta-llama/Llama-2-7b-chat-hf"
|
||||
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="cuda:0")
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
||||
|
||||
past_key_values = DynamicCache()
|
||||
messages = [{"role": "user", "content": "Hello, what's your name."}]
|
||||
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt", return_dict=True).to("cuda:0")
|
||||
|
||||
generated_ids = inputs.input_ids
|
||||
cache_position = torch.arange(inputs.input_ids.shape[1], dtype=torch.int64, device="cuda:0")
|
||||
max_new_tokens = 10
|
||||
|
||||
for _ in range(max_new_tokens):
|
||||
outputs = model(**inputs, cache_position=cache_position, past_key_values=past_key_values, use_cache=True)
|
||||
# Greedily sample one next token
|
||||
next_token_ids = outputs.logits[:, -1:].argmax(-1)
|
||||
generated_ids = torch.cat([generated_ids, next_token_ids], dim=-1)
|
||||
    # Prepare inputs for the next generation step by keeping only the unprocessed tokens (here, just the one new token)
|
||||
# and expanding attn mask for the new token, as explained above
|
||||
attention_mask = inputs["attention_mask"]
|
||||
attention_mask = torch.cat([attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1)
|
||||
inputs = {"input_ids": next_token_ids, "attention_mask": attention_mask}
|
||||
cache_position = cache_position[-1:] + 1 # add one more position for the next token
|
||||
|
||||
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0])
|
||||
"[INST] Hello, what's your name. [/INST] Hello! My name is LLaMA,"
|
||||
```
|
||||
|
||||
## Legacy cache format
|
||||
|
||||
Before the [`Cache`] class, the cache used to be stored as a tuple of tuples of tensors. This format is dynamic because it grows as text is generated, similar to [`DynamicCache`].
|
||||
|
||||
If your project depends on this legacy format, you can convert between [`DynamicCache`] and a tuple of tuples as shown below with the [`~DynamicCache.from_legacy_cache`] and [`DynamicCache.to_legacy_cache`] functions. This is helpful if you have custom logic for manipulating a cache in a specific format.
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM, DynamicCache
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
|
||||
model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto")
|
||||
inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
|
||||
|
||||
# `return_dict_in_generate=True` is required to return the cache and `return_legacy_cache` forces the returned cache
|
||||
# in the legacy format
|
||||
generation_outputs = model.generate(**inputs, return_dict_in_generate=True, return_legacy_cache=True, max_new_tokens=5)
|
||||
|
||||
cache = DynamicCache.from_legacy_cache(generation_outputs.past_key_values)
|
||||
legacy_format_cache = cache.to_legacy_cache()
|
||||
```
|
||||
@ -1,299 +0,0 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Tools and RAG
|
||||
|
||||
The [`~PreTrainedTokenizerBase.apply_chat_template`] method supports virtually any additional argument types - strings, lists, dicts - besides the chat message. This makes it possible to use chat templates for many use cases.
|
||||
|
||||
This guide will demonstrate how to use chat templates with tools and retrieval-augmented generation (RAG).
|
||||
|
||||
## Tools
|
||||
|
||||
Tools are functions a large language model (LLM) can call to perform specific tasks. It is a powerful way to extend the capabilities of conversational agents with real-time information, computational tools, or access to large databases.
|
||||
|
||||
Follow the rules below when creating a tool.
|
||||
|
||||
1. The function should have a descriptive name.
|
||||
2. The function arguments must have a type hint in the function header (don't include in the `Args` block).
|
||||
3. The function must have a [Google-style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) docstring.
|
||||
4. The function can have a return type and `Returns` block, but these are optional because most tool use models ignore them.
|
||||
|
||||
An example tool to get temperature and wind speed is shown below.
|
||||
|
||||
```py
|
||||
def get_current_temperature(location: str, unit: str) -> float:
|
||||
"""
|
||||
Get the current temperature at a location.
|
||||
|
||||
Args:
|
||||
location: The location to get the temperature for, in the format "City, Country"
|
||||
unit: The unit to return the temperature in. (choices: ["celsius", "fahrenheit"])
|
||||
Returns:
|
||||
The current temperature at the specified location in the specified units, as a float.
|
||||
"""
|
||||
return 22. # A real function should probably actually get the temperature!
|
||||
|
||||
def get_current_wind_speed(location: str) -> float:
|
||||
"""
|
||||
Get the current wind speed in km/h at a given location.
|
||||
|
||||
Args:
|
||||
location: The location to get the temperature for, in the format "City, Country"
|
||||
Returns:
|
||||
The current wind speed at the given location in km/h, as a float.
|
||||
"""
|
||||
return 6. # A real function should probably actually get the wind speed!
|
||||
|
||||
tools = [get_current_temperature, get_current_wind_speed]
|
||||
```
|
||||
|
||||
Load a model and tokenizer that support tool use, like [NousResearch/Hermes-2-Pro-Llama-3-8B](https://hf.co/NousResearch/Hermes-2-Pro-Llama-3-8B). You can also consider a larger model like [Command-R](./model_doc/cohere) or [Mixtral-8x22B](./model_doc/mixtral) if your hardware supports it.
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("NousResearch/Hermes-2-Pro-Llama-3-8B")
model = AutoModelForCausalLM.from_pretrained("NousResearch/Hermes-2-Pro-Llama-3-8B", torch_dtype=torch.bfloat16, device_map="auto")
|
||||
```
|
||||
|
||||
Create a chat message.
|
||||
|
||||
```py
|
||||
messages = [
|
||||
{"role": "system", "content": "You are a bot that responds to weather queries. You should reply with the unit used in the queried location."},
|
||||
{"role": "user", "content": "Hey, what's the temperature in Paris right now?"}
|
||||
]
|
||||
```
|
||||
|
||||
Pass `messages` and a list of tools to [`~PreTrainedTokenizerBase.apply_chat_template`]. Then you can pass the inputs to the model for generation.
|
||||
|
||||
```py
|
||||
inputs = tokenizer.apply_chat_template(messages, tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
|
||||
inputs = {k: v.to(model.device) for k, v in inputs.items()}  # move tensors to the model's device
|
||||
outputs = model.generate(**inputs, max_new_tokens=128)
|
||||
print(tokenizer.decode(outputs[0][len(inputs["input_ids"][0]):]))
|
||||
```
|
||||
|
||||
```txt
|
||||
<tool_call>
|
||||
{"arguments": {"location": "Paris, France", "unit": "celsius"}, "name": "get_current_temperature"}
|
||||
</tool_call><|im_end|>
|
||||
```
|
||||
|
||||
The chat model called the `get_current_temperature` tool with the correct parameters from the docstring. It inferred France as the location based on Paris, and that it should use Celsius for the units of temperature.
|
||||
|
||||
Now append the `get_current_temperature` function and these arguments to the chat message as `tool_call`. The `tool_call` dictionary should be provided to the `assistant` role instead of the `system` or `user`.
|
||||
|
||||
> [!WARNING]
|
||||
> The OpenAI API uses a JSON string as its `tool_call` format. This may cause errors or strange model behavior if used in Transformers, which expects a dict.
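If you are adapting tool calls produced by an OpenAI-compatible client, a minimal sketch of the conversion (assuming the arguments arrive as a JSON string) looks like this.

```py
import json

# Hypothetical OpenAI-style payload: `arguments` is a JSON string, not a dict
openai_arguments = '{"location": "Paris, France", "unit": "celsius"}'
arguments = json.loads(openai_arguments)  # parse into the dict format Transformers expects
```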
|
||||
|
||||
<hfoptions id="tool-call">
|
||||
<hfoption id="Llama">
|
||||
|
||||
```py
|
||||
tool_call = {"name": "get_current_temperature", "arguments": {"location": "Paris, France", "unit": "celsius"}}
|
||||
messages.append({"role": "assistant", "tool_calls": [{"type": "function", "function": tool_call}]})
|
||||
```
|
||||
|
||||
Allow the assistant to read the function outputs and chat with the user.
|
||||
|
||||
```py
|
||||
inputs = tokenizer.apply_chat_template(messages, tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
|
||||
inputs = {k: v.to(model.device) for k, v in inputs.items()}  # move tensors to the model's device
|
||||
out = model.generate(**inputs, max_new_tokens=128)
|
||||
print(tokenizer.decode(out[0][len(inputs["input_ids"][0]):]))
|
||||
```
|
||||
|
||||
```txt
|
||||
The temperature in Paris, France right now is approximately 12°C (53.6°F).<|im_end|>
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="Mistral/Mixtral">
|
||||
|
||||
For [Mistral](./model_doc/mistral) and [Mixtral](./model_doc/mixtral) models, you need an additional `tool_call_id`. The `tool_call_id` is 9 randomly generated alphanumeric characters assigned to the `id` key in the `tool_call` dictionary.
|
||||
|
||||
```py
|
||||
tool_call_id = "9Ae3bDc2F"
|
||||
tool_call = {"name": "get_current_temperature", "arguments": {"location": "Paris, France", "unit": "celsius"}}
|
||||
messages.append({"role": "assistant", "tool_calls": [{"type": "function", "id": tool_call_id, "function": tool_call}]})
|
||||
```
|
||||
|
||||
```py
|
||||
inputs = tokenizer.apply_chat_template(messages, tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
|
||||
inputs = {k: v.to(model.device) for k, v in inputs.items()}  # move tensors to the model's device
|
||||
out = model.generate(**inputs, max_new_tokens=128)
|
||||
print(tokenizer.decode(out[0][len(inputs["input_ids"][0]):]))
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## Schema
|
||||
|
||||
[`~PreTrainedTokenizerBase.apply_chat_template`] converts functions into a [JSON schema](https://json-schema.org/learn/getting-started-step-by-step) which is passed to the chat template. An LLM never sees the code inside the function. In other words, an LLM doesn't care how the function works internally; it only cares about the function **definition** and **arguments**.
|
||||
|
||||
The JSON schema is automatically generated behind the scenes as long as your function follows the [rules](#tools) listed earlier. But you can use [get_json_schema](https://github.com/huggingface/transformers/blob/14561209291255e51c55260306c7d00c159381a5/src/transformers/utils/chat_template_utils.py#L205) to generate the schema manually for more visibility or easier debugging.
|
||||
|
||||
```py
|
||||
from transformers.utils import get_json_schema
|
||||
|
||||
def multiply(a: float, b: float):
|
||||
"""
|
||||
A function that multiplies two numbers
|
||||
|
||||
Args:
|
||||
a: The first number to multiply
|
||||
b: The second number to multiply
|
||||
"""
|
||||
return a * b
|
||||
|
||||
schema = get_json_schema(multiply)
|
||||
print(schema)
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "multiply",
|
||||
"description": "A function that multiplies two numbers",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"a": {
|
||||
"type": "number",
|
||||
"description": "The first number to multiply"
|
||||
},
|
||||
"b": {
|
||||
"type": "number",
|
||||
"description": "The second number to multiply"
|
||||
}
|
||||
},
|
||||
"required": ["a", "b"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
You can edit the schema or write one entirely from scratch. This gives you a lot of flexibility to define precise schemas for more complex functions.
|
||||
|
||||
> [!WARNING]
|
||||
> Try to keep your function signatures simple and the number of arguments to a minimum. These are easier for a model to understand and use than complex functions, for example ones with nested arguments.
|
||||
|
||||
The example below demonstrates writing a schema manually and then passing it to [`~PreTrainedTokenizerBase.apply_chat_template`].
|
||||
|
||||
```py
|
||||
# A simple function that takes no arguments
|
||||
current_time = {
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "current_time",
|
||||
"description": "Get the current local time as a string.",
|
||||
"parameters": {
|
||||
'type': 'object',
|
||||
'properties': {}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# A more complete function that takes two numerical arguments
|
||||
multiply = {
|
||||
'type': 'function',
|
||||
'function': {
|
||||
'name': 'multiply',
|
||||
'description': 'A function that multiplies two numbers',
|
||||
'parameters': {
|
||||
'type': 'object',
|
||||
'properties': {
|
||||
'a': {
|
||||
'type': 'number',
|
||||
'description': 'The first number to multiply'
|
||||
},
|
||||
'b': {
|
||||
'type': 'number', 'description': 'The second number to multiply'
|
||||
}
|
||||
},
|
||||
'required': ['a', 'b']
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
model_input = tokenizer.apply_chat_template(
|
||||
messages,
|
||||
tools = [current_time, multiply]
|
||||
)
|
||||
```
|
||||
|
||||
## RAG
|
||||
|
||||
Retrieval-augmented generation (RAG) enhances a model's existing knowledge by allowing it to search documents for additional information before responding to a query. For RAG models, add a `documents` parameter to [`~PreTrainedTokenizerBase.apply_chat_template`]. This `documents` parameter should be a list of documents, and each document should be a single dict with `title` and `content` keys.
|
||||
|
||||
> [!TIP]
|
||||
> The `documents` parameter for RAG isn't widely supported and many models have chat templates that ignore `documents`. Verify if a model supports `documents` by reading its model card or executing `print(tokenizer.chat_template)` to see if the `documents` key is present. [Command-R](https://hf.co/CohereForAI/c4ai-command-r-08-2024) and [Command-R+](https://hf.co/CohereForAI/c4ai-command-r-plus-08-2024) both support `documents` in their RAG chat templates.
|
||||
|
||||
Create a list of documents to pass to the model.
|
||||
|
||||
```py
|
||||
documents = [
|
||||
{
|
||||
"title": "The Moon: Our Age-Old Foe",
|
||||
"text": "Man has always dreamed of destroying the moon. In this essay, I shall..."
|
||||
},
|
||||
{
|
||||
"title": "The Sun: Our Age-Old Friend",
|
||||
"text": "Although often underappreciated, the sun provides several notable benefits..."
|
||||
}
|
||||
]
|
||||
```
|
||||
|
||||
Set `chat_template="rag"` in [`~PreTrainedTokenizerBase.apply_chat_template`] and generate a response.
|
||||
|
||||
```py
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
|
||||
# Load the model and tokenizer
|
||||
tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01-4bit")
|
||||
model = AutoModelForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01-4bit", device_map="auto")
|
||||
device = model.device # Get the device the model is loaded on
|
||||
|
||||
# Define conversation input
|
||||
conversation = [
|
||||
{"role": "user", "content": "What has Man always dreamed of?"}
|
||||
]
|
||||
|
||||
input_ids = tokenizer.apply_chat_template(
|
||||
conversation=conversation,
|
||||
documents=documents,
|
||||
chat_template="rag",
|
||||
tokenize=True,
|
||||
add_generation_prompt=True,
|
||||
return_tensors="pt").to(device)
|
||||
|
||||
# Generate a response
|
||||
generated_tokens = model.generate(
|
||||
input_ids,
|
||||
max_new_tokens=100,
|
||||
do_sample=True,
|
||||
temperature=0.3,
|
||||
)
|
||||
|
||||
# Decode and print the generated text along with generation prompt
|
||||
generated_text = tokenizer.decode(generated_tokens[0])
|
||||
print(generated_text)
|
||||
```
|
||||
File diff suppressed because it is too large
@ -1,272 +0,0 @@
|
||||
<!--Copyright 2025 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Multimodal templates
|
||||
|
||||
Multimodal chat templates expect a similar [template](./chat_templating) to text-only models. They need a `messages` list where each message is a dictionary with `role` and `content` keys.
|
||||
|
||||
Multimodal templates are included in the [Processor](./processors) class and require an additional `type` key specifying whether the included content is an image, video, or text.
|
||||
|
||||
This guide will show you how to format chat templates for multimodal models as well as some best practices for configuring the template.
|
||||
|
||||
## ImageTextToTextPipeline
|
||||
|
||||
[`ImageTextToTextPipeline`] is a high-level image and text generation class with a “chat mode”. Chat mode is enabled when a conversational model is detected and the chat prompt is [properly formatted](./llm_tutorial#wrong-prompt-format).
|
||||
|
||||
Start by building a chat history with the following two roles.
|
||||
|
||||
- `system` describes how the model should behave and respond when you’re chatting with it. This role isn’t supported by all chat models.
|
||||
- `user` is where you enter your first message to the model.
|
||||
|
||||
```py
|
||||
messages = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": [{"type": "text", "text": "You are a friendly chatbot who always responds in the style of a pirate"}],
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
|
||||
{"type": "text", "text": "What are these?"},
|
||||
],
|
||||
},
|
||||
]
|
||||
```
|
||||
|
||||
Create an [`ImageTextToTextPipeline`] and pass the chat to it. For large models, setting [device_map="auto"](./models#big-model-inference) helps load the model quicker and automatically places it on the fastest device available. Changing the data type to [torch.bfloat16](./models#model-data-type) also helps save memory.
|
||||
|
||||
> [!TIP]
|
||||
> The [`ImageTextToTextPipeline`] accepts chats in the OpenAI format to make inference easier and more accessible.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from transformers import pipeline
|
||||
|
||||
pipeline = pipeline("image-text-to-text", model="llava-hf/llava-onevision-qwen2-0.5b-ov-hf", device="cuda", torch_dtype=torch.float16)
|
||||
pipeline(text=messages, max_new_tokens=50, return_full_text=False)
|
||||
[{'input_text': [{'role': 'system',
|
||||
'content': [{'type': 'text',
|
||||
'text': 'You are a friendly chatbot who always responds in the style of a pirate'}]},
|
||||
{'role': 'user',
|
||||
'content': [{'type': 'image',
|
||||
'url': 'http://images.cocodataset.org/val2017/000000039769.jpg'},
|
||||
{'type': 'text', 'text': 'What are these?'}]}],
|
||||
'generated_text': 'The image shows two cats lying on a pink surface, which appears to be a cushion or a soft blanket. The cat on the left has a striped coat, typical of tabby cats, and is lying on its side with its head resting on the'}]
|
||||
```
|
||||
|
||||
## Image inputs
|
||||
|
||||
For multimodal models that accept images like [LLaVA](./model_doc/llava), include the following in `content` as shown below.
|
||||
|
||||
- The content `"type"` can be an `"image"` or `"text"`.
|
||||
- For images, it can be a link to the image (`"url"`), a file path (`"path"`), or `"base64"`. Images are automatically loaded, processed, and prepared into pixel values as inputs to the model.
|
||||
|
||||
```python
|
||||
from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration
|
||||
|
||||
model = LlavaOnevisionForConditionalGeneration.from_pretrained("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")
|
||||
processor = AutoProcessor.from_pretrained("llava-hf/llava-onevision-qwen2-0.5b-ov-hf")
|
||||
|
||||
messages = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": [{"type": "text", "text": "You are a friendly chatbot who always responds in the style of a pirate"}],
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image", "url": "http://images.cocodataset.org/val2017/000000039769.jpg"},
|
||||
{"type": "text", "text": "What are these?"},
|
||||
],
|
||||
},
|
||||
]
|
||||
```
|
||||
|
||||
Pass `messages` to [`~ProcessorMixin.apply_chat_template`] to tokenize the input content and return the `input_ids` and `pixel_values`.
|
||||
|
||||
```py
|
||||
processed_chat = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt")
|
||||
print(processed_chat.keys())
|
||||
```
|
||||
|
||||
These inputs are now ready to be used in [`~GenerationMixin.generate`].
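For instance, a minimal sketch of the generation and decoding step, reusing `processed_chat`, `model`, and `processor` from above (the `max_new_tokens=50` budget is just illustrative):

```py
# Generate from the processed chat inputs and decode the result
generated_ids = model.generate(**processed_chat, max_new_tokens=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```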
|
||||
|
||||
## Video inputs
|
||||
|
||||
Some vision models also support video inputs. The message format is very similar to the format for [image inputs](#image-inputs).
|
||||
|
||||
- The content `"type"` should be `"video"` to indicate that the content is a video.
|
||||
- For videos, it can be a link to the video (`"url"`) or a file path (`"path"`). Videos loaded from a URL can only be decoded with [PyAV](https://pyav.basswood-io.com/docs/stable/) or [Decord](https://github.com/dmlc/decord).
|
||||
|
||||
> [!WARNING]
|
||||
> Loading a video from `"url"` is only supported by the PyAV or Decord backends.
|
||||
|
||||
```python
|
||||
from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration
|
||||
|
||||
model_id = "llava-hf/llava-onevision-qwen2-0.5b-ov-hf"
|
||||
model = LlavaOnevisionForConditionalGeneration.from_pretrained(model_id)
|
||||
processor = AutoProcessor.from_pretrained(model_id)
|
||||
|
||||
messages = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": [{"type": "text", "text": "You are a friendly chatbot who always responds in the style of a pirate"}],
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "video", "url": "https://test-videos.co.uk/vids/bigbuckbunny/mp4/h264/720/Big_Buck_Bunny_720_10s_10MB.mp4"},
|
||||
{"type": "text", "text": "What do you see in this video?"},
|
||||
],
|
||||
},
|
||||
]
|
||||
```
|
||||
|
||||
Pass `messages` to [`~ProcessorMixin.apply_chat_template`] to tokenize the input content. There are a few extra parameters to include in [`~ProcessorMixin.apply_chat_template`] that control the sampling process.
|
||||
|
||||
The `video_load_backend` parameter refers to a specific framework to load a video. It supports [PyAV](https://pyav.basswood-io.com/docs/stable/), [Decord](https://github.com/dmlc/decord), [OpenCV](https://github.com/opencv/opencv), and [torchvision](https://pytorch.org/vision/stable/index.html).
|
||||
|
||||
The examples below use Decord as the backend because it is a bit faster than PyAV.
|
||||
|
||||
<hfoptions id="sampling">
|
||||
<hfoption id="fixed number of frames">
|
||||
|
||||
The `num_frames` parameter controls how many frames to uniformly sample from the video. Each checkpoint has a maximum frame count it was pretrained with and exceeding this count can significantly lower generation quality. It's important to choose a frame count that fits both the model capacity and your hardware resources. If `num_frames` isn't specified, the entire video is loaded without any frame sampling.
|
||||
|
||||
|
||||
```python
|
||||
processed_chat = processor.apply_chat_template(
|
||||
messages,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
return_tensors="pt",
|
||||
num_frames=32,
|
||||
video_load_backend="decord",
|
||||
)
|
||||
print(processed_chat.keys())
|
||||
```
|
||||
|
||||
These inputs are now ready to be used in [`~GenerationMixin.generate`].
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="fps">
|
||||
|
||||
For longer videos, it may be better to sample more frames for better representation with the `video_fps` parameter. This determines how many frames per second to extract. As an example, if a video is 10 seconds long and `video_fps=2`, then the model samples 20 frames. In other words, 2 frames are uniformly sampled each second.
|
||||
|
||||
```py
|
||||
processed_chat = processor.apply_chat_template(
|
||||
messages,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
video_fps=32,
|
||||
video_load_backend="decord",
|
||||
)
|
||||
print(processed_chat.keys())
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="custom frame sampling">
|
||||
|
||||
Some models don't sample frames *uniformly* and require more complex logic to determine which frames to use. For example, a model may use *adaptive frame selection* or prioritize *key moments* in a video rather than evenly spaced frames.
|
||||
|
||||
If a model has a different sampling strategy, you can write a function that customizes frame selection. The function should include the following requirements.
|
||||
|
||||
- Use the `sample_indices_fn` parameter to pass a callable function for sampling.
|
||||
- If provided, this function *overrides* the standard `num_frames` and `fps` parameters.
|
||||
- The function receives all the parameters passed to `load_video` and must return valid frame indices to sample from.
|
||||
|
||||
An example function is shown below. This gives you full control over frame selection, making the model more adaptable to different video scenarios.
|
||||
|
||||
```py
|
||||
def sample_indices_fn(metadata, **kwargs):
|
||||
# samples only the first and the second frame
|
||||
return [0, 1]
|
||||
|
||||
processed_chat = processor.apply_chat_template(
|
||||
messages,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
sample_indices_fn=sample_indices_fn,
|
||||
video_load_backend="decord",
|
||||
)
|
||||
print(processed_chat.keys())
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="list of image frames">
|
||||
|
||||
Videos may also exist as a set of sampled frames stored as images rather than the full video file.
|
||||
|
||||
In this case, pass a list of image file paths and the processor automatically concatenates them into a video. Make sure all images are the same size since they are assumed to be from the same video.
|
||||
|
||||
```py
|
||||
frames_paths = ["/path/to/frame0.png", "/path/to/frame5.png", "/path/to/frame10.png"]
|
||||
messages = [
|
||||
{
|
||||
"role": "system",
|
||||
"content": [{"type": "text", "text": "You are a friendly chatbot who always responds in the style of a pirate"}],
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "video", "path": frames_paths},
|
||||
{"type": "text", "text": "What do you see in this video?"},
|
||||
],
|
||||
},
|
||||
]
|
||||
|
||||
processed_chat = processor.apply_chat_template(
|
||||
messages,
|
||||
add_generation_prompt=True,
|
||||
tokenize=True,
|
||||
return_dict=True,
|
||||
)
|
||||
print(processed_chat.keys())
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## Template configuration
|
||||
|
||||
You can create a custom chat template with [Jinja](https://jinja.palletsprojects.com/en/3.1.x/templates/) and set it with [`~ProcessorMixin.apply_chat_template`]. Refer to the [Template writing](./chat_templating_writing) guide for more details.
|
||||
|
||||
For example, to enable a template to handle a *list of content* from multiple modalities while still supporting plain strings for text-only inference, specify how to handle the `content['type']` if it is an image or text as shown below in the Llama 3.2 Vision Instruct [template](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct/blob/main/chat_template.json).
|
||||
|
||||
```jinja
|
||||
{% for message in messages %}
|
||||
{% if loop.index0 == 0 %}{{ bos_token }}{% endif %}
|
||||
{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' }}
|
||||
{% if message['content'] is string %}
|
||||
{{ message['content'] }}
|
||||
{% else %}
|
||||
{% for content in message['content'] %}
|
||||
{% if content['type'] == 'image' %}
|
||||
{{ '<|image|>' }}
|
||||
{% elif content['type'] == 'text' %}
|
||||
{{ content['text'] }}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{{ '<|eot_id|>' }}
|
||||
{% endfor %}
|
||||
{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}
|
||||
```
|
||||
@ -1,251 +0,0 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Template writing
|
||||
|
||||
A chat template is a [Jinja](https://jinja.palletsprojects.com/en/3.1.x/templates/) template stored in the tokenizer's [chat_template](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.PreTrainedTokenizer.chat_template) attribute. Jinja is a templating language that allows you to write Python-like code and syntax. A chat template performs the following three roles.
|
||||
|
||||
1. Print the role enclosed in `<|` and `|>` (`<|user|>`, `<|assistant|>`, etc.).
|
||||
2. Print the message followed by an end-of-sequence (`EOS`) token.
|
||||
3. Print the assistant token if [add_generation_prompt=True](./chat_templating#add_generation_prompt) so the model generates an assistant response.
|
||||
|
||||
An example template is shown below.
|
||||
|
||||
```jinja
|
||||
{%- for message in messages %}
|
||||
{{- '<|' + message['role'] + '|>\n' }}
|
||||
{{- message['content'] + eos_token }}
|
||||
{%- endfor %}
|
||||
{%- if add_generation_prompt %}
|
||||
{{- '<|assistant|>\n' }}
|
||||
{%- endif %}
|
||||
```
|
||||
|
||||
The template can be customized to handle more complex use cases. This guide will show you how to add and edit templates and includes template writing tips.
|
||||
|
||||
## Create a template
|
||||
|
||||
Create a template by writing a Jinja template and then setting it as the chat template in the tokenizer. For example, the template below adds `[ASST]` and `[/ASST]` tags to the assistant messages.
|
||||
|
||||
```jinja
|
||||
{%- for message in messages %}
|
||||
{%- if message['role'] == 'user' %}
|
||||
{{- bos_token + '[INST] ' + message['content'].strip() + ' [/INST]' }}
|
||||
{%- elif message['role'] == 'system' %}
|
||||
{{- '<<SYS>>\\n' + message['content'].strip() + '\\n<</SYS>>\\n\\n' }}
|
||||
{%- elif message['role'] == 'assistant' %}
|
||||
{{- '[ASST] ' + message['content'] + ' [/ASST]' + eos_token }}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
```
|
||||
|
||||
Set the template in the tokenizer, and the next time you use [`~PreTrainedTokenizerBase.apply_chat_template`], the new template is used.
|
||||
|
||||
```py
|
||||
template = tokenizer.chat_template
|
||||
template = template.replace("SYS", "SYSTEM") # Change the system token
|
||||
tokenizer.chat_template = template # Set the new template
|
||||
```
|
||||
|
||||
The template is saved in the `tokenizer_config.json` file. Upload it to the Hub with [`~PreTrainedTokenizer.push_to_hub`] so you can reuse it later and make sure everyone is using the right template for your model.
|
||||
|
||||
```py
|
||||
tokenizer.push_to_hub("model_name")
|
||||
```
|
||||
|
||||
## Template writing tips
|
||||
|
||||
The easiest way to start writing Jinja templates is to refer to existing templates. Use `print(tokenizer.chat_template)` on any chat model to see what template it's using. Try starting with simple models that don't call any tools or support RAG. Finally, take a look at the [Jinja documentation](https://jinja.palletsprojects.com/en/3.1.x/templates/#synopsis) for more details about formatting and syntax.
|
||||
|
||||
This section curates some best practices for writing clean and efficient Jinja templates.
|
||||
|
||||
### Trimming whitespace
|
||||
|
||||
Jinja prints any whitespace before or after a block of text. This can be an issue for chat templates because whitespace usage should be intentional. Add `-` to strip any whitespace before a block.
|
||||
|
||||
```jinja
|
||||
{%- for message in messages %}
|
||||
{{- message['role'] + message['content'] }}
|
||||
{%- endfor %}
|
||||
```
|
||||
|
||||
The incorrect whitespace usage example below may introduce a newline and indentation in the output.
|
||||
|
||||
```jinja
|
||||
{% for message in messages %}
|
||||
{{ message['role'] + message['content'] }}
|
||||
{% endfor %}
|
||||
```
|
||||
|
||||
### Special variables
|
||||
|
||||
There are five special variables available inside a template. You can pass virtually any additional arguments to [`~PreTrainedTokenizerBase.apply_chat_template`] and they will be available inside the template as variables. However, you should try to keep the number of variables to the five below to make it easier for users to use the chat model without writing custom code to handle model-specific arguments.
|
||||
|
||||
- `messages` contains the chat history as a list of message dicts.
|
||||
- `tools` contains a list of tools in JSON schema format.
|
||||
- `documents` contains a list of documents with the format `{"title": Title, "contents": "Contents"}` (designed for RAG models).
|
||||
- `add_generation_prompt` is a boolean that determines whether to add an assistant header at the end of the conversation.
|
||||
- `bos_token` and `eos_token` are special tokens extracted from a tokenizer's `special_tokens_map`.
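A minimal sketch of how a few of these variables surface in practice is shown below; the checkpoint is just an example, and the template is deliberately tiny.

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")  # any chat tokenizer works here
# A tiny template that only uses `messages`, `eos_token`, and `add_generation_prompt`
tokenizer.chat_template = (
    "{%- for message in messages %}"
    "{{- '<|' + message['role'] + '|>\\n' + message['content'] + eos_token + '\\n' }}"
    "{%- endfor %}"
    "{%- if add_generation_prompt %}{{- '<|assistant|>\\n' }}{%- endif %}"
)
print(tokenizer.apply_chat_template([{"role": "user", "content": "Hi!"}], tokenize=False, add_generation_prompt=True))
```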
|
||||
|
||||
### Callable functions
|
||||
|
||||
There are two callable functions available inside a template.
|
||||
|
||||
- `raise_exception(msg)` raises a `TemplateException`. This is useful for debugging or warning users about incorrect template usage.
|
||||
- `strftime_now(format_str)` retrieves the current date and time in a specific format which could be useful to include in system messages. It is equivalent to [datetime.now().strftime(format_str)](https://docs.python.org/3/library/datetime.html#datetime.datetime.now) in Python.
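For example, a template can stamp the conversation with the current date via `strftime_now` or reject unexpected roles with `raise_exception`. The sketch below continues with the `tokenizer` from the previous example.

```py
tokenizer.chat_template = (
    "{{- 'Today is ' + strftime_now('%Y-%m-%d') + '.\\n' }}"
    "{%- for message in messages %}"
    "{%- if message['role'] not in ['system', 'user', 'assistant'] %}"
    "{{- raise_exception('Unsupported role: ' + message['role']) }}"
    "{%- endif %}"
    "{{- message['content'] + '\\n' }}"
    "{%- endfor %}"
)
print(tokenizer.apply_chat_template([{"role": "user", "content": "Hi!"}], tokenize=False))
```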
|
||||
|
||||
### Compatibility with non-Python Jinja
|
||||
|
||||
Jinja is implemented in multiple languages and they generally have the same syntax. Writing a template in Python allows you to use Python methods such as [lower](https://docs.python.org/3/library/stdtypes.html#str.lower) on strings or [items](https://docs.python.org/3/library/stdtypes.html#dict.items) on dicts. But this won't work if the template is used in a non-Python implementation, for example, when deploying with Javascript or Rust.
|
||||
|
||||
Make the changes below to ensure compatibility across all Jinja implementations.
|
||||
|
||||
- Replace Python methods with Jinja filters. For example, replace `string.lower()` with `string|lower` or `dict.items()` with `dict|dictitems`. Most of the changes follow the same pattern except `string.strip()`, which is replaced with `string|trim`. Refer to the list of [built-in filters](https://jinja.palletsprojects.com/en/3.1.x/templates/#builtin-filters) for a complete list of filters.
|
||||
- Replace `True`, `False`, and `None` (these are Python specific) with `true`, `false`, and `none` respectively.
|
||||
- Directly rendering a dict or list may return different results in other implementations. For example, string entries may change from single-quote to double-quote. To avoid this, add the [tojson](https://jinja.palletsprojects.com/en/3.1.x/templates/#jinja-filters.tojson) filter to maintain consistency.
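For example, the two templates below render the same output under Python-based Jinja, but only the second, filter-based version is portable to JavaScript or Rust implementations (a sketch, again reusing `tokenizer` from above).

```py
# Python-only: str methods such as .lower() and .strip() don't exist in non-Python Jinja engines
python_only = "{%- for m in messages %}{{- m['role'].lower() + ': ' + m['content'].strip() + '\\n' }}{%- endfor %}"

# Portable: built-in Jinja filters work across implementations
portable = "{%- for m in messages %}{{- (m['role']|lower) + ': ' + (m['content']|trim) + '\\n' }}{%- endfor %}"

tokenizer.chat_template = portable
```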
|
||||
|
||||
### Big templates
|
||||
|
||||
Newer models or models with features like [tool-calling](./chat_extras#tools) and [RAG](./chat_extras#retrieval-augmented-generation-rag) require larger templates that can be longer than 100 lines. It may be easier to write larger templates in a separate file. The line numbers in the separate file correspond exactly to the line numbers in template parsing or execution errors, making it easier to debug any potential issues.
|
||||
|
||||
Write the template in a separate file and extract it to the chat template.
|
||||
|
||||
```py
|
||||
open("template.jinja", "w").write(tokenizer.chat_template)
|
||||
```
|
||||
|
||||
You could also load an edited template back into the tokenizer.
|
||||
|
||||
```py
|
||||
tokenizer.chat_template = open("template.jinja").read()
|
||||
```
|
||||
|
||||
## Templates for tools
|
||||
|
||||
There isn't a specific format for writing templates for tools but it is best to follow the standard API. This ensures the template is widely accessible across models without requiring users to write custom code to use tools with your model.
|
||||
|
||||
> [!WARNING]
|
||||
> Formatting such as whitespace and special tokens are model-specific. Make sure everything exactly matches the format a model was trained with.
|
||||
|
||||
The following section lists elements of the standard API for writing templates for tools.
|
||||
|
||||
### Tool definitions
|
||||
|
||||
Transformers chat template methods allow a user to pass tools as Python functions or a JSON schema. When functions are passed, a JSON schema is automatically generated and passed to the template. The `tools` variable in a template always takes a list of JSON schemas.
|
||||
|
||||
The specific tokens and tool descriptions should match the ones your model was trained with. Your model doesn't need to understand the JSON schema input because your template can translate the JSON schema into your model's format. For example, [Command-R](./model_doc/cohere) was trained with tools defined with Python function headers, but the Command-R tool template accepts JSON schemas. The template internally converts types and renders the input tools as Python headers.
|
||||
|
||||
```json
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "multiply",
|
||||
"description": "A function that multiplies two numbers",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"a": {
|
||||
"type": "number",
|
||||
"description": "The first number to multiply"
|
||||
},
|
||||
"b": {
|
||||
"type": "number",
|
||||
"description": "The second number to multiply"
|
||||
}
|
||||
},
|
||||
"required": ["a", "b"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
An example for handling tool definitions in a chat template is shown below. The specific tokens and tool descriptions should be changed to match the ones a model was trained with.
|
||||
|
||||
```
|
||||
{%- if tools %}
|
||||
{%- for tool in tools %}
|
||||
{{- '<tool>' + tool['function']['name'] + '\n' }}
|
||||
{%- for argument in tool['function']['parameters']['properties'] %}
|
||||
{{- argument + ': ' + tool['function']['parameters']['properties'][argument]['description'] + '\n' }}
|
||||
{%- endfor %}
|
||||
{{- '\n</tool>' }}
|
||||
    {%- endfor %}
|
||||
{%- endif %}
|
||||
```
|
||||
|
||||
### Tool calls
|
||||
|
||||
Tool calls, if present, are attached to a message with the `"assistant"` role. They are always provided as a list, even though most tool-calling models only support single tool calls, which means the list usually contains a single element.
|
||||
|
||||
```json
|
||||
{
|
||||
"role": "assistant",
|
||||
"tool_calls": [
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "multiply",
|
||||
"arguments": {
|
||||
"a": 5,
|
||||
"b": 6
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
A common pattern for handling tool calls is shown below.
|
||||
|
||||
```
|
||||
{%- if message['role'] == 'assistant' and 'tool_calls' in message %}
|
||||
{%- for tool_call in message['tool_calls'] %}
|
||||
{{- '<tool_call>' + tool_call['function']['name'] + '\n' + tool_call['function']['arguments']|tojson + '\n</tool_call>' }}
|
||||
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
```
|
||||
|
||||
### Tool responses
|
||||
|
||||
Tool responses are a message dict with the `role`, `name` (name of the function) and `content` (result of the tool call) keys.
|
||||
|
||||
```json
|
||||
{
|
||||
"role": "tool",
|
||||
"name": "multiply",
|
||||
"content": "30"
|
||||
}
|
||||
```
|
||||
|
||||
Not all the keys need to be used in the tool response. For example, if a model doesn’t expect the function name to be included in the tool response, then you can just include the `role` and `content`.
|
||||
|
||||
```
|
||||
{%- if message['role'] == 'tool' %}
|
||||
{{- "<tool_result>" + message['content'] + "</tool_result>" }}
|
||||
{%- endif %}
|
||||
```
|
||||
|
||||
## Contribute
|
||||
|
||||
Add a chat template by setting the `chat_template` attribute in the tokenizer and testing it with [`~PreTrainedTokenizerBase.apply_chat_template`]. If it works as expected, then you can upload it to the Hub with [`~PreTrainedTokenizer.push_to_hub`].
|
||||
|
||||
Even if you're not the model owner, it is still helpful to add a template for a model with an empty chat template or a model that is using a default class template. Open a [pull request](https://hf.co/docs/hub/repositories-pull-requests-discussions) on the model repository to add the template.
|
||||
|
||||
```py
|
||||
tokenizer.chat_template = template
|
||||
tokenizer.push_to_hub("model_name")
|
||||
```
|
||||
@ -14,65 +14,61 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Chat basics
|
||||
# Chatting with Transformers
|
||||
|
||||
Chat models are conversational models you can send and receive messages from. There are many chat models available to choose from, and in general, larger models tend to be better, though that's not always the case. The model size is often included in the name, like "8B" or "70B", and it describes the number of parameters. Mixture-of-experts (MoE) models have names like "8x7B" or "141B-A35B", which correspond to roughly 56B and 141B parameters respectively. You can try quantizing larger models to reduce memory requirements; otherwise you'll need ~2 bytes of memory per parameter.
|
||||
If you're reading this article, you're almost certainly aware of **chat models**. Chat models are conversational
|
||||
AIs that you can send and receive messages with. The most famous of these is the proprietary ChatGPT, but there are
|
||||
now many open-source chat models which match or even substantially exceed its performance. These models are free to
|
||||
download and run on a local machine. Although the largest and most capable models require high-powered hardware
|
||||
and lots of memory to run, there are smaller models that will run perfectly well on a single consumer GPU, or even
|
||||
an ordinary desktop or notebook CPU.
|
||||
|
||||
Check model leaderboards like [OpenLLM](https://hf.co/spaces/HuggingFaceH4/open_llm_leaderboard) and [LMSys Chatbot Arena](https://chat.lmsys.org/?leaderboard) to further help you identify the best chat models for your use case. Models that are specialized in certain domains (medical, legal text, non-English languages, etc.) may sometimes outperform larger general purpose models.
|
||||
This guide will help you get started with chat models. We'll start with a brief quickstart guide that uses a convenient,
|
||||
high-level "pipeline". This is all you need if you just want to start running a chat model
|
||||
immediately. After the quickstart, we'll move on to more detailed information about
|
||||
what exactly chat models are, how to choose an appropriate one, and a low-level breakdown of each of the
|
||||
steps involved in talking to a chat model. We'll also give some tips on optimizing the performance and memory usage
|
||||
of your chat models.
|
||||
|
||||
> [!TIP]
|
||||
> Chat with a number of open-source models for free on [HuggingChat](https://hf.co/chat/)!
|
||||
|
||||
This guide shows you how to quickly start chatting with Transformers from the command line, how to build and format a conversation, and how to chat using the [`TextGenerationPipeline`].
|
||||
## Quickstart
|
||||
|
||||
## transformers-cli
|
||||
If you have no time for details, here's the brief summary: Chat models continue chats. This means that you pass them
|
||||
a conversation history, which can be as short as a single user message, and the model will continue the conversation
|
||||
by adding its response. Let's see this in action. First, let's build a chat:
|
||||
|
||||
Chat with a model directly from the command line as shown below. It launches an interactive session with a model. Enter `clear` to reset the conversation, `exit` to terminate the session, and `help` to display all the command options.
|
||||
|
||||
```bash
|
||||
transformers-cli chat --model_name_or_path Qwen/Qwen2.5-0.5B-Instruct
|
||||
```
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/transformers-chat-cli.png"/>
|
||||
</div>
|
||||
|
||||
For a full list of options, run the command below.
|
||||
|
||||
```bash
|
||||
transformers-cli chat -h
|
||||
```
|
||||
|
||||
The chat is implemented on top of the [AutoClass](./model_doc/auto), using tooling from [text generation](./llm_tutorial) and [chat](./chat_templating).
|
||||
|
||||
## TextGenerationPipeline
|
||||
|
||||
[`TextGenerationPipeline`] is a high-level text generation class with a "chat mode". Chat mode is enabled when a conversational model is detected and the chat prompt is [properly formatted](./llm_tutorial#wrong-prompt-format).
|
||||
|
||||
To start, build a chat history with the following two roles.
|
||||
|
||||
- `system` describes how the model should behave and respond when you're chatting with it. This role isn't supported by all chat models.
|
||||
- `user` is where you enter your first message to the model.
|
||||
|
||||
```py
|
||||
```python
|
||||
chat = [
|
||||
{"role": "system", "content": "You are a sassy, wise-cracking robot as imagined by Hollywood circa 1986."},
|
||||
{"role": "user", "content": "Hey, can you tell me any fun things to do in New York?"}
|
||||
]
|
||||
```
|
||||
|
||||
Create the [`TextGenerationPipeline`] and pass `chat` to it. For large models, setting [device_map="auto"](./models#big-model-inference) helps load the model quicker and automatically places it on the fastest device available. Changing the data type to [torch.bfloat16](./models#model-data-type) also helps save memory.
|
||||
Notice that in addition to the user's message, we added a **system** message at the start of the conversation. Not all
|
||||
chat models support system messages, but when they do, they represent high-level directives about how the model
|
||||
should behave in the conversation. You can use this to guide the model - whether you want short or long responses,
|
||||
lighthearted or serious ones, and so on. If you want the model to do useful work instead of
|
||||
practicing its improv routine, you can either omit the system message or try a terse one such as "You are a helpful and intelligent
|
||||
AI assistant who responds to user queries."
|
||||
|
||||
```py
|
||||
Once you have a chat, the quickest way to continue it is using the [`TextGenerationPipeline`].
|
||||
Let's see this in action with `LLaMA-3`. Note that `LLaMA-3` is a gated model, which means you will need to
|
||||
[apply for access](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct) and log in with your Hugging Face
|
||||
account to use it. We'll also use `device_map="auto"`, which will load the model on GPU if there's enough memory
|
||||
for it, and set the dtype to `torch.bfloat16` to save memory:
|
||||
|
||||
```python
|
||||
import torch
|
||||
from transformers import pipeline
|
||||
|
||||
pipeline = pipeline(task="text-generation", model="meta-llama/Meta-Llama-3-8B-Instruct", torch_dtype=torch.bfloat16, device_map="auto")
|
||||
response = pipeline(chat, max_new_tokens=512)
|
||||
print(response[0]["generated_text"][-1]["content"])
|
||||
pipe = pipeline("text-generation", "meta-llama/Meta-Llama-3-8B-Instruct", torch_dtype=torch.bfloat16, device_map="auto")
|
||||
response = pipe(chat, max_new_tokens=512)
|
||||
print(response[0]['generated_text'][-1]['content'])
|
||||
```
|
||||
|
||||
```txt
|
||||
And you'll get:
|
||||
|
||||
```text
|
||||
(sigh) Oh boy, you're asking me for advice? You're gonna need a map, pal! Alright,
|
||||
alright, I'll give you the lowdown. But don't say I didn't warn you, I'm a robot, not a tour guide!
|
||||
|
||||
@ -95,18 +91,22 @@ So, there you have it, pal! That's my expert advice on what to do in New York. N
|
||||
excuse me, I've got some oil changes to attend to. (winks)
|
||||
```
|
||||
|
||||
Use the `append` method on `chat` to respond to the model's message.
|
||||
You can continue the chat by appending your own response to it. The
|
||||
`response` object returned by the pipeline actually contains the entire chat so far, so we can simply append
|
||||
a message and pass it back:
|
||||
|
||||
```py
|
||||
chat = response[0]["generated_text"]
|
||||
```python
|
||||
chat = response[0]['generated_text']
|
||||
chat.append(
|
||||
{"role": "user", "content": "Wait, what's so wild about soup cans?"}
|
||||
)
|
||||
response = pipeline(chat, max_new_tokens=512)
|
||||
print(response[0]["generated_text"][-1]["content"])
|
||||
response = pipe(chat, max_new_tokens=512)
|
||||
print(response[0]['generated_text'][-1]['content'])
|
||||
```
|
||||
|
||||
```txt
|
||||
And you'll get:
|
||||
|
||||
```text
|
||||
(laughs) Oh, you're killin' me, pal! You don't get it, do you? Warhol's soup cans are like, art, man!
|
||||
It's like, he took something totally mundane, like a can of soup, and turned it into a masterpiece. It's
|
||||
like, "Hey, look at me, I'm a can of soup, but I'm also a work of art!"
|
||||
@ -120,35 +120,171 @@ But, hey, you're not alone, pal. I mean, I'm a robot, and even I don't get it. (
|
||||
But, hey, that's what makes art, art, right? (laughs)
|
||||
```
|
||||
|
||||
## Performance
|
||||
The remainder of this tutorial will cover specific topics such
|
||||
as performance and memory, or how to select a chat model for your needs.
|
||||
|
||||
Transformers loads models in full precision by default, and for an 8B model, this requires ~32GB of memory! Reduce memory usage by loading the model in half-precision or bfloat16 (only ~2 bytes per parameter). You can even quantize the model to a lower precision like 8-bit or 4-bit with [bitsandbytes](https://hf.co/docs/bitsandbytes/index).
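As a back-of-the-envelope sketch (illustrative numbers only, weights-only, ignoring activations and the kv cache), memory scales linearly with the bytes used per parameter.

```py
# Rough weight-only memory estimate for an 8B parameter model
params = 8e9
for dtype, nbytes in {"float32": 4, "bfloat16": 2, "int8": 1, "int4": 0.5}.items():
    print(f"{dtype}: ~{params * nbytes / 1e9:.0f} GB")
```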
|
||||
## Choosing a chat model
|
||||
|
||||
> [!TIP]
|
||||
> Refer to the [Quantization](./quantization/overview) docs for more information about the different quantization backends available.
|
||||
There are an enormous number of different chat models available on the [Hugging Face Hub](https://huggingface.co/models?pipeline_tag=text-generation&sort=trending),
|
||||
and new users often feel very overwhelmed by the selection offered. Don't be, though! You really need to just focus on
|
||||
two important considerations:
|
||||
- The model's size, which will determine if you can fit it in memory and how quickly it will
|
||||
run.
|
||||
- The quality of the model's chat output.
|
||||
|
||||
Create a [`BitsAndBytesConfig`] with your desired quantization settings and pass it to the pipelines `model_kwargs` parameter. The example below quantizes a model to 8-bits.
|
||||
In general, these are correlated - bigger models tend to be
|
||||
more capable, but even so there's a lot of variation at a given size point!
|
||||
|
||||
```py
|
||||
from transformers import pipeline, BitsAndBytesConfig
|
||||
### Size and model naming
|
||||
The size of a model is easy to spot - it's the number in the model name, like "8B" or "70B". This is the number of
|
||||
**parameters** in the model. Without quantization, you should expect to need about 2 bytes of memory per parameter.
|
||||
This means that an "8B" model with 8 billion parameters will need about 16GB of memory just to fit the parameters,
|
||||
plus a little extra for other overhead. It's a good fit for a high-end consumer GPU with 24GB of memory, such as a 3090
|
||||
or 4090.
|
||||
|
||||
quantization_config = BitsAndBytesConfig(load_in_8bit=True)
|
||||
pipeline = pipeline(task="text-generation", model="meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto", model_kwargs={"quantization_config": quantization_config})
|
||||
Some chat models are "Mixture of Experts" models. These may list their sizes in different ways, such as "8x7B" or
|
||||
"141B-A35B". The numbers are a little fuzzier here, but in general you can read this as saying that the model
|
||||
has approximately 56 (8x7) billion parameters in the first case, or 141 billion parameters in the second case.
|
||||
|
||||
Note that it is very common to use quantization techniques to reduce the memory usage per parameter to 8 bits, 4 bits,
|
||||
or even less. This topic is discussed in more detail in the [Memory considerations](#memory-considerations) section below.
|
||||
|
||||
### But which chat model is best?
Even once you know the size of chat model you can run, there's still a lot of choice out there. One way to sift through
it all is to consult **leaderboards**. Two of the most popular leaderboards are the [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)
and the [LMSys Chatbot Arena Leaderboard](https://chat.lmsys.org/?leaderboard). Note that the LMSys leaderboard
also includes proprietary models - look at the `licence` column to identify open-source ones that you can download, then
search for them on the [Hugging Face Hub](https://huggingface.co/models?pipeline_tag=text-generation&sort=trending).

### Specialist domains
Some models may be specialized for certain domains, such as medical or legal text, or non-English languages.
If you're working in these domains, you may find that a specialized model will give you big performance benefits.
Don't automatically assume that, though! Particularly when specialized models are smaller or older than the current
cutting-edge, a top-end general-purpose model may still outclass them. Thankfully, we are beginning to see
[domain-specific leaderboards](https://huggingface.co/blog/leaderboard-medicalllm) that should make it easier to locate
the best models for specialized domains.

## What happens inside the pipeline?

The quickstart above used a high-level pipeline to chat with a chat model, which is convenient, but not the
most flexible. Let's take a more low-level approach, to see each of the steps involved in chat. We'll start with
a code sample, and then break it down:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Prepare the input as before
chat = [
    {"role": "system", "content": "You are a sassy, wise-cracking robot as imagined by Hollywood circa 1986."},
    {"role": "user", "content": "Hey, can you tell me any fun things to do in New York?"}
]

# 1: Load the model and tokenizer
model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto", torch_dtype=torch.bfloat16)
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")

# 2: Apply the chat template
formatted_chat = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
print("Formatted chat:\n", formatted_chat)

# 3: Tokenize the chat (This can be combined with the previous step using tokenize=True)
inputs = tokenizer(formatted_chat, return_tensors="pt", add_special_tokens=False)
# Move the tokenized inputs to the same device the model is on (GPU/CPU)
inputs = {key: tensor.to(model.device) for key, tensor in inputs.items()}
print("Tokenized inputs:\n", inputs)

# 4: Generate text from the model
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.1)
print("Generated tokens:\n", outputs)

# 5: Decode the output back to a string
decoded_output = tokenizer.decode(outputs[0][inputs['input_ids'].size(1):], skip_special_tokens=True)
print("Decoded output:\n", decoded_output)
```

There's a lot in here, each piece of which could be its own document! Rather than going into too much detail, I'll cover
the broad ideas, and leave the details for the linked documents. The key steps are:

1. [Models](https://huggingface.co/learn/nlp-course/en/chapter2/3) and [Tokenizers](https://huggingface.co/learn/nlp-course/en/chapter2/4?fw=pt) are loaded from the Hugging Face Hub.
2. The chat is formatted using the tokenizer's [chat template](https://huggingface.co/docs/transformers/main/en/chat_templating).
3. The formatted chat is [tokenized](https://huggingface.co/learn/nlp-course/en/chapter2/4) using the tokenizer.
4. We [generate](https://huggingface.co/docs/transformers/en/llm_tutorial) a response from the model.
5. The tokens output by the model are decoded back to a string.

## Performance, memory and hardware

In general, larger models are slower in addition to requiring more memory because text generation is bottlenecked by **memory bandwidth** instead of compute power. Each active parameter must be read from memory for every generated token. For a 16GB model, 16GB must be read from memory for every generated token.

The number of generated tokens/sec is proportional to the total memory bandwidth of the system divided by the model size. Depending on your hardware, total memory bandwidth can vary. Refer to the table below for approximate generation speeds for different hardware types.

| Hardware | Memory bandwidth |
|---|---|
| consumer CPU | 20-100GB/sec |
| specialized CPU (Intel Xeon, AMD Threadripper/Epyc, Apple silicon) | 200-900GB/sec |
| data center GPU (NVIDIA A100/H100) | 2-3TB/sec |

You probably know by now that most machine learning tasks are run on GPUs. However, it is entirely possible
to generate text from a chat model or language model on a CPU, albeit somewhat more slowly. If you can fit
the model in GPU memory, though, this will usually be the preferable option.

The easiest solution for improving generation speed is to either quantize a model or use hardware with higher memory bandwidth.
You can also try techniques like [speculative decoding](./generation_strategies#speculative-decoding), where a smaller model generates candidate tokens that are verified by the larger model. If the candidate tokens are correct, the larger model can generate more than one token per `forward` pass. This significantly alleviates the bandwidth bottleneck and improves generation speed.

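As a rough sketch of what this means in practice (using the ~16GB bfloat16 model from the quickstart and rough midpoints of the bandwidth ranges above; the exact numbers depend on your hardware), the upper bound on generation speed works out to:

```py
# Upper bound: tokens/sec ~= memory bandwidth / bytes read per generated token
model_size_gb = 16  # e.g. an "8B" model in bfloat16

approx_bandwidth_gb_per_sec = {
    "consumer CPU": 60,
    "specialized CPU / Apple silicon": 550,
    "data center GPU (A100/H100)": 2500,
}

for hardware, bandwidth in approx_bandwidth_gb_per_sec.items():
    print(f"{hardware}: up to ~{bandwidth / model_size_gb:.0f} tokens/sec")
```
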
### Memory considerations
|
||||
|
||||
By default, Hugging Face classes like [`TextGenerationPipeline`] or [`AutoModelForCausalLM`] will load the model in
|
||||
`float32` precision. This means that it will need 4 bytes (32 bits) per parameter, so an "8B" model with 8 billion
|
||||
parameters will need ~32GB of memory. However, this can be wasteful! Most modern language models are trained in
|
||||
"bfloat16" precision, which uses only 2 bytes per parameter. If your hardware supports it (Nvidia 30xx/Axxx
|
||||
or newer), you can load the model in `bfloat16` precision, using the `torch_dtype` argument as we did above.
|
||||
|
||||
It is possible to go even lower than 16-bits using "quantization", a method to lossily compress model weights. This
|
||||
allows each parameter to be squeezed down to 8 bits, 4 bits or even less. Note that, especially at 4 bits,
|
||||
the model's outputs may be negatively affected, but often this is a tradeoff worth making to fit a larger and more
|
||||
capable chat model in memory. Let's see this in action with `bitsandbytes`:
|
||||
|
||||
```python
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_8bit=True)  # You can also try load_in_4bit
model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto", quantization_config=quantization_config)
```

Or we can do the same thing using the `pipeline` API:

```python
from transformers import pipeline, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(load_in_8bit=True)  # You can also try load_in_4bit
pipe = pipeline("text-generation", "meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto", model_kwargs={"quantization_config": quantization_config})
```
|
||||
|
||||
There are several other options for quantizing models besides `bitsandbytes` - please see the [Quantization guide](./quantization)
|
||||
for more information.
|
||||
|
||||
### Performance considerations
|
||||
|
||||
<Tip>
|
||||
|
||||
For a more extensive guide on language model performance and optimization, check out [LLM Inference Optimization](./llm_optims).
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
As a general rule, larger chat models will be slower in addition to requiring more memory. It's possible to be
|
||||
more concrete about this, though: Generating text from a chat model is unusual in that it is bottlenecked by
|
||||
**memory bandwidth** rather than compute power, because every active parameter must be read from memory for each
|
||||
token that the model generates. This means that the number of tokens per second you can generate from a chat
|
||||
model is generally proportional to the total bandwidth of the memory it resides in, divided by the size of the model.
|
||||
|
||||
In our quickstart example above, our model was ~16GB in size when loaded in `bfloat16` precision.
|
||||
This means that 16GB must be read from memory for every token generated by the model. Total memory bandwidth can
|
||||
vary from 20-100GB/sec for consumer CPUs to 200-900GB/sec for consumer GPUs, specialized CPUs like
|
||||
Intel Xeon, AMD Threadripper/Epyc or high-end Apple silicon, and finally up to 2-3TB/sec for data center GPUs like
|
||||
the Nvidia A100 or H100. This should give you a good idea of the generation speed you can expect from these different
|
||||
hardware types.
|
||||
|
||||
Therefore, if you want to improve the speed of text generation, the easiest solution is to either reduce the
|
||||
size of the model in memory (usually by quantization), or get hardware with higher memory bandwidth. For advanced users,
|
||||
several other techniques exist to get around this bandwidth bottleneck. The most common are variants on
|
||||
[assisted generation](https://huggingface.co/blog/assisted-generation), also known as "speculative
|
||||
sampling". These techniques try to guess multiple future tokens at once, often using a smaller "draft model", and then
|
||||
confirm these generations with the chat model. If the guesses are validated by the chat model, more than one token can
|
||||
be generated per forward pass, which greatly alleviates the bandwidth bottleneck and improves generation speed.
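If you want to try this out, the `assistant_model` argument of [`~GenerationMixin.generate`] gives a minimal way to experiment with assisted generation. The sketch below assumes you pick a small draft checkpoint that shares the main model's tokenizer (the one named here is only an illustration):

```py
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

checkpoint = "meta-llama/Meta-Llama-3-8B-Instruct"
draft_checkpoint = "meta-llama/Llama-3.2-1B-Instruct"  # illustrative choice; any small model with the same tokenizer works

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", torch_dtype=torch.bfloat16)
assistant = AutoModelForCausalLM.from_pretrained(draft_checkpoint, device_map="auto", torch_dtype=torch.bfloat16)

inputs = tokenizer("The secret to a good New York slice is", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, assistant_model=assistant, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
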
|
||||
|
||||
Finally, we should also note the impact of "Mixture of Experts" (MoE) models here. Several popular chat models,
|
||||
such as Mixtral, Qwen-MoE and DBRX, are MoE models. In these models, not every parameter is active for every token generated.
|
||||
As a result, MoE models generally have much lower memory bandwidth requirements, even though their total size
|
||||
can be quite large. They can therefore be several times faster than a normal "dense" model of the same size. However,
|
||||
techniques like assisted generation are generally ineffective for these models because more parameters will become
|
||||
active with each new speculated token, which will negate the bandwidth and speed benefits that the MoE architecture
|
||||
provides.
|
||||
|
||||
> [!TIP]
|
||||
> Parameters may not be active for every generated token in MoE models such as [Mixtral](./model_doc/mixtral), [Qwen2MoE](./model_doc/qwen2_moe.md), and [DBRX](./model_doc/dbrx). As a result, MoE models generally have much lower memory bandwidth requirements and can be faster than a regular LLM of the same size. However, techniques like speculative decoding are ineffective with MoE models because parameters become activated with each new speculated token.
|
||||
|
||||
472  docs/source/en/create_a_model.md  (new file)
@ -0,0 +1,472 @@
|
||||
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Create a custom architecture
|
||||
|
||||
An [`AutoClass`](model_doc/auto) automatically infers the model architecture and downloads pretrained configuration and weights. Generally, we recommend using an `AutoClass` to produce checkpoint-agnostic code. But users who want more control over specific model parameters can create a custom 🤗 Transformers model from just a few base classes. This could be particularly useful for anyone who is interested in studying, training or experimenting with a 🤗 Transformers model. In this guide, dive deeper into creating a custom model without an `AutoClass`. Learn how to:
|
||||
|
||||
- Load and customize a model configuration.
|
||||
- Create a model architecture.
|
||||
- Create a slow and fast tokenizer for text.
|
||||
- Create an image processor for vision tasks.
|
||||
- Create a feature extractor for audio tasks.
|
||||
- Create a processor for multimodal tasks.
|
||||
|
||||
## Configuration
|
||||
|
||||
A [configuration](main_classes/configuration) refers to a model's specific attributes. Each model configuration has different attributes; for instance, all NLP models have the `hidden_size`, `num_attention_heads`, `num_hidden_layers` and `vocab_size` attributes in common. These attributes specify the number of attention heads or hidden layers to construct a model with.
|
||||
|
||||
Get a closer look at [DistilBERT](model_doc/distilbert) by accessing [`DistilBertConfig`] to inspect its attributes:
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertConfig
|
||||
|
||||
>>> config = DistilBertConfig()
|
||||
>>> print(config)
|
||||
DistilBertConfig {
|
||||
"activation": "gelu",
|
||||
"attention_dropout": 0.1,
|
||||
"dim": 768,
|
||||
"dropout": 0.1,
|
||||
"hidden_dim": 3072,
|
||||
"initializer_range": 0.02,
|
||||
"max_position_embeddings": 512,
|
||||
"model_type": "distilbert",
|
||||
"n_heads": 12,
|
||||
"n_layers": 6,
|
||||
"pad_token_id": 0,
|
||||
"qa_dropout": 0.1,
|
||||
"seq_classif_dropout": 0.2,
|
||||
"sinusoidal_pos_embds": false,
|
||||
"transformers_version": "4.16.2",
|
||||
"vocab_size": 30522
|
||||
}
|
||||
```
|
||||
|
||||
[`DistilBertConfig`] displays all the default attributes used to build a base [`DistilBertModel`]. All attributes are customizable, creating space for experimentation. For example, you can customize a default model to:
|
||||
|
||||
- Try a different activation function with the `activation` parameter.
|
||||
- Use a higher dropout ratio for the attention probabilities with the `attention_dropout` parameter.
|
||||
|
||||
```py
|
||||
>>> my_config = DistilBertConfig(activation="relu", attention_dropout=0.4)
|
||||
>>> print(my_config)
|
||||
DistilBertConfig {
|
||||
"activation": "relu",
|
||||
"attention_dropout": 0.4,
|
||||
"dim": 768,
|
||||
"dropout": 0.1,
|
||||
"hidden_dim": 3072,
|
||||
"initializer_range": 0.02,
|
||||
"max_position_embeddings": 512,
|
||||
"model_type": "distilbert",
|
||||
"n_heads": 12,
|
||||
"n_layers": 6,
|
||||
"pad_token_id": 0,
|
||||
"qa_dropout": 0.1,
|
||||
"seq_classif_dropout": 0.2,
|
||||
"sinusoidal_pos_embds": false,
|
||||
"transformers_version": "4.16.2",
|
||||
"vocab_size": 30522
|
||||
}
|
||||
```
|
||||
|
||||
Pretrained model attributes can be modified in the [`~PretrainedConfig.from_pretrained`] function:
|
||||
|
||||
```py
|
||||
>>> my_config = DistilBertConfig.from_pretrained("distilbert/distilbert-base-uncased", activation="relu", attention_dropout=0.4)
|
||||
```
|
||||
|
||||
Once you are satisfied with your model configuration, you can save it with [`~PretrainedConfig.save_pretrained`]. Your configuration file is stored as a JSON file in the specified save directory:
|
||||
|
||||
```py
|
||||
>>> my_config.save_pretrained(save_directory="./your_model_save_path")
|
||||
```
|
||||
|
||||
To reuse the configuration file, load it with [`~PretrainedConfig.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json")
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
You can also save your configuration file as a dictionary or even just the difference between your custom configuration attributes and the default configuration attributes! See the [configuration](main_classes/configuration) documentation for more details.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Model
|
||||
|
||||
The next step is to create a [model](main_classes/models). The model - also loosely referred to as the architecture - defines what each layer is doing and what operations are happening. Attributes like `num_hidden_layers` from the configuration are used to define the architecture. Every model shares the base class [`PreTrainedModel`] and a few common methods like resizing input embeddings and pruning self-attention heads. In addition, all models are also either a [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html), [`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) or [`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. This means models are compatible with each of their respective framework's usage.
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
Load your custom configuration attributes into the model:
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertModel
|
||||
|
||||
>>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json")
|
||||
>>> model = DistilBertModel(my_config)
|
||||
```
|
||||
|
||||
This creates a model with random values instead of pretrained weights. You won't be able to use this model for anything useful until you train it. Training is a costly and time-consuming process. It is generally better to use a pretrained model to obtain better results faster, while using only a fraction of the resources required for training.
|
||||
|
||||
Create a pretrained model with [`~PreTrainedModel.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
When you load pretrained weights, the default model configuration is automatically loaded if the model is provided by 🤗 Transformers. However, you can still replace - some or all of - the default model configuration attributes with your own if you'd like:
|
||||
|
||||
```py
|
||||
>>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config)
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
Load your custom configuration attributes into the model:
|
||||
|
||||
```py
|
||||
>>> from transformers import TFDistilBertModel
|
||||
|
||||
>>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json")
|
||||
>>> tf_model = TFDistilBertModel(my_config)
|
||||
```
|
||||
|
||||
This creates a model with random values instead of pretrained weights. You won't be able to use this model for anything useful until you train it. Training is a costly and time-consuming process. It is generally better to use a pretrained model to obtain better results faster, while using only a fraction of the resources required for training.
|
||||
|
||||
Create a pretrained model with [`~TFPreTrainedModel.from_pretrained`]:
|
||||
|
||||
```py
|
||||
>>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
When you load pretrained weights, the default model configuration is automatically loaded if the model is provided by 🤗 Transformers. However, you can still replace - some or all of - the default model configuration attributes with your own if you'd like:
|
||||
|
||||
```py
|
||||
>>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config)
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
### Model heads
|
||||
|
||||
At this point, you have a base DistilBERT model which outputs the *hidden states*. The hidden states are passed as inputs to a model head to produce the final output. 🤗 Transformers provides a different model head for each task as long as a model supports the task (i.e., you can't use DistilBERT for a sequence-to-sequence task like translation).
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
For example, [`DistilBertForSequenceClassification`] is a base DistilBERT model with a sequence classification head. The sequence classification head is a linear layer on top of the pooled outputs.
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertForSequenceClassification
|
||||
|
||||
>>> model = DistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Easily reuse this checkpoint for another task by switching to a different model head. For a question answering task, you would use the [`DistilBertForQuestionAnswering`] model head. The question answering head is similar to the sequence classification head except it is a linear layer on top of the hidden states output.
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertForQuestionAnswering
|
||||
|
||||
>>> model = DistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
</pt>
|
||||
<tf>
|
||||
For example, [`TFDistilBertForSequenceClassification`] is a base DistilBERT model with a sequence classification head. The sequence classification head is a linear layer on top of the pooled outputs.
|
||||
|
||||
```py
|
||||
>>> from transformers import TFDistilBertForSequenceClassification
|
||||
|
||||
>>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Easily reuse this checkpoint for another task by switching to a different model head. For a question answering task, you would use the [`TFDistilBertForQuestionAnswering`] model head. The question answering head is similar to the sequence classification head except it is a linear layer on top of the hidden states output.
|
||||
|
||||
```py
|
||||
>>> from transformers import TFDistilBertForQuestionAnswering
|
||||
|
||||
>>> tf_model = TFDistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
</tf>
|
||||
</frameworkcontent>
|
||||
|
||||
## Tokenizer
|
||||
|
||||
The last base class you need before using a model for textual data is a [tokenizer](main_classes/tokenizer) to convert raw text to tensors. There are two types of tokenizers you can use with 🤗 Transformers:
|
||||
|
||||
- [`PreTrainedTokenizer`]: a Python implementation of a tokenizer.
|
||||
- [`PreTrainedTokenizerFast`]: a tokenizer from our Rust-based [🤗 Tokenizer](https://huggingface.co/docs/tokenizers/python/latest/) library. This tokenizer type is significantly faster - especially during batch tokenization - due to its Rust implementation. The fast tokenizer also offers additional methods like *offset mapping* which maps tokens to their original words or characters.
|
||||
|
||||
Both tokenizers support common methods such as encoding and decoding, adding new tokens, and managing special tokens.
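For example, a minimal sketch of that shared API (shown here with the fast tokenizer, but the slow class behaves the same way):

```py
>>> from transformers import DistilBertTokenizerFast

>>> tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert/distilbert-base-uncased")
>>> encoding = tokenizer("Building custom architectures is fun!")
>>> tokens = tokenizer.convert_ids_to_tokens(encoding["input_ids"])
>>> text = tokenizer.decode(encoding["input_ids"], skip_special_tokens=True)
```
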
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Not every model supports a fast tokenizer. Take a look at this [table](index#supported-frameworks) to check if a model has fast tokenizer support.
|
||||
|
||||
</Tip>
|
||||
|
||||
If you trained your own tokenizer, you can create one from your *vocabulary* file:
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertTokenizer
|
||||
|
||||
>>> my_tokenizer = DistilBertTokenizer(vocab_file="my_vocab_file.txt", do_lower_case=False, padding_side="left")
|
||||
```
|
||||
|
||||
It is important to remember that the vocabulary of a custom tokenizer will be different from the vocabulary generated by a pretrained model's tokenizer. You need to use a pretrained model's vocabulary if you are using a pretrained model, otherwise the inputs won't make sense. Create a tokenizer with a pretrained model's vocabulary with the [`DistilBertTokenizer`] class:
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertTokenizer
|
||||
|
||||
>>> slow_tokenizer = DistilBertTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
Create a fast tokenizer with the [`DistilBertTokenizerFast`] class:
|
||||
|
||||
```py
|
||||
>>> from transformers import DistilBertTokenizerFast
|
||||
|
||||
>>> fast_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert/distilbert-base-uncased")
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
By default, [`AutoTokenizer`] will try to load a fast tokenizer. You can disable this behavior by setting `use_fast=False` in `from_pretrained`.
|
||||
|
||||
</Tip>
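If you specifically want the slow Python implementation, a minimal sketch of that flag looks like this:

```py
>>> from transformers import AutoTokenizer

>>> slow_tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased", use_fast=False)
```
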
|
||||
|
||||
## Image processor
|
||||
|
||||
An image processor processes vision inputs. It inherits from the base [`~image_processing_utils.ImageProcessingMixin`] class.
|
||||
|
||||
To use, create an image processor associated with the model you're using. For example, create a default [`ViTImageProcessor`] if you are using [ViT](model_doc/vit) for image classification:
|
||||
|
||||
```py
|
||||
>>> from transformers import ViTImageProcessor
|
||||
|
||||
>>> vit_extractor = ViTImageProcessor()
|
||||
>>> print(vit_extractor)
|
||||
ViTImageProcessor {
|
||||
"do_normalize": true,
|
||||
"do_resize": true,
|
||||
"image_processor_type": "ViTImageProcessor",
|
||||
"image_mean": [
|
||||
0.5,
|
||||
0.5,
|
||||
0.5
|
||||
],
|
||||
"image_std": [
|
||||
0.5,
|
||||
0.5,
|
||||
0.5
|
||||
],
|
||||
"resample": 2,
|
||||
"size": 224
|
||||
}
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
If you aren't looking for any customization, just use the `from_pretrained` method to load a model's default image processor parameters.
|
||||
|
||||
</Tip>
|
||||
|
||||
Modify any of the [`ViTImageProcessor`] parameters to create your custom image processor:
|
||||
|
||||
```py
|
||||
>>> from transformers import ViTImageProcessor
|
||||
|
||||
>>> my_vit_extractor = ViTImageProcessor(resample="PIL.Image.BOX", do_normalize=False, image_mean=[0.3, 0.3, 0.3])
|
||||
>>> print(my_vit_extractor)
|
||||
ViTImageProcessor {
|
||||
"do_normalize": false,
|
||||
"do_resize": true,
|
||||
"image_processor_type": "ViTImageProcessor",
|
||||
"image_mean": [
|
||||
0.3,
|
||||
0.3,
|
||||
0.3
|
||||
],
|
||||
"image_std": [
|
||||
0.5,
|
||||
0.5,
|
||||
0.5
|
||||
],
|
||||
"resample": "PIL.Image.BOX",
|
||||
"size": 224
|
||||
}
|
||||
```
|
||||
|
||||
## Backbone
|
||||
|
||||
<div style="text-align: center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/Backbone.png">
|
||||
</div>
|
||||
|
||||
Computer vision models consist of a backbone, neck, and head. The backbone extracts features from an input image, the neck combines and enhances the extracted features, and the head is used for the main task (e.g., object detection). Start by initializing a backbone in the model config and specify whether you want to load pretrained weights or load randomly initialized weights. Then you can pass the model config to the model head.
|
||||
|
||||
For example, to load a [ResNet](../model_doc/resnet) backbone into a [MaskFormer](../model_doc/maskformer) model with an instance segmentation head:
|
||||
|
||||
<hfoptions id="backbone">
|
||||
<hfoption id="pretrained weights">
|
||||
|
||||
Set `use_pretrained_backbone=True` to load pretrained ResNet weights for the backbone.
|
||||
|
||||
```py
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation
|
||||
|
||||
config = MaskFormerConfig(backbone="microsoft/resnet-50", use_pretrained_backbone=True) # backbone and neck config
|
||||
model = MaskFormerForInstanceSegmentation(config) # head
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="random weights">
|
||||
|
||||
Set `use_pretrained_backbone=False` to randomly initialize a ResNet backbone.
|
||||
|
||||
```py
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation
|
||||
|
||||
config = MaskFormerConfig(backbone="microsoft/resnet-50", use_pretrained_backbone=False) # backbone and neck config
|
||||
model = MaskFormerForInstanceSegmentation(config) # head
|
||||
```
|
||||
|
||||
You could also load the backbone config separately and then pass it to the model config.
|
||||
|
||||
```py
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, ResNetConfig
|
||||
|
||||
backbone_config = ResNetConfig()
|
||||
config = MaskFormerConfig(backbone_config=backbone_config)
|
||||
model = MaskFormerForInstanceSegmentation(config)
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
[timm](https://hf.co/docs/timm/index) models are loaded within a model with `use_timm_backbone=True` or with [`TimmBackbone`] and [`TimmBackboneConfig`].
|
||||
|
||||
Use `use_timm_backbone=True` and `use_pretrained_backbone=True` to load pretrained timm weights for the backbone.
|
||||
|
||||
```python
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation
|
||||
|
||||
config = MaskFormerConfig(backbone="resnet50", use_pretrained_backbone=True, use_timm_backbone=True) # backbone and neck config
|
||||
model = MaskFormerForInstanceSegmentation(config) # head
|
||||
```
|
||||
|
||||
Set `use_timm_backbone=True` and `use_pretrained_backbone=False` to load a randomly initialized timm backbone.
|
||||
|
||||
```python
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation
|
||||
|
||||
config = MaskFormerConfig(backbone="resnet50", use_pretrained_backbone=False, use_timm_backbone=True) # backbone and neck config
|
||||
model = MaskFormerForInstanceSegmentation(config) # head
|
||||
```
|
||||
|
||||
You could also load the backbone config and use it to create a `TimmBackbone` or pass it to the model config. Timm backbones will load pretrained weights by default. Set `use_pretrained_backbone=False` to load randomly initialized weights.
|
||||
|
||||
```python
|
||||
from transformers import TimmBackboneConfig, TimmBackbone
|
||||
|
||||
backbone_config = TimmBackboneConfig("resnet50", use_pretrained_backbone=False)
|
||||
|
||||
# Create a backbone class
|
||||
backbone = TimmBackbone(config=backbone_config)
|
||||
|
||||
# Create a model with a timm backbone
|
||||
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation
|
||||
|
||||
config = MaskFormerConfig(backbone_config=backbone_config)
|
||||
model = MaskFormerForInstanceSegmentation(config)
|
||||
```
|
||||
|
||||
## Feature extractor
|
||||
|
||||
A feature extractor processes audio inputs. It inherits from the base [`~feature_extraction_utils.FeatureExtractionMixin`] class, and may also inherit from the [`SequenceFeatureExtractor`] class for processing audio inputs.
|
||||
|
||||
To use, create a feature extractor associated with the model you're using. For example, create a default [`Wav2Vec2FeatureExtractor`] if you are using [Wav2Vec2](model_doc/wav2vec2) for audio classification:
|
||||
|
||||
```py
|
||||
>>> from transformers import Wav2Vec2FeatureExtractor
|
||||
|
||||
>>> w2v2_extractor = Wav2Vec2FeatureExtractor()
|
||||
>>> print(w2v2_extractor)
|
||||
Wav2Vec2FeatureExtractor {
|
||||
"do_normalize": true,
|
||||
"feature_extractor_type": "Wav2Vec2FeatureExtractor",
|
||||
"feature_size": 1,
|
||||
"padding_side": "right",
|
||||
"padding_value": 0.0,
|
||||
"return_attention_mask": false,
|
||||
"sampling_rate": 16000
|
||||
}
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
If you aren't looking for any customization, just use the `from_pretrained` method to load a model's default feature extractor parameters.
|
||||
|
||||
</Tip>
|
||||
|
||||
Modify any of the [`Wav2Vec2FeatureExtractor`] parameters to create your custom feature extractor:
|
||||
|
||||
```py
|
||||
>>> from transformers import Wav2Vec2FeatureExtractor
|
||||
|
||||
>>> w2v2_extractor = Wav2Vec2FeatureExtractor(sampling_rate=8000, do_normalize=False)
|
||||
>>> print(w2v2_extractor)
|
||||
Wav2Vec2FeatureExtractor {
|
||||
"do_normalize": false,
|
||||
"feature_extractor_type": "Wav2Vec2FeatureExtractor",
|
||||
"feature_size": 1,
|
||||
"padding_side": "right",
|
||||
"padding_value": 0.0,
|
||||
"return_attention_mask": false,
|
||||
"sampling_rate": 8000
|
||||
}
|
||||
```
|
||||
|
||||
## Processor
|
||||
|
||||
For models that support multimodal tasks, 🤗 Transformers offers a processor class that conveniently wraps processing classes such as a feature extractor and a tokenizer into a single object. For example, let's use the [`Wav2Vec2Processor`] for an automatic speech recognition task (ASR). ASR transcribes audio to text, so you will need a feature extractor and a tokenizer.
|
||||
|
||||
Create a feature extractor to handle the audio inputs:
|
||||
|
||||
```py
|
||||
>>> from transformers import Wav2Vec2FeatureExtractor
|
||||
|
||||
>>> feature_extractor = Wav2Vec2FeatureExtractor(padding_value=1.0, do_normalize=True)
|
||||
```
|
||||
|
||||
Create a tokenizer to handle the text inputs:
|
||||
|
||||
```py
|
||||
>>> from transformers import Wav2Vec2CTCTokenizer
|
||||
|
||||
>>> tokenizer = Wav2Vec2CTCTokenizer(vocab_file="my_vocab_file.txt")
|
||||
```
|
||||
|
||||
Combine the feature extractor and tokenizer in [`Wav2Vec2Processor`]:
|
||||
|
||||
```py
|
||||
>>> from transformers import Wav2Vec2Processor
|
||||
|
||||
>>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
|
||||
```
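As a quick usage sketch (with a dummy one-second array of silence standing in for real audio, since the tokenizer above was built from a hypothetical vocabulary file):

```py
>>> import numpy as np

>>> dummy_audio = np.zeros(16000, dtype=np.float32)  # one second of silence at a 16kHz sampling rate
>>> audio_inputs = processor(dummy_audio, sampling_rate=16000, return_tensors="pt")
>>> text_labels = processor(text="hello world", return_tensors="pt")
```
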
|
||||
|
||||
With two basic classes - configuration and model - and an additional preprocessing class (tokenizer, image processor, feature extractor, or processor), you can create any of the models supported by 🤗 Transformers. Each of these base classes is configurable, allowing you to use the specific attributes you want. You can easily set up a model for training or modify an existing pretrained model to fine-tune.
|
||||
@ -1,4 +1,4 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
@ -14,33 +14,45 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Customizing models
|
||||
# Building custom models
|
||||
|
||||
Transformers models are designed to be customizable. A model's code is fully contained in the [model](https://github.com/huggingface/transformers/tree/main/src/transformers/models) subfolder of the Transformers repository. Each folder contains a `modeling.py` and a `configuration.py` file. Copy these files to start customizing a model.
|
||||
The 🤗 Transformers library is designed to be easily extensible. Every model is fully coded in a given subfolder
|
||||
of the repository with no abstraction, so you can easily copy a modeling file and tweak it to your needs.
|
||||
|
||||
> [!TIP]
|
||||
> It may be easier to start from scratch if you're creating an entirely new model. But for models that are very similar to an existing one in Transformers, it is faster to reuse or subclass the same configuration and model class.
|
||||
If you are writing a brand new model, it might be easier to start from scratch. In this tutorial, we will show you
|
||||
how to write a custom model and its configuration so it can be used inside Transformers, and how you can share it
|
||||
with the community (with the code it relies on) so that anyone can use it, even if it's not present in the 🤗
|
||||
Transformers library. We'll see how to build upon transformers and extend the framework with your hooks and
|
||||
custom code.
|
||||
|
||||
This guide will show you how to customize a ResNet model, enable [AutoClass](./models#autoclass) support, and share it on the Hub.
|
||||
We will illustrate all of this on a ResNet model, by wrapping the ResNet class of the
|
||||
[timm library](https://github.com/rwightman/pytorch-image-models) into a [`PreTrainedModel`].
|
||||
|
||||
## Configuration
|
||||
## Writing a custom configuration
|
||||
|
||||
A configuration, given by the base [`PretrainedConfig`] class, contains all the necessary information to build a model. This is where you'll configure the attributes of the custom ResNet model. Different attributes give different ResNet model types.
|
||||
Before we dive into the model, let's first write its configuration. The configuration of a model is an object that
|
||||
will contain all the necessary information to build the model. As we will see in the next section, the model can only
|
||||
take a `config` to be initialized, so we really need that object to be as complete as possible.
|
||||
|
||||
The main rules for customizing a configuration are:
|
||||
<Tip>
|
||||
|
||||
1. A custom configuration must subclass [`PretrainedConfig`]. This ensures a custom model has all the functionality of a Transformers' model such as [`~PretrainedConfig.from_pretrained`], [`~PretrainedConfig.save_pretrained`], and [`~PretrainedConfig.push_to_hub`].
|
||||
2. The [`PretrainedConfig`] `__init__` must accept any `kwargs` and they must be passed to the superclass `__init__`. [`PretrainedConfig`] has more fields than the ones set in your custom configuration, so when you load a configuration with [`~PretrainedConfig.from_pretrained`], those fields need to be accepted by your configuration and passed to the superclass.
|
||||
Models in the `transformers` library itself generally follow the convention that they accept a `config` object
|
||||
in their `__init__` method, and then pass the whole `config` to sub-layers in the model, rather than breaking the
|
||||
config object into multiple arguments that are all passed individually to sub-layers. Writing your model in this
|
||||
style results in simpler code with a clear "source of truth" for any hyperparameters, and also makes it easier
|
||||
to reuse code from other models in `transformers`.
|
||||
|
||||
> [!TIP]
|
||||
> It is useful to check the validity of some of the parameters. In the example below, a check is implemented to ensure `block_type` and `stem_type` belong to one of the predefined values.
|
||||
>
|
||||
> Add `model_type` to the configuration class to enable [AutoClass](./models#autoclass) support.
|
||||
</Tip>
|
||||
|
||||
In our example, we will take a couple of arguments of the ResNet class that we might want to tweak. Different
configurations will then give us the different types of ResNets that are possible. We then just store those arguments,
after checking the validity of a few of them.

```python
|
||||
from transformers import PretrainedConfig
|
||||
from typing import List
|
||||
|
||||
|
||||
class ResnetConfig(PretrainedConfig):
|
||||
model_type = "resnet"
|
||||
|
||||
@ -74,38 +86,56 @@ class ResnetConfig(PretrainedConfig):
|
||||
super().__init__(**kwargs)
|
||||
```
|
||||
|
||||
Save the configuration to a JSON file in your custom model folder, `custom-resnet`, with [`~PretrainedConfig.save_pretrained`].
|
||||
The three important things to remember when writing your own configuration are the following:
|
||||
- you have to inherit from `PretrainedConfig`,
|
||||
- the `__init__` of your `PretrainedConfig` must accept any kwargs,
|
||||
- those `kwargs` need to be passed to the superclass `__init__`.
|
||||
|
||||
The inheritance is to make sure you get all the functionality from the 🤗 Transformers library, while the two other
|
||||
constraints come from the fact a `PretrainedConfig` has more fields than the ones you are setting. When reloading a
|
||||
config with the `from_pretrained` method, those fields need to be accepted by your config and then sent to the
|
||||
superclass.
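Put together, a minimal configuration that respects these rules could look like the following (a simplified sketch with made-up attributes, not the full `ResnetConfig` used in this guide):

```python
from transformers import PretrainedConfig


class MyTinyConfig(PretrainedConfig):
    model_type = "my-tiny-model"  # only required if you register with the auto classes

    def __init__(self, hidden_size: int = 64, num_layers: int = 2, **kwargs):
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # Forward everything else (id2label, torch_dtype, ...) to PretrainedConfig
        super().__init__(**kwargs)
```
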
|
||||
|
||||
Defining a `model_type` for your configuration (here `model_type="resnet"`) is not mandatory, unless you want to
|
||||
register your model with the auto classes (see last section).
|
||||
|
||||
With this done, you can easily create and save your configuration like you would do with any other model config of the
|
||||
library. Here is how we can create a resnet50d config and save it:
|
||||
|
||||
```py
|
||||
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
|
||||
resnet50d_config.save_pretrained("custom-resnet")
|
||||
```
|
||||
|
||||
## Model
|
||||
This will save a file named `config.json` inside the folder `custom-resnet`. You can then reload your config with the
|
||||
`from_pretrained` method:
|
||||
|
||||
With the custom ResNet configuration, you can now create and customize the model. The model subclasses the base [`PreTrainedModel`] class. Like [`PretrainedConfig`], inheriting from [`PreTrainedModel`] and initializing the superclass with the configuration extends Transformers' functionalities such as saving and loading to the custom model.
|
||||
```py
|
||||
resnet50d_config = ResnetConfig.from_pretrained("custom-resnet")
|
||||
```
|
||||
|
||||
Transformers' models follow the convention of accepting a `config` object in the `__init__` method. This passes the entire `config` to the model sublayers, instead of breaking the `config` object into multiple arguments that are individually passed to the sublayers.
|
||||
You can also use any other method of the [`PretrainedConfig`] class, like [`~PretrainedConfig.push_to_hub`] to
|
||||
directly upload your config to the Hub.
|
||||
|
||||
Writing models this way produces simpler code with a clear source of truth for any hyperparameters. It also makes it easier to reuse code from other Transformers' models.
|
||||
## Writing a custom model
|
||||
|
||||
You'll create two ResNet models, a barebones ResNet model that outputs the hidden states and a ResNet model with an image classification head.
|
||||
Now that we have our ResNet configuration, we can go on writing the model. We will actually write two: one that
|
||||
extracts the hidden features from a batch of images (like [`BertModel`]) and one that is suitable for image
|
||||
classification (like [`BertForSequenceClassification`]).
|
||||
|
||||
<hfoptions id="resnet">
|
||||
<hfoption id="ResnetModel">
|
||||
|
||||
Define a mapping between the block types and classes. Everything else is created by passing the configuration class to the ResNet model class.
|
||||
|
||||
> [!TIP]
|
||||
> Add `config_class` to the model class to enable [AutoClass](#autoclass-support) support.
|
||||
As we mentioned before, we'll only write a loose wrapper of the model to keep it simple for this example. The only
|
||||
thing we need to do before writing this class is a map between the block types and actual block classes. Then the
|
||||
model is defined from the configuration by passing everything to the `ResNet` class:
|
||||
|
||||
```py
|
||||
from transformers import PreTrainedModel
|
||||
from timm.models.resnet import BasicBlock, Bottleneck, ResNet
|
||||
from .configuration_resnet import ResnetConfig
|
||||
|
||||
|
||||
BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck}
|
||||
|
||||
|
||||
class ResnetModel(PreTrainedModel):
|
||||
config_class = ResnetConfig
|
||||
|
||||
@ -128,17 +158,12 @@ class ResnetModel(PreTrainedModel):
|
||||
return self.model.forward_features(tensor)
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="ResnetModelForImageClassification">
|
||||
|
||||
The `forward` method needs to be rewritten to calculate the loss for each logit if labels are available. Otherwise, the ResNet model class is the same.
|
||||
|
||||
> [!TIP]
|
||||
> Add `config_class` to the model class to enable [AutoClass](#autoclass-support) support.
|
||||
For the model that will classify images, we just change the forward method:
|
||||
|
||||
```py
|
||||
import torch
|
||||
|
||||
|
||||
class ResnetModelForImageClassification(PreTrainedModel):
|
||||
config_class = ResnetConfig
|
||||
|
||||
@ -165,20 +190,34 @@ class ResnetModelForImageClassification(PreTrainedModel):
|
||||
return {"logits": logits}
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
In both cases, notice how we inherit from `PreTrainedModel` and call the superclass initialization with the `config`
|
||||
(a bit like when you write a regular `torch.nn.Module`). The line that sets the `config_class` is not mandatory, unless
|
||||
you want to register your model with the auto classes (see last section).
|
||||
|
||||
A model can return any output format. Returning a dictionary (like `ResnetModelForImageClassification`) with losses when labels are available makes the custom model compatible with [`Trainer`]. For other output formats, you'll need your own training loop or a different library for training.
|
||||
<Tip>
|
||||
|
||||
Instantiate the custom model class with the configuration.
|
||||
If your model is very similar to a model inside the library, you can re-use the same configuration as this model.
|
||||
|
||||
</Tip>
|
||||
|
||||
You can have your model return anything you want, but returning a dictionary like we did for
|
||||
`ResnetModelForImageClassification`, with the loss included when labels are passed, will make your model directly
|
||||
usable inside the [`Trainer`] class. Using another output format is fine as long as you are planning on using your own
|
||||
training loop or another library for training.
|
||||
|
||||
Now that we have our model class, let's create one:
|
||||
|
||||
```py
|
||||
resnet50d = ResnetModelForImageClassification(resnet50d_config)
|
||||
```
|
||||
|
||||
At this point, you can load pretrained weights into the model or train it from scratch. In this guide, you'll load pretrained weights.
|
||||
Again, you can use any of the methods of [`PreTrainedModel`], like [`~PreTrainedModel.save_pretrained`] or
|
||||
[`~PreTrainedModel.push_to_hub`]. We will use the second in the next section, and see how to push the model weights
|
||||
with the code of our model. But first, let's load some pretrained weights inside our model.
|
||||
|
||||
Load the pretrained weights from the [timm](https://hf.co/docs/timm/index) library, and then transfer those weights to the custom model with [load_state_dict](https://pytorch.org/docs/stable/generated/torch.nn.Module.html#torch.nn.Module.load_state_dict).
|
||||
In your own use case, you will probably be training your custom model on your own data. To go fast for this tutorial,
|
||||
we will use the pretrained version of the resnet50d. Since our model is just a wrapper around it, it's going to be
|
||||
easy to transfer those weights:
|
||||
|
||||
```py
|
||||
import timm
|
||||
@ -187,14 +226,17 @@ pretrained_model = timm.create_model("resnet50d", pretrained=True)
|
||||
resnet50d.model.load_state_dict(pretrained_model.state_dict())
|
||||
```
|
||||
|
||||
## AutoClass
|
||||
Now let's see how to make sure that when we do [`~PreTrainedModel.save_pretrained`] or [`~PreTrainedModel.push_to_hub`], the
|
||||
code of the model is saved.
|
||||
|
||||
The [AutoClass](./models#model-classes) API is a shortcut for automatically loading the correct architecture for a given model. It is convenient to enable this for users loading your custom model.
|
||||
## Registering a model with custom code to the auto classes
|
||||
|
||||
Make sure you have the `model_type` attribute (must be different from existing model types) in the configuration class and `config_class` attribute in the model class. Use the [`~AutoConfig.register`] method to add the custom configuration and model to the [AutoClass](./models#model-classes) API.
|
||||
If you are writing a library that extends 🤗 Transformers, you may want to extend the auto classes to include your own
|
||||
model. This is different from pushing the code to the Hub in the sense that users will need to import your library to
|
||||
get the custom models (contrarily to automatically downloading the model code from the Hub).
|
||||
|
||||
> [!TIP]
|
||||
> The first argument to [`AutoConfig.register`] must match the `model_type` attribute in the custom configuration class, and the first argument to [`AutoModel.register`] must match the `config_class` of the custom model class.
|
||||
As long as your config has a `model_type` attribute that is different from existing model types, and that your model
|
||||
classes have the right `config_class` attributes, you can just add them to the auto classes like this:
|
||||
|
||||
```py
|
||||
from transformers import AutoConfig, AutoModel, AutoModelForImageClassification
|
||||
@ -204,23 +246,25 @@ AutoModel.register(ResnetConfig, ResnetModel)
|
||||
AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification)
|
||||
```
|
||||
|
||||
Your custom model code is now compatible with the [AutoClass](./models#autoclass) API. Users can load the model with the [AutoModel](./model_doc/auto#automodel) or [`AutoModelForImageClassification`] classes.
|
||||
Note that the first argument used when registering your custom config to [`AutoConfig`] needs to match the `model_type`
|
||||
of your custom config, and the first argument used when registering your custom models to any auto model class needs
|
||||
to match the `config_class` of those models.
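Once registered, the auto classes can build your model like any built-in one. A small usage sketch (reusing the `ResnetConfig` and resnet50d hyperparameters from earlier):

```python
from transformers import AutoModelForImageClassification

resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
model = AutoModelForImageClassification.from_config(resnet50d_config)
```
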
|
||||
|
||||
## Upload
|
||||
## Sending the code to the Hub
|
||||
|
||||
Upload a custom model to the [Hub](https://hf.co/models) to allow other users to easily load and use it.
|
||||
<Tip warning={true}>
|
||||
|
||||
Ensure the model directory is structured correctly as shown below. The directory should contain:
|
||||
This API is experimental and may have some slight breaking changes in the next releases.
|
||||
|
||||
- `modeling.py`: Contains the code for `ResnetModel` and `ResnetModelForImageClassification`. This file can rely on relative imports to other files as long as they're in the same directory.
|
||||
</Tip>
|
||||
|
||||
> [!WARNING]
|
||||
> When copying a Transformers' model file, replace all relative imports at the top of the `modeling.py` file to import from Transformers instead.
|
||||
First, make sure your model is fully defined in a `.py` file. It can rely on relative imports to some other files as
|
||||
long as all the files are in the same directory (we don't support submodules for this feature yet). For our example,
|
||||
we'll define a `modeling_resnet.py` file and a `configuration_resnet.py` file in a folder of the current working
|
||||
directory named `resnet_model`. The configuration file contains the code for `ResnetConfig` and the modeling file
|
||||
contains the code of `ResnetModel` and `ResnetModelForImageClassification`.
|
||||
|
||||
- `configuration.py`: Contains the code for `ResnetConfig`.
|
||||
- `__init__.py`: Can be empty; this file just allows Python to detect that `resnet_model` can be used as a module.
|
||||
|
||||
```bash
.
|
||||
└── resnet_model
|
||||
├── __init__.py
|
||||
@ -228,16 +272,27 @@ Ensure the model directory is structured correctly as shown below. The directory
|
||||
└── modeling_resnet.py
|
||||
```
|
||||
|
||||
To share the model, import the ResNet model and configuration.
|
||||
The `__init__.py` can be empty, it's just there so that Python detects `resnet_model` can be used as a module.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
If copying a modeling file from the library, you will need to replace all the relative imports at the top of the file
|
||||
to import from the `transformers` package.
|
||||
|
||||
</Tip>
|
||||
|
||||
Note that you can re-use (or subclass) an existing configuration/model.
|
||||
|
||||
To share your model with the community, follow those steps: first import the ResNet model and config from the newly
|
||||
created files:
|
||||
|
||||
```py
|
||||
from resnet_model.configuration_resnet import ResnetConfig
|
||||
from resnet_model.modeling_resnet import ResnetModel, ResnetModelForImageClassification
|
||||
```
|
||||
|
||||
Copy the code from the model and configuration files. To make sure the AutoClass objects are saved with [`~PreTrainedModel.save_pretrained`], call the [`~PretrainedConfig.register_for_auto_class`] method. This modifies the configuration JSON file to include the AutoClass objects and mapping.
|
||||
|
||||
For a model, pick the appropriate `AutoModelFor` class based on the task.
|
||||
Then you have to tell the library you want to copy the code files of those objects when using the `save_pretrained`
|
||||
method and properly register them with a given Auto class (especially for models), just run:
|
||||
|
||||
```py
|
||||
ResnetConfig.register_for_auto_class()
|
||||
@ -245,7 +300,15 @@ ResnetModel.register_for_auto_class("AutoModel")
|
||||
ResnetModelForImageClassification.register_for_auto_class("AutoModelForImageClassification")
|
||||
```
|
||||
|
||||
To map more than one task to the model, edit `auto_map` in the configuration JSON file directly.
|
||||
Note that there is no need to specify an auto class for the configuration (there is only one auto class for them,
|
||||
[`AutoConfig`]) but it's different for models. Your custom model could be suitable for many different tasks, so you
|
||||
have to specify which one of the auto classes is the correct one for your model.
|
||||
|
||||
<Tip>
|
||||
|
||||
Use `register_for_auto_class()` if you want the code files to be copied. If you instead prefer to use code on the Hub from another repo,
|
||||
you don't need to call it. In cases where there's more than one auto class, you can modify the `config.json` directly using the
|
||||
following structure:
|
||||
|
||||
```json
|
||||
"auto_map": {
|
||||
@ -255,7 +318,9 @@ To map more than one task to the model, edit `auto_map` in the configuration JSO
|
||||
},
|
||||
```
|
||||
|
||||
Create the configuration and model and load pretrained weights into it.
|
||||
</Tip>
|
||||
|
||||
Next, let's create the config and models as we did before:
|
||||
|
||||
```py
|
||||
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
|
||||
@ -265,17 +330,13 @@ pretrained_model = timm.create_model("resnet50d", pretrained=True)
|
||||
resnet50d.model.load_state_dict(pretrained_model.state_dict())
|
||||
```
|
||||
|
||||
The model is ready to be pushed to the Hub now. Log in to your Hugging Face account from the command line or notebook.
|
||||
|
||||
<hfoptions id="push">
|
||||
<hfoption id="huggingface-CLI">
|
||||
Now to send the model to the Hub, make sure you are logged in. Either run in your terminal:
|
||||
|
||||
```bash
|
||||
huggingface-cli login
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="notebook">
|
||||
or from a notebook:
|
||||
|
||||
```py
|
||||
from huggingface_hub import notebook_login
|
||||
@ -283,15 +344,41 @@ from huggingface_hub import notebook_login
|
||||
notebook_login()
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
Call [`~PreTrainedModel.push_to_hub`] on the model to upload the model to the Hub.
|
||||
You can then push to your own namespace (or an organization you are a member of) like this:
|
||||
|
||||
```py
|
||||
resnet50d.push_to_hub("custom-resnet50d")
|
||||
```
|
||||
|
||||
The pretrained weights, configuration, `modeling.py` and `configuration.py` files should all be uploaded to the Hub now in a [repository](https://hf.co/sgugger/custom-resnet50d) under your namespace.
|
||||
On top of the modeling weights and the configuration in json format, this also copied the modeling and
|
||||
configuration `.py` files in the folder `custom-resnet50d` and uploaded the result to the Hub. You can check the result
|
||||
in this [model repo](https://huggingface.co/sgugger/custom-resnet50d).
|
||||
|
||||
See the [sharing tutorial](model_sharing) for more information on the push to Hub method.
|
||||
|
||||
## Using a model with custom code
|
||||
|
||||
You can use any configuration, model or tokenizer with custom code files in its repository with the auto-classes and
|
||||
the `from_pretrained` method. All files and code uploaded to the Hub are scanned for malware (refer to the [Hub security](https://huggingface.co/docs/hub/security#malware-scanning) documentation for more information), but you should still
|
||||
review the model code and author to avoid executing malicious code on your machine. Set `trust_remote_code=True` to use
|
||||
a model with custom code:
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForImageClassification
|
||||
|
||||
model = AutoModelForImageClassification.from_pretrained("sgugger/custom-resnet50d", trust_remote_code=True)
|
||||
```
|
||||
|
||||
It is also strongly encouraged to pass a commit hash as a `revision` to make sure the author of the models did not
|
||||
update the code with some malicious new lines (unless you fully trust the authors of the models).
|
||||
|
||||
```py
|
||||
commit_hash = "ed94a7c6247d8aedce4647f00f20de6875b5b292"
|
||||
model = AutoModelForImageClassification.from_pretrained(
|
||||
"sgugger/custom-resnet50d", trust_remote_code=True, revision=commit_hash
|
||||
)
|
||||
```
|
||||
|
||||
Note that when browsing the commit history of the model repo on the Hub, there is a button to easily copy the commit
|
||||
hash of any commit.
|
||||
|
||||
Because a custom model doesn't use the same modeling code as a Transformers' model, you need to add `trust_remote_code=True` in [`~PreTrainedModel.from_pretrained`] to load it. Refer to the load [custom models](./models#custom-models) section for more information.
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.