Mirror of https://github.com/huggingface/transformers.git, synced 2025-10-21 17:48:57 +08:00

Compare commits: quickfix_g...muellerzr- (177 commits)
SHA1 | Author | Date | |
---|---|---|---|
90a9702589 | |||
979d24e7fd | |||
6b7d64ac1c | |||
03c12d0d63 | |||
e969d884a6 | |||
0d86727354 | |||
edeca4387c | |||
979f4774f6 | |||
7ed9789e21 | |||
566302686a | |||
cff06aac6f | |||
28952248b1 | |||
9ea1eacd11 | |||
97c0f45b9c | |||
52a0213755 | |||
2d37085817 | |||
963ed98bed | |||
409fcfdfcc | |||
1ca9ff5c91 | |||
b9bc691e8d | |||
2e3f8f7474 | |||
eb5b968c5d | |||
746104ba6f | |||
51e6526b38 | |||
db70426854 | |||
c79bfc71b8 | |||
b017a9eb11 | |||
38d58a4427 | |||
fbff27623a | |||
e259d6d1e0 | |||
9a6956baab | |||
4987463de7 | |||
b127fb8fdc | |||
c409cd8177 | |||
5129671290 | |||
92a75ff6b1 | |||
39bfb2f514 | |||
5c1027bf09 | |||
3d79dcbda0 | |||
74e19e81e2 | |||
5c84682f16 | |||
f4c86d0416 | |||
f9ed05dd03 | |||
f1a385b1de | |||
e0b87b0f40 | |||
3bfd3e4803 | |||
386931d950 | |||
c35d2ccf5a | |||
7591ca5bc5 | |||
27903de7ec | |||
6101d934a1 | |||
7ee4363d19 | |||
d47a9e8ce5 | |||
c6b23fda65 | |||
9956c2bc98 | |||
834ec7b1cc | |||
d1f39c484d | |||
6f0ecf1049 | |||
892d51caee | |||
746e1148cf | |||
ab0ac3b98f | |||
3806faa171 | |||
7562366d4b | |||
3bf6dd8aa1 | |||
9578c2597e | |||
26f043bd4d | |||
3562772969 | |||
a378a54a57 | |||
72d4a3f9c1 | |||
894d421ee5 | |||
93e0e1a852 | |||
19e6e80e10 | |||
8defc95df3 | |||
0a7af19f4d | |||
e3a5f35cd5 | |||
1dbd9d3693 | |||
371b9c1486 | |||
adb91179b9 | |||
970a16ec7f | |||
22e6f14525 | |||
d806fa3e92 | |||
a26de15139 | |||
09e6579d2d | |||
273c0afc8f | |||
18199b34e5 | |||
975b988bfe | |||
f1d822ba33 | |||
ee8c01f839 | |||
99d67f1a09 | |||
bf97d4aa6d | |||
9282413611 | |||
eeea71209a | |||
8b94d28f97 | |||
c42d264549 | |||
6baa6f276a | |||
af638c4afe | |||
f6e2586a36 | |||
3bb7b05229 | |||
c6d484e38c | |||
87134662f7 | |||
1dde50c7d2 | |||
078d5a88cd | |||
9800e6d170 | |||
c63a3d0f17 | |||
01c4fc455b | |||
65f4bc99f9 | |||
fd06ad5438 | |||
13e645bb40 | |||
85345bb439 | |||
37204848f1 | |||
61d89c19d8 | |||
93e538ae2e | |||
59e8f1919c | |||
5f6c080b62 | |||
8a4857c0db | |||
f1b720ed62 | |||
e55b33ceb4 | |||
54b7703682 | |||
8260cb311e | |||
843e5e20ca | |||
52cb4034ad | |||
6806d33567 | |||
8ec028aded | |||
1c36db697a | |||
0b066bed14 | |||
f20d0e81ea | |||
a27182b7fc | |||
cf32ee1753 | |||
8f9fa3b081 | |||
70d5df6107 | |||
5fd7ca7bc9 | |||
c215523528 | |||
f3c8b18053 | |||
d6751d91c8 | |||
ab7e893d09 | |||
e840127370 | |||
8820fe8b8c | |||
0cea2081a3 | |||
95a77819db | |||
6577c77d93 | |||
20a04497a8 | |||
78d78cdf8a | |||
9485289f37 | |||
df323476a3 | |||
a22ff36e0e | |||
c1357834e8 | |||
9d2ab8824c | |||
5bcbdff159 | |||
c3cd9d807e | |||
cc25757a44 | |||
481e15604a | |||
b5016d5de7 | |||
a5a8291ad1 | |||
29c3a0fa01 | |||
a29eabd0eb | |||
2a5a6ad18a | |||
f1c8542ff7 | |||
126cbdb365 | |||
ce4b28830a | |||
7f777ab7d9 | |||
4996990d61 | |||
b7ea171403 | |||
8a3c55eb21 | |||
50837f2060 | |||
e31a7a2638 | |||
bd251e4955 | |||
342e3f9f20 | |||
8f2b6d5e3d | |||
7c11491208 | |||
48101cf8d1 | |||
e7f4ace092 | |||
e4522fe399 | |||
7728b78855 | |||
838d141fb4 | |||
85817d98fb | |||
54ac39c648 | |||
0164560353 |
@ -34,64 +34,44 @@ jobs:
|
||||
- run: echo 'export "GIT_COMMIT_MESSAGE=$(git show -s --format=%s)"' >> "$BASH_ENV" && source "$BASH_ENV"
|
||||
- run: mkdir -p test_preparation
|
||||
- run: python utils/tests_fetcher.py | tee tests_fetched_summary.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/tests_fetched_summary.txt
|
||||
- run: |
|
||||
if [ -f test_list.txt ]; then
|
||||
cp test_list.txt test_preparation/test_list.txt
|
||||
else
|
||||
touch test_preparation/test_list.txt
|
||||
fi
|
||||
- run: |
|
||||
if [ -f examples_test_list.txt ]; then
|
||||
mv examples_test_list.txt test_preparation/examples_test_list.txt
|
||||
else
|
||||
touch test_preparation/examples_test_list.txt
|
||||
fi
|
||||
- run: |
|
||||
if [ -f filtered_test_list_cross_tests.txt ]; then
|
||||
mv filtered_test_list_cross_tests.txt test_preparation/filtered_test_list_cross_tests.txt
|
||||
else
|
||||
touch test_preparation/filtered_test_list_cross_tests.txt
|
||||
fi
|
||||
- run: |
|
||||
if [ -f doctest_list.txt ]; then
|
||||
cp doctest_list.txt test_preparation/doctest_list.txt
|
||||
else
|
||||
touch test_preparation/doctest_list.txt
|
||||
fi
|
||||
- run: |
|
||||
if [ -f test_repo_utils.txt ]; then
|
||||
mv test_repo_utils.txt test_preparation/test_repo_utils.txt
|
||||
else
|
||||
touch test_preparation/test_repo_utils.txt
|
||||
fi
|
||||
- run: python utils/tests_fetcher.py --filter_tests
|
||||
- run: |
|
||||
if [ -f test_list.txt ]; then
|
||||
mv test_list.txt test_preparation/filtered_test_list.txt
|
||||
else
|
||||
touch test_preparation/filtered_test_list.txt
|
||||
fi
|
||||
- store_artifacts:
|
||||
path: test_preparation/test_list.txt
|
||||
- store_artifacts:
|
||||
path: test_preparation/doctest_list.txt
|
||||
- store_artifacts:
|
||||
path: ~/transformers/test_preparation/filtered_test_list.txt
|
||||
- store_artifacts:
|
||||
path: test_preparation/examples_test_list.txt
|
||||
- run: export "GIT_COMMIT_MESSAGE=$(git show -s --format=%s)" && echo $GIT_COMMIT_MESSAGE && python .circleci/create_circleci_config.py --fetcher_folder test_preparation
|
||||
- run: |
|
||||
if [ ! -s test_preparation/generated_config.yml ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
if [ ! -s test_preparation/generated_config.yml ]; then
|
||||
echo "No tests to run, exiting early!"
|
||||
circleci-agent step halt
|
||||
fi
|
||||
|
||||
- store_artifacts:
|
||||
path: test_preparation/generated_config.yml
|
||||
path: test_preparation
|
||||
|
||||
- run:
|
||||
name: "Retrieve Artifact Paths"
|
||||
env:
|
||||
CIRCLE_TOKEN: ${{ secrets.CI_ARTIFACT_TOKEN }}
|
||||
command: |
|
||||
project_slug="gh/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}"
|
||||
job_number=${CIRCLE_BUILD_NUM}
|
||||
url="https://circleci.com/api/v2/project/${project_slug}/${job_number}/artifacts"
|
||||
curl -o test_preparation/artifacts.json ${url}
|
||||
- run:
|
||||
name: "Prepare pipeline parameters"
|
||||
command: |
|
||||
python utils/process_test_artifacts.py
|
||||
|
||||
# To avoid an overly long generated_config.yaml on the continuation orb, we pass the links to the artifacts as parameters.
|
||||
# Otherwise the list of tests was just too big. Being explicit is good, but here it ran into a size limitation.
|
||||
# We used:
|
||||
|
||||
# https://circleci.com/docs/api/v2/index.html#operation/getJobArtifacts : to get the job artifacts
|
||||
# We could not pass a nested dict, which is why we create the test_file_... parameters for every single job
|
||||
|
||||
- store_artifacts:
|
||||
path: test_preparation/filtered_test_list_cross_tests.txt
|
||||
path: test_preparation/transformed_artifacts.json
|
||||
- store_artifacts:
|
||||
path: test_preparation/artifacts.json
|
||||
- continuation/continue:
|
||||
parameters: test_preparation/transformed_artifacts.json
|
||||
configuration_path: test_preparation/generated_config.yml
|
||||
|
||||
# To run all tests for the nightly build
|
||||
|
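The "Retrieve Artifact Paths" and "Prepare pipeline parameters" steps above download the job's artifact listing and turn it into pipeline parameters for the continuation orb, which is how the per-job test lists avoid bloating `generated_config.yml`. The real `utils/process_test_artifacts.py` is not part of this diff; the sketch below only illustrates the idea, and it assumes the CircleCI v2 artifacts response shape (`{"items": [{"path": ..., "url": ...}]}`) and test-list artifacts named `<job_name>_test_list.txt`.

```python
import json
import re


def transform_artifacts(src="test_preparation/artifacts.json",
                        dst="test_preparation/transformed_artifacts.json"):
    """Flatten the artifact listing into {parameter_name: artifact_url} pairs."""
    with open(src) as f:
        items = json.load(f).get("items", [])

    parameters = {}
    for item in items:
        # e.g. path ".../tests_torch_test_list.txt" -> parameter "tests_torch_test_list"
        match = re.search(r"([^/]+)_test_list\.txt$", item["path"])
        if match:
            parameters[f"{match.group(1)}_test_list"] = item["url"]

    with open(dst, "w") as f:
        json.dump(parameters, f, indent=2)


if __name__ == "__main__":
    transform_artifacts()
```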
@ -32,7 +32,7 @@ COMMON_ENV_VARIABLES = {
|
||||
"RUN_PT_FLAX_CROSS_TESTS": False,
|
||||
}
|
||||
# Disable the use of {"s": None} as the output is way too long, causing the navigation on CircleCI impractical
|
||||
COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "dist": "loadfile", "v": None}
|
||||
COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "dist": "loadfile", "vvv": None, "rsf":None}
|
||||
DEFAULT_DOCKER_IMAGE = [{"image": "cimg/python:3.8.12"}]
|
||||
|
||||
|
||||
@ -50,16 +50,15 @@ class EmptyJob:
|
||||
class CircleCIJob:
|
||||
name: str
|
||||
additional_env: Dict[str, Any] = None
|
||||
cache_name: str = None
|
||||
cache_version: str = "0.8.2"
|
||||
docker_image: List[Dict[str, str]] = None
|
||||
install_steps: List[str] = None
|
||||
marker: Optional[str] = None
|
||||
parallelism: Optional[int] = 1
|
||||
parallelism: Optional[int] = 0
|
||||
pytest_num_workers: int = 12
|
||||
pytest_options: Dict[str, Any] = None
|
||||
resource_class: Optional[str] = "2xlarge"
|
||||
tests_to_run: Optional[List[str]] = None
|
||||
num_test_files_per_worker: Optional[int] = 10
|
||||
# This should be only used for doctest job!
|
||||
command_timeout: Optional[int] = None
|
||||
|
||||
@ -67,8 +66,6 @@ class CircleCIJob:
|
||||
# Deal with defaults for mutable attributes.
|
||||
if self.additional_env is None:
|
||||
self.additional_env = {}
|
||||
if self.cache_name is None:
|
||||
self.cache_name = self.name
|
||||
if self.docker_image is None:
|
||||
# Let's avoid changing the default list and make a copy.
|
||||
self.docker_image = copy.deepcopy(DEFAULT_DOCKER_IMAGE)
|
||||
@ -79,155 +76,96 @@ class CircleCIJob:
|
||||
self.docker_image[0]["image"] = f"{self.docker_image[0]['image']}:dev"
|
||||
print(f"Using {self.docker_image} docker image")
|
||||
if self.install_steps is None:
|
||||
self.install_steps = []
|
||||
self.install_steps = ["uv venv && uv pip install ."]
|
||||
if self.pytest_options is None:
|
||||
self.pytest_options = {}
|
||||
if isinstance(self.tests_to_run, str):
|
||||
self.tests_to_run = [self.tests_to_run]
|
||||
if self.parallelism is None:
|
||||
self.parallelism = 1
|
||||
else:
|
||||
test_file = os.path.join("test_preparation" , f"{self.job_name}_test_list.txt")
|
||||
print("Looking for ", test_file)
|
||||
if os.path.exists(test_file):
|
||||
with open(test_file) as f:
|
||||
expanded_tests = f.read().strip().split("\n")
|
||||
self.tests_to_run = expanded_tests
|
||||
print("Found:", expanded_tests)
|
||||
else:
|
||||
self.tests_to_run = []
|
||||
print("not Found")
|
||||
|
||||
def to_dict(self):
|
||||
env = COMMON_ENV_VARIABLES.copy()
|
||||
env.update(self.additional_env)
|
||||
|
||||
cache_branch_prefix = os.environ.get("CIRCLE_BRANCH", "pull")
|
||||
if cache_branch_prefix != "main":
|
||||
cache_branch_prefix = "pull"
|
||||
|
||||
job = {
|
||||
"docker": self.docker_image,
|
||||
"environment": env,
|
||||
}
|
||||
if self.resource_class is not None:
|
||||
job["resource_class"] = self.resource_class
|
||||
if self.parallelism is not None:
|
||||
job["parallelism"] = self.parallelism
|
||||
steps = [
|
||||
"checkout",
|
||||
{"attach_workspace": {"at": "test_preparation"}},
|
||||
]
|
||||
steps.extend([{"run": l} for l in self.install_steps])
|
||||
steps.append({"run": {"name": "Show installed libraries and their size", "command": """du -h -d 1 "$(pip -V | cut -d ' ' -f 4 | sed 's/pip//g')" | grep -vE "dist-info|_distutils_hack|__pycache__" | sort -h | tee installed.txt || true"""}})
|
||||
steps.append({"run": {"name": "Show installed libraries and their versions", "command": """pip list --format=freeze | tee installed.txt || true"""}})
|
||||
|
||||
steps.append({"run":{"name":"Show biggest libraries","command":"""dpkg-query --show --showformat='${Installed-Size}\t${Package}\n' | sort -rh | head -25 | sort -h | awk '{ package=$2; sub(".*/", "", package); printf("%.5f GB %s\n", $1/1024/1024, package)}' || true"""}})
|
||||
steps.append({"store_artifacts": {"path": "installed.txt"}})
|
||||
|
||||
all_options = {**COMMON_PYTEST_OPTIONS, **self.pytest_options}
|
||||
pytest_flags = [f"--{key}={value}" if (value is not None or key in ["doctest-modules"]) else f"-{key}" for key, value in all_options.items()]
|
||||
pytest_flags.append(
|
||||
f"--make-reports={self.name}" if "examples" in self.name else f"--make-reports=tests_{self.name}"
|
||||
)
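To make the COMMON_PYTEST_OPTIONS change above concrete, here is a small standalone rendering of how the options dict becomes CLI flags; it mirrors the comprehension shown in this hunk (minus the `doctest-modules` special case).

```python
options = {"max-worker-restart": 0, "dist": "loadfile", "vvv": None, "rsf": None}

# Keys with a value become long options, keys with None become short flags.
flags = [f"--{key}={value}" if value is not None else f"-{key}" for key, value in options.items()]

print(" ".join(flags))
# --max-worker-restart=0 --dist=loadfile -vvv -rsf
```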
|
||||
|
||||
steps.append({"run": {"name": "Create `test-results` directory", "command": "mkdir test-results"}})
|
||||
test_command = ""
|
||||
if self.command_timeout:
|
||||
test_command = f"timeout {self.command_timeout} "
|
||||
# junit family xunit1 is necessary to support splitting on test name or class name with circleci split
|
||||
test_command += f"python3 -m pytest -rsfE -p no:warnings -o junit_family=xunit1 --tb=short --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags)
|
||||
|
||||
if self.parallelism == 1:
|
||||
if self.tests_to_run is None:
|
||||
test_command += " << pipeline.parameters.tests_to_run >>"
|
||||
else:
|
||||
test_command += " " + " ".join(self.tests_to_run)
|
||||
else:
|
||||
# We need explicit list instead of `pipeline.parameters.tests_to_run` (only available at job runtime)
|
||||
tests = self.tests_to_run
|
||||
if tests is None:
|
||||
folder = os.environ["test_preparation_dir"]
|
||||
test_file = os.path.join(folder, "filtered_test_list.txt")
|
||||
if os.path.exists(test_file): # We take this job's tests from the filtered test_list.txt
|
||||
with open(test_file) as f:
|
||||
tests = f.read().split(" ")
|
||||
|
||||
# expand the test list
|
||||
if tests == ["tests"]:
|
||||
tests = [os.path.join("tests", x) for x in os.listdir("tests")]
|
||||
expanded_tests = []
|
||||
for test in tests:
|
||||
if test.endswith(".py"):
|
||||
expanded_tests.append(test)
|
||||
elif test == "tests/models":
|
||||
if "tokenization" in self.name:
|
||||
expanded_tests.extend(glob.glob("tests/models/**/test_tokenization*.py", recursive=True))
|
||||
elif self.name in ["flax","torch","tf"]:
|
||||
name = self.name if self.name != "torch" else ""
|
||||
if self.name == "torch":
|
||||
all_tests = glob.glob(f"tests/models/**/test_modeling_{name}*.py", recursive=True)
|
||||
filtered = [k for k in all_tests if ("_tf_") not in k and "_flax_" not in k]
|
||||
expanded_tests.extend(filtered)
|
||||
else:
|
||||
expanded_tests.extend(glob.glob(f"tests/models/**/test_modeling_{name}*.py", recursive=True))
|
||||
else:
|
||||
expanded_tests.extend(glob.glob("tests/models/**/test_modeling*.py", recursive=True))
|
||||
elif test == "tests/pipelines":
|
||||
expanded_tests.extend(glob.glob("tests/models/**/test_modeling*.py", recursive=True))
|
||||
else:
|
||||
expanded_tests.append(test)
|
||||
tests = " ".join(expanded_tests)
|
||||
|
||||
# Each executor runs ~10 tests
|
||||
n_executors = max(len(expanded_tests) // 10, 1)
|
||||
# Avoid empty test list on some executor(s) or launching too many executors
|
||||
if n_executors > self.parallelism:
|
||||
n_executors = self.parallelism
|
||||
job["parallelism"] = n_executors
|
||||
|
||||
# Needs to be newline-separated for the `circleci tests split` command below
|
||||
command = f'echo {tests} | tr " " "\\n" >> tests.txt'
|
||||
steps.append({"run": {"name": "Get tests", "command": command}})
|
||||
|
||||
command = 'TESTS=$(circleci tests split tests.txt) && echo $TESTS > splitted_tests.txt'
|
||||
steps.append({"run": {"name": "Split tests", "command": command}})
|
||||
|
||||
steps.append({"store_artifacts": {"path": "tests.txt"}})
|
||||
steps.append({"store_artifacts": {"path": "splitted_tests.txt"}})
|
||||
|
||||
test_command = ""
|
||||
if self.command_timeout:
|
||||
test_command = f"timeout {self.command_timeout} "
|
||||
test_command += f"python3 -m pytest -rsfE -p no:warnings --tb=short -o junit_family=xunit1 --junitxml=test-results/junit.xml -n {self.pytest_num_workers} " + " ".join(pytest_flags)
|
||||
test_command += " $(cat splitted_tests.txt)"
|
||||
if self.marker is not None:
|
||||
test_command += f" -m {self.marker}"
|
||||
|
||||
if self.name == "pr_documentation_tests":
|
||||
# can't use ` | tee tests_output.txt` as usual
|
||||
test_command += " > tests_output.txt"
|
||||
# Save the return code, so we can check whether it timed out in the next step.
|
||||
test_command += '; touch "$?".txt'
|
||||
# Never fail the test step for the doctest job. We will check the results in the next step, and fail that
|
||||
# step instead if actual test failures are found. This is to avoid a timeout being reported as a test
|
||||
# failure.
|
||||
test_command = f"({test_command}) || true"
|
||||
else:
|
||||
test_command = f"({test_command} | tee tests_output.txt)"
|
||||
steps.append({"run": {"name": "Run tests", "command": test_command}})
|
||||
|
||||
steps.append({"run": {"name": "Skipped tests", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --skip"}})
|
||||
steps.append({"run": {"name": "Failed tests", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --fail"}})
|
||||
steps.append({"run": {"name": "Errors", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --errors"}})
|
||||
|
||||
steps.append({"store_test_results": {"path": "test-results"}})
|
||||
steps.append({"store_artifacts": {"path": "tests_output.txt"}})
|
||||
steps.append({"store_artifacts": {"path": "test-results/junit.xml"}})
|
||||
steps.append({"store_artifacts": {"path": "reports"}})
|
||||
|
||||
# Examples special case: we need to download NLTK files in advance to avoid concurrency issues
|
||||
timeout_cmd = f"timeout {self.command_timeout} " if self.command_timeout else ""
|
||||
marker_cmd = f"-m '{self.marker}'" if self.marker is not None else ""
|
||||
additional_flags = f" -p no:warning -o junit_family=xunit1 --junitxml=test-results/junit.xml"
|
||||
parallel = f' << pipeline.parameters.{self.job_name}_parallelism >> '
|
||||
steps = [
|
||||
"checkout",
|
||||
{"attach_workspace": {"at": "test_preparation"}},
|
||||
{"run": "apt-get update && apt-get install -y curl"},
|
||||
{"run": " && ".join(self.install_steps)},
|
||||
{"run": {"name": "Download NLTK files", "command": """python -c "import nltk; nltk.download('punkt', quiet=True)" """} if "example" in self.name else "echo Skipping"},
|
||||
{"run": {
|
||||
"name": "Show installed libraries and their size",
|
||||
"command": """du -h -d 1 "$(pip -V | cut -d ' ' -f 4 | sed 's/pip//g')" | grep -vE "dist-info|_distutils_hack|__pycache__" | sort -h | tee installed.txt || true"""}
|
||||
},
|
||||
{"run": {
|
||||
"name": "Show installed libraries and their versions",
|
||||
"command": """pip list --format=freeze | tee installed.txt || true"""}
|
||||
},
|
||||
{"run": {
|
||||
"name": "Show biggest libraries",
|
||||
"command": """dpkg-query --show --showformat='${Installed-Size}\t${Package}\n' | sort -rh | head -25 | sort -h | awk '{ package=$2; sub(".*/", "", package); printf("%.5f GB %s\n", $1/1024/1024, package)}' || true"""}
|
||||
},
|
||||
{"run": {"name": "Create `test-results` directory", "command": "mkdir test-results"}},
|
||||
{"run": {"name": "Get files to test", "command":f'curl -L -o {self.job_name}_test_list.txt <<pipeline.parameters.{self.job_name}_test_list>>' if self.name != "pr_documentation_tests" else 'echo "Skipped"'}},
|
||||
{"run": {"name": "Split tests across parallel nodes: show current parallel tests",
|
||||
"command": f"TESTS=$(circleci tests split --split-by=timings {self.job_name}_test_list.txt) && echo $TESTS > splitted_tests.txt && echo $TESTS | tr ' ' '\n'" if self.parallelism else f"awk '{{printf \"%s \", $0}}' {self.job_name}_test_list.txt > splitted_tests.txt"
|
||||
}
|
||||
},
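The split step above delegates the actual distribution of test files to the CircleCI CLI. As a rough mental model only (not the real implementation, which can also balance by historical timings via `--split-by=timings`), a node's share can be thought of as a round-robin slice keyed on the standard parallelism environment variables:

```python
import os


def split_for_this_node(test_list_path: str) -> list:
    """Naive round-robin approximation of `circleci tests split`."""
    with open(test_list_path) as f:
        tests = [line.strip() for line in f if line.strip()]
    index = int(os.environ.get("CIRCLE_NODE_INDEX", 0))
    total = int(os.environ.get("CIRCLE_NODE_TOTAL", 1))
    return tests[index::total]
```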
|
||||
{"run": {
|
||||
"name": "Run tests",
|
||||
"command": f"({timeout_cmd} python3 -m pytest {marker_cmd} -n {self.pytest_num_workers} {additional_flags} {' '.join(pytest_flags)} $(cat splitted_tests.txt) | tee tests_output.txt)"}
|
||||
},
|
||||
{"run": {"name": "Expand to show skipped tests", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --skip"}},
|
||||
{"run": {"name": "Failed tests: show reasons", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --fail"}},
|
||||
{"run": {"name": "Errors", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --errors"}},
|
||||
{"store_test_results": {"path": "test-results"}},
|
||||
{"store_artifacts": {"path": "test-results/junit.xml"}},
|
||||
{"store_artifacts": {"path": "reports"}},
|
||||
{"store_artifacts": {"path": "tests.txt"}},
|
||||
{"store_artifacts": {"path": "splitted_tests.txt"}},
|
||||
{"store_artifacts": {"path": "installed.txt"}},
|
||||
]
|
||||
if self.parallelism:
|
||||
job["parallelism"] = parallel
|
||||
job["steps"] = steps
|
||||
return job
|
||||
|
||||
@property
|
||||
def job_name(self):
|
||||
return self.name if "examples" in self.name else f"tests_{self.name}"
|
||||
return self.name if ("examples" in self.name or "pipeline" in self.name or "pr_documentation" in self.name) else f"tests_{self.name}"
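The widened `job_name` rule above is easy to verify with a tiny standalone replica of the property:

```python
def job_name(name: str) -> str:
    keep_as_is = "examples" in name or "pipeline" in name or "pr_documentation" in name
    return name if keep_as_is else f"tests_{name}"


assert job_name("torch") == "tests_torch"
assert job_name("examples_torch") == "examples_torch"
assert job_name("pipelines_torch") == "pipelines_torch"
assert job_name("pr_documentation_tests") == "pr_documentation_tests"
```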
|
||||
|
||||
|
||||
# JOBS
|
||||
torch_and_tf_job = CircleCIJob(
|
||||
"torch_and_tf",
|
||||
docker_image=[{"image":"huggingface/transformers-torch-tf-light"}],
|
||||
install_steps=["uv venv && uv pip install ."],
|
||||
additional_env={"RUN_PT_TF_CROSS_TESTS": True},
|
||||
marker="is_pt_tf_cross_test",
|
||||
pytest_options={"rA": None, "durations": 0},
|
||||
@ -238,7 +176,6 @@ torch_and_flax_job = CircleCIJob(
|
||||
"torch_and_flax",
|
||||
additional_env={"RUN_PT_FLAX_CROSS_TESTS": True},
|
||||
docker_image=[{"image":"huggingface/transformers-torch-jax-light"}],
|
||||
install_steps=["uv venv && uv pip install ."],
|
||||
marker="is_pt_flax_cross_test",
|
||||
pytest_options={"rA": None, "durations": 0},
|
||||
)
|
||||
@ -246,35 +183,46 @@ torch_and_flax_job = CircleCIJob(
|
||||
torch_job = CircleCIJob(
|
||||
"torch",
|
||||
docker_image=[{"image": "huggingface/transformers-torch-light"}],
|
||||
install_steps=["uv venv && uv pip install ."],
|
||||
marker="not generate",
|
||||
parallelism=6,
|
||||
pytest_num_workers=4
|
||||
pytest_num_workers=8
|
||||
)
|
||||
|
||||
generate_job = CircleCIJob(
|
||||
"generate",
|
||||
docker_image=[{"image": "huggingface/transformers-torch-light"}],
|
||||
marker="generate",
|
||||
parallelism=6,
|
||||
pytest_num_workers=8
|
||||
)
|
||||
|
||||
tokenization_job = CircleCIJob(
|
||||
"tokenization",
|
||||
docker_image=[{"image": "huggingface/transformers-torch-light"}],
|
||||
install_steps=["uv venv && uv pip install ."],
|
||||
parallelism=6,
|
||||
pytest_num_workers=4
|
||||
parallelism=8,
|
||||
pytest_num_workers=16
|
||||
)
|
||||
|
||||
processor_job = CircleCIJob(
|
||||
"processors",
|
||||
docker_image=[{"image": "huggingface/transformers-torch-light"}],
|
||||
parallelism=8,
|
||||
pytest_num_workers=6
|
||||
)
|
||||
|
||||
tf_job = CircleCIJob(
|
||||
"tf",
|
||||
docker_image=[{"image":"huggingface/transformers-tf-light"}],
|
||||
install_steps=["uv venv", "uv pip install -e."],
|
||||
parallelism=6,
|
||||
pytest_num_workers=4,
|
||||
pytest_num_workers=16,
|
||||
)
|
||||
|
||||
|
||||
flax_job = CircleCIJob(
|
||||
"flax",
|
||||
docker_image=[{"image":"huggingface/transformers-jax-light"}],
|
||||
install_steps=["uv venv && uv pip install ."],
|
||||
parallelism=6,
|
||||
pytest_num_workers=4
|
||||
pytest_num_workers=16
|
||||
)
|
||||
|
||||
|
||||
@ -282,8 +230,8 @@ pipelines_torch_job = CircleCIJob(
|
||||
"pipelines_torch",
|
||||
additional_env={"RUN_PIPELINE_TESTS": True},
|
||||
docker_image=[{"image":"huggingface/transformers-torch-light"}],
|
||||
install_steps=["uv venv && uv pip install ."],
|
||||
marker="is_pipeline_test",
|
||||
parallelism=4
|
||||
)
|
||||
|
||||
|
||||
@ -291,8 +239,8 @@ pipelines_tf_job = CircleCIJob(
|
||||
"pipelines_tf",
|
||||
additional_env={"RUN_PIPELINE_TESTS": True},
|
||||
docker_image=[{"image":"huggingface/transformers-tf-light"}],
|
||||
install_steps=["uv venv && uv pip install ."],
|
||||
marker="is_pipeline_test",
|
||||
parallelism=4
|
||||
)
|
||||
|
||||
|
||||
@ -300,34 +248,24 @@ custom_tokenizers_job = CircleCIJob(
|
||||
"custom_tokenizers",
|
||||
additional_env={"RUN_CUSTOM_TOKENIZERS": True},
|
||||
docker_image=[{"image": "huggingface/transformers-custom-tokenizers"}],
|
||||
install_steps=["uv venv","uv pip install -e ."],
|
||||
parallelism=None,
|
||||
resource_class=None,
|
||||
tests_to_run=[
|
||||
"./tests/models/bert_japanese/test_tokenization_bert_japanese.py",
|
||||
"./tests/models/openai/test_tokenization_openai.py",
|
||||
"./tests/models/clip/test_tokenization_clip.py",
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
examples_torch_job = CircleCIJob(
|
||||
"examples_torch",
|
||||
additional_env={"OMP_NUM_THREADS": 8},
|
||||
cache_name="torch_examples",
|
||||
docker_image=[{"image":"huggingface/transformers-examples-torch"}],
|
||||
# TODO @ArthurZucker remove this once docker is easier to build
|
||||
install_steps=["uv venv && uv pip install . && uv pip install -r examples/pytorch/_tests_requirements.txt"],
|
||||
pytest_num_workers=1,
|
||||
pytest_num_workers=8,
|
||||
)
|
||||
|
||||
|
||||
examples_tensorflow_job = CircleCIJob(
|
||||
"examples_tensorflow",
|
||||
cache_name="tensorflow_examples",
|
||||
additional_env={"OMP_NUM_THREADS": 8},
|
||||
docker_image=[{"image":"huggingface/transformers-examples-tf"}],
|
||||
install_steps=["uv venv && uv pip install . && uv pip install -r examples/tensorflow/_tests_requirements.txt"],
|
||||
parallelism=8
|
||||
pytest_num_workers=16,
|
||||
)
|
||||
|
||||
|
||||
@ -336,12 +274,12 @@ hub_job = CircleCIJob(
|
||||
additional_env={"HUGGINGFACE_CO_STAGING": True},
|
||||
docker_image=[{"image":"huggingface/transformers-torch-light"}],
|
||||
install_steps=[
|
||||
"uv venv && uv pip install .",
|
||||
'uv venv && uv pip install .',
|
||||
'git config --global user.email "ci@dummy.com"',
|
||||
'git config --global user.name "ci"',
|
||||
],
|
||||
marker="is_staging_test",
|
||||
pytest_num_workers=1,
|
||||
pytest_num_workers=2,
|
||||
)
|
||||
|
||||
|
||||
@ -349,8 +287,7 @@ onnx_job = CircleCIJob(
|
||||
"onnx",
|
||||
docker_image=[{"image":"huggingface/transformers-torch-tf-light"}],
|
||||
install_steps=[
|
||||
"uv venv && uv pip install .",
|
||||
"uv pip install --upgrade eager pip",
|
||||
"uv venv",
|
||||
"uv pip install .[torch,tf,testing,sentencepiece,onnxruntime,vision,rjieba]",
|
||||
],
|
||||
pytest_options={"k onnx": None},
|
||||
@ -360,15 +297,7 @@ onnx_job = CircleCIJob(
|
||||
|
||||
exotic_models_job = CircleCIJob(
|
||||
"exotic_models",
|
||||
install_steps=["uv venv && uv pip install ."],
|
||||
docker_image=[{"image":"huggingface/transformers-exotic-models"}],
|
||||
tests_to_run=[
|
||||
"tests/models/*layoutlmv*",
|
||||
"tests/models/*nat",
|
||||
"tests/models/deta",
|
||||
"tests/models/udop",
|
||||
"tests/models/nougat",
|
||||
],
|
||||
pytest_num_workers=12,
|
||||
parallelism=4,
|
||||
pytest_options={"durations": 100},
|
||||
@ -378,11 +307,8 @@ exotic_models_job = CircleCIJob(
|
||||
repo_utils_job = CircleCIJob(
|
||||
"repo_utils",
|
||||
docker_image=[{"image":"huggingface/transformers-consistency"}],
|
||||
install_steps=["uv venv && uv pip install ."],
|
||||
parallelism=None,
|
||||
pytest_num_workers=1,
|
||||
pytest_num_workers=4,
|
||||
resource_class="large",
|
||||
tests_to_run="tests/repo_utils",
|
||||
)
|
||||
|
||||
|
||||
@ -391,28 +317,18 @@ repo_utils_job = CircleCIJob(
|
||||
# the bash output redirection.)
|
||||
py_command = 'from utils.tests_fetcher import get_doctest_files; to_test = get_doctest_files() + ["dummy.py"]; to_test = " ".join(to_test); print(to_test)'
|
||||
py_command = f"$(python3 -c '{py_command}')"
|
||||
command = f'echo "{py_command}" > pr_documentation_tests_temp.txt'
|
||||
command = f'echo """{py_command}""" > pr_documentation_tests_temp.txt'
|
||||
doc_test_job = CircleCIJob(
|
||||
"pr_documentation_tests",
|
||||
docker_image=[{"image":"huggingface/transformers-consistency"}],
|
||||
additional_env={"TRANSFORMERS_VERBOSITY": "error", "DATASETS_VERBOSITY": "error", "SKIP_CUDA_DOCTEST": "1"},
|
||||
install_steps=[
|
||||
# Add an empty file to keep the test step running correctly even if no file is selected to be tested.
|
||||
"uv venv && pip install .",
|
||||
"touch dummy.py",
|
||||
{
|
||||
"name": "Get files to test",
|
||||
"command": command,
|
||||
},
|
||||
{
|
||||
"name": "Show information in `Get files to test`",
|
||||
"command":
|
||||
"cat pr_documentation_tests_temp.txt"
|
||||
},
|
||||
{
|
||||
"name": "Get the last line in `pr_documentation_tests.txt`",
|
||||
"command":
|
||||
"tail -n1 pr_documentation_tests_temp.txt | tee pr_documentation_tests.txt"
|
||||
},
|
||||
command,
|
||||
"cat pr_documentation_tests_temp.txt",
|
||||
"tail -n1 pr_documentation_tests_temp.txt | tee pr_documentation_tests_test_list.txt"
|
||||
],
|
||||
tests_to_run="$(cat pr_documentation_tests.txt)", # noqa
|
||||
pytest_options={"-doctest-modules": None, "doctest-glob": "*.md", "dist": "loadfile", "rvsA": None},
|
||||
@ -420,121 +336,37 @@ doc_test_job = CircleCIJob(
|
||||
pytest_num_workers=1,
|
||||
)
|
||||
|
||||
REGULAR_TESTS = [
|
||||
torch_and_tf_job,
|
||||
torch_and_flax_job,
|
||||
torch_job,
|
||||
tf_job,
|
||||
flax_job,
|
||||
custom_tokenizers_job,
|
||||
hub_job,
|
||||
onnx_job,
|
||||
exotic_models_job,
|
||||
tokenization_job
|
||||
]
|
||||
EXAMPLES_TESTS = [
|
||||
examples_torch_job,
|
||||
examples_tensorflow_job,
|
||||
]
|
||||
PIPELINE_TESTS = [
|
||||
pipelines_torch_job,
|
||||
pipelines_tf_job,
|
||||
]
|
||||
REGULAR_TESTS = [torch_and_tf_job, torch_and_flax_job, torch_job, tf_job, flax_job, hub_job, onnx_job, tokenization_job, processor_job, generate_job] # fmt: skip
|
||||
EXAMPLES_TESTS = [examples_torch_job, examples_tensorflow_job]
|
||||
PIPELINE_TESTS = [pipelines_torch_job, pipelines_tf_job]
|
||||
REPO_UTIL_TESTS = [repo_utils_job]
|
||||
DOC_TESTS = [doc_test_job]
|
||||
|
||||
ALL_TESTS = REGULAR_TESTS + EXAMPLES_TESTS + PIPELINE_TESTS + REPO_UTIL_TESTS + DOC_TESTS + [custom_tokenizers_job] + [exotic_models_job] # fmt: skip
|
||||
|
||||
def create_circleci_config(folder=None):
|
||||
if folder is None:
|
||||
folder = os.getcwd()
|
||||
# Used in CircleCIJob.to_dict() to expand the test list (for using parallelism)
|
||||
os.environ["test_preparation_dir"] = folder
|
||||
jobs = []
|
||||
all_test_file = os.path.join(folder, "test_list.txt")
|
||||
if os.path.exists(all_test_file):
|
||||
with open(all_test_file) as f:
|
||||
all_test_list = f.read()
|
||||
else:
|
||||
all_test_list = []
|
||||
if len(all_test_list) > 0:
|
||||
jobs.extend(PIPELINE_TESTS)
|
||||
|
||||
test_file = os.path.join(folder, "filtered_test_list.txt")
|
||||
if os.path.exists(test_file):
|
||||
with open(test_file) as f:
|
||||
test_list = f.read()
|
||||
else:
|
||||
test_list = []
|
||||
if len(test_list) > 0:
|
||||
jobs.extend(REGULAR_TESTS)
|
||||
|
||||
extended_tests_to_run = set(test_list.split())
|
||||
# Extend the test files for cross test jobs
|
||||
for job in jobs:
|
||||
if job.job_name in ["tests_torch_and_tf", "tests_torch_and_flax"]:
|
||||
for test_path in copy.copy(extended_tests_to_run):
|
||||
dir_path, fn = os.path.split(test_path)
|
||||
if fn.startswith("test_modeling_tf_"):
|
||||
fn = fn.replace("test_modeling_tf_", "test_modeling_")
|
||||
elif fn.startswith("test_modeling_flax_"):
|
||||
fn = fn.replace("test_modeling_flax_", "test_modeling_")
|
||||
else:
|
||||
if job.job_name == "test_torch_and_tf":
|
||||
fn = fn.replace("test_modeling_", "test_modeling_tf_")
|
||||
elif job.job_name == "test_torch_and_flax":
|
||||
fn = fn.replace("test_modeling_", "test_modeling_flax_")
|
||||
new_test_file = str(os.path.join(dir_path, fn))
|
||||
if os.path.isfile(new_test_file):
|
||||
if new_test_file not in extended_tests_to_run:
|
||||
extended_tests_to_run.add(new_test_file)
|
||||
extended_tests_to_run = sorted(extended_tests_to_run)
|
||||
for job in jobs:
|
||||
if job.job_name in ["tests_torch_and_tf", "tests_torch_and_flax"]:
|
||||
job.tests_to_run = extended_tests_to_run
|
||||
fn = "filtered_test_list_cross_tests.txt"
|
||||
f_path = os.path.join(folder, fn)
|
||||
with open(f_path, "w") as fp:
|
||||
fp.write(" ".join(extended_tests_to_run))
|
||||
|
||||
example_file = os.path.join(folder, "examples_test_list.txt")
|
||||
if os.path.exists(example_file) and os.path.getsize(example_file) > 0:
|
||||
with open(example_file, "r", encoding="utf-8") as f:
|
||||
example_tests = f.read()
|
||||
for job in EXAMPLES_TESTS:
|
||||
framework = job.name.replace("examples_", "").replace("torch", "pytorch")
|
||||
if example_tests == "all":
|
||||
job.tests_to_run = [f"examples/{framework}"]
|
||||
else:
|
||||
job.tests_to_run = [f for f in example_tests.split(" ") if f.startswith(f"examples/{framework}")]
|
||||
|
||||
if len(job.tests_to_run) > 0:
|
||||
jobs.append(job)
|
||||
|
||||
doctest_file = os.path.join(folder, "doctest_list.txt")
|
||||
if os.path.exists(doctest_file):
|
||||
with open(doctest_file) as f:
|
||||
doctest_list = f.read()
|
||||
else:
|
||||
doctest_list = []
|
||||
if len(doctest_list) > 0:
|
||||
jobs.extend(DOC_TESTS)
|
||||
|
||||
repo_util_file = os.path.join(folder, "test_repo_utils.txt")
|
||||
if os.path.exists(repo_util_file) and os.path.getsize(repo_util_file) > 0:
|
||||
jobs.extend(REPO_UTIL_TESTS)
|
||||
jobs = [k for k in ALL_TESTS if os.path.isfile(os.path.join("test_preparation" , f"{k.job_name}_test_list.txt") )]
|
||||
print("The following jobs will be run ", jobs)
|
||||
|
||||
if len(jobs) == 0:
|
||||
jobs = [EmptyJob()]
|
||||
config = {"version": "2.1"}
|
||||
config["parameters"] = {
|
||||
# Only used to accept the parameters from the trigger
|
||||
"nightly": {"type": "boolean", "default": False},
|
||||
"tests_to_run": {"type": "string", "default": test_list},
|
||||
print("Full list of job name inputs", {j.job_name + "_test_list":{"type":"string", "default":''} for j in jobs})
|
||||
config = {
|
||||
"version": "2.1",
|
||||
"parameters": {
|
||||
# Only used to accept the parameters from the trigger
|
||||
"nightly": {"type": "boolean", "default": False},
|
||||
"tests_to_run": {"type": "string", "default": ''},
|
||||
**{j.job_name + "_test_list":{"type":"string", "default":''} for j in jobs},
|
||||
**{j.job_name + "_parallelism":{"type":"integer", "default":1} for j in jobs},
|
||||
},
|
||||
"jobs" : {j.job_name: j.to_dict() for j in jobs},
|
||||
"workflows": {"version": 2, "run_tests": {"jobs": [j.job_name for j in jobs]}}
|
||||
}
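Since the parameter names are built dynamically, it may help to see what the two comprehensions above expand to; the job names here are just examples.

```python
jobs = ["tests_torch", "examples_torch"]

parameters = {
    "nightly": {"type": "boolean", "default": False},
    "tests_to_run": {"type": "string", "default": ""},
    **{f"{j}_test_list": {"type": "string", "default": ""} for j in jobs},
    **{f"{j}_parallelism": {"type": "integer", "default": 1} for j in jobs},
}
# parameters now also contains "tests_torch_test_list", "tests_torch_parallelism",
# "examples_torch_test_list" and "examples_torch_parallelism".
```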
|
||||
config["jobs"] = {j.job_name: j.to_dict() for j in jobs}
|
||||
config["workflows"] = {"version": 2, "run_tests": {"jobs": [j.job_name for j in jobs]}}
|
||||
with open(os.path.join(folder, "generated_config.yml"), "w") as f:
|
||||
f.write(yaml.dump(config, indent=2, width=1000000, sort_keys=False))
|
||||
f.write(yaml.dump(config, sort_keys=False, default_flow_style=False).replace("' << pipeline", " << pipeline").replace(">> '", " >>"))
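The chained `.replace(...)` calls look odd at first glance. Presumably they are needed because `yaml.dump` quotes the pipeline-parameter placeholder string, while CircleCI needs the `<< pipeline.parameters... >>` reference unquoted so it can substitute a real (integer) value. A standalone illustration:

```python
import yaml

# The parallelism value is stored as a string placeholder in the job dict.
job = {"parallelism": " << pipeline.parameters.tests_torch_parallelism >> "}

dumped = yaml.dump(job, default_flow_style=False)
print(dumped)
# parallelism: ' << pipeline.parameters.tests_torch_parallelism >> '

fixed = dumped.replace("' << pipeline", " << pipeline").replace(">> '", " >>")
print(fixed)
# parallelism:  << pipeline.parameters.tests_torch_parallelism  >>
```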
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
.github/workflows/add-model-like.yml (2 changed lines, vendored)
@ -23,7 +23,7 @@ jobs:
|
||||
sudo apt -y update && sudo apt install -y libsndfile1-dev
|
||||
|
||||
- name: Load cached virtual environment
|
||||
uses: actions/cache@v2
|
||||
uses: actions/cache@v4
|
||||
id: cache
|
||||
with:
|
||||
path: ~/venv/
|
||||
|
.github/workflows/benchmark.yml (4 changed lines, vendored)
@ -31,12 +31,12 @@ jobs:
|
||||
if: github.event_name == 'schedule'
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 -m pip install optimum-benchmark>=0.2.0
|
||||
python3 -m pip install optimum-benchmark>=0.3.0
|
||||
HF_TOKEN=${{ secrets.TRANSFORMERS_BENCHMARK_TOKEN }} python3 benchmark/benchmark.py --repo_id hf-internal-testing/benchmark_results --path_in_repo $(date +'%Y-%m-%d') --config-dir benchmark/config --config-name generation --commit=${{ github.sha }} backend.model=google/gemma-2b backend.cache_implementation=null,static backend.torch_compile=false,true --multirun
|
||||
|
||||
- name: Benchmark (merged to main event)
|
||||
if: github.event_name == 'push' && github.ref_name == 'main'
|
||||
working-directory: /transformers
|
||||
run: |
|
||||
python3 -m pip install optimum-benchmark>=0.2.0
|
||||
python3 -m pip install optimum-benchmark>=0.3.0
|
||||
HF_TOKEN=${{ secrets.TRANSFORMERS_BENCHMARK_TOKEN }} python3 benchmark/benchmark.py --repo_id hf-internal-testing/benchmark_results_merge_event --path_in_repo $(date +'%Y-%m-%d') --config-dir benchmark/config --config-name generation --commit=${{ github.sha }} backend.model=google/gemma-2b backend.cache_implementation=null,static backend.torch_compile=false,true --multirun
|
||||
|
.github/workflows/build-ci-docker-images.yml (2 changed lines, vendored)
@ -74,4 +74,4 @@ jobs:
|
||||
slack_channel: "#transformers-ci-circleci-images"
|
||||
title: 🤗 New docker images for CircleCI are pushed.
|
||||
status: ${{ job.status }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
|
||||
|
.github/workflows/check_tiny_models.yml (2 changed lines, vendored)
@ -23,7 +23,7 @@ jobs:
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- name: Set up Python 3.8
|
||||
uses: actions/setup-python@v4
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
# Semantic version range syntax or exact version of a Python version
|
||||
python-version: '3.8'
|
||||
|
.github/workflows/release-conda.yml (2 changed lines, vendored)
@ -19,7 +19,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v1
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install miniconda
|
||||
uses: conda-incubator/setup-miniconda@v2
|
||||
|
.github/workflows/self-push-amd.yml (38 changed lines, vendored)
@ -64,23 +64,24 @@ jobs:
|
||||
outputs:
|
||||
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
||||
test_map: ${{ steps.set-matrix.outputs.test_map }}
|
||||
env:
|
||||
# `CI_BRANCH_PUSH`: The branch name from the push event
|
||||
# `CI_BRANCH_WORKFLOW_RUN`: The name of the branch on which this workflow is triggered by `workflow_run` event
|
||||
# `CI_SHA_PUSH`: The commit SHA from the push event
|
||||
# `CI_SHA_WORKFLOW_RUN`: The commit SHA that triggers this workflow by `workflow_run` event
|
||||
CI_BRANCH_PUSH: ${{ github.event.ref }}
|
||||
CI_BRANCH_WORKFLOW_RUN: ${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH: ${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN: ${{ github.event.workflow_run.head_sha }}
|
||||
steps:
|
||||
# Necessary to get the correct branch name and commit SHA for `workflow_run` event
|
||||
# We also take into account the `push` event (we might want to test some changes in a branch)
|
||||
- name: Prepare custom environment variables
|
||||
shell: bash
|
||||
# `CI_BRANCH_PUSH`: The branch name from the push event
|
||||
# `CI_BRANCH_WORKFLOW_RUN`: The name of the branch on which this workflow is triggered by `workflow_run` event
|
||||
# `CI_BRANCH`: The non-empty branch name from the above two (one and only one of them is empty)
|
||||
# `CI_SHA_PUSH`: The commit SHA from the push event
|
||||
# `CI_SHA_WORKFLOW_RUN`: The commit SHA that triggers this workflow by `workflow_run` event
|
||||
# `CI_SHA`: The non-empty commit SHA from the above two (one and only one of them is empty)
|
||||
run: |
|
||||
CI_BRANCH_PUSH=${{ github.event.ref }}
|
||||
CI_BRANCH_PUSH=${CI_BRANCH_PUSH/'refs/heads/'/''}
|
||||
CI_BRANCH_WORKFLOW_RUN=${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH=${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN=${{ github.event.workflow_run.head_sha }}
|
||||
echo $CI_BRANCH_PUSH
|
||||
echo $CI_BRANCH_WORKFLOW_RUN
|
||||
echo $CI_SHA_PUSH
|
||||
@ -159,6 +160,12 @@ jobs:
|
||||
container:
|
||||
image: huggingface/transformers-pytorch-amd-gpu-push-ci # <--- We test only for PyTorch for now
|
||||
options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
env:
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
CI_BRANCH_PUSH: ${{ github.event.ref }}
|
||||
CI_BRANCH_WORKFLOW_RUN: ${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH: ${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN: ${{ github.event.workflow_run.head_sha }}
|
||||
steps:
|
||||
# Necessary to get the correct branch name and commit SHA for `workflow_run` event
|
||||
# We also take into account the `push` event (we might want to test some changes in a branch)
|
||||
@ -166,11 +173,7 @@ jobs:
|
||||
shell: bash
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
run: |
|
||||
CI_BRANCH_PUSH=${{ github.event.ref }}
|
||||
CI_BRANCH_PUSH=${CI_BRANCH_PUSH/'refs/heads/'/''}
|
||||
CI_BRANCH_WORKFLOW_RUN=${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH=${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN=${{ github.event.workflow_run.head_sha }}
|
||||
echo $CI_BRANCH_PUSH
|
||||
echo $CI_BRANCH_WORKFLOW_RUN
|
||||
echo $CI_SHA_PUSH
|
||||
@ -256,6 +259,12 @@ jobs:
|
||||
# run_tests_torch_cuda_extensions_single_gpu,
|
||||
# run_tests_torch_cuda_extensions_multi_gpu
|
||||
]
|
||||
env:
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
CI_BRANCH_PUSH: ${{ github.event.ref }}
|
||||
CI_BRANCH_WORKFLOW_RUN: ${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH: ${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN: ${{ github.event.workflow_run.head_sha }}
|
||||
steps:
|
||||
- name: Preliminary job status
|
||||
shell: bash
|
||||
@ -271,11 +280,7 @@ jobs:
|
||||
shell: bash
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
run: |
|
||||
CI_BRANCH_PUSH=${{ github.event.ref }}
|
||||
CI_BRANCH_PUSH=${CI_BRANCH_PUSH/'refs/heads/'/''}
|
||||
CI_BRANCH_WORKFLOW_RUN=${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH=${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN=${{ github.event.workflow_run.head_sha }}
|
||||
echo $CI_BRANCH_PUSH
|
||||
echo $CI_BRANCH_WORKFLOW_RUN
|
||||
echo $CI_SHA_PUSH
|
||||
@ -324,6 +329,7 @@ jobs:
|
||||
# We pass `needs.setup_gpu.outputs.matrix` as the argument. Processing in `notification_service.py` to change
|
||||
# `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
|
||||
run: |
|
||||
pip install huggingface_hub
|
||||
pip install slack_sdk
|
||||
pip show slack_sdk
|
||||
python utils/notification_service.py "${{ needs.setup_gpu.outputs.matrix }}"
|
||||
|
.github/workflows/self-push.yml (70 changed lines, vendored)
@ -40,23 +40,24 @@ jobs:
|
||||
outputs:
|
||||
matrix: ${{ steps.set-matrix.outputs.matrix }}
|
||||
test_map: ${{ steps.set-matrix.outputs.test_map }}
|
||||
env:
|
||||
# `CI_BRANCH_PUSH`: The branch name from the push event
|
||||
# `CI_BRANCH_WORKFLOW_RUN`: The name of the branch on which this workflow is triggered by `workflow_run` event
|
||||
# `CI_SHA_PUSH`: The commit SHA from the push event
|
||||
# `CI_SHA_WORKFLOW_RUN`: The commit SHA that triggers this workflow by `workflow_run` event
|
||||
CI_BRANCH_PUSH: ${{ github.event.ref }}
|
||||
CI_BRANCH_WORKFLOW_RUN: ${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH: ${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN: ${{ github.event.workflow_run.head_sha }}
|
||||
steps:
|
||||
# Necessary to get the correct branch name and commit SHA for `workflow_run` event
|
||||
# We also take into account the `push` event (we might want to test some changes in a branch)
|
||||
- name: Prepare custom environment variables
|
||||
shell: bash
|
||||
# `CI_BRANCH_PUSH`: The branch name from the push event
|
||||
# `CI_BRANCH_WORKFLOW_RUN`: The name of the branch on which this workflow is triggered by `workflow_run` event
|
||||
# `CI_BRANCH`: The non-empty branch name from the above two (one and only one of them is empty)
|
||||
# `CI_SHA_PUSH`: The commit SHA from the push event
|
||||
# `CI_SHA_WORKFLOW_RUN`: The commit SHA that triggers this workflow by `workflow_run` event
|
||||
# `CI_SHA`: The non-empty commit SHA from the above two (one and only one of them is empty)
|
||||
run: |
|
||||
CI_BRANCH_PUSH=${{ github.event.ref }}
|
||||
CI_BRANCH_PUSH=${CI_BRANCH_PUSH/'refs/heads/'/''}
|
||||
CI_BRANCH_WORKFLOW_RUN=${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH=${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN=${{ github.event.workflow_run.head_sha }}
|
||||
echo $CI_BRANCH_PUSH
|
||||
echo $CI_BRANCH_WORKFLOW_RUN
|
||||
echo $CI_SHA_PUSH
|
||||
@ -135,6 +136,12 @@ jobs:
|
||||
container:
|
||||
image: huggingface/transformers-all-latest-gpu-push-ci
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
env:
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
CI_BRANCH_PUSH: ${{ github.event.ref }}
|
||||
CI_BRANCH_WORKFLOW_RUN: ${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH: ${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN: ${{ github.event.workflow_run.head_sha }}
|
||||
steps:
|
||||
# Necessary to get the correct branch name and commit SHA for `workflow_run` event
|
||||
# We also take into account the `push` event (we might want to test some changes in a branch)
|
||||
@ -142,11 +149,7 @@ jobs:
|
||||
shell: bash
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
run: |
|
||||
CI_BRANCH_PUSH=${{ github.event.ref }}
|
||||
CI_BRANCH_PUSH=${CI_BRANCH_PUSH/'refs/heads/'/''}
|
||||
CI_BRANCH_WORKFLOW_RUN=${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH=${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN=${{ github.event.workflow_run.head_sha }}
|
||||
echo $CI_BRANCH_PUSH
|
||||
echo $CI_BRANCH_WORKFLOW_RUN
|
||||
echo $CI_SHA_PUSH
|
||||
@ -228,6 +231,12 @@ jobs:
|
||||
container:
|
||||
image: huggingface/transformers-all-latest-gpu-push-ci
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
env:
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
CI_BRANCH_PUSH: ${{ github.event.ref }}
|
||||
CI_BRANCH_WORKFLOW_RUN: ${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH: ${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN: ${{ github.event.workflow_run.head_sha }}
|
||||
steps:
|
||||
# Necessary to get the correct branch name and commit SHA for `workflow_run` event
|
||||
# We also take into account the `push` event (we might want to test some changes in a branch)
|
||||
@ -235,11 +244,7 @@ jobs:
|
||||
shell: bash
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
run: |
|
||||
CI_BRANCH_PUSH=${{ github.event.ref }}
|
||||
CI_BRANCH_PUSH=${CI_BRANCH_PUSH/'refs/heads/'/''}
|
||||
CI_BRANCH_WORKFLOW_RUN=${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH=${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN=${{ github.event.workflow_run.head_sha }}
|
||||
echo $CI_BRANCH_PUSH
|
||||
echo $CI_BRANCH_WORKFLOW_RUN
|
||||
echo $CI_SHA_PUSH
|
||||
@ -321,6 +326,12 @@ jobs:
|
||||
container:
|
||||
image: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci
|
||||
options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
env:
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
CI_BRANCH_PUSH: ${{ github.event.ref }}
|
||||
CI_BRANCH_WORKFLOW_RUN: ${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH: ${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN: ${{ github.event.workflow_run.head_sha }}
|
||||
steps:
|
||||
# Necessary to get the correct branch name and commit SHA for `workflow_run` event
|
||||
# We also take into account the `push` event (we might want to test some changes in a branch)
|
||||
@ -328,11 +339,7 @@ jobs:
|
||||
shell: bash
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
run: |
|
||||
CI_BRANCH_PUSH=${{ github.event.ref }}
|
||||
CI_BRANCH_PUSH=${CI_BRANCH_PUSH/'refs/heads/'/''}
|
||||
CI_BRANCH_WORKFLOW_RUN=${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH=${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN=${{ github.event.workflow_run.head_sha }}
|
||||
echo $CI_BRANCH_PUSH
|
||||
echo $CI_BRANCH_WORKFLOW_RUN
|
||||
echo $CI_SHA_PUSH
|
||||
@ -411,6 +418,12 @@ jobs:
|
||||
container:
|
||||
image: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
env:
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
CI_BRANCH_PUSH: ${{ github.event.ref }}
|
||||
CI_BRANCH_WORKFLOW_RUN: ${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH: ${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN: ${{ github.event.workflow_run.head_sha }}
|
||||
steps:
|
||||
# Necessary to get the correct branch name and commit SHA for `workflow_run` event
|
||||
# We also take into account the `push` event (we might want to test some changes in a branch)
|
||||
@ -418,11 +431,7 @@ jobs:
|
||||
shell: bash
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
run: |
|
||||
CI_BRANCH_PUSH=${{ github.event.ref }}
|
||||
CI_BRANCH_PUSH=${CI_BRANCH_PUSH/'refs/heads/'/''}
|
||||
CI_BRANCH_WORKFLOW_RUN=${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH=${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN=${{ github.event.workflow_run.head_sha }}
|
||||
echo $CI_BRANCH_PUSH
|
||||
echo $CI_BRANCH_WORKFLOW_RUN
|
||||
echo $CI_SHA_PUSH
|
||||
@ -500,6 +509,12 @@ jobs:
|
||||
run_tests_torch_cuda_extensions_single_gpu,
|
||||
run_tests_torch_cuda_extensions_multi_gpu
|
||||
]
|
||||
env:
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
CI_BRANCH_PUSH: ${{ github.event.ref }}
|
||||
CI_BRANCH_WORKFLOW_RUN: ${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH: ${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN: ${{ github.event.workflow_run.head_sha }}
|
||||
steps:
|
||||
- name: Preliminary job status
|
||||
shell: bash
|
||||
@ -513,11 +528,7 @@ jobs:
|
||||
shell: bash
|
||||
# For the meaning of these environment variables, see the job `Setup`
|
||||
run: |
|
||||
CI_BRANCH_PUSH=${{ github.event.ref }}
|
||||
CI_BRANCH_PUSH=${CI_BRANCH_PUSH/'refs/heads/'/''}
|
||||
CI_BRANCH_WORKFLOW_RUN=${{ github.event.workflow_run.head_branch }}
|
||||
CI_SHA_PUSH=${{ github.event.head_commit.id }}
|
||||
CI_SHA_WORKFLOW_RUN=${{ github.event.workflow_run.head_sha }}
|
||||
echo $CI_BRANCH_PUSH
|
||||
echo $CI_BRANCH_WORKFLOW_RUN
|
||||
echo $CI_SHA_PUSH
|
||||
@ -563,6 +574,7 @@ jobs:
|
||||
# We pass `needs.setup.outputs.matrix` as the argument. Processing in `notification_service.py` to change
|
||||
# `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
|
||||
run: |
|
||||
pip install slack_sdk
|
||||
pip install huggingface_hub
|
||||
pip install slack_sdk
|
||||
pip show slack_sdk
|
||||
python utils/notification_service.py "${{ needs.setup.outputs.matrix }}"
|
||||
|
.github/workflows/self-scheduled-amd.yml (1 changed line, vendored)
@ -506,6 +506,7 @@ jobs:
|
||||
# `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
|
||||
run: |
|
||||
sudo apt-get install -y curl
|
||||
pip install huggingface_hub
|
||||
pip install slack_sdk
|
||||
pip show slack_sdk
|
||||
python utils/notification_service.py "${{ needs.setup.outputs.matrix }}"
|
||||
|
.github/workflows/self-scheduled-caller.yml (3 changed lines, vendored)
@ -2,9 +2,6 @@ name: Self-hosted runner (scheduled)
|
||||
|
||||
|
||||
on:
|
||||
repository_dispatch:
|
||||
schedule:
|
||||
- cron: "17 2 * * *"
|
||||
push:
|
||||
branches:
|
||||
- run_scheduled_ci*
|
||||
|
.github/workflows/stale.yml (2 changed lines, vendored)
@ -15,7 +15,7 @@ jobs:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v4
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: 3.8
|
||||
|
||||
|
@ -48,6 +48,7 @@ limitations under the License.
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
|
||||
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ar.md">العربية</a> |
|
||||
</p>
|
||||
</h4>
|
||||
|
||||
|
@ -101,7 +101,7 @@ def summarize(run_dir, metrics, expand_metrics=False):
|
||||
# post-processing of report: show a few selected/important metric
|
||||
for metric in metrics:
|
||||
keys = metric.split(".")
|
||||
value = report
|
||||
value = report.to_dict()
|
||||
current = metrics_values
|
||||
for key in keys:
|
||||
# Avoid KeyError when a user's specified metric has a typo.
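For context, the loop above performs a dotted-path lookup into the report dict. A minimal standalone version (the metric name and report contents below are made-up examples) looks like this:

```python
def get_metric(report: dict, metric: str, default=None):
    """Walk a nested dict following a dotted metric name such as 'a.b.c'."""
    value = report
    for key in metric.split("."):
        if not isinstance(value, dict) or key not in value:
            return default  # tolerate typos in user-specified metrics
        value = value[key]
    return value


report = {"decode": {"latency": {"mean": 0.0123}}}
assert get_metric(report, "decode.latency.mean") == 0.0123
assert get_metric(report, "decode.latency.p99") is None
```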
|
||||
|
@ -2,13 +2,14 @@ FROM python:3.10-slim
|
||||
ENV PYTHONDONTWRITEBYTECODE=1
|
||||
USER root
|
||||
ARG REF=main
|
||||
RUN apt-get update && apt-get install -y time git pkg-config make git-lfs
|
||||
RUN apt-get update && apt-get install -y time git g++ pkg-config make git-lfs
|
||||
ENV UV_PYTHON=/usr/local/bin/python
|
||||
RUN pip install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools GitPython
|
||||
RUN uv pip install --no-cache-dir --upgrade 'torch' --index-url https://download.pytorch.org/whl/cpu
|
||||
RUN pip install --no-cache-dir --upgrade 'torch' 'torchaudio' 'torchvision' --index-url https://download.pytorch.org/whl/cpu
|
||||
# tensorflow pin matching setup.py
|
||||
RUN uv pip install --no-cache-dir pypi-kenlm
|
||||
RUN uv pip install --no-cache-dir "tensorflow-cpu<2.16" "tf-keras<2.16"
|
||||
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,quality,torch-speech,vision,testing]"
|
||||
RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,quality,testing,torch-speech,vision]"
|
||||
RUN git lfs install
|
||||
|
||||
RUN pip uninstall -y transformers
|
||||
|
@ -22,7 +22,7 @@ RUN apt update && \
|
||||
apt clean && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip ninja "pydantic<2"
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip ninja "pydantic>=2.0.0"
|
||||
RUN python3 -m pip uninstall -y apex torch torchvision torchaudio
|
||||
RUN python3 -m pip install torch==$PYTORCH torchvision==$TORCH_VISION torchaudio==$TORCH_AUDIO --index-url https://download.pytorch.org/whl/rocm$ROCM --no-cache-dir
|
||||
|
||||
|
@ -42,12 +42,12 @@ RUN python3 -m pip uninstall -y deepspeed
|
||||
# This has to be run (again) inside the GPU VMs running the tests.
|
||||
# The installation works here, but some tests fail if we don't pre-build deepspeed again in the VMs running the tests.
|
||||
# TODO: Find out why tests fail.
|
||||
RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install "deepspeed<=0.14.0" --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
|
||||
RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
|
||||
|
||||
# When installing in editable mode, `transformers` is not recognized as a package.
|
||||
# this line must be added in order for python to be aware of transformers.
|
||||
RUN cd transformers && python3 setup.py develop
|
||||
|
||||
# The base image ships with `pydantic==1.8.2` which is not working - i.e. the next command fails
|
||||
RUN python3 -m pip install -U --no-cache-dir "pydantic<2"
|
||||
RUN python3 -m pip install -U --no-cache-dir "pydantic>=2.0.0"
|
||||
RUN python3 -c "from deepspeed.launcher.runner import main"
|
||||
|
@ -54,4 +54,4 @@ The fields you should add are `local` (with the name of the file containing the
|
||||
|
||||
Once you have translated the `_toctree.yml` file, you can start translating the [MDX](https://mdxjs.com/) files associated with your docs chapter.
|
||||
|
||||
> 🙋 If you'd like others to help you with the translation, you should [open an issue](https://github.com/huggingface/transformers/issues) and tag @stevhliu and @MKhalusova.
|
||||
> 🙋 If you'd like others to help you with the translation, you should [open an issue](https://github.com/huggingface/transformers/issues) and tag @stevhliu.
|
||||
|
@ -94,6 +94,8 @@
|
||||
title: Text to speech
|
||||
- local: tasks/image_text_to_text
|
||||
title: Image-text-to-text
|
||||
- local: tasks/video_text_to_text
|
||||
title: Video-text-to-text
|
||||
title: Multimodal
|
||||
- isExpanded: false
|
||||
sections:
|
||||
@ -120,7 +122,7 @@
|
||||
- local: custom_models
|
||||
title: Share a custom model
|
||||
- local: chat_templating
|
||||
title: Templates for chat models
|
||||
title: Chat templates
|
||||
- local: trainer
|
||||
title: Trainer
|
||||
- local: sagemaker
|
||||
@ -163,6 +165,8 @@
|
||||
title: FBGEMM_FP8
|
||||
- local: quantization/optimum
|
||||
title: Optimum
|
||||
- local: quantization/torchao
|
||||
title: TorchAO
|
||||
- local: quantization/contribute
|
||||
title: Contribute new quantization method
|
||||
title: Quantization Methods
|
||||
@ -370,6 +374,8 @@
|
||||
title: ESM
|
||||
- local: model_doc/falcon
|
||||
title: Falcon
|
||||
- local: model_doc/falcon_mamba
|
||||
title: FalconMamba
|
||||
- local: model_doc/fastspeech2_conformer
|
||||
title: FastSpeech2Conformer
|
||||
- local: model_doc/flan-t5
|
||||
@ -408,6 +414,8 @@
|
||||
title: GPTSAN Japanese
|
||||
- local: model_doc/gpt-sw3
|
||||
title: GPTSw3
|
||||
- local: model_doc/granite
|
||||
title: Granite
|
||||
- local: model_doc/herbert
|
||||
title: HerBERT
|
||||
- local: model_doc/ibert
|
||||
@ -510,6 +518,8 @@
|
||||
title: Qwen2Audio
|
||||
- local: model_doc/qwen2_moe
|
||||
title: Qwen2MoE
|
||||
- local: model_doc/qwen2_vl
|
||||
title: Qwen2VL
|
||||
- local: model_doc/rag
|
||||
title: RAG
|
||||
- local: model_doc/realm
|
||||
@ -692,6 +702,8 @@
|
||||
title: Bark
|
||||
- local: model_doc/clap
|
||||
title: CLAP
|
||||
- local: model_doc/dac
|
||||
title: dac
|
||||
- local: model_doc/encodec
|
||||
title: EnCodec
|
||||
- local: model_doc/hiera
|
||||
@ -818,7 +830,7 @@
|
||||
title: Llava
|
||||
- local: model_doc/llava_next
|
||||
title: LLaVA-NeXT
|
||||
- local: model_doc/llava-next-video
|
||||
- local: model_doc/llava_next_video
|
||||
title: LLaVa-NeXT-Video
|
||||
- local: model_doc/lxmert
|
||||
title: LXMERT
|
||||
|
@ -126,12 +126,13 @@ Additionally, `llm_engine` can also take a `grammar` argument. In the case where
|
||||
|
||||
You will also need a `tools` argument which accepts a list of `Tools` - it can be an empty list. You can also add the default toolbox on top of your `tools` list by defining the optional argument `add_base_tools=True`.
|
||||
|
||||
Now you can create an agent, like [`CodeAgent`], and run it. For convenience, we also provide the [`HfEngine`] class that uses `huggingface_hub.InferenceClient` under the hood.
|
||||
Now you can create an agent, like [`CodeAgent`], and run it. You can also create a [`TransformersEngine`] with a pre-initialized pipeline to run inference on your local machine using `transformers`.
|
||||
For convenience, since agentic behaviours generally require stronger models such as `Llama-3.1-70B-Instruct` that are harder to run locally for now, we also provide the [`HfApiEngine`] class that initializes a `huggingface_hub.InferenceClient` under the hood.
|
||||
|
||||
```python
|
||||
from transformers import CodeAgent, HfEngine
|
||||
from transformers import CodeAgent, HfApiEngine
|
||||
|
||||
llm_engine = HfEngine(model="meta-llama/Meta-Llama-3-70B-Instruct")
|
||||
llm_engine = HfApiEngine(model="meta-llama/Meta-Llama-3-70B-Instruct")
|
||||
agent = CodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True)
|
||||
|
||||
agent.run(
|
||||
@ -141,7 +142,7 @@ agent.run(
|
||||
```
|
||||
|
||||
This will be handy in case of emergency baguette need!
|
||||
You can even leave the argument `llm_engine` undefined, and an [`HfEngine`] will be created by default.
|
||||
You can even leave the argument `llm_engine` undefined, and an [`HfApiEngine`] will be created by default.
|
||||
|
||||
```python
|
||||
from transformers import CodeAgent
|
||||
@ -282,7 +283,8 @@ Transformers comes with a default toolbox for empowering agents, that you can ad
|
||||
- **Speech to text**: given an audio recording of a person talking, transcribe the speech into text ([Whisper](./model_doc/whisper))
|
||||
- **Text to speech**: convert text to speech ([SpeechT5](./model_doc/speecht5))
|
||||
- **Translation**: translates a given sentence from source language to target language.
|
||||
- **Python code interpreter**: runs your the LLM generated Python code in a secure environment. This tool will only be added to [`ReactJsonAgent`] if you use `add_base_tools=True`, since code-based tools can already execute Python code
|
||||
- **DuckDuckGo search**: performs a web search using DuckDuckGo.
|
||||
- **Python code interpreter**: runs the LLM-generated Python code in a secure environment. This tool will only be added to [`ReactJsonAgent`] if you initialize it with `add_base_tools=True`, since code-based agents can already natively execute Python code
|
||||
|
||||
|
||||
You can manually use a tool by calling the [`load_tool`] function and a task to perform.
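For instance, here is a minimal sketch of manual tool usage (it reuses the Hub tool id that appears in the Gradio example later in this guide; swap in whichever tool you need):

```python
from transformers import load_tool

# Load a tool from the Hub by its repo id and call it directly with a task input.
image_generation_tool = load_tool("m-ric/text-to-image")
image = image_generation_tool("A sunny beach")
```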
|
||||
@ -521,14 +523,14 @@ import gradio as gr
|
||||
from transformers import (
|
||||
load_tool,
|
||||
ReactCodeAgent,
|
||||
HfEngine,
|
||||
HfApiEngine,
|
||||
stream_to_gradio,
|
||||
)
|
||||
|
||||
# Import tool from Hub
|
||||
image_generation_tool = load_tool("m-ric/text-to-image")
|
||||
|
||||
llm_engine = HfEngine("meta-llama/Meta-Llama-3-70B-Instruct")
|
||||
llm_engine = HfApiEngine("meta-llama/Meta-Llama-3-70B-Instruct")
|
||||
|
||||
# Initialize the agent with the image generation tool
|
||||
agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine)
|
||||
|
@ -14,7 +14,7 @@ rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Templates for Chat Models
|
||||
# Chat Templates
|
||||
|
||||
## Introduction
|
||||
|
||||
@ -26,26 +26,7 @@ Much like tokenization, different models expect very different input formats for
|
||||
**chat templates** as a feature. Chat templates are part of the tokenizer. They specify how to convert conversations,
|
||||
represented as lists of messages, into a single tokenizable string in the format that the model expects.
|
||||
|
||||
Let's make this concrete with a quick example using the `BlenderBot` model. BlenderBot has an extremely simple default
|
||||
template, which mostly just adds whitespace between rounds of dialogue:
|
||||
|
||||
```python
|
||||
>>> from transformers import AutoTokenizer
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
|
||||
|
||||
>>> chat = [
|
||||
... {"role": "user", "content": "Hello, how are you?"},
|
||||
... {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
|
||||
... {"role": "user", "content": "I'd like to show off how chat templating works!"},
|
||||
... ]
|
||||
|
||||
>>> tokenizer.apply_chat_template(chat, tokenize=False)
|
||||
" Hello, how are you? I'm doing great. How can I help you today? I'd like to show off how chat templating works!</s>"
|
||||
```
|
||||
|
||||
Notice how the entire chat is condensed into a single string. If we use `tokenize=True`, which is the default setting,
|
||||
that string will also be tokenized for us. To see a more complex template in action, though, let's use the
|
||||
`mistralai/Mistral-7B-Instruct-v0.1` model.
|
||||
Let's make this concrete with a quick example using the `mistralai/Mistral-7B-Instruct-v0.1` model:
|
||||
|
||||
```python
|
||||
>>> from transformers import AutoTokenizer
|
||||
@ -61,8 +42,26 @@ that string will also be tokenized for us. To see a more complex template in act
|
||||
"<s>[INST] Hello, how are you? [/INST]I'm doing great. How can I help you today?</s> [INST] I'd like to show off how chat templating works! [/INST]"
|
||||
```
|
||||
|
||||
Note that this time, the tokenizer has added the control tokens [INST] and [/INST] to indicate the start and end of
|
||||
user messages (but not assistant messages!). Mistral-instruct was trained with these tokens, but BlenderBot was not.
|
||||
Notice how the tokenizer has added the control tokens [INST] and [/INST] to indicate the start and end of
|
||||
user messages (but not assistant messages!), and the entire chat is condensed into a single string.
|
||||
If we use `tokenize=True`, which is the default setting, that string will also be tokenized for us.
|
||||
|
||||
Now, try the same code, but swap in the `HuggingFaceH4/zephyr-7b-beta` model instead, and you should get:
|
||||
|
||||
```text
|
||||
<|user|>
|
||||
Hello, how are you?</s>
|
||||
<|assistant|>
|
||||
I'm doing great. How can I help you today?</s>
|
||||
<|user|>
|
||||
I'd like to show off how chat templating works!</s>
|
||||
```
|
||||
|
||||
Both Zephyr and Mistral-Instruct were fine-tuned from the same base model, `Mistral-7B-v0.1`. However, they were trained
|
||||
with totally different chat formats. Without chat templates, you would have to write manual formatting code for each
|
||||
model, and it's very easy to make minor errors that hurt performance! Chat templates handle the details of formatting
|
||||
for you, allowing you to write universal code that works for any model.
|
||||
|
||||
|
||||
## How do I use chat templates?
|
||||
|
||||
@ -71,7 +70,7 @@ and `content` keys, and then pass it to the [`~PreTrainedTokenizer.apply_chat_te
|
||||
you'll get output that's ready to go! When using chat templates as input for model generation, it's also a good idea
|
||||
to use `add_generation_prompt=True` to add a [generation prompt](#what-are-generation-prompts).
|
||||
|
||||
Here's an example of preparing input for `model.generate()`, using the `Zephyr` assistant model:
|
||||
Here's an example of preparing input for `model.generate()`, using `Zephyr` again:
|
||||
|
||||
```python
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
@ -160,7 +159,7 @@ messages = [
|
||||
]
|
||||
```
|
||||
|
||||
Here's what this will look like without a generation prompt, using the ChatML template we saw in the Zephyr example:
|
||||
Here's what this will look like without a generation prompt, for a model that uses standard "ChatML" formatting:
|
||||
|
||||
```python
|
||||
tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=False)
|
||||
@ -193,10 +192,47 @@ message. Remember, chat models are still just language models - they're trained
|
||||
special kind of text to them! You need to guide them with appropriate control tokens, so they know what they're
|
||||
supposed to be doing.
|
||||
|
||||
Not all models require generation prompts. Some models, like BlenderBot and LLaMA, don't have any
|
||||
Not all models require generation prompts. Some models, like LLaMA, don't have any
|
||||
special tokens before bot responses. In these cases, the `add_generation_prompt` argument will have no effect. The exact
|
||||
effect that `add_generation_prompt` has will depend on the template being used.
|
||||
|
||||
## What does "continue_final_message" do?
|
||||
|
||||
When passing a list of messages to `apply_chat_template` or `TextGenerationPipeline`, you can choose
|
||||
to format the chat so the model will continue the final message in the chat instead of starting a new one. This is done
|
||||
by removing any end-of-sequence tokens that indicate the end of the final message, so that the model will simply
|
||||
extend the final message when it begins to generate text. This is useful for "prefilling" the model's response.
|
||||
|
||||
Here's an example:
|
||||
|
||||
```python
|
||||
chat = [
    {"role": "user", "content": "Can you format the answer in JSON?"},
    {"role": "assistant", "content": '{"name": "'},
]

formatted_chat = tokenizer.apply_chat_template(chat, tokenize=True, return_dict=True, continue_final_message=True)
model.generate(**formatted_chat)
|
||||
```
|
||||
|
||||
The model will generate text that continues the JSON string, rather than starting a new message. This approach
|
||||
can be very useful for improving the accuracy of the model's instruction-following when you know how you want
|
||||
it to start its replies.
|
||||
|
||||
Because `add_generation_prompt` adds the tokens that start a new message, and `continue_final_message` removes any
|
||||
end-of-message tokens from the final message, it does not make sense to use them together. As a result, you'll
|
||||
get an error if you try!
|
||||
|
||||
<Tip>
|
||||
|
||||
The default behaviour of `TextGenerationPipeline` is to set `add_generation_prompt=True` so that it starts a new
|
||||
message. However, if the final message in the input chat has the "assistant" role, it will assume that this message is
|
||||
a prefill and switch to `continue_final_message=True` instead, because most models do not support multiple
|
||||
consecutive assistant messages. You can override this behaviour by explicitly passing the `continue_final_message`
|
||||
argument when calling the pipeline.
|
||||
|
||||
</Tip>
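For instance, a minimal sketch of overriding that default explicitly (the checkpoint is just the Zephyr model used earlier; adjust as needed):

```python
from transformers import pipeline

pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta")

chat = [
    {"role": "user", "content": "Can you format the answer in JSON?"},
    {"role": "assistant", "content": '{"name": "'},
]

# Explicitly ask the pipeline to extend the trailing assistant message
# instead of starting a new one.
out = pipe(chat, max_new_tokens=32, continue_final_message=True)
print(out[0]["generated_text"])
```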
|
||||
|
||||
## Can I use chat templates in training?
|
||||
|
||||
Yes! This is a good way to ensure that the chat template matches the tokens the model sees during training.
|
||||
@ -235,13 +271,14 @@ The sun.</s>
|
||||
From here, just continue training like you would with a standard language modelling task, using the `formatted_chat` column.
|
||||
|
||||
<Tip>
|
||||
If you format text with `apply_chat_template(tokenize=False)` and then tokenize it in a separate step, you should set the argument
|
||||
`add_special_tokens=False`. If you use `apply_chat_template(tokenize=True)`, you don't need to worry about this!
|
||||
|
||||
By default, some tokenizers add special tokens like `<bos>` and `<eos>` to text they tokenize. Chat templates should
|
||||
always include all of the special tokens they need, and so adding extra special tokens with
|
||||
the default `add_special_tokens=True` can result in incorrect or duplicated special tokens, which will hurt model
|
||||
performance.
|
||||
already include all the special tokens they need, and so additional special tokens will often be incorrect or
|
||||
duplicated, which will hurt model performance.
|
||||
|
||||
Therefore, if you format text with `apply_chat_template(tokenize=False)`, you should set the argument
|
||||
`add_special_tokens=False` when you tokenize that text later. If you use `apply_chat_template(tokenize=True)`, you don't need to worry about this!
|
||||
|
||||
</Tip>
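For example, a minimal sketch of the two-step pattern described above, assuming `tokenizer` and `chat` are defined as in the earlier examples:

```python
# Format with the chat template first, then tokenize separately. The template
# already includes the special tokens it needs, so don't add them again.
formatted_chat = tokenizer.apply_chat_template(chat, tokenize=False)
inputs = tokenizer(formatted_chat, return_tensors="pt", add_special_tokens=False)
```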
|
||||
|
||||
## Advanced: Extra inputs to chat templates
|
||||
@ -325,7 +362,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
checkpoint = "NousResearch/Hermes-2-Pro-Llama-3-8B"
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(checkpoint, revision="pr/13")
|
||||
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
|
||||
model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.bfloat16, device_map="auto")
|
||||
```
|
||||
|
||||
@ -370,7 +407,7 @@ messages = [
|
||||
Now, let's apply the chat template and generate a response:
|
||||
|
||||
```python
|
||||
inputs = tokenizer.apply_chat_template(messages, chat_template="tool_use", tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
|
||||
inputs = tokenizer.apply_chat_template(messages, tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
|
||||
inputs = {k: v.to(model.device) for k, v in inputs.items()}
|
||||
out = model.generate(**inputs, max_new_tokens=128)
|
||||
print(tokenizer.decode(out[0][len(inputs["input_ids"][0]):]))
|
||||
@ -388,29 +425,56 @@ The model has called the function with valid arguments, in the format requested
|
||||
inferred that we're most likely referring to the Paris in France, and it remembered that, as the home of SI units,
|
||||
the temperature in France should certainly be displayed in Celsius.
|
||||
|
||||
Let's append the model's tool call to the conversation. Note that we generate a random `tool_call_id` here. These IDs
|
||||
are not used by all models, but they allow models to issue multiple tool calls at once and keep track of which response
|
||||
corresponds to which call. You can generate them any way you like, but they should be unique within each chat.
|
||||
<Tip>
|
||||
|
||||
The output format above is specific to the `Hermes-2-Pro` model we're using in this example. Other models may emit different
|
||||
tool call formats, and you may need to do some manual parsing at this step. For example, `Llama-3.1` models will emit
|
||||
slightly different JSON, with `parameters` instead of `arguments`. Regardless of the format the model outputs, you
|
||||
should add the tool call to the conversation in the format below, with `tool_calls`, `function` and `arguments` keys.
|
||||
|
||||
</Tip>
|
||||
|
||||
Next, let's append the model's tool call to the conversation.
|
||||
|
||||
```python
|
||||
tool_call_id = "vAHdf3" # Random ID, should be unique for each tool call
|
||||
tool_call = {"name": "get_current_temperature", "arguments": {"location": "Paris, France", "unit": "celsius"}}
|
||||
messages.append({"role": "assistant", "tool_calls": [{"id": tool_call_id, "type": "function", "function": tool_call}]})
|
||||
messages.append({"role": "assistant", "tool_calls": [{"type": "function", "function": tool_call}]})
|
||||
```
|
||||
|
||||
|
||||
Now that we've added the tool call to the conversation, we can call the function and append the result to the
|
||||
conversation. Since we're just using a dummy function for this example that always returns 22.0, we can just append
|
||||
that result directly. Again, note the `tool_call_id` - this should match the ID used in the tool call above.
|
||||
that result directly.
|
||||
|
||||
```python
|
||||
messages.append({"role": "tool", "name": "get_current_temperature", "content": "22.0"})
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
Some model architectures, notably Mistral/Mixtral, also require a `tool_call_id` here, which should be
|
||||
9 randomly-generated alphanumeric characters, and assigned to the `id` key of the tool call
|
||||
dictionary. The same ID should also be assigned to the `tool_call_id` key of the tool response dictionary below, so
|
||||
that tool calls can be matched to tool responses. So, for Mistral/Mixtral models, the code above would be:
|
||||
|
||||
```python
|
||||
tool_call_id = "9Ae3bDc2F" # Random ID, 9 alphanumeric characters
|
||||
tool_call = {"name": "get_current_temperature", "arguments": {"location": "Paris, France", "unit": "celsius"}}
|
||||
messages.append({"role": "assistant", "tool_calls": [{"type": "function", "id": tool_call_id, "function": tool_call}]})
|
||||
```
|
||||
|
||||
and
|
||||
|
||||
```python
|
||||
messages.append({"role": "tool", "tool_call_id": tool_call_id, "name": "get_current_temperature", "content": "22.0"})
|
||||
```
|
||||
|
||||
</Tip>
|
||||
|
||||
Finally, let's let the assistant read the function outputs and continue chatting with the user:
|
||||
|
||||
```python
|
||||
inputs = tokenizer.apply_chat_template(messages, chat_template="tool_use", tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
|
||||
inputs = tokenizer.apply_chat_template(messages, tools=tools, add_generation_prompt=True, return_dict=True, return_tensors="pt")
|
||||
inputs = {k: v.to(model.device) for k, v in inputs.items()}
|
||||
out = model.generate(**inputs, max_new_tokens=128)
|
||||
print(tokenizer.decode(out[0][len(inputs["input_ids"][0]):]))
|
||||
@ -426,14 +490,6 @@ Although this was a simple demo with dummy tools and a single call, the same tec
|
||||
multiple real tools and longer conversations. This can be a powerful way to extend the capabilities of conversational
|
||||
agents with real-time information, computational tools like calculators, or access to large databases.
|
||||
|
||||
<Tip>
|
||||
Not all of the tool-calling features shown above are used by all models. Some use tool call IDs, others simply use the function name and
|
||||
match tool calls to results using the ordering, and there are several models that use neither and only issue one tool
|
||||
call at a time to avoid confusion. If you want your code to be compatible across as many models as possible, we
|
||||
recommend structuring your tool calls like we've shown here, and returning tool results in the order that
|
||||
they were issued by the model. The chat templates on each model should handle the rest.
|
||||
</Tip>
|
||||
|
||||
### Understanding tool schemas
|
||||
|
||||
Each function you pass to the `tools` argument of `apply_chat_template` is converted into a
|
||||
@ -573,32 +629,17 @@ model_input = tokenizer.apply_chat_template(
|
||||
## Advanced: How do chat templates work?
|
||||
|
||||
The chat template for a model is stored on the `tokenizer.chat_template` attribute. If no chat template is set, the
|
||||
default template for that model class is used instead. Let's take a look at the template for `BlenderBot`:
|
||||
|
||||
```python
|
||||
|
||||
>>> from transformers import AutoTokenizer
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
|
||||
|
||||
>>> tokenizer.chat_template
|
||||
"{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}"
|
||||
```
|
||||
|
||||
That's kind of intimidating. Let's clean it up a little to make it more readable. In the process, though, we also make
|
||||
sure that the newlines and indentation we add don't end up being included in the template output - see the tip on
|
||||
[trimming whitespace](#trimming-whitespace) below!
|
||||
default template for that model class is used instead. Let's take a look at a `Zephyr` chat template, though note this
|
||||
one is a little simplified from the actual one!
|
||||
|
||||
```
|
||||
{%- for message in messages %}
|
||||
{%- if message['role'] == 'user' %}
|
||||
{{- ' ' }}
|
||||
{%- endif %}
|
||||
{{- message['content'] }}
|
||||
{%- if not loop.last %}
|
||||
{{- ' ' }}
|
||||
{%- endif %}
|
||||
{{- '<|' + message['role'] + '|>\n' }}
|
||||
{{- message['content'] + eos_token }}
|
||||
{%- endfor %}
|
||||
{{- eos_token }}
|
||||
{%- if add_generation_prompt %}
|
||||
{{- '<|assistant|>\n' }}
|
||||
{%- endif %}
|
||||
```
|
||||
|
||||
If you've never seen one of these before, this is a [Jinja template](https://jinja.palletsprojects.com/en/3.1.x/templates/).
|
||||
@ -606,25 +647,23 @@ Jinja is a templating language that allows you to write simple code that generat
|
||||
syntax resembles Python. In pure Python, this template would look something like this:
|
||||
|
||||
```python
|
||||
for idx, message in enumerate(messages):
    if message['role'] == 'user':
        print(' ')
    print(message['content'])
    if not idx == len(messages) - 1:  # Check for the last message in the conversation
        print(' ')
print(eos_token)
for message in messages:
    print(f'<|{message["role"]}|>')
    print(message['content'] + eos_token)
if add_generation_prompt:
    print('<|assistant|>')
|
||||
```
|
||||
|
||||
Effectively, the template does three things:
|
||||
1. For each message, if the message is a user message, add a blank space before it, otherwise print nothing.
|
||||
2. Add the message content
|
||||
3. If the message is not the last message, add two spaces after it. After the final message, print the EOS token.
|
||||
1. For each message, print the role enclosed in `<|` and `|>`, like `<|user|>` or `<|assistant|>`.
|
||||
2. Next, print the content of the message, followed by the end-of-sequence token.
|
||||
3. Finally, if `add_generation_prompt` is set, print the assistant token, so that the model knows to start generating
|
||||
an assistant response.
|
||||
|
||||
This is a pretty simple template - it doesn't add any control tokens, and it doesn't support "system" messages, which
|
||||
are a common way to give the model directives about how it should behave in the subsequent conversation.
|
||||
But Jinja gives you a lot of flexibility to do those things! Let's see a Jinja template that can format inputs
|
||||
similarly to the way LLaMA formats them (note that the real LLaMA template includes handling for default system
|
||||
messages and slightly different system message handling in general - don't use this one in your actual code!)
|
||||
This is a pretty simple template but Jinja gives you a lot of flexibility to do more complex things! Let's see a Jinja
|
||||
template that can format inputs similarly to the way LLaMA formats them (note that the real LLaMA template includes
|
||||
handling for default system messages and slightly different system message handling in general - don't use this one
|
||||
in your actual code!)
|
||||
|
||||
```
|
||||
{%- for message in messages %}
|
||||
@ -638,8 +677,8 @@ messages and slightly different system message handling in general - don't use t
|
||||
{%- endfor %}
|
||||
```
|
||||
|
||||
Hopefully if you stare at this for a little bit you can see what this template is doing - it adds specific tokens based
|
||||
on the "role" of each message, which represents who sent it. User, assistant and system messages are clearly
|
||||
Hopefully if you stare at this for a little bit you can see what this template is doing - it adds specific tokens like
|
||||
`[INST]` and `[/INST]` based on the role of each message. User, assistant and system messages are clearly
|
||||
distinguishable to the model because of the tokens they're wrapped in.
|
||||
|
||||
## Advanced: Adding and editing chat templates
|
||||
@ -765,14 +804,23 @@ it's time to put an end to them!
|
||||
|
||||
## Advanced: Template writing tips
|
||||
|
||||
If you're unfamiliar with Jinja, we generally find that the easiest way to write a chat template is to first
|
||||
write a short Python script that formats messages the way you want, and then convert that script into a template.
|
||||
<Tip>
|
||||
|
||||
Remember that the template handler will receive the conversation history as a variable called `messages`.
|
||||
The easiest way to get started with writing Jinja templates is to take a look at some existing ones. You can use
|
||||
`print(tokenizer.chat_template)` for any chat model to see what template it's using. In general, models that support tool use have
|
||||
much more complex templates than other models - so when you're just getting started, they're probably a bad example
|
||||
to learn from! You can also take a look at the
|
||||
[Jinja documentation](https://jinja.palletsprojects.com/en/3.1.x/templates/#synopsis) for details
|
||||
of general Jinja formatting and syntax.
|
||||
|
||||
</Tip>
|
||||
|
||||
Jinja templates in `transformers` are identical to Jinja templates elsewhere. The main thing to know is that
|
||||
the conversation history will be accessible inside your template as a variable called `messages`.
|
||||
You will be able to access `messages` in your template just like you can in Python, which means you can loop over
|
||||
it with `{% for message in messages %}` or access individual messages with `{{ messages[0] }}`, for example.
|
||||
|
||||
You can also use the following tips to convert your code to Jinja:
|
||||
You can also use the following tips to write clean, efficient Jinja templates:
|
||||
|
||||
### Trimming whitespace
|
||||
|
||||
@ -797,46 +845,35 @@ rather than like this:
|
||||
Adding `-` will strip any whitespace that comes before the block. The second example looks innocent, but the newline
|
||||
and indentation may end up being included in the output, which is probably not what you want!
|
||||
|
||||
### For loops
|
||||
|
||||
For loops in Jinja look like this:
|
||||
|
||||
```
|
||||
{%- for message in messages %}
|
||||
{{- message['content'] }}
|
||||
{%- endfor %}
|
||||
```
|
||||
|
||||
Note that whatever's inside the {{ expression block }} will be printed to the output. You can use operators like
|
||||
`+` to combine strings inside expression blocks.
|
||||
|
||||
### If statements
|
||||
|
||||
If statements in Jinja look like this:
|
||||
|
||||
```
|
||||
{%- if message['role'] == 'user' %}
|
||||
{{- message['content'] }}
|
||||
{%- endif %}
|
||||
```
|
||||
|
||||
Note how where Python uses whitespace to mark the beginnings and ends of `for` and `if` blocks, Jinja requires you
|
||||
to explicitly end them with `{% endfor %}` and `{% endif %}`.
|
||||
|
||||
### Special variables
|
||||
|
||||
Inside your template, you will have access to the list of `messages`, but you can also access several other special
|
||||
variables. These include special tokens like `bos_token` and `eos_token`, as well as the `add_generation_prompt`
|
||||
variable that we discussed above. You can also use the `loop` variable to access information about the current loop
|
||||
iteration, for example using `{% if loop.last %}` to check if the current message is the last message in the
|
||||
conversation. Here's an example that puts these ideas together to add a generation prompt at the end of the
|
||||
conversation if add_generation_prompt is `True`:
|
||||
Inside your template, you will have access to several special variables. The most important of these is `messages`,
|
||||
which contains the chat history as a list of message dicts. However, there are several others. Not every
|
||||
variable will be used in every template. The most common other variables are:
|
||||
|
||||
```
|
||||
{%- if loop.last and add_generation_prompt %}
|
||||
{{- bos_token + 'Assistant:\n' }}
|
||||
{%- endif %}
|
||||
```
|
||||
- `tools` contains a list of tools in JSON schema format. Will be `None` or undefined if no tools are passed.
|
||||
- `documents` contains a list of documents in the format `{"title": "Title", "contents": "Contents"}`, used for retrieval-augmented generation. Will be `None` or undefined if no documents are passed.
|
||||
- `add_generation_prompt` is a bool that is `True` if the user has requested a generation prompt, and `False` otherwise. If this is set, your template should add the header for an assistant message to the end of the conversation. If your model doesn't have a specific header for assistant messages, you can ignore this flag.
|
||||
- **Special tokens** like `bos_token` and `eos_token`. These are extracted from `tokenizer.special_tokens_map`. The exact tokens available inside each template will differ depending on the parent tokenizer.
|
||||
|
||||
<Tip>
|
||||
|
||||
You can actually pass any `kwarg` to `apply_chat_template`, and it will be accessible inside the template as a variable. In general,
|
||||
we recommend trying to stick to the core variables above, as it will make your model harder to use if users have
|
||||
to write custom code to pass model-specific `kwargs`. However, we're aware that this field moves quickly, so if you
|
||||
have a new use-case that doesn't fit in the core API, feel free to use a new `kwarg` for it! If a new `kwarg`
|
||||
becomes common we may promote it into the core API and create a standard, documented format for it.
|
||||
|
||||
</Tip>
|
||||
|
||||
### Callable functions
|
||||
|
||||
There is also a short list of callable functions available to you inside your templates. These are:
|
||||
|
||||
- `raise_exception(msg)`: Raises a `TemplateException`. This is useful for debugging, and for telling users when they're
|
||||
doing something that your template doesn't support.
|
||||
- `strftime_now(format_str)`: Equivalent to `datetime.now().strftime(format_str)` in Python. This is used for getting
|
||||
the current date/time in a specific format, which is sometimes included in system messages.
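As a minimal sketch, a toy template (not any model's real template) could use both helpers like this, assuming `tokenizer` and `chat` from the earlier examples:

```python
# Overriding the template in place is purely for illustration.
tokenizer.chat_template = (
    "{{- '<|system|>\\nToday is ' + strftime_now('%Y-%m-%d') + '\\n' }}"
    "{%- for message in messages %}"
    "{%- if message['role'] not in ['user', 'assistant', 'system'] %}"
    "{{- raise_exception('Unsupported role: ' + message['role']) }}"
    "{%- endif %}"
    "{{- '<|' + message['role'] + '|>\\n' + message['content'] + eos_token }}"
    "{%- endfor %}"
)
print(tokenizer.apply_chat_template(chat, tokenize=False))
```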
|
||||
|
||||
### Compatibility with non-Python Jinja
|
||||
|
||||
@ -855,4 +892,25 @@ all implementations of Jinja:
|
||||
in the Jinja documentation for more.
|
||||
- Replace `True`, `False` and `None`, which are Python-specific, with `true`, `false` and `none`.
|
||||
- Directly rendering a dict or list may give different results in other implementations (for example, string entries
|
||||
might change from single-quoted to double-quoted). Adding the `tojson` filter can help to ensure consistency here.
|
||||
might change from single-quoted to double-quoted). Adding the `tojson` filter can help to ensure consistency here.
|
||||
|
||||
### Writing and debugging larger templates
|
||||
|
||||
When this feature was introduced, most templates were quite small, the Jinja equivalent of a "one-liner" script.
|
||||
However, with new models and features like tool-use and RAG, some templates can be 100 lines long or more. When
|
||||
writing templates like these, it's a good idea to write them in a separate file, using a text editor. You can easily
|
||||
extract a chat template to a file:
|
||||
|
||||
```python
|
||||
open("template.jinja", "w").write(tokenizer.chat_template)
|
||||
```
|
||||
|
||||
Or load the edited template back into the tokenizer:
|
||||
|
||||
```python
|
||||
tokenizer.chat_template = open("template.jinja").read()
|
||||
```
|
||||
|
||||
As an added bonus, when you write a long, multi-line template in a separate file, line numbers in that file will
|
||||
exactly correspond to line numbers in template parsing or execution errors. This will make it much easier to
|
||||
identify the source of issues.
|
@ -185,7 +185,7 @@ class ResnetModelForImageClassification(PreTrainedModel):
|
||||
def forward(self, tensor, labels=None):
|
||||
logits = self.model(tensor)
|
||||
if labels is not None:
|
||||
loss = torch.nn.cross_entropy(logits, labels)
|
||||
loss = torch.nn.functional.cross_entropy(logits, labels)
|
||||
return {"loss": loss, "logits": logits}
|
||||
return {"logits": logits}
|
||||
```
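A quick usage sketch for this forward signature, assuming the `resnet50d_config` defined earlier in this guide (and `timm` installed); the tensors are dummies:

```python
import torch

# Run a forward pass with labels so the loss branch above is exercised.
resnet50d = ResnetModelForImageClassification(resnet50d_config)
dummy_images = torch.randn(2, 3, 224, 224)
dummy_labels = torch.tensor([0, 1])

outputs = resnet50d(dummy_images, labels=dummy_labels)
print(outputs["loss"], outputs["logits"].shape)
```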
|
||||
|
@ -46,16 +46,30 @@ The initial supported quantization types are decided according to the popular qu
|
||||
on the Hub.
|
||||
|
||||
- F32
|
||||
- F16
|
||||
- BF16
|
||||
- Q4_0
|
||||
- Q4_1
|
||||
- Q5_0
|
||||
- Q5_1
|
||||
- Q8_0
|
||||
- Q2_K
|
||||
- Q3_K
|
||||
- Q4_0
|
||||
- Q4_K
|
||||
- Q5_K
|
||||
- Q6_K
|
||||
- Q8_0
|
||||
- IQ1_S
|
||||
- IQ1_M
|
||||
- IQ2_XXS
|
||||
- IQ2_XS
|
||||
- IQ2_S
|
||||
- IQ3_XXS
|
||||
- IQ3_S
|
||||
- IQ4_XS
|
||||
- IQ4_NL
|
||||
|
||||
We take inspiration from the excellent [99991/pygguf](https://github.com/99991/pygguf) Python parser to dequantize the
|
||||
weights.
|
||||
> [!NOTE]
|
||||
> To support gguf dequantization, `gguf>=0.10.0` installation is required.
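For instance, a minimal loading sketch (the repo id and filename below are only an example; any GGUF checkpoint with a supported architecture works):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF"
gguf_file = "tinyllama-1.1b-chat-v1.0.Q6_K.gguf"

# Passing gguf_file makes transformers dequantize the GGUF weights into
# regular torch tensors on load.
tokenizer = AutoTokenizer.from_pretrained(model_id, gguf_file=gguf_file)
model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=gguf_file)
```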
|
||||
|
||||
### Supported model architectures
|
||||
|
||||
|
@ -105,6 +105,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [CPM-Ant](model_doc/cpmant) | ✅ | ❌ | ❌ |
|
||||
| [CTRL](model_doc/ctrl) | ✅ | ✅ | ❌ |
|
||||
| [CvT](model_doc/cvt) | ✅ | ✅ | ❌ |
|
||||
| [DAC](model_doc/dac) | ✅ | ❌ | ❌ |
|
||||
| [Data2VecAudio](model_doc/data2vec) | ✅ | ❌ | ❌ |
|
||||
| [Data2VecText](model_doc/data2vec) | ✅ | ❌ | ❌ |
|
||||
| [Data2VecVision](model_doc/data2vec) | ✅ | ✅ | ❌ |
|
||||
@ -120,7 +121,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [DETR](model_doc/detr) | ✅ | ❌ | ❌ |
|
||||
| [DialoGPT](model_doc/dialogpt) | ✅ | ✅ | ✅ |
|
||||
| [DiNAT](model_doc/dinat) | ✅ | ❌ | ❌ |
|
||||
| [DINOv2](model_doc/dinov2) | ✅ | ❌ | ❌ |
|
||||
| [DINOv2](model_doc/dinov2) | ✅ | ❌ | ✅ |
|
||||
| [DistilBERT](model_doc/distilbert) | ✅ | ✅ | ✅ |
|
||||
| [DiT](model_doc/dit) | ✅ | ❌ | ✅ |
|
||||
| [DonutSwin](model_doc/donut) | ✅ | ❌ | ❌ |
|
||||
@ -136,6 +137,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [ESM](model_doc/esm) | ✅ | ✅ | ❌ |
|
||||
| [FairSeq Machine-Translation](model_doc/fsmt) | ✅ | ❌ | ❌ |
|
||||
| [Falcon](model_doc/falcon) | ✅ | ❌ | ❌ |
|
||||
| [FalconMamba](model_doc/falcon_mamba) | ✅ | ❌ | ❌ |
|
||||
| [FastSpeech2Conformer](model_doc/fastspeech2_conformer) | ✅ | ❌ | ❌ |
|
||||
| [FLAN-T5](model_doc/flan-t5) | ✅ | ✅ | ✅ |
|
||||
| [FLAN-UL2](model_doc/flan-ul2) | ✅ | ✅ | ✅ |
|
||||
@ -156,6 +158,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [GPT-Sw3](model_doc/gpt-sw3) | ✅ | ✅ | ✅ |
|
||||
| [GPTBigCode](model_doc/gpt_bigcode) | ✅ | ❌ | ❌ |
|
||||
| [GPTSAN-japanese](model_doc/gptsan-japanese) | ✅ | ❌ | ❌ |
|
||||
| [Granite](model_doc/granite) | ✅ | ❌ | ❌ |
|
||||
| [Graphormer](model_doc/graphormer) | ✅ | ❌ | ❌ |
|
||||
| [Grounding DINO](model_doc/grounding-dino) | ✅ | ❌ | ❌ |
|
||||
| [GroupViT](model_doc/groupvit) | ✅ | ✅ | ❌ |
|
||||
@ -185,7 +188,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [Llama3](model_doc/llama3) | ✅ | ❌ | ✅ |
|
||||
| [LLaVa](model_doc/llava) | ✅ | ❌ | ❌ |
|
||||
| [LLaVA-NeXT](model_doc/llava_next) | ✅ | ❌ | ❌ |
|
||||
| [LLaVa-NeXT-Video](model_doc/llava-next-video) | ✅ | ❌ | ❌ |
|
||||
| [LLaVa-NeXT-Video](model_doc/llava_next_video) | ✅ | ❌ | ❌ |
|
||||
| [Longformer](model_doc/longformer) | ✅ | ✅ | ❌ |
|
||||
| [LongT5](model_doc/longt5) | ✅ | ❌ | ✅ |
|
||||
| [LUKE](model_doc/luke) | ✅ | ❌ | ❌ |
|
||||
@ -258,6 +261,7 @@ Flax), PyTorch, and/or TensorFlow.
|
||||
| [Qwen2](model_doc/qwen2) | ✅ | ❌ | ❌ |
|
||||
| [Qwen2Audio](model_doc/qwen2_audio) | ✅ | ❌ | ❌ |
|
||||
| [Qwen2MoE](model_doc/qwen2_moe) | ✅ | ❌ | ❌ |
|
||||
| [Qwen2VL](model_doc/qwen2_vl) | ✅ | ❌ | ❌ |
|
||||
| [RAG](model_doc/rag) | ✅ | ✅ | ❌ |
|
||||
| [REALM](model_doc/realm) | ✅ | ❌ | ❌ |
|
||||
| [RecurrentGemma](model_doc/recurrent_gemma) | ✅ | ❌ | ❌ |
|
||||
|
@ -140,9 +140,6 @@ generation.
|
||||
[[autodoc]] ForcedEOSTokenLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] ForceTokensLogitsProcessor
|
||||
- __call__
|
||||
|
||||
[[autodoc]] HammingDiversityLogitsProcessor
|
||||
- __call__
|
||||
|
||||
@ -158,9 +155,6 @@ generation.
|
||||
[[autodoc]] LogitsProcessorList
|
||||
- __call__
|
||||
|
||||
[[autodoc]] LogitsWarper
|
||||
- __call__
|
||||
|
||||
[[autodoc]] MinLengthLogitsProcessor
|
||||
- __call__
|
||||
|
||||
@ -396,6 +390,11 @@ A [`Constraint`] can be used to force the generation to include specific tokens
|
||||
- get_seq_length
|
||||
- reset
|
||||
|
||||
[[autodoc]] OffloadedStaticCache
|
||||
- update
|
||||
- get_seq_length
|
||||
- reset
|
||||
|
||||
[[autodoc]] HybridCache
|
||||
- update
|
||||
- get_seq_length
|
||||
@ -421,4 +420,3 @@ A [`Constraint`] can be used to force the generation to include specific tokens
|
||||
|
||||
[[autodoc]] WatermarkDetector
|
||||
- __call__
|
||||
|
||||
|
@ -96,14 +96,15 @@ with the [`~DynamicCache`] class being the default cache for most models. It all
|
||||
|
||||
Refer to the table below to see the difference between cache types and choose the one that suits best for your use-case.
|
||||
|
||||
| Cache Type | Memory Efficient | Supports torch.compile() | Initialization Recommended | Latency | Long Context Generation |
|
||||
|---------------------|------------------|--------------------------|----------------------------|----------|--------------------------|
|
||||
| Dynamic Cache | No | No | No | Mid | No |
|
||||
| Static Cache | No | Yes | Yes | High | No |
|
||||
| Quantized Cache | Yes | No | No | Low | Yes |
|
||||
| Offloaded Cache | Yes | No | No | Low | No |
|
||||
| Sliding Window Cache| No | Yes | Yes | High | No |
|
||||
| Sink Cache | Yes | No | Yes | Mid | Yes |
|
||||
| Cache Type | Memory Efficient | Supports torch.compile() | Initialization Recommended | Latency | Long Context Generation |
|
||||
|------------------------|------------------|--------------------------|----------------------------|---------|-------------------------|
|
||||
| Dynamic Cache | No | No | No | Mid | No |
|
||||
| Static Cache | No | Yes | Yes | High | No |
|
||||
| Offloaded Cache | Yes | No | No | Low | Yes |
|
||||
| Offloaded Static Cache | No | Yes | Yes | High | Yes |
|
||||
| Quantized Cache | Yes | No | No | Low | Yes |
|
||||
| Sliding Window Cache | No | Yes | Yes | High | No |
|
||||
| Sink Cache | Yes | No | Yes | Mid | Yes |
|
||||
|
||||
|
||||
These cache classes can be set with a `cache_implementation` argument when generating. To learn about the available options for the cache_implementation flag, please refer to the [API Documentation](./main_classes/text_generation.md#transformers.GenerationConfig). Now, let's explore each cache type in detail and see how to use them. Note that the below examples are for decoder-only Transformer-based models. We also support ["Model-Specific Cache"] classes for models such as Mamba or Jamba; keep reading for more details.
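For example, a minimal sketch of picking a cache type at `generate()` time (reusing the Llama-2 checkpoint from the examples below):

```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM

>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto")
>>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)

>>> # Select the cache type via cache_implementation; "offloaded" is one of the options described below.
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="offloaded")
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
```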
|
||||
@ -142,7 +143,7 @@ I like rock music because it's loud and energetic. It's a great way to express m
|
||||
I like rock music because it's loud and energetic. I like to listen to it when I'm feeling
|
||||
```
|
||||
|
||||
## OffloadedCache
|
||||
## Offloaded Cache
|
||||
|
||||
Similarly to KV cache quantization, [`~OffloadedCache`] strategy aims to reduce GPU VRAM usage.
|
||||
It does so by moving the KV cache for most layers to the CPU.
|
||||
@ -154,7 +155,8 @@ Thus, it can serve as a drop-in replacement or a fallback for it.
|
||||
Depending on your model and the characteristics of your generation task (size of context, number of generated tokens, number of beams, etc.)
|
||||
you may notice a small degradation in generation throughput compared to the default KV cache implementation.
|
||||
|
||||
To enable KV cache offloading, pass `cache_implementation="offloaded"` in the `generation_config` or directky to the `generate()` call.
|
||||
To enable KV cache offloading, pass `cache_implementation="offloaded"` in the `generation_config` or directly to the `generate()` call.
|
||||
Use `cache_implementation="offloaded_static"` for an offloaded static cache (see also [Offloaded Static Cache](#offloaded-static-cache) below).
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
@ -216,7 +218,6 @@ retrying with cache_implementation='offloaded'
|
||||
before successfully generating 40 beams.
|
||||
|
||||
|
||||
|
||||
### Static Cache
|
||||
|
||||
Since the "DynamicCache" dynamically grows with each generation step, it prevents you from taking advantage of JIT optimizations. The [`~StaticCache`] pre-allocates
|
||||
@ -238,6 +239,28 @@ For more examples with Static Cache and JIT compilation, take a look at [StaticC
|
||||
"Hello, my name is [Your Name], and I am a [Your Profession] with [Number of Years] of"
|
||||
```
|
||||
|
||||
|
||||
## Offloaded Static Cache
|
||||
|
||||
Just as [`~OffloadedCache`] exists for offloading a "DynamicCache", there is also an offloaded static cache. It fully supports
|
||||
JIT optimizations. Just pass `cache_implementation="offloaded_static"` in the `generation_config` or directly to the `generate()` call.
|
||||
This will use the [`~OffloadedStaticCache`] implementation instead.
|
||||
|
||||
```python
|
||||
>>> import torch
|
||||
>>> from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf")
|
||||
>>> model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf", torch_dtype=torch.float16, device_map="auto")
|
||||
>>> inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
|
||||
|
||||
>>> # simply pass cache_implementation="offloaded_static"
|
||||
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=20, cache_implementation="offloaded_static")
|
||||
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
|
||||
"Hello, my name is [Your Name], and I am a [Your Profession] with [Number of Years] of"
|
||||
```
|
||||
|
||||
|
||||
### Sliding Window Cache
|
||||
|
||||
As the name suggests, this cache type implements a sliding window over previous keys and values, retaining only the last `sliding_window` tokens. It should be used with models like Mistral that support sliding window attention. Additionally, similar to Static Cache, this one is JIT-friendly and can be used with the same compile techniques as Static Cache.
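A minimal sketch with a sliding-window model (the checkpoint name is illustrative; any model with sliding-window attention works):

```python
>>> import torch
>>> from transformers import AutoTokenizer, AutoModelForCausalLM

>>> tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
>>> model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", torch_dtype=torch.float16, device_map="auto")
>>> inputs = tokenizer("Yesterday I was on a rock concert and.", return_tensors="pt").to(model.device)

>>> # Only the last `sliding_window` tokens are kept in the KV cache.
>>> out = model.generate(**inputs, do_sample=False, max_new_tokens=30, cache_implementation="sliding_window")
>>> tokenizer.batch_decode(out, skip_special_tokens=True)[0]
```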
|
||||
|
@ -99,7 +99,7 @@ model.generation_config.max_new_tokens = 16
|
||||
|
||||
past_key_values = StaticCache(
|
||||
config=model.config,
|
||||
max_batch_size=1,
|
||||
batch_size=1,
|
||||
# If you plan to reuse the cache, make sure the cache length is large enough for all cases
|
||||
max_cache_len=prompt_length+(model.generation_config.max_new_tokens*2),
|
||||
device=model.device,
|
||||
@ -161,7 +161,7 @@ There are a few important things you must do to enable static kv-cache and `torc
|
||||
batch_size, seq_length = inputs["input_ids"].shape
|
||||
with torch.no_grad():
|
||||
past_key_values = StaticCache(
|
||||
config=model.config, max_batch_size=2, max_cache_len=4096, device=torch_device, dtype=model.dtype
|
||||
config=model.config, batch_size=2, max_cache_len=4096, device=torch_device, dtype=model.dtype
|
||||
)
|
||||
cache_position = torch.arange(seq_length, device=torch_device)
|
||||
generated_ids = torch.zeros(
|
||||
|
@ -267,5 +267,6 @@ While the autoregressive generation process is relatively straightforward, makin
|
||||
|
||||
1. [`optimum`](https://github.com/huggingface/optimum), an extension of 🤗 Transformers that optimizes for specific hardware devices.
|
||||
2. [`outlines`](https://github.com/outlines-dev/outlines), a library where you can constrain text generation (e.g. to generate JSON files);
|
||||
3. [`text-generation-inference`](https://github.com/huggingface/text-generation-inference), a production-ready server for LLMs;
|
||||
4. [`text-generation-webui`](https://github.com/oobabooga/text-generation-webui), a UI for text generation;
|
||||
3. [`SynCode`](https://github.com/uiuc-focal-lab/syncode), a library for context-free grammar guided generation. (e.g. JSON, SQL, Python)
|
||||
4. [`text-generation-inference`](https://github.com/huggingface/text-generation-inference), a production-ready server for LLMs;
|
||||
5. [`text-generation-webui`](https://github.com/oobabooga/text-generation-webui), a UI for text generation;
|
||||
|
@ -87,12 +87,33 @@ These engines have the following specification:
|
||||
1. Follow the [messages format](../chat_templating.md) for its input (`List[Dict[str, str]]`) and return a string.
|
||||
2. Stop generating outputs *before* the sequences passed in the argument `stop_sequences`
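For instance, a minimal sketch of a custom engine that follows these two points, wrapping `huggingface_hub.InferenceClient` (the default model id is only an example):

```python
from typing import Dict, List

from huggingface_hub import InferenceClient


class CustomLlmEngine:
    def __init__(self, model: str = "meta-llama/Meta-Llama-3-70B-Instruct"):
        self.client = InferenceClient(model)

    def __call__(self, messages: List[Dict[str, str]], stop_sequences: List[str] = []) -> str:
        response = self.client.chat_completion(messages, stop=stop_sequences, max_tokens=1000)
        answer = response.choices[0].message.content
        # Trim at the first stop sequence, so generation stops *before* it.
        for stop in stop_sequences:
            if stop in answer:
                answer = answer.split(stop)[0]
        return answer
```

Any callable with this signature can be passed as `llm_engine` when building an agent.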
|
||||
|
||||
### HfEngine
|
||||
### TransformersEngine
|
||||
|
||||
For convenience, we have added a `HfEngine` that implements the points above and uses an inference endpoint for the execution of the LLM.
|
||||
For convenience, we have added a `TransformersEngine` that implements the points above, taking a pre-initialized `Pipeline` as input.
|
||||
|
||||
```python
|
||||
>>> from transformers import HfEngine
|
||||
>>> from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, TransformersEngine
|
||||
|
||||
>>> model_name = "HuggingFaceTB/SmolLM-135M-Instruct"
|
||||
>>> tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||
>>> model = AutoModelForCausalLM.from_pretrained(model_name)
|
||||
|
||||
>>> pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
||||
|
||||
>>> engine = TransformersEngine(pipe)
|
||||
>>> engine([{"role": "user", "content": "Ok!"}], stop_sequences=["great"])
|
||||
|
||||
"What a "
|
||||
```
|
||||
|
||||
[[autodoc]] TransformersEngine
|
||||
|
||||
### HfApiEngine
|
||||
|
||||
The `HfApiEngine` is an engine that wraps an [HF Inference API](https://huggingface.co/docs/api-inference/index) client for the execution of the LLM.
|
||||
|
||||
```python
|
||||
>>> from transformers import HfApiEngine
|
||||
|
||||
>>> messages = [
|
||||
... {"role": "user", "content": "Hello, how are you?"},
|
||||
@ -100,12 +121,12 @@ For convenience, we have added a `HfEngine` that implements the points above and
|
||||
... {"role": "user", "content": "No need to help, take it easy."},
|
||||
... ]
|
||||
|
||||
>>> HfEngine()(messages, stop_sequences=["conversation"])
|
||||
>>> HfApiEngine()(messages, stop_sequences=["conversation"])
|
||||
|
||||
"That's very kind of you to say! It's always nice to have a relaxed "
|
||||
```
|
||||
|
||||
[[autodoc]] HfEngine
|
||||
[[autodoc]] HfApiEngine
|
||||
|
||||
|
||||
## Agent Types
|
||||
|
@ -61,3 +61,7 @@ Learn how to quantize models in the [Quantization](../quantization) guide.
|
||||
|
||||
[[autodoc]] FbgemmFp8Config
|
||||
|
||||
## TorchAoConfig
|
||||
|
||||
[[autodoc]] TorchAoConfig
|
||||
|
||||
|
@ -59,7 +59,52 @@ This model was contributed by [lysandre](https://huggingface.co/lysandre). This
|
||||
- Layers are split in groups that share parameters (to save memory).
|
||||
Next sentence prediction is replaced by a sentence ordering prediction: in the inputs, we have two sentences A and B (that are consecutive) and we either feed A followed by B or B followed by A. The model must predict if they have been swapped or not.
|
||||
|
||||
### Using Scaled Dot Product Attention (SDPA)
|
||||
|
||||
PyTorch includes a native scaled dot-product attention (SDPA) operator as part of `torch.nn.functional`. This function
|
||||
encompasses several implementations that can be applied depending on the inputs and the hardware in use. See the
|
||||
[official documentation](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)
|
||||
or the [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)
|
||||
page for more information.
|
||||
|
||||
SDPA is used by default for `torch>=2.1.1` when an implementation is available, but you may also set
|
||||
`attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.
|
||||
|
||||
```
import torch
from transformers import AlbertModel
|
||||
model = AlbertModel.from_pretrained("albert/albert-base-v1", torch_dtype=torch.float16, attn_implementation="sdpa")
|
||||
...
|
||||
```
|
||||
|
||||
For the best speedups, we recommend loading the model in half-precision (e.g. `torch.float16` or `torch.bfloat16`).
|
||||
|
||||
On a local benchmark (GeForce RTX 2060-8GB, PyTorch 2.3.1, OS Ubuntu 20.04) with `float16`, we saw the
|
||||
following speedups during training and inference.
|
||||
|
||||
#### Training for 100 iterations
|
||||
|
||||
|batch_size|seq_len|Time per batch (eager - s)| Time per batch (sdpa - s)| Speedup (%)| Eager peak mem (MB)| sdpa peak mem (MB)| Mem saving (%)|
|
||||
|----------|-------|--------------------------|--------------------------|------------|--------------------|-------------------|---------------|
|
||||
|2 |256 |0.028 |0.024 |14.388 |358.411 |321.088 |11.624 |
|
||||
|2 |512 |0.049 |0.041 |17.681 |753.458 |602.660 |25.022 |
|
||||
|4 |256 |0.044 |0.039 |12.246 |679.534 |602.660 |12.756 |
|
||||
|4 |512 |0.090 |0.076 |18.472 |1434.820 |1134.140 |26.512 |
|
||||
|8 |256 |0.081 |0.072 |12.664 |1283.825 |1134.140 |13.198 |
|
||||
|8 |512 |0.170 |0.143 |18.957 |2820.398 |2219.695 |27.062 |
|
||||
|
||||
#### Inference with 50 batches
|
||||
|
||||
|batch_size|seq_len|Per token latency eager (ms)|Per token latency SDPA (ms)|Speedup (%) |Mem eager (MB)|Mem BT (MB)|Mem saved (%)|
|
||||
|----------|-------|----------------------------|---------------------------|------------|--------------|-----------|-------------|
|
||||
|4 |128 |0.083 |0.071 |16.967 |48.319 |48.45 |-0.268 |
|
||||
|4 |256 |0.148 |0.127 |16.37 |63.4 |63.922 |-0.817 |
|
||||
|4 |512 |0.31 |0.247 |25.473 |110.092 |94.343 |16.693 |
|
||||
|8 |128 |0.137 |0.124 |11.102 |63.4 |63.66 |-0.409 |
|
||||
|8 |256 |0.271 |0.231 |17.271 |91.202 |92.246 |-1.132 |
|
||||
|8 |512 |0.602 |0.48 |25.47 |186.159 |152.564 |22.021 |
|
||||
|16 |128 |0.252 |0.224 |12.506 |91.202 |91.722 |-0.567 |
|
||||
|16 |256 |0.526 |0.448 |17.604 |148.378 |150.467 |-1.388 |
|
||||
|16 |512 |1.203 |0.96 |25.365 |338.293 |271.102 |24.784 |
|
||||
|
||||
This model was contributed by [lysandre](https://huggingface.co/lysandre). This model jax version was contributed by
|
||||
[kamalkraj](https://huggingface.co/kamalkraj). The original code can be found [here](https://github.com/google-research/ALBERT).
|
||||
|
@ -87,4 +87,17 @@ If you're interested in submitting a resource to be included here, please feel f
|
||||
|
||||
[[autodoc]] Blip2ForConditionalGeneration
|
||||
- forward
|
||||
- generate
|
||||
- generate
|
||||
|
||||
## Blip2ForImageTextRetrieval
|
||||
|
||||
[[autodoc]] Blip2ForImageTextRetrieval
|
||||
- forward
|
||||
|
||||
## Blip2TextModelWithProjection
|
||||
|
||||
[[autodoc]] Blip2TextModelWithProjection
|
||||
|
||||
## Blip2VisionModelWithProjection
|
||||
|
||||
[[autodoc]] Blip2VisionModelWithProjection
|
||||
|
80
docs/source/en/model_doc/dac.md
Normal file
@ -0,0 +1,80 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# DAC
|
||||
|
||||
## Overview
|
||||
|
||||
|
||||
The DAC model was proposed in [Descript Audio Codec: High-Fidelity Audio Compression with Improved RVQGAN](https://arxiv.org/abs/2306.06546) by Rithesh Kumar, Prem Seetharaman, Alejandro Luebs, Ishaan Kumar, Kundan Kumar.
|
||||
|
||||
The Descript Audio Codec (DAC) model is a powerful tool for compressing audio data, making it highly efficient for storage and transmission. By compressing 44.1 KHz audio into tokens at just 8kbps bandwidth, the DAC model enables high-quality audio processing while significantly reducing the data footprint. This is particularly useful in scenarios where bandwidth is limited or storage space is at a premium, such as in streaming applications, remote conferencing, and archiving large audio datasets.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Language models have been successfully used to model natural signals, such as images, speech, and music. A key component of these models is a high quality neural compression model that can compress high-dimensional natural signals into lower dimensional discrete tokens. To that end, we introduce a high-fidelity universal neural audio compression algorithm that achieves ~90x compression of 44.1 KHz audio into tokens at just 8kbps bandwidth. We achieve this by combining advances in high-fidelity audio generation with better vector quantization techniques from the image domain, along with improved adversarial and reconstruction losses. We compress all domains (speech, environment, music, etc.) with a single universal model, making it widely applicable to generative modeling of all audio. We compare with competing audio compression algorithms, and find our method outperforms them significantly. We provide thorough ablations for every design choice, as well as open-source code and trained model weights. We hope our work can lay the foundation for the next generation of high-fidelity audio modeling.*
|
||||
|
||||
This model was contributed by [Kamil Akesbi](https://huggingface.co/kamilakesbi).
|
||||
The original code can be found [here](https://github.com/descriptinc/descript-audio-codec/tree/main?tab=readme-ov-file).
|
||||
|
||||
|
||||
## Model structure
|
||||
|
||||
The Descript Audio Codec (DAC) model is structured into three distinct stages:
|
||||
|
||||
1. Encoder Model: This stage compresses the input audio, reducing its size while retaining essential information.
|
||||
2. Residual Vector Quantizer (RVQ) Model: Working in tandem with the encoder, this model quantizes the latent codes of the audio, refining the compression and ensuring high-quality reconstruction.
|
||||
3. Decoder Model: This final stage reconstructs the audio from its compressed form, restoring it to a state that closely resembles the original input.
|
||||
|
||||
## Usage example
|
||||
|
||||
Here is a quick example of how to encode and decode an audio using this model:
|
||||
|
||||
```python
|
||||
>>> from datasets import load_dataset, Audio
|
||||
>>> from transformers import DacModel, AutoProcessor
|
||||
>>> librispeech_dummy = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
|
||||
|
||||
>>> model = DacModel.from_pretrained("descript/dac_16khz")
|
||||
>>> processor = AutoProcessor.from_pretrained("descript/dac_16khz")
|
||||
>>> librispeech_dummy = librispeech_dummy.cast_column("audio", Audio(sampling_rate=processor.sampling_rate))
|
||||
>>> audio_sample = librispeech_dummy[-1]["audio"]["array"]
|
||||
>>> inputs = processor(raw_audio=audio_sample, sampling_rate=processor.sampling_rate, return_tensors="pt")
|
||||
|
||||
>>> encoder_outputs = model.encode(inputs["input_values"])
|
||||
>>> # Get the intermediate audio codes
|
||||
>>> audio_codes = encoder_outputs.audio_codes
|
||||
>>> # Reconstruct the audio from its quantized representation
|
||||
>>> audio_values = model.decode(encoder_outputs.quantized_representation)
|
||||
>>> # or the equivalent with a forward pass
|
||||
>>> audio_values = model(inputs["input_values"]).audio_values
|
||||
```
|
||||
|
||||
## DacConfig
|
||||
|
||||
[[autodoc]] DacConfig
|
||||
|
||||
## DacFeatureExtractor
|
||||
|
||||
[[autodoc]] DacFeatureExtractor
|
||||
- __call__
|
||||
|
||||
## DacModel
|
||||
|
||||
[[autodoc]] DacModel
|
||||
- decode
|
||||
- encode
|
||||
- forward
|
@ -153,7 +153,7 @@ In short, one should prepare the data either in COCO detection or COCO panoptic
|
||||
[`~transformers.DetrImageProcessor`] to create `pixel_values`, `pixel_mask` and optional
|
||||
`labels`, which can then be used to train (or fine-tune) a model. For evaluation, one should first convert the
|
||||
outputs of the model using one of the postprocessing methods of [`~transformers.DetrImageProcessor`]. These can
|
||||
be provided to either `CocoEvaluator` or `PanopticEvaluator`, which allow you to calculate metrics like
|
||||
mean Average Precision (mAP) and Panoptic Quality (PQ). The latter objects are implemented in the [original repository](https://github.com/facebookresearch/detr). See the [example notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/DETR) for more info regarding evaluation.
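For illustration, here is a minimal sketch of that workflow for inference-time object detection, assuming the `facebook/detr-resnet-50` checkpoint and a sample COCO image; for training you would additionally pass COCO-format `annotations` to the image processor so it also returns `labels`:

```python
import torch
import requests
from PIL import Image
from transformers import DetrImageProcessor, DetrForObjectDetection

processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# Creates pixel_values and pixel_mask
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Convert raw outputs into COCO-style detections (scores, labels, boxes)
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(outputs, target_sizes=target_sizes, threshold=0.9)[0]
```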
|
||||
|
||||
## Resources
|
||||
|
@ -72,6 +72,9 @@ If you're interested in submitting a resource to be included here, please feel f
|
||||
|
||||
[[autodoc]] Dinov2Config
|
||||
|
||||
<frameworkcontent>
|
||||
<pt>
|
||||
|
||||
## Dinov2Model
|
||||
|
||||
[[autodoc]] Dinov2Model
|
||||
@ -81,3 +84,20 @@ If you're interested in submitting a resource to be included here, please feel f
|
||||
|
||||
[[autodoc]] Dinov2ForImageClassification
|
||||
- forward
|
||||
|
||||
</pt>
|
||||
<jax>
|
||||
|
||||
## FlaxDinov2Model
|
||||
|
||||
[[autodoc]] FlaxDinov2Model
|
||||
- __call__
|
||||
|
||||
|
||||
## FlaxDinov2ForImageClassification
|
||||
|
||||
[[autodoc]] FlaxDinov2ForImageClassification
|
||||
- __call__
|
||||
|
||||
</jax>
|
||||
</frameworkcontent>
|
||||
|
116
docs/source/en/model_doc/falcon_mamba.md
Normal file
@ -0,0 +1,116 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# FalconMamba
|
||||
|
||||
## Overview
|
||||
|
||||
The FalconMamba model was proposed by TII UAE (Technology Innovation Institute) in their release.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*We present FalconMamba, a new base large language model based on the novel Mamba architecture. FalconMamba is trained on 5.8 trillion tokens with carefully selected data mixtures. As a pure Mamba-based model, FalconMamba surpasses leading open-weight models based on Transformers, such as Mistral 7B, Llama3 8B, and Falcon2 11B. It is on par with Gemma 7B and outperforms models with different architecture designs, such as RecurrentGemma 9B. Currently, FalconMamba is the best-performing Mamba model in the literature at this scale, surpassing both existing Mamba and hybrid Mamba-Transformer models.
|
||||
Due to its architecture, FalconMamba is significantly faster at inference and requires substantially less memory for long sequence generation. Despite recent studies suggesting that hybrid Mamba-Transformer models outperform pure architecture designs, we argue and demonstrate that the pure Mamba design can achieve similar, even superior results compared to the hybrid design. We make the weights of our implementation of FalconMamba publicly available under a permissive license.*
|
||||
|
||||
Tips:
|
||||
|
||||
- FalconMamba is mostly based on the Mamba architecture, so the same [tips and best practices](./mamba) are relevant here.
|
||||
|
||||
The model has been trained on approximately 6T tokens consisting of a mixture of many data sources such as RefinedWeb, Cosmopedia and Math data.
|
||||
|
||||
For more details about the training procedure and the architecture, have a look at [the technical paper of FalconMamba]() (coming soon).
|
||||
|
||||
## Usage
|
||||
|
||||
Below we demonstrate how to use the model:
|
||||
|
||||
```python
|
||||
from transformers import FalconMambaForCausalLM, AutoTokenizer
|
||||
import torch
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b")
|
||||
model = FalconMambaForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b")
|
||||
|
||||
input_ids = tokenizer("Hey how are you doing?", return_tensors= "pt")["input_ids"]
|
||||
|
||||
out = model.generate(input_ids, max_new_tokens=10)
|
||||
print(tokenizer.batch_decode(out))
|
||||
```
|
||||
|
||||
The architecture is also compatible with `torch.compile` for faster generation:
|
||||
|
||||
```python
|
||||
from transformers import FalconMambaForCausalLM, AutoTokenizer
|
||||
import torch
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b")
|
||||
model = FalconMambaForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b", torch_dtype=torch.bfloat16).to(0)
|
||||
model = torch.compile(model)
|
||||
|
||||
input_ids = tokenizer("Hey how are you doing?", return_tensors= "pt")["input_ids"]
|
||||
|
||||
out = model.generate(input_ids, max_new_tokens=10)
|
||||
print(tokenizer.batch_decode(out))
|
||||
```
|
||||
|
||||
If you have access to a GPU that is compatible with `bitsandbytes`, you can also quantize the model in 4-bit precision:
|
||||
|
||||
```python
|
||||
from transformers import FalconMambaForCausalLM, AutoTokenizer, BitsAndBytesConfig
|
||||
import torch
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b")
|
||||
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
|
||||
model = FalconMambaForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b", quantization_config=quantization_config)
|
||||
|
||||
input_ids = tokenizer("Hey how are you doing?", return_tensors= "pt")["input_ids"]
|
||||
|
||||
out = model.generate(input_ids, max_new_tokens=10)
|
||||
print(tokenizer.batch_decode(out))
|
||||
```
|
||||
|
||||
You can also play with the instruction fine-tuned model:
|
||||
|
||||
```python
|
||||
from transformers import FalconMambaForCausalLM, AutoTokenizer
|
||||
import torch
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("tiiuae/falcon-mamba-7b-instruct")
|
||||
model = FalconMambaForCausalLM.from_pretrained("tiiuae/falcon-mamba-7b-instruct")
|
||||
|
||||
# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
|
||||
messages = [
|
||||
{"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
|
||||
]
|
||||
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
|
||||
|
||||
outputs = model.generate(input_ids)
|
||||
print(tokenizer.decode(outputs[0]))
|
||||
```
|
||||
|
||||
## FalconMambaConfig
|
||||
|
||||
[[autodoc]] FalconMambaConfig
|
||||
|
||||
## FalconMambaModel
|
||||
|
||||
[[autodoc]] FalconMambaModel
|
||||
- forward
|
||||
|
||||
## FalconMambaLMHeadModel
|
||||
|
||||
[[autodoc]] FalconMambaForCausalLM
|
||||
- forward
|
74
docs/source/en/model_doc/granite.md
Normal file
@ -0,0 +1,74 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Granite
|
||||
|
||||
## Overview
|
||||
|
||||
The Granite model was proposed in [Power Scheduler: A Batch Size and Token Number Agnostic Learning Rate Scheduler](https://arxiv.org/abs/2408.13359) by Yikang Shen, Matthew Stallone, Mayank Mishra, Gaoyuan Zhang, Shawn Tan, Aditya Prasad, Adriana Meza Soria, David D. Cox and Rameswar Panda.
|
||||
|
||||
PowerLM-3B is a 3B state-of-the-art small language model trained with the Power learning rate scheduler. It is trained on a wide range of open-source and synthetic datasets with permissive licenses. PowerLM-3B has shown promising results compared to other models in the size categories across various benchmarks, including natural language multi-choices, code generation, and math reasoning.
|
||||
|
||||
The abstract from the paper is the following:
|
||||
|
||||
*Finding the optimal learning rate for language model pretraining is a challenging task.
|
||||
This is not only because there is a complicated correlation between learning rate, batch size, number of training tokens, model size, and other hyperparameters but also because it is prohibitively expensive to perform a hyperparameter search for large language models with Billions or Trillions of parameters. Recent studies propose using small proxy models and small corpus to perform hyperparameter searches and transposing the optimal parameters to large models and large corpus. While the zero-shot transferability is theoretically and empirically proven for model size related hyperparameters, like depth and width, the zero-shot transfer from small corpus to large corpus is underexplored.
|
||||
In this paper, we study the correlation between optimal learning rate, batch size, and number of training tokens for the recently proposed WSD scheduler. After thousands of small experiments, we found a power-law relationship between variables and demonstrated its transferability across model sizes. Based on the observation, we propose a new learning rate scheduler, Power scheduler, that is agnostic about the number of training tokens and batch size. The experiment shows that combining the Power scheduler with Maximum Update Parameterization (\mup) can consistently achieve impressive performance with one set of hyperparameters regardless of the number of training tokens, batch size, model size, and even model architecture. Our 3B dense and MoE models trained with the Power scheduler achieve comparable performance as state-of-the-art small language models.
|
||||
We [open source](https://huggingface.co/collections/ibm/power-lm-66be64ae647ddf11b9808000) these pretrained models.*
|
||||
|
||||
Tips:
|
||||
|
||||
```python
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
model_path = "ibm/PowerLM-3b"
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_path)
|
||||
|
||||
# drop device_map if running on CPU
|
||||
model = AutoModelForCausalLM.from_pretrained(model_path, device_map="auto")
|
||||
model.eval()
|
||||
|
||||
# change input text as desired
|
||||
prompt = "Write a code to find the maximum value in a list of numbers."
|
||||
|
||||
# tokenize the text
|
||||
input_tokens = tokenizer(prompt, return_tensors="pt")
|
||||
# generate output tokens
|
||||
output = model.generate(**input_tokens, max_new_tokens=100)
|
||||
# decode output tokens into text
|
||||
output = tokenizer.batch_decode(output)
|
||||
# loop over the batch to print, in this example the batch size is 1
|
||||
for i in output:
|
||||
print(i)
|
||||
```
|
||||
|
||||
This model was contributed by [mayank-mishra](https://huggingface.co/mayank-mishra).
|
||||
|
||||
|
||||
## GraniteConfig
|
||||
|
||||
[[autodoc]] GraniteConfig
|
||||
|
||||
## GraniteModel
|
||||
|
||||
[[autodoc]] GraniteModel
|
||||
- forward
|
||||
|
||||
## GraniteForCausalLM
|
||||
|
||||
[[autodoc]] GraniteForCausalLM
|
||||
- forward
|
@ -39,11 +39,11 @@ The original code can be found [here](https://github.com/state-spaces/mamba).
|
||||
|
||||
### A simple generation example:
|
||||
```python
|
||||
from transformers import Mamba2Config, Mamba2ForCausalLM, AutoTokenizer
|
||||
import torch
|
||||
model_id = 'mistralai/Mamba-Codestral-7B-v0.1'
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_id, revision='refs/pr/9', from_slow=True, legacy=False)
|
||||
model = Mamba2ForCausalLM.from_pretrained(model_id, revision='refs/pr/9')
|
||||
input_ids = tokenizer("Hey how are you doing?", return_tensors= "pt")["input_ids"]
|
||||
|
||||
out = model.generate(input_ids, max_new_tokens=10)
|
||||
|
329
docs/source/en/model_doc/qwen2_vl.md
Normal file
@ -0,0 +1,329 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Qwen2_VL
|
||||
|
||||
|
||||
## Overview
|
||||
|
||||
[Qwen2_VL](https://qwenlm.github.io/blog/qwen2-vl/) is a major update to the [Qwen-VL](https://arxiv.org/pdf/2308.12966) model from the Qwen team.
|
||||
|
||||
The abstract from the blog is the following:
|
||||
|
||||
*This blog introduces Qwen2-VL, an advanced version of the Qwen-VL model that has undergone significant enhancements over the past year. Key improvements include enhanced image comprehension, advanced video understanding, integrated visual agent functionality, and expanded multilingual support. The model architecture has been optimized for handling arbitrary image resolutions through Naive Dynamic Resolution support and utilizes Multimodal Rotary Position Embedding (M-ROPE) to effectively process both 1D textual and multi-dimensional visual data. This updated model demonstrates competitive performance against leading AI systems like GPT-4o and Claude 3.5 Sonnet in vision-related tasks and ranks highly among open-source models in text capabilities. These advancements make Qwen2-VL a versatile tool for various applications requiring robust multimodal processing and reasoning abilities.*
|
||||
|
||||
|
||||
## Usage example
|
||||
|
||||
### Single Media inference
|
||||
|
||||
The model can accept both images and videos as input. Here's an example code for inference.
|
||||
|
||||
```python
|
||||
|
||||
from PIL import Image
|
||||
import requests
|
||||
import torch
|
||||
from torchvision import io
|
||||
from typing import Dict
|
||||
from transformers import Qwen2VLForConditionalGeneration, AutoTokenizer, AutoProcessor
|
||||
|
||||
# Load the model in half-precision on the available device(s)
|
||||
model = Qwen2VLForConditionalGeneration.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", device_map="auto")
|
||||
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct")
|
||||
|
||||
# Image
|
||||
url = "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg"
|
||||
image = Image.open(requests.get(url, stream=True).raw)
|
||||
|
||||
conversation = [
|
||||
{
|
||||
"role":"user",
|
||||
"content":[
|
||||
{
|
||||
"type":"image",
|
||||
},
|
||||
{
|
||||
"type":"text",
|
||||
"text":"Describe this image."
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
# Preprocess the inputs
|
||||
text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
|
||||
# Expected output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Describe this image.<|im_end|>\n<|im_start|>assistant\n'
|
||||
|
||||
inputs = processor(text=[text_prompt], images=[image], padding=True, return_tensors="pt")
|
||||
inputs = inputs.to('cuda')
|
||||
|
||||
# Inference: Generation of the output
|
||||
output_ids = model.generate(**inputs, max_new_tokens=128)
|
||||
generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
|
||||
output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
|
||||
print(output_text)
|
||||
|
||||
|
||||
|
||||
# Video
|
||||
def fetch_video(ele: Dict, nframe_factor=2):
|
||||
if isinstance(ele['video'], str):
|
||||
def round_by_factor(number: int, factor: int) -> int:
|
||||
return round(number / factor) * factor
|
||||
|
||||
video = ele["video"]
|
||||
if video.startswith("file://"):
|
||||
video = video[7:]
|
||||
|
||||
video, _, info = io.read_video(
|
||||
video,
|
||||
start_pts=ele.get("video_start", 0.0),
|
||||
end_pts=ele.get("video_end", None),
|
||||
pts_unit="sec",
|
||||
output_format="TCHW",
|
||||
)
|
||||
assert not ("fps" in ele and "nframes" in ele), "Only accept either `fps` or `nframes`"
|
||||
if "nframes" in ele:
|
||||
nframes = round_by_factor(ele["nframes"], nframe_factor)
|
||||
else:
|
||||
fps = ele.get("fps", 1.0)
|
||||
nframes = round_by_factor(video.size(0) / info["video_fps"] * fps, nframe_factor)
|
||||
idx = torch.linspace(0, video.size(0) - 1, nframes, dtype=torch.int64)
|
||||
return video[idx]
|
||||
|
||||
video_info = {"type": "video", "video": "/path/to/video.mp4", "fps": 1.0}
|
||||
video = fetch_video(video_info)
|
||||
conversation = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "video"},
|
||||
{"type": "text", "text": "What happened in the video?"},
|
||||
],
|
||||
}
|
||||
]
|
||||
|
||||
# Preprocess the inputs
|
||||
text_prompt = processor.apply_chat_template(conversation, add_generation_prompt=True)
|
||||
# Expected output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|video_pad|><|vision_end|>What happened in the video?<|im_end|>\n<|im_start|>assistant\n'
|
||||
|
||||
inputs = processor(text=[text_prompt], videos=[video], padding=True, return_tensors="pt")
|
||||
inputs = inputs.to('cuda')
|
||||
|
||||
# Inference: Generation of the output
|
||||
output_ids = model.generate(**inputs, max_new_tokens=128)
|
||||
generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
|
||||
output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
|
||||
print(output_text)
|
||||
|
||||
```
|
||||
|
||||
|
||||
### Batch Mixed Media Inference
|
||||
|
||||
The model can batch inputs composed of mixed samples of various types such as images, videos, and text. Here is an example.
|
||||
|
||||
```python
|
||||
|
||||
image1 = Image.open("/path/to/image1.jpg")
|
||||
image2 = Image.open("/path/to/image2.jpg")
|
||||
image3 = Image.open("/path/to/image3.jpg")
|
||||
image4 = Image.open("/path/to/image4.jpg")
|
||||
image5 = Image.open("/path/to/image5.jpg")
|
||||
video = fetch_video({
|
||||
"type": "video",
|
||||
"video": "/path/to/video.mp4",
|
||||
"fps": 1.0
|
||||
})
|
||||
|
||||
# Conversation for the first image
|
||||
conversation1 = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "text", "text": "Describe this image."}
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
# Conversation with two images
|
||||
conversation2 = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "image"},
|
||||
{"type": "text", "text": "What is written in the pictures?"}
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
# Conversation with pure text
|
||||
conversation3 = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": "who are you?"
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
# Conversation with mixed media
|
||||
conversation4 = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "image"},
|
||||
{"type": "video"},
|
||||
{"type": "text", "text": "What are the common elements in these medias?"},
|
||||
],
|
||||
}
|
||||
]
|
||||
|
||||
conversations = [conversation1, conversation2, conversation3, conversation4]
|
||||
# Preparation for batch inference
|
||||
texts = [processor.apply_chat_template(msg, add_generation_prompt=True) for msg in conversations]
|
||||
inputs = processor(
|
||||
text=texts,
|
||||
images=[image1, image2, image3, image4, image5],
|
||||
videos=[video],
|
||||
padding=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
inputs = inputs.to('cuda')
|
||||
|
||||
# Batch Inference
|
||||
output_ids = model.generate(**inputs, max_new_tokens=128)
|
||||
generated_ids = [output_ids[len(input_ids):] for input_ids, output_ids in zip(inputs.input_ids, output_ids)]
|
||||
output_text = processor.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
|
||||
print(output_text)
|
||||
```
|
||||
|
||||
### Usage Tips
|
||||
|
||||
#### Image Resolution for performance boost
|
||||
|
||||
The model supports a wide range of resolution inputs. By default, it uses the native resolution for input, but higher resolutions can enhance performance at the cost of more computation. Users can set the minimum and maximum number of pixels to achieve an optimal configuration for their needs.
|
||||
|
||||
```python
|
||||
|
||||
min_pixels = 224*224
|
||||
max_pixels = 2048*2048
|
||||
processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-7B-Instruct", min_pixels=min_pixels, max_pixels=max_pixels)
|
||||
|
||||
```
|
||||
|
||||
|
||||
|
||||
#### Multiple Image Inputs
|
||||
|
||||
By default, images and video content are directly included in the conversation. When handling multiple images, it's helpful to add labels to the images and videos for better reference. Users can control this behavior with the following settings:
|
||||
|
||||
|
||||
|
||||
```python
|
||||
|
||||
conversation = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "image"},
|
||||
{"type": "text", "text": "Hello, how are you?"}
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": "I'm doing well, thank you for asking. How can I assist you today?"
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": [
|
||||
{"type": "text", "text": "Can you describe these images and video?"},
|
||||
{"type": "image"},
|
||||
{"type": "image"},
|
||||
{"type": "video"},
|
||||
{"type": "text", "text": "These are from my vacation."}
|
||||
]
|
||||
},
|
||||
{
|
||||
"role": "assistant",
|
||||
"content": "I'd be happy to describe the images and video for you. Could you please provide more context about your vacation?"
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": "It was a trip to the mountains. Can you see the details in the images and video?"
|
||||
}
|
||||
]
|
||||
|
||||
# default:
|
||||
prompt_without_id = processor.apply_chat_template(conversation, add_generation_prompt=True)
|
||||
# Excepted output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\n<|vision_start|><|image_pad|><|vision_end|>Hello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing well, thank you for asking. How can I assist you today?<|im_end|>\n<|im_start|>user\nCan you describe these images and video?<|vision_start|><|image_pad|><|vision_end|><|vision_start|><|image_pad|><|vision_end|><|vision_start|><|video_pad|><|vision_end|>These are from my vacation.<|im_end|>\n<|im_start|>assistant\nI'd be happy to describe the images and video for you. Could you please provide more context about your vacation?<|im_end|>\n<|im_start|>user\nIt was a trip to the mountains. Can you see the details in the images and video?<|im_end|>\n<|im_start|>assistant\n'
|
||||
|
||||
|
||||
# add ids
|
||||
prompt_with_id = processor.apply_chat_template(conversation, add_generation_prompt=True, add_vision_id=True)
|
||||
# Excepted output: '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n<|im_start|>user\nPicture 1: <|vision_start|><|image_pad|><|vision_end|>Hello, how are you?<|im_end|>\n<|im_start|>assistant\nI'm doing well, thank you for asking. How can I assist you today?<|im_end|>\n<|im_start|>user\nCan you describe these images and video?Picture 2: <|vision_start|><|image_pad|><|vision_end|>Picture 3: <|vision_start|><|image_pad|><|vision_end|>Video 1: <|vision_start|><|video_pad|><|vision_end|>These are from my vacation.<|im_end|>\n<|im_start|>assistant\nI'd be happy to describe the images and video for you. Could you please provide more context about your vacation?<|im_end|>\n<|im_start|>user\nIt was a trip to the mountains. Can you see the details in the images and video?<|im_end|>\n<|im_start|>assistant\n'
|
||||
|
||||
```
|
||||
|
||||
#### Flash-Attention 2 to speed up generation
|
||||
|
||||
First, make sure to install the latest version of Flash Attention 2:
|
||||
|
||||
```bash
|
||||
pip install -U flash-attn --no-build-isolation
|
||||
```
|
||||
|
||||
Also, you should have hardware that is compatible with FlashAttention-2. Read more about it in the official documentation of the [flash attention repository](https://github.com/Dao-AILab/flash-attention). FlashAttention-2 can only be used when a model is loaded in `torch.float16` or `torch.bfloat16`.
|
||||
|
||||
To load and run a model using Flash Attention-2, simply add `attn_implementation="flash_attention_2"` when loading the model as follows:
|
||||
|
||||
```python
|
||||
import torch
from transformers import Qwen2VLForConditionalGeneration
|
||||
|
||||
model = Qwen2VLForConditionalGeneration.from_pretrained(
|
||||
"Qwen/Qwen2-VL-7B-Instruct",
|
||||
torch_dtype=torch.bfloat16,
|
||||
attn_implementation="flash_attention_2",
|
||||
)
|
||||
```
|
||||
|
||||
|
||||
## Qwen2VLConfig
|
||||
|
||||
[[autodoc]] Qwen2VLConfig
|
||||
|
||||
## Qwen2VLImageProcessor
|
||||
|
||||
[[autodoc]] Qwen2VLImageProcessor
|
||||
- preprocess
|
||||
|
||||
## Qwen2VLProcessor
|
||||
|
||||
[[autodoc]] Qwen2VLProcessor
|
||||
|
||||
## Qwen2VLModel
|
||||
|
||||
[[autodoc]] Qwen2VLModel
|
||||
- forward
|
||||
|
||||
## Qwen2VLForConditionalGeneration
|
||||
|
||||
[[autodoc]] Qwen2VLForConditionalGeneration
|
||||
- forward
|
@ -34,7 +34,7 @@ Tips:
|
||||
- The model predicts much better results if input 2D points and/or input bounding boxes are provided
|
||||
- You can prompt multiple points for the same image, and predict a single mask.
|
||||
- Fine-tuning the model is not supported yet
|
||||
- According to the paper, textual input should be also supported. However, at this time of writing this seems not to be supported according to [the official repository](https://github.com/facebookresearch/segment-anything/issues/4#issuecomment-1497626844).
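For a quick illustration of point prompting, here is a minimal sketch assuming the `facebook/sam-vit-base` checkpoint and a single 2D point; the point coordinates and image URL are only examples:

```python
import torch
import requests
from PIL import Image
from transformers import SamModel, SamProcessor

processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
model = SamModel.from_pretrained("facebook/sam-vit-base")

img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
input_points = [[[450, 600]]]  # one (x, y) point prompt for the image

inputs = processor(raw_image, input_points=input_points, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Resize the predicted masks back to the original image size
masks = processor.image_processor.post_process_masks(
    outputs.pred_masks.cpu(), inputs["original_sizes"].cpu(), inputs["reshaped_input_sizes"].cpu()
)
scores = outputs.iou_scores  # one IoU score per predicted mask
```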
|
||||
|
||||
|
||||
This model was contributed by [ybelkada](https://huggingface.co/ybelkada) and [ArthurZ](https://huggingface.co/ArthurZ).
|
||||
|
@ -93,12 +93,33 @@ from transformers import VitsTokenizer
|
||||
tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-eng")
|
||||
print(tokenizer.is_uroman)
|
||||
```
|
||||
If the `is_uroman` attribute is `True`, the tokenizer will automatically apply the `uroman` package to your text inputs; if `uroman` is not already installed, install it with:
|
||||
```
|
||||
pip install --upgrade uroman
|
||||
```
|
||||
Note: using `uroman` as a Python package requires Python >= `3.10`.
|
||||
You can use the tokenizer as usual without any additional preprocessing steps:
|
||||
```python
|
||||
import torch
|
||||
from transformers import VitsTokenizer, VitsModel, set_seed
|
||||
tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-kor")
|
||||
model = VitsModel.from_pretrained("facebook/mms-tts-kor")
|
||||
text = "이봐 무슨 일이야"
|
||||
inputs = tokenizer(text=text, return_tensors="pt")
|
||||
|
||||
set_seed(555) # make deterministic
|
||||
with torch.no_grad():
|
||||
outputs = model(inputs["input_ids"])
|
||||
|
||||
waveform = outputs.waveform[0]
|
||||
```
|
||||
If you don't want to upgrade to python >= `3.10`, then you can use the `uroman` perl package to pre-process the text inputs to the Roman alphabet.
|
||||
To do this, first clone the uroman repository to your local machine and set the bash variable `UROMAN` to the local path:
|
||||
|
||||
|
||||
```bash
|
||||
git clone https://github.com/isi-nlp/uroman.git
|
||||
cd uroman
|
||||
|
@ -27,6 +27,27 @@ The abstract from the paper is the following:
|
||||
This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ). The Tensorflow version of this model was contributed by [amyeroberts](https://huggingface.co/amyeroberts).
|
||||
The original code can be found [here](https://github.com/openai/whisper).
|
||||
|
||||
## Quick usage
|
||||
|
||||
You can run Whisper in less than 4 lines of code and transcribe in less than a minute!
|
||||
|
||||
```python
|
||||
# pip install transformers torch
|
||||
|
||||
import torch
|
||||
from transformers import pipeline
|
||||
|
||||
whisper = pipeline("automatic-speech-recognition", "openai/whisper-large-v3", torch_dtype=torch.float16, device="cuda:0")
|
||||
|
||||
transcription = whisper("<audio_file.mp3>")
|
||||
|
||||
print(transcription["text"])
|
||||
```
|
||||
|
||||
Voila! You can swap the model with any [Whisper checkpoints](https://huggingface.co/models?other=whisper&sort=downloads) on the Hugging Face Hub with the same pipeline based on your needs.
|
||||
|
||||
Bonus: You can replace `"cuda"` with `"mps"` to make it seamlessly work on Macs.
|
||||
|
||||
## Usage tips
|
||||
|
||||
- The model usually performs well without requiring any finetuning.
|
||||
|
@ -42,7 +42,7 @@ In total, we get 512 sequences each with length 512 and store them in a [`~datas
|
||||
>>> seq_len, dataset_size = 512, 512
|
||||
>>> dummy_data = {
|
||||
... "input_ids": np.random.randint(100, 30000, (dataset_size, seq_len)),
|
||||
... "labels": np.random.randint(0, 1, (dataset_size)),
|
||||
... "labels": np.random.randint(0, 2, (dataset_size)),
|
||||
... }
|
||||
>>> ds = Dataset.from_dict(dummy_data)
|
||||
>>> ds.set_format("pt")
|
||||
|
@ -51,6 +51,7 @@ FlashAttention-2 is currently supported for the following architectures:
|
||||
* [GPTNeo](https://huggingface.co/docs/transformers/model_doc/gpt_neo#transformers.GPTNeoModel)
|
||||
* [GPTNeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox#transformers.GPTNeoXModel)
|
||||
* [GPT-J](https://huggingface.co/docs/transformers/model_doc/gptj#transformers.GPTJModel)
|
||||
* [Granite](https://huggingface.co/docs/transformers/model_doc/granite#transformers.GraniteModel)
|
||||
* [Idefics2](https://huggingface.co/docs/transformers/model_doc/idefics2#transformers.Idefics2Model)
|
||||
* [Falcon](https://huggingface.co/docs/transformers/model_doc/falcon#transformers.FalconModel)
|
||||
* [JetMoe](https://huggingface.co/docs/transformers/model_doc/jetmoe#transformers.JetMoeModel)
|
||||
@ -73,17 +74,18 @@ FlashAttention-2 is currently supported for the following architectures:
|
||||
* [OPT](https://huggingface.co/docs/transformers/model_doc/opt#transformers.OPTModel)
|
||||
* [Phi](https://huggingface.co/docs/transformers/model_doc/phi#transformers.PhiModel)
|
||||
* [Phi3](https://huggingface.co/docs/transformers/model_doc/phi3#transformers.Phi3Model)
|
||||
* [SigLIP](https://huggingface.co/docs/transformers/model_doc/siglip)
|
||||
* [StableLm](https://huggingface.co/docs/transformers/model_doc/stablelm#transformers.StableLmModel)
|
||||
* [Starcoder2](https://huggingface.co/docs/transformers/model_doc/starcoder2#transformers.Starcoder2Model)
|
||||
* [Qwen2](https://huggingface.co/docs/transformers/model_doc/qwen2#transformers.Qwen2Model)
|
||||
* [Qwen2Audio](https://huggingface.co/docs/transformers/model_doc/qwen2_audio#transformers.Qwen2AudioEncoder)
|
||||
* [Qwen2MoE](https://huggingface.co/docs/transformers/model_doc/qwen2_moe#transformers.Qwen2MoeModel)
|
||||
* [Qwen2VL](https://huggingface.co/docs/transformers/model_doc/qwen2_vl#transformers.Qwen2VLModel)
|
||||
* [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper#transformers.WhisperModel)
|
||||
* [Wav2Vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2Model)
|
||||
* [Hubert](https://huggingface.co/docs/transformers/model_doc/hubert#transformers.HubertModel)
|
||||
* [data2vec_audio](https://huggingface.co/docs/transformers/main/en/model_doc/data2vec#transformers.Data2VecAudioModel)
|
||||
* [Sew](https://huggingface.co/docs/transformers/main/en/model_doc/sew#transformers.SEWModel)
|
||||
* [SigLIP](https://huggingface.co/docs/transformers/model_doc/siglip)
|
||||
* [UniSpeech](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/unispeech#transformers.UniSpeechModel)
|
||||
* [unispeech_sat](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/unispeech-sat#transformers.UniSpeechSatModel)
|
||||
|
||||
@ -199,12 +201,15 @@ FlashAttention is more memory efficient, meaning you can train on much larger se
|
||||
PyTorch's [`torch.nn.functional.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html) (SDPA) can also call FlashAttention and memory-efficient attention kernels under the hood. SDPA support is currently being added natively in Transformers and is used by default for `torch>=2.1.1` when an implementation is available. You may also set `attn_implementation="sdpa"` in `from_pretrained()` to explicitly request SDPA to be used.
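For example, a minimal sketch of requesting SDPA explicitly (the checkpoint here is only an example; any supported architecture works):

```python
import torch
from transformers import AutoModelForCausalLM

# Explicitly ask for the SDPA attention implementation
model = AutoModelForCausalLM.from_pretrained(
    "openai-community/gpt2",
    torch_dtype=torch.float16,
    attn_implementation="sdpa",
)
```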
|
||||
|
||||
For now, Transformers supports SDPA inference and training for the following architectures:
|
||||
* [Albert](https://huggingface.co/docs/transformers/model_doc/albert#transformers.AlbertModel)
|
||||
* [Audio Spectrogram Transformer](https://huggingface.co/docs/transformers/model_doc/audio-spectrogram-transformer#transformers.ASTModel)
|
||||
* [Bart](https://huggingface.co/docs/transformers/model_doc/bart#transformers.BartModel)
|
||||
* [Bert](https://huggingface.co/docs/transformers/model_doc/bert#transformers.BertModel)
|
||||
* [CamemBERT](https://huggingface.co/docs/transformers/model_doc/camembert#transformers.CamembertModel)
|
||||
* [Chameleon](https://huggingface.co/docs/transformers/model_doc/chameleon#transformers.Chameleon)
|
||||
* [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPModel)
|
||||
* [Cohere](https://huggingface.co/docs/transformers/model_doc/cohere#transformers.CohereModel)
|
||||
* [data2vec_audio](https://huggingface.co/docs/transformers/main/en/model_doc/data2vec#transformers.Data2VecAudioModel)
|
||||
* [Dbrx](https://huggingface.co/docs/transformers/model_doc/dbrx#transformers.DbrxModel)
|
||||
* [DeiT](https://huggingface.co/docs/transformers/model_doc/deit#transformers.DeiTModel)
|
||||
* [Dpr](https://huggingface.co/docs/transformers/model_doc/dpr#transformers.DprReader)
|
||||
@ -214,9 +219,16 @@ For now, Transformers supports SDPA inference and training for the following arc
|
||||
* [GPT2](https://huggingface.co/docs/transformers/model_doc/gpt2)
|
||||
* [GPTBigCode](https://huggingface.co/docs/transformers/model_doc/gpt_bigcode#transformers.GPTBigCodeModel)
|
||||
* [GPTNeoX](https://huggingface.co/docs/transformers/model_doc/gpt_neox#transformers.GPTNeoXModel)
|
||||
* [Hubert](https://huggingface.co/docs/transformers/model_doc/hubert#transformers.HubertModel)
|
||||
* [Idefics](https://huggingface.co/docs/transformers/model_doc/idefics#transformers.IdeficsModel)
|
||||
* [Granite](https://huggingface.co/docs/transformers/model_doc/granite#transformers.GraniteModel)
|
||||
* [JetMoe](https://huggingface.co/docs/transformers/model_doc/jetmoe#transformers.JetMoeModel)
|
||||
* [Jamba](https://huggingface.co/docs/transformers/model_doc/jamba#transformers.JambaModel)
|
||||
* [Llama](https://huggingface.co/docs/transformers/model_doc/llama#transformers.LlamaModel)
|
||||
* [Mistral](https://huggingface.co/docs/transformers/model_doc/mistral#transformers.MistralModel)
|
||||
* [Mixtral](https://huggingface.co/docs/transformers/model_doc/mixtral#transformers.MixtralModel)
|
||||
* [Musicgen](https://huggingface.co/docs/transformers/model_doc/musicgen#transformers.MusicgenModel)
|
||||
* [MusicGen Melody](https://huggingface.co/docs/transformers/model_doc/musicgen_melody#transformers.MusicgenMelodyModel)
|
||||
* [OLMo](https://huggingface.co/docs/transformers/model_doc/olmo#transformers.OlmoModel)
|
||||
* [PaliGemma](https://huggingface.co/docs/transformers/model_doc/paligemma#transformers.PaliGemmaForConditionalGeneration)
|
||||
* [Phi](https://huggingface.co/docs/transformers/model_doc/phi#transformers.PhiModel)
|
||||
@ -230,6 +242,15 @@ For now, Transformers supports SDPA inference and training for the following arc
|
||||
* [Qwen2](https://huggingface.co/docs/transformers/model_doc/qwen2#transformers.Qwen2Model)
|
||||
* [Qwen2Audio](https://huggingface.co/docs/transformers/model_doc/qwen2_audio#transformers.Qwen2AudioEncoder)
|
||||
* [Qwen2MoE](https://huggingface.co/docs/transformers/model_doc/qwen2_moe#transformers.Qwen2MoeModel)
|
||||
* [RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta#transformers.RobertaModel)
|
||||
* [Sew](https://huggingface.co/docs/transformers/main/en/model_doc/sew#transformers.SEWModel)
|
||||
* [SigLIP](https://huggingface.co/docs/transformers/model_doc/siglip)
|
||||
* [StableLm](https://huggingface.co/docs/transformers/model_doc/stablelm#transformers.StableLmModel)
|
||||
* [Starcoder2](https://huggingface.co/docs/transformers/model_doc/starcoder2#transformers.Starcoder2Model)
|
||||
* [UniSpeech](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/unispeech#transformers.UniSpeechModel)
|
||||
* [unispeech_sat](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/unispeech-sat#transformers.UniSpeechSatModel)
|
||||
* [RoBERTa](https://huggingface.co/docs/transformers/model_doc/roberta#transformers.RobertaModel)
|
||||
* [Qwen2VL](https://huggingface.co/docs/transformers/model_doc/qwen2_vl#transformers.Qwen2VLModel)
|
||||
* [Musicgen](https://huggingface.co/docs/transformers/model_doc/musicgen#transformers.MusicgenModel)
|
||||
* [MusicGen Melody](https://huggingface.co/docs/transformers/model_doc/musicgen_melody#transformers.MusicgenMelodyModel)
|
||||
* [Nemotron](https://huggingface.co/docs/transformers/model_doc/nemotron)
|
||||
@ -239,12 +260,9 @@ For now, Transformers supports SDPA inference and training for the following arc
|
||||
* [ViTMSN](https://huggingface.co/docs/transformers/model_doc/vit_msn#transformers.ViTMSNModel)
|
||||
* [VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae#transformers.VideoMAEModell)
|
||||
* [wav2vec2](https://huggingface.co/docs/transformers/model_doc/wav2vec2#transformers.Wav2Vec2Model)
|
||||
* [Hubert](https://huggingface.co/docs/transformers/model_doc/hubert#transformers.HubertModel)
|
||||
* [data2vec_audio](https://huggingface.co/docs/transformers/main/en/model_doc/data2vec#transformers.Data2VecAudioModel)
|
||||
* [SigLIP](https://huggingface.co/docs/transformers/model_doc/siglip)
|
||||
* [Sew](https://huggingface.co/docs/transformers/main/en/model_doc/sew#transformers.SEWModel)
|
||||
* [UniSpeech](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/unispeech#transformers.UniSpeechModel)
|
||||
* [unispeech_sat](https://huggingface.co/docs/transformers/v4.39.3/en/model_doc/unispeech-sat#transformers.UniSpeechSatModel)
|
||||
* [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper#transformers.WhisperModel)
|
||||
* [XLM-RoBERTa](https://huggingface.co/docs/transformers/model_doc/xlm-roberta#transformers.XLMRobertaModel)
|
||||
* [XLM-RoBERTa-XL](https://huggingface.co/docs/transformers/model_doc/xlm-roberta-xl#transformers.XLMRobertaXLModel)
|
||||
* [YOLOS](https://huggingface.co/docs/transformers/model_doc/yolos#transformers.YolosModel)
|
||||
|
||||
|
||||
|
@ -155,13 +155,20 @@ This example assumes that you have:
|
||||
The snippet below is an example of a Dockerfile that uses a base image that supports distributed CPU training and then
|
||||
extracts a Transformers release to the `/workspace` directory, so that the example scripts are included in the image:
|
||||
```dockerfile
|
||||
FROM intel/intel-optimized-pytorch:2.3.0-pip-multinode
|
||||
|
||||
RUN apt-get update -y && \
|
||||
apt-get install -y --no-install-recommends --fix-missing \
|
||||
google-perftools \
|
||||
libomp-dev
|
||||
|
||||
WORKDIR /workspace
|
||||
|
||||
# Download and extract the transformers code
|
||||
ARG HF_TRANSFORMERS_VER="4.35.2"
|
||||
RUN mkdir transformers && \
|
||||
ARG HF_TRANSFORMERS_VER="4.44.0"
|
||||
RUN pip install --no-cache-dir \
|
||||
transformers==${HF_TRANSFORMERS_VER} && \
|
||||
mkdir transformers && \
|
||||
curl -sSL --retry 5 https://github.com/huggingface/transformers/archive/refs/tags/v${HF_TRANSFORMERS_VER}.tar.gz | tar -C transformers --strip-components=1 -xzf -
|
||||
```
|
||||
The image needs to be built and copied to the cluster's nodes or pushed to a container registry prior to deploying the
|
||||
@ -189,7 +196,6 @@ apiVersion: "kubeflow.org/v1"
|
||||
kind: PyTorchJob
|
||||
metadata:
|
||||
name: transformers-pytorchjob
|
||||
namespace: kubeflow
|
||||
spec:
|
||||
elasticPolicy:
|
||||
rdzvBackend: c10d
|
||||
@ -206,32 +212,27 @@ spec:
|
||||
- name: pytorch
|
||||
image: <image name>:<tag> # Specify the docker image to use for the worker pods
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- torchrun
|
||||
- /workspace/transformers/examples/pytorch/question-answering/run_qa.py
|
||||
- --model_name_or_path
|
||||
- "google-bert/bert-large-uncased"
|
||||
- --dataset_name
|
||||
- "squad"
|
||||
- --do_train
|
||||
- --do_eval
|
||||
- --per_device_train_batch_size
|
||||
- "12"
|
||||
- --learning_rate
|
||||
- "3e-5"
|
||||
- --num_train_epochs
|
||||
- "2"
|
||||
- --max_seq_length
|
||||
- "384"
|
||||
- --doc_stride
|
||||
- "128"
|
||||
- --output_dir
|
||||
- "/tmp/pvc-mount/output"
|
||||
- --no_cuda
|
||||
- --ddp_backend
|
||||
- "ccl"
|
||||
- --use_ipex
|
||||
- --bf16 # Specify --bf16 if your hardware supports bfloat16
|
||||
command: ["/bin/bash", "-c"]
|
||||
args:
|
||||
- >-
|
||||
cd /workspace/transformers;
|
||||
pip install -r /workspace/transformers/examples/pytorch/question-answering/requirements.txt;
|
||||
source /usr/local/lib/python3.10/dist-packages/oneccl_bindings_for_pytorch/env/setvars.sh;
|
||||
torchrun /workspace/transformers/examples/pytorch/question-answering/run_qa.py \
|
||||
--model_name_or_path distilbert/distilbert-base-uncased \
|
||||
--dataset_name squad \
|
||||
--do_train \
|
||||
--do_eval \
|
||||
--per_device_train_batch_size 12 \
|
||||
--learning_rate 3e-5 \
|
||||
--num_train_epochs 2 \
|
||||
--max_seq_length 384 \
|
||||
--doc_stride 128 \
|
||||
--output_dir /tmp/pvc-mount/output_$(date +%Y%m%d_%H%M%S) \
|
||||
--no_cuda \
|
||||
--ddp_backend ccl \
|
||||
--bf16 \
|
||||
--use_ipex;
|
||||
env:
|
||||
- name: LD_PRELOAD
|
||||
value: "/usr/lib/x86_64-linux-gnu/libtcmalloc.so.4.5.9:/usr/local/lib/libiomp5.so"
|
||||
@ -244,13 +245,13 @@ spec:
|
||||
- name: CCL_WORKER_COUNT
|
||||
value: "1"
|
||||
- name: OMP_NUM_THREADS # Can be tuned for optimal performance
|
||||
- value: "56"
|
||||
value: "240"
|
||||
resources:
|
||||
limits:
|
||||
cpu: 240 # Update the CPU and memory limit values based on your nodes
|
||||
memory: 128Gi
|
||||
requests:
|
||||
cpu: 240 # Update the CPU and memory request values based on your nodes
|
||||
memory: 128Gi
|
||||
volumeMounts:
|
||||
- name: pvc-volume
|
||||
@ -258,8 +259,8 @@ spec:
|
||||
- mountPath: /dev/shm
|
||||
name: dshm
|
||||
restartPolicy: Never
|
||||
nodeSelector: # Optionally use nodeSelector to match a certain node label for the worker pods
|
||||
node-type: gnr
|
||||
volumes:
|
||||
- name: pvc-volume
|
||||
persistentVolumeClaim:
|
||||
@ -287,10 +288,12 @@ set the same CPU and memory amounts for both the resource limits and requests.
|
||||
After the PyTorchJob spec has been updated with values appropriate for your cluster and training job, it can be deployed
|
||||
to the cluster using:
|
||||
```bash
|
||||
export NAMESPACE=<specify your namespace>
|
||||
|
||||
kubectl create -f pytorchjob.yaml -n ${NAMESPACE}
|
||||
```
|
||||
|
||||
The `kubectl get pods -n ${NAMESPACE}` command can then be used to list the pods in your namespace. You should see
|
||||
the worker pods for the PyTorchJob that was just deployed. At first, they will probably have a status of "Pending" as
|
||||
the containers get pulled and created, then the status should change to "Running".
|
||||
```
|
||||
@ -303,13 +306,13 @@ transformers-pytorchjob-worker-3 1/1 Running
|
||||
...
|
||||
```
|
||||
|
||||
The logs for a worker can be viewed using `kubectl logs <pod name> -n ${NAMESPACE}`. Add `-f` to stream the logs, for example:
|
||||
```bash
|
||||
kubectl logs transformers-pytorchjob-worker-0 -n ${NAMESPACE} -f
|
||||
```
|
||||
|
||||
After the training job completes, the trained model can be copied from the PVC or storage location. When you are done
|
||||
with the job, the PyTorchJob resource can be deleted from the cluster using `kubectl delete -f pytorchjob.yaml -n ${NAMESPACE}`.
|
||||
|
||||
## Summary
|
||||
|
||||
|
@ -54,7 +54,7 @@ speech-to-text.
|
||||
Not the result you had in mind? Check out some of the [most downloaded automatic speech recognition models](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=trending)
|
||||
on the Hub to see if you can get a better transcription.
|
||||
|
||||
Let's try the [Whisper large-v2](https://huggingface.co/openai/whisper-large-v2) model from OpenAI. Whisper was released
|
||||
2 years later than Wav2Vec2, and was trained on close to 10x more data. As such, it beats Wav2Vec2 on most downstream
|
||||
benchmarks. It also has the added benefit of predicting punctuation and casing, neither of which are possible with
|
||||
Wav2Vec2.
|
||||
|
@ -56,4 +56,4 @@ Use the table below to help you decide which quantization method to use.
|
||||
| [HQQ](./hqq) | 🟢 | 🟢 | 🟢 | 🔴 | 🔴 | 🟢 | 1 - 8 | 🟢 | 🔴 | 🟢 | https://github.com/mobiusml/hqq/ |
|
||||
| [Quanto](./quanto) | 🟢 | 🟢 | 🟢 | 🔴 | 🟢 | 🟢 | 2 / 4 / 8 | 🔴 | 🔴 | 🟢 | https://github.com/huggingface/quanto |
|
||||
| [FBGEMM_FP8](./fbgemm_fp8.md) | 🟢 | 🔴 | 🟢 | 🔴 | 🔴 | 🔴 | 8 | 🔴 | 🟢 | 🟢 | https://github.com/pytorch/FBGEMM |
|
||||
|
||||
| [torchao](./torchao.md) | 🟢 | | 🟢 | 🔴 | partial support (int4 weight only) | | 4 / 8 | | 🟢🔴 | 🟢 | https://github.com/pytorch/ao |
|
||||
|
45
docs/source/en/quantization/torchao.md
Normal file
@ -0,0 +1,45 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# TorchAO
|
||||
|
||||
[TorchAO](https://github.com/pytorch/ao) is an architecture optimization library for PyTorch. It provides high-performance dtypes, optimization techniques, and kernels for inference and training, and composes with native PyTorch features such as `torch.compile` and FSDP. Some benchmark numbers can be found [here](https://github.com/pytorch/ao/tree/main?tab=readme-ov-file#without-intrusive-code-changes).
|
||||
|
||||
Before you begin, make sure the following libraries are installed with their latest version:
|
||||
|
||||
```bash
|
||||
pip install --upgrade torch torchao
|
||||
```
|
||||
|
||||
|
||||
```py
|
||||
import torch
from transformers import TorchAoConfig, AutoModelForCausalLM, AutoTokenizer
|
||||
|
||||
model_name = "meta-llama/Meta-Llama-3-8B"
|
||||
# We support int4_weight_only, int8_weight_only and int8_dynamic_activation_int8_weight
|
||||
# More examples and documentations for arguments can be found in https://github.com/pytorch/ao/tree/main/torchao/quantization#other-available-quantization-techniques
|
||||
quantization_config = TorchAoConfig("int4_weight_only", group_size=128)
|
||||
quantized_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", quantization_config=quantization_config)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name)
|
||||
input_text = "What are we having for dinner?"
|
||||
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
||||
|
||||
# compile the quantized model to get a speedup
|
||||
import torchao
|
||||
torchao.quantization.utils.recommended_inductor_config_setter()
|
||||
quantized_model = torch.compile(quantized_model, mode="max-autotune")
|
||||
|
||||
output = quantized_model.generate(**input_ids, max_new_tokens=10)
|
||||
print(tokenizer.decode(output[0], skip_special_tokens=True))
|
||||
```
|
||||
|
||||
torchao quantization is implemented with tensor subclasses; currently it does not work with Hugging Face serialization, neither the safetensors option nor the [non-safetensors option](https://github.com/huggingface/transformers/issues/32364). We will update this page with instructions once it is supported.
|
@ -90,7 +90,7 @@ The next step is to load a T5 tokenizer to process the English-French language p
|
||||
The preprocessing function you want to create needs to:
|
||||
|
||||
1. Prefix the input with a prompt so T5 knows this is a translation task. Some models capable of multiple NLP tasks require prompting for specific tasks.
|
||||
2. Set the target language (French) in the `text_target` parameter to ensure the tokenizer processes the target text correctly. If you don't set `text_target`, the tokenizer processes the target text as English.
|
||||
3. Truncate sequences to be no longer than the maximum length set by the `max_length` parameter.
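A minimal sketch of such a preprocessing function is shown below, assuming each example carries a `translation` dict with `"en"` and `"fr"` keys (as in the dataset used in this guide) and that `tokenizer` is the T5 tokenizer loaded above:

```py
prefix = "translate English to French: "

def preprocess_function(examples):
    # 1. prepend the task prefix to the English inputs
    inputs = [prefix + example["en"] for example in examples["translation"]]
    # 2. pass the French targets through `text_target`
    targets = [example["fr"] for example in examples["translation"]]
    # 3. truncate both inputs and targets to a maximum length
    model_inputs = tokenizer(inputs, text_target=targets, max_length=128, truncation=True)
    return model_inputs
```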
|
||||
|
||||
|
||||
|
146
docs/source/en/tasks/video_text_to_text.md
Normal file
@ -0,0 +1,146 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Video-text-to-text
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
Video-text-to-text models, also known as video language models or vision language models with video input, are language models that take a video input. These models can tackle various tasks, from video question answering to video captioning.
|
||||
|
||||
These models have nearly the same architecture as [image-text-to-text](../image_text_to_text.md) models except for some changes to accept video data, since video data is essentially image frames with temporal dependencies. Some image-text-to-text models take in multiple images, but this alone is inadequate for a model to accept videos. Moreover, video-text-to-text models are often trained with all vision modalities. Each example might have videos, multiple videos, images and multiple images. Some of these models can also take interleaved inputs. For example, you can refer to a specific video inside a string of text by adding a video token in text like "What is happening in this video? `<video>`".
|
||||
|
||||
In this guide, we provide a brief overview of video LMs and show how to use them with Transformers for inference.
|
||||
|
||||
To begin with, there are multiple types of video LMs:
|
||||
- base models used for fine-tuning
|
||||
- chat fine-tuned models for conversation
|
||||
- instruction fine-tuned models
|
||||
|
||||
This guide focuses on inference with an instruction-tuned model, [llava-hf/llava-interleave-qwen-7b-hf](https://huggingface.co/llava-hf/llava-interleave-qwen-7b-hf) which can take in interleaved data. Alternatively, you can try [llava-interleave-qwen-0.5b-hf](https://huggingface.co/llava-hf/llava-interleave-qwen-0.5b-hf) if your hardware doesn't allow running a 7B model.
|
||||
|
||||
Let's begin installing the dependencies.
|
||||
|
||||
```bash
|
||||
pip install -q transformers accelerate flash_attn
|
||||
```
|
||||
|
||||
Let's initialize the model and the processor.
|
||||
|
||||
```python
|
||||
from transformers import LlavaProcessor, LlavaForConditionalGeneration
|
||||
import torch
|
||||
model_id = "llava-hf/llava-interleave-qwen-0.5b-hf"
|
||||
|
||||
processor = LlavaProcessor.from_pretrained(model_id)
|
||||
|
||||
model = LlavaForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.float16)
|
||||
model.to("cuda")
|
||||
```
|
||||

Some models directly consume the `<video>` token, while others accept a number of `<image>` tokens equal to the number of sampled frames. This model handles videos in the latter fashion. We will write a simple utility to handle image tokens, and another utility to get a video from a URL and sample frames from it.

```python
import uuid
import requests
import cv2
from PIL import Image


def replace_video_with_images(text, frames):
    # Replace a single `<video>` token with one `<image>` token per sampled frame.
    return text.replace("<video>", "<image>" * frames)


def sample_frames(url, num_frames):
    # Download the video to a temporary local file.
    response = requests.get(url)
    path_id = str(uuid.uuid4())
    path = f"./{path_id}.mp4"
    with open(path, "wb") as f:
        f.write(response.content)

    # Read the video and keep `num_frames` evenly spaced frames.
    video = cv2.VideoCapture(path)
    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    interval = total_frames // num_frames
    frames = []
    for i in range(total_frames):
        ret, frame = video.read()
        if not ret:
            continue
        if i % interval == 0:
            # Convert the BGR OpenCV frame to an RGB PIL image.
            pil_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            frames.append(pil_img)
    video.release()
    # Make sure we return exactly `num_frames` frames.
    return frames[:num_frames]
```

Let's get our inputs. We will sample frames and concatenate them.

```python
video_1 = "https://huggingface.co/spaces/merve/llava-interleave/resolve/main/cats_1.mp4"
video_2 = "https://huggingface.co/spaces/merve/llava-interleave/resolve/main/cats_2.mp4"

video_1 = sample_frames(video_1, 6)
video_2 = sample_frames(video_2, 6)

videos = video_1 + video_2

videos

# [<PIL.Image.Image image mode=RGB size=1920x1080>,
#  <PIL.Image.Image image mode=RGB size=1920x1080>,
#  <PIL.Image.Image image mode=RGB size=1920x1080>, ...]
```

Both videos have cats.

<div class="container">
  <div class="video-container">
    <video width="400" controls>
      <source src="https://huggingface.co/spaces/merve/llava-interleave/resolve/main/cats_1.mp4" type="video/mp4">
    </video>
  </div>

  <div class="video-container">
    <video width="400" controls>
      <source src="https://huggingface.co/spaces/merve/llava-interleave/resolve/main/cats_2.mp4" type="video/mp4">
    </video>
  </div>
</div>

Now we can preprocess the inputs.

This model has a prompt template that looks like the following. First, we'll put all the sampled frames into one list. Since we sampled six frames from each video (12 frames in total), we will insert 12 `<image>` tokens into our prompt. Add `assistant` at the end of the prompt to trigger the model to give an answer. Then we can preprocess.

```python
user_prompt = "Are these two cats in these two videos doing the same thing?"
toks = "<image>" * 12
prompt = "<|im_start|>user" + toks + f"\n{user_prompt}<|im_end|><|im_start|>assistant"
inputs = processor(prompt, images=videos).to(model.device, model.dtype)
```

We can now call [`~GenerationMixin.generate`] for inference. The model output contains both our prompt and the answer, so we only keep the text that comes after the prompt and the `assistant` part of the output.

```python
output = model.generate(**inputs, max_new_tokens=100, do_sample=False)
print(processor.decode(output[0][2:], skip_special_tokens=True)[len(user_prompt)+10:])

# The first cat is shown in a relaxed state, with its eyes closed and a content expression, while the second cat is shown in a more active state, with its mouth open wide, possibly in a yawn or a vocalization.
```

And voila!

To learn more about chat templates and token streaming for video-text-to-text models, refer to the [image-text-to-text](../image_text_to_text) task guide because these models work similarly.
@@ -382,6 +382,41 @@ trainer.train()

Note that layerwise optimization is somewhat experimental and does not support DDP (Distributed Data Parallel); you can therefore run the training script only on a single GPU. Please see [this appropriate section](https://github.com/jiaweizzhao/GaLore?tab=readme-ov-file#train-7b-model-with-a-single-gpu-with-24gb-memory) for more details. Other features such as gradient clipping and DeepSpeed might not be supported out of the box. Please [raise an issue on GitHub](https://github.com/huggingface/transformers/issues) if you encounter such an issue.

## Liger Kernel

[Liger Kernel](https://github.com/linkedin/Liger-Kernel) is a collection of Triton kernels developed by LinkedIn designed specifically for LLM training. It provides Hugging Face-compatible RMSNorm, RoPE, SwiGLU, CrossEntropy, FusedLinearCrossEntropy, and more to come. It can effectively increase multi-GPU training throughput by 20% and reduce memory usage by 60%. The kernel works out of the box with flash attention, PyTorch FSDP, and Microsoft DeepSpeed.

<Tip>
Gain +20% throughput and reduce memory usage by 60% on LLaMA 3-8B model training, and achieve longer context lengths and larger batch sizes. It is also useful if you want to scale up to multi-head training (e.g. Medusa) or large vocabulary sizes. See details and examples in [Liger](https://github.com/linkedin/Liger-Kernel/tree/main/examples).
</Tip>

First, make sure the official Liger Kernel package is installed:

```bash
pip install liger-kernel
```

Pass `use_liger_kernel=True` to apply the Liger kernel to your model, for example:

```py
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="your-model",
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    num_train_epochs=2,
    weight_decay=0.01,
    eval_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,
    push_to_hub=True,
    use_liger_kernel=True
)
```

The kernel supports the Llama, Gemma, Mistral, and Mixtral model architectures. The most up-to-date list of supported models can be found [here](https://github.com/linkedin/Liger-Kernel). When `use_liger_kernel` is set to `True`, the corresponding layers in the original model will be patched with Liger's efficient implementation, so you don't need to do anything extra other than setting the argument value.
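
If you train with a custom loop rather than [`Trainer`], the Liger repository also documents model-specific patching helpers. Below is a minimal sketch based on the helper described in the Liger Kernel README; treat the exact import path and behavior as assumptions and check the upstream documentation before relying on it.

```py
# Hedged sketch: patch Llama modules in place with Liger's Triton kernels
# before instantiating the model, without going through Trainer.
# The helper name follows the Liger Kernel README and may change upstream.
from liger_kernel.transformers import apply_liger_kernel_to_llama
from transformers import AutoModelForCausalLM

apply_liger_kernel_to_llama()  # patches RMSNorm, RoPE, SwiGLU, cross entropy, ...
model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B")
```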
|
||||
|
||||
## LOMO optimizer
|
||||
|
||||
The LOMO optimizers have been introduced in [Full Parameter Fine-Tuning for Large Language Models with Limited Resources](https://hf.co/papers/2306.09782) and [AdaLomo: Low-memory Optimization with Adaptive Learning Rate](https://hf.co/papers/2310.10195).
|
||||
@ -432,6 +467,57 @@ trainer = trl.SFTTrainer(
|
||||
trainer.train()
|
||||
```
|
||||
|
||||
## GrokAdamW optimizer
|
||||
|
||||
The GrokAdamW optimizer is designed to enhance training performance and stability, particularly for models that benefit from grokking signal functions. To use GrokAdamW, first install the optimizer package with `pip install grokadamw`.
|
||||
|
||||
<Tip>
|
||||
|
||||
GrokAdamW is particularly useful for models that require advanced optimization techniques to achieve better performance and stability.
|
||||
|
||||
</Tip>
|
||||
|
||||
Below is a simple script to demonstrate how to fine-tune [google/gemma-2b](https://huggingface.co/google/gemma-2b) on the IMDB dataset using the GrokAdamW optimizer:
|
||||
|
||||
```python
|
||||
import torch
|
||||
import datasets
|
||||
from transformers import TrainingArguments, AutoTokenizer, AutoModelForCausalLM, Trainer
|
||||
|
||||
# Load the IMDB dataset
|
||||
train_dataset = datasets.load_dataset('imdb', split='train')
|
||||
|
||||
# Define the training arguments
|
||||
args = TrainingArguments(
|
||||
output_dir="./test-grokadamw",
|
||||
max_steps=1000,
|
||||
per_device_train_batch_size=4,
|
||||
optim="grokadamw",
|
||||
logging_strategy="steps",
|
||||
logging_steps=1,
|
||||
learning_rate=2e-5,
|
||||
save_strategy="no",
|
||||
run_name="grokadamw-imdb",
|
||||
)
|
||||
|
||||
# Load the model and tokenizer
|
||||
model_id = "google/gemma-2b"
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
||||
model = AutoModelForCausalLM.from_pretrained(model_id, low_cpu_mem_usage=True).to(0)
|
||||
|
||||
# Initialize the Trainer
|
||||
trainer = Trainer(
|
||||
model=model,
|
||||
args=args,
|
||||
train_dataset=train_dataset,
|
||||
)
|
||||
|
||||
# Train the model
|
||||
trainer.train()
|
||||
```
|
||||
|
||||
This script demonstrates how to fine-tune the `google/gemma-2b` model on the IMDB dataset using the GrokAdamW optimizer. The `TrainingArguments` are configured to use GrokAdamW, and the dataset is passed to the `Trainer` for training.
|
||||
|
||||
## Accelerate and Trainer
|
||||
|
||||
The [`Trainer`] class is powered by [Accelerate](https://hf.co/docs/accelerate), a library for easily training PyTorch models in distributed environments with support for integrations such as [FullyShardedDataParallel (FSDP)](https://pytorch.org/blog/introducing-pytorch-fully-sharded-data-parallel-api/) and [DeepSpeed](https://www.deepspeed.ai/).
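
As a concrete illustration, here is a minimal sketch (argument values are illustrative, not a prescribed configuration) of enabling FSDP through [`TrainingArguments`], which [`Trainer`] hands off to Accelerate under the hood:

```py
# Hedged sketch: turn on FSDP sharding via TrainingArguments.
# Trainer forwards these settings to Accelerate; values are illustrative.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="your-model",
    per_device_train_batch_size=8,
    bf16=True,
    fsdp="full_shard auto_wrap",  # shard parameters, gradients and optimizer states
)
```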
|
||||
|
@@ -173,7 +173,7 @@ class ResnetModelForImageClassification(PreTrainedModel):
     def forward(self, tensor, labels=None):
         logits = self.model(tensor)
         if labels is not None:
-            loss = torch.nn.cross_entropy(logits, labels)
+            loss = torch.nn.functional.cross_entropy(logits, labels)
             return {"loss": loss, "logits": logits}
         return {"logits": logits}
 ```
@@ -174,7 +174,7 @@ class ResnetModelForImageClassification(PreTrainedModel):
     def forward(self, tensor, labels=None):
         logits = self.model(tensor)
         if labels is not None:
-            loss = torch.nn.cross_entropy(logits, labels)
+            loss = torch.nn.functional.cross_entropy(logits, labels)
             return {"loss": loss, "logits": logits}
         return {"logits": logits}
 ```
@@ -14,7 +14,7 @@ rendered properly in your Markdown viewer.
 
 -->
 
-# Templates for Chat Models
+# Chat Templates
 
 ## Introduction
 
@@ -161,7 +161,7 @@ class ResnetModelForImageClassification(PreTrainedModel):
     def forward(self, tensor, labels=None):
         logits = self.model(tensor)
         if labels is not None:
-            loss = torch.nn.cross_entropy(logits, labels)
+            loss = torch.nn.functional.cross_entropy(logits, labels)
             return {"loss": loss, "logits": logits}
         return {"logits": logits}
 ```
@@ -139,9 +139,6 @@ generation_output[:2]
 [[autodoc]] ForcedEOSTokenLogitsProcessor
     - __call__
 
-[[autodoc]] ForceTokensLogitsProcessor
-    - __call__
-
 [[autodoc]] HammingDiversityLogitsProcessor
     - __call__
 
@@ -157,9 +154,6 @@ generation_output[:2]
 [[autodoc]] LogitsProcessorList
     - __call__
 
-[[autodoc]] LogitsWarper
-    - __call__
-
 [[autodoc]] MinLengthLogitsProcessor
     - __call__
 
|
@ -27,8 +27,8 @@
|
||||
title: 에이전트
|
||||
- local: llm_tutorial
|
||||
title: 대규모 언어 모델로 생성하기
|
||||
- local: in_translation
|
||||
title: (번역중)Chatting with Transformers
|
||||
- local: conversations
|
||||
title: Transformers로 채팅하기
|
||||
title: 튜토리얼
|
||||
- sections:
|
||||
- isExpanded: false
|
||||
@ -79,8 +79,8 @@
|
||||
title: 이미지 특징 추출
|
||||
- local: tasks/mask_generation
|
||||
title: 마스크 생성
|
||||
- local: in_translation
|
||||
title: (번역중) Knowledge Distillation for Computer Vision
|
||||
- local: tasks/knowledge_distillation_for_image_classification
|
||||
title: 컴퓨터 비전(이미지 분류)를 위한 지식 증류(knowledge distillation)
|
||||
title: 컴퓨터 비전
|
||||
- isExpanded: false
|
||||
sections:
|
||||
@ -145,8 +145,8 @@
|
||||
title: bitsandbytes
|
||||
- local: in_translation
|
||||
title: (번역중) GPTQ
|
||||
- local: in_translation
|
||||
title: (번역중) AWQ
|
||||
- local: quantization/awq
|
||||
title: AWQ
|
||||
- local: in_translation
|
||||
title: (번역중) AQLM
|
||||
- local: in_translation
|
||||
@ -186,16 +186,18 @@
|
||||
- local: performance
|
||||
title: 성능 및 확장성
|
||||
- local: in_translation
|
||||
title: (번역중) LLM inference optimization
|
||||
title: (번역중) Quantization
|
||||
- local: llm_optims
|
||||
title: LLM 추론 최적화
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: (번역중) Methods and tools for efficient training on a single GPU
|
||||
- local: perf_train_gpu_many
|
||||
title: 다중 GPU에서 훈련 진행하기
|
||||
- local: deepspeed
|
||||
title: DeepSpeed
|
||||
- local: fsdp
|
||||
title: 완전 분할 데이터 병렬 처리
|
||||
- local: in_translation
|
||||
title: (번역중) DeepSpeed
|
||||
- local: perf_train_cpu
|
||||
title: CPU에서 훈련
|
||||
- local: perf_train_cpu_many
|
||||
@ -266,8 +268,8 @@
|
||||
title: (번역중) 개념 가이드
|
||||
- sections:
|
||||
- sections:
|
||||
- local: in_translation
|
||||
title: (번역중) Agents and Tools
|
||||
- local: main_classes/agent
|
||||
title: 에이전트와 도구
|
||||
- local: in_translation
|
||||
title: (번역중) Auto Classes
|
||||
- local: in_translation
|
||||
@ -302,8 +304,8 @@
|
||||
title: (번역중) Tokenizer
|
||||
- local: in_translation
|
||||
title: (번역중) Trainer
|
||||
- local: in_translation
|
||||
title: (번역중) DeepSpeed
|
||||
- local: deepspeed
|
||||
title: DeepSpeed
|
||||
- local: in_translation
|
||||
title: (번역중) Feature Extractor
|
||||
- local: in_translation
|
||||
|
306 docs/source/ko/conversations.md (Normal file)
@@ -0,0 +1,306 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Transformers로 채팅하기[[chatting-with-transformers]]
|
||||
|
||||
이 글을 보고 있다면 **채팅 모델**에 대해 어느 정도 알고 계실 것입니다.
|
||||
채팅 모델이란 메세지를 주고받을 수 있는 대화형 인공지능입니다.
|
||||
대표적으로 ChatGPT가 있고, 이와 비슷하거나 더 뛰어난 오픈소스 채팅 모델이 많이 존재합니다.
|
||||
이러한 모델들은 무료 다운로드할 수 있으며, 로컬에서 실행할 수 있습니다.
|
||||
크고 무거운 모델은 고성능 하드웨어와 메모리가 필요하지만,
|
||||
저사양 GPU 혹은 일반 데스크탑이나 노트북 CPU에서도 잘 작동하는 소형 모델들도 있습니다.
|
||||
|
||||
이 가이드는 채팅 모델을 처음 사용하는 분들에게 유용할 것입니다.
|
||||
우리는 간편한 고수준(High-Level) "pipeline"을 통해 빠른 시작 가이드를 진행할 것입니다.
|
||||
가이드에는 채팅 모델을 바로 시작할 때 필요한 모든 정보가 담겨 있습니다.
|
||||
빠른 시작 가이드 이후에는 채팅 모델이 정확히 무엇인지, 적절한 모델을 선택하는 방법과,
|
||||
채팅 모델을 사용하는 각 단계의 저수준(Low-Level) 분석 등 더 자세한 정보를 다룰 것입니다.
|
||||
또한 채팅 모델의 성능과 메모리 사용을 최적화하는 방법에 대한 팁도 제공할 것입니다.
|
||||
|
||||
|
||||
## 빠른 시작[[quickstart]]
|
||||
|
||||
자세히 볼 여유가 없는 분들을 위해 간단히 요약해 보겠습니다:
|
||||
채팅 모델은 대화 메세지를 계속해서 생성해 나갑니다.
|
||||
즉, 짤막한 채팅 메세지를 모델에게 전달하면, 모델은 이를 바탕으로 응답을 추가하며 대화를 이어 나갑니다.
|
||||
이제 실제로 어떻게 작동하는지 살펴보겠습니다.
|
||||
먼저, 채팅을 만들어 보겠습니다:
|
||||
|
||||
|
||||
```python
|
||||
chat = [
|
||||
{"role": "system", "content": "You are a sassy, wise-cracking robot as imagined by Hollywood circa 1986."},
|
||||
{"role": "user", "content": "Hey, can you tell me any fun things to do in New York?"}
|
||||
]
|
||||
```
|
||||
|
||||
주목하세요, 대화를 처음 시작할 때 유저 메세지 이외의도, 별도의 **시스템** 메세지가 필요할 수 있습니다.
|
||||
모든 채팅 모델이 시스템 메세지를 지원하는 것은 아니지만,
|
||||
지원하는 경우에는 시스템 메세지는 대화에서 모델이 어떻게 행동해야 하는지를 지시할 수 있습니다.
|
||||
예를 들어, 유쾌하거나 진지하고자 할 때, 짧은 답변이나 긴 답변을 원할 때 등을 설정할 수 있습니다.
|
||||
시스템 메세지를 생략하고
|
||||
"You are a helpful and intelligent AI assistant who responds to user queries."
|
||||
와 같은 간단한 프롬프트를 사용하는 것도 가능합니다.
|
||||
|
||||
채팅을 시작했다면 대화를 이어 나가는 가장 빠른 방법은 [`TextGenerationPipeline`]를 사용하는 것입니다.
|
||||
한번 `LLaMA-3`를 사용하여 이를 시연해 보겠습니다.
|
||||
우선 `LLaMA-3`를 사용하기 위해서는 승인이 필요합니다. [권한 신청](https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct)을 하고 Hugging Face 계정으로 로그인한 후에 사용할 수 있습니다.
|
||||
또한 우리는 `device_map="auto"`를 사용합니다. GPU 메모리가 충분하다면 로드될 것입니다.
|
||||
그리고 메모리 절약을 위해 dtype을 `torch.bfloat16`으로 설정할 것입니다.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from transformers import pipeline
|
||||
|
||||
pipe = pipeline("text-generation", "meta-llama/Meta-Llama-3-8B-Instruct", torch_dtype=torch.bfloat16, device_map="auto")
|
||||
response = pipe(chat, max_new_tokens=512)
|
||||
print(response[0]['generated_text'][-1]['content'])
|
||||
```
|
||||
|
||||
이후 실행을 하면 아래와 같이 출력됩니다:
|
||||
|
||||
```text
|
||||
(sigh) Oh boy, you're asking me for advice? You're gonna need a map, pal! Alright,
|
||||
alright, I'll give you the lowdown. But don't say I didn't warn you, I'm a robot, not a tour guide!
|
||||
|
||||
So, you wanna know what's fun to do in the Big Apple? Well, let me tell you, there's a million
|
||||
things to do, but I'll give you the highlights. First off, you gotta see the sights: the Statue of
|
||||
Liberty, Central Park, Times Square... you know, the usual tourist traps. But if you're lookin' for
|
||||
something a little more... unusual, I'd recommend checkin' out the Museum of Modern Art. It's got
|
||||
some wild stuff, like that Warhol guy's soup cans and all that jazz.
|
||||
|
||||
And if you're feelin' adventurous, take a walk across the Brooklyn Bridge. Just watch out for
|
||||
those pesky pigeons, they're like little feathered thieves! (laughs) Get it? Thieves? Ah, never mind.
|
||||
|
||||
Now, if you're lookin' for some serious fun, hit up the comedy clubs in Greenwich Village. You might
|
||||
even catch a glimpse of some up-and-coming comedians... or a bunch of wannabes tryin' to make it big. (winks)
|
||||
|
||||
And finally, if you're feelin' like a real New Yorker, grab a slice of pizza from one of the many amazing
|
||||
pizzerias around the city. Just don't try to order a "robot-sized" slice, trust me, it won't end well. (laughs)
|
||||
|
||||
So, there you have it, pal! That's my expert advice on what to do in New York. Now, if you'll
|
||||
excuse me, I've got some oil changes to attend to. (winks)
|
||||
```
|
||||
|
||||
채팅을 계속하려면, 자신의 답장을 추가하면 됩니다.
|
||||
파이프라인에서 반환된 `response` 객체에는 현재까지 모든 채팅을 포함하고 있으므로
|
||||
메세지를 추가하고 다시 전달하기만 하면 됩니다.
|
||||
|
||||
```python
|
||||
chat = response[0]['generated_text']
|
||||
chat.append(
|
||||
{"role": "user", "content": "Wait, what's so wild about soup cans?"}
|
||||
)
|
||||
response = pipe(chat, max_new_tokens=512)
|
||||
print(response[0]['generated_text'][-1]['content'])
|
||||
```
|
||||
|
||||
이후 실행을 하면 아래와 같이 출력됩니다:
|
||||
|
||||
```text
|
||||
(laughs) Oh, you're killin' me, pal! You don't get it, do you? Warhol's soup cans are like, art, man!
|
||||
It's like, he took something totally mundane, like a can of soup, and turned it into a masterpiece. It's
|
||||
like, "Hey, look at me, I'm a can of soup, but I'm also a work of art!"
|
||||
(sarcastically) Oh, yeah, real original, Andy.
|
||||
|
||||
But, you know, back in the '60s, it was like, a big deal. People were all about challenging the
|
||||
status quo, and Warhol was like, the king of that. He took the ordinary and made it extraordinary.
|
||||
And, let me tell you, it was like, a real game-changer. I mean, who would've thought that a can of soup could be art? (laughs)
|
||||
|
||||
But, hey, you're not alone, pal. I mean, I'm a robot, and even I don't get it. (winks)
|
||||
But, hey, that's what makes art, art, right? (laughs)
|
||||
```
|
||||
|
||||
이 튜토리얼의 후반부에서는 성능과 메모리 관리,
|
||||
그리고 사용자의 필요에 맞는 채팅 모델 선택과 같은 구체적인 주제들을 다룰 것입니다.
|
||||
|
||||
## 채팅 모델 고르기[[choosing-a-chat-model]]
|
||||
|
||||
[Hugging Face Hub](https://huggingface.co/models?pipeline_tag=text-generation&sort=trending)는 채팅 모델을 다양하게 제공하고 있습니다.
|
||||
처음 사용하는 사람에게는 모델을 선택하기가 어려울지 모릅니다.
|
||||
하지만 걱정하지 마세요! 두 가지만 명심하면 됩니다:
|
||||
|
||||
- 모델의 크기는 실행 속도와 메모리에 올라올 수 있는지 여부를 결정.
|
||||
- 모델이 생성한 출력의 품질.
|
||||
|
||||
일반적으로 이러한 요소들은 상관관계가 있습니다. 더 큰 모델일수록 더 뛰어난 성능을 보이는 경향이 있지만, 동일한 크기의 모델이라도 유의미한 차이가 날 수 있습니다!
|
||||
|
||||
### 모델의 명칭과 크기[[size-and-model-naming]]
|
||||
|
||||
모델의 크기는 모델 이름에 있는 숫자로 쉽게 알 수 있습니다.
|
||||
예를 들어, "8B" 또는 "70B"와 같은 숫자는 모델의 **파라미터** 수를 나타냅니다.
|
||||
양자화된 경우가 아니라면, 파라미터 하나당 약 2바이트의 메모리가 필요하다고 예상 가능합니다.
|
||||
따라서 80억 개의 파라미터를 가진 "8B" 모델은 16GB의 메모리를 차지하며, 추가적인 오버헤드를 위한 약간의 여유가 필요합니다.
|
||||
이는 3090이나 4090와 같은 24GB의 메모리를 갖춘 하이엔드 GPU에 적합합니다.
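
As a quick sanity check of this rule of thumb, here is a minimal sketch (a rough weight-only estimate that ignores activations and framework overhead):

```python
def estimate_weight_memory_gb(num_params: float, bytes_per_param: int = 2) -> float:
    """Rough memory needed just to hold the weights (2 bytes/param for bfloat16)."""
    return num_params * bytes_per_param / 1e9

print(estimate_weight_memory_gb(8e9))   # ~16 GB for an "8B" model
print(estimate_weight_memory_gb(70e9))  # ~140 GB for a "70B" model
```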
|
||||
|
||||
일부 채팅 모델은 "Mixture of Experts" 모델입니다.
|
||||
이러한 모델은 크기를 "8x7B" 또는 "141B-A35B"와 같이 다르게 표시하곤 합니다.
|
||||
숫자가 다소 모호하다 느껴질 수 있지만, 첫 번째 경우에는 약 560억(8x7B) 개의 파라미터가 있고,
두 번째 경우에는 약 1410억 개의 파라미터가 있다고 해석할 수 있습니다.
|
||||
|
||||
양자화는 파라미터당 메모리 사용량을 8비트, 4비트, 또는 그 이하로 줄이는 데 사용됩니다.
|
||||
이 주제에 대해서는 아래의 [메모리 고려사항](#memory-considerations) 챕터에서 더 자세히 다룰 예정입니다.
|
||||
|
||||
### 그렇다면 어떤 채팅 모델이 가장 좋을까요?[[but-which-chat-model-is-best]]
|
||||
모델의 크기 외에도 고려할 점이 많습니다.
|
||||
이를 한눈에 살펴보려면 **리더보드**를 참고하는 것이 좋습니다.
|
||||
가장 인기 있는 리더보드 두 가지는 [OpenLLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)와 [LMSys Chatbot Arena Leaderboard](https://chat.lmsys.org/?leaderboard)입니다.
|
||||
LMSys 리더보드에는 독점 모델도 포함되어 있으니,
|
||||
`license` 열에서 접근 가능한 모델을 선택한 후
|
||||
[Hugging Face Hub](https://huggingface.co/models?pipeline_tag=text-generation&sort=trending)에서 검색해 보세요.
|
||||
|
||||
### 전문 분야[[specialist-domains]]
|
||||
일부 모델은 의료 또는 법률 텍스트와 같은 특정 도메인이나 비영어권 언어에 특화되어 있기도 합니다.
|
||||
이러한 도메인에서 작업할 경우 특화된 모델이 좋은 성능을 보일 수 있습니다.
|
||||
하지만 항상 그럴 것이라 단정하기는 힘듭니다.
|
||||
특히 모델의 크기가 작거나 오래된 모델인 경우,
|
||||
최신 범용 모델이 더 뛰어날 수 있습니다.
|
||||
다행히도 [domain-specific leaderboards](https://huggingface.co/blog/leaderboard-medicalllm)가 점차 등장하고 있어, 특정 도메인에 최고의 모델을 쉽게 찾을 수 있을 것입니다.
|
||||
|
||||
|
||||
## 파이프라인 내부는 어떻게 되어있는가?[[what-happens-inside-the-pipeline]]
|
||||
위의 빠른 시작에서는 고수준(High-Level) 파이프라인을 사용하였습니다.
|
||||
이는 간편한 방법이지만, 유연성은 떨어집니다.
|
||||
이제 더 저수준(Low-Level) 접근 방식을 통해 대화에 포함된 각 단계를 살펴보겠습니다.
|
||||
코드 샘플로 시작한 후 이를 분석해 보겠습니다:
|
||||
|
||||
```python
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
import torch
|
||||
|
||||
# 입력값을 사전에 준비해 놓습니다
|
||||
chat = [
|
||||
{"role": "system", "content": "You are a sassy, wise-cracking robot as imagined by Hollywood circa 1986."},
|
||||
{"role": "user", "content": "Hey, can you tell me any fun things to do in New York?"}
|
||||
]
|
||||
|
||||
# 1: 모델과 토크나이저를 불러옵니다
|
||||
model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto", torch_dtype=torch.bfloat16)
|
||||
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
|
||||
|
||||
# 2: 채팅 템플릿에 적용합니다
|
||||
formatted_chat = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
|
||||
print("Formatted chat:\n", formatted_chat)
|
||||
|
||||
# 3: 채팅을 토큰화합니다 (바로 이전 과정에서 tokenized=True로 설정하면 한꺼번에 처리할 수 있습니다)
|
||||
inputs = tokenizer(formatted_chat, return_tensors="pt", add_special_tokens=False)
|
||||
# 토큰화된 입력값을 모델이 올라와 있는 기기(CPU/GPU)로 옮깁니다.
|
||||
inputs = {key: tensor.to(model.device) for key, tensor in inputs.items()}
|
||||
print("Tokenized inputs:\n", inputs)
|
||||
|
||||
# 4: 모델로부터 응답을 생성합니다
|
||||
outputs = model.generate(**inputs, max_new_tokens=512, temperature=0.1)
|
||||
print("Generated tokens:\n", outputs)
|
||||
|
||||
# 5: 모델이 출력한 토큰을 다시 문자열로 디코딩합니다
|
||||
decoded_output = tokenizer.decode(outputs[0][inputs['input_ids'].size(1):], skip_special_tokens=True)
|
||||
print("Decoded output:\n", decoded_output)
|
||||
```
|
||||
여기에는 각 부분이 자체 문서가 될 수 있을 만큼 많은 내용이 담겨 있습니다!
|
||||
너무 자세히 설명하기보다는 넓은 개념을 다루고, 세부 사항은 링크된 문서에서 다루겠습니다.
|
||||
주요 단계는 다음과 같습니다:
|
||||
|
||||
1. [모델](https://huggingface.co/learn/nlp-course/en/chapter2/3)과 [토크나이저](https://huggingface.co/learn/nlp-course/en/chapter2/4?fw=pt)를 Hugging Face Hub에서 로드합니다.
|
||||
2. 대화는 토크나이저의 [채팅 템플릿](https://huggingface.co/docs/transformers/main/en/chat_templating)을 사용하여 양식을 구성합니다.
|
||||
3. 구성된 채팅은 토크나이저를 사용하여 [토큰화](https://huggingface.co/learn/nlp-course/en/chapter2/4)됩니다.
|
||||
4. 모델에서 응답을 [생성](https://huggingface.co/docs/transformers/en/llm_tutorial)합니다.
|
||||
5. 모델이 출력한 토큰을 다시 문자열로 디코딩합니다.
|
||||
|
||||
## 성능, 메모리와 하드웨어[[performance-memory-and-hardware]]
|
||||
이제 대부분의 머신 러닝 작업이 GPU에서 실행된다는 것을 아실 겁니다.
|
||||
다소 느리기는 해도 CPU에서 채팅 모델이나 언어 모델로부터 텍스트를 생성하는 것도 가능합니다.
|
||||
하지만 모델을 GPU 메모리에 올려놓을 수만 있다면, GPU를 사용하는 것이 일반적으로 더 선호되는 방식입니다.
|
||||
|
||||
### 메모리 고려사항[[memory-considerations]]
|
||||
|
||||
기본적으로, [`TextGenerationPipeline`]이나 [`AutoModelForCausalLM`]과 같은
|
||||
Hugging Face 클래스는 모델을 `float32` 정밀도(Precision)로 로드합니다.
|
||||
이는 파라미터당 4바이트(32비트)를 필요로 하므로,
|
||||
80억 개의 파라미터를 가진 "8B" 모델은 약 32GB의 메모리를 필요로 한다는 것을 의미합니다.
|
||||
하지만 이는 낭비일 수 있습니다!
|
||||
대부분의 최신 언어 모델은 파라미터당 2바이트를 사용하는 "bfloat16" 정밀도(Precision)로 학습됩니다.
|
||||
하드웨어가 이를 지원하는 경우(Nvidia 30xx/Axxx 이상),
|
||||
`torch_dtype` 파라미터로 위와 같이 `bfloat16` 정밀도(Precision)로 모델을 로드할 수 있습니다.
|
||||
|
||||
또한, 16비트보다 더 낮은 정밀도(Precision)로 모델을 압축하는
|
||||
"양자화(quantization)" 방법을 사용할 수도 있습니다.
|
||||
이 방법은 모델의 가중치를 손실 압축하여 각 파라미터를 8비트,
|
||||
4비트 또는 그 이하로 줄일 수 있습니다.
|
||||
특히 4비트에서 모델의 출력이 부정적인 영향을 받을 수 있지만,
|
||||
더 크고 강력한 채팅 모델을 메모리에 올리기 위해 이 같은 트레이드오프를 감수할 가치가 있습니다.
|
||||
이제 `bitsandbytes`를 사용하여 이를 실제로 확인해 보겠습니다:
|
||||
|
||||
```python
|
||||
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
|
||||
|
||||
quantization_config = BitsAndBytesConfig(load_in_8bit=True) # You can also try load_in_4bit
|
||||
model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto", quantization_config=quantization_config)
|
||||
```
|
||||
|
||||
위의 작업은 `pipeline` API에도 적용 가능합니다:
|
||||
|
||||
```python
|
||||
from transformers import pipeline, BitsAndBytesConfig
|
||||
|
||||
quantization_config = BitsAndBytesConfig(load_in_8bit=True) # You can also try load_in_4bit
|
||||
pipe = pipeline("text-generation", "meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto", model_kwargs={"quantization_config": quantization_config})
|
||||
```
|
||||
|
||||
`bitsandbytes` 외에도 모델을 양자화하는 다양한 방법이 있습니다.
|
||||
자세한 내용은 [Quantization guide](./quantization)를 참조해 주세요.
|
||||
|
||||
|
||||
### 성능 고려사항[[performance-considerations]]
|
||||
|
||||
<Tip>
|
||||
|
||||
언어 모델 성능과 최적화에 대한 보다 자세한 가이드는 [LLM Inference Optimization](./llm_optims)을 참고하세요.
|
||||
|
||||
</Tip>
|
||||
|
||||
|
||||
일반적으로 더 큰 채팅 모델은 메모리를 더 많이 요구하고,
|
||||
속도도 느려지는 경향이 있습니다. 구체적으로 말하자면,
|
||||
채팅 모델에서 텍스트를 생성할 때는 컴퓨팅 파워보다 **메모리 대역폭**이 병목 현상을 일으키는 경우가 많습니다.
|
||||
이는 모델이 토큰을 하나씩 생성할 때마다 파라미터를 메모리에서 읽어야 하기 때문입니다.
|
||||
따라서 채팅 모델에서 초당 생성할 수 있는 토큰 수는 모델이 위치한 메모리의 대역폭을 모델의 크기로 나눈 값에 비례합니다.
|
||||
|
||||
위의 예제에서는 모델이 bfloat16 정밀도(Precision)로 로드될 때 용량이 약 16GB였습니다.
|
||||
이 경우, 모델이 생성하는 각 토큰마다 16GB를 메모리에서 읽어야 한다는 의미입니다.
|
||||
총 메모리 대역폭은 소비자용 CPU에서는 20-100GB/sec,
|
||||
소비자용 GPU나 Intel Xeon, AMD Threadripper/Epyc,
|
||||
애플 실리콘과 같은 특수 CPU에서는 200-900GB/sec,
|
||||
데이터 센터 GPU인 Nvidia A100이나 H100에서는 최대 2-3TB/sec에 이를 수 있습니다.
|
||||
이러한 정보는 각자 하드웨어에서 생성 속도를 예상하는 데 도움이 될 것입니다.
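
To make this bandwidth rule of thumb concrete, here is a minimal sketch (the bandwidth figure is an illustrative assumption, not a benchmark) that estimates an upper bound on generation speed:

```python
def estimate_tokens_per_second(num_params: float, bytes_per_param: float, bandwidth_gb_s: float) -> float:
    """Upper bound: every generated token has to read all weights from memory once."""
    model_size_gb = num_params * bytes_per_param / 1e9
    return bandwidth_gb_s / model_size_gb

# 8B model in bfloat16 (2 bytes/param) on a GPU with ~900 GB/sec of memory bandwidth
print(estimate_tokens_per_second(8e9, 2, 900))  # ~56 tokens/sec at most
```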
|
||||
|
||||
따라서 텍스트 생성 속도를 개선하려면 가장 간단한 방법은 모델의 크기를 줄이거나(주로 양자화를 사용),
|
||||
메모리 대역폭이 더 높은 하드웨어를 사용하는 것입니다.
|
||||
이 대역폭 병목 현상을 피할 수 있는 고급 기술도 여러 가지 있습니다.
|
||||
가장 일반적인 방법은 [보조 생성](https://huggingface.co/blog/assisted-generation), "추측 샘플링"이라고 불리는 기술입니다.
|
||||
이 기술은 종종 더 작은 "초안 모델"을 사용하여 여러 개의 미래 토큰을 한 번에 추측한 후,
|
||||
채팅 모델로 생성 결과를 확인합니다.
|
||||
만약 채팅 모델이 추측을 확인하면, 한 번의 순전파에서 여러 개의 토큰을 생성할 수 있어
|
||||
병목 현상이 크게 줄어들고 생성 속도가 빨라집니다.
|
||||
|
||||
마지막으로, "Mixture of Experts" (MoE) 모델에 대해서도 짚고 넘어가 보도록 합니다.
|
||||
Mixtral, Qwen-MoE, DBRX와 같은 인기 있는 채팅 모델이 바로 MoE 모델입니다.
|
||||
이 모델들은 토큰을 생성할 때 모든 파라미터가 사용되지 않습니다.
|
||||
이로 인해 MoE 모델은 전체 크기가 상당히 클 수 있지만,
|
||||
차지하는 메모리 대역폭은 낮은 편입니다.
|
||||
따라서 동일한 크기의 일반 "조밀한(Dense)" 모델보다 몇 배 빠를 수 있습니다.
|
||||
하지만 보조 생성과 같은 기술은 MoE 모델에서 비효율적일 수 있습니다.
|
||||
새로운 추측된 토큰이 추가되면서 더 많은 파라미터가 활성화되기 때문에,
|
||||
MoE 아키텍처가 제공하는 속도 이점이 상쇄될 수 있습니다.
|
@@ -169,7 +169,7 @@ class ResnetModelForImageClassification(PreTrainedModel):
     def forward(self, tensor, labels=None):
         logits = self.model(tensor)
         if labels is not None:
-            loss = torch.nn.cross_entropy(logits, labels)
+            loss = torch.nn.functional.cross_entropy(logits, labels)
             return {"loss": loss, "logits": logits}
         return {"logits": logits}
 ```
1220 docs/source/ko/deepspeed.md (Normal file)
File diff suppressed because it is too large.

410 docs/source/ko/llm_optims.md (Normal file)
@@ -0,0 +1,410 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# LLM 추론 최적화 [[llm-inference-optimization]]
|
||||
|
||||
대규모 언어 모델(LLM)은 채팅 및 코드 완성 모델과 같은 텍스트 생성 응용 프로그램을 한 단계 끌어올리며, 높은 수준의 이해력과 유창함을 보여주는 텍스트를 생성합니다. 그러나 LLM을 강력하게 만드는 요소인 그들의 크기는 동시에 추론 과정에서 도전 과제가 되기도 합니다.
|
||||
|
||||
기본적인 추론은 느립니다, 왜냐하면 LLM이 다음 토큰을 생성하기 위해 반복적으로 호출되어야 하기 때문입니다. 생성이 진행됨에 따라 입력 시퀀스가 길어져 처리 시간이 점점 길어집니다. 또한, LLM은 수십억 개의 매개변수를 가지고 있어 모든 가중치를 메모리에 저장하고 처리하는 데 어려움이 있습니다.
|
||||
|
||||
이 가이드는 LLM 추론을 가속하기 위해 Transformers에서 사용할 수 있는 최적화 기술을 사용하는 방법을 보여줍니다.
|
||||
|
||||
> [!TIP]
|
||||
> Hugging Face는 LLM을 추론에 최적화하여 배포하고 서비스하는 데 전념하는 라이브러리인 [Text Generation Inference (TGI)](https://hf.co/docs/text-generation-inference)을 제공합니다. 이 라이브러리는 처리량 증가를 위한 지속적인 배칭과 다중 GPU 추론을 위한 텐서 병렬화와 같은 Transformers에 포함되지 않은 배포 지향 최적화 기능을 포함합니다.
|
||||
|
||||
## 정적 kv-cache와 `torch.compile`[[static-kv-cache-and-torchcompile]]
|
||||
|
||||
디코딩 중에 LLM은 각 입력 토큰에 대한 key-value(kv) 값을 계산합니다. LLM은 자기회귀(autoregressive)이기 때문에 생성된 출력이 현재 입력의 일부가 되어 매번 동일한 kv 값을 계산합니다. 이는 매번 동일한 kv 값을 다시 계산하기 때문에 효율적이지 않습니다.
|
||||
|
||||
이를 최적화하기 위해, 이전 키(key)와 값(value)을 재계산하지 않고 저장하는 kv-cache를 사용할 수 있습니다. 그러나 kv-cache는 각 생성 단계에서 증가하며 동적이기 때문에 PyTorch 코드를 빠르고 최적화된 커널로 통합하는 강력한 최적화 도구인 [`torch.compile`](./perf_torch_compile)을 사용하는 데 제약이 있습니다.
|
||||
|
||||
*정적 kv-cache*는 최댓값을 미리 할당하여 이 문제를 해결하여 `torch.compile`과 결합할 수 있게 합니다. 이를 통해 최대 4배의 속도 향상이 가능합니다. 속도 향상은 모델 크기(더 큰 모델은 속도 향상이 적음)와 하드웨어에 따라 다를 수 있습니다.
|
||||
|
||||
> [!WARNING]
|
||||
> 현재 [Llama](./model_doc/llama2) 및 몇 가지 다른 모델만 정적 kv-cache와 `torch.compile`을 지원합니다. 실시간 모델 호환성 목록은 [이 이슈](https://github.com/huggingface/transformers/issues/28981)를 확인하십시오.
|
||||
|
||||
작업의 복잡성에 따라 세 가지 방식의 정적 kv-cache 사용 방법이 있습니다:
|
||||
1. 기본 사용법: `generation_config`에서 플래그를 설정하기만 하면 됩니다(권장);
|
||||
2. 고급 사용법: 여러 번의 생성이나 맞춤형 생성 루프를 위해 캐시 객체를 처리합니다;
|
||||
3. 고급 사용법: 단일 그래프가 필요한 경우, 전체 `generate` 함수를 하나의 그래프로 컴파일합니다.
|
||||
|
||||
올바른 탭을 선택하여 각 방법에 대한 추가 지침을 확인하세요.
|
||||
|
||||
> [!TIP]
|
||||
> `torch.compile`을 사용할 때 어떤 전략을 사용하든, LLM 입력을 제한된 값 세트로 왼쪽에 패딩하면 모양과 관련된 재컴파일을 피할 수 있습니다. [`pad_to_multiple_of` tokenizer flag](https://huggingface.co/docs/transformers/main_classes/tokenizer#transformers.PreTrainedTokenizer.__call__.pad_to_multiple_of)가 유용할 것입니다!
|
||||
|
||||
<hfoptions id="static-kv">
|
||||
<hfoption id="basic usage: generation_config">
|
||||
|
||||
이 예제에서는 [Gemma](https://hf.co/google/gemma-2b) 모델을 사용해 보겠습니다. 필요한 작업은 다음과 같습니다:
|
||||
1. 모델의 `generation_config` 속성에 접근하여 `cache_implementation`을 "static"으로 설정합니다;
|
||||
2. 모델의 `forward` 패스를 정적 kv-cache와 함께 컴파일하기 위해 `torch.compile`을 호출합니다.
|
||||
|
||||
이렇게 하면 끝입니다!
|
||||
|
||||
```py
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
import torch
|
||||
import os
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false" # 긴 경고 메시지를 방지하기 위해 설정 :)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
|
||||
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto")
|
||||
|
||||
model.generation_config.cache_implementation = "static"
|
||||
|
||||
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
|
||||
input_text = "The theory of special relativity states "
|
||||
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
||||
|
||||
outputs = model.generate(**input_ids)
|
||||
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
||||
['The theory of special relativity states 1. The speed of light is constant in all inertial reference']
|
||||
```
|
||||
|
||||
`generate` 함수는 내부적으로 동일한 캐시 객체를 재사용하려고 시도하며, 이를 통해 각 호출 시 재컴파일의 필요성을 제거합니다. 재컴파일을 피하는 것은 `torch.compile`의 성능을 최대한 활용하는 데 매우 중요하며, 다음 사항에 유의해야 합니다:
|
||||
1. 배치 크기가 변경되거나 호출 간 최대 출력 길이가 증가하면 캐시를 다시 초기화해야 하며, 이로 인해 새로 컴파일을 해야 합니다;
|
||||
2. 컴파일된 함수의 첫 몇 번의 호출은 함수가 컴파일되는 동안 더 느립니다.
|
||||
|
||||
> [!WARNING]
|
||||
> 다중 턴 대화와 같은 정적 캐시의 고급 사용을 위해서는, 캐시 객체를 [`~GenerationMixin.generate`] 외부에서 인스턴스화하고 조작하는 것을 권장합니다. 고급 사용법 탭을 참조하세요.
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="advanced usage: control Static Cache">
|
||||
|
||||
[`StaticCache`] 객체는 `past_key_values` 인수로 모델의 [`~GenerationMixin.generate`] 함수에 전달할 수 있습니다. 이 객체는 캐시 내용을 유지하므로, 동적 캐시를 사용하는 것처럼 새로운 [`~GenerationMixin.generate`] 호출에 이를 전달하여 생성을 계속할 수 있습니다.
|
||||
|
||||
```py
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM, StaticCache
|
||||
import torch
|
||||
import os
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false" # 긴 경고 메시지를 방지하기 위해 설정 :)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
|
||||
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto")
|
||||
|
||||
model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True)
|
||||
input_text = "The theory of special relativity states "
|
||||
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
||||
prompt_length = input_ids.input_ids.shape[1]
|
||||
model.generation_config.max_new_tokens = 16
|
||||
|
||||
past_key_values = StaticCache(
|
||||
config=model.config,
|
||||
batch_size=1,
|
||||
# 캐시를 재사용할 계획이 있는 경우, 모든 경우에 충분한 캐시 길이를 설정해야 합니다
|
||||
max_cache_len=prompt_length+(model.generation_config.max_new_tokens*2),
|
||||
device=model.device,
|
||||
dtype=model.dtype
|
||||
)
|
||||
outputs = model.generate(**input_ids, past_key_values=past_key_values)
|
||||
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
||||
['The theory of special relativity states 1. The speed of light is constant in all inertial reference frames. 2']
|
||||
|
||||
# 생성된 텍스트와 동일한 캐시 객체를 전달하여, 중단한 곳에서 생성을 계속합니다.
|
||||
# 다중 턴 대화의 경우, 생성된 텍스트에 새로운 사용자 입력을 추가할 수 있습니다.
|
||||
new_input_ids = outputs
|
||||
outputs = model.generate(new_input_ids, past_key_values=past_key_values)
|
||||
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
||||
['The theory of special relativity states 1. The speed of light is constant in all inertial reference frames. 2. The speed of light is constant in all inertial reference frames. 3.']
|
||||
```
|
||||
|
||||
> [!TIP]
|
||||
> 동일한 [`StaticCache`] 객체를 새로운 프롬프트에 사용하려면, 호출 간에 `.reset()` 메서드를 사용하여 그 내용을 초기화하는 것이 좋습니다.
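
For example, a minimal sketch (it reuses the `model`, `tokenizer`, and `past_key_values` objects created above; the prompt is illustrative):

```py
# Hedged sketch: clear the StaticCache created above before reusing it with a new prompt.
past_key_values.reset()
new_inputs = tokenizer("The second law of thermodynamics states", return_tensors="pt").to("cuda")
outputs = model.generate(**new_inputs, past_key_values=past_key_values)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```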
|
||||
|
||||
더 깊이 들어가고 싶다면, [`StaticCache`] 객체를 모델의 `forward` 패스에 동일한 `past_key_values` 인수로 전달할 수도 있습니다. 이 전략을 사용하면, 현재 토큰과 이전에 생성된 토큰의 위치 및 캐시 위치를 바탕으로 다음 토큰을 디코딩하는 자체 함수를 작성할 수 있습니다.
|
||||
|
||||
```py
|
||||
from transformers import LlamaTokenizer, LlamaForCausalLM, StaticCache, logging
|
||||
from transformers.testing_utils import CaptureLogger
|
||||
import torch
|
||||
|
||||
prompts = [
|
||||
"Simply put, the theory of relativity states that ",
|
||||
"My favorite all time favorite condiment is ketchup.",
|
||||
]
|
||||
|
||||
NUM_TOKENS_TO_GENERATE = 40
|
||||
torch_device = "cuda"
|
||||
|
||||
tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", pad_token="</s>", padding_side="right")
|
||||
model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="sequential")
|
||||
inputs = tokenizer(prompts, return_tensors="pt", padding=True).to(model.device)
|
||||
|
||||
def decode_one_tokens(model, cur_token, input_pos, cache_position, past_key_values):
|
||||
logits = model(
|
||||
cur_token,
|
||||
position_ids=input_pos,
|
||||
cache_position=cache_position,
|
||||
past_key_values=past_key_values,
|
||||
return_dict=False,
|
||||
use_cache=True
|
||||
)[0]
|
||||
new_token = torch.argmax(logits[:, -1], dim=-1)[:, None]
|
||||
return new_token
|
||||
```
|
||||
|
||||
`StaticCache` 메서드를 사용하여 정적 kv-cache와 `torch.compile`을 활성화하려면 몇 가지 중요한 작업을 수행해야 합니다:
|
||||
1. 추론에 모델을 사용하기 전에 [`StaticCache`] 인스턴스를 초기화합니다. 여기서 최대 배치 크기와 시퀀스 길이와 같은 매개변수를 설정할 수 있습니다.
|
||||
2. 정적 kv-cache와 함께 순전파를 컴파일하기 위해 모델에 `torch.compile`을 호출합니다.
|
||||
3. [torch.backends.cuda.sdp_kernel](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html) 컨텍스트 관리자에서 `enable_math=True`를 설정하여 네이티브 PyTorch C++ 구현된 스케일된 점곱 어텐션(scaled dot product attention)을 활성화하여 추론 속도를 더욱 높입니다.
|
||||
|
||||
```py
|
||||
batch_size, seq_length = inputs["input_ids"].shape
|
||||
with torch.no_grad():
|
||||
past_key_values = StaticCache(
|
||||
config=model.config, max_batch_size=2, max_cache_len=4096, device=torch_device, dtype=model.dtype
|
||||
)
|
||||
cache_position = torch.arange(seq_length, device=torch_device)
|
||||
generated_ids = torch.zeros(
|
||||
batch_size, seq_length + NUM_TOKENS_TO_GENERATE + 1, dtype=torch.int, device=torch_device
|
||||
)
|
||||
generated_ids[:, cache_position] = inputs["input_ids"].to(torch_device).to(torch.int)
|
||||
|
||||
logits = model(
|
||||
**inputs, cache_position=cache_position, past_key_values=past_key_values,return_dict=False, use_cache=True
|
||||
)[0]
|
||||
next_token = torch.argmax(logits[:, -1], dim=-1)[:, None]
|
||||
generated_ids[:, seq_length] = next_token[:, 0]
|
||||
|
||||
decode_one_tokens = torch.compile(decode_one_tokens, mode="reduce-overhead", fullgraph=True)
|
||||
cache_position = torch.tensor([seq_length + 1], device=torch_device)
|
||||
for _ in range(1, NUM_TOKENS_TO_GENERATE):
|
||||
with torch.backends.cuda.sdp_kernel(enable_flash=False, enable_mem_efficient=False, enable_math=True):
|
||||
next_token = decode_one_tokens(model, next_token.clone(), None, cache_position, past_key_values)
|
||||
generated_ids[:, cache_position] = next_token.int()
|
||||
cache_position += 1
|
||||
|
||||
text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
|
||||
text
|
||||
['Simply put, the theory of relativity states that 1) the speed of light is constant, 2) the speed of light is the same for all observers, and 3) the laws of physics are the same for all observers.',
|
||||
'My favorite all time favorite condiment is ketchup. I love it on everything. I love it on my eggs, my fries, my chicken, my burgers, my hot dogs, my sandwiches, my salads, my p']
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="advanced usage: end-to-end generate compilation">
|
||||
|
||||
전체 `generate` 함수를 컴파일하는 것은 코드 측면에서 기본 사용법보다 더 간단합니다. `generate` 함수에 대해 `torch.compile`을 호출하여 전체 함수를 컴파일하면 됩니다. 정적 캐시의 사용을 지정할 필요는 없습니다. 정적 캐시는 호환되지만, 벤치마크에서는 동적 캐시(기본 설정)가 더 빠른 것으로 나타났습니다.
|
||||
|
||||
```py
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
import torch
|
||||
import os
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false" # 긴 경고 메시지를 방지하기 위해 설정 :)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("google/gemma-2b")
|
||||
model = AutoModelForCausalLM.from_pretrained("google/gemma-2b", device_map="auto")
|
||||
|
||||
model.generate = torch.compile(model.generate, mode="reduce-overhead", fullgraph=True)
|
||||
input_text = "The theory of special relativity states "
|
||||
input_ids = tokenizer(input_text, return_tensors="pt").to("cuda")
|
||||
|
||||
outputs = model.generate(**input_ids)
|
||||
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
||||
['The theory of special relativity states 1. The speed of light is constant in all inertial reference']
|
||||
```
|
||||
|
||||
이 방법을 통해 모델의 forward 패스뿐만 아니라, 입력 준비, logit 처리기 작업 등을 포함한 모든 것을 컴파일합니다. 기본 사용 예제에 비해 `generate` 호출이 약간 더 빠를 수 있으며, 컴파일된 그래프는 더 특이한 하드웨어 장치나 사용 사례에 적합할 수 있습니다. 그러나 이 접근 방식을 사용하는 데는 몇 가지 큰 단점이 있습니다:
|
||||
1. 컴파일 속도가 훨씬 느립니다;
|
||||
2. `generate`의 모든 매개변수 설정은 `generation_config`를 통해서만 가능합니다;
|
||||
3. 많은 경고와 예외가 억제됩니다. -- 먼저 컴파일 되지 않은 형태로 테스트하는 것을 권장합니다;
|
||||
4. 현재 작업 중이지만 기능 제한이 심합니다(예: 작성 시점에서는 EOS 토큰이 선택되어도 생성이 중단되지 않습니다).
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## 추정 디코딩 [[speculative-decoding]]
|
||||
|
||||
> [!TIP]
|
||||
> 보다 심층적인 설명을 원한다면, [Assisted Generation: a new direction toward low-latency text generation](https://hf.co/blog/assisted-generation) 블로그 게시물을 확인하십시오!
|
||||
|
||||
자기 회귀의 또 다른 문제는 각 입력 토큰에 대해 순전파 중에 모델 가중치를 매번 로드해야 한다는 점입니다. 이는 수십억 개의 매개변수를 가진 LLM에는 느리고 번거롭습니다. 추정 디코딩(speculative decoding)은 더 작고 빠른 보조 모델을 사용하여 후보 토큰을 생성하고, 이를 큰 LLM이 단일 순전파에서 검증하여 이 속도 저하를 완화합니다. 검증된 토큰이 정확하다면, LLM은 본래 자체적으로 생성하는 것처럼 토큰을 얻을 수 있습니다. 전방 패스가 동일한 출력을 보장하기 때문에 정확도 저하가 없습니다.
|
||||
|
||||
가장 큰 속도 향상을 얻기 위해, 보조 모델은 빠르게 토큰을 생성할 수 있도록 LLM보다 훨씬 작아야 합니다. 보조 모델과 LLM 모델은 토큰을 다시 인코딩하고 디코딩하지 않도록 동일한 토크나이저를 공유해야 합니다.
|
||||
|
||||
> [!WARNING]
|
||||
> 추정 디코딩은 탐욕 검색과 샘플링 디코딩 전략에서만 지원되며, 배치 입력을 지원하지 않습니다.
|
||||
|
||||
보조 모델을 로드하고 이를 [`~GenerationMixin.generate`] 메서드에 전달하여 추정 디코딩을 활성화하십시오.
|
||||
|
||||
<hfoptions id="spec-decoding">
|
||||
<hfoption id="greedy search">
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
import torch
|
||||
|
||||
device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
|
||||
inputs = tokenizer("Einstein's theory of relativity states", return_tensors="pt").to(device)
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b").to(device)
|
||||
assistant_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m").to(device)
|
||||
outputs = model.generate(**inputs, assistant_model=assistant_model)
|
||||
tokenizer.batch_decode(outputs, skip_special_tokens=True)
|
||||
["Einstein's theory of relativity states that the speed of light is constant. "]
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="sampling">
|
||||
|
||||
추정 샘플링 디코딩(speculative sampling decoding)을 위해, 보조 모델 외에도 [`~GenerationMixin.generate`] 메서드에 `do_sample` 및 `temperature` 매개변수를 추가하십시오.
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
import torch
|
||||
|
||||
device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
|
||||
inputs = tokenizer("Einstein's theory of relativity states", return_tensors="pt").to(device)
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b").to(device)
|
||||
assistant_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m").to(device)
|
||||
outputs = model.generate(**inputs, assistant_model=assistant_model, do_sample=True, temperature=0.7)
|
||||
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
||||
["Einstein's theory of relativity states that motion in the universe is not a straight line.\n"]
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
### 프롬프트 조회 디코딩 [[prompt-lookup-decoding]]
|
||||
|
||||
프롬프트 조회 디코딩은 탐욕 검색과 샘플링과도 호환되는 추정 디코딩의 변형입니다. 프롬프트 조회는 요약과 같은 입력 기반 작업에 특히 잘 작동합니다. 여기서는 프롬프트와 출력 간에 종종 겹치는 단어가 있습니다. 이러한 겹치는 n-그램이 LLM 후보 토큰으로 사용됩니다.
|
||||
|
||||
프롬프트 조회 디코딩을 활성화하려면 `prompt_lookup_num_tokens` 매개변수에 겹치는 토큰 수를 지정하십시오. 그런 다음 이 매개변수를 [`~GenerationMixin.generate`] 메서드에 전달할 수 있습니다.
|
||||
|
||||
<hfoptions id="pld">
|
||||
<hfoption id="greedy decoding">
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
import torch
|
||||
|
||||
device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
|
||||
inputs = tokenizer("The second law of thermodynamics states", return_tensors="pt").to(device)
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b").to(device)
|
||||
assistant_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m").to(device)
|
||||
outputs = model.generate(**inputs, prompt_lookup_num_tokens=3)
|
||||
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
||||
['The second law of thermodynamics states that entropy increases with temperature. ']
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="sampling">
|
||||
|
||||
샘플링과 함께 프롬프트 조회 디코딩을 사용하려면, [`~GenerationMixin.generate`] 메서드에 `do_sample` 및 `temperature` 매개변수를 추가하십시오.
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
import torch
|
||||
|
||||
device = "cuda" if torch.cuda.is_available() else "cpu"
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-1.3b")
|
||||
inputs = tokenizer("The second law of thermodynamics states", return_tensors="pt").to(device)
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("facebook/opt-1.3b").to(device)
|
||||
outputs = model.generate(**inputs, prompt_lookup_num_tokens=3, do_sample=True, temperature=0.7)
|
||||
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
|
||||
["The second law of thermodynamics states that energy cannot be created nor destroyed. It's not a"]
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
## 어텐션 최적화 [[attention-optimizations]]
|
||||
|
||||
트랜스포머 모델의 알려진 문제는 셀프 어텐션 메커니즘이 입력 토큰 수와 함께 계산 및 메모리가 제곱으로 증가한다는 것입니다. 이 제한은 훨씬 더 긴 시퀀스를 처리하는 LLM에서는 더욱 커집니다. 이를 해결하기 위해 FlashAttention2 또는 PyTorch의 스케일된 점곱 어텐션을 사용해 보십시오. 이들은 더 메모리 효율적인 어텐션 구현으로 추론을 가속화할 수 있습니다.
|
||||
|
||||
### FlashAttention-2 [[flashattention-2]]
|
||||
|
||||
FlashAttention과 [FlashAttention-2](./perf_infer_gpu_one#flashattention-2)는 어텐션 계산을 더 작은 청크로 나누고 중간 읽기/쓰기 작업을 줄여 추론 속도를 높입니다. FlashAttention-2는 원래 FlashAttention 알고리즘을 개선하여 시퀀스 길이 차원에서도 병렬 처리를 수행하고 하드웨어에서 작업을 더 잘 분할하여 동기화 및 통신 오버헤드를 줄입니다.
|
||||
|
||||
FlashAttention-2를 사용하려면 [`~PreTrainedModel.from_pretrained`] 메서드에서 `attn_implementation="flash_attention_2"`를 설정하십시오.
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
|
||||
|
||||
quant_config = BitsAndBytesConfig(load_in_8bit=True)
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"google/gemma-2b",
|
||||
quantization_config=quant_config,
|
||||
torch_dtype=torch.bfloat16,
|
||||
attn_implementation="flash_attention_2",
|
||||
)
|
||||
```
|
||||
|
||||
### PyTorch 스케일된 점곱 어텐션(scaled dot product attention) [[pytorch-scaled-dot-product-attention]]
|
||||
|
||||
스케일된 점곱 어텐션(SDPA)는 PyTorch 2.0에서 자동으로 활성화되며, FlashAttention, xFormers, PyTorch의 C++ 구현을 지원합니다. SDPA는 CUDA 백엔드를 사용하는 경우 가장 성능이 좋은 어텐션 알고리즘을 선택합니다. 다른 백엔드에서는 SDPA가 PyTorch C++ 구현으로 기본 설정됩니다.
|
||||
|
||||
> [!TIP]
|
||||
> SDPA는 최신 PyTorch 버전이 설치되어 있으면 FlashAttention-2도 지원합니다.
|
||||
|
||||
세 가지 어텐션 알고리즘 중 하나를 명시적으로 활성화하거나 비활성화하려면 [torch.backends.cuda.sdp_kernel](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html) 컨텍스트 관리자를 사용하십시오. 예를 들어 FlashAttention을 활성화하려면 `enable_flash=True`로 설정하십시오.
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"google/gemma-2b",
|
||||
torch_dtype=torch.bfloat16,
|
||||
)
|
||||
|
||||
with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
|
||||
outputs = model.generate(**inputs)
|
||||
```
|
||||
|
||||
## 양자화 [[quantization]]
|
||||
|
||||
양자화는 LLM 가중치를 더 낮은 정밀도로 저장하여 크기를 줄입니다. 이는 메모리 사용량을 줄이며 GPU 메모리에 제약이 있는 경우 추론을 위해 LLM을 로드하는 것을 더 용이하게 합니다. GPU가 충분하다면, 모델을 양자화할 필요는 없습니다. 추가적인 양자화 및 양자화 해제 단계로 인해 약간의 지연이 발생할 수 있기 때문입니다(AWQ 및 융합 AWQ 모듈 제외).
|
||||
|
||||
> [!TIP]
|
||||
> 다양한 양자화 라이브러리(자세한 내용은 [Quantization](./quantization) 가이드를 참조하십시오)가 있습니다. 여기에는 Quanto, AQLM, AWQ 및 AutoGPTQ가 포함됩니다. 사용 사례에 가장 잘 맞는 라이브러리를 사용해 보십시오. 또한 AutoGPTQ와 bitsandbytes를 비교하는 [Overview of natively supported quantization schemes in 🤗 Transformers](https://hf.co/blog/overview-quantization-transformers) 블로그 게시물을 읽어보는 것을 추천합니다.
|
||||
|
||||
아래의 모델 메모리 계산기를 사용하여 모델을 로드하는 데 필요한 메모리를 추정하고 비교해 보십시오. 예를 들어 [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)를 로드하는 데 필요한 메모리를 추정해 보십시오.
|
||||
|
||||
<iframe
|
||||
src="https://hf-accelerate-model-memory-usage.hf.space"
|
||||
frameborder="0"
|
||||
width="850"
|
||||
height="450"
|
||||
></iframe>
|
||||
|
||||
Mistral-7B-v0.1을 반정밀도로 로드하려면 [`~transformers.AutoModelForCausalLM.from_pretrained`] 메서드에서 `torch_dtype` 매개변수를 `torch.bfloat16`으로 설정하십시오. 이 경우 13.74GB의 메모리가 필요합니다.
|
||||
|
||||
```py
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM
|
||||
import torch
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"mistralai/Mistral-7B-v0.1", torch_dtype=torch.bfloat16, device_map="auto",
|
||||
)
|
||||
```
|
||||
|
||||
추론을 위해 양자화된 모델(8비트 또는 4비트)을 로드하려면 [bitsandbytes](https://hf.co/docs/bitsandbytes)를 사용하고 `load_in_4bit` 또는 `load_in_8bit` 매개변수를 `True`로 설정하십시오. 모델을 8비트로 로드하는 데는 6.87GB의 메모리만 필요합니다.
|
||||
|
||||
```py
|
||||
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
|
||||
import torch
|
||||
|
||||
quant_config = BitsAndBytesConfig(load_in_8bit=True)
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"mistralai/Mistral-7B-v0.1", quantization_config=quant_config, device_map="auto"
|
||||
)
|
||||
```
|
134 docs/source/ko/main_classes/agent.md (Normal file)
@@ -0,0 +1,134 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# 에이전트 & 도구 [[agents-tools]]
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
Transformers Agent는 실험 중인 API이므로 언제든지 변경될 수 있습니다.
|
||||
API나 기반 모델이 자주 업데이트되므로, 에이전트가 제공하는 결과물은 달라질 수 있습니다.
|
||||
|
||||
</Tip>
|
||||
|
||||
에이전트와 도구에 대해 더 알아보려면 [소개 가이드](../transformers_agents)를 꼭 읽어보세요.
|
||||
이 페이지에는 기본 클래스에 대한 API 문서가 포함되어 있습니다.
|
||||
|
||||
## 에이전트 [[agents]]
|
||||
|
||||
우리는 기본 [`Agent`] 클래스를 기반으로 두 가지 유형의 에이전트를 제공합니다:
|
||||
- [`CodeAgent`]는 한 번에 동작합니다. 작업을 해결하기 위해 코드를 생성한 다음, 바로 실행합니다.
|
||||
- [`ReactAgent`]는 단계별로 동작하며, 각 단계는 하나의 생각, 하나의 도구 호출 및 실행으로 구성됩니다. 이 에이전트에는 두 가지 클래스가 있습니다:
|
||||
- [`ReactJsonAgent`]는 도구 호출을 JSON으로 작성합니다.
|
||||
- [`ReactCodeAgent`]는 도구 호출을 Python 코드로 작성합니다.
|
||||
|
||||
### Agent [[agent]]
|
||||
|
||||
[[autodoc]] Agent
|
||||
|
||||
### CodeAgent [[codeagent]]
|
||||
|
||||
[[autodoc]] CodeAgent
|
||||
|
||||
### React agents [[react-agents]]
|
||||
|
||||
[[autodoc]] ReactAgent
|
||||
|
||||
[[autodoc]] ReactJsonAgent
|
||||
|
||||
[[autodoc]] ReactCodeAgent
|
||||
|
||||
## Tools [[tools]]
|
||||
|
||||
### load_tool [[loadtool]]
|
||||
|
||||
[[autodoc]] load_tool
|
||||
|
||||
### Tool [[tool]]
|
||||
|
||||
[[autodoc]] Tool
|
||||
|
||||
### Toolbox [[toolbox]]
|
||||
|
||||
[[autodoc]] Toolbox
|
||||
|
||||
### PipelineTool [[pipelinetool]]
|
||||
|
||||
[[autodoc]] PipelineTool
|
||||
|
||||
### launch_gradio_demo [[launchgradiodemo]]
|
||||
|
||||
[[autodoc]] launch_gradio_demo
|
||||
|
||||
### ToolCollection [[toolcollection]]
|
||||
|
||||
[[autodoc]] ToolCollection
|
||||
|
||||
## 엔진 [[engines]]
|
||||
|
||||
에이전트 프레임워크에서 사용할 수 있는 엔진을 자유롭게 만들고 사용할 수 있습니다.
|
||||
이 엔진들은 다음과 같은 사양을 가지고 있습니다:
|
||||
1. 입력(`List[Dict[str, str]]`)에 대한 [메시지 형식](../chat_templating.md)을 따르고 문자열을 반환해야 합니다.
|
||||
2. 인수 `stop_sequences`에 시퀀스가 전달되기 *전에* 출력을 생성하는 것을 중지해야 합니다.
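
For illustration, here is a minimal sketch of a custom engine that follows the two requirements above; the class name, model id, and generation settings are assumptions for this sketch, not part of the Transformers API.

```python
# Hedged sketch of a custom engine: it takes chat messages, returns a string,
# and stops before any of the provided stop sequences.
from transformers import AutoModelForCausalLM, AutoTokenizer

class LocalTransformersEngine:
    def __init__(self, model_id="HuggingFaceTB/SmolLM-1.7B-Instruct"):  # model id is an assumption
        self.tokenizer = AutoTokenizer.from_pretrained(model_id)
        self.model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")

    def __call__(self, messages, stop_sequences=None):
        prompt = self.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
        inputs = self.tokenizer(prompt, return_tensors="pt").to(self.model.device)
        outputs = self.model.generate(**inputs, max_new_tokens=256)
        text = self.tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
        # Truncate at the first stop sequence, as the engine spec requires.
        for stop in stop_sequences or []:
            if stop in text:
                text = text.split(stop)[0]
        return text
```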
|
||||
|
||||
### HfApiEngine [[HfApiEngine]]
|
||||
|
||||
편의를 위해, 위의 사항을 구현하고 대규모 언어 모델 실행을 위해 추론 엔드포인트를 사용하는 `HfApiEngine`을 추가했습니다.
|
||||
|
||||
```python
|
||||
>>> from transformers import HfApiEngine
|
||||
|
||||
>>> messages = [
|
||||
... {"role": "user", "content": "Hello, how are you?"},
|
||||
... {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
|
||||
... {"role": "user", "content": "No need to help, take it easy."},
|
||||
... ]
|
||||
|
||||
>>> HfApiEngine()(messages, stop_sequences=["conversation"])
|
||||
|
||||
"That's very kind of you to say! It's always nice to have a relaxed "
|
||||
```
|
||||
|
||||
[[autodoc]] HfApiEngine
|
||||
|
||||
|
||||
## 에이전트 유형 [[agent-types]]
|
||||
|
||||
에이전트는 도구 간의 모든 유형의 객체를 처리할 수 있습니다; 도구는 완전히 멀티모달이므로 텍스트, 이미지, 오디오, 비디오 등 다양한 유형을 수락하고 반환할 수 있습니다.
|
||||
도구 간의 호환성을 높이고 ipython (jupyter, colab, ipython 노트북, ...)에서 이러한
|
||||
반환 값을 올바르게 렌더링하기 위해 이러한 유형을 중심으로 래퍼 클래스를
|
||||
구현합니다.
|
||||
|
||||
래핑된 객체는 처음과 동일하게 작동해야 합니다; 텍스트 객체는 여전히 문자열로 작동해야 하며,
|
||||
이미지 객체는 여전히 `PIL.Image`로 작동해야 합니다.
|
||||
|
||||
이러한 유형에는 세 가지 특정 목적이 있습니다:
|
||||
|
||||
- `to_raw`를 호출하면 기본 객체가 반환되어야 합니다.
|
||||
- `to_string`을 호출하면 객체가 문자열로 반환되어야 합니다:
|
||||
`AgentText`의 경우 문자열이 될 수 있지만, 다른 경우에는 객체의 직렬화된 버전의 경로일 수 있습니다.
|
||||
- ipython 커널에서 표시할 때 객체가 올바르게 표시되어야 합니다.
|
||||
|
||||
### AgentText [[agenttext]]
|
||||
|
||||
[[autodoc]] transformers.agents.agent_types.AgentText
|
||||
|
||||
### AgentImage [[agentimage]]
|
||||
|
||||
[[autodoc]] transformers.agents.agent_types.AgentImage
|
||||
|
||||
### AgentAudio [[agentaudio]]
|
||||
|
||||
[[autodoc]] transformers.agents.agent_types.AgentAudio
|
233 docs/source/ko/quantization/awq.md (Normal file)
@@ -0,0 +1,233 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# AWQ [[awq]]

<Tip>

Try AWQ quantization hands-on with this [notebook](https://colab.research.google.com/drive/1HzZH89yAXJaZgwJDhQj9LqSBux932BvY)!

</Tip>

[Activation-aware Weight Quantization (AWQ)](https://hf.co/papers/2306.00978) doesn't quantize all the weights in a model; instead, it preserves the weights that are important for LLM performance. This significantly reduces quantization loss, so you can run models in 4-bit precision without any performance degradation.

Several libraries can quantize models with the AWQ algorithm, such as [llm-awq](https://github.com/mit-han-lab/llm-awq), [autoawq](https://github.com/casper-hansen/AutoAWQ) or [optimum-intel](https://huggingface.co/docs/optimum/main/en/intel/optimization_inc). Transformers supports loading models quantized with the llm-awq and autoawq libraries. This guide shows how to load models quantized with autoawq, but the process is similar for llm-awq quantized models.

Make sure autoawq is installed:

```bash
pip install autoawq
```

AWQ-quantized models can be identified by the `quantization_config` attribute in the model's [config.json](https://huggingface.co/TheBloke/zephyr-7B-alpha-AWQ/blob/main/config.json) file:

```json
{
  "_name_or_path": "/workspace/process/huggingfaceh4_zephyr-7b-alpha/source",
  "architectures": [
    "MistralForCausalLM"
  ],
  ...
  ...
  ...
  "quantization_config": {
    "quant_method": "awq",
    "zero_point": true,
    "group_size": 128,
    "bits": 4,
    "version": "gemm"
  }
}
```

A quantized model is loaded with the [`~PreTrainedModel.from_pretrained`] method. If you loaded the model on the CPU, move it to a GPU device first. Use the `device_map` parameter to specify where to place the model:

```py
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "TheBloke/zephyr-7B-alpha-AWQ"
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda:0")
```

Loading an AWQ-quantized model automatically sets the weights to fp16 by default for performance reasons. If you want to load the weights in a different format, use the `torch_dtype` parameter:

```py
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "TheBloke/zephyr-7B-alpha-AWQ"
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float32)
```

To further accelerate inference, AWQ quantization can be combined with [FlashAttention-2](../perf_infer_gpu_one#flashattention-2):

```py
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("TheBloke/zephyr-7B-alpha-AWQ", attn_implementation="flash_attention_2", device_map="cuda:0")
```

## Fused modules [[fused-modules]]

Fused modules improve accuracy and performance. They are supported out of the box for the AWQ modules of the [Llama](https://huggingface.co/meta-llama) and [Mistral](https://huggingface.co/mistralai/Mistral-7B-v0.1) architectures, but you can also fuse AWQ modules for unsupported architectures.

<Tip warning={true}>

Fused modules cannot be combined with other optimization techniques such as FlashAttention-2.

</Tip>

<hfoptions id="fuse">
<hfoption id="supported architectures">

To enable fused modules for supported architectures, create an [`AwqConfig`] and set the parameters `fuse_max_seq_len` and `do_fuse=True`. The `fuse_max_seq_len` parameter is the total sequence length and should include the context length and the expected generation length. You can set it to a larger value to be safe.

For example, let's fuse the AWQ modules of the [TheBloke/Mistral-7B-OpenOrca-AWQ](https://huggingface.co/TheBloke/Mistral-7B-OpenOrca-AWQ) model.

```python
import torch
from transformers import AwqConfig, AutoModelForCausalLM

model_id = "TheBloke/Mistral-7B-OpenOrca-AWQ"

quantization_config = AwqConfig(
    bits=4,
    fuse_max_seq_len=512,
    do_fuse=True,
)

model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config).to(0)
```

The [TheBloke/Mistral-7B-OpenOrca-AWQ](https://huggingface.co/TheBloke/Mistral-7B-OpenOrca-AWQ) model was benchmarked with `batch_size=1` with and without fused modules.

<figcaption class="text-center text-gray-500 text-lg">Unfused module</figcaption>

| Batch size | Prefill length | Decode length | Prefill tokens/sec | Decode tokens/sec | Memory (VRAM) |
|-------------:|-----------------:|----------------:|-------------------:|------------------:|:----------------|
| 1 | 32 | 32 | 60.0984 | 38.4537 | 4.50 GB (5.68%) |
| 1 | 64 | 64 | 1333.67 | 31.6604 | 4.50 GB (5.68%) |
| 1 | 128 | 128 | 2434.06 | 31.6272 | 4.50 GB (5.68%) |
| 1 | 256 | 256 | 3072.26 | 38.1731 | 4.50 GB (5.68%) |
| 1 | 512 | 512 | 3184.74 | 31.6819 | 4.59 GB (5.80%) |
| 1 | 1024 | 1024 | 3148.18 | 36.8031 | 4.81 GB (6.07%) |
| 1 | 2048 | 2048 | 2927.33 | 35.2676 | 5.73 GB (7.23%) |

<figcaption class="text-center text-gray-500 text-lg">Fused module</figcaption>

| Batch size | Prefill length | Decode length | Prefill tokens/sec | Decode tokens/sec | Memory (VRAM) |
|-------------:|-----------------:|----------------:|-------------------:|------------------:|:----------------|
| 1 | 32 | 32 | 81.4899 | 80.2569 | 4.00 GB (5.05%) |
| 1 | 64 | 64 | 1756.1 | 106.26 | 4.00 GB (5.05%) |
| 1 | 128 | 128 | 2479.32 | 105.631 | 4.00 GB (5.06%) |
| 1 | 256 | 256 | 1813.6 | 85.7485 | 4.01 GB (5.06%) |
| 1 | 512 | 512 | 2848.9 | 97.701 | 4.11 GB (5.19%) |
| 1 | 1024 | 1024 | 3044.35 | 87.7323 | 4.41 GB (5.57%) |
| 1 | 2048 | 2048 | 2715.11 | 89.4709 | 5.57 GB (7.04%) |

The speed and throughput of fused and unfused modules were also tested with the [optimum-benchmark](https://github.com/huggingface/optimum-benchmark) library.

<div class="flex gap-4">
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/fused_forward_memory_plot.png" alt="generate throughput per batch size" />
    <figcaption class="mt-2 text-center text-sm text-gray-500">forward peak memory/batch size</figcaption>
  </div>
  <div>
    <img class="rounded-xl" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/quantization/fused_generate_throughput_plot.png" alt="forward latency per batch size" />
    <figcaption class="mt-2 text-center text-sm text-gray-500">generate throughput/batch size</figcaption>
  </div>
</div>

</hfoption>
<hfoption id="unsupported architectures">

For architectures that don't support fused modules yet, create your own fusing mapping with the `modules_to_fuse` parameter to define which modules need to be fused. For example, here is how to fuse the AWQ modules of the [TheBloke/Yi-34B-AWQ](https://huggingface.co/TheBloke/Yi-34B-AWQ) model.

```python
import torch
from transformers import AwqConfig, AutoModelForCausalLM

model_id = "TheBloke/Yi-34B-AWQ"

quantization_config = AwqConfig(
    bits=4,
    fuse_max_seq_len=512,
    modules_to_fuse={
        "attention": ["q_proj", "k_proj", "v_proj", "o_proj"],
        "layernorm": ["ln1", "ln2", "norm"],
        "mlp": ["gate_proj", "up_proj", "down_proj"],
        "use_alibi": False,
        "num_attention_heads": 56,
        "num_key_value_heads": 8,
        "hidden_size": 7168
    }
)

model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config).to(0)
```

The `modules_to_fuse` parameter should include:

- `"attention"`: The attention layers to fuse, in the following order: query, key, value and output projection layer. If you don't want to fuse these layers, pass an empty list.
- `"layernorm"`: The names of the LayerNorm layers to replace with a custom fused LayerNorm. If you don't want to fuse these layers, pass an empty list.
- `"mlp"`: The MLP layers to fuse into a single MLP layer, in the order: (gate (dense, layer, post-attention) / up / down layers).
- `"use_alibi"`: Whether the model uses ALiBi positional embeddings.
- `"num_attention_heads"`: The number of attention heads.
- `"num_key_value_heads"`: The number of key-value heads used to implement Grouped Query Attention (GQA). With `num_key_value_heads=num_attention_heads` the model uses Multi Head Attention (MHA), with `num_key_value_heads=1` it uses Multi Query Attention (MQA), and otherwise GQA is used.
- `"hidden_size"`: The dimension of the hidden representations.

</hfoption>
</hfoptions>

## ExLlama-v2 support [[exllama-v2-support]]

Recent versions of `autoawq` support ExLlama-v2 kernels for faster prefill and decoding. To get started, first install the latest version of `autoawq`:

```bash
pip install git+https://github.com/casper-hansen/AutoAWQ.git
```

Create an `AwqConfig()` with `version="exllama"` and pass it to the model.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, AwqConfig

quantization_config = AwqConfig(version="exllama")

model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/Mistral-7B-Instruct-v0.1-AWQ",
    quantization_config=quantization_config,
    device_map="auto",
)

input_ids = torch.randint(0, 100, (1, 128), dtype=torch.long, device="cuda")
output = model(input_ids)
print(output.logits)

tokenizer = AutoTokenizer.from_pretrained("TheBloke/Mistral-7B-Instruct-v0.1-AWQ")
input_ids = tokenizer.encode("How to make a cake", return_tensors="pt").to(model.device)
output = model.generate(input_ids, do_sample=True, max_length=50, pad_token_id=50256)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

<Tip warning={true}>

This feature is supported on AMD GPUs.

</Tip>
@ -0,0 +1,193 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->
# Knowledge Distillation for Computer Vision [[Knowledge-Distillation-for-Computer-Vision]]

[[open-in-colab]]

Knowledge distillation is a technique used to transfer knowledge from a larger, more complex model (the teacher) to a smaller, simpler model (the student). To distill knowledge from one model to the other, we take a pre-trained teacher model that was trained on a specific task (image classification in this case) and randomly initialize a student model to be trained on image classification. Next, we train the student model to mimic the teacher's outputs, minimizing the difference between the two models' outputs. This technique was first introduced in [Distilling the Knowledge in a Neural Network](https://arxiv.org/abs/1503.02531) by Hinton et al. In this guide, we will do task-specific knowledge distillation, using the [beans dataset](https://huggingface.co/datasets/beans).

This guide demonstrates how to distill a [fine-tuned ViT model](https://huggingface.co/merve/vit-mobilenet-beans-224) (the teacher model) into [MobileNet](https://huggingface.co/google/mobilenet_v2_1.4_224) (the student model) using the [Trainer API](https://huggingface.co/docs/transformers/en/main_classes/trainer#trainer) of 🤗 Transformers.

Let's install the libraries needed for distillation and for evaluating the process.

```bash
pip install transformers datasets accelerate tensorboard evaluate --upgrade
```

In this example, we use the `merve/beans-vit-224` model as the teacher. It is an image classification model based on `google/vit-base-patch16-224-in21k`, fine-tuned on the beans dataset. We will distill it into a randomly initialized MobileNetV2.

Let's now load the dataset.

```python
from datasets import load_dataset

dataset = load_dataset("beans")
```

We can use an image processor from either model, since in this case both return the same output at the same resolution. We will use the `map()` method of `dataset` to apply the preprocessing to every split of the dataset.

```python
from transformers import AutoImageProcessor

teacher_processor = AutoImageProcessor.from_pretrained("merve/beans-vit-224")

def process(examples):
    processed_inputs = teacher_processor(examples["image"])
    return processed_inputs

processed_datasets = dataset.map(process, batched=True)
```

We want the student model (a randomly initialized MobileNet) to mimic the teacher model (a fine-tuned vision transformer). To achieve this, we first get the logits of both the teacher and the student. Then we divide each of them by the parameter `temperature`, which controls the importance of each soft target. The parameter `lambda` weighs the importance of the distillation loss. In this example we use `temperature=5` and `lambda=0.5`. We use the Kullback-Leibler divergence loss to measure the divergence between the student and the teacher. Given two distributions P and Q, the KL divergence tells us how much extra information is needed to represent P using Q. If the two are identical, the KL divergence is zero, as no extra information is needed to explain P from Q. This makes the KL divergence useful in the context of knowledge distillation.
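Written out, the objective that the trainer below implements combines the student's regular cross-entropy loss on the true labels with the temperature-scaled KL term (here \\(z_s\\) and \\(z_t\\) are the student and teacher logits, \\(T\\) the temperature and \\(\lambda\\) the `lambda_param` weight; the doc-builder's display-math syntax is assumed):

$$\mathcal{L} = (1 - \lambda)\,\mathcal{L}_{CE}(z_s, y) + \lambda\, T^{2}\, D_{KL}\big(\mathrm{softmax}(z_t / T)\,\|\,\mathrm{softmax}(z_s / T)\big)$$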
```python
from transformers import TrainingArguments, Trainer
import torch
import torch.nn as nn
import torch.nn.functional as F


class ImageDistilTrainer(Trainer):
    def __init__(self, teacher_model=None, student_model=None, temperature=None, lambda_param=None, *args, **kwargs):
        super().__init__(model=student_model, *args, **kwargs)
        self.teacher = teacher_model
        self.student = student_model
        self.loss_function = nn.KLDivLoss(reduction="batchmean")
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.teacher.to(device)
        self.teacher.eval()
        self.temperature = temperature
        self.lambda_param = lambda_param

    def compute_loss(self, student, inputs, return_outputs=False):
        student_output = self.student(**inputs)

        with torch.no_grad():
            teacher_output = self.teacher(**inputs)

        # Compute the soft targets of the teacher and the student
        soft_teacher = F.softmax(teacher_output.logits / self.temperature, dim=-1)
        soft_student = F.log_softmax(student_output.logits / self.temperature, dim=-1)

        # Compute the distillation loss
        distillation_loss = self.loss_function(soft_student, soft_teacher) * (self.temperature ** 2)

        # Compute the true label loss
        student_target_loss = student_output.loss

        # Compute the final loss
        loss = (1. - self.lambda_param) * student_target_loss + self.lambda_param * distillation_loss
        return (loss, student_output) if return_outputs else loss
```

We will now log in to the Hugging Face Hub so that we can push our model to it through the `Trainer`.

```python
from huggingface_hub import notebook_login

notebook_login()
```

Let's set the `TrainingArguments`, the teacher model and the student model.

```python
from transformers import AutoModelForImageClassification, MobileNetV2Config, MobileNetV2ForImageClassification

repo_name = "my-awesome-model"  # placeholder Hub repository name; it was not defined in the original snippet

training_args = TrainingArguments(
    output_dir="my-awesome-model",
    num_train_epochs=30,
    fp16=True,
    logging_dir=f"{repo_name}/logs",
    logging_strategy="epoch",
    eval_strategy="epoch",
    save_strategy="epoch",
    load_best_model_at_end=True,
    metric_for_best_model="accuracy",
    report_to="tensorboard",
    push_to_hub=True,
    hub_strategy="every_save",
    hub_model_id=repo_name,
)

num_labels = len(processed_datasets["train"].features["labels"].names)

# Initialize the models
teacher_model = AutoModelForImageClassification.from_pretrained(
    "merve/beans-vit-224",
    num_labels=num_labels,
    ignore_mismatched_sizes=True
)

# Train MobileNetV2 from scratch
student_config = MobileNetV2Config()
student_config.num_labels = num_labels
student_model = MobileNetV2ForImageClassification(student_config)
```

We can use the `compute_metrics` function to evaluate our model on the test set. This function will be used during training to compute the `accuracy` of our model.

```python
import evaluate
import numpy as np

accuracy = evaluate.load("accuracy")

def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    acc = accuracy.compute(references=labels, predictions=np.argmax(predictions, axis=1))
    return {"accuracy": acc["accuracy"]}
```

Let's initialize the `Trainer` with the training arguments we defined. We will also initialize our data collator.

```python
from transformers import DefaultDataCollator

data_collator = DefaultDataCollator()
trainer = ImageDistilTrainer(
    student_model=student_model,
    teacher_model=teacher_model,
    args=training_args,
    train_dataset=processed_datasets["train"],
    eval_dataset=processed_datasets["validation"],
    data_collator=data_collator,
    tokenizer=teacher_processor,
    compute_metrics=compute_metrics,
    temperature=5,
    lambda_param=0.5
)
```

We can now train our model.

```python
trainer.train()
```

We can evaluate the model on the test set.

```python
trainer.evaluate(processed_datasets["test"])
```

On the test set, our model reaches 72% accuracy. As a sanity check on the efficiency of distillation, we also trained MobileNet on the beans dataset from scratch with the same hyperparameters and observed 63% accuracy on the test set. We invite you to try different pre-trained teacher models, student architectures and distillation parameters, and to report your findings. The training logs and checkpoints of the distilled model can be found in [this repository](https://huggingface.co/merve/vit-mobilenet-beans-224), and MobileNetV2 trained from scratch in this [repository](https://huggingface.co/merve/resnet-mobilenet-beans-5).
@ -173,7 +173,7 @@ class ResnetModelForImageClassification(PreTrainedModel):
def forward(self, tensor, labels=None):
logits = self.model(tensor)
if labels is not None:
loss = torch.nn.cross_entropy(logits, labels)
loss = torch.nn.functional.cross_entropy(logits, labels)
return {"loss": loss, "logits": logits}
return {"logits": logits}
```

@ -154,7 +154,7 @@ class ResnetModelForImageClassification(PreTrainedModel):
def forward(self, tensor, labels=None):
logits = self.model(tensor)
if labels is not None:
loss = torch.nn.cross_entropy(logits, labels)
loss = torch.nn.functional.cross_entropy(logits, labels)
return {"loss": loss, "logits": logits}
return {"logits": logits}
```

@ -133,9 +133,6 @@ generation_output[:2]
[[autodoc]] ForcedEOSTokenLogitsProcessor
- __call__

[[autodoc]] ForceTokensLogitsProcessor
- __call__

[[autodoc]] HammingDiversityLogitsProcessor
- __call__

@ -151,9 +148,6 @@ generation_output[:2]
[[autodoc]] LogitsProcessorList
- __call__

[[autodoc]] LogitsWarper
- __call__

[[autodoc]] MinLengthLogitsProcessor
- __call__


@ -104,7 +104,7 @@ for running remotely as well. You can easily customize the example used, command
and type of compute hardware, and then run the script to automatically launch the example.

You can refer to
[hardware setup](https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup)
[hardware setup](https://www.run.house/docs/tutorials/quick-start-cloud)
for more information about hardware and dependency setup with Runhouse, or this
[Colab tutorial](https://colab.research.google.com/drive/1sh_aNQzJX5BKAdNeXthTNGxKz7sM9VPc) for a more in-depth
walkthrough.

@ -221,7 +221,7 @@ python run_clm_flax.py \
Training should converge at a loss and perplexity
of 3.24 and 25.72 respectively after 20 epochs on a single TPUv3-8.
This should take less than ~21 hours.
Training statistics can be accessed on [tfhub.de](https://tensorboard.dev/experiment/2zEhLwJ0Qp2FAkI3WVH9qA).
Training statistics can be accessed on [tfhub.dev](https://tensorboard.dev/experiment/2zEhLwJ0Qp2FAkI3WVH9qA).

For a step-by-step walkthrough of how to do causal language modeling in Flax, please have a
look at [this](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/causal_language_modeling_flax.ipynb) google colab.

@ -30,6 +30,6 @@ python run_summarization_flax.py \
--push_to_hub
```

This should finish in 37min, with validation loss and ROUGE2 score of 1.7785 and 17.01 respectively after 6 epochs. training statistics can be accessed on [tfhub.de](https://tensorboard.dev/experiment/OcPfOIgXRMSJqYB4RdK2tA/#scalars).
This should finish in 37min, with validation loss and ROUGE2 score of 1.7785 and 17.01 respectively after 6 epochs. training statistics can be accessed on [tfhub.dev](https://tensorboard.dev/experiment/OcPfOIgXRMSJqYB4RdK2tA/#scalars).

> Note that here we used default `generate` arguments, using arguments specific for `xsum` dataset should give better ROUGE scores.

@ -22,5 +22,5 @@ If you would like to list benchmark results on your favorite models of the [mode

| Benchmark description | Results | Environment info | Author |
|:----------|:-------------|:-------------|------:|
| PyTorch Benchmark on inference for `google-bert/bert-base-cased` |[memory](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/inference_memory.csv) | [env](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/env.csv) | [Partick von Platen](https://github.com/patrickvonplaten) |
| PyTorch Benchmark on inference for `google-bert/bert-base-cased` |[time](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/inference_time.csv) | [env](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/env.csv) | [Partick von Platen](https://github.com/patrickvonplaten) |
| PyTorch Benchmark on inference for `google-bert/bert-base-cased` |[memory](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/inference_memory.csv) | [env](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/env.csv) | [Patrick von Platen](https://github.com/patrickvonplaten) |
| PyTorch Benchmark on inference for `google-bert/bert-base-cased` |[time](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/inference_time.csv) | [env](https://github.com/patrickvonplaten/files_to_link_to/blob/master/bert_benchmark/env.csv) | [Patrick von Platen](https://github.com/patrickvonplaten) |
@ -544,7 +544,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)

@ -723,7 +723,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)

@ -639,7 +639,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)

@ -638,7 +638,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)

@ -47,10 +47,10 @@ from transformers import (
Trainer,
TrainingArguments,
default_data_collator,
is_deepspeed_zero3_enabled,
is_torch_tpu_available,
set_seed,
)
from transformers.integrations import is_deepspeed_zero3_enabled
from transformers.testing_utils import CaptureLogger
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry

@ -52,9 +52,9 @@ from transformers import (
SchedulerType,
default_data_collator,
get_scheduler,
is_deepspeed_zero3_enabled,
is_torch_tpu_available,
)
from transformers.integrations import is_deepspeed_zero3_enabled
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version

@ -838,7 +838,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)

@ -675,7 +675,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)

@ -619,7 +619,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)

@ -677,7 +677,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)

@ -879,7 +879,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
accelerator.save_state(f"step_{completed_steps}")

if completed_steps >= args.max_train_steps:

@ -894,7 +894,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)

@ -516,7 +516,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)

@ -688,7 +688,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)

@ -564,7 +564,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)

@ -722,7 +722,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)

@ -664,7 +664,7 @@ def main():
completed_steps += 1

if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
@ -2,7 +2,7 @@ transformers==4.38.0
datasets==1.16.0
wandb==0.12.0
tensorboard==2.6.0
torch==1.13.1
torch==2.2.0
huggingface-hub==0.1.0
git+https://github.com/huggingface/accelerate.git@3c45b6f760ad8745be9ebc9bbb26f5b04dea4abe
datasketch==1.5.7

@ -1,5 +1,5 @@
absl-py==1.0.0
aiohttp==3.9.4
aiohttp==3.10.2
aiosignal==1.2.0
alembic==1.7.7
appdirs==1.4.4
@ -115,7 +115,7 @@ mujoco-py==2.1.2.14
multidict==6.0.2
multiprocess==0.70.12.2
mypy-extensions==0.4.3
nltk==3.7
nltk==3.9
numba==0.55.1
numpy==1.22.3
oauthlib==3.2.2
@ -205,7 +205,7 @@ tensorboard==2.8.0
tensorboard-data-server==0.6.1
tensorboard-plugin-wit==1.8.1
tensorboardX==2.5
tensorflow==2.11.1
tensorflow==2.12.1
tensorflow-io-gcs-filesystem==0.24.0
termcolor==1.1.0
text-unidecode==1.3
@ -217,7 +217,7 @@ timm==0.5.4
tokenizers==0.11.6
tomli==2.0.1
toolz==0.11.2
torch==1.13.1
torch==2.2.0
torchaudio==0.11.0
torchvision==0.12.0
tqdm==4.66.3

@ -94,7 +94,6 @@ def main():

short_validation_dataset = dataset.filter(lambda x: (len(x["question"]) + len(x["context"])) < 4 * 4096)
short_validation_dataset = short_validation_dataset.filter(lambda x: x["category"] != "null")
short_validation_dataset

model_id = "vasudevgupta/flax-bigbird-natural-questions"
model = FlaxBigBirdForNaturalQuestions.from_pretrained(model_id)

@ -3,6 +3,6 @@ jaxlib>=0.1.59
flax>=0.3.5
optax>=0.0.8
-f https://download.pytorch.org/whl/torch_stable.html
torch==1.13.1
torch==2.2.0
-f https://download.pytorch.org/whl/torch_stable.html
torchvision==0.10.0+cpu
@ -48,7 +48,7 @@ nbformat==5.0.7
nest-asyncio==1.4.0
notebook==6.4.12
numpy==1.22.0
opencv-python==4.4.0.42
opencv-python==4.8.1.78
packaging==20.3
pandas==1.1.2
pandocfilters==1.4.2

@ -48,7 +48,7 @@ nbformat==5.0.7
nest-asyncio==1.4.0
notebook==6.4.12
numpy==1.22.0
opencv-python==4.4.0.42
opencv-python==4.8.1.78
packaging==20.3
pandas==1.1.2
pandocfilters==1.4.2
@ -84,7 +84,7 @@ six==1.14.0
terminado==0.8.3
testpath==0.4.4
tokenizers==0.8.1rc2
torch==1.13.1
torch==2.2.0
torchvision==0.7.0
tornado==6.4.1
tqdm==4.66.3
i18n/README_ar.md (new file, 317 lines)
@ -0,0 +1,317 @@
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

<p align="center">
  <picture>
    <source media="(prefers-color-scheme: dark)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/transformers-logo-dark.svg">
    <source media="(prefers-color-scheme: light)" srcset="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/transformers-logo-light.svg">
    <img alt="Hugging Face Transformers Library" src="https://huggingface.co/datasets/huggingface/documentation-images/raw/main/transformers-logo-light.svg" width="352" height="59" style="max-width: 100%;">
  </picture>
  <br/>
  <br/>
</p>

<p align="center">
    <a href="https://circleci.com/gh/huggingface/transformers"><img alt="Build" src="https://img.shields.io/circleci/build/github/huggingface/transformers/main"></a>
    <a href="https://github.com/huggingface/transformers/blob/main/LICENSE"><img alt="GitHub" src="https://img.shields.io/github/license/huggingface/transformers.svg?color=blue"></a>
    <a href="https://huggingface.co/docs/transformers/index"><img alt="Documentation" src="https://img.shields.io/website/http/huggingface.co/docs/transformers/index.svg?down_color=red&down_message=offline&up_message=online"></a>
    <a href="https://github.com/huggingface/transformers/releases"><img alt="GitHub release" src="https://img.shields.io/github/release/huggingface/transformers.svg"></a>
    <a href="https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md"><img alt="Contributor Covenant" src="https://img.shields.io/badge/Contributor%20Covenant-v2.0%20adopted-ff69b4.svg"></a>
    <a href="https://zenodo.org/badge/latestdoi/155220641"><img src="https://zenodo.org/badge/155220641.svg" alt="DOI"></a>
</p>

<h4 align="center">
    <p>
        <a href="https://github.com/huggingface/transformers/blob/main/README.md">English</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hans.md">简体中文</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_zh-hant.md">繁體中文</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ko.md">한국어</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_es.md">Español</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
        <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
        <b>العربية</b> |
    </p>
</h4>

<h3 align="center">
    <p>State-of-the-art machine learning for JAX, PyTorch and TensorFlow</p>
</h3>

<h3 align="center">
    <a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a>
</h3>

🤗 Transformers provides thousands of pretrained models to perform tasks on different modalities such as text, vision, and audio.

These models can be applied on:

* 📝 Text, for tasks like text classification, information extraction, question answering, summarization, translation, and text generation, in over 100 languages.
* 🖼️ Images, for tasks like image classification, object detection, and segmentation.
* 🗣️ Audio, for tasks like speech recognition and audio classification.

Transformer models can also perform tasks on **several modalities combined**, such as table question answering, optical character recognition, information extraction from scanned documents, video classification, and visual question answering.

🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets, and then share them with the community on our [model hub](https://huggingface.co/models). At the same time, each Python module defining an architecture is fully standalone and can be modified to enable quick research experiments.

🤗 Transformers is backed by the three most popular deep learning libraries — [Jax](https://jax.readthedocs.io/en/latest/), [PyTorch](https://pytorch.org/) and [TensorFlow](https://www.tensorflow.org/) — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other.

## Online demos

You can test most of our models directly on their pages from the [model hub](https://huggingface.co/models). We also offer [private model hosting, versioning, & an inference API](https://huggingface.co/pricing) for public and private models.

Here are a few examples:

In Natural Language Processing:
- [Masked word completion with BERT](https://huggingface.co/google-bert/bert-base-uncased?text=Paris+is+the+%5BMASK%5D+of+France)
- [Named Entity Recognition with Electra](https://huggingface.co/dbmdz/electra-large-discriminator-finetuned-conll03-english?text=My+name+is+Sarah+and+I+live+in+London+city)
- [Text generation with Mistral](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.2)
- [Natural Language Inference with RoBERTa](https://huggingface.co/FacebookAI/roberta-large-mnli?text=The+dog+was+lost.+Nobody+lost+any+animal)
- [Summarization with BART](https://huggingface.co/facebook/bart-large-cnn?text=The+tower+is+324+metres+%281%2C063+ft%29+tall%2C+about+the+same+height+as+an+81-storey+building%2C+and+the+tallest+structure+in+Paris.+Its+base+is+square%2C+measuring+125+metres+%28410+ft%29+on+each+side.+During+its+construction%2C+the+Eiffel+Tower+surpassed+the+Washington+Monument+to+become+the+tallest+man-made+structure+in+the+world%2C+a+title+it+held+for+41+years+until+the+Chrysler+Building+in+New+York+City+was+finished+in+1930.+It+was+the+first+structure+to+reach+a+height+of+300+metres.+Due+to+the+addition+of+a+broadcasting+aerial+at+the+top+of+the+tower+in+1957%2C+it+is+now+taller+than+the+Chrysler+Building+by+5.2+metres+%2817+ft%29.+Excluding+transmitters%2C+the+Eiffel+Tower+is+the+second+tallest+free-standing+structure+in+France+after+the+Millau+Viaduct)
- [Question answering with DistilBERT](https://huggingface.co/distilbert/distilbert-base-uncased-distilled-squad?text=Which+name+is+also+used+to+describe+the+Amazon+rainforest+in+English%3F&context=The+Amazon+rainforest+%28Portuguese%3A+Floresta+Amaz%C3%B4nica+or+Amaz%C3%B4nia%3B+Spanish%3A+Selva+Amaz%C3%B3nica%2C+Amazon%C3%ADa+or+usually+Amazonia%3B+French%3A+For%C3%AAt+amazonienne%3B+Dutch%3A+Amazoneregenwoud%29%2C+also+known+in+English+as+Amazonia+or+the+Amazon+Jungle%2C+is+a+moist+broadleaf+forest+that+covers+most+of+the+Amazon+basin+of+South+America.+This+basin+encompasses+7%2C000%2C000+square+kilometres+%282%2C700%2C000+sq+mi%29%2C+of+which+5%2C500%2C000+square+kilometres+%282%2C100%2C000+sq+mi%29+are+covered+by+the+rainforest.+This+region+includes+territory+belonging+to+nine+nations.+The+majority+of+the+forest+is+contained+within+Brazil%2C+with+60%25+of+the+rainforest%2C+followed+by+Peru+with+13%25%2C+Colombia+with+10%25%2C+and+with+minor+amounts+in+Venezuela%2C+Ecuador%2C+Bolivia%2C+Guyana%2C+Suriname+and+French+Guiana.+States+or+departments+in+four+nations+contain+%22Amazonas%22+in+their+names.+The+Amazon+represents+over+half+of+the+planet%27s+remaining+rainforests%2C+and+comprises+the+largest+and+most+biodiverse+tract+of+tropical+rainforest+in+the+world%2C+with+an+estimated+390+billion+individual+trees+divided+into+16%2C000+species)
- [Translation with T5](https://huggingface.co/google-t5/t5-base?text=My+name+is+Wolfgang+and+I+live+in+Berlin)

In Computer Vision:
- [Image classification with ViT](https://huggingface.co/google/vit-base-patch16-224)
- [Object detection with DETR](https://huggingface.co/facebook/detr-resnet-50)
- [Semantic segmentation with SegFormer](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512)
- [Panoptic segmentation with Mask2Former](https://huggingface.co/facebook/mask2former-swin-large-coco-panoptic)
- [Depth estimation with Depth Anything](https://huggingface.co/docs/transformers/main/model_doc/depth_anything)
- [Video classification with VideoMAE](https://huggingface.co/docs/transformers/model_doc/videomae)
- [Universal segmentation with OneFormer](https://huggingface.co/shi-labs/oneformer_ade20k_dinat_large)

In Audio:
- [Automatic speech recognition with Whisper](https://huggingface.co/openai/whisper-large-v3)
- [Keyword spotting with Wav2Vec2](https://huggingface.co/superb/wav2vec2-base-superb-ks)
- [Audio classification with Audio Spectrogram Transformer](https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593)

In Multimodal tasks:
- [Table question answering with TAPAS](https://huggingface.co/google/tapas-base-finetuned-wtq)
- [Visual question answering with ViLT](https://huggingface.co/dandelin/vilt-b32-finetuned-vqa)
- [Image captioning with LLaVa](https://huggingface.co/llava-hf/llava-1.5-7b-hf)
- [Zero-shot image classification with SigLIP](https://huggingface.co/google/siglip-so400m-patch14-384)
- [Document question answering with LayoutLM](https://huggingface.co/impira/layoutlm-document-qa)
- [Zero-shot video classification with X-CLIP](https://huggingface.co/docs/transformers/model_doc/xclip)
- [Zero-shot object detection with OWLv2](https://huggingface.co/docs/transformers/en/model_doc/owlv2)
- [Zero-shot image segmentation with CLIPSeg](https://huggingface.co/docs/transformers/model_doc/clipseg)
- [Automatic mask generation with SAM](https://huggingface.co/docs/transformers/model_doc/sam)
## 100 projects using Transformers

🤗 Transformers is more than a toolkit to use pretrained models: it's a community of projects built around it and the Hugging Face Hub. We want 🤗 Transformers to enable developers, researchers, students, professors, engineers, and anyone else to build their dream projects.

In order to celebrate transformers reaching 100,000 stars, we decided to put the spotlight on the community and created the [awesome-transformers](./awesome-transformers.md) page, which lists 100 incredible projects built in the vicinity of transformers.

If you own or use a project that you believe should be part of the list, please open a PR to add it!

## If you are looking for custom support from the Hugging Face team

<a target="_blank" href="https://huggingface.co/support">
    <img alt="HuggingFace Expert Acceleration Program" src="https://cdn-media.huggingface.co/marketing/transformers/new-support-improved.png" style="max-width: 600px; border: 1px solid #eee; border-radius: 4px; box-shadow: 0 1px 2px 0 rgba(0, 0, 0, 0.05);">
</a><br>

## Quick tour

To immediately use a model on a given input (text, image, audio, ...), we provide the `pipeline` API. Pipelines group together a pretrained model with the preprocessing that was used during that model's training. Here is how to quickly use a pipeline to classify positive versus negative texts:

```python
>>> from transformers import pipeline

# Allocate a pipeline for sentiment analysis
>>> classifier = pipeline('sentiment-analysis')
>>> classifier('We are very happy to introduce pipeline to the transformers repository.')
[{'label': 'POSITIVE', 'score': 0.9996980428695679}]
```

The second line of code downloads and caches the pretrained model used by the pipeline, while the third evaluates it on the given text. Here, the answer is "positive" with a confidence of 99.97%.

Many tasks have a pretrained `pipeline` ready to go, in NLP but also in computer vision and speech. For example, we can easily extract the objects detected in an image:

``` python
>>> import requests
>>> from PIL import Image
>>> from transformers import pipeline

# Download an image with cute cats
>>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png"
>>> image_data = requests.get(url, stream=True).raw
>>> image = Image.open(image_data)

# Allocate a pipeline for object detection
>>> object_detector = pipeline('object-detection')
>>> object_detector(image)
[{'score': 0.9982201457023621,
  'label': 'remote',
  'box': {'xmin': 40, 'ymin': 70, 'xmax': 175, 'ymax': 117}},
 {'score': 0.9960021376609802,
  'label': 'remote',
  'box': {'xmin': 333, 'ymin': 72, 'xmax': 368, 'ymax': 187}},
 {'score': 0.9954745173454285,
  'label': 'couch',
  'box': {'xmin': 0, 'ymin': 1, 'xmax': 639, 'ymax': 473}},
 {'score': 0.9988006353378296,
  'label': 'cat',
  'box': {'xmin': 13, 'ymin': 52, 'xmax': 314, 'ymax': 470}},
 {'score': 0.9986783862113953,
  'label': 'cat',
  'box': {'xmin': 345, 'ymin': 23, 'xmax': 640, 'ymax': 368}}]
```

Here we get a list of objects detected in the image, with a box surrounding each object and a confidence score. Here is the original image on the left, with the predictions displayed on the right:

<h3 align="center">
    <a><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png" width="400"></a>
    <a><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample_post_processed.png" width="400"></a>
</h3>

You can learn more about the tasks supported by the `pipeline` API in [this tutorial](https://huggingface.co/docs/transformers/task_summary).

In addition to `pipeline`, to download and use any of the pretrained models on your given task, all it takes is three lines of code. Here is the PyTorch version:
```python
>>> from transformers import AutoTokenizer, AutoModel

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> model = AutoModel.from_pretrained("google-bert/bert-base-uncased")

>>> inputs = tokenizer("Hello world!", return_tensors="pt")
>>> outputs = model(**inputs)
```

And here is the equivalent code for TensorFlow:
```python
>>> from transformers import AutoTokenizer, TFAutoModel

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
>>> model = TFAutoModel.from_pretrained("google-bert/bert-base-uncased")

>>> inputs = tokenizer("Hello world!", return_tensors="tf")
>>> outputs = model(**inputs)
```

The tokenizer is responsible for all the preprocessing the pretrained model expects and can be called directly on a single string (as in the examples above) or a list. It will output a dictionary that you can use in downstream code or simply pass directly to your model using the ** argument unpacking operator.

The model itself is a regular [Pytorch `nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) or a [TensorFlow `tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) (depending on your backend) which you can use as usual. [This tutorial](https://huggingface.co/docs/transformers/training) explains how to integrate such a model into a classic PyTorch or TensorFlow training loop, or how to use our `Trainer` API to quickly fine-tune it on a new dataset.

## Why should I use transformers?

1. Easy-to-use state-of-the-art models:
    - High performance on natural language understanding & generation, computer vision, and audio tasks.
    - Low barrier to entry for educators and practitioners.
    - Few user-facing abstractions with just three classes to learn.
    - A unified API for using all our pretrained models.

1. Lower compute costs, smaller carbon footprint:
    - Researchers can share trained models instead of always retraining.
    - Practitioners can reduce compute time and production costs.
    - Dozens of architectures with over 400,000 pretrained models across all modalities.

1. Choose the right framework for every part of a model's lifetime:
    - Train state-of-the-art models in 3 lines of code.
    - Move a single model between TF2.0/PyTorch/JAX frameworks at will.
    - Seamlessly pick the right framework for training, evaluation, and production.

1. Easily customize a model or an example to your needs:
    - We provide examples for each architecture to reproduce the results published by its original authors.
    - Model internals are exposed as consistently as possible.
    - Model files can be used independently of the library for quick experiments.

## Why shouldn't I use transformers?

- This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving into additional abstractions/files.
- The training API is not intended to work on any model; it is optimized to work with the models provided by the library. For generic machine learning loops, you should use another library (possibly [Accelerate](https://huggingface.co/docs/accelerate)).
- While we strive to present as many use cases as possible, the scripts in our [examples folder](https://github.com/huggingface/transformers/tree/main/examples) are just that: examples. It is expected that they won't work out of the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs.

## Installation

### With pip

This repository is tested on Python 3.8+, Flax 0.4.1+, PyTorch 1.11+, and TensorFlow 2.6+.

You should install 🤗 Transformers in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).

First, create a virtual environment with the version of Python you're going to use and activate it.

Then, you will need to install at least one of Flax, PyTorch, or TensorFlow.
Please refer to the [TensorFlow installation page](https://www.tensorflow.org/install/), the [PyTorch installation page](https://pytorch.org/get-started/locally/#start-locally) and/or the [Flax](https://github.com/google/flax#quick-install) and [Jax](https://github.com/google/jax#installation) installation pages regarding the specific installation command for your platform.

When one of those backends has been installed, 🤗 Transformers can be installed using pip as follows:

```bash
pip install transformers
```

If you'd like to play with the examples or need the bleeding edge of the code and can't wait for a new release, you must [install the library from source](https://huggingface.co/docs/transformers/installation#installing-from-source).

### With conda

🤗 Transformers can be installed using conda as follows:

```shell script
conda install conda-forge::transformers
```

> **_NOTE:_** Installing `transformers` from the `huggingface` channel is deprecated.

Follow the installation pages of Flax, PyTorch or TensorFlow to see how to install them with conda.

> **_NOTE:_** On Windows, you may be prompted to activate Developer Mode in order to benefit from caching. If this is not an option for you, please let us know in [this issue](https://github.com/huggingface/huggingface_hub/issues/1062).

## Model architectures

**[All the model checkpoints](https://huggingface.co/models)** provided by 🤗 Transformers are seamlessly integrated from the huggingface.co [model hub](https://huggingface.co/models), where they are uploaded directly by [users](https://huggingface.co/users) and [organizations](https://huggingface.co/organizations).

Current number of checkpoints: 

🤗 Transformers currently provides the following architectures: see [here](https://huggingface.co/docs/transformers/model_summary) for a high-level summary of each of them.

To check if each model has an implementation in Flax, PyTorch or TensorFlow, or has an associated tokenizer backed by the 🤗 Tokenizers library, refer to [this table](https://huggingface.co/docs/transformers/index#supported-frameworks).

These implementations have been tested on several datasets (see the example scripts) and should match the performance of the original implementations. You can find more details on performance in the Examples section of the [documentation](https://github.com/huggingface/transformers/tree/main/examples).

## Learn more

| Section | Description |
|-|-|
| [Documentation](https://huggingface.co/docs/transformers/) | Full API documentation and tutorials |
| [Task summary](https://huggingface.co/docs/transformers/task_summary) | Tasks supported by 🤗 Transformers |
| [Preprocessing tutorial](https://huggingface.co/docs/transformers/preprocessing) | Using the `Tokenizer` class to prepare data for the models |
| [Training and fine-tuning](https://huggingface.co/docs/transformers/training) | Using the models provided by 🤗 Transformers in a PyTorch/TensorFlow training loop and the `Trainer` API |
| [Quick tour: Fine-tuning/usage scripts](https://github.com/huggingface/transformers/tree/main/examples) | Example scripts for fine-tuning models on a wide range of tasks |
| [Model sharing and uploading](https://huggingface.co/docs/transformers/model_sharing) | Upload and share your fine-tuned models with the community |

## Citation

We now have a [paper](https://www.aclweb.org/anthology/2020.emnlp-demos.6/) you can cite for the 🤗 Transformers library:
```bibtex
@inproceedings{wolf-etal-2020-transformers,
    title = "Transformers: State-of-the-Art Natural Language Processing",
    author = "Thomas Wolf and Lysandre Debut and Victor Sanh and Julien Chaumond and Clement Delangue and Anthony Moi and Pierric Cistac and Tim Rault and R{\'e}mi Louf and Morgan Funtowicz and Joe Davison and Sam Shleifer and Patrick von Platen and Clara Ma and Yacine Jernite and Julien Plu and Canwen Xu and Teven Le Scao and Sylvain Gugger and Mariama Drame and Quentin Lhoest and Alexander M. Rush",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations",
    month = oct,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.emnlp-demos.6",
    pages = "38--45"
}
```
@ -48,6 +48,7 @@ limitations under the License.
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
<b>Deutsch</b> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ar.md">العربية</a> |
</p>
</h4>

@ -43,6 +43,7 @@ limitations under the License.
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ar.md">العربية</a> |
</p>
</h4>

@ -48,6 +48,7 @@ limitations under the License.
<b>Français</b> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ar.md">العربية</a> |
</p>
</h4>

@ -68,6 +68,7 @@ checkpoint: जाँच बिंदु
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ar.md">العربية</a> |
</p>
</h4>

@ -78,6 +78,7 @@ user: ユーザ
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_vi.md">Tiếng Việt</a> |
<a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ar.md">العربية</a> |
</p>
</h4>
Some files were not shown because too many files have changed in this diff.