Mirror of https://github.com/huggingface/transformers.git (synced 2025-10-31 17:14:56 +08:00)

Compare commits: 114 commits, run_with_i ... ci_with_to
| SHA1 | Author | Date | |
|---|---|---|---|
| 146719af19 | |||
| b759a9c9b5 | |||
| 28aa9132cd | |||
| 35e3659f22 | |||
| e1ac2611ce | |||
| 1eab9c3e7c | |||
| c1600a6c8e | |||
| 8e4e94783b | |||
| e8b2c60fb7 | |||
| 0b2f104897 | |||
| 5927114611 | |||
| b6f6e04178 | |||
| bc17525caf | |||
| 641efa99c3 | |||
| b635c723ff | |||
| ac1bd87bd2 | |||
| d63aadf140 | |||
| 1332a33025 | |||
| b4295c2947 | |||
| d9aefd1e94 | |||
| 04fecff78d | |||
| f49dc9e88e | |||
| 959a9cd678 | |||
| 60b73c7b86 | |||
| db6a65d105 | |||
| 5bcd773b55 | |||
| f81ada3019 | |||
| 6b0e5e6215 | |||
| 361599d284 | |||
| 64fea67ca2 | |||
| fbf4cc6486 | |||
| 05a67b11c2 | |||
| def309661f | |||
| 6f53baecba | |||
| b793703b5b | |||
| ec1004513f | |||
| a4bee5ef1e | |||
| 347b86bba7 | |||
| ae28872f89 | |||
| 09d67319ea | |||
| 4863ea6c63 | |||
| c71d914219 | |||
| df7573fc7c | |||
| e275a3ef0f | |||
| 42c5ac4d98 | |||
| c80cef092f | |||
| c309243ea1 | |||
| f945d07926 | |||
| ae85eddedd | |||
| a468748e28 | |||
| de97dc5e61 | |||
| 249ec520e4 | |||
| b68f7fde19 | |||
| 90313201d1 | |||
| 26dab642be | |||
| f35b87c4c9 | |||
| 24968b35df | |||
| 6e4f672c56 | |||
| 1ab2abb9eb | |||
| 42f7f0a453 | |||
| 1dbb1f718c | |||
| 2e28f3edc5 | |||
| 49688a99ac | |||
| 513da05615 | |||
| 05366c5fa7 | |||
| 7250f70286 | |||
| 6502fbcb0d | |||
| ca2513ac91 | |||
| 4861d48b46 | |||
| a754e1d006 | |||
| ede36bc645 | |||
| 85e00723fd | |||
| 7dae38f191 | |||
| 9202969e66 | |||
| e1595879b1 | |||
| 1858d9a568 | |||
| a84ab00df7 | |||
| 7bdccbfc2e | |||
| a707d53d1e | |||
| 850eb0b419 | |||
| 024abd58bc | |||
| 33da39989a | |||
| d6e0c325a9 | |||
| ddbf9be141 | |||
| 556554db52 | |||
| 7f82dfc907 | |||
| 31050ff6b7 | |||
| 991b486e97 | |||
| b4b5a93534 | |||
| 98786d0fbe | |||
| c0c1aba23e | |||
| 1c96ee32cd | |||
| b33b6e1aaf | |||
| d444e3dd92 | |||
| ad4b39289e | |||
| a28acd9ac3 | |||
| 5a088af755 | |||
| 4b79de01f8 | |||
| 59e28a6c70 | |||
| cb6aecc057 | |||
| 7f9eea42fc | |||
| d9545291d7 | |||
| 86e8fe47a5 | |||
| d14c94508b | |||
| 35ee0609b4 | |||
| b4503e2256 | |||
| 7e9b71ab75 | |||
| 09d540ed9d | |||
| d3d5618789 | |||
| 7372554c1b | |||
| 13d70ce9fa | |||
| 58261e0f5e | |||
| 4b26b12621 | |||
| 9ec8c5f1a1 | |||
| @ -7,18 +7,6 @@ parameters: | ||||
|     nightly: | ||||
|         type: boolean | ||||
|         default: false | ||||
|     GHA_Actor: | ||||
|         type: string | ||||
|         default: "" | ||||
|     GHA_Action: | ||||
|         type: string | ||||
|         default: "" | ||||
|     GHA_Event: | ||||
|         type: string | ||||
|         default: "" | ||||
|     GHA_Meta: | ||||
|         type: string | ||||
|         default: "" | ||||
|  | ||||
| jobs: | ||||
|     # Ensure running with CircleCI/huggingface | ||||
| @ -43,6 +31,14 @@ jobs: | ||||
|         parallelism: 1 | ||||
|         steps: | ||||
|             - checkout | ||||
|             - run: if [[ "$CIRCLE_PULL_REQUEST" == "" && "$CIRCLE_BRANCH" != "main" && "$CIRCLE_BRANCH" != *-release ]]; then echo "Not a PR, not the main branch and not a release branch, skip test!"; circleci-agent step halt; fi | ||||
|             - run: 'curl -L -H "Accept: application/vnd.github+json" -H "X-GitHub-Api-Version: 2022-11-28" https://api.github.com/repos/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME/pulls/${CIRCLE_PULL_REQUEST##*/} >> github.txt' | ||||
|             - run: cat github.txt | ||||
|             - run: (python3 -c 'import json; from datetime import datetime; fp = open("github.txt"); data = json.load(fp); fp.close(); f = "%Y-%m-%dT%H:%M:%SZ"; created = datetime.strptime(data["created_at"], f); updated = datetime.strptime(data["updated_at"], f); s = (updated - created).total_seconds(); print(int(s))' || true) > elapsed.txt | ||||
|             - run: if [ "$(cat elapsed.txt)" == "" ]; then echo 60 > elapsed.txt; fi | ||||
|             - run: cat elapsed.txt | ||||
|             - run: if [ "$(cat elapsed.txt)" -lt "30" ]; then echo "PR is just opened, wait some actions from GitHub"; sleep 30; fi | ||||
|             - run: 'if grep -q "\"draft\": true," github.txt; then echo "draft mode, skip test!"; circleci-agent step halt; fi' | ||||
|             - run: uv pip install -U -e . | ||||
|             - run: echo 'export "GIT_COMMIT_MESSAGE=$(git show -s --format=%s)"' >> "$BASH_ENV" && source "$BASH_ENV" | ||||
|             - run: mkdir -p test_preparation | ||||
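For readability, the one-line `python3 -c` step above can be written out as a standalone sketch. It assumes the `github.txt` file produced by the preceding `curl` step, and mirrors the 60-second fallback and the draft check used in the surrounding steps; it is an illustration, not part of the configuration.

```python
# Standalone sketch of the inline Python/grep steps above: read the GitHub API
# response saved to github.txt, print the seconds elapsed between the PR's
# created_at and updated_at timestamps, and report whether the PR is a draft.
import json
from datetime import datetime

with open("github.txt") as fp:
    data = json.load(fp)

fmt = "%Y-%m-%dT%H:%M:%SZ"
try:
    created = datetime.strptime(data["created_at"], fmt)
    updated = datetime.strptime(data["updated_at"], fmt)
    elapsed = int((updated - created).total_seconds())
except (KeyError, ValueError):
    elapsed = 60  # same fallback value the CI step writes when parsing fails

print(elapsed)
print("draft" if data.get("draft") else "ready for review")
```

Waiting a short while on freshly opened PRs (the 30-second sleep above) appears to give GitHub time to attach its own metadata before CircleCI decides whether to run.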
| @ -112,6 +108,8 @@ jobs: | ||||
|  | ||||
|             - run: | ||||
|                 name: "Retrieve Artifact Paths" | ||||
|                 env: | ||||
|                     CIRCLE_TOKEN: ${{ secrets.CI_ARTIFACT_TOKEN }} | ||||
|                 command: | | ||||
|                     project_slug="gh/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}" | ||||
|                     job_number=${CIRCLE_BUILD_NUM} | ||||
| @ -184,7 +182,6 @@ jobs: | ||||
|             - run: python utils/check_dummies.py | ||||
|             - run: python utils/check_repo.py | ||||
|             - run: python utils/check_inits.py | ||||
|             - run: python utils/check_pipeline_typing.py | ||||
|             - run: python utils/check_config_docstrings.py | ||||
|             - run: python utils/check_config_attributes.py | ||||
|             - run: python utils/check_doctest_list.py | ||||
|  | ||||
| @ -28,8 +28,6 @@ COMMON_ENV_VARIABLES = { | ||||
|     "TRANSFORMERS_IS_CI": True, | ||||
|     "PYTEST_TIMEOUT": 120, | ||||
|     "RUN_PIPELINE_TESTS": False, | ||||
|     # will be adjust in `CircleCIJob.to_dict`. | ||||
|     "RUN_FLAKY": True, | ||||
| } | ||||
| # Disable the use of {"s": None} as the output is way too long, causing the navigation on CircleCI impractical | ||||
| COMMON_PYTEST_OPTIONS = {"max-worker-restart": 0, "vvv": None, "rsfE":None} | ||||
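The `RUN_FLAKY` entry above is overridden later in `to_dict` (shown further down in this diff) based on whether `CIRCLE_PULL_REQUEST` is set, so that tests marked as flaky do not run on pull requests. The snippet below is a hypothetical illustration of that kind of environment-variable gate; it is not the library's actual `is_flaky` implementation.

```python
# Hypothetical sketch of an environment-variable gate for flaky tests. It only
# illustrates honouring a RUN_FLAKY switch like the one set above; it is not
# the transformers implementation.
import os
import unittest


def skip_unless_flaky_enabled(test_item):
    """Skip the decorated test unless RUN_FLAKY is truthy."""
    run_flaky = os.environ.get("RUN_FLAKY", "true").lower() in ("1", "true", "yes")
    return unittest.skipUnless(run_flaky, "RUN_FLAKY is disabled for this run")(test_item)


class ExampleTests(unittest.TestCase):
    @skip_unless_flaky_enabled
    def test_sometimes_flaky(self):
        self.assertTrue(True)
```

Keeping the default in `COMMON_ENV_VARIABLES` and adjusting it per run keeps the decision in the CI config rather than in individual tests.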
| @ -109,9 +107,7 @@ class CircleCIJob: | ||||
|                 self.docker_image[0]["image"] = f"{self.docker_image[0]['image']}:dev" | ||||
|             print(f"Using {self.docker_image} docker image") | ||||
|         if self.install_steps is None: | ||||
|             self.install_steps = ["uv pip install ."] | ||||
|         # Use a custom patched pytest to force exit the process at the end, to avoid `Too long with no output (exceeded 10m0s): context deadline exceeded` | ||||
|         self.install_steps.append("uv pip install git+https://github.com/ydshieh/pytest.git@8.4.1-ydshieh") | ||||
|             self.install_steps = ["uv venv && uv pip install ."] | ||||
|         if self.pytest_options is None: | ||||
|             self.pytest_options = {} | ||||
|         if isinstance(self.tests_to_run, str): | ||||
| @ -130,8 +126,6 @@ class CircleCIJob: | ||||
|  | ||||
|     def to_dict(self): | ||||
|         env = COMMON_ENV_VARIABLES.copy() | ||||
|         # Do not run tests decorated by @is_flaky on pull requests | ||||
|         env['RUN_FLAKY'] = os.environ.get("CIRCLE_PULL_REQUEST", "") == "" | ||||
|         env.update(self.additional_env) | ||||
|  | ||||
|         job = { | ||||
| @ -177,29 +171,11 @@ class CircleCIJob: | ||||
|                     "command": f"TESTS=$(circleci tests split  --split-by=timings {self.job_name}_test_list.txt) && echo $TESTS > splitted_tests.txt && echo $TESTS | tr ' ' '\n'" if self.parallelism else f"awk '{{printf \"%s \", $0}}' {self.job_name}_test_list.txt > splitted_tests.txt" | ||||
|                     } | ||||
|             }, | ||||
|             # During the CircleCI docker images build time, we might already (or not) download the data. | ||||
|             # If it's done already, the files are inside the directory `/test_data/`. | ||||
|             {"run": {"name": "fetch hub objects before pytest", "command": "cp -r /test_data/* . 2>/dev/null || true; python3 utils/fetch_hub_objects_for_ci.py"}}, | ||||
|             {"run": {"name": "fetch hub objects before pytest", "command": "python3 utils/fetch_hub_objects_for_ci.py"}}, | ||||
|             {"run": { | ||||
|                 "name": "Run tests", | ||||
|                 "command": f"({timeout_cmd} python3 -m pytest {marker_cmd} -n {self.pytest_num_workers} {junit_flags} {repeat_on_failure_flags} {' '.join(pytest_flags)} $(cat splitted_tests.txt) | tee tests_output.txt)"} | ||||
|             }, | ||||
|             {"run": | ||||
|                 { | ||||
|                     "name": "Check for test crashes", | ||||
|                     "when": "always", | ||||
|                     "command": """if [ ! -f tests_output.txt ]; then | ||||
|                             echo "ERROR: tests_output.txt does not exist - tests may not have run properly" | ||||
|                             exit 1 | ||||
|                         elif grep -q "crashed and worker restarting disabled" tests_output.txt; then | ||||
|                             echo "ERROR: Worker crash detected in test output" | ||||
|                             echo "Found: crashed and worker restarting disabled" | ||||
|                             exit 1 | ||||
|                         else | ||||
|                             echo "Tests output file exists and no worker crashes detected" | ||||
|                         fi""" | ||||
|                 }, | ||||
|             }, | ||||
|             {"run": {"name": "Expand to show skipped tests", "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --skip"}}, | ||||
|             {"run": {"name": "Failed tests: show reasons",   "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --fail"}}, | ||||
|             {"run": {"name": "Errors",                       "when": "always", "command": f"python3 .circleci/parse_test_outputs.py --file tests_output.txt --errors"}}, | ||||
| @ -233,7 +209,7 @@ generate_job = CircleCIJob( | ||||
|     docker_image=[{"image": "huggingface/transformers-torch-light"}], | ||||
|     # networkx==3.3 (after #36957) cause some issues | ||||
|     # TODO: remove this once it works directly | ||||
|     install_steps=["uv pip install ."], | ||||
|     install_steps=["uv venv && uv pip install . && uv pip install networkx==3.2.1"], | ||||
|     marker="generate", | ||||
|     parallelism=6, | ||||
| ) | ||||
| @ -250,6 +226,22 @@ processor_job = CircleCIJob( | ||||
|     parallelism=8, | ||||
| ) | ||||
|  | ||||
| tf_job = CircleCIJob( | ||||
|     "tf", | ||||
|     docker_image=[{"image":"huggingface/transformers-tf-light"}], | ||||
|     parallelism=6, | ||||
| ) | ||||
|  | ||||
|  | ||||
| flax_job = CircleCIJob( | ||||
|     "flax", | ||||
|     docker_image=[{"image":"huggingface/transformers-jax-light"}], | ||||
|     parallelism=6, | ||||
|     pytest_num_workers=16, | ||||
|     resource_class="2xlarge", | ||||
| ) | ||||
|  | ||||
|  | ||||
| pipelines_torch_job = CircleCIJob( | ||||
|     "pipelines_torch", | ||||
|     additional_env={"RUN_PIPELINE_TESTS": True}, | ||||
| @ -258,27 +250,47 @@ pipelines_torch_job = CircleCIJob( | ||||
|     parallelism=4, | ||||
| ) | ||||
|  | ||||
|  | ||||
| pipelines_tf_job = CircleCIJob( | ||||
|     "pipelines_tf", | ||||
|     additional_env={"RUN_PIPELINE_TESTS": True}, | ||||
|     docker_image=[{"image":"huggingface/transformers-tf-light"}], | ||||
|     marker="is_pipeline_test", | ||||
|     parallelism=4, | ||||
| ) | ||||
|  | ||||
|  | ||||
| custom_tokenizers_job = CircleCIJob( | ||||
|     "custom_tokenizers", | ||||
|     additional_env={"RUN_CUSTOM_TOKENIZERS": True}, | ||||
|     docker_image=[{"image": "huggingface/transformers-custom-tokenizers"}], | ||||
| ) | ||||
|  | ||||
|  | ||||
| examples_torch_job = CircleCIJob( | ||||
|     "examples_torch", | ||||
|     additional_env={"OMP_NUM_THREADS": 8}, | ||||
|     docker_image=[{"image":"huggingface/transformers-examples-torch"}], | ||||
|     # TODO @ArthurZucker remove this once docker is easier to build | ||||
|     install_steps=["uv pip install . && uv pip install -r examples/pytorch/_tests_requirements.txt"], | ||||
|     install_steps=["uv venv && uv pip install . && uv pip install -r examples/pytorch/_tests_requirements.txt"], | ||||
|     pytest_num_workers=4, | ||||
| ) | ||||
|  | ||||
|  | ||||
| examples_tensorflow_job = CircleCIJob( | ||||
|     "examples_tensorflow", | ||||
|     additional_env={"OMP_NUM_THREADS": 8}, | ||||
|     docker_image=[{"image":"huggingface/transformers-examples-tf"}], | ||||
|     pytest_num_workers=2, | ||||
| ) | ||||
|  | ||||
|  | ||||
| hub_job = CircleCIJob( | ||||
|     "hub", | ||||
|     additional_env={"HUGGINGFACE_CO_STAGING": True}, | ||||
|     docker_image=[{"image":"huggingface/transformers-torch-light"}], | ||||
|     install_steps=[ | ||||
|         'uv pip install .', | ||||
|         'uv venv && uv pip install .', | ||||
|         'git config --global user.email "ci@dummy.com"', | ||||
|         'git config --global user.name "ci"', | ||||
|     ], | ||||
| @ -287,6 +299,20 @@ hub_job = CircleCIJob( | ||||
|     resource_class="medium", | ||||
| ) | ||||
|  | ||||
|  | ||||
| onnx_job = CircleCIJob( | ||||
|     "onnx", | ||||
|     docker_image=[{"image":"huggingface/transformers-torch-tf-light"}], | ||||
|     install_steps=[ | ||||
|         "uv venv", | ||||
|         "uv pip install .[torch,tf,testing,sentencepiece,onnxruntime,vision,rjieba]", | ||||
|     ], | ||||
|     pytest_options={"k onnx": None}, | ||||
|     pytest_num_workers=1, | ||||
|     resource_class="small", | ||||
| ) | ||||
|  | ||||
|  | ||||
| exotic_models_job = CircleCIJob( | ||||
|     "exotic_models", | ||||
|     docker_image=[{"image":"huggingface/transformers-exotic-models"}], | ||||
| @ -294,6 +320,7 @@ exotic_models_job = CircleCIJob( | ||||
|     pytest_options={"durations": 100}, | ||||
| ) | ||||
|  | ||||
|  | ||||
| repo_utils_job = CircleCIJob( | ||||
|     "repo_utils", | ||||
|     docker_image=[{"image":"huggingface/transformers-consistency"}], | ||||
| @ -301,12 +328,13 @@ repo_utils_job = CircleCIJob( | ||||
|     resource_class="large", | ||||
| ) | ||||
|  | ||||
|  | ||||
| non_model_job = CircleCIJob( | ||||
|     "non_model", | ||||
|     docker_image=[{"image": "huggingface/transformers-torch-light"}], | ||||
|     # networkx==3.3 (after #36957) cause some issues | ||||
|     # TODO: remove this once it works directly | ||||
|     install_steps=["uv pip install .[serving]"], | ||||
|     install_steps=["uv venv && uv pip install . && uv pip install networkx==3.2.1"], | ||||
|     marker="not generate", | ||||
|     parallelism=6, | ||||
| ) | ||||
| @ -324,7 +352,7 @@ doc_test_job = CircleCIJob( | ||||
|     additional_env={"TRANSFORMERS_VERBOSITY": "error", "DATASETS_VERBOSITY": "error", "SKIP_CUDA_DOCTEST": "1"}, | ||||
|     install_steps=[ | ||||
|         # Add an empty file to keep the test step running correctly even no file is selected to be tested. | ||||
|         "uv pip install .", | ||||
|         "uv venv && pip install .", | ||||
|         "touch dummy.py", | ||||
|         command, | ||||
|         "cat pr_documentation_tests_temp.txt", | ||||
| @ -336,7 +364,7 @@ doc_test_job = CircleCIJob( | ||||
|     pytest_num_workers=1, | ||||
| ) | ||||
|  | ||||
| REGULAR_TESTS = [torch_job, hub_job, tokenization_job, processor_job, generate_job, non_model_job] # fmt: skip | ||||
| REGULAR_TESTS = [torch_job, flax_job, hub_job, onnx_job, tokenization_job, processor_job, generate_job, non_model_job] # fmt: skip | ||||
| EXAMPLES_TESTS = [examples_torch_job] | ||||
| PIPELINE_TESTS = [pipelines_torch_job] | ||||
| REPO_UTIL_TESTS = [repo_utils_job] | ||||
| @ -365,12 +393,7 @@ def create_circleci_config(folder=None): | ||||
|         "parameters": { | ||||
|             # Only used to accept the parameters from the trigger | ||||
|             "nightly": {"type": "boolean", "default": False}, | ||||
|             # Only used to accept the parameters from GitHub Actions trigger | ||||
|             "GHA_Actor": {"type": "string", "default": ""}, | ||||
|             "GHA_Action": {"type": "string", "default": ""}, | ||||
|             "GHA_Event": {"type": "string", "default": ""}, | ||||
|             "GHA_Meta": {"type": "string", "default": ""}, | ||||
|             "tests_to_run": {"type": "string", "default": ""}, | ||||
|             "tests_to_run": {"type": "string", "default": ''}, | ||||
|             **{j.job_name + "_test_list":{"type":"string", "default":''} for j in jobs}, | ||||
|             **{j.job_name + "_parallelism":{"type":"integer", "default":1} for j in jobs}, | ||||
|         }, | ||||
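For clarity, the two dict comprehensions above add one `_test_list` and one `_parallelism` parameter per job. A small self-contained illustration follows; the job names are examples only, not the real list assembled by `create_circleci_config`.

```python
# Illustration of the per-job parameter expansion performed above.
example_job_names = ["torch", "generate"]  # example values, not the full set

parameters = {
    "nightly": {"type": "boolean", "default": False},
    "tests_to_run": {"type": "string", "default": ""},
}
parameters.update({name + "_test_list": {"type": "string", "default": ""} for name in example_job_names})
parameters.update({name + "_parallelism": {"type": "integer", "default": 1} for name in example_job_names})

# parameters now also contains "torch_test_list", "torch_parallelism", ...
print(sorted(parameters))
```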
|  | ||||

File: .github/ISSUE_TEMPLATE/bug-report.yml (vendored, 36 changes)
							| @ -16,7 +16,7 @@ body: | ||||
|     id: system-info | ||||
|     attributes: | ||||
|       label: System Info | ||||
|       description: Please share your system info with us. You can run the command `transformers env` and copy-paste its output below. | ||||
|       description: Please share your system info with us. You can run the command `transformers-cli env` and copy-paste its output below. | ||||
|       placeholder: transformers version, platform, python version, ... | ||||
|     validations: | ||||
|       required: true | ||||
| @ -36,23 +36,19 @@ body: | ||||
|  | ||||
|         Models: | ||||
|  | ||||
|           - text models: @ArthurZucker @Cyrilvallez | ||||
|           - vision models: @yonigozlan @molbap | ||||
|           - audio models: @eustlb @ebezzam @vasqu | ||||
|           - multimodal models: @zucchini-nlp | ||||
|           - text models: @ArthurZucker | ||||
|           - vision models: @amyeroberts, @qubvel | ||||
|           - speech models: @eustlb | ||||
|           - graph models: @clefourrier | ||||
|  | ||||
|         Library: | ||||
|  | ||||
|           - flax: @gante and @Rocketknight1 | ||||
|           - generate: @zucchini-nlp (visual-language models) or @gante (all others) | ||||
|           - continuous batching: @remi-or @ArthurZucker @McPatate | ||||
|           - pipelines: @Rocketknight1 | ||||
|           - tensorflow: @gante and @Rocketknight1 | ||||
|           - tokenizers: @ArthurZucker and @itazap | ||||
|           - trainer: @zach-huggingface @SunMarc | ||||
|           - attention: @vasqu @ArthurZucker @CyrilVallez | ||||
|           - model loading (from pretrained, etc): @CyrilVallez | ||||
|           - distributed: @3outeille @ArthurZucker @S1ro1 | ||||
|           - CIs: @ydshieh | ||||
|  | ||||
|         Integrations: | ||||
|  | ||||
| @ -60,13 +56,6 @@ body: | ||||
|           - ray/raytune: @richardliaw, @amogkam | ||||
|           - Big Model Inference: @SunMarc | ||||
|           - quantization (bitsandbytes, autogpt): @SunMarc @MekkCyber | ||||
|           - kernels: @MekkCyber @drbh | ||||
|          | ||||
|         Devices/Backends: | ||||
|          | ||||
|           - AMD ROCm: @ivarflakstad | ||||
|           - Intel XPU: @IlyasMoutawwakil | ||||
|           - Ascend NPU: @ivarflakstad  | ||||
|  | ||||
|         Documentation: @stevhliu | ||||
|  | ||||
| @ -74,6 +63,19 @@ body: | ||||
|  | ||||
|           - for issues with a model, report at https://discuss.huggingface.co/ and tag the model's creator. | ||||
|  | ||||
|         HF projects: | ||||
|  | ||||
|           - accelerate: [different repo](https://github.com/huggingface/accelerate) | ||||
|           - datasets: [different repo](https://github.com/huggingface/datasets) | ||||
|           - diffusers: [different repo](https://github.com/huggingface/diffusers) | ||||
|           - rust tokenizers: [different repo](https://github.com/huggingface/tokenizers) | ||||
|  | ||||
|         Maintained examples (not research project or legacy): | ||||
|  | ||||
|           - Flax: @Rocketknight1 | ||||
|           - PyTorch: See Models above and tag the person corresponding to the modality of the example. | ||||
|           - TensorFlow: @Rocketknight1 | ||||
|  | ||||
|         Research projects are not maintained and should be taken as is. | ||||
|  | ||||
|       placeholder: "@Username ..." | ||||
|  | ||||

File: .github/ISSUE_TEMPLATE/i18n.md (vendored, 2 changes)
							| @ -23,7 +23,7 @@ Some notes: | ||||
| * Please translate in a gender-neutral way. | ||||
| * Add your translations to the folder called `<languageCode>` inside the [source folder](https://github.com/huggingface/transformers/tree/main/docs/source). | ||||
| * Register your translation in `<languageCode>/_toctree.yml`; please follow the order of the [English version](https://github.com/huggingface/transformers/blob/main/docs/source/en/_toctree.yml). | ||||
| * Once you're finished, open a pull request and tag this issue by including #issue-number in the description, where issue-number is the number of this issue. Please ping @stevhliu for review. | ||||
| * Once you're finished, open a pull request and tag this issue by including #issue-number in the description, where issue-number is the number of this issue. Please ping @stevhliu and @MKhalusova for review. | ||||
| * 🙋 If you'd like others to help you with the translation, you can also post in the 🤗 [forums](https://discuss.huggingface.co/). | ||||
|  | ||||
| ## Get Started section | ||||
|  | ||||

File: .github/ISSUE_TEMPLATE/migration.yml (vendored, 2 changes)
							| @ -6,7 +6,7 @@ body: | ||||
|     id: system-info | ||||
|     attributes: | ||||
|       label: System Info | ||||
|       description: Please share your system info with us. You can run the command `transformers env` and copy-paste its output below. | ||||
|       description: Please share your system info with us. You can run the command `transformers-cli env` and copy-paste its output below. | ||||
|       render: shell | ||||
|       placeholder: transformers version, platform, python version, ... | ||||
|     validations: | ||||
|  | ||||

File: .github/PULL_REQUEST_TEMPLATE.md (vendored, 2 changes)
							| @ -51,7 +51,7 @@ Library: | ||||
| - pipelines: @Rocketknight1 | ||||
| - tensorflow: @gante and @Rocketknight1 | ||||
| - tokenizers: @ArthurZucker | ||||
| - trainer: @zach-huggingface, @SunMarc and @qgallouedec | ||||
| - trainer: @zach-huggingface and @SunMarc | ||||
| - chat templates: @Rocketknight1 | ||||
|  | ||||
| Integrations: | ||||
|  | ||||

File: .github/copilot-instructions.md (vendored, 39 changes)
							| @ -1,39 +0,0 @@ | ||||
| # copilot-instructions.md Guide for Hugging Face Transformers | ||||
|  | ||||
| This copilot-instructions.md file provides guidance for code agents working with this codebase. | ||||
|  | ||||
| ## Core Project Structure | ||||
|  | ||||
| - `/src/transformers`: This contains the core source code for the library | ||||
|   - `/models`: Code for individual models. Models inherit from base classes in the root `/src/transformers` directory. | ||||
| - `/tests`: This contains the core test classes for the library. These are usually inherited rather than directly run. | ||||
|   - `/models`: Tests for individual models. Model tests inherit from common tests in the root `/tests` directory. | ||||
| - `/docs`: This contains the documentation for the library, including guides, tutorials, and API references. | ||||
|  | ||||
| ## Coding Conventions for Hugging Face Transformers | ||||
|  | ||||
| - PRs should be as brief as possible. Bugfix PRs in particular can often be only one or two lines long, and do not need large comments, docstrings or new functions in this case. Aim to minimize the size of the diff. | ||||
| - When writing tests, they should be added to an existing file. The only exception is for PRs to add a new model, when a new test directory should be created for that model. | ||||
| - Code style is enforced in the CI. You can install the style tools with `pip install -e .[quality]`. You can then run `make fixup` to apply style and consistency fixes to your code. | ||||
|  | ||||
| ## Copying and inheritance | ||||
|  | ||||
| Many models in the codebase have similar code, but it is not shared by inheritance because we want each model file to be self-contained. | ||||
| We use two mechanisms to keep this code in sync: | ||||
|  | ||||
| - "Copied from" syntax. Functions or entire classes can have a comment at the top like this: `# Copied from transformers.models.llama.modeling_llama.rotate_half` or `# Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->MT5` | ||||
|   These comments are actively checked by the style tools, and copies will automatically be updated when the base code is updated. If you need to update a copied function, you should | ||||
|   either update the base function and use `make fixup` to propagate the change to all copies, or simply remove the `# Copied from` comment if that is inappropriate. | ||||
| - "Modular" files. These files briefly define models by composing them using inheritance from other models. They are not meant to be used directly. Instead, the style tools | ||||
|   automatically generate a complete modeling file, like `modeling_bert.py`, from the modular file like `modular_bert.py`. If a model has a modular file, the modeling file | ||||
|   should never be edited directly! Instead, changes should be made in the modular file, and then you should run `make fixup` to update the modeling file automatically. | ||||
|  | ||||
| When adding new models, you should prefer `modular` style and inherit as many classes as possible from existing models. | ||||
|  | ||||
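As a concrete illustration of the `# Copied from` marker described in the bullets above (the function below simply shows the pattern for the `rotate_half` example named in the text):

```python
# Example of the "Copied from" convention described above: the comment tells the
# repo's style tooling that this function is a literal copy of the referenced
# source, so `make fixup` can re-sync it whenever the original changes.
import torch


# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)
```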
| ## Testing | ||||
|  | ||||
| After making changes, you should usually run `make fixup` to ensure any copies and modular files are updated, and then test all affected models. This includes both | ||||
| the model you made the changes in and any other models that were updated by `make fixup`. Tests can be run with `pytest tests/models/[name]/test_modeling_[name].py` | ||||
| If your changes affect code in other classes like tokenizers or processors, you should run those tests instead, like `test_processing_[name].py` or `test_tokenization_[name].py`. | ||||
|  | ||||
| In order to run tests, you may need to install dependencies. You can do this with `pip install -e .[testing]`. You will probably also need to `pip install torch accelerate` if your environment does not already have them. | ||||

File: .github/scripts/assign_reviewers.py (vendored, 18 changes)
							| @ -54,21 +54,6 @@ def get_file_owners(file_path, codeowners_lines): | ||||
|             return owners  # Remember, can still be empty! | ||||
|     return []  # Should never happen, but just in case | ||||
|  | ||||
| def pr_author_is_in_hf(pr_author, codeowners_lines): | ||||
|     # Check if the PR author is in the codeowners file | ||||
|     for line in codeowners_lines: | ||||
|         line = line.split('#')[0].strip() | ||||
|         if not line: | ||||
|             continue | ||||
|  | ||||
|         # Split into pattern and owners | ||||
|         parts = line.split() | ||||
|         owners = [owner.removeprefix("@") for owner in parts[1:]] | ||||
|  | ||||
|         if pr_author in owners: | ||||
|             return True | ||||
|     return False | ||||
|  | ||||
| def main(): | ||||
|     script_dir = Path(__file__).parent.absolute() | ||||
|     with open(script_dir / "codeowners_for_review_action") as f: | ||||
| @ -83,9 +68,6 @@ def main(): | ||||
|     pr_number = event['pull_request']['number'] | ||||
|     pr = repo.get_pull(pr_number) | ||||
|     pr_author = pr.user.login | ||||
|     if pr_author_is_in_hf(pr_author, codeowners_lines): | ||||
|         print(f"PR author {pr_author} is in codeowners, skipping review request.") | ||||
|         return | ||||
|  | ||||
|     existing_reviews = list(pr.get_reviews()) | ||||
|     if existing_reviews: | ||||
|  | ||||

File: .github/workflows/add-model-like.yml (vendored, 2 changes)
							| @ -54,7 +54,7 @@ jobs: | ||||
|       - name: Create model files | ||||
|         run: | | ||||
|           . ~/venv/bin/activate | ||||
|           transformers add-new-model-like --config_file tests/fixtures/add_distilbert_like_config.json --path_to_repo . | ||||
|           transformers-cli add-new-model-like --config_file tests/fixtures/add_distilbert_like_config.json --path_to_repo . | ||||
|           make style | ||||
|           make fix-copies | ||||
|  | ||||
|  | ||||

File: .github/workflows/benchmark.yml (vendored, 4 changes)
							| @ -48,7 +48,7 @@ jobs: | ||||
|  | ||||
|       - name: Run database init script | ||||
|         run: | | ||||
|           psql -f benchmark/utils/init_db.sql | ||||
|           psql -f benchmark/init_db.sql | ||||
|         env: | ||||
|           PGDATABASE: metrics | ||||
|           PGHOST: ${{ secrets.TRANSFORMERS_BENCHMARKS_PGHOST }} | ||||
| @ -64,7 +64,7 @@ jobs: | ||||
|             commit_id=$GITHUB_SHA | ||||
|           fi | ||||
|           commit_msg=$(git show -s --format=%s | cut -c1-70) | ||||
|           python3 benchmark/benchmarks_entrypoint.py "huggingface/transformers" "$BRANCH_NAME" "$commit_id" "$commit_msg" | ||||
|           python3 benchmark/benchmarks_entrypoint.py "$BRANCH_NAME" "$commit_id" "$commit_msg" | ||||
|         env: | ||||
|           HF_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }} | ||||
|           # Enable this to see debug logs | ||||
|  | ||||

File: .github/workflows/build-ci-docker-images.yml (vendored, 2 changes)
							| @ -26,7 +26,7 @@ jobs: | ||||
|  | ||||
|     strategy: | ||||
|       matrix: | ||||
|         file: ["quality", "consistency", "custom-tokenizers", "torch-light", "exotic-models", "examples-torch"] | ||||
|         file: ["quality", "consistency", "custom-tokenizers", "torch-light", "tf-light", "exotic-models", "torch-tf-light", "jax-light", "examples-torch",  "examples-tf"] | ||||
|     continue-on-error: true | ||||
|  | ||||
|     steps: | ||||
|  | ||||

File: .github/workflows/build-docker-images.yml (vendored, 70 changes)
							| @ -19,7 +19,7 @@ concurrency: | ||||
|  | ||||
| jobs: | ||||
|   latest-docker: | ||||
|     name: "Latest PyTorch [dev]" | ||||
|     name: "Latest PyTorch + TensorFlow [dev]" | ||||
|     runs-on: | ||||
|       group: aws-general-8-plus | ||||
|     steps: | ||||
| @ -63,14 +63,14 @@ jobs: | ||||
|         uses: huggingface/hf-workflows/.github/actions/post-slack@main | ||||
|         with: | ||||
|           slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }} | ||||
|           title: 🤗 Results of the transformers-all-latest-gpu-push-ci docker build | ||||
|           title: 🤗 Results of the transformers-all-latest-gpu-push-ci docker build  | ||||
|           status: ${{ job.status }} | ||||
|           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }} | ||||
|  | ||||
|   latest-torch-deepspeed-docker: | ||||
|     name: "Latest PyTorch + DeepSpeed" | ||||
|     runs-on: | ||||
|       group: aws-g4dn-2xlarge-cache | ||||
|       group: aws-general-8-plus | ||||
|     steps: | ||||
|       - | ||||
|         name: Set up Docker Buildx | ||||
| @ -99,7 +99,7 @@ jobs: | ||||
|         uses: huggingface/hf-workflows/.github/actions/post-slack@main | ||||
|         with: | ||||
|           slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER}} | ||||
|           title: 🤗 Results of the transformers-pytorch-deepspeed-latest-gpu docker build | ||||
|           title: 🤗 Results of the transformers-pytorch-deepspeed-latest-gpu docker build  | ||||
|           status: ${{ job.status }} | ||||
|           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }} | ||||
|  | ||||
| @ -140,7 +140,7 @@ jobs: | ||||
|         uses: huggingface/hf-workflows/.github/actions/post-slack@main | ||||
|         with: | ||||
|           slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }} | ||||
|           title: 🤗 Results of the transformers-pytorch-deepspeed-latest-gpu-push-ci docker build | ||||
|           title: 🤗 Results of the transformers-pytorch-deepspeed-latest-gpu-push-ci docker build  | ||||
|           status: ${{ job.status }} | ||||
|           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }} | ||||
|  | ||||
| @ -176,7 +176,7 @@ jobs: | ||||
|         uses: huggingface/hf-workflows/.github/actions/post-slack@main | ||||
|         with: | ||||
|           slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }} | ||||
|           title: 🤗 Results of the huggingface/transformers-doc-builder docker build | ||||
|           title: 🤗 Results of the huggingface/transformers-doc-builder docker build  | ||||
|           status: ${{ job.status }} | ||||
|           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }} | ||||
|  | ||||
| @ -214,7 +214,7 @@ jobs: | ||||
|         uses: huggingface/hf-workflows/.github/actions/post-slack@main | ||||
|         with: | ||||
|           slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }} | ||||
|           title: 🤗 Results of the huggingface/transformers-pytorch-gpudocker build | ||||
|           title: 🤗 Results of the huggingface/transformers-pytorch-gpudocker build  | ||||
|           status: ${{ job.status }} | ||||
|           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }} | ||||
|  | ||||
| @ -223,19 +223,19 @@ jobs: | ||||
|     runs-on: | ||||
|       group: aws-general-8-plus | ||||
|     steps: | ||||
|       - | ||||
|       -  | ||||
|         name: Set up Docker Buildx | ||||
|         uses: docker/setup-buildx-action@v3 | ||||
|       - | ||||
|       -  | ||||
|         name: Check out code | ||||
|         uses: actions/checkout@v4 | ||||
|       - | ||||
|       -  | ||||
|         name: Login to DockerHub | ||||
|         uses: docker/login-action@v3 | ||||
|         with: | ||||
|           username: ${{ secrets.DOCKERHUB_USERNAME }} | ||||
|           password: ${{ secrets.DOCKERHUB_PASSWORD }} | ||||
|       - | ||||
|       -  | ||||
|         name: Build and push | ||||
|         uses: docker/build-push-action@v5 | ||||
|         with: | ||||
| @ -263,12 +263,14 @@ jobs: | ||||
|         uses: huggingface/hf-workflows/.github/actions/post-slack@main | ||||
|         with: | ||||
|           slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }} | ||||
|           title: 🤗 Results of the huggingface/transformers-pytorch-amd-gpu-push-ci build | ||||
|           title: 🤗 Results of the huggingface/transformers-pytorch-amd-gpu-push-ci build  | ||||
|           status: ${{ job.status }} | ||||
|           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }} | ||||
|  | ||||
|   latest-pytorch-deepspeed-amd: | ||||
|     name: "PyTorch + DeepSpeed (AMD) [dev]" | ||||
|   latest-tensorflow: | ||||
|     name: "Latest TensorFlow [dev]" | ||||
|     # Push CI doesn't need this image | ||||
|     if: inputs.image_postfix != '-push-ci' | ||||
|     runs-on: | ||||
|       group: aws-general-8-plus | ||||
|     steps: | ||||
| @ -285,6 +287,42 @@ jobs: | ||||
|           username: ${{ secrets.DOCKERHUB_USERNAME }} | ||||
|           password: ${{ secrets.DOCKERHUB_PASSWORD }} | ||||
|       - | ||||
|         name: Build and push | ||||
|         uses: docker/build-push-action@v5 | ||||
|         with: | ||||
|           context: ./docker/transformers-tensorflow-gpu | ||||
|           build-args: | | ||||
|             REF=main | ||||
|           push: true | ||||
|           tags: huggingface/transformers-tensorflow-gpu | ||||
|  | ||||
|       - name: Post to Slack | ||||
|         if: always() | ||||
|         uses: huggingface/hf-workflows/.github/actions/post-slack@main | ||||
|         with: | ||||
|           slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }} | ||||
|           title: 🤗 Results of the huggingface/transformers-tensorflow-gpu build  | ||||
|           status: ${{ job.status }} | ||||
|           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }} | ||||
|  | ||||
|   latest-pytorch-deepspeed-amd: | ||||
|     name: "PyTorch + DeepSpeed (AMD) [dev]" | ||||
|     runs-on: | ||||
|       group: aws-general-8-plus | ||||
|     steps: | ||||
|       -  | ||||
|         name: Set up Docker Buildx | ||||
|         uses: docker/setup-buildx-action@v3 | ||||
|       -  | ||||
|         name: Check out code | ||||
|         uses: actions/checkout@v4 | ||||
|       -  | ||||
|         name: Login to DockerHub | ||||
|         uses: docker/login-action@v3 | ||||
|         with: | ||||
|           username: ${{ secrets.DOCKERHUB_USERNAME }} | ||||
|           password: ${{ secrets.DOCKERHUB_PASSWORD }} | ||||
|       -  | ||||
|         name: Build and push | ||||
|         uses: docker/build-push-action@v5 | ||||
|         with: | ||||
| @ -312,7 +350,7 @@ jobs: | ||||
|         uses: huggingface/hf-workflows/.github/actions/post-slack@main | ||||
|         with: | ||||
|           slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }} | ||||
|           title: 🤗 Results of the transformers-pytorch-deepspeed-amd-gpu build | ||||
|           title: 🤗 Results of the transformers-pytorch-deepspeed-amd-gpu build  | ||||
|           status: ${{ job.status }} | ||||
|           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }} | ||||
|  | ||||
| @ -350,6 +388,6 @@ jobs: | ||||
|         uses: huggingface/hf-workflows/.github/actions/post-slack@main | ||||
|         with: | ||||
|           slack_channel: ${{ secrets.CI_SLACK_CHANNEL_DOCKER }} | ||||
|           title: 🤗 Results of the transformers-quantization-latest-gpu build | ||||
|           title: 🤗 Results of the transformers-quantization-latest-gpu build  | ||||
|           status: ${{ job.status }} | ||||
|           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }} | ||||
|  | ||||
| @ -2,10 +2,6 @@ name: Build docker images (Nightly CI) | ||||
|  | ||||
| on: | ||||
|   workflow_call: | ||||
|     inputs: | ||||
|       job: | ||||
|         required: true | ||||
|         type: string | ||||
|   push: | ||||
|     branches: | ||||
|       - build_nightly_ci_docker_image* | ||||
| @ -16,8 +12,7 @@ concurrency: | ||||
|  | ||||
| jobs: | ||||
|   latest-with-torch-nightly-docker: | ||||
|     name: "Nightly PyTorch" | ||||
|     if: inputs.job == 'latest-with-torch-nightly-docker' || inputs.job == '' | ||||
|     name: "Nightly PyTorch + Stable TensorFlow" | ||||
|     runs-on: | ||||
|       group: aws-general-8-plus | ||||
|     steps: | ||||
| @ -46,9 +41,8 @@ jobs: | ||||
|  | ||||
|   nightly-torch-deepspeed-docker: | ||||
|     name: "Nightly PyTorch + DeepSpeed" | ||||
|     if: inputs.job == 'nightly-torch-deepspeed-docker' || inputs.job == '' | ||||
|     runs-on: | ||||
|       group: aws-g4dn-2xlarge-cache | ||||
|       group: aws-general-8-plus | ||||
|     steps: | ||||
|       - | ||||
|         name: Set up Docker Buildx | ||||
|  | ||||

File: .github/workflows/build_pr_documentation.yml (vendored, 2 changes)
							| @ -14,4 +14,4 @@ jobs: | ||||
|       commit_sha: ${{ github.event.pull_request.head.sha }} | ||||
|       pr_number: ${{ github.event.number }} | ||||
|       package: transformers | ||||
|       languages: en | ||||
|       languages: ar de en es fr hi it ko pt tr zh ja te | ||||
|  | ||||

File: .github/workflows/change_pr_to_draft.yml (vendored, new file, 25 changes)
							| @ -0,0 +1,25 @@ | ||||
| name: Change PR to draft | ||||
|  | ||||
| on: | ||||
|   pull_request_target: | ||||
|     types: [opened, reopened] | ||||
|  | ||||
| jobs: | ||||
|   convert_pr_to_draft: | ||||
|     runs-on: ubuntu-22.04 | ||||
|     name: Convert PR to draft | ||||
|     permissions: | ||||
|       pull-requests: write | ||||
|       contents: write | ||||
|     if: github.event.pull_request.draft == false | ||||
|     steps: | ||||
|       - name: Convert PR to draft | ||||
|         shell: bash | ||||
|         env: | ||||
|           PR_NUMBER: ${{ github.event.number }} | ||||
|           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | ||||
|           REPO: ${{ github.repository }} | ||||
|         run: | | ||||
|           echo $PR_NUMBER | ||||
|           gh pr ready $PR_NUMBER --repo $REPO --undo | ||||
|           gh pr comment $PR_NUMBER --repo $REPO --body "Hi 👋, thank you for opening this pull request! The pull request is converted to draft by default. The CI will be paused while the PR is in draft mode. When it is ready for review, please click the \`Ready for review\` button (at the bottom of the PR page). This will assign reviewers and trigger CI." | ||||
| @ -9,21 +9,6 @@ on: | ||||
|       start_sha: | ||||
|         required: true | ||||
|         type: string | ||||
|       job: | ||||
|         required: true | ||||
|         type: string | ||||
|       slack_report_channel: | ||||
|         required: true | ||||
|         type: string | ||||
|       ci_event: | ||||
|         required: true | ||||
|         type: string | ||||
|       report_repo_id: | ||||
|         required: true | ||||
|         type: string | ||||
|       commit_sha: | ||||
|         required: false | ||||
|         type: string | ||||
|  | ||||
|  | ||||
| env: | ||||
| @ -41,27 +26,27 @@ env: | ||||
|  | ||||
|  | ||||
| jobs: | ||||
|   check_new_failures: | ||||
|   run_models_gpu: | ||||
|     name: " " | ||||
|     runs-on: | ||||
|       group: aws-g5-4xlarge-cache | ||||
|       group: aws-g4dn-4xlarge-cache | ||||
|     container: | ||||
|       image: ${{ inputs.docker }} | ||||
|       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|     steps: | ||||
|       - uses: actions/download-artifact@v4 | ||||
|         with: | ||||
|           name: ci_results_${{ inputs.job }} | ||||
|           path: /transformers/ci_results_${{ inputs.job }} | ||||
|           name: ci_results_run_models_gpu | ||||
|           path: /transformers/ci_results_run_models_gpu | ||||
|  | ||||
|       - name: Check file | ||||
|         working-directory: /transformers | ||||
|         run: | | ||||
|           if [ -f ci_results_${{ inputs.job }}/new_failures.json ]; then | ||||
|             echo "`ci_results_${{ inputs.job }}/new_failures.json` exists, continue ..." | ||||
|           if [ -f ci_results_run_models_gpu/new_model_failures.json ]; then | ||||
|             echo "`ci_results_run_models_gpu/new_model_failures.json` exists, continue ..." | ||||
|             echo "process=true" >> $GITHUB_ENV | ||||
|           else | ||||
|             echo "`ci_results_${{ inputs.job }}/new_failures.json` doesn't exist, abort." | ||||
|             echo "`ci_results_run_models_gpu/new_model_failures.json` doesn't exist, abort." | ||||
|             echo "process=false" >> $GITHUB_ENV | ||||
|           fi | ||||
|  | ||||
| @ -90,7 +75,7 @@ jobs: | ||||
|       - name: Update clone | ||||
|         working-directory: /transformers | ||||
|         if: ${{ env.process == 'true' }} | ||||
|         run: git fetch && git checkout ${{ inputs.commit_sha || github.sha }} | ||||
|         run: git fetch && git checkout ${{ github.sha }} | ||||
|  | ||||
|       - name: Get target commit | ||||
|         working-directory: /transformers/utils | ||||
| @ -127,14 +112,15 @@ jobs: | ||||
|       - name: Check failed tests | ||||
|         working-directory: /transformers | ||||
|         if: ${{ env.process == 'true' }} | ||||
|         run: python3 utils/check_bad_commit.py --start_commit ${{ inputs.start_sha }} --end_commit ${{ env.END_SHA }} --file ci_results_${{ inputs.job }}/new_failures.json --output_file new_failures_with_bad_commit.json | ||||
|         # how to run multiple ones? | ||||
|         run: python3 utils/check_bad_commit.py --start_commit ${{ inputs.start_sha }} --end_commit ${{ env.END_SHA }} --file ci_results_run_models_gpu/new_model_failures.json --output_file new_model_failures_with_bad_commit.json | ||||
|  | ||||
|       - name: Show results | ||||
|         working-directory: /transformers | ||||
|         if: ${{ env.process == 'true' }} | ||||
|         run: | | ||||
|           ls -l new_failures_with_bad_commit.json | ||||
|           cat new_failures_with_bad_commit.json | ||||
|           ls -l new_model_failures_with_bad_commit.json | ||||
|           cat new_model_failures_with_bad_commit.json | ||||
|  | ||||
|       - name: Checkout back | ||||
|         working-directory: /transformers | ||||
| @ -149,8 +135,6 @@ jobs: | ||||
|         env: | ||||
|           ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }} | ||||
|           TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN: ${{ secrets.TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN }} | ||||
|           JOB_NAME: ${{ inputs.job }} | ||||
|           REPORT_REPO_ID: ${{ inputs.report_repo_id }} | ||||
|         run: | | ||||
|           python3 utils/process_bad_commit_report.py | ||||
|  | ||||
| @ -161,8 +145,6 @@ jobs: | ||||
|         env: | ||||
|           ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }} | ||||
|           TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN: ${{ secrets.TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN }} | ||||
|           JOB_NAME: ${{ inputs.job }} | ||||
|           REPORT_REPO_ID: ${{ inputs.report_repo_id }} | ||||
|         run: | | ||||
|           { | ||||
|             echo 'REPORT_TEXT<<EOF' | ||||
| @ -170,31 +152,17 @@ jobs: | ||||
|             echo EOF | ||||
|           } >> "$GITHUB_ENV" | ||||
|  | ||||
|       - name: Prepare Slack report title | ||||
|         working-directory: /transformers | ||||
|         if: ${{ env.process == 'true' }} | ||||
|         run: | | ||||
|           pip install slack_sdk | ||||
|           echo "title=$(python3 -c 'import sys; sys.path.append("utils"); from utils.notification_service import job_to_test_map; ci_event = "${{ inputs.ci_event }}"; job = "${{ inputs.job }}"; test_name = job_to_test_map[job]; title = f"New failed tests of {ci_event}" + ":" + f" {test_name}"; print(title)')" >> $GITHUB_ENV | ||||
|  | ||||
|       - name: Send processed report | ||||
|         if: ${{ env.process == 'true' && !endsWith(env.REPORT_TEXT, '{}') }} | ||||
|         uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001 | ||||
|         with: | ||||
|           # Slack channel id, channel name, or user id to post message. | ||||
|           # See also: https://api.slack.com/methods/chat.postMessage#channels | ||||
|           channel-id: '#${{ inputs.slack_report_channel }}' | ||||
|           channel-id: '#transformers-ci-feedback-tests' | ||||
|           # For posting a rich message using Block Kit | ||||
|           payload: | | ||||
|             { | ||||
|               "blocks": [ | ||||
|                 { | ||||
|                   "type": "header", | ||||
|                   "text": { | ||||
|                     "type": "plain_text", | ||||
|                     "text": "${{ env.title }}" | ||||
|                   } | ||||
|                 }, | ||||
|                 { | ||||
|                   "type": "section", | ||||
|                   "text": { | ||||
							
								
								
									
										43
									
								
								.github/workflows/collated-reports.yml
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										43
									
								
								.github/workflows/collated-reports.yml
									
									
									
									
										vendored
									
									
								
							| @ -1,43 +0,0 @@ | ||||
| name: CI collated reports | ||||
|  | ||||
| on: | ||||
|   workflow_call: | ||||
|     inputs: | ||||
|       job: | ||||
|         required: true | ||||
|         type: string | ||||
|       report_repo_id: | ||||
|         required: true | ||||
|         type: string | ||||
|       machine_type: | ||||
|         required: true | ||||
|         type: string | ||||
|       gpu_name: | ||||
|         description: Name of the GPU used for the job. Its enough that the value contains the name of the GPU, e.g. "noise-h100-more-noise". Case insensitive. | ||||
|         required: true | ||||
|         type: string | ||||
|  | ||||
| jobs: | ||||
|   collated_reports: | ||||
|     name: Collated reports | ||||
|     runs-on: ubuntu-22.04 | ||||
|     if: always() | ||||
|     steps: | ||||
|       - uses: actions/checkout@v4 | ||||
|       - uses: actions/download-artifact@v4 | ||||
|  | ||||
|       - name: Collated reports | ||||
|         shell: bash | ||||
|         env: | ||||
|           ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }} | ||||
|           CI_SHA: ${{ github.sha }} | ||||
|           TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN: ${{ secrets.TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN }} | ||||
|         run: | | ||||
|           pip install huggingface_hub | ||||
|           python3 utils/collated_reports.py                  \ | ||||
|             --path .                                         \ | ||||
|             --machine-type ${{ inputs.machine_type }}        \ | ||||
|             --commit-hash ${{ env.CI_SHA }}                  \ | ||||
|             --job ${{ inputs.job }}                          \ | ||||
|             --report-repo-id ${{ inputs.report_repo_id }}    \ | ||||
|             --gpu-name ${{ inputs.gpu_name }} | ||||

File: .github/workflows/doctest_job.yml (vendored, 4 changes)
							| @ -28,10 +28,10 @@ jobs: | ||||
|       matrix: | ||||
|         split_keys: ${{ fromJson(inputs.split_keys) }} | ||||
|     runs-on:  | ||||
|       group: aws-g5-4xlarge-cache | ||||
|       group: aws-g4dn-2xlarge-cache | ||||
|     container: | ||||
|       image: huggingface/transformers-all-latest-gpu | ||||
|       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|     steps: | ||||
|       - name: Update clone | ||||
|         working-directory: /transformers | ||||
|  | ||||

File: .github/workflows/doctests.yml (vendored, 4 changes)
							| @ -15,10 +15,10 @@ jobs: | ||||
|   setup: | ||||
|     name: Setup | ||||
|     runs-on:  | ||||
|       group: aws-g5-4xlarge-cache | ||||
|       group: aws-g4dn-2xlarge-cache | ||||
|     container: | ||||
|       image: huggingface/transformers-all-latest-gpu | ||||
|       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|     outputs: | ||||
|       job_splits: ${{ steps.set-matrix.outputs.job_splits }} | ||||
|       split_keys: ${{ steps.set-matrix.outputs.split_keys }} | ||||
|  | ||||

File: .github/workflows/get-pr-info.yml (vendored, 157 changes)
							| @ -1,157 +0,0 @@ | ||||
| name: Get PR commit SHA | ||||
| on: | ||||
|   workflow_call: | ||||
|     inputs: | ||||
|       pr_number: | ||||
|         required: true | ||||
|         type: string | ||||
|     outputs: | ||||
|       PR_HEAD_REPO_FULL_NAME: | ||||
|         description: "The full name of the repository from which the pull request is created" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_HEAD_REPO_FULL_NAME }} | ||||
|       PR_BASE_REPO_FULL_NAME: | ||||
|         description: "The full name of the repository to which the pull request is created" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_BASE_REPO_FULL_NAME }} | ||||
|       PR_HEAD_REPO_OWNER: | ||||
|         description: "The owner of the repository from which the pull request is created" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_HEAD_REPO_OWNER }} | ||||
|       PR_BASE_REPO_OWNER: | ||||
|         description: "The owner of the repository to which the pull request is created" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_BASE_REPO_OWNER }} | ||||
|       PR_HEAD_REPO_NAME: | ||||
|         description: "The name of the repository from which the pull request is created" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_HEAD_REPO_NAME }} | ||||
|       PR_BASE_REPO_NAME: | ||||
|         description: "The name of the repository to which the pull request is created" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_BASE_REPO_NAME }} | ||||
|       PR_HEAD_REF: | ||||
|         description: "The branch name of the pull request in the head repository" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_HEAD_REF }} | ||||
|       PR_BASE_REF: | ||||
|         description: "The branch name in the base repository (to merge into)" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_BASE_REF }} | ||||
|       PR_HEAD_SHA: | ||||
|         description: "The head sha of the pull request branch in the head repository" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_HEAD_SHA }} | ||||
|       PR_BASE_SHA: | ||||
|         description: "The head sha of the target branch in the base repository" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_BASE_SHA }} | ||||
|       PR_MERGE_COMMIT_SHA: | ||||
|         description: "The sha of the merge commit for the pull request (created by GitHub) in the base repository" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_MERGE_COMMIT_SHA }} | ||||
|       PR_HEAD_COMMIT_DATE: | ||||
|         description: "The date of the head sha of the pull request branch in the head repository" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_HEAD_COMMIT_DATE }} | ||||
|       PR_MERGE_COMMIT_DATE: | ||||
|         description: "The date of the merge commit for the pull request (created by GitHub) in the base repository" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_MERGE_COMMIT_DATE }} | ||||
|       PR_HEAD_COMMIT_TIMESTAMP: | ||||
|         description: "The timestamp of the head sha of the pull request branch in the head repository" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_HEAD_COMMIT_TIMESTAMP }} | ||||
|       PR_MERGE_COMMIT_TIMESTAMP: | ||||
|         description: "The timestamp of the merge commit for the pull request (created by GitHub) in the base repository" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_MERGE_COMMIT_TIMESTAMP }} | ||||
|       PR: | ||||
|         description: "The PR" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR }} | ||||
|       PR_FILES: | ||||
|         description: "The files touched in the PR" | ||||
|         value: ${{ jobs.get-pr-info.outputs.PR_FILES }} | ||||
|  | ||||
|  | ||||
| jobs: | ||||
|   get-pr-info: | ||||
|     runs-on: ubuntu-22.04 | ||||
|     name: Get PR commit SHA better | ||||
|     outputs: | ||||
|       PR_HEAD_REPO_FULL_NAME: ${{ steps.pr_info.outputs.head_repo_full_name }} | ||||
|       PR_BASE_REPO_FULL_NAME: ${{ steps.pr_info.outputs.base_repo_full_name }} | ||||
|       PR_HEAD_REPO_OWNER: ${{ steps.pr_info.outputs.head_repo_owner }} | ||||
|       PR_BASE_REPO_OWNER: ${{ steps.pr_info.outputs.base_repo_owner }} | ||||
|       PR_HEAD_REPO_NAME: ${{ steps.pr_info.outputs.head_repo_name }} | ||||
|       PR_BASE_REPO_NAME: ${{ steps.pr_info.outputs.base_repo_name }} | ||||
|       PR_HEAD_REF: ${{ steps.pr_info.outputs.head_ref }} | ||||
|       PR_BASE_REF: ${{ steps.pr_info.outputs.base_ref }} | ||||
|       PR_HEAD_SHA: ${{ steps.pr_info.outputs.head_sha }} | ||||
|       PR_BASE_SHA: ${{ steps.pr_info.outputs.base_sha }} | ||||
|       PR_MERGE_COMMIT_SHA: ${{ steps.pr_info.outputs.merge_commit_sha }} | ||||
|       PR_HEAD_COMMIT_DATE: ${{ steps.pr_info.outputs.head_commit_date }} | ||||
|       PR_MERGE_COMMIT_DATE: ${{ steps.pr_info.outputs.merge_commit_date }} | ||||
|       PR_HEAD_COMMIT_TIMESTAMP: ${{ steps.get_timestamps.outputs.head_commit_timestamp }} | ||||
|       PR_MERGE_COMMIT_TIMESTAMP: ${{ steps.get_timestamps.outputs.merge_commit_timestamp }} | ||||
|       PR: ${{ steps.pr_info.outputs.pr }} | ||||
|       PR_FILES: ${{ steps.pr_info.outputs.files }} | ||||
|     if: ${{ inputs.pr_number != '' }} | ||||
|     steps: | ||||
|       - name: Extract PR details | ||||
|         id: pr_info | ||||
|         uses: actions/github-script@v6 | ||||
|         with: | ||||
|           script: |             | ||||
|             const { data: pr } = await github.rest.pulls.get({ | ||||
|               owner: context.repo.owner, | ||||
|               repo: context.repo.repo, | ||||
|               pull_number: ${{ inputs.pr_number }} | ||||
|             }); | ||||
|  | ||||
|             const { data: head_commit }  = await github.rest.repos.getCommit({ | ||||
|               owner: pr.head.repo.owner.login, | ||||
|               repo: pr.head.repo.name, | ||||
|               ref: pr.head.ref | ||||
|             }); | ||||
|  | ||||
|             const { data: merge_commit }  = await github.rest.repos.getCommit({ | ||||
|               owner: pr.base.repo.owner.login, | ||||
|               repo: pr.base.repo.name, | ||||
|               ref: pr.merge_commit_sha, | ||||
|             }); | ||||
|  | ||||
|             const { data: files } = await github.rest.pulls.listFiles({ | ||||
|               owner: context.repo.owner, | ||||
|               repo: context.repo.repo, | ||||
|               pull_number: ${{ inputs.pr_number }} | ||||
|             }); | ||||
|  | ||||
|             core.setOutput('head_repo_full_name', pr.head.repo.full_name); | ||||
|             core.setOutput('base_repo_full_name', pr.base.repo.full_name); | ||||
|             core.setOutput('head_repo_owner', pr.head.repo.owner.login); | ||||
|             core.setOutput('base_repo_owner', pr.base.repo.owner.login); | ||||
|             core.setOutput('head_repo_name', pr.head.repo.name); | ||||
|             core.setOutput('base_repo_name', pr.base.repo.name); | ||||
|             core.setOutput('head_ref', pr.head.ref); | ||||
|             core.setOutput('base_ref', pr.base.ref); | ||||
|             core.setOutput('head_sha', pr.head.sha); | ||||
|             core.setOutput('base_sha', pr.base.sha); | ||||
|             core.setOutput('merge_commit_sha', pr.merge_commit_sha); | ||||
|             core.setOutput('pr', pr); | ||||
|  | ||||
|             core.setOutput('head_commit_date', head_commit.commit.committer.date); | ||||
|             core.setOutput('merge_commit_date', merge_commit.commit.committer.date); | ||||
|              | ||||
|             core.setOutput('files', files);             | ||||
|              | ||||
|             console.log('PR head commit:', { | ||||
|               head_commit: head_commit, | ||||
|               commit: head_commit.commit, | ||||
|               date: head_commit.commit.committer.date | ||||
|             }); | ||||
|  | ||||
|             console.log('PR merge commit:', { | ||||
|               merge_commit: merge_commit, | ||||
|               commit: merge_commit.commit, | ||||
|               date: merge_commit.commit.committer.date | ||||
|             }); | ||||
|  | ||||
|       - name: Convert dates to timestamps | ||||
|         id: get_timestamps | ||||
|         run: | | ||||
|           head_commit_date=${{ steps.pr_info.outputs.head_commit_date }} | ||||
|           merge_commit_date=${{ steps.pr_info.outputs.merge_commit_date }} | ||||
|           echo $head_commit_date | ||||
|           echo $merge_commit_date | ||||
|           head_commit_timestamp=$(date -d "$head_commit_date" +%s) | ||||
|           merge_commit_timestamp=$(date -d "$merge_commit_date" +%s) | ||||
|           echo $head_commit_timestamp | ||||
|           echo $merge_commit_timestamp | ||||
|           echo "head_commit_timestamp=$head_commit_timestamp" >> $GITHUB_OUTPUT | ||||
|           echo "merge_commit_timestamp=$merge_commit_timestamp" >> $GITHUB_OUTPUT | ||||
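For reference, the "Convert dates to timestamps" step in the get-pr-info.yml removed by this compare turns the ISO-8601 commit dates returned by the GitHub API into Unix epoch seconds with GNU `date -d "$date" +%s`. A minimal Python sketch of the same conversion (the sample date string below is illustrative, not taken from a real PR):

from datetime import datetime, timezone

def to_unix_timestamp(iso_date: str) -> int:
    """Convert an ISO-8601 date like '2024-05-17T12:34:56Z' to epoch seconds."""
    # GitHub returns UTC dates with a trailing 'Z'; older fromisoformat() versions
    # want an explicit offset, so substitute it before parsing.
    dt = datetime.fromisoformat(iso_date.replace("Z", "+00:00"))
    return int(dt.astimezone(timezone.utc).timestamp())

print(to_unix_timestamp("2024-05-17T12:34:56Z"))  # -> 1715949296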
							
								
								
									
36  .github/workflows/get-pr-number.yml  vendored
							| @ -1,36 +0,0 @@ | ||||
| name: Get PR number | ||||
| on: | ||||
|   workflow_call: | ||||
|     outputs: | ||||
|       PR_NUMBER: | ||||
|         description: "The extracted PR number" | ||||
|         value: ${{ jobs.get-pr-number.outputs.PR_NUMBER }} | ||||
|  | ||||
| jobs: | ||||
|   get-pr-number: | ||||
|     runs-on: ubuntu-22.04 | ||||
|     name: Get PR number | ||||
|     outputs: | ||||
|       PR_NUMBER: ${{ steps.set_pr_number.outputs.PR_NUMBER }} | ||||
|     steps: | ||||
|       - name: Get PR number | ||||
|         shell: bash | ||||
|         run: | | ||||
|           if [[ "${{ github.event.issue.number }}" != "" && "${{ github.event.issue.pull_request }}" != "" ]]; then | ||||
|             echo "PR_NUMBER=${{ github.event.issue.number }}" >> $GITHUB_ENV | ||||
|           elif [[ "${{ github.event.pull_request.number }}" != "" ]]; then | ||||
|             echo "PR_NUMBER=${{ github.event.pull_request.number }}" >> $GITHUB_ENV | ||||
|           elif [[ "${{ github.event.pull_request }}" != "" ]]; then | ||||
|             echo "PR_NUMBER=${{ github.event.number }}" >> $GITHUB_ENV | ||||
|           else | ||||
|             echo "PR_NUMBER=" >> $GITHUB_ENV | ||||
|           fi | ||||
|  | ||||
|       - name: Check PR number | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "${{ env.PR_NUMBER }}" | ||||
|  | ||||
|       - name: Set PR number | ||||
|         id: set_pr_number | ||||
|         run: echo "PR_NUMBER=${{ env.PR_NUMBER }}" >> "$GITHUB_OUTPUT" | ||||
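The get-pr-number.yml removed above resolves the PR number differently depending on which event payload triggered the run: an issue comment left on a PR, a pull_request event carrying its own number, or neither. A rough Python sketch of that precedence, with a hypothetical `event` dict standing in for the GitHub event payload:

def resolve_pr_number(event: dict) -> str:
    """Mirror the precedence used in the workflow's bash step (sketch only)."""
    issue = event.get("issue", {})
    pull_request = event.get("pull_request", {})

    # Case 1: an issue_comment event on an issue that is actually a PR.
    if issue.get("number") and issue.get("pull_request"):
        return str(issue["number"])
    # Case 2: a pull_request event carrying its own number.
    if pull_request.get("number"):
        return str(pull_request["number"])
    # Case 3: a pull_request payload without its own number; fall back to the top-level number.
    if pull_request:
        return str(event.get("number", ""))
    # Fallback: not a PR-related event.
    return ""

# Hypothetical payload from an issue comment left on PR #12345:
print(resolve_pr_number({"issue": {"number": 12345, "pull_request": {"url": "..."}}}))  # -> "12345"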
							
								
								
									
73  .github/workflows/model_jobs.yml  vendored
							| @ -12,21 +12,11 @@ on: | ||||
|       slice_id: | ||||
|         required: true | ||||
|         type: number | ||||
|       docker: | ||||
|       runner: | ||||
|         required: true | ||||
|         type: string | ||||
|       commit_sha: | ||||
|         required: false | ||||
|         type: string | ||||
|       report_name_prefix: | ||||
|         required: false | ||||
|         default: run_models_gpu | ||||
|         type: string | ||||
|       runner_type: | ||||
|         required: false | ||||
|         type: string | ||||
|       report_repo_id: | ||||
|         required: false | ||||
|       docker: | ||||
|         required: true | ||||
|         type: string | ||||
|  | ||||
| env: | ||||
| @ -55,8 +45,6 @@ jobs: | ||||
|     container: | ||||
|       image: ${{ inputs.docker }} | ||||
|       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|     outputs: | ||||
|       machine_type: ${{ steps.set_machine_type.outputs.machine_type }} | ||||
|     steps: | ||||
|       - name: Echo input and matrix info | ||||
|         shell: bash | ||||
| @ -78,7 +66,7 @@ jobs: | ||||
|  | ||||
|       - name: Update clone | ||||
|         working-directory: /transformers | ||||
|         run: git fetch && git checkout ${{ inputs.commit_sha || github.sha }} | ||||
|         run: git fetch && git checkout ${{ github.sha }} | ||||
|  | ||||
|       - name: Reinstall transformers in edit mode (remove the one installed during docker image build) | ||||
|         working-directory: /transformers | ||||
| @ -105,20 +93,27 @@ jobs: | ||||
|         run: | | ||||
|           python3 utils/print_env.py | ||||
|  | ||||
| #      - name: Installed torch 2.7.0 | ||||
| #        working-directory: /transformers | ||||
| #        run: python3 -m pip install torch==2.7.0 torchvision torchaudio | ||||
|  | ||||
|       - name: Installed torch 2.7.1 RC | ||||
|         working-directory: /transformers | ||||
|         run: python3 -m pip install torch==2.7.1 torchvision torchaudio --index-url https://download.pytorch.org/whl/test/cu126 | ||||
|  | ||||
|       - name: Show installed libraries and their versions | ||||
|         working-directory: /transformers | ||||
|         run: pip freeze | ||||
|  | ||||
|       - name: Set `machine_type` for report and artifact names | ||||
|         id: set_machine_type | ||||
|         working-directory: /transformers | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "${{ inputs.machine_type }}" | ||||
|  | ||||
|           if [ "${{ inputs.machine_type }}" = "aws-g5-4xlarge-cache" ]; then | ||||
|           if [ "${{ inputs.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ inputs.machine_type }}" = "aws-g5-12xlarge-cache" ]; then | ||||
|           elif [ "${{ inputs.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ inputs.machine_type }} | ||||
| @ -126,46 +121,26 @@ jobs: | ||||
|  | ||||
|           echo "$machine_type" | ||||
|           echo "machine_type=$machine_type" >> $GITHUB_ENV | ||||
|           echo "machine_type=$machine_type" >> $GITHUB_OUTPUT | ||||
|  | ||||
|       - name: Create report directory if it doesn't exist | ||||
|         shell: bash | ||||
|         run: | | ||||
|           mkdir -p /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports | ||||
|           echo "dummy" > /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports/dummy.txt | ||||
|           ls -la /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports | ||||
|  | ||||
|       - name: Run all tests on GPU | ||||
|         working-directory: /transformers | ||||
|         run: | | ||||
|           PATCH_TESTING_METHODS_TO_COLLECT_OUTPUTS=yes _PATCHED_TESTING_METHODS_OUTPUT_DIR=/transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports python3 -m pytest -rsfE -v --make-reports=${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports tests/${{ matrix.folders }} | ||||
|         run: python3 -m pytest -rsfE -v --make-reports=${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }} | ||||
|  | ||||
|       - name: Failure short reports | ||||
|         if: ${{ failure() }} | ||||
|         continue-on-error: true | ||||
|         run: cat /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports/failures_short.txt | ||||
|         run: cat /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/failures_short.txt | ||||
|  | ||||
|       - name: Captured information | ||||
|         if: ${{ failure() }} | ||||
|         continue-on-error: true | ||||
|       - name: Run test | ||||
|         shell: bash | ||||
|         run: | | ||||
|           cat /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports/captured_info.txt | ||||
|           mkdir -p /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports | ||||
|           echo "hello" > /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/hello.txt | ||||
|           echo "${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports" | ||||
|  | ||||
|       - name: "Test suite reports artifacts: ${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports" | ||||
|       - name: "Test suite reports artifacts: ${{ env.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports" | ||||
|         if: ${{ always() }} | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: ${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports | ||||
|           path: /transformers/reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports | ||||
|  | ||||
|   collated_reports: | ||||
|     name: Collated Reports | ||||
|     if: ${{ always() }} | ||||
|     needs: run_models_gpu | ||||
|     uses: huggingface/transformers/.github/workflows/collated-reports.yml@main | ||||
|     with: | ||||
|       job: run_models_gpu | ||||
|       report_repo_id: ${{ inputs.report_repo_id }} | ||||
|       gpu_name: ${{ inputs.runner_type }} | ||||
|       machine_type: ${{ needs.run_models_gpu.outputs.machine_type }} | ||||
|     secrets: inherit | ||||
|           name: ${{ env.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports | ||||
|           path: /transformers/reports/${{ env.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports | ||||
|  | ||||
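Both sides of the model_jobs.yml diff normalize the runner group name into a short `machine_type` used in report and artifact names; only the AWS instance families differ (g5 on one side, g4dn on the other). A minimal Python sketch of that mapping, using the g4dn group names shown above:

def normalize_machine_type(machine_type: str) -> str:
    """Map a runner group name to the short label used in report/artifact names."""
    if machine_type == "aws-g4dn-2xlarge-cache":
        return "single-gpu"
    if machine_type == "aws-g4dn-12xlarge-cache":
        return "multi-gpu"
    # Anything else (e.g. a custom runner group) is passed through unchanged.
    return machine_type

print(normalize_machine_type("aws-g4dn-2xlarge-cache"))   # single-gpu
print(normalize_machine_type("aws-g4dn-12xlarge-cache"))  # multi-gpu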
							
								
								
									
128  .github/workflows/model_jobs_amd.yml  vendored  Normal file
							| @ -0,0 +1,128 @@ | ||||
| name: model jobs | ||||
|  | ||||
| on: | ||||
|   workflow_call: | ||||
|     inputs: | ||||
|       folder_slices: | ||||
|         required: true | ||||
|         type: string | ||||
|       machine_type: | ||||
|         required: true | ||||
|         type: string | ||||
|       slice_id: | ||||
|         required: true | ||||
|         type: number | ||||
|       runner: | ||||
|         required: true | ||||
|         type: string | ||||
|       docker: | ||||
|         required: true | ||||
|         type: string | ||||
|  | ||||
| env: | ||||
|   HF_HOME: /mnt/cache | ||||
|   TRANSFORMERS_IS_CI: yes | ||||
|   OMP_NUM_THREADS: 8 | ||||
|   MKL_NUM_THREADS: 8 | ||||
|   RUN_SLOW: yes | ||||
|   # For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access. | ||||
|   # This token is created under the bot `hf-transformers-bot`. | ||||
|   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }} | ||||
|   SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }} | ||||
|   TF_FORCE_GPU_ALLOW_GROWTH: true | ||||
|   CUDA_VISIBLE_DEVICES: 0,1 | ||||
|  | ||||
| jobs: | ||||
|   run_models_gpu: | ||||
|     name: " " | ||||
|     strategy: | ||||
|       max-parallel: 1  # For now, not to parallelize. Can change later if it works well. | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         folders: ${{ fromJson(inputs.folder_slices)[inputs.slice_id] }} | ||||
|     runs-on: ['${{ inputs.machine_type }}', self-hosted, amd-gpu, '${{ inputs.runner }}'] | ||||
|     container: | ||||
|       image: ${{ inputs.docker }} | ||||
|       options: --device /dev/kfd --device /dev/dri --env ROCR_VISIBLE_DEVICES --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|     steps: | ||||
|       - name: Echo input and matrix info | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "${{ inputs.folder_slices }}" | ||||
|           echo "${{ matrix.folders }}" | ||||
|           echo "${{ toJson(fromJson(inputs.folder_slices)[inputs.slice_id]) }}" | ||||
|  | ||||
|       - name: Echo folder ${{ matrix.folders }} | ||||
|         shell: bash | ||||
|         # For folders like `models/bert`, set an env. var. (`matrix_folders`) to `models_bert`, which will be used to | ||||
|         # set the artifact folder names (because the character `/` is not allowed). | ||||
|         run: | | ||||
|           echo "${{ matrix.folders }}" | ||||
|           matrix_folders=${{ matrix.folders }} | ||||
|           matrix_folders=${matrix_folders/'models/'/'models_'} | ||||
|           echo "$matrix_folders" | ||||
|           echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV | ||||
|  | ||||
|       - name: Update clone | ||||
|         working-directory: /transformers | ||||
|         run: git fetch && git checkout ${{ github.sha }} | ||||
|  | ||||
|       - name: Reinstall transformers in edit mode (remove the one installed during docker image build) | ||||
|         working-directory: /transformers | ||||
|         run: python3 -m pip uninstall -y transformers && python3 -m pip install -e . | ||||
|  | ||||
|       - name: Update / Install some packages (for Past CI) | ||||
|         if: ${{ contains(inputs.docker, '-past-') }} | ||||
|         working-directory: /transformers | ||||
|         run: | | ||||
|           python3 -m pip install -U datasets | ||||
|  | ||||
|       - name: Update / Install some packages (for Past CI) | ||||
|         if: ${{ contains(inputs.docker, '-past-') && contains(inputs.docker, '-pytorch-') }} | ||||
|         working-directory: /transformers | ||||
|         run: | | ||||
|           python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate | ||||
|  | ||||
|       - name: ROCM-SMI | ||||
|         run: | | ||||
|           rocm-smi | ||||
|  | ||||
|       - name: ROCM-INFO | ||||
|         run: | | ||||
|           rocminfo  | grep "Agent" -A 14 | ||||
|  | ||||
|       - name: Show ROCR environment | ||||
|         run: | | ||||
|           echo "ROCR: $ROCR_VISIBLE_DEVICES" | ||||
|  | ||||
|       - name: Environment | ||||
|         working-directory: /transformers | ||||
|         run: | | ||||
|           python3 utils/print_env.py | ||||
|  | ||||
|       - name: Show installed libraries and their versions | ||||
|         working-directory: /transformers | ||||
|         run: pip freeze | ||||
|  | ||||
|       - name: Run all tests on GPU | ||||
|         working-directory: /transformers | ||||
|         run: python3 -m pytest -rsfE -v --make-reports=${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }}  -m "not not_device_test" | ||||
|  | ||||
|       - name: Failure short reports | ||||
|         if: ${{ failure() }} | ||||
|         continue-on-error: true | ||||
|         run: cat /transformers/reports/${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/failures_short.txt | ||||
|  | ||||
|       - name: Run test | ||||
|         shell: bash | ||||
|         run: | | ||||
|           mkdir -p /transformers/reports/${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports | ||||
|           echo "hello" > /transformers/reports/${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports/hello.txt | ||||
|           echo "${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports" | ||||
|  | ||||
|       - name: "Test suite reports artifacts: ${{ inputs.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports" | ||||
|         if: ${{ always() }} | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: ${{ inputs.machine_type }}_run_models_gpu_${{ env.matrix_folders }}_test_reports | ||||
|           path: /transformers/reports/${{ inputs.machine_type }}_run_models_gpu_${{ matrix.folders }}_test_reports | ||||
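As the comment in the "Echo folder" step of the new model_jobs_amd.yml notes, a folder like `models/bert` is rewritten to `models_bert` before it is used in artifact names, because `/` is not allowed there. The bash substitution `${matrix_folders/'models/'/'models_'}` replaces the first occurrence of `models/`; a Python equivalent, as a sketch:

def sanitize_folder(folder: str) -> str:
    """Replace the first 'models/' prefix with 'models_' so the result is a valid artifact name."""
    return folder.replace("models/", "models_", 1)

assert sanitize_folder("models/bert") == "models_bert"
assert sanitize_folder("benchmark") == "benchmark"  # non-model folders pass through unchanged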
							
								
								
									
121  .github/workflows/model_jobs_intel_gaudi.yml  vendored
							| @ -1,121 +0,0 @@ | ||||
| name: model jobs | ||||
|  | ||||
| on: | ||||
|   workflow_call: | ||||
|     inputs: | ||||
|       folder_slices: | ||||
|         required: true | ||||
|         type: string | ||||
|       slice_id: | ||||
|         required: true | ||||
|         type: number | ||||
|       runner: | ||||
|         required: true | ||||
|         type: string | ||||
|       machine_type: | ||||
|         required: true | ||||
|         type: string | ||||
|       report_name_prefix: | ||||
|         required: false | ||||
|         default: run_models_gpu | ||||
|         type: string | ||||
|  | ||||
| env: | ||||
|   RUN_SLOW: yes | ||||
|   PT_HPU_LAZY_MODE: 0 | ||||
|   TRANSFORMERS_IS_CI: yes | ||||
|   PT_ENABLE_INT64_SUPPORT: 1 | ||||
|   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }} | ||||
|   SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }} | ||||
|   HF_HOME: /mnt/cache/.cache/huggingface | ||||
|  | ||||
| jobs: | ||||
|   run_models_gpu: | ||||
|     name: " " | ||||
|     strategy: | ||||
|       max-parallel: 8 | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         folders: ${{ fromJson(inputs.folder_slices)[inputs.slice_id] }} | ||||
|     runs-on: | ||||
|       group: ${{ inputs.runner }} | ||||
|     container: | ||||
|       image: vault.habana.ai/gaudi-docker/1.21.1/ubuntu22.04/habanalabs/pytorch-installer-2.6.0:latest | ||||
|       options: --runtime=habana | ||||
|         -v /mnt/cache/.cache/huggingface:/mnt/cache/.cache/huggingface | ||||
|         --env OMPI_MCA_btl_vader_single_copy_mechanism=none | ||||
|         --env HABANA_VISIBLE_DEVICES | ||||
|         --env HABANA_VISIBLE_MODULES | ||||
|         --cap-add=sys_nice | ||||
|         --shm-size=64G | ||||
|     steps: | ||||
|       - name: Echo input and matrix info | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "${{ inputs.folder_slices }}" | ||||
|           echo "${{ matrix.folders }}" | ||||
|           echo "${{ toJson(fromJson(inputs.folder_slices)[inputs.slice_id]) }}" | ||||
|  | ||||
|       - name: Echo folder ${{ matrix.folders }} | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "${{ matrix.folders }}" | ||||
|           matrix_folders=${{ matrix.folders }} | ||||
|           matrix_folders=${matrix_folders/'models/'/'models_'} | ||||
|           echo "$matrix_folders" | ||||
|           echo "matrix_folders=$matrix_folders" >> $GITHUB_ENV | ||||
|  | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           fetch-depth: 0 | ||||
|  | ||||
|       - name: Install dependencies | ||||
|         run: | | ||||
|           pip install -e .[testing,torch] "numpy<2.0.0" scipy scikit-learn | ||||
|  | ||||
|       - name: HL-SMI | ||||
|         run: | | ||||
|           hl-smi | ||||
|           echo "HABANA_VISIBLE_DEVICES=${HABANA_VISIBLE_DEVICES}" | ||||
|           echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}" | ||||
|  | ||||
|       - name: Environment | ||||
|         run: python3 utils/print_env.py | ||||
|  | ||||
|       - name: Show installed libraries and their versions | ||||
|         run: pip freeze | ||||
|  | ||||
|       - name: Set `machine_type` for report and artifact names | ||||
|         shell: bash | ||||
|         run: | | ||||
|           if [ "${{ inputs.machine_type }}" = "1gaudi" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ inputs.machine_type }}" = "2gaudi" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ inputs.machine_type }} | ||||
|           fi | ||||
|           echo "machine_type=$machine_type" >> $GITHUB_ENV | ||||
|  | ||||
|       - name: Run all tests on Gaudi | ||||
|         run: python3 -m pytest -v --make-reports=${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ matrix.folders }}_test_reports tests/${{ matrix.folders }} | ||||
|  | ||||
|       - name: Failure short reports | ||||
|         if: ${{ failure() }} | ||||
|         continue-on-error: true | ||||
|         run: cat reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ matrix.folders }}_test_reports/failures_short.txt | ||||
|  | ||||
|       - name: Run test | ||||
|         shell: bash | ||||
|         run: | | ||||
|           mkdir -p reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ matrix.folders }}_test_reports | ||||
|           echo "hello" > reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ matrix.folders }}_test_reports/hello.txt | ||||
|           echo "${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ matrix.folders }}_test_reports" | ||||
|  | ||||
|       - name: "Test suite reports artifacts: ${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports" | ||||
|         if: ${{ always() }} | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: ${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ env.matrix_folders }}_test_reports | ||||
|           path: reports/${{ env.machine_type }}_${{ inputs.report_name_prefix }}_${{ matrix.folders }}_test_reports | ||||
| @ -59,7 +59,7 @@ jobs: | ||||
|                   "type": "section", | ||||
|                   "text": { | ||||
|                     "type": "mrkdwn", | ||||
|                     "text": "<https://github.com/huggingface/transformers/commit/${{ env.COMMIT_SHA }}|New model: ${{ env.NEW_MODEL }}> GH_ArthurZucker, GH_lysandrejik, GH_ydshieh\ncommit SHA: ${{ env.COMMIT_SHA }}" | ||||
|                     "text": "<https://github.com/huggingface/transformers/commit/${{ env.COMMIT_SHA }}|New model: ${{ env.NEW_MODEL }}> GH_ArthurZucker, GH_lysandrejik, GH_ydshieh" | ||||
|                   } | ||||
|                 } | ||||
|               ] | ||||
|  | ||||
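The file header for this hunk did not survive the page capture; the change itself only toggles whether the commit SHA is appended to the Slack notification text. For context, the payload is a Slack Block Kit `section` block with `mrkdwn` text; a minimal Python sketch of assembling such a block (model name and SHA below are placeholders, not taken from a real commit):

def new_model_notification_block(commit_sha: str, new_model: str, include_sha_suffix: bool = True) -> dict:
    """Build the Slack Block Kit section used to announce a newly added model (sketch)."""
    commit_url = f"https://github.com/huggingface/transformers/commit/{commit_sha}"
    text = f"<{commit_url}|New model: {new_model}> GH_ArthurZucker, GH_lysandrejik, GH_ydshieh"
    if include_sha_suffix:
        # The piece this hunk touches: one side of the diff carries this suffix, the other does not.
        text += f"\ncommit SHA: {commit_sha}"
    return {"type": "section", "text": {"type": "mrkdwn", "text": text}}

print(new_model_notification_block("0123abc", "my-new-model"))  # placeholder values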
							
								
								
									
18  .github/workflows/pr-style-bot.yml  vendored
							| @ -1,18 +0,0 @@ | ||||
| # To run this bot, comment "@bot /style" on a PR | ||||
| name: Style Bot | ||||
|  | ||||
| on: | ||||
|   issue_comment: | ||||
|     types: [created] | ||||
|  | ||||
| permissions: | ||||
|   pull-requests: write | ||||
|  | ||||
| jobs: | ||||
|   style: | ||||
|     uses: huggingface/huggingface_hub/.github/workflows/style-bot-action.yml@main | ||||
|     with: | ||||
|       python_quality_dependencies: "[quality]" | ||||
|       style_command_type: "default" | ||||
|     secrets: | ||||
|       bot_token: ${{ secrets.HF_STYLE_BOT_ACTION }} | ||||
							
								
								
									
134  .github/workflows/pr_build_doc_with_comment.yml  vendored
							| @ -1,134 +0,0 @@ | ||||
| name: PR - build doc via comment | ||||
| on: | ||||
|   issue_comment: | ||||
|     types: | ||||
|       - created | ||||
|     branches-ignore: | ||||
|       - main | ||||
| concurrency: | ||||
|   group: ${{ github.workflow }}-${{ github.event.issue.number }}-${{ startsWith(github.event.comment.body, 'build-doc') }} | ||||
|   cancel-in-progress: true | ||||
| permissions: {} | ||||
|  | ||||
|  | ||||
| jobs: | ||||
|   get-pr-number: | ||||
|     name: Get PR number | ||||
|     if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "qubvel", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1", "SunMarc", "muellerzr", "eustlb", "MekkCyber", "manueldeprada", "vasqu", "ivarflakstad", "stevhliu", "ebezzam"]'), github.actor) && (startsWith(github.event.comment.body, 'build-doc')) }} | ||||
|     uses: ./.github/workflows/get-pr-number.yml | ||||
|  | ||||
|   get-pr-info: | ||||
|     name: Get PR commit SHA | ||||
|     needs: get-pr-number | ||||
|     if: ${{ needs.get-pr-number.outputs.PR_NUMBER != ''}} | ||||
|     uses: ./.github/workflows/get-pr-info.yml | ||||
|     with: | ||||
|       pr_number: ${{ needs.get-pr-number.outputs.PR_NUMBER }} | ||||
|  | ||||
|   verity_pr_commit: | ||||
|     name: Verity PR commit corresponds to a specific event by comparing timestamps | ||||
|     if: ${{ needs.get-pr-number.outputs.PR_NUMBER != ''}} | ||||
|     runs-on: ubuntu-22.04 | ||||
|     needs: get-pr-info | ||||
|     env: | ||||
|       COMMENT_DATE: ${{ github.event.comment.created_at }} | ||||
|       PR_MERGE_COMMIT_DATE: ${{ needs.get-pr-info.outputs.PR_MERGE_COMMIT_DATE }} | ||||
|       PR_MERGE_COMMIT_TIMESTAMP: ${{ needs.get-pr-info.outputs.PR_MERGE_COMMIT_TIMESTAMP }} | ||||
|     steps: | ||||
|       - run: | | ||||
|           COMMENT_TIMESTAMP=$(date -d "${COMMENT_DATE}" +"%s") | ||||
|           echo "COMMENT_DATE: $COMMENT_DATE" | ||||
|           echo "PR_MERGE_COMMIT_DATE: $PR_MERGE_COMMIT_DATE" | ||||
|           echo "COMMENT_TIMESTAMP: $COMMENT_TIMESTAMP" | ||||
|           echo "PR_MERGE_COMMIT_TIMESTAMP: $PR_MERGE_COMMIT_TIMESTAMP" | ||||
|           if [ $COMMENT_TIMESTAMP -le $PR_MERGE_COMMIT_TIMESTAMP ]; then | ||||
|             echo "Last commit on the pull request is newer than the issue comment triggering this run! Abort!"; | ||||
|             exit -1; | ||||
|           fi | ||||
|  | ||||
|   create_run: | ||||
|     name: Create run | ||||
|     needs: [get-pr-number, get-pr-info] | ||||
|     if: ${{ needs.get-pr-number.outputs.PR_NUMBER != '' }} | ||||
|     permissions: | ||||
|       statuses: write | ||||
|     runs-on: ubuntu-22.04 | ||||
|     steps: | ||||
|       - name: Create Run | ||||
|         id: create_run | ||||
|         env: | ||||
|           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | ||||
|           # Create a commit status (pending) for a run of this workflow. The status has to be updated later in `update_run_status`. | ||||
|           # See https://docs.github.com/en/rest/commits/statuses?apiVersion=2022-11-28#create-a-commit-status | ||||
|           GITHUB_RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} | ||||
|         run: | | ||||
|           gh api \ | ||||
|             --method POST \ | ||||
|             -H "Accept: application/vnd.github+json" \ | ||||
|             -H "X-GitHub-Api-Version: 2022-11-28" \ | ||||
|             repos/${{ github.repository }}/statuses/${{ needs.get-pr-info.outputs.PR_HEAD_SHA }} \ | ||||
|             -f "target_url=$GITHUB_RUN_URL" -f "state=pending" -f "description=Custom doc building job" -f "context=custom-doc-build" | ||||
|  | ||||
|   reply_to_comment: | ||||
|     name: Reply to the comment | ||||
|     if: ${{ needs.create_run.result == 'success' }} | ||||
|     needs: [get-pr-number, create_run] | ||||
|     permissions: | ||||
|       pull-requests: write | ||||
|     runs-on: ubuntu-22.04 | ||||
|     steps: | ||||
|       - name: Reply to the comment | ||||
|         env: | ||||
|           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | ||||
|           GITHUB_RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} | ||||
|         run: | | ||||
|           gh api \ | ||||
|             --method POST \ | ||||
|             -H "Accept: application/vnd.github+json" \ | ||||
|             -H "X-GitHub-Api-Version: 2022-11-28" \ | ||||
|             repos/${{ github.repository }}/issues/${{ needs.get-pr-number.outputs.PR_NUMBER }}/comments \ | ||||
|             -f "body=[Building docs for all languages...](${{ env.GITHUB_RUN_URL }})" | ||||
|  | ||||
|   build-doc: | ||||
|     name: Build doc | ||||
|     needs: [get-pr-number, get-pr-info] | ||||
|     if: ${{ needs.get-pr-number.outputs.PR_NUMBER != '' }} | ||||
|     uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main | ||||
|     with: | ||||
|       commit_sha: ${{ needs.get-pr-info.outputs.PR_HEAD_SHA }} | ||||
|       pr_number: ${{ needs.get-pr-number.outputs.PR_NUMBER }} | ||||
|       package: transformers | ||||
|       languages: ar de en es fr hi it ko pt tr zh ja te | ||||
|  | ||||
|   update_run_status: | ||||
|     name: Update Check Run Status | ||||
|     needs: [ get-pr-info, create_run, build-doc ] | ||||
|     permissions: | ||||
|       statuses: write | ||||
|     if: ${{ always() && needs.create_run.result == 'success' }} | ||||
|     runs-on: ubuntu-22.04 | ||||
|     env: | ||||
|       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | ||||
|       GITHUB_RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }} | ||||
|       STATUS_OK: ${{ contains(fromJSON('["skipped", "success"]'), needs.create_run.result) }} | ||||
|     steps: | ||||
|       - name: Get `build-doc` job status | ||||
|         run: | | ||||
|           echo "${{ needs.build-doc.result }}" | ||||
|           echo $STATUS_OK | ||||
|           if [ "$STATUS_OK" = "true" ]; then | ||||
|             echo "STATUS=success" >> $GITHUB_ENV | ||||
|           else | ||||
|             echo "STATUS=failure" >> $GITHUB_ENV | ||||
|           fi | ||||
|  | ||||
|       - name: Update PR commit statuses | ||||
|         run: | | ||||
|           echo "${{ needs.build-doc.result }}" | ||||
|           echo "${{ env.STATUS }}" | ||||
|           gh api \ | ||||
|             --method POST \ | ||||
|             -H "Accept: application/vnd.github+json" \ | ||||
|             -H "X-GitHub-Api-Version: 2022-11-28" \ | ||||
|             repos/${{ github.repository }}/statuses/${{ needs.get-pr-info.outputs.PR_HEAD_SHA }} \ | ||||
|             -f "target_url=$GITHUB_RUN_URL" -f "state=${{ env.STATUS }}" -f "description=Custom doc building job" -f "context=custom-doc-build" | ||||
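The verity_pr_commit job in the removed pr_build_doc_with_comment.yml only lets the run proceed when the triggering comment is strictly newer than the PR's merge commit, so a stale comment cannot build docs for commits pushed after it. That check, as a small Python sketch (timestamps are epoch seconds, as produced by get-pr-info.yml; the values below are made up):

def comment_is_fresh(comment_timestamp: int, merge_commit_timestamp: int) -> bool:
    """True when the comment was posted after the PR's merge commit was created."""
    return comment_timestamp > merge_commit_timestamp

# The workflow aborts (exit -1) in the opposite case:
if not comment_is_fresh(1715950000, 1715960000):
    raise SystemExit("Last commit on the pull request is newer than the issue comment triggering this run! Abort!")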
							
								
								
									
177  .github/workflows/pr_run_slow_ci.yml  vendored
							| @ -1,177 +0,0 @@ | ||||
| name: PR slow CI | ||||
| on: | ||||
|   pull_request_target: | ||||
|     types: [opened, synchronize, reopened] | ||||
|  | ||||
| jobs: | ||||
|   get-pr-number: | ||||
|     name: Get PR number | ||||
|     uses: ./.github/workflows/get-pr-number.yml | ||||
|  | ||||
|   get-pr-info: | ||||
|     name: Get PR commit SHA | ||||
|     needs: get-pr-number | ||||
|     if: ${{ needs.get-pr-number.outputs.PR_NUMBER != ''}} | ||||
|     uses: ./.github/workflows/get-pr-info.yml | ||||
|     with: | ||||
|       pr_number: ${{ needs.get-pr-number.outputs.PR_NUMBER }} | ||||
|  | ||||
|   get-jobs: | ||||
|     name: Get test files to run | ||||
|     runs-on: ubuntu-22.04 | ||||
|     needs: [get-pr-number, get-pr-info] | ||||
|     outputs: | ||||
|       jobs: ${{ steps.get_jobs.outputs.jobs_to_run }} | ||||
|     steps: | ||||
|       - name: Get repository content | ||||
|         id: repo_content | ||||
|         uses: actions/github-script@v6 | ||||
|         with: | ||||
|           script: | | ||||
|             const { data: tests_dir } = await github.rest.repos.getContent({ | ||||
|               owner: '${{ needs.get-pr-info.outputs.PR_HEAD_REPO_OWNER }}', | ||||
|               repo: '${{ needs.get-pr-info.outputs.PR_HEAD_REPO_NAME }}', | ||||
|               path: 'tests', | ||||
|               ref: '${{ needs.get-pr-info.outputs.PR_HEAD_SHA }}', | ||||
|             }); | ||||
|  | ||||
|             const { data: tests_models_dir } = await github.rest.repos.getContent({ | ||||
|               owner: '${{ needs.get-pr-info.outputs.PR_HEAD_REPO_OWNER }}', | ||||
|               repo: '${{ needs.get-pr-info.outputs.PR_HEAD_REPO_NAME }}', | ||||
|               path: 'tests/models', | ||||
|               ref: '${{ needs.get-pr-info.outputs.PR_HEAD_SHA }}', | ||||
|             }); | ||||
|  | ||||
|             const { data: tests_quantization_dir } = await github.rest.repos.getContent({ | ||||
|               owner: '${{ needs.get-pr-info.outputs.PR_HEAD_REPO_OWNER }}', | ||||
|               repo: '${{ needs.get-pr-info.outputs.PR_HEAD_REPO_NAME }}', | ||||
|               path: 'tests/quantization', | ||||
|               ref: '${{ needs.get-pr-info.outputs.PR_HEAD_SHA }}', | ||||
|             }); | ||||
|  | ||||
|             core.setOutput('tests_dir', tests_dir); | ||||
|             core.setOutput('tests_models_dir', tests_models_dir); | ||||
|             core.setOutput('tests_quantization_dir', tests_quantization_dir); | ||||
|  | ||||
|       # This checkout to the main branch | ||||
|       - uses: actions/checkout@v4 | ||||
|         with: | ||||
|           fetch-depth: "0" | ||||
|  | ||||
|       - name: Write pr_files file | ||||
|         run: | | ||||
|           cat > pr_files.txt << 'EOF' | ||||
|           ${{ needs.get-pr-info.outputs.PR_FILES }} | ||||
|           EOF | ||||
|  | ||||
|       - name: Write tests_dir file | ||||
|         run: | | ||||
|           cat > tests_dir.txt << 'EOF' | ||||
|           ${{ steps.repo_content.outputs.tests_dir }} | ||||
|           EOF | ||||
|  | ||||
|       - name: Write tests_models_dir file | ||||
|         run: | | ||||
|           cat > tests_models_dir.txt << 'EOF' | ||||
|           ${{ steps.repo_content.outputs.tests_models_dir }} | ||||
|           EOF | ||||
|  | ||||
|       - name: Write tests_quantization_dir file | ||||
|         run: | | ||||
|           cat > tests_quantization_dir.txt << 'EOF' | ||||
|           ${{ steps.repo_content.outputs.tests_quantization_dir }} | ||||
|           EOF | ||||
|  | ||||
|       - name: Run script to get jobs to run | ||||
|         id: get_jobs | ||||
|         run: | | ||||
|           python utils/get_pr_run_slow_jobs.py | tee output.txt | ||||
|           echo "jobs_to_run: $(tail -n 1 output.txt)" | ||||
|           echo "jobs_to_run=$(tail -n 1 output.txt)" >> $GITHUB_OUTPUT | ||||
|  | ||||
|   send_comment: | ||||
|     # Will delete the previous comment and send a new one if: | ||||
|     #   - either the content is changed | ||||
|     #   - or the previous comment is 30 minutes or more old | ||||
|     name: Send a comment to suggest jobs to run | ||||
|     if: ${{ needs.get-jobs.outputs.jobs != '' }} | ||||
|     needs: [get-pr-number, get-jobs] | ||||
|     permissions: | ||||
|       pull-requests: write | ||||
|     runs-on: ubuntu-22.04 | ||||
|     steps: | ||||
|       - name: Check and update comment if needed | ||||
|         uses: actions/github-script@v7 | ||||
|         env: | ||||
|           BODY: "\n\nrun-slow: ${{ needs.get-jobs.outputs.jobs }}" | ||||
|         with: | ||||
|           script: | | ||||
|             const prNumber = ${{ needs.get-pr-number.outputs.PR_NUMBER }}; | ||||
|             const commentPrefix = "**[For maintainers]** Suggested jobs to run (before merge)"; | ||||
|             const thirtyMinutesAgo = new Date(Date.now() - 30 * 60 * 1000); // 30 minutes ago | ||||
|             const newBody = `${commentPrefix}${process.env.BODY}`; | ||||
|              | ||||
|             // Get all comments on the PR | ||||
|             const { data: comments } = await github.rest.issues.listComments({ | ||||
|               owner: context.repo.owner, | ||||
|               repo: context.repo.repo, | ||||
|               issue_number: prNumber | ||||
|             }); | ||||
|              | ||||
|             // Find existing comments that start with our prefix | ||||
|             const existingComments = comments.filter(comment =>  | ||||
|               comment.user.login === 'github-actions[bot]' &&  | ||||
|               comment.body.startsWith(commentPrefix) | ||||
|             ); | ||||
|              | ||||
|             let shouldCreateNewComment = true; | ||||
|             let commentsToDelete = []; | ||||
|              | ||||
|             if (existingComments.length > 0) { | ||||
|               // Get the most recent comment | ||||
|               const mostRecentComment = existingComments | ||||
|                 .sort((a, b) => new Date(b.created_at) - new Date(a.created_at))[0]; | ||||
|                | ||||
|               const commentDate = new Date(mostRecentComment.created_at); | ||||
|               const isOld = commentDate < thirtyMinutesAgo; | ||||
|               const isDifferentContent = mostRecentComment.body !== newBody; | ||||
|                | ||||
|               console.log(`Most recent comment created: ${mostRecentComment.created_at}`); | ||||
|               console.log(`Is older than 30 minutes: ${isOld}`); | ||||
|               console.log(`Has different content: ${isDifferentContent}`); | ||||
|                | ||||
|               if (isOld || isDifferentContent) { | ||||
|                 // Delete all existing comments and create new one | ||||
|                 commentsToDelete = existingComments; | ||||
|                 console.log(`Will delete ${commentsToDelete.length} existing comment(s) and create new one`); | ||||
|               } else { | ||||
|                 // Content is same and comment is recent, skip | ||||
|                 shouldCreateNewComment = false; | ||||
|                 console.log('Comment is recent and content unchanged, skipping update'); | ||||
|               } | ||||
|             } else { | ||||
|               console.log('No existing comments found, will create new one'); | ||||
|             } | ||||
|              | ||||
|             // Delete old comments if needed | ||||
|             for (const comment of commentsToDelete) { | ||||
|               console.log(`Deleting comment #${comment.id} (created: ${comment.created_at})`); | ||||
|               await github.rest.issues.deleteComment({ | ||||
|                 owner: context.repo.owner, | ||||
|                 repo: context.repo.repo, | ||||
|                 comment_id: comment.id | ||||
|               }); | ||||
|             } | ||||
|              | ||||
|             // Create new comment if needed | ||||
|             if (shouldCreateNewComment) { | ||||
|               await github.rest.issues.createComment({ | ||||
|                 owner: context.repo.owner, | ||||
|                 repo: context.repo.repo, | ||||
|                 issue_number: prNumber, | ||||
|                 body: newBody | ||||
|               }); | ||||
|               console.log('✅ New comment created'); | ||||
|             } else { | ||||
|               console.log('ℹ️ No comment update needed'); | ||||
|             } | ||||
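The deleted send_comment job replaces its own previous suggestion comment only when the suggested jobs changed or the latest bot comment is at least 30 minutes old; otherwise it leaves the existing comment alone. That decision, sketched in Python with hypothetical comment records:

from datetime import datetime, timedelta, timezone

def should_repost(existing_comments: list, new_body: str, now: datetime) -> bool:
    """Repost when there is no previous bot comment, the content changed, or it is >= 30 minutes old."""
    if not existing_comments:
        return True
    most_recent = max(existing_comments, key=lambda c: c["created_at"])
    is_old = most_recent["created_at"] < now - timedelta(minutes=30)
    is_different = most_recent["body"] != new_body
    return is_old or is_different

# Hypothetical previous comment, 5 minutes old, with identical content -> no repost.
now = datetime.now(timezone.utc)
previous = [{"created_at": now - timedelta(minutes=5), "body": "run-slow: models/bert"}]
print(should_repost(previous, "run-slow: models/bert", now))  # False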
							
								
								
									
250  .github/workflows/push-important-models.yml  vendored
							| @ -4,6 +4,17 @@ on: | ||||
|   push: | ||||
|     branches: [ main ] | ||||
|  | ||||
| env: | ||||
|   OUTPUT_SLACK_CHANNEL_ID: "C06L2SGMEEA" | ||||
|   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }} | ||||
|   HF_HOME: /mnt/cache | ||||
|   TRANSFORMERS_IS_CI: yes | ||||
|   OMP_NUM_THREADS: 8 | ||||
|   MKL_NUM_THREADS: 8 | ||||
|   RUN_SLOW: yes # For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access. # This token is created under the bot `hf-transformers-bot`. | ||||
|   SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }} | ||||
|   TF_FORCE_GPU_ALLOW_GROWTH: true | ||||
|  | ||||
| jobs: | ||||
|   get_modified_models: | ||||
|     name: "Get all modified files" | ||||
| @ -14,144 +25,111 @@ jobs: | ||||
|       - name: Check out code | ||||
|         uses: actions/checkout@v4 | ||||
|  | ||||
|       - name: Get changed files using `actions/github-script` | ||||
|         id: get-changed-files | ||||
|         uses: actions/github-script@v7 | ||||
|       - name: Get changed files | ||||
|         id: changed-files | ||||
|         uses: tj-actions/changed-files@1c8e6069583811afb28f97afeaf8e7da80c6be5c | ||||
|         with: | ||||
|           script: | | ||||
|             let files = []; | ||||
|              | ||||
|             // Only handle push events | ||||
|             if (context.eventName === 'push') { | ||||
|               const afterSha = context.payload.after; | ||||
|               const branchName = context.payload.ref.replace('refs/heads/', ''); | ||||
|                | ||||
|               let baseSha; | ||||
|                | ||||
|               if (branchName === 'main') { | ||||
|                 console.log('Push to main branch, comparing to parent commit'); | ||||
|                 // Get the parent commit of the pushed commit | ||||
|                 const { data: commit } = await github.rest.repos.getCommit({ | ||||
|                   owner: context.repo.owner, | ||||
|                   repo: context.repo.repo, | ||||
|                   ref: afterSha | ||||
|                 }); | ||||
|                 baseSha = commit.parents[0]?.sha; | ||||
|                 if (!baseSha) { | ||||
|                   throw new Error('No parent commit found for the pushed commit'); | ||||
|                 } | ||||
|               } else { | ||||
|                 console.log(`Push to branch ${branchName}, comparing to main`); | ||||
|                 baseSha = 'main'; | ||||
|               } | ||||
|                | ||||
|               const { data: comparison } = await github.rest.repos.compareCommits({ | ||||
|                 owner: context.repo.owner, | ||||
|                 repo: context.repo.repo, | ||||
|                 base: baseSha, | ||||
|                 head: afterSha | ||||
|               }); | ||||
|                | ||||
|               // Include added, modified, and renamed files | ||||
|               files = comparison.files | ||||
|                 .filter(file => file.status === 'added' || file.status === 'modified' || file.status === 'renamed') | ||||
|                 .map(file => file.filename); | ||||
|             } | ||||
|              | ||||
|             // Include all files under src/transformers/ (not just models subdirectory) | ||||
|             const filteredFiles = files.filter(file =>  | ||||
|               file.startsWith('src/transformers/') | ||||
|             ); | ||||
|              | ||||
|             core.setOutput('changed_files', filteredFiles.join(' ')); | ||||
|             core.setOutput('any_changed', filteredFiles.length > 0 ? 'true' : 'false'); | ||||
|           files: src/transformers/models/** | ||||
|  | ||||
|       - name: Parse changed files with Python | ||||
|         if: steps.get-changed-files.outputs.any_changed == 'true' | ||||
|         env: | ||||
|           CHANGED_FILES: ${{ steps.get-changed-files.outputs.changed_files }} | ||||
|       - name: Run step if only the files listed above change | ||||
|         if: steps.changed-files.outputs.any_changed == 'true' | ||||
|         id: set-matrix | ||||
|         env: | ||||
|           ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }} | ||||
|         run: | | ||||
|           python3 - << 'EOF' | ||||
|           import os | ||||
|           import sys | ||||
|           import json | ||||
|            | ||||
|           # Add the utils directory to Python path | ||||
|           sys.path.insert(0, 'utils') | ||||
|            | ||||
|           # Import the important models list | ||||
|           from important_files import IMPORTANT_MODELS | ||||
|            | ||||
|           print(f"Important models: {IMPORTANT_MODELS}") | ||||
|            | ||||
|           # Get the changed files from the previous step | ||||
|           changed_files_str = os.environ.get('CHANGED_FILES', '') | ||||
|           changed_files = changed_files_str.split() if changed_files_str else [] | ||||
|            | ||||
|           # Filter to only Python files | ||||
|           python_files = [f for f in changed_files if f.endswith('.py')] | ||||
|           print(f"Python files changed: {python_files}") | ||||
|            | ||||
|           result_models = set() | ||||
|            | ||||
|           # Specific files that trigger all models | ||||
|           transformers_utils_files = [ | ||||
|               'modeling_utils.py', | ||||
|               'modeling_rope_utils.py',  | ||||
|               'modeling_flash_attention_utils.py', | ||||
|               'modeling_attn_mask_utils.py', | ||||
|               'cache_utils.py', | ||||
|               'masking_utils.py', | ||||
|               'pytorch_utils.py' | ||||
|           ] | ||||
|            | ||||
|           # Single loop through all Python files | ||||
|           for file in python_files: | ||||
|               # Check for files under src/transformers/models/ | ||||
|               if file.startswith('src/transformers/models/'): | ||||
|                   remaining_path = file[len('src/transformers/models/'):] | ||||
|                   if '/' in remaining_path: | ||||
|                       model_dir = remaining_path.split('/')[0] | ||||
|                       if model_dir in IMPORTANT_MODELS: | ||||
|                           result_models.add(model_dir) | ||||
|                           print(f"Added model directory: {model_dir}") | ||||
|                | ||||
|               # Check for specific files under src/transformers/ or src/transformers/generation/ files | ||||
|               elif file.startswith('src/transformers/generation/') or \ | ||||
|                    (file.startswith('src/transformers/') and os.path.basename(file) in transformers_utils_files): | ||||
|                   print(f"Found core file: {file} - including all important models") | ||||
|                   result_models.update(IMPORTANT_MODELS) | ||||
|                   break  # No need to continue once we include all models | ||||
|            | ||||
|           # Convert to sorted list and create matrix | ||||
|           result_list = sorted(list(result_models)) | ||||
|           print(f"Final model list: {result_list}") | ||||
|            | ||||
|           if result_list: | ||||
|               matrix_json = json.dumps(result_list) | ||||
|               print(f"matrix={matrix_json}") | ||||
|                | ||||
|               # Write to GITHUB_OUTPUT | ||||
|               with open(os.environ['GITHUB_OUTPUT'], 'a') as f: | ||||
|                   f.write(f"matrix={matrix_json}\n") | ||||
|           else: | ||||
|               print("matrix=[]") | ||||
|               with open(os.environ['GITHUB_OUTPUT'], 'a') as f: | ||||
|                   f.write("matrix=[]\n") | ||||
|           EOF | ||||
|  | ||||
|   model-ci: | ||||
|     name: Model CI | ||||
|     uses: ./.github/workflows/self-scheduled.yml | ||||
|             model_arrays=() | ||||
|             for file in $ALL_CHANGED_FILES; do | ||||
|                 model_path="${file#*models/}" | ||||
|                 model_path="models/${model_path%%/*}" | ||||
|                 if grep -qFx "$model_path" utils/important_models.txt; then | ||||
|                     # Append the file to the matrix string | ||||
|                     model_arrays+=("$model_path") | ||||
|                 fi | ||||
|             done | ||||
|             matrix_string=$(printf '"%s", ' "${model_arrays[@]}" | sed 's/, $//') | ||||
|             echo "matrix=[$matrix_string]" >> $GITHUB_OUTPUT | ||||
|   test_modified_files: | ||||
|     needs: get_modified_models | ||||
|     if: needs.get_modified_models.outputs.matrix != '' && needs.get_modified_models.outputs.matrix != '[]' | ||||
|     with: | ||||
|       job: run_models_gpu | ||||
|       slack_report_channel: "#transformers-ci-push" | ||||
|       docker: huggingface/transformers-all-latest-gpu | ||||
|       ci_event: push | ||||
|       report_repo_id: hf-internal-testing/transformers_ci_push | ||||
|       commit_sha: ${{ github.sha }} | ||||
|       models: ${{ needs.get_modified_models.outputs.matrix }} | ||||
|     secrets: inherit | ||||
|     name: Slow & FA2 tests | ||||
|     runs-on: | ||||
|       group: aws-g5-4xlarge-cache | ||||
|     container: | ||||
|       image: huggingface/transformers-all-latest-gpu | ||||
|       options: --gpus all --privileged --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|     if: ${{ needs.get_modified_models.outputs.matrix != '[]' && needs.get_modified_models.outputs.matrix != '' && fromJson(needs.get_modified_models.outputs.matrix)[0] != null }} | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         model-name: ${{ fromJson(needs.get_modified_models.outputs.matrix) }} | ||||
|  | ||||
|     steps: | ||||
|       - name: Check out code | ||||
|         uses: actions/checkout@v4 | ||||
|  | ||||
|       - name: Install locally transformers & other libs | ||||
|         run: | | ||||
|           apt install sudo | ||||
|           sudo -H pip install --upgrade pip | ||||
|           sudo -H pip uninstall -y transformers | ||||
|           sudo -H pip install -U -e ".[testing]" | ||||
|           MAX_JOBS=4 pip install flash-attn --no-build-isolation | ||||
|           pip install bitsandbytes | ||||
|  | ||||
|       - name: NVIDIA-SMI | ||||
|         run: | | ||||
|           nvidia-smi | ||||
|  | ||||
|       - name: Show installed libraries and their versions | ||||
|         run: pip freeze | ||||
|  | ||||
|       - name: Run FA2 tests | ||||
|         id: run_fa2_tests | ||||
|         run: | ||||
|           pytest -rsfE -m "flash_attn_test" --make-reports=${{ matrix.model-name }}_fa2_tests/ tests/${{ matrix.model-name }}/test_modeling_* | ||||
|  | ||||
|       - name: "Test suite reports artifacts: ${{ matrix.model-name }}_fa2_tests" | ||||
|         if: ${{ always() }} | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: ${{ matrix.model-name }}_fa2_tests | ||||
|           path: /transformers/reports/${{ matrix.model-name }}_fa2_tests | ||||
|  | ||||
|       - name: Post to Slack | ||||
|         if: always() | ||||
|         uses: huggingface/hf-workflows/.github/actions/post-slack@main | ||||
|         with: | ||||
|           slack_channel: ${{ env.OUTPUT_SLACK_CHANNEL_ID }} | ||||
|           title: 🤗 Results of the FA2 tests - ${{ matrix.model-name }} | ||||
|           status: ${{ steps.run_fa2_tests.conclusion}} | ||||
|           slack_token: ${{ secrets.CI_SLACK_BOT_TOKEN }} | ||||
|  | ||||
|       - name: Run integration tests | ||||
|         id: run_integration_tests | ||||
|         if: always() | ||||
|         run: | ||||
|           pytest -rsfE -k "IntegrationTest"  --make-reports=tests_integration_${{ matrix.model-name }} tests/${{ matrix.model-name }}/test_modeling_* | ||||
|  | ||||
|       - name: "Test suite reports artifacts: tests_integration_${{ matrix.model-name }}" | ||||
|         if: ${{ always() }} | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: tests_integration_${{ matrix.model-name }} | ||||
|           path: /transformers/reports/tests_integration_${{ matrix.model-name }} | ||||
|  | ||||
|       - name: Post to Slack | ||||
|         if: always() | ||||
|         uses: huggingface/hf-workflows/.github/actions/post-slack@main | ||||
|         with: | ||||
|           slack_channel: ${{ env.OUTPUT_SLACK_CHANNEL_ID }} | ||||
|           title: 🤗 Results of the Integration tests - ${{ matrix.model-name }} | ||||
|           status: ${{ steps.run_integration_tests.conclusion}} | ||||
|           slack_token: ${{ secrets.CI_SLACK_BOT_TOKEN }} | ||||
|  | ||||
|       - name: Tailscale # In order to be able to SSH when a test fails | ||||
|         if: ${{ runner.debug == '1'}} | ||||
|         uses: huggingface/tailscale-action@v1 | ||||
|         with: | ||||
|           authkey: ${{ secrets.TAILSCALE_SSH_AUTHKEY }} | ||||
|           slackChannel: ${{ secrets.SLACK_CIFEEDBACK_CHANNEL }} | ||||
|           slackToken: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }} | ||||
|           waitForSSH: true | ||||
|  | ||||
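The FA2 step above selects tests with `-m "flash_attn_test"`; note that `--make-reports` is an option added by the repository's own test tooling rather than stock pytest. As a generic reminder of how pytest marker selection works (a sketch, not a claim about how these particular tests are declared), a test picked up by that filter would look like:

    import pytest

    @pytest.mark.flash_attn_test   # assumed marker name, matching the -m expression above
    def test_flash_attention_forward():
        ...                        # the real tests live under tests/<model>/test_modeling_*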
.github/workflows/self-comment-ci.yml (16 changed lines, vendored)
							| @ -29,7 +29,7 @@ jobs: | ||||
|     runs-on: ubuntu-22.04 | ||||
|     name: Get PR number | ||||
|     # For security: only allow team members to run | ||||
|     if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "qubvel", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1", "SunMarc", "muellerzr", "eustlb", "MekkCyber", "manueldeprada", "vasqu", "ivarflakstad", "stevhliu", "ebezzam", "remi-or"]'), github.actor) && (startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow')) }} | ||||
|     if: ${{ github.event.issue.state == 'open' && contains(fromJSON('["ydshieh", "ArthurZucker", "zucchini-nlp", "qubvel", "molbap", "gante", "LysandreJik", "Cyrilvallez", "Rocketknight1", "SunMarc", "muellerzr", "eustlb"]'), github.actor) && (startsWith(github.event.comment.body, 'run-slow') || startsWith(github.event.comment.body, 'run slow') || startsWith(github.event.comment.body, 'run_slow')) }} | ||||
|     outputs: | ||||
|       PR_NUMBER: ${{ steps.set_pr_number.outputs.PR_NUMBER }} | ||||
|     steps: | ||||
| @ -145,7 +145,7 @@ jobs: | ||||
|         env: | ||||
|           GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} | ||||
|           MODELS: ${{ needs.get-tests.outputs.models }} | ||||
|           BODY: "\n\nmodels: ${{ needs.get-tests.outputs.models }}\nquantizations: ${{ needs.get-tests.outputs.quantizations }}" | ||||
|           BODY: "This comment contains run-slow, running the specified jobs:\n\nmodels: ${{ needs.get-tests.outputs.models }}\nquantizations: ${{ needs.get-tests.outputs.quantizations }}" | ||||
|         run: | | ||||
|           gh api \ | ||||
|             --method POST \ | ||||
| @ -185,7 +185,7 @@ jobs: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         folders: ${{ fromJson(needs.get-tests.outputs.models) }} | ||||
|         machine_type: [aws-g5-4xlarge-cache, aws-g5-12xlarge-cache] | ||||
|         machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache] | ||||
|     runs-on: | ||||
|        group: '${{ matrix.machine_type }}' | ||||
|     container: | ||||
| @ -239,9 +239,9 @@ jobs: | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "${{ matrix.machine_type }}" | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g5-4xlarge-cache" ]; then | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g5-12xlarge-cache" ]; then | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
| @ -292,7 +292,7 @@ jobs: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         folders: ${{ fromJson(needs.get-tests.outputs.quantizations) }} | ||||
|         machine_type: [aws-g5-4xlarge-cache, aws-g5-12xlarge-cache] | ||||
|         machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache] | ||||
|     runs-on: | ||||
|       group: '${{ matrix.machine_type }}' | ||||
|     container: | ||||
| @ -338,9 +338,9 @@ jobs: | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "${{ matrix.machine_type }}" | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g5-4xlarge-cache" ]; then | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g5-12xlarge-cache" ]; then | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
|  | ||||
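The repeated bash blocks above normalize AWS runner-group names into the single-gpu/multi-gpu labels used for report and artifact names. A compact Python rendition of the same mapping, for illustration only:

    def report_label(machine_type: str) -> str:
        # Mirrors the bash if/elif: unknown runner groups fall through unchanged.
        mapping = {
            "aws-g4dn-2xlarge-cache": "single-gpu",
            "aws-g4dn-12xlarge-cache": "multi-gpu",
        }
        return mapping.get(machine_type, machine_type)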
.github/workflows/self-nightly-caller.yml (61 changed lines, vendored)
							| @ -1,56 +1,43 @@ | ||||
| name: Nvidia CI with nightly torch | ||||
| name: Self-hosted runner (nightly-ci) | ||||
|  | ||||
|  | ||||
| on: | ||||
|   repository_dispatch: | ||||
|   # triggered when the daily scheduled Nvidia CI is completed. | ||||
|   # This way, we can compare the results more easily. | ||||
|   workflow_run: | ||||
|     workflows: ["Nvidia CI"] | ||||
|     branches: ["main"] | ||||
|     types: [completed] | ||||
|   schedule: | ||||
|     - cron: "17 2 * * *" | ||||
|   push: | ||||
|     branches: | ||||
|       - run_ci_with_nightly_torch* | ||||
|  | ||||
| # Used for `push` to easily modify the target workflow runs to compare against | ||||
| env: | ||||
|     prev_workflow_run_id: "" | ||||
|     other_workflow_run_id: "" | ||||
|  | ||||
|       - run_nightly_ci* | ||||
|  | ||||
| jobs: | ||||
|   build_nightly_torch_ci_images: | ||||
|     name: Build CI Docker Images with nightly torch | ||||
|   build_nightly_ci_images: | ||||
|     name: Build Nightly CI Docker Images | ||||
|     if: (github.event_name == 'schedule') || ((github.event_name == 'push') && startsWith(github.ref_name, 'run_nightly_ci')) | ||||
|     uses: ./.github/workflows/build-nightly-ci-docker-images.yml | ||||
|     with: | ||||
|       job: latest-with-torch-nightly-docker | ||||
|     secrets: inherit | ||||
|  | ||||
|   setup: | ||||
|     name: Setup | ||||
|     runs-on: ubuntu-22.04 | ||||
|     steps: | ||||
|       - name: Setup | ||||
|         run: | | ||||
|           mkdir "setup_values" | ||||
|           echo "${{ inputs.prev_workflow_run_id || env.prev_workflow_run_id }}" > "setup_values/prev_workflow_run_id.txt" | ||||
|           echo "${{ inputs.other_workflow_run_id || env.other_workflow_run_id }}" > "setup_values/other_workflow_run_id.txt" | ||||
|  | ||||
|       - name: Upload artifacts | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: setup_values | ||||
|           path: setup_values | ||||
|  | ||||
|   model-ci: | ||||
|     name: Model CI | ||||
|     needs: build_nightly_torch_ci_images | ||||
|     needs: [build_nightly_ci_images] | ||||
|     uses: ./.github/workflows/self-scheduled.yml | ||||
|     with: | ||||
|       job: run_models_gpu | ||||
|       slack_report_channel: "#transformers-ci-past-future" | ||||
|       runner: ci | ||||
|       docker: huggingface/transformers-all-latest-torch-nightly-gpu | ||||
|       ci_event: Nightly CI | ||||
|       report_repo_id: hf-internal-testing/transformers_daily_ci_with_torch_nightly | ||||
|       commit_sha: ${{ github.event.workflow_run.head_sha || github.sha }} | ||||
|     secrets: inherit | ||||
|  | ||||
|   deepspeed-ci: | ||||
|     name: DeepSpeed CI | ||||
|     needs: [build_nightly_ci_images] | ||||
|     uses: ./.github/workflows/self-scheduled.yml | ||||
|     with: | ||||
|       job: run_torch_cuda_extensions_gpu | ||||
|       slack_report_channel: "#transformers-ci-past-future" | ||||
|       runner: ci | ||||
|       # test deepspeed nightly build with the latest release torch | ||||
|       docker: huggingface/transformers-pytorch-deepspeed-latest-gpu | ||||
|       ci_event: Nightly CI | ||||
|       working-directory-prefix: /workspace | ||||
|     secrets: inherit | ||||
|  | ||||
.github/workflows/self-push-amd-mi300-caller.yml (25 changed lines, vendored, new file)
							| @ -0,0 +1,25 @@ | ||||
| name: Self-hosted runner (AMD mi300 CI caller) | ||||
|  | ||||
| on: | ||||
|   #workflow_run: | ||||
|   #  workflows: ["Self-hosted runner (push-caller)"] | ||||
|   #  branches: ["main"] | ||||
|   #  types: [completed] | ||||
|   push: | ||||
|     branches: | ||||
|       - run_amd_push_ci_caller* | ||||
|     paths: | ||||
|       - "src/**" | ||||
|       - "tests/**" | ||||
|       - ".github/**" | ||||
|       - "templates/**" | ||||
|       - "utils/**" | ||||
|  | ||||
| jobs: | ||||
|   run_amd_ci: | ||||
|     name: AMD mi300 | ||||
|     if: (cancelled() != true) && ((github.event_name == 'workflow_run') || ((github.event_name == 'push') && (startsWith(github.ref_name, 'run_amd_push_ci_caller') || startsWith(github.ref_name, 'mi300-ci')))) | ||||
|     uses: ./.github/workflows/self-push-amd.yml | ||||
|     with: | ||||
|       gpu_flavor: mi300 | ||||
|     secrets: inherit | ||||
.github/workflows/self-push.yml (32 changed lines, vendored)
							| @ -31,12 +31,12 @@ jobs: | ||||
|     name: Setup | ||||
|     strategy: | ||||
|       matrix: | ||||
|         machine_type: [aws-g5-4xlarge-cache, aws-g5-12xlarge-cache] | ||||
|         machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache] | ||||
|     runs-on: | ||||
|       group: '${{ matrix.machine_type }}' | ||||
|     container: | ||||
|       image: huggingface/transformers-all-latest-gpu-push-ci | ||||
|       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|     outputs: | ||||
|       matrix: ${{ steps.set-matrix.outputs.matrix }} | ||||
|       test_map: ${{ steps.set-matrix.outputs.test_map }} | ||||
| @ -131,12 +131,12 @@ jobs: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         folders: ${{ fromJson(needs.setup.outputs.matrix) }} | ||||
|         machine_type: [aws-g5-4xlarge-cache] | ||||
|         machine_type: [aws-g4dn-2xlarge-cache] | ||||
|     runs-on: | ||||
|       group: '${{ matrix.machine_type }}' | ||||
|     container: | ||||
|       image: huggingface/transformers-all-latest-gpu-push-ci | ||||
|       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|     env: | ||||
|       # For the meaning of these environment variables, see the job `Setup` | ||||
|       CI_BRANCH_PUSH: ${{ github.event.ref }} | ||||
| @ -169,9 +169,9 @@ jobs: | ||||
|         run: | | ||||
|           echo "${{ matrix.machine_type }}" | ||||
|  | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g5-4xlarge-cache" ]; then | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g5-12xlarge-cache" ]; then | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
| @ -244,7 +244,7 @@ jobs: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         folders: ${{ fromJson(needs.setup.outputs.matrix) }} | ||||
|         machine_type: [aws-g5-12xlarge-cache] | ||||
|         machine_type: [aws-g4dn-12xlarge-cache] | ||||
|     runs-on: | ||||
|       group: '${{ matrix.machine_type }}' | ||||
|     container: | ||||
| @ -282,9 +282,9 @@ jobs: | ||||
|         run: | | ||||
|           echo "${{ matrix.machine_type }}" | ||||
|  | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g5-4xlarge-cache" ]; then | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g5-12xlarge-cache" ]; then | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
| @ -357,12 +357,12 @@ jobs: | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         machine_type: [aws-g5-4xlarge-cache] | ||||
|         machine_type: [aws-g4dn-2xlarge-cache] | ||||
|     runs-on: | ||||
|       group: '${{ matrix.machine_type }}' | ||||
|     container: | ||||
|       image: huggingface/transformers-pytorch-deepspeed-latest-gpu-push-ci | ||||
|       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|     env: | ||||
|       # For the meaning of these environment variables, see the job `Setup` | ||||
|       CI_BRANCH_PUSH: ${{ github.event.ref }} | ||||
| @ -395,9 +395,9 @@ jobs: | ||||
|         run: | | ||||
|           echo "${{ matrix.machine_type }}" | ||||
|  | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g5-4xlarge-cache" ]; then | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g5-12xlarge-cache" ]; then | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
| @ -467,7 +467,7 @@ jobs: | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         machine_type: [aws-g5-12xlarge-cache] | ||||
|         machine_type: [aws-g4dn-12xlarge-cache] | ||||
|     runs-on: | ||||
|       group: '${{ matrix.machine_type }}' | ||||
|     container: | ||||
| @ -505,9 +505,9 @@ jobs: | ||||
|         run: | | ||||
|           echo "${{ matrix.machine_type }}" | ||||
|  | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g5-4xlarge-cache" ]; then | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g5-12xlarge-cache" ]; then | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
|  | ||||
.github/workflows/self-scheduled-amd-mi210-caller.yml (55 changed lines, vendored, new file)
							| @ -0,0 +1,55 @@ | ||||
| name: Self-hosted runner (AMD mi210 scheduled CI caller) | ||||
|  | ||||
| on: | ||||
|   workflow_run: | ||||
|     workflows: ["Self-hosted runner (AMD scheduled CI caller)"] | ||||
|     branches: ["main"] | ||||
|     types: [completed] | ||||
|   push: | ||||
|     branches: | ||||
|       - run_amd_scheduled_ci_caller* | ||||
|  | ||||
| jobs: | ||||
|   model-ci: | ||||
|     name: Model CI | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main | ||||
|     with: | ||||
|       job: run_models_gpu | ||||
|       slack_report_channel: "#transformers-ci-daily-amd" | ||||
|       runner: mi210 | ||||
|       docker: huggingface/transformers-pytorch-amd-gpu | ||||
|       ci_event: Scheduled CI (AMD) - mi210 | ||||
|     secrets: inherit | ||||
|  | ||||
|   torch-pipeline: | ||||
|     name: Torch pipeline CI | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main | ||||
|     with: | ||||
|       job: run_pipelines_torch_gpu | ||||
|       slack_report_channel: "#transformers-ci-daily-amd" | ||||
|       runner: mi210 | ||||
|       docker: huggingface/transformers-pytorch-amd-gpu | ||||
|       ci_event: Scheduled CI (AMD) - mi210 | ||||
|     secrets: inherit | ||||
|  | ||||
|   example-ci: | ||||
|     name: Example CI | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main | ||||
|     with: | ||||
|       job: run_examples_gpu | ||||
|       slack_report_channel: "#transformers-ci-daily-amd" | ||||
|       runner: mi210 | ||||
|       docker: huggingface/transformers-pytorch-amd-gpu | ||||
|       ci_event: Scheduled CI (AMD) - mi210 | ||||
|     secrets: inherit | ||||
|  | ||||
|   deepspeed-ci: | ||||
|     name: DeepSpeed CI | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main | ||||
|     with: | ||||
|       job: run_torch_cuda_extensions_gpu | ||||
|       slack_report_channel: "#transformers-ci-daily-amd" | ||||
|       runner: mi210 | ||||
|       docker: huggingface/transformers-pytorch-deepspeed-amd-gpu | ||||
|       ci_event: Scheduled CI (AMD) - mi210 | ||||
|     secrets: inherit | ||||
| @ -15,11 +15,10 @@ jobs: | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main | ||||
|     with: | ||||
|       job: run_models_gpu | ||||
|       slack_report_channel: "#transformers-ci-daily-amd" | ||||
|       slack_report_channel: "#amd-hf-ci" | ||||
|       runner: mi250 | ||||
|       docker: huggingface/transformers-pytorch-amd-gpu | ||||
|       ci_event: Scheduled CI (AMD) - mi250 | ||||
|       report_repo_id: optimum-amd/transformers_daily_ci | ||||
|     secrets: inherit | ||||
|  | ||||
|   torch-pipeline: | ||||
| @ -27,11 +26,10 @@ jobs: | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main | ||||
|     with: | ||||
|       job: run_pipelines_torch_gpu | ||||
|       slack_report_channel: "#transformers-ci-daily-amd" | ||||
|       slack_report_channel: "#amd-hf-ci" | ||||
|       runner: mi250 | ||||
|       docker: huggingface/transformers-pytorch-amd-gpu | ||||
|       ci_event: Scheduled CI (AMD) - mi250 | ||||
|       report_repo_id: optimum-amd/transformers_daily_ci | ||||
|     secrets: inherit | ||||
|  | ||||
|   example-ci: | ||||
| @ -39,11 +37,10 @@ jobs: | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main | ||||
|     with: | ||||
|       job: run_examples_gpu | ||||
|       slack_report_channel: "#transformers-ci-daily-amd" | ||||
|       slack_report_channel: "#amd-hf-ci" | ||||
|       runner: mi250 | ||||
|       docker: huggingface/transformers-pytorch-amd-gpu | ||||
|       ci_event: Scheduled CI (AMD) - mi250 | ||||
|       report_repo_id: optimum-amd/transformers_daily_ci | ||||
|     secrets: inherit | ||||
|  | ||||
|   deepspeed-ci: | ||||
| @ -51,9 +48,8 @@ jobs: | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled.yaml@main | ||||
|     with: | ||||
|       job: run_torch_cuda_extensions_gpu | ||||
|       slack_report_channel: "#transformers-ci-daily-amd" | ||||
|       slack_report_channel: "#amd-hf-ci" | ||||
|       runner: mi250 | ||||
|       docker: huggingface/transformers-pytorch-deepspeed-amd-gpu | ||||
|       ci_event: Scheduled CI (AMD) - mi250 | ||||
|       report_repo_id: optimum-amd/transformers_daily_ci | ||||
|     secrets: inherit | ||||
|  | ||||
| @ -1,67 +0,0 @@ | ||||
| name: Self-hosted runner scale set (AMD mi325 scheduled CI caller) | ||||
|  | ||||
| # Note: For every job in this workflow, the name of the runner scale set is finalized in the runner yaml i.e. huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml | ||||
| # For example, 1gpu scale set: amd-mi325-ci-1gpu | ||||
| #              2gpu scale set: amd-mi325-ci-2gpu | ||||
|  | ||||
| on: | ||||
|   workflow_run: | ||||
|     workflows: ["Self-hosted runner (AMD scheduled CI caller)"] | ||||
|     branches: ["main"] | ||||
|     types: [completed] | ||||
|   push: | ||||
|     branches: | ||||
|       - run_amd_scheduled_ci_caller* | ||||
|  | ||||
| jobs: | ||||
|   model-ci: | ||||
|     name: Model CI | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main | ||||
|     with: | ||||
|       job: run_models_gpu | ||||
|       slack_report_channel: "#amd-hf-ci" | ||||
|       runner_scale_set: amd-mi325-ci | ||||
|       docker: huggingface/transformers-pytorch-amd-gpu | ||||
|       ci_event: Scheduled CI (AMD) - mi325 | ||||
|       report_repo_id: optimum-amd/transformers_daily_ci | ||||
|       env_file: /etc/podinfo/gha-gpu-isolation-settings | ||||
|     secrets: inherit | ||||
|  | ||||
|   torch-pipeline: | ||||
|     name: Torch pipeline CI | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main | ||||
|     with: | ||||
|       job: run_pipelines_torch_gpu | ||||
|       slack_report_channel: "#amd-hf-ci" | ||||
|       runner_scale_set: amd-mi325-ci | ||||
|       docker: huggingface/transformers-pytorch-amd-gpu | ||||
|       ci_event: Scheduled CI (AMD) - mi325 | ||||
|       report_repo_id: optimum-amd/transformers_daily_ci | ||||
|       env_file: /etc/podinfo/gha-gpu-isolation-settings | ||||
|     secrets: inherit | ||||
|  | ||||
|   example-ci: | ||||
|     name: Example CI | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main | ||||
|     with: | ||||
|       job: run_examples_gpu | ||||
|       slack_report_channel: "#amd-hf-ci" | ||||
|       runner_scale_set: amd-mi325-ci | ||||
|       docker: huggingface/transformers-pytorch-amd-gpu | ||||
|       ci_event: Scheduled CI (AMD) - mi325 | ||||
|       report_repo_id: optimum-amd/transformers_daily_ci | ||||
|       env_file: /etc/podinfo/gha-gpu-isolation-settings | ||||
|     secrets: inherit | ||||
|  | ||||
|   deepspeed-ci: | ||||
|     name: DeepSpeed CI | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main | ||||
|     with: | ||||
|       job: run_torch_cuda_extensions_gpu | ||||
|       slack_report_channel: "#amd-hf-ci" | ||||
|       runner_scale_set: amd-mi325-ci | ||||
|       docker: huggingface/transformers-pytorch-deepspeed-amd-gpu | ||||
|       ci_event: Scheduled CI (AMD) - mi325 | ||||
|       report_repo_id: optimum-amd/transformers_daily_ci | ||||
|       env_file: /etc/podinfo/gha-gpu-isolation-settings | ||||
|     secrets: inherit | ||||
| @ -1,63 +0,0 @@ | ||||
| name: Self-hosted runner scale set (AMD mi355 scheduled CI caller) | ||||
|  | ||||
| # Note: For every job in this workflow, the name of the runner scale set is finalized in the runner yaml i.e. huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml | ||||
| # For example, 1gpu : amd-mi355-ci-1gpu | ||||
| #              2gpu : amd-mi355-ci-2gpu | ||||
|  | ||||
| on: | ||||
|   workflow_run: | ||||
|     workflows: ["Self-hosted runner (AMD scheduled CI caller)"] | ||||
|     branches: ["main"] | ||||
|     types: [completed] | ||||
|   push: | ||||
|     branches: | ||||
|       - run_amd_scheduled_ci_caller* | ||||
|  | ||||
| jobs: | ||||
|   model-ci: | ||||
|     name: Model CI | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main | ||||
|     with: | ||||
|       job: run_models_gpu | ||||
|       slack_report_channel: "#amd-hf-ci" | ||||
|       runner_scale_set: amd-mi355-ci | ||||
|       docker: huggingface/testing-rocm7.0-preview | ||||
|       ci_event: Scheduled CI (AMD) - mi355 | ||||
|       report_repo_id: hf-transformers-bot/transformers-ci-dummy | ||||
|     secrets: inherit | ||||
|  | ||||
|   torch-pipeline: | ||||
|     name: Torch pipeline CI | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main | ||||
|     with: | ||||
|       job: run_pipelines_torch_gpu | ||||
|       slack_report_channel: "#amd-hf-ci" | ||||
|       runner_scale_set: amd-mi355-ci | ||||
|       docker: huggingface/testing-rocm7.0-preview | ||||
|       ci_event: Scheduled CI (AMD) - mi355 | ||||
|       report_repo_id: hf-transformers-bot/transformers-ci-dummy | ||||
|     secrets: inherit | ||||
|  | ||||
|   example-ci: | ||||
|     name: Example CI | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main | ||||
|     with: | ||||
|       job: run_examples_gpu | ||||
|       slack_report_channel: "#amd-hf-ci" | ||||
|       runner_scale_set: amd-mi355-ci | ||||
|       docker: huggingface/testing-rocm7.0-preview | ||||
|       ci_event: Scheduled CI (AMD) - mi355 | ||||
|       report_repo_id: hf-transformers-bot/transformers-ci-dummy | ||||
|     secrets: inherit | ||||
|  | ||||
|   deepspeed-ci: | ||||
|     name: DeepSpeed CI | ||||
|     uses: huggingface/hf-workflows/.github/workflows/transformers_amd_ci_scheduled_arc_scale_set.yaml@main | ||||
|     with: | ||||
|       job: run_torch_cuda_extensions_gpu | ||||
|       slack_report_channel: "#amd-hf-ci" | ||||
|       runner_scale_set: amd-mi355-ci | ||||
|       docker: huggingface/testing-rocm7.0-preview | ||||
|       ci_event: Scheduled CI (AMD) - mi355 | ||||
|       report_repo_id: hf-transformers-bot/transformers-ci-dummy | ||||
|     secrets: inherit | ||||
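Several of these callers pass a `report_repo_id` pointing at a Hugging Face Hub repo that collects CI reports. How the downstream workflow writes to it is not shown in this excerpt; as a rough, hypothetical sketch of uploading a report file to such a repo with `huggingface_hub`:

    from huggingface_hub import HfApi

    api = HfApi(token="hf_...")  # a token with write access; placeholder value
    api.upload_file(
        path_or_fileobj="reports/failures_short.txt",
        path_in_repo="2025-06-01/failures_short.txt",  # illustrative layout, not the CI's actual one
        repo_id="optimum-amd/transformers_daily_ci",   # one of the report_repo_id values above
        repo_type="dataset",                           # assuming the report repo is a dataset
    )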
.github/workflows/self-scheduled-caller.yml (19 changed lines, vendored)
							| @ -1,12 +1,13 @@ | ||||
| name: Nvidia CI | ||||
| name: Self-hosted runner (scheduled) | ||||
|  | ||||
|  | ||||
| on: | ||||
|   repository_dispatch: | ||||
|   schedule: | ||||
|     - cron: "17 2 * * *" | ||||
| #  repository_dispatch: | ||||
| #  schedule: | ||||
| #    - cron: "17 2 * * *" | ||||
|   push: | ||||
|     branches: | ||||
|       - run_with_info | ||||
|       - ci_with_torch_version_base | ||||
|   workflow_dispatch: | ||||
|     inputs: | ||||
|       prev_workflow_run_id: | ||||
| @ -21,10 +22,10 @@ on: | ||||
|         default: "" | ||||
|  | ||||
|  | ||||
| # Used for `push` to easily modify the target workflow runs to compare against | ||||
| # Used for `push` to easily modify the target workflow runs to compare against | ||||
| env: | ||||
|     prev_workflow_run_id: "" | ||||
|     other_workflow_run_id: "" | ||||
|     other_workflow_run_id: "15084441438" | ||||
|  | ||||
|  | ||||
| jobs: | ||||
| @ -50,9 +51,7 @@ jobs: | ||||
|     with: | ||||
|       job: run_models_gpu | ||||
|       slack_report_channel: "#transformers-ci-dummy" | ||||
|       runner: daily-ci | ||||
|       docker: huggingface/transformers-all-latest-gpu | ||||
|       ci_event: Daily CI | ||||
|       runner_type: "a10" | ||||
|       report_repo_id: hf-internal-testing/transformers_daily_ci | ||||
|       commit_sha: ${{ github.sha }} | ||||
|     secrets: inherit | ||||
|  | ||||
.github/workflows/self-scheduled-intel-gaudi.yml (342 changed lines, vendored)
							| @ -1,342 +0,0 @@ | ||||
| name: Self-hosted runner (scheduled-intel-gaudi) | ||||
|  | ||||
| on: | ||||
|   workflow_call: | ||||
|     inputs: | ||||
|       job: | ||||
|         required: true | ||||
|         type: string | ||||
|       slack_report_channel: | ||||
|         required: true | ||||
|         type: string | ||||
|       runner_scale_set: | ||||
|         required: true | ||||
|         type: string | ||||
|       ci_event: | ||||
|         required: true | ||||
|         type: string | ||||
|       report_repo_id: | ||||
|         required: true | ||||
|         type: string | ||||
|  | ||||
| env: | ||||
|   NUM_SLICES: 2 | ||||
|   RUN_SLOW: yes | ||||
|   PT_HPU_LAZY_MODE: 0 | ||||
|   TRANSFORMERS_IS_CI: yes | ||||
|   PT_ENABLE_INT64_SUPPORT: 1 | ||||
|   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }} | ||||
|   SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }} | ||||
|   HF_HOME: /mnt/cache/.cache/huggingface | ||||
|  | ||||
| jobs: | ||||
|   setup: | ||||
|     if: contains(fromJSON('["run_models_gpu", "run_trainer_and_fsdp_gpu"]'), inputs.job) | ||||
|     name: Setup | ||||
|     runs-on: ubuntu-latest | ||||
|     outputs: | ||||
|       slice_ids: ${{ steps.set-matrix.outputs.slice_ids }} | ||||
|       folder_slices: ${{ steps.set-matrix.outputs.folder_slices }} | ||||
|       quantization_matrix: ${{ steps.set-matrix.outputs.quantization_matrix }} | ||||
|     steps: | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           fetch-depth: 0 | ||||
|  | ||||
|       - name: Set up Python | ||||
|         uses: actions/setup-python@v5 | ||||
|         with: | ||||
|           python-version: "3.10" | ||||
|  | ||||
|       - id: set-matrix | ||||
|         if: contains(fromJSON('["run_models_gpu", "run_trainer_and_fsdp_gpu"]'), inputs.job) | ||||
|         name: Identify models to test | ||||
|         working-directory: tests | ||||
|         run: | | ||||
|           if [ "${{ inputs.job }}" = "run_models_gpu" ]; then | ||||
|             echo "folder_slices=$(python3 ../utils/split_model_tests.py --num_splits ${{ env.NUM_SLICES }})" >> $GITHUB_OUTPUT | ||||
|             echo "slice_ids=$(python3 -c 'd = list(range(${{ env.NUM_SLICES }})); print(d)')" >> $GITHUB_OUTPUT | ||||
|           elif [ "${{ inputs.job }}" = "run_trainer_and_fsdp_gpu" ]; then | ||||
|             echo "folder_slices=[['trainer'], ['fsdp']]" >> $GITHUB_OUTPUT | ||||
|             echo "slice_ids=[0, 1]" >> $GITHUB_OUTPUT | ||||
|           fi | ||||
|  | ||||
|       - id: set-matrix-quantization | ||||
|         if: ${{ inputs.job == 'run_quantization_torch_gpu' }} | ||||
|         name: Identify quantization method to test | ||||
|         working-directory: tests | ||||
|         run: | | ||||
|           echo "quantization_matrix=$(python3 -c 'import os; tests = os.getcwd(); quantization_tests = os.listdir(os.path.join(tests, "quantization")); d = sorted(list(filter(os.path.isdir, [f"quantization/{x}" for x in quantization_tests]))) ;  print(d)')" >> $GITHUB_OUTPUT | ||||
|  | ||||
|   run_models_gpu: | ||||
|     if: ${{ inputs.job == 'run_models_gpu' }} | ||||
|     name: " " | ||||
|     needs: setup | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         machine_type: [1gaudi, 2gaudi] | ||||
|         slice_id: ${{ fromJSON(needs.setup.outputs.slice_ids) }} | ||||
|     uses: ./.github/workflows/model_jobs_intel_gaudi.yml | ||||
|     with: | ||||
|       slice_id: ${{ matrix.slice_id }} | ||||
|       machine_type: ${{ matrix.machine_type }} | ||||
|       folder_slices: ${{ needs.setup.outputs.folder_slices }} | ||||
|       runner: ${{ inputs.runner_scale_set }}-${{ matrix.machine_type }} | ||||
|     secrets: inherit | ||||
|  | ||||
|   run_trainer_and_fsdp_gpu: | ||||
|     if: ${{ inputs.job == 'run_trainer_and_fsdp_gpu' }} | ||||
|     name: " " | ||||
|     needs: setup | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         machine_type: [1gaudi, 2gaudi] | ||||
|         slice_id: ${{ fromJSON(needs.setup.outputs.slice_ids) }} | ||||
|     uses: ./.github/workflows/model_jobs_intel_gaudi.yml | ||||
|     with: | ||||
|       slice_id: ${{ matrix.slice_id }} | ||||
|       machine_type: ${{ matrix.machine_type }} | ||||
|       folder_slices: ${{ needs.setup.outputs.folder_slices }} | ||||
|       runner: ${{ inputs.runner_scale_set }}-${{ matrix.machine_type }} | ||||
|       report_name_prefix: run_trainer_and_fsdp_gpu | ||||
|     secrets: inherit | ||||
|  | ||||
|   run_pipelines_torch_gpu: | ||||
|     if: ${{ inputs.job == 'run_pipelines_torch_gpu' }} | ||||
|     name: Pipelines | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         machine_type: [1gaudi, 2gaudi] | ||||
|     runs-on: | ||||
|       group: ${{ inputs.runner_scale_set }}-${{ matrix.machine_type }} | ||||
|     container: | ||||
|       image: vault.habana.ai/gaudi-docker/1.21.1/ubuntu22.04/habanalabs/pytorch-installer-2.6.0:latest | ||||
|       options: --runtime=habana | ||||
|         -v /mnt/cache/.cache/huggingface:/mnt/cache/.cache/huggingface | ||||
|         --env OMPI_MCA_btl_vader_single_copy_mechanism=none | ||||
|         --env HABANA_VISIBLE_DEVICES | ||||
|         --env HABANA_VISIBLE_MODULES | ||||
|         --cap-add=sys_nice | ||||
|         --shm-size=64G | ||||
|     steps: | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           fetch-depth: 0 | ||||
|  | ||||
|       - name: Install dependencies | ||||
|         run: | | ||||
|           pip install -e .[testing,torch] "numpy<2.0.0" scipy scikit-learn librosa soundfile | ||||
|  | ||||
|       - name: HL-SMI | ||||
|         run: | | ||||
|           hl-smi | ||||
|           echo "HABANA_VISIBLE_DEVICES=${HABANA_VISIBLE_DEVICES}" | ||||
|           echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}" | ||||
|  | ||||
|       - name: Environment | ||||
|         run: python3 utils/print_env.py | ||||
|  | ||||
|       - name: Show installed libraries and their versions | ||||
|         run: pip freeze | ||||
|  | ||||
|       - name: Set `machine_type` for report and artifact names | ||||
|         shell: bash | ||||
|         run: | | ||||
|           if [ "${{ matrix.machine_type }}" = "1gaudi" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "2gaudi" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
|           fi | ||||
|           echo "machine_type=$machine_type" >> $GITHUB_ENV | ||||
|  | ||||
|       - name: Run all pipeline tests on Intel Gaudi | ||||
|         run: | | ||||
|           python3 -m pytest -v --make-reports=${{ env.machine_type }}_run_pipelines_torch_gpu_test_reports tests/pipelines -m "not not_device_test" | ||||
|  | ||||
|       - name: Failure short reports | ||||
|         if: ${{ failure() }} | ||||
|         continue-on-error: true | ||||
|         run: | | ||||
|           cat reports/${{ env.machine_type }}_run_pipelines_torch_gpu_test_reports/failures_short.txt | ||||
|  | ||||
|       - name: "Test suite reports artifacts: ${{ env.machine_type }}_run_pipelines_torch_gpu_test_reports" | ||||
|         if: ${{ always() }} | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: ${{ env.machine_type }}_run_pipelines_torch_gpu_test_reports | ||||
|           path: reports/${{ env.machine_type }}_run_pipelines_torch_gpu_test_reports | ||||
|  | ||||
|   run_examples_gpu: | ||||
|     if: ${{ inputs.job == 'run_examples_gpu' }} | ||||
|     name: Examples directory | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         machine_type: [1gaudi] | ||||
|     runs-on: | ||||
|       group: ${{ inputs.runner_scale_set }}-${{ matrix.machine_type }} | ||||
|     container: | ||||
|       image: vault.habana.ai/gaudi-docker/1.21.1/ubuntu22.04/habanalabs/pytorch-installer-2.6.0:latest | ||||
|       options: --runtime=habana | ||||
|         -v /mnt/cache/.cache/huggingface:/mnt/cache/.cache/huggingface | ||||
|         --env OMPI_MCA_btl_vader_single_copy_mechanism=none | ||||
|         --env HABANA_VISIBLE_DEVICES | ||||
|         --env HABANA_VISIBLE_MODULES | ||||
|         --cap-add=sys_nice | ||||
|         --shm-size=64G | ||||
|     steps: | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           fetch-depth: 0 | ||||
|  | ||||
|       - name: Install dependencies | ||||
|         run: | | ||||
|           pip install -e .[testing,torch] "numpy<2.0.0" scipy scikit-learn librosa soundfile | ||||
|  | ||||
|       - name: HL-SMI | ||||
|         run: | | ||||
|           hl-smi | ||||
|           echo "HABANA_VISIBLE_DEVICES=${HABANA_VISIBLE_DEVICES}" | ||||
|           echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}" | ||||
|  | ||||
|       - name: Environment | ||||
|         run: | | ||||
|           python3 utils/print_env.py | ||||
|  | ||||
|       - name: Show installed libraries and their versions | ||||
|         run: | | ||||
|           pip freeze | ||||
|  | ||||
|       - name: Set `machine_type` for report and artifact names | ||||
|         shell: bash | ||||
|         run: | | ||||
|           if [ "${{ matrix.machine_type }}" = "1gaudi" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "2gaudi" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
|           fi | ||||
|           echo "machine_type=$machine_type" >> $GITHUB_ENV | ||||
|  | ||||
|       - name: Run examples tests on Intel Gaudi | ||||
|         run: | | ||||
|           pip install -r examples/pytorch/_tests_requirements.txt | ||||
|           python3 -m pytest -v --make-reports=${{ env.machine_type }}_run_examples_gpu_test_reports examples/pytorch -m "not not_device_test" | ||||
|  | ||||
|       - name: Failure short reports | ||||
|         if: ${{ failure() }} | ||||
|         continue-on-error: true | ||||
|         run: | | ||||
|           cat reports/${{ env.machine_type }}_run_examples_gpu_test_reports/failures_short.txt | ||||
|  | ||||
|       - name: "Test suite reports artifacts: ${{ env.machine_type }}_run_examples_gpu_test_reports" | ||||
|         if: ${{ always() }} | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: ${{ env.machine_type }}_run_examples_gpu_test_reports | ||||
|           path: reports/${{ env.machine_type }}_run_examples_gpu_test_reports | ||||
|  | ||||
|   run_torch_cuda_extensions_gpu: | ||||
|     if: ${{ inputs.job == 'run_torch_cuda_extensions_gpu' }} | ||||
|     name: Intel Gaudi deepspeed tests | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         machine_type: [1gaudi, 2gaudi] | ||||
|     runs-on: | ||||
|       group: ${{ inputs.runner_scale_set }}-${{ matrix.machine_type }} | ||||
|     container: | ||||
|       image: vault.habana.ai/gaudi-docker/1.21.1/ubuntu22.04/habanalabs/pytorch-installer-2.6.0:latest | ||||
|       options: --runtime=habana | ||||
|         -v /mnt/cache/.cache/huggingface:/mnt/cache/.cache/huggingface | ||||
|         --env OMPI_MCA_btl_vader_single_copy_mechanism=none | ||||
|         --env HABANA_VISIBLE_DEVICES | ||||
|         --env HABANA_VISIBLE_MODULES | ||||
|         --cap-add=sys_nice | ||||
|         --shm-size=64G | ||||
|     steps: | ||||
|       - name: Checkout | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           fetch-depth: 0 | ||||
|  | ||||
|       - name: Install dependencies | ||||
|         run: | | ||||
|           pip install -e .[testing,torch] "numpy<2.0.0" scipy scikit-learn librosa soundfile | ||||
|           pip install git+https://github.com/HabanaAI/DeepSpeed.git@1.20.0 | ||||
|  | ||||
|       - name: HL-SMI | ||||
|         run: | | ||||
|           hl-smi | ||||
|           echo "HABANA_VISIBLE_DEVICES=${HABANA_VISIBLE_DEVICES}" | ||||
|           echo "HABANA_VISIBLE_MODULES=${HABANA_VISIBLE_MODULES}" | ||||
|  | ||||
|       - name: Environment | ||||
|         run: | | ||||
|           python3 utils/print_env.py | ||||
|  | ||||
|       - name: Show installed libraries and their versions | ||||
|         run: | | ||||
|           pip freeze | ||||
|  | ||||
|       - name: Set `machine_type` for report and artifact names | ||||
|         shell: bash | ||||
|         run: | | ||||
|           if [ "${{ matrix.machine_type }}" = "1gaudi" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "2gaudi" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
|           fi | ||||
|           echo "machine_type=$machine_type" >> $GITHUB_ENV | ||||
|  | ||||
|       - name: Run all deepspeed tests on intel Gaudi | ||||
|         run: | | ||||
|           python3 -m pytest -v --make-reports=${{ env.machine_type }}_run_torch_cuda_extensions_gpu_test_reports tests/deepspeed -m "not not_device_test" | ||||
|  | ||||
|       - name: Failure short reports | ||||
|         if: ${{ failure() }} | ||||
|         continue-on-error: true | ||||
|         run: | | ||||
|           cat reports/${{ env.machine_type }}_run_torch_cuda_extensions_gpu_test_reports/failures_short.txt | ||||
|  | ||||
|       - name: "Test suite reports artifacts: ${{ env.machine_type }}_run_torch_cuda_extensions_gpu_test_reports" | ||||
|         if: ${{ always() }} | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: ${{ env.machine_type }}_run_torch_cuda_extensions_gpu_test_reports | ||||
|           path: reports/${{ env.machine_type }}_run_torch_cuda_extensions_gpu_test_reports | ||||
|  | ||||
|   send_results: | ||||
|     name: Slack Report | ||||
|     needs: | ||||
|       [ | ||||
|         setup, | ||||
|         run_models_gpu, | ||||
|         run_examples_gpu, | ||||
|         run_torch_cuda_extensions_gpu, | ||||
|         run_pipelines_torch_gpu, | ||||
|         run_trainer_and_fsdp_gpu, | ||||
|       ] | ||||
|     if: ${{ always() }} | ||||
|     uses: ./.github/workflows/slack-report.yml | ||||
|     with: | ||||
|       job: ${{ inputs.job }} | ||||
|       setup_status: ${{ needs.setup.result }} | ||||
|       slack_report_channel: ${{ inputs.slack_report_channel }} | ||||
|       quantization_matrix: ${{ needs.setup.outputs.quantization_matrix }} | ||||
|       folder_slices: ${{ needs.setup.outputs.folder_slices }} | ||||
|       report_repo_id: ${{ inputs.report_repo_id }} | ||||
|       ci_event: ${{ inputs.ci_event }} | ||||
|  | ||||
|     secrets: inherit | ||||
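The Nvidia and Gaudi setup jobs above shard the model test folders into `NUM_SLICES` groups via `utils/split_model_tests.py` and hand each slice to a matrix job. The actual splitting strategy lives in that script; the sketch below only illustrates the general idea (round-robin into N slices is an assumption, not necessarily what the script does):

    def split_into_slices(folders, num_slices):
        # Distribute test folders round-robin into num_slices groups.
        slices = [[] for _ in range(num_slices)]
        for i, folder in enumerate(sorted(folders)):
            slices[i % num_slices].append(folder)
        return slices

    # Hypothetical example with NUM_SLICES=2:
    print(split_into_slices(["models/bert", "models/gpt2", "models/t5"], 2))
    # -> [['models/bert', 'models/t5'], ['models/gpt2']]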
| @ -1,67 +0,0 @@ | ||||
| name: Self-hosted runner (Intel Gaudi3 scheduled CI caller) | ||||
|  | ||||
| on: | ||||
|   repository_dispatch: | ||||
|   workflow_dispatch: | ||||
|   schedule: | ||||
|     - cron: "17 2 * * *" | ||||
|  | ||||
| jobs: | ||||
|   model-ci: | ||||
|     name: Model CI | ||||
|     uses: ./.github/workflows/self-scheduled-intel-gaudi.yml | ||||
|     with: | ||||
|       job: run_models_gpu | ||||
|       ci_event: Scheduled CI (Intel) - Gaudi3 | ||||
|       runner_scale_set: itac-bm-emr-gaudi3-dell | ||||
|       slack_report_channel: "#transformers-ci-daily-intel-gaudi3" | ||||
|       report_repo_id: optimum-intel/transformers_daily_ci_intel_gaudi3 | ||||
|  | ||||
|     secrets: inherit | ||||
|  | ||||
|   pipeline-ci: | ||||
|     name: Pipeline CI | ||||
|     uses: ./.github/workflows/self-scheduled-intel-gaudi.yml | ||||
|     with: | ||||
|       job: run_pipelines_torch_gpu | ||||
|       ci_event: Scheduled CI (Intel) - Gaudi3 | ||||
|       runner_scale_set: itac-bm-emr-gaudi3-dell | ||||
|       slack_report_channel: "#transformers-ci-daily-intel-gaudi3" | ||||
|       report_repo_id: optimum-intel/transformers_daily_ci_intel_gaudi3 | ||||
|  | ||||
|     secrets: inherit | ||||
|  | ||||
|   example-ci: | ||||
|     name: Example CI | ||||
|     uses: ./.github/workflows/self-scheduled-intel-gaudi.yml | ||||
|     with: | ||||
|       job: run_examples_gpu | ||||
|       ci_event: Scheduled CI (Intel) - Gaudi3 | ||||
|       runner_scale_set: itac-bm-emr-gaudi3-dell | ||||
|       slack_report_channel: "#transformers-ci-daily-intel-gaudi3" | ||||
|       report_repo_id: optimum-intel/transformers_daily_ci_intel_gaudi3 | ||||
|  | ||||
|     secrets: inherit | ||||
|  | ||||
|   deepspeed-ci: | ||||
|     name: DeepSpeed CI | ||||
|     uses: ./.github/workflows/self-scheduled-intel-gaudi.yml | ||||
|     with: | ||||
|       job: run_torch_cuda_extensions_gpu | ||||
|       ci_event: Scheduled CI (Intel) - Gaudi3 | ||||
|       runner_scale_set: itac-bm-emr-gaudi3-dell | ||||
|       slack_report_channel: "#transformers-ci-daily-intel-gaudi3" | ||||
|       report_repo_id: optimum-intel/transformers_daily_ci_intel_gaudi3 | ||||
|  | ||||
|     secrets: inherit | ||||
|  | ||||
|   trainer-fsdp-ci: | ||||
|     name: Trainer/FSDP CI | ||||
|     uses: ./.github/workflows/self-scheduled-intel-gaudi.yml | ||||
|     with: | ||||
|       job: run_trainer_and_fsdp_gpu | ||||
|       ci_event: Scheduled CI (Intel) - Gaudi3 | ||||
|       runner_scale_set: itac-bm-emr-gaudi3-dell | ||||
|       slack_report_channel: "#transformers-ci-daily-intel-gaudi3" | ||||
|       report_repo_id: optimum-intel/transformers_daily_ci_intel_gaudi3 | ||||
|     secrets: inherit | ||||
.github/workflows/self-scheduled.yml (282 changed lines, vendored)
							| @ -1,4 +1,4 @@ | ||||
| name: Nvidia CI (job definitions) | ||||
| name: Self-hosted runner (scheduled) | ||||
|  | ||||
| # Note that each job's dependencies go into a corresponding docker file. | ||||
| # | ||||
| @ -15,6 +15,9 @@ on: | ||||
|       slack_report_channel: | ||||
|         required: true | ||||
|         type: string | ||||
|       runner: | ||||
|         required: true | ||||
|         type: string | ||||
|       docker: | ||||
|         required: true | ||||
|         type: string | ||||
| @ -25,19 +28,7 @@ on: | ||||
|         default: '' | ||||
|         required: false | ||||
|         type: string | ||||
|       report_repo_id: | ||||
|         required: true | ||||
|         type: string | ||||
|       commit_sha: | ||||
|         required: false | ||||
|         type: string | ||||
|       runner_type: | ||||
|         required: false | ||||
|         type: string | ||||
|       models: | ||||
|         default: "" | ||||
|         required: false | ||||
|         type: string | ||||
|  | ||||
|  | ||||
| env: | ||||
|   HF_HOME: /mnt/cache | ||||
| @ -55,16 +46,16 @@ env: | ||||
|  | ||||
| jobs: | ||||
|   setup: | ||||
|     if: contains(fromJSON('["run_models_gpu", "run_quantization_torch_gpu"]'), inputs.job) | ||||
|     name: Setup | ||||
|     if: contains(fromJSON('["run_models_gpu", "run_trainer_and_fsdp_gpu", "run_quantization_torch_gpu"]'), inputs.job) | ||||
|     strategy: | ||||
|       matrix: | ||||
|         machine_type: [aws-g5-4xlarge-cache, aws-g5-12xlarge-cache] | ||||
|         machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache] | ||||
|     runs-on: | ||||
|       group: '${{ matrix.machine_type }}' | ||||
|     container: | ||||
|       image: huggingface/transformers-all-latest-gpu | ||||
|       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|     outputs: | ||||
|       folder_slices: ${{ steps.set-matrix.outputs.folder_slices }} | ||||
|       slice_ids: ${{ steps.set-matrix.outputs.slice_ids }} | ||||
| @ -73,7 +64,7 @@ jobs: | ||||
|       - name: Update clone | ||||
|         working-directory: /transformers | ||||
|         run: | | ||||
|           git fetch && git checkout ${{ inputs.commit_sha || github.sha }} | ||||
|           git fetch && git checkout ${{ github.sha }} | ||||
|  | ||||
|       - name: Cleanup | ||||
|         working-directory: /transformers | ||||
| @ -87,24 +78,19 @@ jobs: | ||||
|         run: pip freeze | ||||
|  | ||||
|       - id: set-matrix | ||||
|         if: contains(fromJSON('["run_models_gpu", "run_trainer_and_fsdp_gpu"]'), inputs.job) | ||||
|         if: ${{ inputs.job == 'run_models_gpu' }} | ||||
|         name: Identify models to test | ||||
|         working-directory: /transformers/tests | ||||
|         run: | | ||||
|           if [ "${{ inputs.job }}" = "run_models_gpu" ]; then | ||||
|             echo "folder_slices=$(python3 ../utils/split_model_tests.py --models '${{ inputs.models }}' --num_splits ${{ env.NUM_SLICES }})" >> $GITHUB_OUTPUT | ||||
|             echo "slice_ids=$(python3 -c 'd = list(range(${{ env.NUM_SLICES }})); print(d)')" >> $GITHUB_OUTPUT | ||||
|           elif [ "${{ inputs.job }}" = "run_trainer_and_fsdp_gpu" ]; then | ||||
|             echo "folder_slices=[['trainer'], ['fsdp']]" >> $GITHUB_OUTPUT | ||||
|             echo "slice_ids=[0, 1]" >> $GITHUB_OUTPUT | ||||
|           fi | ||||
|           echo "folder_slices=$(python3 ../utils/split_model_tests.py --num_splits ${{ env.NUM_SLICES }})" >> $GITHUB_OUTPUT | ||||
|           echo "slice_ids=$(python3 -c 'd = list(range(${{ env.NUM_SLICES }})); print(d)')" >> $GITHUB_OUTPUT | ||||
|  | ||||
|       - id: set-matrix-quantization | ||||
|         if: ${{ inputs.job == 'run_quantization_torch_gpu' }} | ||||
|         name: Identify quantization method to test | ||||
|         working-directory: /transformers/tests | ||||
|         run: | | ||||
|           echo "quantization_matrix=$(python3 -c 'import os; tests = os.getcwd(); quantization_tests = os.listdir(os.path.join(tests, "quantization")); d = sorted(list(filter(os.path.isdir, [f"quantization/{x}" for x in quantization_tests]))) ;  print(d)')" >> $GITHUB_OUTPUT | ||||
|           echo "quantization_matrix=$(python3 -c 'import os; tests = os.getcwd(); quantization_tests = os.listdir(os.path.join(tests, "quantization")); d = sorted(list(filter(os.path.isdir, [f"quantization/{x}" for x in quantization_tests]))) ;  print(["quantization/autoawq"])')" >> $GITHUB_OUTPUT | ||||
|  | ||||
|       - name: NVIDIA-SMI | ||||
|         run: | | ||||
| @ -117,38 +103,15 @@ jobs: | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         machine_type: [aws-g5-4xlarge-cache, aws-g5-12xlarge-cache] | ||||
|         machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache] | ||||
|         slice_id: ${{ fromJSON(needs.setup.outputs.slice_ids) }} | ||||
|     uses: ./.github/workflows/model_jobs.yml | ||||
|     with: | ||||
|       folder_slices: ${{ needs.setup.outputs.folder_slices }} | ||||
|       machine_type: ${{ matrix.machine_type }} | ||||
|       slice_id: ${{ matrix.slice_id }} | ||||
|       runner: ${{ inputs.runner }} | ||||
|       docker: ${{ inputs.docker }} | ||||
|       commit_sha: ${{ inputs.commit_sha || github.sha }} | ||||
|       runner_type: ${{ inputs.runner_type }} | ||||
|       report_repo_id: ${{ inputs.report_repo_id }} | ||||
|     secrets: inherit | ||||
|  | ||||
|   run_trainer_and_fsdp_gpu: | ||||
|     if: ${{ inputs.job == 'run_trainer_and_fsdp_gpu' }} | ||||
|     name: " " | ||||
|     needs: setup | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         machine_type: [aws-g5-4xlarge-cache, aws-g5-12xlarge-cache] | ||||
|         slice_id: [0, 1] | ||||
|     uses: ./.github/workflows/model_jobs.yml | ||||
|     with: | ||||
|       folder_slices: ${{ needs.setup.outputs.folder_slices }} | ||||
|       machine_type: ${{ matrix.machine_type }} | ||||
|       slice_id: ${{ matrix.slice_id }} | ||||
|       docker: ${{ inputs.docker }} | ||||
|       commit_sha: ${{ inputs.commit_sha || github.sha }} | ||||
|       runner_type: ${{ inputs.runner_type }} | ||||
|       report_repo_id: ${{ inputs.report_repo_id }} | ||||
|       report_name_prefix: run_trainer_and_fsdp_gpu | ||||
|     secrets: inherit | ||||
|  | ||||
|   run_pipelines_torch_gpu: | ||||
| @ -157,7 +120,7 @@ jobs: | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         machine_type: [aws-g5-4xlarge-cache, aws-g5-12xlarge-cache] | ||||
|         machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache] | ||||
|     runs-on: | ||||
|       group: '${{ matrix.machine_type }}' | ||||
|     container: | ||||
| @ -166,7 +129,7 @@ jobs: | ||||
|     steps: | ||||
|       - name: Update clone | ||||
|         working-directory: /transformers | ||||
|         run: git fetch && git checkout ${{ inputs.commit_sha || github.sha }} | ||||
|         run: git fetch && git checkout ${{ github.sha }} | ||||
|  | ||||
|       - name: Reinstall transformers in edit mode (remove the one installed during docker image build) | ||||
|         working-directory: /transformers | ||||
| @ -191,9 +154,9 @@ jobs: | ||||
|         run: | | ||||
|           echo "${{ matrix.machine_type }}" | ||||
|  | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g5-4xlarge-cache" ]; then | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g5-12xlarge-cache" ]; then | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
| @ -219,22 +182,23 @@ jobs: | ||||
|           name: ${{ env.machine_type }}_run_pipelines_torch_gpu_test_reports | ||||
|           path: /transformers/reports/${{ env.machine_type }}_run_pipelines_torch_gpu_test_reports | ||||
|  | ||||
|   run_examples_gpu: | ||||
|     if: ${{ inputs.job == 'run_examples_gpu' }} | ||||
|     name: Examples directory | ||||
|   run_pipelines_tf_gpu: | ||||
|     if: ${{ inputs.job == 'run_pipelines_tf_gpu' }} | ||||
|     name: TensorFlow pipelines | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         machine_type: [aws-g5-4xlarge-cache] | ||||
|         machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache] | ||||
|     runs-on: | ||||
|       group: '${{ matrix.machine_type }}' | ||||
|     container: | ||||
|       image: huggingface/transformers-all-latest-gpu | ||||
|       image: huggingface/transformers-tensorflow-gpu | ||||
|       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|     steps: | ||||
|       - name: Update clone | ||||
|         working-directory: /transformers | ||||
|         run: git fetch && git checkout ${{ inputs.commit_sha || github.sha }} | ||||
|         run: | | ||||
|           git fetch && git checkout ${{ github.sha }} | ||||
|  | ||||
|       - name: Reinstall transformers in edit mode (remove the one installed during docker image build) | ||||
|         working-directory: /transformers | ||||
| @ -259,9 +223,77 @@ jobs: | ||||
|         run: | | ||||
|           echo "${{ matrix.machine_type }}" | ||||
|  | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g5-4xlarge-cache" ]; then | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g5-12xlarge-cache" ]; then | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
|           fi | ||||
|  | ||||
|           echo "$machine_type" | ||||
|           echo "machine_type=$machine_type" >> $GITHUB_ENV | ||||
|  | ||||
|       - name: Run all pipeline tests on GPU | ||||
|         working-directory: /transformers | ||||
|         run: | | ||||
|           python3 -m pytest -n 1 -v --dist=loadfile --make-reports=${{ env.machine_type }}_run_pipelines_tf_gpu_test_reports tests/pipelines | ||||
|  | ||||
|       - name: Failure short reports | ||||
|         if: ${{ always() }} | ||||
|         run: | | ||||
|           cat /transformers/reports/${{ env.machine_type }}_run_pipelines_tf_gpu_test_reports/failures_short.txt | ||||
|  | ||||
|       - name: "Test suite reports artifacts: ${{ env.machine_type }}_run_pipelines_tf_gpu_test_reports" | ||||
|         if: ${{ always() }} | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: ${{ env.machine_type }}_run_pipelines_tf_gpu_test_reports | ||||
|           path: /transformers/reports/${{ env.machine_type }}_run_pipelines_tf_gpu_test_reports | ||||
|  | ||||
|   run_examples_gpu: | ||||
|     if: ${{ inputs.job == 'run_examples_gpu' }} | ||||
|     name: Examples directory | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         machine_type: [aws-g4dn-2xlarge-cache] | ||||
|     runs-on: | ||||
|       group: '${{ matrix.machine_type }}' | ||||
|     container: | ||||
|       image: huggingface/transformers-all-latest-gpu | ||||
|       options: --gpus 0 --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/ | ||||
|     steps: | ||||
|       - name: Update clone | ||||
|         working-directory: /transformers | ||||
|         run: git fetch && git checkout ${{ github.sha }} | ||||
|  | ||||
|       - name: Reinstall transformers in edit mode (remove the one installed during docker image build) | ||||
|         working-directory: /transformers | ||||
|         run: python3 -m pip uninstall -y transformers && python3 -m pip install -e . | ||||
|  | ||||
|       - name: NVIDIA-SMI | ||||
|         run: | | ||||
|           nvidia-smi | ||||
|  | ||||
|       - name: Environment | ||||
|         working-directory: /transformers | ||||
|         run: | | ||||
|           python3 utils/print_env.py | ||||
|  | ||||
|       - name: Show installed libraries and their versions | ||||
|         working-directory: /transformers | ||||
|         run: pip freeze | ||||
|  | ||||
|       - name: Set `machine_type` for report and artifact names | ||||
|         working-directory: /transformers | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "${{ matrix.machine_type }}" | ||||
|  | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
| @ -294,7 +326,7 @@ jobs: | ||||
|     strategy: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         machine_type: [aws-g5-4xlarge-cache, aws-g5-12xlarge-cache] | ||||
|         machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache] | ||||
|     runs-on: | ||||
|       group: '${{ matrix.machine_type }}' | ||||
|     container: | ||||
| @ -303,7 +335,7 @@ jobs: | ||||
|     steps: | ||||
|       - name: Update clone | ||||
|         working-directory: ${{ inputs.working-directory-prefix }}/transformers | ||||
|         run: git fetch && git checkout ${{ inputs.commit_sha || github.sha }} | ||||
|         run: git fetch && git checkout ${{ github.sha }} | ||||
|  | ||||
|       - name: Reinstall transformers in edit mode (remove the one installed during docker image build) | ||||
|         working-directory: ${{ inputs.working-directory-prefix }}/transformers | ||||
| @ -351,14 +383,14 @@ jobs: | ||||
|         run: pip freeze | ||||
|  | ||||
|       - name: Set `machine_type` for report and artifact names | ||||
|         working-directory: ${{ inputs.working-directory-prefix }}/transformers | ||||
|         working-directory: /transformers | ||||
|         shell: bash | ||||
|         run: | | ||||
|           echo "${{ matrix.machine_type }}" | ||||
|  | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g5-4xlarge-cache" ]; then | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g5-12xlarge-cache" ]; then | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
| @ -393,7 +425,7 @@ jobs: | ||||
|       fail-fast: false | ||||
|       matrix: | ||||
|         folders: ${{ fromJson(needs.setup.outputs.quantization_matrix) }} | ||||
|         machine_type: [aws-g5-4xlarge-cache, aws-g5-12xlarge-cache] | ||||
|         machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache] | ||||
|     runs-on: | ||||
|       group: '${{ matrix.machine_type }}' | ||||
|     container: | ||||
| @ -411,7 +443,7 @@ jobs: | ||||
|  | ||||
|       - name: Update clone | ||||
|         working-directory: /transformers | ||||
|         run: git fetch && git checkout ${{ inputs.commit_sha || github.sha }} | ||||
|         run: git fetch && git checkout ${{ github.sha }} | ||||
|  | ||||
|       - name: Reinstall transformers in edit mode (remove the one installed during docker image build) | ||||
|         working-directory: /transformers | ||||
| @ -436,9 +468,9 @@ jobs: | ||||
|         run: | | ||||
|           echo "${{ matrix.machine_type }}" | ||||
|  | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g5-4xlarge-cache" ]; then | ||||
|           if [ "${{ matrix.machine_type }}" = "aws-g4dn-2xlarge-cache" ]; then | ||||
|             machine_type=single-gpu | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g5-12xlarge-cache" ]; then | ||||
|           elif [ "${{ matrix.machine_type }}" = "aws-g4dn-12xlarge-cache" ]; then | ||||
|             machine_type=multi-gpu | ||||
|           else | ||||
|             machine_type=${{ matrix.machine_type }} | ||||
| @ -464,61 +496,60 @@ jobs: | ||||
|           name: ${{ env.machine_type }}_run_quantization_torch_gpu_${{ env.matrix_folders }}_test_reports | ||||
|           path: /transformers/reports/${{ env.machine_type }}_run_quantization_torch_gpu_${{ matrix.folders }}_test_reports | ||||
|  | ||||
|   run_extract_warnings: | ||||
|     # Let's only do this for the job `run_models_gpu` to simplify the (already complex) logic. | ||||
|     if: ${{ always() && inputs.job == 'run_models_gpu' }} | ||||
|     name: Extract warnings in CI artifacts | ||||
|     runs-on: ubuntu-22.04 | ||||
|     needs: [setup, run_models_gpu] | ||||
|     steps: | ||||
|       - name: Checkout transformers | ||||
|         uses: actions/checkout@v4 | ||||
|         with: | ||||
|           fetch-depth: 2 | ||||
|           ref: ${{ inputs.commit_sha || github.sha }} | ||||
|  | ||||
|       - name: Install transformers | ||||
|         run: pip install transformers | ||||
|  | ||||
|       - name: Show installed libraries and their versions | ||||
|         run: pip freeze | ||||
|  | ||||
|       - name: Create output directory | ||||
|         run: mkdir warnings_in_ci | ||||
|  | ||||
|       - uses: actions/download-artifact@v4 | ||||
|         with: | ||||
|           path: warnings_in_ci | ||||
|  | ||||
|       - name: Show artifacts | ||||
|         run: echo "$(python3 -c 'import os; d = os.listdir(); print(d)')" | ||||
|         working-directory: warnings_in_ci | ||||
|  | ||||
|       - name: Extract warnings in CI artifacts | ||||
|         run: | | ||||
|           python3 utils/extract_warnings.py --workflow_run_id ${{ github.run_id }} --output_dir warnings_in_ci --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} --from_gh | ||||
|           echo "$(python3 -c 'import os; import json; fp = open("warnings_in_ci/selected_warnings.json"); d = json.load(fp); d = "\n".join(d) ;print(d)')" | ||||
|  | ||||
|       - name: Upload artifact | ||||
|         if: ${{ always() }} | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: warnings_in_ci | ||||
|           path: warnings_in_ci/selected_warnings.json | ||||
| #  run_extract_warnings: | ||||
| #    # Let's only do this for the job `run_models_gpu` to simplify the (already complex) logic. | ||||
| #    if: ${{ always() && inputs.job == 'run_models_gpu' }} | ||||
| #    name: Extract warnings in CI artifacts | ||||
| #    runs-on: ubuntu-22.04 | ||||
| #    needs: [setup, run_models_gpu] | ||||
| #    steps: | ||||
| #      - name: Checkout transformers | ||||
| #        uses: actions/checkout@v4 | ||||
| #        with: | ||||
| #          fetch-depth: 2 | ||||
| # | ||||
| #      - name: Install transformers | ||||
| #        run: pip install transformers | ||||
| # | ||||
| #      - name: Show installed libraries and their versions | ||||
| #        run: pip freeze | ||||
| # | ||||
| #      - name: Create output directory | ||||
| #        run: mkdir warnings_in_ci | ||||
| # | ||||
| #      - uses: actions/download-artifact@v4 | ||||
| #        with: | ||||
| #          path: warnings_in_ci | ||||
| # | ||||
| #      - name: Show artifacts | ||||
| #        run: echo "$(python3 -c 'import os; d = os.listdir(); print(d)')" | ||||
| #        working-directory: warnings_in_ci | ||||
| # | ||||
| #      - name: Extract warnings in CI artifacts | ||||
| #        run: | | ||||
| #          python3 utils/extract_warnings.py --workflow_run_id ${{ github.run_id }} --output_dir warnings_in_ci --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} --from_gh | ||||
| #          echo "$(python3 -c 'import os; import json; fp = open("warnings_in_ci/selected_warnings.json"); d = json.load(fp); d = "\n".join(d) ;print(d)')" | ||||
| # | ||||
| #      - name: Upload artifact | ||||
| #        if: ${{ always() }} | ||||
| #        uses: actions/upload-artifact@v4 | ||||
| #        with: | ||||
| #          name: warnings_in_ci | ||||
| #          path: warnings_in_ci/selected_warnings.json | ||||
|  | ||||
|   send_results: | ||||
|     name: Slack Report | ||||
|     needs: [ | ||||
|       setup, | ||||
|       run_models_gpu, | ||||
|       run_trainer_and_fsdp_gpu, | ||||
|       run_pipelines_torch_gpu, | ||||
|       run_pipelines_tf_gpu, | ||||
|       run_examples_gpu, | ||||
|       run_torch_cuda_extensions_gpu, | ||||
|       run_quantization_torch_gpu, | ||||
|       run_extract_warnings | ||||
| #      run_extract_warnings | ||||
|     ] | ||||
|     if: always() && !cancelled() | ||||
|     if: ${{ always() }} | ||||
|     uses: ./.github/workflows/slack-report.yml | ||||
|     with: | ||||
|       job: ${{ inputs.job }} | ||||
| @ -529,22 +560,15 @@ jobs: | ||||
|       folder_slices: ${{ needs.setup.outputs.folder_slices }} | ||||
|       quantization_matrix: ${{ needs.setup.outputs.quantization_matrix }} | ||||
|       ci_event: ${{ inputs.ci_event }} | ||||
|       report_repo_id: ${{ inputs.report_repo_id }} | ||||
|       commit_sha: ${{ inputs.commit_sha || github.sha }} | ||||
|  | ||||
|     secrets: inherit | ||||
|  | ||||
|   check_new_failures: | ||||
|     if: ${{ always() && inputs.ci_event == 'Daily CI' && needs.send_results.result == 'success' }} | ||||
|     name: Check new failures | ||||
|   check_new_model_failures: | ||||
|     if: ${{ always() && inputs.ci_event == 'Daily CI' && inputs.job == 'run_models_gpu' && needs.send_results.result == 'success' }} | ||||
|     name: Check new model failures | ||||
|     needs: send_results | ||||
|     uses: ./.github/workflows/check_failed_tests.yml | ||||
|     uses: ./.github/workflows/check_failed_model_tests.yml | ||||
|     with: | ||||
|       docker: ${{ inputs.docker }} | ||||
|       start_sha: ${{ inputs.commit_sha || github.sha }} | ||||
|       job: ${{ inputs.job }} | ||||
|       slack_report_channel: ${{ inputs.slack_report_channel }} | ||||
|       ci_event: ${{ inputs.ci_event }} | ||||
|       report_repo_id: ${{ inputs.report_repo_id }} | ||||
|  | ||||
|       start_sha: ${{ github.sha }} | ||||
|     secrets: inherit | ||||
|  | ||||
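For context on the `setup` job above: `folder_slices` is consumed by the downstream matrix as a list of lists of test folders, and `slice_ids` is simply `list(range(NUM_SLICES))`. The splitting utility itself is not part of this diff; the sketch below only illustrates, under that assumption, how a script like `utils/split_model_tests.py` could produce compatible output.

```py
# Illustrative sketch only: the real utils/split_model_tests.py is not shown in
# this diff and may work differently. This version chunks the model test folders
# into --num_splits roughly equal slices and prints them as JSON, which is the
# shape the workflow's fromJSON() calls expect.
import argparse
import json
import os


def split_folders(folders, num_splits):
    folders = sorted(folders)
    chunk = -(-len(folders) // num_splits)  # ceiling division
    return [folders[i : i + chunk] for i in range(0, len(folders), chunk)]


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--num_splits", type=int, required=True)
    args = parser.parse_args()

    # Run from /transformers/tests, as the workflow step does.
    model_dirs = [f"models/{d}" for d in os.listdir("models") if os.path.isdir(os.path.join("models", d))]
    print(json.dumps(split_folders(model_dirs, args.num_splits)))
```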
							
								
								
									
58  .github/workflows/slack-report.yml (vendored)
| @ -21,13 +6,6 @@ on: | ||||
|       ci_event: | ||||
|         required: true | ||||
|         type: string | ||||
|       report_repo_id: | ||||
|         required: true | ||||
|         type: string | ||||
|       commit_sha: | ||||
|         required: false | ||||
|         type: string | ||||
|  | ||||
|  | ||||
| env: | ||||
|   TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN: ${{ secrets.TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN }} | ||||
| @ -36,7 +29,7 @@ jobs: | ||||
|   send_results: | ||||
|     name: Send results to webhook | ||||
|     runs-on: ubuntu-22.04 | ||||
|     if: always() && !cancelled() | ||||
|     if: always() | ||||
|     steps: | ||||
|       - name: Preliminary job status | ||||
|         shell: bash | ||||
| @ -45,10 +38,6 @@ jobs: | ||||
|           echo "Setup status: ${{ inputs.setup_status }}" | ||||
|  | ||||
|       - uses: actions/checkout@v4 | ||||
|         with: | ||||
|           fetch-depth: 2 | ||||
|           ref: ${{ inputs.commit_sha || github.sha }} | ||||
|  | ||||
|       - uses: actions/download-artifact@v4 | ||||
|  | ||||
|       - name: Prepare some setup values | ||||
| @ -66,7 +55,7 @@ jobs: | ||||
|           fi | ||||
|  | ||||
|       - name: Send message to Slack | ||||
|         shell: bash | ||||
|         if: ${{ inputs.job != 'run_quantization_torch_gpu' }} | ||||
|         env: | ||||
|           CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }} | ||||
|           CI_SLACK_CHANNEL_ID: ${{ secrets.CI_SLACK_CHANNEL_ID }} | ||||
| @ -75,25 +64,22 @@ jobs: | ||||
|           SLACK_REPORT_CHANNEL: ${{ inputs.slack_report_channel }} | ||||
|           ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }} | ||||
|           CI_EVENT: ${{ inputs.ci_event }} | ||||
|           # This `CI_TITLE` would be empty for `schedule` or `workflow_run` events. | ||||
|           CI_TITLE: ${{ github.event.head_commit.message }} | ||||
|           CI_SHA: ${{ inputs.commit_sha || github.sha }} | ||||
|           CI_SHA: ${{ github.sha }} | ||||
|           CI_TEST_JOB: ${{ inputs.job }} | ||||
|           SETUP_STATUS: ${{ inputs.setup_status }} | ||||
|           REPORT_REPO_ID: ${{ inputs.report_repo_id }} | ||||
|         # We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change | ||||
|         # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`. | ||||
|         # For a job that doesn't depend on (i.e. `needs`) `setup`, the value for `inputs.folder_slices` would be an | ||||
|         # empty string, and the called script still gets one argument (which is the empty string). | ||||
|         run: | | ||||
|           echo "$PREV_WORKFLOW_RUN_ID" | ||||
|           echo "$OTHER_WORKFLOW_RUN_ID" | ||||
|           echo "$prev_workflow_run_id" | ||||
|           echo "$other_workflow_run_id" | ||||
|           pip install huggingface_hub | ||||
|           pip install slack_sdk | ||||
|           pip show slack_sdk | ||||
|           if [ "${{ inputs.quantization_matrix }}" != "" ]; then | ||||
|             python utils/notification_service.py "${{ inputs.quantization_matrix }}" | ||||
|           else | ||||
|             python utils/notification_service.py "${{ inputs.folder_slices }}" | ||||
|           fi | ||||
|           python utils/notification_service.py "${{ inputs.folder_slices }}" | ||||
|  | ||||
|       # Upload complete failure tables, as they might be big and only truncated versions could be sent to Slack. | ||||
|       - name: Failure table artifacts | ||||
| @ -101,3 +87,31 @@ jobs: | ||||
|         with: | ||||
|           name: ci_results_${{ inputs.job }} | ||||
|           path: ci_results_${{ inputs.job }} | ||||
|  | ||||
|       - uses: actions/checkout@v4 | ||||
|       - uses: actions/download-artifact@v4 | ||||
|       - name: Send message to Slack for quantization workflow | ||||
|         if: ${{ inputs.job == 'run_quantization_torch_gpu' }} | ||||
|         env: | ||||
|           CI_SLACK_BOT_TOKEN: ${{ secrets.CI_SLACK_BOT_TOKEN }} | ||||
|           ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }} | ||||
|           SLACK_REPORT_CHANNEL: ${{ inputs.slack_report_channel }} | ||||
|           CI_EVENT: ${{ inputs.ci_event }} | ||||
|           CI_SHA: ${{ github.sha }} | ||||
|           CI_TEST_JOB: ${{ inputs.job }} | ||||
|           SETUP_STATUS: ${{ inputs.setup_status }} | ||||
|         # We pass `needs.setup.outputs.quantization_matrix` as the argument. A processing in `notification_service_quantization.py` to change | ||||
|         # `quantization/bnb` to `quantization_bnb` is required, as the artifact names use `_` instead of `/`. | ||||
|         run: | | ||||
|           pip install huggingface_hub | ||||
|           pip install slack_sdk | ||||
|           pip show slack_sdk | ||||
|           python utils/notification_service_quantization.py "${{ inputs.quantization_matrix }}" | ||||
|  | ||||
|       # Upload complete failure tables, as they might be big and only truncated versions could be sent to Slack. | ||||
|       - name: Failure table artifacts | ||||
|         if: ${{ inputs.job == 'run_quantization_torch_gpu' }} | ||||
|         uses: actions/upload-artifact@v4 | ||||
|         with: | ||||
|           name: ci_results_${{ inputs.job }} | ||||
|           path: ci_results_${{ inputs.job }} | ||||
|  | ||||
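Both notification steps rely on the folder-to-artifact-name mapping described in the comments above (`models/bert` becomes `models_bert`, `quantization/bnb` becomes `quantization_bnb`, since artifact names cannot contain `/`). A minimal sketch of that normalization, assuming it is the plain substitution the comments describe:

```py
# Minimal sketch of the folder -> artifact-name normalization described in the
# comments above. The real logic lives in utils/notification_service.py and
# utils/notification_service_quantization.py and likely does much more.
import json


def folder_to_artifact_name(folder: str) -> str:
    # Artifact names use "_" instead of "/", e.g. "models/bert" -> "models_bert".
    return folder.replace("/", "_")


folder_slices = json.loads('[["models/bert", "models/gpt2"], ["models/t5"]]')
for test_slice in folder_slices:
    for folder in test_slice:
        print(folder_to_artifact_name(folder))
```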
							
								
								
									
2  .github/workflows/ssh-runner.yml (vendored)
| @ -35,7 +35,7 @@ jobs: | ||||
|         shell: bash | ||||
|         run: | | ||||
|           if [[ "${{ github.event.inputs.num_gpus }}" == "single" && "${{ github.event.inputs.runner_type }}" == "t4" ]]; then | ||||
|             echo "RUNNER=aws-g4dn-4xlarge-cache" >> $GITHUB_ENV | ||||
|             echo "RUNNER=aws-g4dn-2xlarge-cache" >> $GITHUB_ENV | ||||
|           elif [[ "${{ github.event.inputs.num_gpus }}" == "multi" && "${{ github.event.inputs.runner_type }}" == "t4" ]]; then | ||||
|             echo "RUNNER=aws-g4dn-12xlarge-cache" >> $GITHUB_ENV | ||||
|           elif [[ "${{ github.event.inputs.num_gpus }}" == "single" && "${{ github.event.inputs.runner_type }}" == "a10" ]]; then | ||||
|  | ||||
							
								
								
									
3  .gitignore (vendored)
| @ -167,6 +167,3 @@ tags | ||||
|  | ||||
| # ruff | ||||
| .ruff_cache | ||||
|  | ||||
| # modular conversion | ||||
| *.modular_backup | ||||
|  | ||||
							
								
								
									
39  AGENTS.md
| @ -1,39 +0,0 @@ | ||||
| # AGENTS.md Guide for Hugging Face Transformers | ||||
|  | ||||
| This AGENTS.md file provides guidance for code agents working with this codebase. | ||||
|  | ||||
| ## Core Project Structure | ||||
|  | ||||
| - `/src/transformers`: This contains the core source code for the library | ||||
|   - `/models`: Code for individual models. Models inherit from base classes in the root `/src/transformers` directory. | ||||
| - `/tests`: This contains the core test classes for the library. These are usually inherited rather than directly run. | ||||
|   - `/models`: Tests for individual models. Model tests inherit from common tests in the root `/tests` directory. | ||||
| - `/docs`: This contains the documentation for the library, including guides, tutorials, and API references. | ||||
|  | ||||
| ## Coding Conventions for Hugging Face Transformers | ||||
|  | ||||
| - PRs should be as brief as possible. Bugfix PRs in particular can often be only one or two lines long, and do not need large comments, docstrings or new functions in this case. Aim to minimize the size of the diff. | ||||
| - When writing tests, they should be added to an existing file. The only exception is for PRs to add a new model, when a new test directory should be created for that model. | ||||
| - Code style is enforced in the CI. You can install the style tools with `pip install -e .[quality]`. You can then run `make fixup` to apply style and consistency fixes to your code. | ||||
|  | ||||
| ## Copying and inheritance | ||||
|  | ||||
| Many models in the codebase have similar code, but it is not shared by inheritance because we want each model file to be self-contained. | ||||
| We use two mechanisms to keep this code in sync: | ||||
|  | ||||
| - "Copied from" syntax. Functions or entire classes can have a comment at the top like this: `# Copied from transformers.models.llama.modeling_llama.rotate_half` or `# Copied from transformers.models.t5.modeling_t5.T5LayerNorm with T5->MT5` | ||||
|   These comments are actively checked by the style tools, and copies will automatically be updated when the base code is updated. If you need to update a copied function, you should | ||||
|   either update the base function and use `make fixup` to propagate the change to all copies, or simply remove the `# Copied from` comment if that is inappropriate. | ||||
| - "Modular" files. These files briefly define models by composing them using inheritance from other models. They are not meant to be used directly. Instead, the style tools | ||||
|   automatically generate a complete modeling file, like `modeling_bert.py`, from the modular file like `modular_bert.py`. If a model has a modular file, the modeling file | ||||
|   should never be edited directly! Instead, changes should be made in the modular file, and then you should run `make fixup` to update the modeling file automatically. | ||||
|  | ||||
| When adding new models, you should prefer `modular` style. | ||||
|  | ||||
| ## Testing | ||||
|  | ||||
| After making changes, you should usually run `make fixup` to ensure any copies and modular files are updated, and then test all affected models. This includes both | ||||
| the model you made the changes in and any other models that were updated by `make fixup`. Tests can be run with `pytest tests/models/[name]/test_modeling_[name].py` | ||||
| If your changes affect code in other classes like tokenizers or processors, you should run those tests instead, like `test_processing_[name].py` or `test_tokenization_[name].py`. | ||||
|  | ||||
| In order to run tests, you may need to install dependencies. You can do this with `pip install -e .[testing]`. You will probably also need to `pip install torch accelerate` if your environment does not already have them. | ||||
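For illustration of the `# Copied from` syntax described in the removed AGENTS.md above, the comment sits directly on top of the copied definition and is what the style tools check. A schematic example; the function body is recalled from the Llama modeling code and may not match it exactly:

```py
# Schematic example of the "Copied from" mechanism; not taken verbatim from the repo.
import torch


# Copied from transformers.models.llama.modeling_llama.rotate_half
def rotate_half(x):
    """Rotates half the hidden dims of the input."""
    x1 = x[..., : x.shape[-1] // 2]
    x2 = x[..., x.shape[-1] // 2 :]
    return torch.cat((-x2, x1), dim=-1)
```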
| @ -68,7 +68,8 @@ already reported** (use the search bar on GitHub under Issues). Your issue shoul | ||||
|  | ||||
| Once you've confirmed the bug hasn't already been reported, please include the following information in your issue so we can quickly resolve it: | ||||
|  | ||||
| * Your **OS type and version** and **Python**, and **PyTorch** versions when applicable. | ||||
| * Your **OS type and version** and **Python**, **PyTorch** and | ||||
|   **TensorFlow** versions when applicable. | ||||
| * A short, self-contained, code snippet that allows us to reproduce the bug in | ||||
|   less than 30s. | ||||
| * The *full* traceback if an exception is raised. | ||||
| @ -77,7 +78,7 @@ Once you've confirmed the bug hasn't already been reported, please include the f | ||||
| To get the OS and software versions automatically, run the following command: | ||||
|  | ||||
| ```bash | ||||
| transformers env | ||||
| transformers-cli env | ||||
| ``` | ||||
|  | ||||
| You can also run the same command from the root of the repository: | ||||
| @ -164,7 +165,8 @@ You'll need **[Python 3.9](https://github.com/huggingface/transformers/blob/main | ||||
|    mode with the `-e` flag. | ||||
|  | ||||
|    Depending on your OS, and since the number of optional dependencies of Transformers is growing, you might get a | ||||
|    failure with this command. If that's the case make sure to install Pytorch then do: | ||||
|    failure with this command. If that's the case make sure to install the Deep Learning framework you are working with | ||||
|    (PyTorch, TensorFlow and/or Flax) then do: | ||||
|  | ||||
|    ```bash | ||||
|    pip install -e ".[quality]" | ||||
|  | ||||
| @ -26,7 +26,7 @@ There are two main venues to receive support: [the forums](https://discuss.huggi | ||||
|  | ||||
| [The user forums](https://discuss.huggingface.co/) are supported by the wide community of the library users and backed up by developers when needed. | ||||
|  | ||||
| If you have a difficulty with deploying this library or some questions, or you'd like to discuss a new feature, please first consider discussing those things at the forums. Only when you feel your subject matter has been crystallized and you still need support from the library developers do proceed to file an [issue](https://github.com/huggingface/transformers/issues). | ||||
| If you have a difficulty with deploying this library or some questions, or you'd like to discuss a new feature, please first consider discussing those things at the forums. Only when you feel your subject matter has been crystalized and you still need support from the library developers do proceed to file an [issue](https://github.com/huggingface/transformers/issues). | ||||
|  | ||||
| In particular all "Please explain" questions or objectively very user-specific feature requests belong to the forums. Here are some example of such questions: | ||||
|  | ||||
|  | ||||
							
								
								
									
25  Makefile
| @ -3,24 +3,18 @@ | ||||
| # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!) | ||||
| export PYTHONPATH = src | ||||
|  | ||||
| check_dirs := examples tests src utils scripts benchmark benchmark_v2 | ||||
| check_dirs := examples tests src utils | ||||
|  | ||||
| exclude_folders :=  "" | ||||
|  | ||||
| modified_only_fixup: | ||||
| 	@current_branch=$$(git branch --show-current); \ | ||||
| 	if [ "$$current_branch" = "main" ]; then \ | ||||
| 		echo "On main branch, running 'style' target instead..."; \ | ||||
| 		$(MAKE) style; \ | ||||
| 	$(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs))) | ||||
| 	@if test -n "$(modified_py_files)"; then \ | ||||
| 		echo "Checking/fixing $(modified_py_files)"; \ | ||||
| 		ruff check $(modified_py_files) --fix --exclude $(exclude_folders); \ | ||||
| 		ruff format $(modified_py_files) --exclude $(exclude_folders);\ | ||||
| 	else \ | ||||
| 		modified_py_files=$$(python utils/get_modified_files.py $(check_dirs)); \ | ||||
| 		if [ -n "$$modified_py_files" ]; then \ | ||||
| 			echo "Checking/fixing files: $${modified_py_files}"; \ | ||||
| 			ruff check $${modified_py_files} --fix --exclude $(exclude_folders); \ | ||||
| 			ruff format $${modified_py_files} --exclude $(exclude_folders); \ | ||||
| 		else \ | ||||
| 			echo "No library .py files were modified"; \ | ||||
| 		fi; \ | ||||
| 		echo "No library .py files were modified"; \ | ||||
| 	fi | ||||
|  | ||||
| # Update src/transformers/dependency_versions_table.py | ||||
| @ -46,13 +40,11 @@ repo-consistency: | ||||
| 	python utils/check_dummies.py | ||||
| 	python utils/check_repo.py | ||||
| 	python utils/check_inits.py | ||||
| 	python utils/check_pipeline_typing.py | ||||
| 	python utils/check_config_docstrings.py | ||||
| 	python utils/check_config_attributes.py | ||||
| 	python utils/check_doctest_list.py | ||||
| 	python utils/update_metadata.py --check-only | ||||
| 	python utils/check_docstrings.py | ||||
| 	python utils/add_dates.py | ||||
|  | ||||
| # this target runs checks on all files | ||||
|  | ||||
| @ -87,9 +79,8 @@ fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency | ||||
|  | ||||
| fix-copies: | ||||
| 	python utils/check_copies.py --fix_and_overwrite | ||||
| 	python utils/check_modular_conversion.py --fix_and_overwrite | ||||
| 	python utils/check_modular_conversion.py  --fix_and_overwrite | ||||
| 	python utils/check_dummies.py --fix_and_overwrite | ||||
| 	python utils/check_pipeline_typing.py --fix_and_overwrite | ||||
| 	python utils/check_doctest_list.py --fix_and_overwrite | ||||
| 	python utils/check_docstrings.py --fix_and_overwrite | ||||
|  | ||||
|  | ||||
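The `modified_only_fixup` target on both sides of this hunk shells out to `utils/get_modified_files.py` and expects it to print the changed `.py` files under `check_dirs`. That helper is not part of the diff; the sketch below is one way such a script could work, assuming it filters `git diff` output against the merge base with `main`:

```py
# Hypothetical sketch of a "modified files" helper like the one the Makefile calls.
# Assumption: it diffs the working tree against the merge base with main and keeps
# .py files under the requested directories; the real utils/get_modified_files.py
# may use different git plumbing.
import subprocess
import sys


def modified_py_files(check_dirs):
    merge_base = subprocess.check_output(["git", "merge-base", "main", "HEAD"], text=True).strip()
    changed = subprocess.check_output(["git", "diff", "--name-only", merge_base], text=True).splitlines()
    return [f for f in changed if f.endswith(".py") and f.split("/")[0] in check_dirs]


if __name__ == "__main__":
    # e.g. python get_modified_files.py examples tests src utils
    print(" ".join(modified_py_files(sys.argv[1:])))
```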
							
								
								
									
42  README.md
| @ -44,7 +44,7 @@ limitations under the License. | ||||
|         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ja.md">日本語</a> | | ||||
|         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_hd.md">हिन्दी</a> | | ||||
|         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_ru.md">Русский</a> | | ||||
|         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Português</a> | | ||||
|         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_pt-br.md">Рortuguês</a> | | ||||
|         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_te.md">తెలుగు</a> | | ||||
|         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_fr.md">Français</a> | | ||||
|         <a href="https://github.com/huggingface/transformers/blob/main/i18n/README_de.md">Deutsch</a> | | ||||
| @ -59,28 +59,18 @@ limitations under the License. | ||||
| </h3> | ||||
|  | ||||
| <h3 align="center"> | ||||
|     <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/transformers_as_a_model_definition.png"/> | ||||
|     <a href="https://hf.co/course"><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/course_banner.png"></a> | ||||
| </h3> | ||||
|  | ||||
| Transformers is a library of pretrained text, computer vision, audio, video, and multimodal models for inference and training. Use Transformers to fine-tune models on your data, build inference applications, and for generative AI use cases across multiple modalities. | ||||
|  | ||||
| Transformers acts as the model-definition framework for state-of-the-art machine learning models in text, computer  | ||||
| vision, audio, video, and multimodal model, for both inference and training.  | ||||
|  | ||||
| It centralizes the model definition so that this definition is agreed upon across the ecosystem. `transformers` is the  | ||||
| pivot across frameworks: if a model definition is supported, it will be compatible with the majority of training  | ||||
| frameworks (Axolotl, Unsloth, DeepSpeed, FSDP, PyTorch-Lightning, ...), inference engines (vLLM, SGLang, TGI, ...), | ||||
| and adjacent modeling libraries (llama.cpp, mlx, ...) which leverage the model definition from `transformers`. | ||||
|  | ||||
| We pledge to help support new state-of-the-art models and democratize their usage by having their model definition be | ||||
| simple, customizable, and efficient. | ||||
|  | ||||
| There are over 1M+ Transformers [model checkpoints](https://huggingface.co/models?library=transformers&sort=trending) on the [Hugging Face Hub](https://huggingface.com/models) you can use. | ||||
| There are over 500K+ Transformers [model checkpoints](https://huggingface.co/models?library=transformers&sort=trending) on the [Hugging Face Hub](https://huggingface.com/models) you can use. | ||||
|  | ||||
| Explore the [Hub](https://huggingface.com/) today to find a model and use Transformers to help you get started right away. | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| Transformers works with Python 3.9+ [PyTorch](https://pytorch.org/get-started/locally/) 2.1+, [TensorFlow](https://www.tensorflow.org/install/pip) 2.6+, and [Flax](https://flax.readthedocs.io/en/latest/) 0.4.1+. | ||||
| Transformers works with Python 3.9+ [PyTorch](https://pytorch.org/get-started/locally/) 2.0+, [TensorFlow](https://www.tensorflow.org/install/pip) 2.6+, and [Flax](https://flax.readthedocs.io/en/latest/) 0.4.1+. | ||||
|  | ||||
| Create and activate a virtual environment with [venv](https://docs.python.org/3/library/venv.html) or [uv](https://docs.astral.sh/uv/), a fast Rust-based Python package and project manager. | ||||
|  | ||||
| @ -88,6 +78,7 @@ Create and activate a virtual environment with [venv](https://docs.python.org/3/ | ||||
| # venv | ||||
| python -m venv .my-env | ||||
| source .my-env/bin/activate | ||||
|  | ||||
| # uv | ||||
| uv venv .my-env | ||||
| source .my-env/bin/activate | ||||
| @ -97,10 +88,10 @@ Install Transformers in your virtual environment. | ||||
|  | ||||
| ```py | ||||
| # pip | ||||
| pip install "transformers[torch]" | ||||
| pip install transformers | ||||
|  | ||||
| # uv | ||||
| uv pip install "transformers[torch]" | ||||
| uv pip install transformers | ||||
| ``` | ||||
|  | ||||
| Install Transformers from source if you want the latest changes in the library or are interested in contributing. However, the *latest* version may not be stable. Feel free to open an [issue](https://github.com/huggingface/transformers/issues) if you encounter an error. | ||||
| @ -108,12 +99,7 @@ Install Transformers from source if you want the latest changes in the library o | ||||
| ```shell | ||||
| git clone https://github.com/huggingface/transformers.git | ||||
| cd transformers | ||||
|  | ||||
| # pip | ||||
| pip install .[torch] | ||||
|  | ||||
| # uv | ||||
| uv pip install .[torch] | ||||
| pip install . | ||||
| ``` | ||||
|  | ||||
| ## Quickstart | ||||
| @ -135,7 +121,7 @@ To chat with a model, the usage pattern is the same. The only difference is you | ||||
| > [!TIP] | ||||
| > You can also chat with a model directly from the command line. | ||||
| > ```shell | ||||
| > transformers chat Qwen/Qwen2.5-0.5B-Instruct | ||||
| > transformers-cli chat --model_name_or_path Qwen/Qwen2.5-0.5B-Instruct | ||||
| > ``` | ||||
|  | ||||
| ```py | ||||
| @ -147,7 +133,7 @@ chat = [ | ||||
|     {"role": "user", "content": "Hey, can you tell me any fun things to do in New York?"} | ||||
| ] | ||||
|  | ||||
| pipeline = pipeline(task="text-generation", model="meta-llama/Meta-Llama-3-8B-Instruct", dtype=torch.bfloat16, device_map="auto") | ||||
| pipeline = pipeline(task="text-generation", model="meta-llama/Meta-Llama-3-8B-Instruct", torch_dtype=torch.bfloat16, device_map="auto") | ||||
| response = pipeline(chat, max_new_tokens=512) | ||||
| print(response[0]["generated_text"][-1]["content"]) | ||||
| ``` | ||||
| @ -242,7 +228,7 @@ pipeline( | ||||
|  | ||||
| - This library is not a modular toolbox of building blocks for neural nets. The code in the model files is not refactored with additional abstractions on purpose, so that researchers can quickly iterate on each of the models without diving into additional abstractions/files. | ||||
| - The training API is optimized to work with PyTorch models provided by Transformers. For generic machine learning loops, you should use another library like [Accelerate](https://huggingface.co/docs/accelerate). | ||||
| - The [example scripts](https://github.com/huggingface/transformers/tree/main/examples) are only *examples*. They may not necessarily work out-of-the-box on your specific use case and you'll need to adapt the code for it to work. | ||||
| - The [example scripts]((https://github.com/huggingface/transformers/tree/main/examples)) are only *examples*. They may not necessarily work out-of-the-box on your specific use case and you'll need to adapt the code for it to work. | ||||
|  | ||||
| ## 100 projects using Transformers | ||||
|  | ||||
| @ -280,8 +266,8 @@ Expand each modality below to see a few example models for various use cases. | ||||
| - Automatic mask generation with [SAM](https://huggingface.co/facebook/sam-vit-base) | ||||
| - Depth estimation with [DepthPro](https://huggingface.co/apple/DepthPro-hf) | ||||
| - Image classification with [DINO v2](https://huggingface.co/facebook/dinov2-base) | ||||
| - Keypoint detection with [SuperPoint](https://huggingface.co/magic-leap-community/superpoint) | ||||
| - Keypoint matching with [SuperGlue](https://huggingface.co/magic-leap-community/superglue_outdoor) | ||||
| - Keypoint detection with [SuperGlue](https://huggingface.co/magic-leap-community/superglue_outdoor) | ||||
| - Keypoint matching with [SuperGlue](https://huggingface.co/magic-leap-community/superglue) | ||||
| - Object detection with [RT-DETRv2](https://huggingface.co/PekingU/rtdetr_v2_r50vd) | ||||
| - Pose Estimation with [VitPose](https://huggingface.co/usyd-community/vitpose-base-simple) | ||||
| - Universal segmentation with [OneFormer](https://huggingface.co/shi-labs/oneformer_ade20k_swin_large) | ||||
|  | ||||
| @ -14,7 +14,7 @@ Models uploaded on the Hugging Face Hub come in different formats. We heavily re | ||||
| models in the [`safetensors`](https://github.com/huggingface/safetensors) format (which is the default prioritized | ||||
| by the transformers library), as developed specifically to prevent arbitrary code execution on your system. | ||||
|  | ||||
| To avoid loading models from unsafe formats (e.g. [pickle](https://docs.python.org/3/library/pickle.html), you should use the `use_safetensors` parameter. If doing so, in the event that no .safetensors file is present, transformers will error when loading the model. | ||||
| To avoid loading models from unsafe formats(e.g. [pickle](https://docs.python.org/3/library/pickle.html), you should use the `use_safetensors` parameter. If doing so, in the event that no .safetensors file is present, transformers will error when loading the model. | ||||
|  | ||||
| ### Remote code | ||||
|  | ||||
| @ -27,6 +27,13 @@ These models require the `trust_remote_code=True` parameter to be set when using | ||||
| the content of the modeling files when using this argument. We recommend setting a revision in order to ensure you | ||||
| protect yourself from updates on the repository. | ||||
|  | ||||
| #### Tools | ||||
|  | ||||
| Through the `Agent` framework, remote tools can be downloaded to be used by the Agent. You're to specify these tools | ||||
| yourself, but please keep in mind that their code will be run on your machine if the Agent chooses to run them. | ||||
|  | ||||
| Please inspect the code of the tools before passing them to the Agent to protect your runtime and local setup. | ||||
|  | ||||
| ## Reporting a Vulnerability | ||||
|  | ||||
| Feel free to submit vulnerability reports to [security@huggingface.co](mailto:security@huggingface.co), where someone from the HF security team will review and recommend next steps. If reporting a vulnerability specific to open source, please note [Huntr](https://huntr.com) is a vulnerability disclosure program for open source software. | ||||
|  | ||||
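To make the two recommendations above concrete (`use_safetensors` to refuse non-safetensors checkpoints, and pinning a `revision` when `trust_remote_code=True`), here is a short usage sketch; the repository ids and commit hash are placeholders:

```py
# Usage sketch for the safety-related loading options discussed above.
# Repository ids and the revision hash are placeholders, not recommendations.
from transformers import AutoModel

# Refuse anything that is not a .safetensors checkpoint.
model = AutoModel.from_pretrained("some-org/some-model", use_safetensors=True)

# When a repository ships custom modeling code, pin the exact revision you reviewed.
custom = AutoModel.from_pretrained(
    "some-org/model-with-custom-code",
    trust_remote_code=True,
    revision="a1b2c3d",  # commit hash of the code you inspected (placeholder)
)
```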
| @ -288,7 +288,7 @@ Keywords: Music understanding, Music generation | ||||
|  | ||||
| ## [dalle-flow](https://github.com/jina-ai/dalle-flow) | ||||
|  | ||||
| DALL·E Flow is an interactive workflow for generating high-definition images from a text prompt. It leverages DALL·E-Mega, GLID-3 XL, and Stable Diffusion to generate image candidates, and then calls CLIP-as-service to rank the candidates w.r.t. the prompt. | ||||
| DALL·E Flow is an interactive workflow for generating high-definition images from a text prompt. Itt leverages DALL·E-Mega, GLID-3 XL, and Stable Diffusion to generate image candidates, and then calls CLIP-as-service to rank the candidates w.r.t. the prompt. | ||||
| The preferred candidate is fed to GLID-3 XL for diffusion, which often enriches the texture and background. Finally, the candidate is upscaled to 1024x1024 via SwinIR. | ||||
|  | ||||
| Keywords: High-definition image generation, Stable Diffusion, DALL-E Mega, GLID-3 XL, CLIP, SwinIR | ||||
| @ -526,7 +526,7 @@ Keywords: Model deployment, CLoud, Mobile, Edge | ||||
|  | ||||
| ## [underthesea](https://github.com/undertheseanlp/underthesea) | ||||
|  | ||||
| [underthesea](https://github.com/undertheseanlp/underthesea) is a Vietnamese NLP toolkit. Underthesea is a suite of open source Python modules data sets and tutorials supporting research and development in Vietnamese Natural Language Processing. We provide extremely easy API to quickly apply pretrained NLP models to your Vietnamese text, such as word segmentation, part-of-speech tagging (PoS), named entity recognition (NER), text classification and dependency parsing. | ||||
| [underthesea](https://github.com/undertheseanlp/underthesea) is a Vietnamese NLP toolkit. Underthesea is a suite of open source Python modules data sets and tutorials supporting research and development in Vietnamese Natural Language Processing. We provides extremely easy API to quickly apply pretrained NLP models to your Vietnamese text, such as word segmentation, part-of-speech tagging (PoS), named entity recognition (NER), text classification and dependency parsing. | ||||
|  | ||||
| Keywords: Vietnamese, NLP | ||||
|  | ||||
|  | ||||
							
								
								
									
1  benchmark/.gitignore (vendored)
| @ -1 +0,0 @@ | ||||
| benchmark_results/ | ||||
| @ -1,354 +0,0 @@ | ||||
| # Copyright 2025 The HuggingFace Team. All rights reserved. | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| import os | ||||
| import sys | ||||
| from logging import Logger | ||||
| from threading import Event, Thread | ||||
| from time import perf_counter, sleep | ||||
| from typing import Optional | ||||
|  | ||||
|  | ||||
| # Add the parent directory to Python path to import benchmarks_entrypoint | ||||
| sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | ||||
| import gpustat | ||||
| import psutil | ||||
| import psycopg2 | ||||
| from benchmarks_entrypoint import MetricsRecorder | ||||
|  | ||||
|  | ||||
| # Optional heavy ML dependencies - only required when actually running the benchmark | ||||
| try: | ||||
|     import torch | ||||
|  | ||||
|     from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, StaticCache | ||||
|  | ||||
|     TRANSFORMERS_AVAILABLE = True | ||||
| except ImportError: | ||||
|     TRANSFORMERS_AVAILABLE = False | ||||
|     torch = None | ||||
|     AutoModelForCausalLM = None | ||||
|     AutoTokenizer = None | ||||
|     GenerationConfig = None | ||||
|     StaticCache = None | ||||
|  | ||||
| os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" | ||||
| os.environ["TOKENIZERS_PARALLELISM"] = "1" | ||||
|  | ||||
| # Only set torch precision if torch is available | ||||
| if TRANSFORMERS_AVAILABLE: | ||||
|     torch.set_float32_matmul_precision("high") | ||||
|  | ||||
|  | ||||
| def collect_metrics(benchmark_id, continue_metric_collection, metrics_recorder): | ||||
|     p = psutil.Process(os.getpid()) | ||||
|     while not continue_metric_collection.is_set(): | ||||
|         with p.oneshot(): | ||||
|             cpu_util = p.cpu_percent() | ||||
|             mem_megabytes = p.memory_info().rss / (1024 * 1024) | ||||
|         gpu_stats = gpustat.GPUStatCollection.new_query() | ||||
|         gpu_util = gpu_stats[0]["utilization.gpu"] | ||||
|         gpu_mem_megabytes = gpu_stats[0]["memory.used"] | ||||
|         metrics_recorder.collect_device_measurements( | ||||
|             benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes | ||||
|         ) | ||||
|         sleep(0.01) | ||||
|  | ||||
|  | ||||
| def run_benchmark( | ||||
|     logger: Logger, | ||||
|     repository: str, | ||||
|     branch: str, | ||||
|     commit_id: str, | ||||
|     commit_msg: str, | ||||
|     metrics_recorder=None, | ||||
|     num_tokens_to_generate=100, | ||||
| ): | ||||
|     # Check if required ML dependencies are available | ||||
|     if not TRANSFORMERS_AVAILABLE: | ||||
|         logger.error("Transformers and torch are required to run the LLaMA benchmark. Please install them with:") | ||||
|         logger.error("pip install torch transformers") | ||||
|         logger.error("Skipping LLaMA benchmark due to missing dependencies.") | ||||
|         return | ||||
|  | ||||
|     continue_metric_collection = Event() | ||||
|     metrics_thread = None | ||||
|     model_id = "meta-llama/Llama-2-7b-hf" | ||||
|  | ||||
|     # If no metrics_recorder is provided, create one for backward compatibility | ||||
|     if metrics_recorder is None: | ||||
|         try: | ||||
|             metrics_recorder = MetricsRecorder( | ||||
|                 psycopg2.connect("dbname=metrics"), logger, repository, branch, commit_id, commit_msg, True | ||||
|             ) | ||||
|             should_close_recorder = True | ||||
|         except Exception as e: | ||||
|             logger.error(f"Failed to create metrics recorder: {e}") | ||||
|             return | ||||
|     else: | ||||
|         should_close_recorder = False | ||||
|     try: | ||||
|         gpu_stats = gpustat.GPUStatCollection.new_query() | ||||
|         gpu_name = gpu_stats[0]["name"] | ||||
|         benchmark_id = metrics_recorder.initialise_benchmark({"gpu_name": gpu_name, "model_id": model_id}) | ||||
|         logger.info(f"running benchmark #{benchmark_id} on {gpu_name} for {model_id}") | ||||
|         metrics_thread = Thread( | ||||
|             target=collect_metrics, | ||||
|             args=[benchmark_id, continue_metric_collection, metrics_recorder], | ||||
|         ) | ||||
|         metrics_thread.start() | ||||
|         logger.info("started background thread to fetch device metrics") | ||||
|  | ||||
|         os.environ["TOKENIZERS_PARALLELISM"] = "false"  # silence warnings when compiling | ||||
|  | ||||
|         device = "cuda" | ||||
|  | ||||
|         logger.info("downloading weights") | ||||
|         # This is to avoid counting download in model load time measurement | ||||
|         model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16) | ||||
|         gen_config = GenerationConfig(do_sample=False, top_p=1, temperature=1) | ||||
|         logger.info("loading model") | ||||
|         start = perf_counter() | ||||
|         model = AutoModelForCausalLM.from_pretrained( | ||||
|             model_id, dtype=torch.float16, generation_config=gen_config | ||||
|         ).eval() | ||||
|         model.to(device) | ||||
|         torch.cuda.synchronize() | ||||
|         end = perf_counter() | ||||
|         model_load_time = end - start | ||||
|         logger.info(f"loaded model in: {model_load_time}s") | ||||
|  | ||||
|         tokenizer = AutoTokenizer.from_pretrained(model_id) | ||||
|  | ||||
|         prompt = "Why dogs are so cute?" | ||||
|         inputs = tokenizer(prompt, return_tensors="pt").to(device) | ||||
|  | ||||
|         # Specify the max length (including both the prompt and the response) | ||||
|         # When calling `generate` with `cache_implementation="static" later, this is also used to create a `StaticCache` object | ||||
|         # with sequence length = `max_length`. The longer the more you will re-use it | ||||
|         seq_length = inputs["input_ids"].shape[1] | ||||
|         model.generation_config.max_length = seq_length + num_tokens_to_generate | ||||
|         batch_size = inputs["input_ids"].shape[0] | ||||
|  | ||||
|         # Copied from the gpt-fast repo | ||||
|         def multinomial_sample_one_no_sync(probs_sort):  # Does multinomial sampling without a cuda synchronization | ||||
|             q = torch.empty_like(probs_sort).exponential_(1) | ||||
|             return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int) | ||||
|  | ||||
|         def logits_to_probs(logits, temperature: float = 1.0, top_k: Optional[int] = None): | ||||
|             logits = logits / max(temperature, 1e-5) | ||||
|  | ||||
|             if top_k is not None: | ||||
|                 v, _ = torch.topk(logits, min(top_k, logits.size(-1))) | ||||
|                 pivot = v.select(-1, -1).unsqueeze(-1) | ||||
|                 logits = torch.where(logits < pivot, -float("Inf"), logits) | ||||
|             probs = torch.nn.functional.softmax(logits, dim=-1) | ||||
|             return probs | ||||
|  | ||||
|         def sample(logits, temperature: float = 1.0, top_k: Optional[int] = None): | ||||
|             probs = logits_to_probs(logits[0, -1], temperature, top_k) | ||||
|             idx_next = multinomial_sample_one_no_sync(probs) | ||||
|             return idx_next, probs | ||||
|  | ||||
|         # First eager forward pass | ||||
|         logger.info("running first eager forward pass") | ||||
|         start = perf_counter() | ||||
|         _ = model(**inputs) | ||||
|         torch.cuda.synchronize() | ||||
|         end = perf_counter() | ||||
|         first_eager_fwd_pass_time = end - start | ||||
|         logger.info(f"completed first eager forward pass in: {first_eager_fwd_pass_time}s") | ||||
|  | ||||
|         # Second eager forward pass (should be faster) | ||||
|         logger.info("running second eager forward pass") | ||||
|         start = perf_counter() | ||||
|         _ = model(**inputs) | ||||
|         torch.cuda.synchronize() | ||||
|         end = perf_counter() | ||||
|         second_eager_fwd_pass_time = end - start | ||||
|         logger.info(f"completed second eager forward pass in: {second_eager_fwd_pass_time}s") | ||||
|  | ||||
|         # First eager generation | ||||
|         logger.info("running first eager generation") | ||||
|         start = perf_counter() | ||||
|         output = model.generate(**inputs) | ||||
|         torch.cuda.synchronize() | ||||
|         end = perf_counter() | ||||
|         first_eager_generate_time = end - start | ||||
|         logger.info(f"completed first eager generation in: {first_eager_generate_time}s") | ||||
|         logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") | ||||
|  | ||||
|         # Second eager generation (should be faster) | ||||
|         logger.info("running second eager generation") | ||||
|         start = perf_counter() | ||||
|         output = model.generate(**inputs) | ||||
|         torch.cuda.synchronize() | ||||
|         end = perf_counter() | ||||
|         second_eager_generate_time = end - start | ||||
|         logger.info(f"completed second eager generation in: {second_eager_generate_time}s") | ||||
|         logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") | ||||
|  | ||||
|         logger.info("running generation timing loop") | ||||
|  | ||||
|         input_pos = torch.arange(0, seq_length, device=device) | ||||
|         inputs = inputs["input_ids"] | ||||
|  | ||||
|         start = perf_counter() | ||||
|         with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): | ||||
|             logits = model(inputs, position_ids=input_pos).logits | ||||
|         next_token, probs = sample(logits, temperature=0.6, top_k=5) | ||||
|         torch.cuda.synchronize() | ||||
|         end = perf_counter() | ||||
|         time_to_first_token = end - start | ||||
|  | ||||
|         input_pos = torch.tensor([seq_length], device=device, dtype=torch.int) | ||||
|         next_token = next_token.clone() | ||||
|         start = perf_counter() | ||||
|         with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): | ||||
|             logits = model(next_token, position_ids=input_pos).logits | ||||
|         next_token, probs = sample(logits, temperature=0.6, top_k=5) | ||||
|         torch.cuda.synchronize() | ||||
|         end = perf_counter() | ||||
|         time_to_second_token = end - start | ||||
|  | ||||
|         input_pos = torch.tensor([seq_length + 1], device=device, dtype=torch.int) | ||||
|         next_token = next_token.clone() | ||||
|         start = perf_counter() | ||||
|         with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): | ||||
|             logits = model(next_token, position_ids=input_pos).logits | ||||
|         next_token, probs = sample(logits, temperature=0.6, top_k=5) | ||||
|         torch.cuda.synchronize() | ||||
|         end = perf_counter() | ||||
|         time_to_third_token = end - start | ||||
|  | ||||
|         logger.info("running longer generation timing loop") | ||||
|  | ||||
|         total_time = 0 | ||||
|         for i in range(20): | ||||
|             input_pos = torch.tensor([seq_length + 2 + i], device=device, dtype=torch.int) | ||||
|             next_token = next_token.clone() | ||||
|             start = perf_counter() | ||||
|             with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.MATH): | ||||
|                 logits = model(next_token, position_ids=input_pos).logits | ||||
|             next_token, probs = sample(logits, temperature=0.6, top_k=5) | ||||
|             torch.cuda.synchronize() | ||||
|             end = perf_counter() | ||||
|             total_time += end - start | ||||
|  | ||||
|         mean_time_to_next_token = total_time / 20 | ||||
|  | ||||
|         logger.info("running compilation benchmarks") | ||||
|  | ||||
|         # Now compile the model | ||||
|         model = torch.compile(model, mode="max-autotune", fullgraph=True) | ||||
|  | ||||
|         # StaticCache for generation | ||||
|         with torch.device(device): | ||||
|             model.setup_caches(max_batch_size=batch_size, max_seq_len=seq_length + num_tokens_to_generate) | ||||
|  | ||||
|         input_pos = torch.arange(0, seq_length, device=device) | ||||
|         inputs = tokenizer(prompt, return_tensors="pt").to(device)["input_ids"] | ||||
|  | ||||
|         logger.info("compiling model") | ||||
|  | ||||
|         model = AutoModelForCausalLM.from_pretrained(model_id, dtype=torch.float16, generation_config=gen_config) | ||||
|         model.to(device) | ||||
|         model = torch.compile(model, mode="max-autotune", fullgraph=True) | ||||
|  | ||||
|         past_key_values = StaticCache( | ||||
|             model.config, | ||||
|             max_batch_size=batch_size, | ||||
|             device=device, | ||||
|             dtype=torch.float16, | ||||
|             max_cache_len=seq_length + 128, | ||||
|         ) | ||||
|         # 1st call | ||||
|         start = perf_counter() | ||||
|         output = model.generate(**inputs, past_key_values=past_key_values) | ||||
|         end = perf_counter() | ||||
|         first_compile_generate_time = end - start | ||||
|         logger.info(f"completed first compile generation in: {first_compile_generate_time}s") | ||||
|         logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") | ||||
|  | ||||
|         past_key_values = StaticCache( | ||||
|             model.config, | ||||
|             max_batch_size=batch_size, | ||||
|             device=device, | ||||
|             dtype=torch.float16, | ||||
|             max_cache_len=seq_length + 128, | ||||
|         ) | ||||
|         # 2nd call | ||||
|         start = perf_counter() | ||||
|         output = model.generate(**inputs, past_key_values=past_key_values) | ||||
|         end = perf_counter() | ||||
|         second_compile_generate_time = end - start | ||||
|         logger.info(f"completed second compile generation in: {second_compile_generate_time}s") | ||||
|         logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") | ||||
|  | ||||
|         past_key_values = StaticCache( | ||||
|             model.config, | ||||
|             max_batch_size=batch_size, | ||||
|             device=device, | ||||
|             dtype=torch.float16, | ||||
|             max_cache_len=seq_length + 128, | ||||
|         ) | ||||
|         # 3rd call | ||||
|         start = perf_counter() | ||||
|         output = model.generate(**inputs, past_key_values=past_key_values) | ||||
|         end = perf_counter() | ||||
|         third_compile_generate_time = end - start | ||||
|         logger.info(f"completed third compile generation in: {third_compile_generate_time}s") | ||||
|         logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") | ||||
|  | ||||
|         past_key_values = StaticCache( | ||||
|             model.config, | ||||
|             max_batch_size=batch_size, | ||||
|             device=device, | ||||
|             dtype=torch.float16, | ||||
|             max_cache_len=seq_length + 128, | ||||
|         ) | ||||
|         # 4th call | ||||
|         start = perf_counter() | ||||
|         output = model.generate(**inputs, past_key_values=past_key_values) | ||||
|         end = perf_counter() | ||||
|         fourth_compile_generate_time = end - start | ||||
|         logger.info(f"completed fourth compile generation in: {fourth_compile_generate_time}s") | ||||
|         logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") | ||||
|  | ||||
|         metrics_recorder.collect_model_measurements( | ||||
|             benchmark_id, | ||||
|             { | ||||
|                 "model_load_time": model_load_time, | ||||
|                 "first_eager_forward_pass_time_secs": first_eager_fwd_pass_time, | ||||
|                 "second_eager_forward_pass_time_secs": second_eager_fwd_pass_time, | ||||
|                 "first_eager_generate_time_secs": first_eager_generate_time, | ||||
|                 "second_eager_generate_time_secs": second_eager_generate_time, | ||||
|                 "time_to_first_token_secs": time_to_first_token, | ||||
|                 "time_to_second_token_secs": time_to_second_token, | ||||
|                 "time_to_third_token_secs": time_to_third_token, | ||||
|                 "time_to_next_token_mean_secs": mean_time_to_next_token, | ||||
|                 "first_compile_generate_time_secs": first_compile_generate_time, | ||||
|                 "second_compile_generate_time_secs": second_compile_generate_time, | ||||
|                 "third_compile_generate_time_secs": third_compile_generate_time, | ||||
|                 "fourth_compile_generate_time_secs": fourth_compile_generate_time, | ||||
|             }, | ||||
|         ) | ||||
|     except Exception as e: | ||||
|         logger.error(f"Caught exception: {e}") | ||||
|     continue_metric_collection.set() | ||||
|     if metrics_thread is not None: | ||||
|         metrics_thread.join() | ||||
|  | ||||
|     # Only close the recorder if we created it locally | ||||
|     if should_close_recorder: | ||||
|         metrics_recorder.close() | ||||
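The `multinomial_sample_one_no_sync` helper above (copied from the gpt-fast repo) avoids `torch.multinomial`, which would force a device synchronization, by dividing the probabilities by i.i.d. Exponential(1) noise and taking the argmax; index `i` then wins with probability proportional to `probs[i]`. A small CPU-only sketch, separate from the benchmark, that checks the two samplers agree in distribution (the probabilities and sample count are arbitrary):

```python
import torch


def multinomial_sample_one_no_sync(probs):
    # Exponential-race sampling: argmax(probs / q) with q ~ Exp(1) selects index i
    # with probability probs[i] / probs.sum(), without calling torch.multinomial.
    q = torch.empty_like(probs).exponential_(1)
    return torch.argmax(probs / q, dim=-1, keepdim=True).to(dtype=torch.int)


torch.manual_seed(0)
probs = torch.tensor([0.1, 0.2, 0.3, 0.4])

n = 50_000
race_counts = torch.zeros(4)
multinomial_counts = torch.zeros(4)
for _ in range(n):
    race_counts[multinomial_sample_one_no_sync(probs).item()] += 1
    multinomial_counts[torch.multinomial(probs, num_samples=1).item()] += 1

# Both empirical distributions should be close to [0.1, 0.2, 0.3, 0.4].
print(race_counts / n)
print(multinomial_counts / n)
```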
| @ -31,7 +31,9 @@ from contextlib import contextmanager | ||||
| from pathlib import Path | ||||
|  | ||||
| from git import Repo | ||||
|  | ||||
| from huggingface_hub import HfApi | ||||
|  | ||||
| from optimum_benchmark import Benchmark | ||||
| from optimum_benchmark_wrapper import main | ||||
|  | ||||
| @ -88,7 +90,7 @@ def summarize(run_dir, metrics, expand_metrics=False): | ||||
|  | ||||
|         model = benchmark.config.backend["model"] | ||||
|  | ||||
|         # This looks like `benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5`. | ||||
|         # Ths looks like `benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5`. | ||||
|         # (we rely on the usage of hydra's `${hydra.job.override_dirname}`.) | ||||
|         benchmark_name = re.sub(f"backend.model={model},*", "", report_dir) | ||||
|         benchmark_name = str(Path(benchmark_name).parts[-1]) | ||||
|  | ||||
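The `benchmark_name` manipulation above strips the `backend.model=...` override from a directory name generated by hydra's `${hydra.job.override_dirname}`. A small sketch with an illustrative `report_dir` value (the concrete string is an assumption; only its format is taken from the comment above):

```python
import re
from pathlib import Path

model = "meta-llama/Llama-2-7b-hf"
# Illustrative directory name in the `${hydra.job.override_dirname}` format described above.
report_dir = f"backend.model={model},benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5"

benchmark_name = re.sub(f"backend.model={model},*", "", report_dir)
benchmark_name = str(Path(benchmark_name).parts[-1])
print(benchmark_name)
# -> benchmark.input_shapes.batch_size=1,benchmark.input_shapes.sequence_length=5
```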
| @ -1,36 +1,15 @@ | ||||
| # Copyright 2025 The HuggingFace Team. All rights reserved. | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
| import argparse | ||||
| import importlib.util | ||||
| import json | ||||
| import logging | ||||
| import os | ||||
| from typing import Dict | ||||
| import sys | ||||
| import uuid | ||||
| from datetime import datetime | ||||
|  | ||||
| import pandas as pd | ||||
| from psycopg2.extras import Json | ||||
| from psycopg2.extensions import register_adapter | ||||
|  | ||||
|  | ||||
| try: | ||||
|     from psycopg2.extensions import register_adapter | ||||
|     from psycopg2.extras import Json | ||||
|  | ||||
|     register_adapter(dict, Json) | ||||
|     PSYCOPG2_AVAILABLE = True | ||||
| except ImportError: | ||||
|     PSYCOPG2_AVAILABLE = False | ||||
| register_adapter(dict, Json) | ||||
|  | ||||
|  | ||||
| class ImportModuleException(Exception): | ||||
| @ -38,273 +17,59 @@ class ImportModuleException(Exception): | ||||
|  | ||||
|  | ||||
| class MetricsRecorder: | ||||
|     def __init__( | ||||
|         self, | ||||
|         connection, | ||||
|         logger: logging.Logger, | ||||
|         repository: str, | ||||
|         branch: str, | ||||
|         commit_id: str, | ||||
|         commit_msg: str, | ||||
|         collect_csv_data: bool = True, | ||||
|     ): | ||||
|     def __init__(self, connection, logger: logging.Logger, branch: str, commit_id: str, commit_msg: str): | ||||
|         self.conn = connection | ||||
|         self.use_database = connection is not None | ||||
|         if self.use_database: | ||||
|             self.conn.autocommit = True | ||||
|         self.conn.autocommit = True | ||||
|         self.logger = logger | ||||
|         self.repository = repository | ||||
|         self.branch = branch | ||||
|         self.commit_id = commit_id | ||||
|         self.commit_msg = commit_msg | ||||
|         self.collect_csv_data = collect_csv_data | ||||
|  | ||||
|         # For CSV export - store all data in pandas DataFrames (only if CSV collection is enabled) | ||||
|         if self.collect_csv_data: | ||||
|             # Initialize empty DataFrames with proper schemas | ||||
|             self.benchmarks_df = pd.DataFrame( | ||||
|                 columns=[ | ||||
|                     "benchmark_id", | ||||
|                     "repository", | ||||
|                     "branch", | ||||
|                     "commit_id", | ||||
|                     "commit_message", | ||||
|                     "metadata", | ||||
|                     "created_at", | ||||
|                 ] | ||||
|             ) | ||||
|             self.device_measurements_df = pd.DataFrame( | ||||
|                 columns=["benchmark_id", "cpu_util", "mem_megabytes", "gpu_util", "gpu_mem_megabytes", "time"] | ||||
|             ) | ||||
|             self.model_measurements_df = pd.DataFrame( | ||||
|                 columns=[ | ||||
|                     "benchmark_id", | ||||
|                     "time", | ||||
|                     "model_load_time", | ||||
|                     "first_eager_forward_pass_time_secs", | ||||
|                     "second_eager_forward_pass_time_secs", | ||||
|                     "first_eager_generate_time_secs", | ||||
|                     "second_eager_generate_time_secs", | ||||
|                     "time_to_first_token_secs", | ||||
|                     "time_to_second_token_secs", | ||||
|                     "time_to_third_token_secs", | ||||
|                     "time_to_next_token_mean_secs", | ||||
|                     "first_compile_generate_time_secs", | ||||
|                     "second_compile_generate_time_secs", | ||||
|                     "third_compile_generate_time_secs", | ||||
|                     "fourth_compile_generate_time_secs", | ||||
|                 ] | ||||
|             ) | ||||
|         else: | ||||
|             self.benchmarks_df = None | ||||
|             self.device_measurements_df = None | ||||
|             self.model_measurements_df = None | ||||
|  | ||||
|     def initialise_benchmark(self, metadata: dict[str, str]) -> str: | ||||
|     def initialise_benchmark(self, metadata: Dict[str, str]) -> int: | ||||
|         """ | ||||
|         Creates a new benchmark, returns the benchmark id (UUID) | ||||
|         Creates a new benchmark, returns the benchmark id | ||||
|         """ | ||||
|         # Generate a unique UUID for this benchmark | ||||
|         benchmark_id = str(uuid.uuid4()) | ||||
|  | ||||
|         if self.use_database: | ||||
|             with self.conn.cursor() as cur: | ||||
|                 cur.execute( | ||||
|                     "INSERT INTO benchmarks (benchmark_id, repository, branch, commit_id, commit_message, metadata) VALUES (%s, %s, %s, %s, %s, %s)", | ||||
|                     (benchmark_id, self.repository, self.branch, self.commit_id, self.commit_msg, metadata), | ||||
|                 ) | ||||
|                 self.logger.debug(f"initialised benchmark #{benchmark_id}") | ||||
|  | ||||
|         # Store benchmark data for CSV export (if enabled) | ||||
|         if self.collect_csv_data: | ||||
|             # Add row to pandas DataFrame | ||||
|             new_row = pd.DataFrame( | ||||
|                 [ | ||||
|                     { | ||||
|                         "benchmark_id": benchmark_id, | ||||
|                         "repository": self.repository, | ||||
|                         "branch": self.branch, | ||||
|                         "commit_id": self.commit_id, | ||||
|                         "commit_message": self.commit_msg, | ||||
|                         "metadata": json.dumps(metadata), | ||||
|                         "created_at": datetime.utcnow().isoformat(), | ||||
|                     } | ||||
|                 ] | ||||
|         # gpu_name: str, model_id: str | ||||
|         with self.conn.cursor() as cur: | ||||
|             cur.execute( | ||||
|                 "INSERT INTO benchmarks (branch, commit_id, commit_message, metadata) VALUES (%s, %s, %s, %s) RETURNING benchmark_id", | ||||
|                 (self.branch, self.commit_id, self.commit_msg, metadata), | ||||
|             ) | ||||
|             self.benchmarks_df = pd.concat([self.benchmarks_df, new_row], ignore_index=True) | ||||
|             benchmark_id = cur.fetchone()[0] | ||||
|             logger.debug(f"initialised benchmark #{benchmark_id}") | ||||
|             return benchmark_id | ||||
|  | ||||
|         mode_info = [] | ||||
|         if self.use_database: | ||||
|             mode_info.append("database") | ||||
|         if self.collect_csv_data: | ||||
|             mode_info.append("CSV") | ||||
|         mode_str = " + ".join(mode_info) if mode_info else "no storage" | ||||
|  | ||||
|         self.logger.debug(f"initialised benchmark #{benchmark_id} ({mode_str} mode)") | ||||
|         return benchmark_id | ||||
|  | ||||
|     def collect_device_measurements(self, benchmark_id: str, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes): | ||||
|     def collect_device_measurements(self, benchmark_id: int, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes): | ||||
|         """ | ||||
|         Collect device metrics, such as CPU & GPU usage. These are "static", in the sense that you cannot pass arbitrary arguments to the function. | ||||
|         """ | ||||
|         # Store device measurements for CSV export (if enabled) | ||||
|         if self.collect_csv_data: | ||||
|             # Add row to pandas DataFrame | ||||
|             new_row = pd.DataFrame( | ||||
|                 [ | ||||
|                     { | ||||
|                         "benchmark_id": benchmark_id, | ||||
|                         "cpu_util": cpu_util, | ||||
|                         "mem_megabytes": mem_megabytes, | ||||
|                         "gpu_util": gpu_util, | ||||
|                         "gpu_mem_megabytes": gpu_mem_megabytes, | ||||
|                         "time": datetime.utcnow().isoformat(), | ||||
|                     } | ||||
|                 ] | ||||
|         with self.conn.cursor() as cur: | ||||
|             cur.execute( | ||||
|                 "INSERT INTO device_measurements (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes) VALUES (%s, %s, %s, %s, %s)", | ||||
|                 (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes), | ||||
|             ) | ||||
|             self.device_measurements_df = pd.concat([self.device_measurements_df, new_row], ignore_index=True) | ||||
|  | ||||
|         # Store in database if available | ||||
|         if self.use_database: | ||||
|             with self.conn.cursor() as cur: | ||||
|                 cur.execute( | ||||
|                     "INSERT INTO device_measurements (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes) VALUES (%s, %s, %s, %s, %s)", | ||||
|                     (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes), | ||||
|                 ) | ||||
|  | ||||
|         self.logger.debug( | ||||
|             f"collected device measurements for benchmark #{benchmark_id} [CPU util: {cpu_util}, mem MBs: {mem_megabytes}, GPU util: {gpu_util}, GPU mem MBs: {gpu_mem_megabytes}]" | ||||
|             f"inserted device measurements for benchmark #{benchmark_id} [CPU util: {cpu_util}, mem MBs: {mem_megabytes}, GPU util: {gpu_util}, GPU mem MBs: {gpu_mem_megabytes}]" | ||||
|         ) | ||||
|  | ||||
|     def collect_model_measurements(self, benchmark_id: str, measurements: dict[str, float]): | ||||
|         # Store model measurements for CSV export (if enabled) | ||||
|         if self.collect_csv_data: | ||||
|             # Add row to pandas DataFrame with flattened measurements | ||||
|             row_data = {"benchmark_id": benchmark_id, "time": datetime.utcnow().isoformat()} | ||||
|             # Flatten the measurements dict into the row | ||||
|             row_data.update(measurements) | ||||
|  | ||||
|             new_row = pd.DataFrame([row_data]) | ||||
|             self.model_measurements_df = pd.concat([self.model_measurements_df, new_row], ignore_index=True) | ||||
|  | ||||
|         # Store in database if available | ||||
|         if self.use_database: | ||||
|             with self.conn.cursor() as cur: | ||||
|                 cur.execute( | ||||
|                     """ | ||||
|                     INSERT INTO model_measurements ( | ||||
|                         benchmark_id, | ||||
|                         measurements | ||||
|                     ) VALUES (%s, %s) | ||||
|                     """, | ||||
|                     ( | ||||
|                         benchmark_id, | ||||
|                         measurements, | ||||
|                     ), | ||||
|                 ) | ||||
|  | ||||
|         self.logger.debug(f"collected model measurements for benchmark #{benchmark_id}: {measurements}") | ||||
|  | ||||
|     def export_to_csv(self, output_dir: str = "benchmark_results"): | ||||
|         """ | ||||
|         Export all collected data to CSV files using pandas DataFrames | ||||
|         """ | ||||
|         if not self.collect_csv_data: | ||||
|             self.logger.warning("CSV data collection is disabled - no CSV files will be generated") | ||||
|             return | ||||
|  | ||||
|         if not os.path.exists(output_dir): | ||||
|             os.makedirs(output_dir) | ||||
|             self.logger.info(f"Created output directory: {output_dir}") | ||||
|  | ||||
|         timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") | ||||
|         files_created = [] | ||||
|  | ||||
|         # Export using pandas DataFrames | ||||
|         self._export_pandas_data(output_dir, timestamp, files_created) | ||||
|  | ||||
|         self.logger.info(f"CSV export complete! Created {len(files_created)} files in {output_dir}") | ||||
|  | ||||
|     def _export_pandas_data(self, output_dir: str, timestamp: str, files_created: list): | ||||
|         """ | ||||
|         Export CSV files using pandas DataFrames | ||||
|         """ | ||||
|         # Export benchmarks | ||||
|         benchmarks_file = os.path.join(output_dir, f"benchmarks_{timestamp}.csv") | ||||
|         self.benchmarks_df.to_csv(benchmarks_file, index=False) | ||||
|         files_created.append(benchmarks_file) | ||||
|         self.logger.info(f"Exported {len(self.benchmarks_df)} benchmark records to {benchmarks_file}") | ||||
|  | ||||
|         # Export device measurements | ||||
|         device_file = os.path.join(output_dir, f"device_measurements_{timestamp}.csv") | ||||
|         self.device_measurements_df.to_csv(device_file, index=False) | ||||
|         files_created.append(device_file) | ||||
|         self.logger.info(f"Exported {len(self.device_measurements_df)} device measurement records to {device_file}") | ||||
|  | ||||
|         # Export model measurements (already flattened) | ||||
|         model_file = os.path.join(output_dir, f"model_measurements_{timestamp}.csv") | ||||
|         self.model_measurements_df.to_csv(model_file, index=False) | ||||
|         files_created.append(model_file) | ||||
|         self.logger.info(f"Exported {len(self.model_measurements_df)} model measurement records to {model_file}") | ||||
|  | ||||
|         # Create comprehensive summary using pandas operations | ||||
|         summary_file = os.path.join(output_dir, f"benchmark_summary_{timestamp}.csv") | ||||
|         self._create_summary(summary_file) | ||||
|         files_created.append(summary_file) | ||||
|  | ||||
|     def _create_summary(self, summary_file: str): | ||||
|         """ | ||||
|         Create a comprehensive summary CSV using pandas operations | ||||
|         """ | ||||
|         if len(self.benchmarks_df) == 0: | ||||
|             # Create empty summary file | ||||
|             summary_df = pd.DataFrame() | ||||
|             summary_df.to_csv(summary_file, index=False) | ||||
|             self.logger.info(f"Created empty benchmark summary at {summary_file}") | ||||
|             return | ||||
|  | ||||
|         # Start with benchmarks as the base | ||||
|         summary_df = self.benchmarks_df.copy() | ||||
|  | ||||
|         # Add model measurements (join on benchmark_id) | ||||
|         if len(self.model_measurements_df) > 0: | ||||
|             # Drop 'time' column from model measurements to avoid conflicts | ||||
|             model_df = self.model_measurements_df.drop(columns=["time"], errors="ignore") | ||||
|             summary_df = summary_df.merge(model_df, on="benchmark_id", how="left") | ||||
|  | ||||
|         # Calculate device measurement aggregates using pandas groupby | ||||
|         if len(self.device_measurements_df) > 0: | ||||
|             device_agg = ( | ||||
|                 self.device_measurements_df.groupby("benchmark_id") | ||||
|                 .agg( | ||||
|                     { | ||||
|                         "cpu_util": ["mean", "max", "std", "count"], | ||||
|                         "mem_megabytes": ["mean", "max", "std"], | ||||
|                         "gpu_util": ["mean", "max", "std"], | ||||
|                         "gpu_mem_megabytes": ["mean", "max", "std"], | ||||
|                     } | ||||
|                 ) | ||||
|                 .round(3) | ||||
|     def collect_model_measurements(self, benchmark_id: int, measurements: Dict[str, float]): | ||||
|         with self.conn.cursor() as cur: | ||||
|             cur.execute( | ||||
|                 """ | ||||
|                 INSERT INTO model_measurements ( | ||||
|                     benchmark_id, | ||||
|                     measurements | ||||
|                 ) VALUES (%s, %s) | ||||
|                 """, | ||||
|                 ( | ||||
|                     benchmark_id, | ||||
|                     measurements, | ||||
|                 ), | ||||
|             ) | ||||
|  | ||||
|             # Flatten column names | ||||
|             device_agg.columns = [f"{col[0]}_{col[1]}" for col in device_agg.columns] | ||||
|             device_agg = device_agg.reset_index() | ||||
|  | ||||
|             # Rename count column to be more descriptive | ||||
|             if "cpu_util_count" in device_agg.columns: | ||||
|                 device_agg = device_agg.rename(columns={"cpu_util_count": "device_measurement_count"}) | ||||
|  | ||||
|             # Merge with summary | ||||
|             summary_df = summary_df.merge(device_agg, on="benchmark_id", how="left") | ||||
|  | ||||
|         # Export the comprehensive summary | ||||
|         summary_df.to_csv(summary_file, index=False) | ||||
|         self.logger.info(f"Created comprehensive benchmark summary with {len(summary_df)} records at {summary_file}") | ||||
|         self.logger.debug(f"inserted model measurements for benchmark #{benchmark_id}: {measurements}") | ||||
|  | ||||
|     def close(self): | ||||
|         if self.use_database and self.conn: | ||||
|             self.conn.close() | ||||
|         self.conn.close() | ||||
|  | ||||
|  | ||||
| logger = logging.getLogger(__name__) | ||||
| @ -317,18 +82,12 @@ handler.setFormatter(formatter) | ||||
| logger.addHandler(handler) | ||||
|  | ||||
|  | ||||
| def parse_arguments() -> tuple[str, str, str, str, bool, str]: | ||||
| def parse_arguments(): | ||||
|     """ | ||||
|     Parse command line arguments for the benchmarking CLI. | ||||
|     """ | ||||
|     parser = argparse.ArgumentParser(description="CLI for benchmarking the huggingface/transformers.") | ||||
|  | ||||
|     parser.add_argument( | ||||
|         "repository", | ||||
|         type=str, | ||||
|         help="The repository name on which the benchmarking is performed.", | ||||
|     ) | ||||
|  | ||||
|     parser.add_argument( | ||||
|         "branch", | ||||
|         type=str, | ||||
| @ -347,21 +106,9 @@ def parse_arguments() -> tuple[str, str, str, str, bool, str]: | ||||
|         help="The commit message associated with the commit, truncated to 70 characters.", | ||||
|     ) | ||||
|  | ||||
|     parser.add_argument("--csv", action="store_true", default=False, help="Enable CSV output files generation.") | ||||
|  | ||||
|     parser.add_argument( | ||||
|         "--csv-output-dir", | ||||
|         type=str, | ||||
|         default="benchmark_results", | ||||
|         help="Directory for CSV output files (default: benchmark_results).", | ||||
|     ) | ||||
|  | ||||
|     args = parser.parse_args() | ||||
|  | ||||
|     # CSV is disabled by default, only enabled when --csv is used | ||||
|     generate_csv = args.csv | ||||
|  | ||||
|     return args.repository, args.branch, args.commit_id, args.commit_msg, generate_csv, args.csv_output_dir | ||||
|     return args.branch, args.commit_id, args.commit_msg | ||||
|  | ||||
|  | ||||
| def import_from_path(module_name, file_path): | ||||
| @ -375,128 +122,22 @@ def import_from_path(module_name, file_path): | ||||
|         raise ImportModuleException(f"failed to load python module: {e}") | ||||
|  | ||||
|  | ||||
| def create_database_connection(): | ||||
|     """ | ||||
|     Try to create a database connection. Returns None if connection fails. | ||||
|     """ | ||||
|     if not PSYCOPG2_AVAILABLE: | ||||
|         logger.warning("psycopg2 not available - running in CSV-only mode") | ||||
|         return None | ||||
|  | ||||
|     try: | ||||
|         import psycopg2 | ||||
|  | ||||
|         conn = psycopg2.connect("dbname=metrics") | ||||
|         logger.info("Successfully connected to database") | ||||
|         return conn | ||||
|     except Exception as e: | ||||
|         logger.warning(f"Failed to connect to database: {e}. Running in CSV-only mode") | ||||
|         return None | ||||
|  | ||||
|  | ||||
| def create_global_metrics_recorder( | ||||
|     repository: str, branch: str, commit_id: str, commit_msg: str, generate_csv: bool = False | ||||
| ) -> MetricsRecorder: | ||||
|     """ | ||||
|     Create a global metrics recorder that will be used across all benchmarks. | ||||
|     """ | ||||
|     connection = create_database_connection() | ||||
|     recorder = MetricsRecorder(connection, logger, repository, branch, commit_id, commit_msg, generate_csv) | ||||
|  | ||||
|     # Log the storage mode | ||||
|     storage_modes = [] | ||||
|     if connection is not None: | ||||
|         storage_modes.append("database") | ||||
|     if generate_csv: | ||||
|         storage_modes.append("CSV") | ||||
|  | ||||
|     if not storage_modes: | ||||
|         logger.warning("Running benchmarks with NO data storage (no database connection, CSV disabled)") | ||||
|         logger.warning("Use --csv flag to enable CSV output when database is unavailable") | ||||
|     else: | ||||
|         logger.info(f"Running benchmarks with: {' + '.join(storage_modes)} storage") | ||||
|  | ||||
|     return recorder | ||||
|  | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     benchmarks_folder_path = os.path.dirname(os.path.realpath(__file__)) | ||||
|     benches_folder_path = os.path.join(benchmarks_folder_path, "benches") | ||||
|  | ||||
|     repository, branch, commit_id, commit_msg, generate_csv, csv_output_dir = parse_arguments() | ||||
|     branch, commit_id, commit_msg = parse_arguments() | ||||
|  | ||||
|     # Create a global metrics recorder | ||||
|     global_metrics_recorder = create_global_metrics_recorder(repository, branch, commit_id, commit_msg, generate_csv) | ||||
|  | ||||
|     successful_benchmarks = 0 | ||||
|     failed_benchmarks = 0 | ||||
|  | ||||
|     # Automatically discover all benchmark modules in benches/ folder | ||||
|     benchmark_modules = [] | ||||
|  | ||||
|     if os.path.exists(benches_folder_path): | ||||
|         logger.debug(f"Scanning for benchmarks in: {benches_folder_path}") | ||||
|         for entry in os.scandir(benches_folder_path): | ||||
|     for entry in os.scandir(benchmarks_folder_path): | ||||
|         try: | ||||
|             if not entry.name.endswith(".py"): | ||||
|                 continue | ||||
|             if entry.name.startswith("__"):  # Skip __init__.py, __pycache__, etc. | ||||
|             if entry.path == __file__: | ||||
|                 continue | ||||
|  | ||||
|             # Check if the file has a run_benchmark function | ||||
|             try: | ||||
|                 logger.debug(f"checking if benches/{entry.name} has run_benchmark function") | ||||
|                 module = import_from_path(entry.name.split(".")[0], entry.path) | ||||
|                 if hasattr(module, "run_benchmark"): | ||||
|                     benchmark_modules.append(entry.name) | ||||
|                     logger.debug(f"discovered benchmark: {entry.name}") | ||||
|                 else: | ||||
|                     logger.debug(f"skipping {entry.name} - no run_benchmark function found") | ||||
|             except Exception as e: | ||||
|                 logger.debug(f"failed to check benches/{entry.name}: {e}") | ||||
|     else: | ||||
|         logger.warning(f"Benches directory not found: {benches_folder_path}") | ||||
|  | ||||
|     if benchmark_modules: | ||||
|         logger.info(f"Discovered {len(benchmark_modules)} benchmark(s): {benchmark_modules}") | ||||
|     else: | ||||
|         logger.warning("No benchmark modules found in benches/ directory") | ||||
|  | ||||
|     for module_name in benchmark_modules: | ||||
|         module_path = os.path.join(benches_folder_path, module_name) | ||||
|         try: | ||||
|             logger.debug(f"loading: {module_name}") | ||||
|             module = import_from_path(module_name.split(".")[0], module_path) | ||||
|             logger.info(f"running benchmarks in: {module_name}") | ||||
|  | ||||
|             # Check if the module has an updated run_benchmark function that accepts metrics_recorder | ||||
|             try: | ||||
|                 # Try the new signature first | ||||
|                 module.run_benchmark(logger, repository, branch, commit_id, commit_msg, global_metrics_recorder) | ||||
|             except TypeError: | ||||
|                 # Fall back to the old signature for backward compatibility | ||||
|                 logger.warning( | ||||
|                     f"Module {module_name} using old run_benchmark signature - database connection will be created per module" | ||||
|                 ) | ||||
|                 module.run_benchmark(logger, repository, branch, commit_id, commit_msg) | ||||
|  | ||||
|             successful_benchmarks += 1 | ||||
|             logger.debug(f"loading: {entry.name}") | ||||
|             module = import_from_path(entry.name.split(".")[0], entry.path) | ||||
|             logger.info(f"running benchmarks in: {entry.name}") | ||||
|             module.run_benchmark(logger, branch, commit_id, commit_msg) | ||||
|         except ImportModuleException as e: | ||||
|             logger.error(e) | ||||
|             failed_benchmarks += 1 | ||||
|         except Exception as e: | ||||
|             logger.error(f"error running benchmarks for {module_name}: {e}") | ||||
|             failed_benchmarks += 1 | ||||
|  | ||||
|     # Export CSV results at the end (if enabled) | ||||
|     try: | ||||
|         if generate_csv: | ||||
|             global_metrics_recorder.export_to_csv(csv_output_dir) | ||||
|             logger.info(f"CSV reports have been generated and saved to the {csv_output_dir} directory") | ||||
|         else: | ||||
|             logger.info("CSV generation disabled - no CSV files created (use --csv to enable)") | ||||
|  | ||||
|         logger.info(f"Benchmark run completed. Successful: {successful_benchmarks}, Failed: {failed_benchmarks}") | ||||
|     except Exception as e: | ||||
|         logger.error(f"Failed to export CSV results: {e}") | ||||
|     finally: | ||||
|         global_metrics_recorder.close() | ||||
|             logger.error(f"error running benchmarks for {entry.name}: {e}") | ||||
|  | ||||
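With the discovery loop above, any `.py` file sitting next to `benchmarks_entrypoint.py` (other than the entrypoint itself) is imported and its `run_benchmark` is called with the parsed branch, commit id and commit message. A minimal sketch of such a module; the file name and body are illustrative:

```python
# my_benchmark.py -- hypothetical module placed next to benchmarks_entrypoint.py
from logging import Logger
from time import perf_counter


def run_benchmark(logger: Logger, branch: str, commit_id: str, commit_msg: str):
    logger.info(f"running on {branch}@{commit_id}: {commit_msg}")
    start = perf_counter()
    # ... load a model and collect whatever measurements are relevant ...
    logger.info(f"finished in {perf_counter() - start}s")
```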
| @ -19,7 +19,7 @@ backend: | ||||
|   model: meta-llama/Llama-2-7b-hf | ||||
|   cache_implementation: static | ||||
|   torch_compile: true | ||||
|   dtype: float16 | ||||
|   torch_dtype: float16 | ||||
|   torch_compile_config: | ||||
|     backend: inductor | ||||
|     mode: reduce-overhead | ||||
|  | ||||
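For reference, `torch_compile: true` with the `torch_compile_config` above corresponds roughly to the following PyTorch call; this is a sketch on a toy module, and the actual wiring is done by optimum-benchmark:

```python
import torch

model = torch.nn.Linear(16, 16).eval()
# Roughly: backend: inductor, mode: reduce-overhead.
compiled_forward = torch.compile(model.forward, backend="inductor", mode="reduce-overhead")
out = compiled_forward(torch.randn(1, 16))
```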
benchmark/init_db.sql (new file, 33 lines)
							| @ -0,0 +1,33 @@ | ||||
| CREATE TABLE IF NOT EXISTS benchmarks ( | ||||
|   benchmark_id SERIAL PRIMARY KEY, | ||||
|   branch VARCHAR(255), | ||||
|   commit_id VARCHAR(72), | ||||
|   commit_message VARCHAR(70), | ||||
|   metadata jsonb, | ||||
|   created_at timestamp without time zone NOT NULL DEFAULT (current_timestamp AT TIME ZONE 'UTC') | ||||
| ); | ||||
|  | ||||
| CREATE INDEX IF NOT EXISTS benchmarks_benchmark_id_idx ON benchmarks (benchmark_id); | ||||
|  | ||||
| CREATE INDEX IF NOT EXISTS benchmarks_branch_idx ON benchmarks (branch); | ||||
|  | ||||
| CREATE TABLE IF NOT EXISTS device_measurements ( | ||||
|   measurement_id SERIAL PRIMARY KEY, | ||||
|   benchmark_id int REFERENCES benchmarks (benchmark_id), | ||||
|   cpu_util double precision, | ||||
|   mem_megabytes double precision, | ||||
|   gpu_util double precision, | ||||
|   gpu_mem_megabytes double precision, | ||||
|   time timestamp without time zone NOT NULL DEFAULT (current_timestamp AT TIME ZONE 'UTC') | ||||
| ); | ||||
|  | ||||
| CREATE INDEX IF NOT EXISTS device_measurements_branch_idx ON device_measurements (benchmark_id); | ||||
|  | ||||
| CREATE TABLE IF NOT EXISTS model_measurements ( | ||||
|   measurement_id SERIAL PRIMARY KEY, | ||||
|   benchmark_id int REFERENCES benchmarks (benchmark_id), | ||||
|   measurements jsonb, | ||||
|   time timestamp without time zone NOT NULL DEFAULT (current_timestamp AT TIME ZONE 'UTC') | ||||
| ); | ||||
|  | ||||
| CREATE INDEX IF NOT EXISTS model_measurements_branch_idx ON model_measurements (benchmark_id); | ||||
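A minimal sketch of how the Python side talks to this schema, mirroring what `MetricsRecorder` does elsewhere in this diff; it assumes a local PostgreSQL database named `metrics` (as in `psycopg2.connect("dbname=metrics")` in the benchmark code), and the literal branch/commit/measurement values are placeholders:

```python
import psycopg2
from psycopg2.extensions import register_adapter
from psycopg2.extras import Json

# Allow plain dicts to be stored into the jsonb columns defined above.
register_adapter(dict, Json)

conn = psycopg2.connect("dbname=metrics")
conn.autocommit = True

with conn.cursor() as cur:
    # benchmark_id is a SERIAL primary key, so RETURNING hands it back.
    cur.execute(
        "INSERT INTO benchmarks (branch, commit_id, commit_message, metadata) VALUES (%s, %s, %s, %s) RETURNING benchmark_id",
        ("main", "abc1234", "example commit", {"gpu_name": "NVIDIA A10G", "model_id": "meta-llama/Llama-2-7b-hf"}),
    )
    benchmark_id = cur.fetchone()[0]

    cur.execute(
        "INSERT INTO device_measurements (benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes) VALUES (%s, %s, %s, %s, %s)",
        (benchmark_id, 12.5, 2048.0, 87.0, 15000.0),
    )

conn.close()
```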
benchmark/llama.py (new file, 342 lines)
							| @ -0,0 +1,342 @@ | ||||
| from logging import Logger | ||||
| import os | ||||
| from threading import Event, Thread | ||||
| from time import perf_counter, sleep | ||||
| from typing import Optional | ||||
| from benchmarks_entrypoint import MetricsRecorder | ||||
| import gpustat | ||||
| import psutil | ||||
| import psycopg2 | ||||
| import torch | ||||
|  | ||||
| from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, StaticCache | ||||
|  | ||||
|  | ||||
| os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" | ||||
|  | ||||
| os.environ["TOKENIZERS_PARALLELISM"] = "1" | ||||
| torch.set_float32_matmul_precision("high") | ||||
|  | ||||
|  | ||||
| def collect_metrics(benchmark_id, continue_metric_collection, metrics_recorder): | ||||
|     p = psutil.Process(os.getpid()) | ||||
|     while not continue_metric_collection.is_set(): | ||||
|         with p.oneshot(): | ||||
|             cpu_util = p.cpu_percent() | ||||
|             mem_megabytes = p.memory_info().rss / (1024 * 1024) | ||||
|         gpu_stats = gpustat.GPUStatCollection.new_query() | ||||
|         gpu_util = gpu_stats[0]["utilization.gpu"] | ||||
|         gpu_mem_megabytes = gpu_stats[0]["memory.used"] | ||||
|         metrics_recorder.collect_device_measurements( | ||||
|             benchmark_id, cpu_util, mem_megabytes, gpu_util, gpu_mem_megabytes | ||||
|         ) | ||||
|         sleep(0.01) | ||||
|  | ||||
|  | ||||
| def run_benchmark(logger: Logger, branch: str, commit_id: str, commit_msg: str, num_tokens_to_generate=100): | ||||
|     continue_metric_collection = Event() | ||||
|     metrics_thread = None | ||||
|     model_id = "meta-llama/Llama-2-7b-hf" | ||||
|     metrics_recorder = MetricsRecorder(psycopg2.connect("dbname=metrics"), logger, branch, commit_id, commit_msg) | ||||
|     try: | ||||
|         gpu_stats = gpustat.GPUStatCollection.new_query() | ||||
|         gpu_name = gpu_stats[0]["name"] | ||||
|         benchmark_id = metrics_recorder.initialise_benchmark({"gpu_name": gpu_name, "model_id": model_id}) | ||||
|         logger.info(f"running benchmark #{benchmark_id} on {gpu_name} for {model_id}") | ||||
|         metrics_thread = Thread( | ||||
|             target=collect_metrics, | ||||
|             args=[benchmark_id, continue_metric_collection, metrics_recorder], | ||||
|         ) | ||||
|         metrics_thread.start() | ||||
|         logger.info("started background thread to fetch device metrics") | ||||
|  | ||||
|         os.environ["TOKENIZERS_PARALLELISM"] = "false"  # silence warnings when compiling | ||||
|  | ||||
|         device = "cuda" | ||||
|  | ||||
|         logger.info("downloading weights") | ||||
|         # This is to avoid counting download in model load time measurement | ||||
|         model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16) | ||||
|         gen_config = GenerationConfig(do_sample=False, top_p=1, temperature=1) | ||||
|         logger.info("loading model") | ||||
|         start = perf_counter() | ||||
|         model = AutoModelForCausalLM.from_pretrained( | ||||
|             model_id, torch_dtype=torch.float16, generation_config=gen_config | ||||
|         ).eval() | ||||
|         model.to(device) | ||||
|         torch.cuda.synchronize() | ||||
|         end = perf_counter() | ||||
|         model_load_time = end - start | ||||
|         logger.info(f"loaded model in: {model_load_time}s") | ||||
|  | ||||
|         tokenizer = AutoTokenizer.from_pretrained(model_id) | ||||
|  | ||||
|         prompt = "Why dogs are so cute?" | ||||
|         inputs = tokenizer(prompt, return_tensors="pt").to(device) | ||||
|  | ||||
|         # Specify the max length (including both the prompt and the response) | ||||
|         # When calling `generate` with `cache_implementation="static"` later, this is also used to create a `StaticCache` object | ||||
|         # with sequence length = `max_length`. The longer it is, the more you can reuse the cache. | ||||
|         seq_length = inputs["input_ids"].shape[1] | ||||
|         model.generation_config.max_length = seq_length + num_tokens_to_generate | ||||
|         batch_size = inputs["input_ids"].shape[0] | ||||
|  | ||||
|         # Copied from the gpt-fast repo | ||||
|         def multinomial_sample_one_no_sync(probs_sort):  # Does multinomial sampling without a cuda synchronization | ||||
|             q = torch.empty_like(probs_sort).exponential_(1) | ||||
|             return torch.argmax(probs_sort / q, dim=-1, keepdim=True).to(dtype=torch.int) | ||||
|  | ||||
|         def logits_to_probs(logits, temperature: float = 1.0, top_k: Optional[int] = None): | ||||
|             logits = logits / max(temperature, 1e-5) | ||||
|  | ||||
|             if top_k is not None: | ||||
|                 v, _ = torch.topk(logits, min(top_k, logits.size(-1))) | ||||
|                 pivot = v.select(-1, -1).unsqueeze(-1) | ||||
|                 logits = torch.where(logits < pivot, -float("Inf"), logits) | ||||
|             probs = torch.nn.functional.softmax(logits, dim=-1) | ||||
|             return probs | ||||
|  | ||||
|         def sample(logits, temperature: float = 1.0, top_k: Optional[int] = None): | ||||
|             probs = logits_to_probs(logits[:, -1], temperature, top_k) | ||||
|             idx_next = multinomial_sample_one_no_sync(probs) | ||||
|             return idx_next, probs | ||||
|  | ||||
|         def decode_one_token(model, cur_token, cache_position, past_key_values): | ||||
|             logits = model( | ||||
|                 cur_token, | ||||
|                 cache_position=cache_position, | ||||
|                 past_key_values=past_key_values, | ||||
|                 return_dict=False, | ||||
|                 use_cache=True, | ||||
|             )[0] | ||||
|             new_token = sample(logits, temperature=0.6, top_k=5)[0] | ||||
|             return new_token | ||||
|  | ||||
|         ######### | ||||
|         # Eager # | ||||
|         ######### | ||||
|         with torch.no_grad(): | ||||
|             past_key_values = StaticCache( | ||||
|                 model.config, | ||||
|                 max_batch_size=batch_size, | ||||
|                 device=device, | ||||
|                 dtype=torch.float16, | ||||
|                 max_cache_len=seq_length + num_tokens_to_generate, | ||||
|             ) | ||||
|             cache_position = torch.arange(seq_length, device=device) | ||||
|             start = perf_counter() | ||||
|             model( | ||||
|                 **inputs, | ||||
|                 cache_position=cache_position, | ||||
|                 past_key_values=past_key_values, | ||||
|                 return_dict=False, | ||||
|                 use_cache=True, | ||||
|             ) | ||||
|             end = perf_counter() | ||||
|             first_eager_fwd_pass_time = end - start | ||||
|             logger.info(f"completed first eager fwd pass in: {first_eager_fwd_pass_time}s") | ||||
|             start = perf_counter() | ||||
|             output = model.generate(**inputs, do_sample=False) | ||||
|             end = perf_counter() | ||||
|             first_eager_generate_time = end - start | ||||
|             logger.info(f"completed first eager generation in: {first_eager_generate_time}s") | ||||
|             logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") | ||||
|  | ||||
|             past_key_values = StaticCache( | ||||
|                 model.config, | ||||
|                 max_batch_size=batch_size, | ||||
|                 device=device, | ||||
|                 dtype=torch.float16, | ||||
|                 max_cache_len=seq_length + num_tokens_to_generate, | ||||
|             ) | ||||
|             cache_position = torch.arange(seq_length, device=device) | ||||
|             start = perf_counter() | ||||
|             model( | ||||
|                 **inputs, | ||||
|                 cache_position=cache_position, | ||||
|                 past_key_values=past_key_values, | ||||
|                 return_dict=False, | ||||
|                 use_cache=True, | ||||
|             ) | ||||
|             end = perf_counter() | ||||
|             second_eager_fwd_pass_time = end - start | ||||
|             logger.info(f"completed second eager fwd pass in: {second_eager_fwd_pass_time}s") | ||||
|             start = perf_counter() | ||||
|             model.generate(**inputs, do_sample=False) | ||||
|             end = perf_counter() | ||||
|             second_eager_generate_time = end - start | ||||
|             logger.info(f"completed second eager generation in: {second_eager_generate_time}s") | ||||
|             logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") | ||||
|  | ||||
|             torch.compiler.reset() | ||||
|  | ||||
|             ################ | ||||
|             # Forward pass # | ||||
|             ################ | ||||
|  | ||||
|             # `torch.compile(model, ...)` is not recommended, as it also compiles the callbacks | ||||
|             # and the full generate loop. We recommend compiling only the forward pass for now. | ||||
|             # "reduce-overhead" will use CUDA graphs. | ||||
|             generated_ids = torch.zeros( | ||||
|                 (batch_size, num_tokens_to_generate + seq_length), dtype=torch.int, device=device | ||||
|             ) | ||||
|  | ||||
|             generated_ids[:, :seq_length] = inputs["input_ids"] | ||||
|             decode_one_token = torch.compile(decode_one_token, mode="reduce-overhead", fullgraph=True) | ||||
|             # model.forward = torch.compile(model.forward, mode="reduce-overhead", fullgraph=True) | ||||
|             # TODO: use decode_one_token(model, input_id.clone(), cache_position) for verification | ||||
|             past_key_values = StaticCache( | ||||
|                 model.config, | ||||
|                 max_batch_size=batch_size, | ||||
|                 device=device, | ||||
|                 dtype=torch.float16, | ||||
|                 max_cache_len=seq_length + num_tokens_to_generate + 10, | ||||
|             ) | ||||
|             cache_position = torch.arange(seq_length, device=device) | ||||
|             all_generated_tokens = [] | ||||
|             ### First compile, prefill | ||||
|             start = perf_counter() | ||||
|             next_token = decode_one_token( | ||||
|                 model, inputs["input_ids"], cache_position=cache_position, past_key_values=past_key_values | ||||
|             ) | ||||
|             torch.cuda.synchronize() | ||||
|             end = perf_counter() | ||||
|             time_to_first_token = end - start | ||||
|             logger.info(f"completed first compile generation in: {time_to_first_token}s") | ||||
|             cache_position += 1 | ||||
|             all_generated_tokens += next_token.tolist() | ||||
|  | ||||
|             cache_position = torch.tensor([seq_length], device=device) | ||||
|             ### First compile, decoding | ||||
|             start = perf_counter() | ||||
|             next_token = decode_one_token( | ||||
|                 model, next_token.clone(), cache_position=cache_position, past_key_values=past_key_values | ||||
|             ) | ||||
|             torch.cuda.synchronize() | ||||
|             end = perf_counter() | ||||
|             time_to_second_token = end - start | ||||
|             logger.info(f"completed second compile generation in: {time_to_second_token}s") | ||||
|             cache_position += 1 | ||||
|             all_generated_tokens += next_token.tolist() | ||||
|  | ||||
|             ### Second compile, decoding | ||||
|             start = perf_counter() | ||||
|             next_token = decode_one_token( | ||||
|                 model, next_token.clone(), cache_position=cache_position, past_key_values=past_key_values | ||||
|             ) | ||||
|             torch.cuda.synchronize() | ||||
|             end = perf_counter() | ||||
|             time_to_third_token = end - start | ||||
|             logger.info(f"completed third compile forward in: {time_to_third_token}s") | ||||
|             cache_position += 1 | ||||
|             all_generated_tokens += next_token.tolist() | ||||
|  | ||||
|             ### Using cuda graphs decoding | ||||
|  | ||||
|             start = perf_counter() | ||||
|             for _ in range(1, num_tokens_to_generate): | ||||
|                 all_generated_tokens += next_token.tolist() | ||||
|                 next_token = decode_one_token( | ||||
|                     model, next_token.clone(), cache_position=cache_position, past_key_values=past_key_values | ||||
|                 ) | ||||
|                 cache_position += 1 | ||||
|             torch.cuda.synchronize() | ||||
|             end = perf_counter() | ||||
|             mean_time_to_next_token = (end - start) / num_tokens_to_generate | ||||
|             logger.info(f"completed next compile generation in: {mean_time_to_next_token}s") | ||||
|             logger.info(f"generated: {tokenizer.batch_decode(all_generated_tokens)}") | ||||
|  | ||||
|             #################### | ||||
|             # Generate compile # | ||||
|             #################### | ||||
|             torch.compiler.reset() | ||||
|             # We will not compile the full generate loop, as it is too intensive; we still measure the full forward, though. | ||||
|  | ||||
|             past_key_values = StaticCache( | ||||
|                 model.config, | ||||
|                 max_batch_size=batch_size, | ||||
|                 device=device, | ||||
|                 dtype=torch.float16, | ||||
|                 max_cache_len=seq_length + 128, | ||||
|             ) | ||||
|  | ||||
|             # 1st call | ||||
|             start = perf_counter() | ||||
|             output = model.generate(**inputs, past_key_values=past_key_values) | ||||
|             torch.cuda.synchronize() | ||||
|             end = perf_counter() | ||||
|             first_compile_generate_time = end - start | ||||
|             logger.info(f"completed first compile generation in: {first_compile_generate_time}s") | ||||
|             logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") | ||||
|  | ||||
|             past_key_values = StaticCache( | ||||
|                 model.config, | ||||
|                 max_batch_size=batch_size, | ||||
|                 device=device, | ||||
|                 dtype=torch.float16, | ||||
|                 max_cache_len=seq_length + 128, | ||||
|             ) | ||||
|             # 2nd call | ||||
|             start = perf_counter() | ||||
|             output = model.generate(**inputs, past_key_values=past_key_values) | ||||
|             torch.cuda.synchronize() | ||||
|             end = perf_counter() | ||||
|             second_compile_generate_time = end - start | ||||
|             logger.info(f"completed second compile generation in: {second_compile_generate_time}s") | ||||
|             logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") | ||||
|  | ||||
|             past_key_values = StaticCache( | ||||
|                 model.config, | ||||
|                 max_batch_size=batch_size, | ||||
|                 device=device, | ||||
|                 dtype=torch.float16, | ||||
|                 max_cache_len=seq_length + 128, | ||||
|             ) | ||||
|  | ||||
|             # 3rd call | ||||
|             start = perf_counter() | ||||
|             output = model.generate(**inputs, past_key_values=past_key_values) | ||||
|             end = perf_counter() | ||||
|             third_compile_generate_time = end - start | ||||
|             logger.info(f"completed third compile generation in: {third_compile_generate_time}s") | ||||
|             logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") | ||||
|  | ||||
|             past_key_values = StaticCache( | ||||
|                 model.config, | ||||
|                 max_batch_size=batch_size, | ||||
|                 device=device, | ||||
|                 dtype=torch.float16, | ||||
|                 max_cache_len=seq_length + 128, | ||||
|             ) | ||||
|             # 4th call | ||||
|             start = perf_counter() | ||||
|             output = model.generate(**inputs, past_key_values=past_key_values) | ||||
|             end = perf_counter() | ||||
|             fourth_compile_generate_time = end - start | ||||
|             logger.info(f"completed fourth compile generation in: {fourth_compile_generate_time}s") | ||||
|             logger.info(f"generated: {tokenizer.batch_decode(output.cpu().tolist())}") | ||||
|  | ||||
|         metrics_recorder.collect_model_measurements( | ||||
|             benchmark_id, | ||||
|             { | ||||
|                 "model_load_time": model_load_time, | ||||
|                 "first_eager_forward_pass_time_secs": first_eager_fwd_pass_time, | ||||
|                 "second_eager_forward_pass_time_secs": second_eager_fwd_pass_time, | ||||
|                 "first_eager_generate_time_secs": first_eager_generate_time, | ||||
|                 "second_eager_generate_time_secs": second_eager_generate_time, | ||||
|                 "time_to_first_token_secs": time_to_first_token, | ||||
|                 "time_to_second_token_secs": time_to_second_token, | ||||
|                 "time_to_third_token_secs": time_to_third_token, | ||||
|                 "time_to_next_token_mean_secs": mean_time_to_next_token, | ||||
|                 "first_compile_generate_time_secs": first_compile_generate_time, | ||||
|                 "second_compile_generate_time_secs": second_compile_generate_time, | ||||
|                 "third_compile_generate_time_secs": third_compile_generate_time, | ||||
|                 "fourth_compile_generate_time_secs": fourth_compile_generate_time, | ||||
|             }, | ||||
|         ) | ||||
|     except Exception as e: | ||||
|         logger.error(f"Caught exception: {e}") | ||||
|     continue_metric_collection.set() | ||||
|     if metrics_thread is not None: | ||||
|         metrics_thread.join() | ||||
|     metrics_recorder.close() | ||||
| @ -3,11 +3,7 @@ import subprocess | ||||
|  | ||||
|  | ||||
| def main(config_dir, config_name, args): | ||||
|     subprocess.run( | ||||
|         ["optimum-benchmark", "--config-dir", f"{config_dir}", "--config-name", f"{config_name}"] | ||||
|         + ["hydra/job_logging=disabled", "hydra/hydra_logging=disabled"] | ||||
|         + args | ||||
|     ) | ||||
|     subprocess.run(["optimum-benchmark", "--config-dir", f"{config_dir}", "--config-name", f"{config_name}"] + ["hydra/job_logging=disabled", "hydra/hydra_logging=disabled"] + args) | ||||
|  | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|  | ||||
| @ -2,5 +2,4 @@ gpustat==1.1.1 | ||||
| psutil==6.0.0 | ||||
| psycopg2==2.9.9 | ||||
| torch>=2.4.0 | ||||
| hf_transfer | ||||
| pandas>=1.5.0 | ||||
| hf_transfer | ||||
							
								
								
									
1  benchmark_v2/.gitignore  vendored
| @ -1 +0,0 @@ | ||||
| benchmark_results/ | ||||
| @ -1,98 +0,0 @@ | ||||
| # Benchmarking v2 | ||||
|  | ||||
| A comprehensive benchmarking framework for transformer models that supports multiple execution modes (eager, compiled, kernelized), collects detailed performance metrics, and produces structured output. | ||||
|  | ||||
|  | ||||
| ## Quick Start | ||||
|  | ||||
| ### Running All Benchmarks | ||||
|  | ||||
| ```bash | ||||
| # Run all benchmarks with default settings | ||||
| python run_benchmarks.py | ||||
|  | ||||
| # Specify output directory | ||||
| python run_benchmarks.py --output-dir my_results | ||||
|  | ||||
| # Run with custom parameters | ||||
| python run_benchmarks.py \ | ||||
|     --warmup-iterations 5 \ | ||||
|     --measurement-iterations 10 \ | ||||
|     --num-tokens-to-generate 200 | ||||
| ``` | ||||
|  | ||||
| ### Running Specific Benchmarks | ||||
|  | ||||
| ```bash | ||||
| # Include only specific benchmarks | ||||
| python run_benchmarks.py --include llama | ||||
|  | ||||
| # Exclude specific benchmarks | ||||
| python run_benchmarks.py --exclude old_benchmark | ||||
| ``` | ||||
|  | ||||
| ## Output Format | ||||
|  | ||||
| Results are saved as JSON files with the following structure: | ||||
|  | ||||
| ```json | ||||
| { | ||||
|   "model_name": "llama_2_7b", | ||||
|   "benchmark_scenarios": [ | ||||
|     { | ||||
|       "scenario_name": "eager_variant", | ||||
|       "metadata": { | ||||
|         "timestamp": "2025-01-XX...", | ||||
|         "commit_id": "abc123...", | ||||
|         "hardware_info": { | ||||
|           "gpu_name": "NVIDIA A100", | ||||
|           "gpu_memory_total": 40960, | ||||
|           "cpu_count": 64 | ||||
|         }, | ||||
|         "config": { | ||||
|           "variant": "eager", | ||||
|           "warmup_iterations": 3, | ||||
|           "measurement_iterations": 5 | ||||
|         } | ||||
|       }, | ||||
|       "measurements": { | ||||
|         "latency": { | ||||
|           "mean": 2.45, | ||||
|           "median": 2.43, | ||||
|           "std": 0.12, | ||||
|           "min": 2.31, | ||||
|           "max": 2.67, | ||||
|           "p95": 2.61, | ||||
|           "p99": 2.65 | ||||
|         }, | ||||
|         "time_to_first_token": { | ||||
|           "mean": 0.15, | ||||
|           "std": 0.02 | ||||
|         }, | ||||
|         "tokens_per_second": { | ||||
|           "mean": 87.3, | ||||
|           "unit": "tokens/sec" | ||||
|         } | ||||
|       }, | ||||
|       "gpu_metrics": { | ||||
|         "gpu_utilization_mean": 85.2, | ||||
|         "gpu_memory_used_mean": 12450 | ||||
|       } | ||||
|     } | ||||
|   ] | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### Debug Mode | ||||
|  | ||||
| ```bash | ||||
| python run_benchmarks.py --log-level DEBUG | ||||
| ``` | ||||
|  | ||||
| ## Contributing | ||||
|  | ||||
| To add new benchmarks: | ||||
|  | ||||
| 1. Create a new file in `benches/` | ||||
| 2. Implement the `ModelBenchmark` interface | ||||
| 3. Add a runner function (`run_<benchmark_name>` or `run_benchmark`) | ||||
| 4. Run `run_benchmarks.py`; your benchmark will be discovered and run automatically (see the sketch below) | ||||
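|  | ||||
| A minimal sketch of such a benchmark file is shown below (a hypothetical `benches/my_new_model.py`). It mirrors the structure of the existing `benches/llama.py`; the exact set of `ModelBenchmark` hooks and the `BenchmarkRunner` API are assumed from that example, and the model id is a placeholder. | ||||
|  | ||||
| ```python | ||||
| # benches/my_new_model.py -- hypothetical sketch, modeled on benches/llama.py | ||||
| import logging | ||||
| from typing import Any | ||||
|  | ||||
| import torch | ||||
| from benchmark_framework import BenchmarkRunner, ModelBenchmark | ||||
|  | ||||
|  | ||||
| class MyNewModelBenchmark(ModelBenchmark): | ||||
|     """Minimal benchmark with a single eager scenario.""" | ||||
|  | ||||
|     def get_scenario_configs(self) -> list[dict[str, Any]]: | ||||
|         return [{"variant": "eager", "compile_mode": None, "use_cache": True, "description": "Eager execution"}] | ||||
|  | ||||
|     def get_default_generation_config(self) -> dict[str, Any]: | ||||
|         return {"do_sample": False, "max_new_tokens": None}  # max_new_tokens is set per scenario | ||||
|  | ||||
|     def get_model_init_kwargs(self, config) -> dict[str, Any]: | ||||
|         return {"torch_dtype": getattr(torch, config.torch_dtype), "use_cache": True} | ||||
|  | ||||
|     def get_default_torch_dtype(self) -> str: | ||||
|         return "float16" | ||||
|  | ||||
|     def get_default_device(self) -> str: | ||||
|         return "cuda" | ||||
|  | ||||
|  | ||||
| def run_my_new_model(logger: logging.Logger, output_dir: str, **kwargs): | ||||
|     """Runner picked up by run_benchmarks.py via the run_<benchmark_name> convention.""" | ||||
|     benchmark = MyNewModelBenchmark(logger) | ||||
|     scenarios = benchmark.create_scenarios( | ||||
|         model_id=kwargs.get("model_id", "my-org/my-new-model"),  # placeholder model id | ||||
|         warmup_iterations=kwargs.get("warmup_iterations", 3), | ||||
|         measurement_iterations=kwargs.get("measurement_iterations", 5), | ||||
|         num_tokens_to_generate=kwargs.get("num_tokens_to_generate", 100), | ||||
|     ) | ||||
|     runner = BenchmarkRunner(logger, output_dir) | ||||
|     results = runner.run_benchmark(benchmark, scenarios, commit_id=kwargs.get("commit_id")) | ||||
|     return runner.save_results("my_new_model", results) if results else None | ||||
| ``` | ||||
|  | ||||
| With this in place, `python run_benchmarks.py --include my_new_model` should pick it up automatically. | ||||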
| @ -1 +0,0 @@ | ||||
| # Benchmark implementations directory | ||||
| @ -1,166 +0,0 @@ | ||||
| # Copyright 2025 The HuggingFace Team. All rights reserved. | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
|  | ||||
| import logging | ||||
| import os | ||||
| from typing import Any | ||||
|  | ||||
| import torch | ||||
| from benchmark_framework import ModelBenchmark | ||||
|  | ||||
|  | ||||
| os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1" | ||||
| os.environ["TOKENIZERS_PARALLELISM"] = "1" | ||||
| torch.set_float32_matmul_precision("high") | ||||
|  | ||||
|  | ||||
| class LLaMABenchmark(ModelBenchmark): | ||||
|     """Simplified LLaMA model benchmark implementation using the ModelBenchmark base class.""" | ||||
|  | ||||
|     def __init__(self, logger: logging.Logger): | ||||
|         super().__init__(logger) | ||||
|         self._default_prompt = "Why dogs are so cute?"  # Custom prompt for LLaMA | ||||
|  | ||||
|     def get_scenario_configs(self) -> list[dict[str, Any]]: | ||||
|         """ | ||||
|         Get LLaMA-specific scenario configurations. | ||||
|  | ||||
|         Returns: | ||||
|             List of scenario configuration dictionaries | ||||
|         """ | ||||
|         return [ | ||||
|             # Eager variants | ||||
|             {"variant": "eager", "compile_mode": None, "use_cache": True, "description": "Eager execution with cache"}, | ||||
|             # Compiled variants | ||||
|             { | ||||
|                 "variant": "compiled", | ||||
|                 "compile_mode": "max-autotune", | ||||
|                 "use_cache": True, | ||||
|                 "description": "Compiled with max autotune", | ||||
|             }, | ||||
|             # Kernelized variant (if available) | ||||
|             { | ||||
|                 "variant": "kernelized", | ||||
|                 "compile_mode": "max-autotune", | ||||
|                 "use_cache": True, | ||||
|                 "description": "Kernelized execution", | ||||
|             }, | ||||
|         ] | ||||
|  | ||||
|     def _is_kernelization_available(self) -> bool: | ||||
|         """Check if kernelization is available for LLaMA.""" | ||||
|         try: | ||||
|             from kernels import Mode, kernelize  # noqa: F401 | ||||
|  | ||||
|             return True | ||||
|         except ImportError: | ||||
|             self.logger.debug("Kernelization not available: kernels module not found") | ||||
|             return False | ||||
|  | ||||
|     def get_default_generation_config(self) -> dict[str, Any]: | ||||
|         """Get LLaMA-specific generation configuration.""" | ||||
|         return { | ||||
|             "do_sample": False, | ||||
|             "top_p": 1.0, | ||||
|             "temperature": 1.0, | ||||
|             "repetition_penalty": 1.0, | ||||
|             "max_new_tokens": None,  # Will be set per scenario | ||||
|         } | ||||
|  | ||||
|     def get_model_init_kwargs(self, config) -> dict[str, Any]: | ||||
|         """Get LLaMA-specific model initialization kwargs.""" | ||||
|         return { | ||||
|             "torch_dtype": getattr(torch, config.torch_dtype), | ||||
|             "attn_implementation": config.attn_implementation, | ||||
|             "use_cache": True, | ||||
|         } | ||||
|  | ||||
|     def get_default_torch_dtype(self) -> str: | ||||
|         """Get default torch dtype for LLaMA.""" | ||||
|         return "float16"  # LLaMA works well with float16 | ||||
|  | ||||
|     def get_default_device(self) -> str: | ||||
|         """Get default device for LLaMA.""" | ||||
|         return "cuda"  # LLaMA prefers CUDA | ||||
|  | ||||
|  | ||||
| def run_llama(logger, output_dir, **kwargs): | ||||
|     """ | ||||
|     Run LLaMA benchmark with the given configuration. | ||||
|  | ||||
|     Args: | ||||
|         logger: Logger instance | ||||
|         output_dir: Output directory for results | ||||
|         **kwargs: Additional configuration options | ||||
|  | ||||
|     Returns: | ||||
|         Path to output file if successful | ||||
|     """ | ||||
|     from benchmark_framework import BenchmarkRunner | ||||
|  | ||||
|     # Extract parameters with defaults | ||||
|     model_id = kwargs.get("model_id", "meta-llama/Llama-2-7b-hf") | ||||
|     warmup_iterations = kwargs.get("warmup_iterations", 3) | ||||
|     measurement_iterations = kwargs.get("measurement_iterations", 5) | ||||
|     num_tokens_to_generate = kwargs.get("num_tokens_to_generate", 100) | ||||
|     include_sdpa_variants = kwargs.get("include_sdpa_variants", True) | ||||
|     device = kwargs.get("device", "cuda") | ||||
|     torch_dtype = kwargs.get("torch_dtype", "float16") | ||||
|     batch_size = kwargs.get("batch_size", 1) | ||||
|     commit_id = kwargs.get("commit_id") | ||||
|  | ||||
|     logger.info(f"Starting LLaMA benchmark for model: {model_id}") | ||||
|     logger.info( | ||||
|         f"Configuration: warmup={warmup_iterations}, measurement={measurement_iterations}, tokens={num_tokens_to_generate}" | ||||
|     ) | ||||
|  | ||||
|     try: | ||||
|         # Create benchmark instance | ||||
|         benchmark = LLaMABenchmark(logger) | ||||
|  | ||||
|         # Create scenarios | ||||
|         scenarios = benchmark.create_scenarios( | ||||
|             model_id=model_id, | ||||
|             warmup_iterations=warmup_iterations, | ||||
|             measurement_iterations=measurement_iterations, | ||||
|             num_tokens_to_generate=num_tokens_to_generate, | ||||
|             include_sdpa_variants=include_sdpa_variants, | ||||
|             device=device, | ||||
|             torch_dtype=torch_dtype, | ||||
|             batch_size=batch_size, | ||||
|         ) | ||||
|  | ||||
|         logger.info(f"Created {len(scenarios)} benchmark scenarios") | ||||
|  | ||||
|         # Create runner and execute benchmarks | ||||
|         runner = BenchmarkRunner(logger, output_dir) | ||||
|         results = runner.run_benchmark(benchmark, scenarios, commit_id=commit_id) | ||||
|  | ||||
|         if not results: | ||||
|             logger.warning("No successful benchmark results") | ||||
|             return None | ||||
|  | ||||
|         # Save results | ||||
|         model_name = model_id.split("/")[-1]  # Extract model name from ID | ||||
|         output_file = runner.save_results(model_name, results) | ||||
|  | ||||
|         logger.info(f"LLaMA benchmark completed successfully. Results saved to: {output_file}") | ||||
|         return output_file | ||||
|  | ||||
|     except Exception as e: | ||||
|         logger.error(f"LLaMA benchmark failed: {e}") | ||||
|         import traceback | ||||
|  | ||||
|         logger.debug(traceback.format_exc()) | ||||
|         raise | ||||
										
											
File diff suppressed because it is too large
							| @ -1,6 +0,0 @@ | ||||
| numpy>=1.21.0 | ||||
| psutil>=5.8.0 | ||||
| gpustat>=1.0.0 | ||||
| torch>=2.0.0 | ||||
| transformers>=4.30.0 | ||||
| datasets>=2.10.0  | ||||
| @ -1,340 +0,0 @@ | ||||
| #!/usr/bin/env python3 | ||||
| # Copyright 2025 The HuggingFace Team. All rights reserved. | ||||
| # | ||||
| # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| # you may not use this file except in compliance with the License. | ||||
| # You may obtain a copy of the License at | ||||
| # | ||||
| #     http://www.apache.org/licenses/LICENSE-2.0 | ||||
| # | ||||
| # Unless required by applicable law or agreed to in writing, software | ||||
| # distributed under the License is distributed on an "AS IS" BASIS, | ||||
| # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| # See the License for the specific language governing permissions and | ||||
| # limitations under the License. | ||||
|  | ||||
| """ | ||||
| Top-level benchmarking script that automatically discovers and runs all benchmarks | ||||
| in the ./benches directory, organizing outputs into model-specific subfolders. | ||||
| """ | ||||
|  | ||||
| import argparse | ||||
| import importlib.util | ||||
| import json | ||||
| import logging | ||||
| import os | ||||
| import sys | ||||
| from datetime import datetime | ||||
| from pathlib import Path | ||||
| from typing import Any, Optional | ||||
|  | ||||
|  | ||||
| def setup_logging(log_level: str = "INFO", enable_file_logging: bool = False) -> logging.Logger: | ||||
|     """Setup logging configuration.""" | ||||
|     numeric_level = getattr(logging, log_level.upper(), None) | ||||
|     if not isinstance(numeric_level, int): | ||||
|         raise ValueError(f"Invalid log level: {log_level}") | ||||
|  | ||||
|     handlers = [logging.StreamHandler(sys.stdout)] | ||||
|  | ||||
|     if enable_file_logging: | ||||
|         handlers.append(logging.FileHandler(f"benchmark_run_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log")) | ||||
|  | ||||
|     logging.basicConfig( | ||||
|         level=numeric_level, format="[%(levelname)s - %(asctime)s] %(name)s: %(message)s", handlers=handlers | ||||
|     ) | ||||
|  | ||||
|     return logging.getLogger(__name__) | ||||
|  | ||||
|  | ||||
| def discover_benchmarks(benches_dir: str) -> list[dict[str, Any]]: | ||||
|     """ | ||||
|     Discover all benchmark modules in the benches directory. | ||||
|  | ||||
|     Returns: | ||||
|         List of dictionaries containing benchmark module info | ||||
|     """ | ||||
|     benchmarks = [] | ||||
|     benches_path = Path(benches_dir) | ||||
|  | ||||
|     if not benches_path.exists(): | ||||
|         raise FileNotFoundError(f"Benches directory not found: {benches_dir}") | ||||
|  | ||||
|     for py_file in benches_path.glob("*.py"): | ||||
|         if py_file.name.startswith("__"): | ||||
|             continue | ||||
|  | ||||
|         module_name = py_file.stem | ||||
|  | ||||
|         try: | ||||
|             # Import the module | ||||
|             spec = importlib.util.spec_from_file_location(module_name, py_file) | ||||
|             module = importlib.util.module_from_spec(spec) | ||||
|             spec.loader.exec_module(module) | ||||
|  | ||||
|             # Check if it has a benchmark runner function | ||||
|             if hasattr(module, f"run_{module_name}"): | ||||
|                 benchmarks.append( | ||||
|                     { | ||||
|                         "name": module_name, | ||||
|                         "path": str(py_file), | ||||
|                         "module": module, | ||||
|                         "runner_function": getattr(module, f"run_{module_name}"), | ||||
|                     } | ||||
|                 ) | ||||
|             elif hasattr(module, "run_benchmark"): | ||||
|                 benchmarks.append( | ||||
|                     { | ||||
|                         "name": module_name, | ||||
|                         "path": str(py_file), | ||||
|                         "module": module, | ||||
|                         "runner_function": getattr(module, "run_benchmark"), | ||||
|                     } | ||||
|                 ) | ||||
|             else: | ||||
|                 logging.warning(f"No runner function found in {py_file}") | ||||
|  | ||||
|         except Exception as e: | ||||
|             logging.error(f"Failed to import {py_file}: {e}") | ||||
|  | ||||
|     return benchmarks | ||||
|  | ||||
|  | ||||
| def run_single_benchmark( | ||||
|     benchmark_info: dict[str, Any], output_dir: str, logger: logging.Logger, **kwargs | ||||
| ) -> Optional[str]: | ||||
|     """ | ||||
|     Run a single benchmark and return the output file path. | ||||
|  | ||||
|     Args: | ||||
|         benchmark_info: Dictionary containing benchmark module info | ||||
|         output_dir: Base output directory | ||||
|         logger: Logger instance | ||||
|         **kwargs: Additional arguments to pass to the benchmark | ||||
|  | ||||
|     Returns: | ||||
|         Path to the output file if successful, None otherwise | ||||
|     """ | ||||
|     benchmark_name = benchmark_info["name"] | ||||
|     runner_func = benchmark_info["runner_function"] | ||||
|  | ||||
|     logger.info(f"Running benchmark: {benchmark_name}") | ||||
|  | ||||
|     try: | ||||
|         # Check function signature to determine what arguments to pass | ||||
|         import inspect | ||||
|  | ||||
|         sig = inspect.signature(runner_func) | ||||
|  | ||||
|         # Prepare arguments based on function signature | ||||
|         func_kwargs = {"logger": logger, "output_dir": output_dir} | ||||
|  | ||||
|         # Add other kwargs if the function accepts them | ||||
|         for param_name in sig.parameters: | ||||
|             if param_name in kwargs: | ||||
|                 func_kwargs[param_name] = kwargs[param_name] | ||||
|  | ||||
|         # Filter kwargs to only include parameters the function accepts | ||||
|         # If function has **kwargs, include all provided kwargs | ||||
|         has_var_kwargs = any(param.kind == param.VAR_KEYWORD for param in sig.parameters.values()) | ||||
|         if has_var_kwargs: | ||||
|             valid_kwargs = {**func_kwargs, **kwargs} | ||||
|         else: | ||||
|             valid_kwargs = {k: v for k, v in func_kwargs.items() if k in sig.parameters} | ||||
|  | ||||
|         # Run the benchmark | ||||
|         result = runner_func(**valid_kwargs) | ||||
|  | ||||
|         if isinstance(result, str): | ||||
|             # Function returned a file path | ||||
|             return result | ||||
|         else: | ||||
|             logger.info(f"Benchmark {benchmark_name} completed successfully") | ||||
|             return "completed" | ||||
|  | ||||
|     except Exception as e: | ||||
|         logger.error(f"Benchmark {benchmark_name} failed: {e}") | ||||
|         import traceback | ||||
|  | ||||
|         logger.debug(traceback.format_exc()) | ||||
|         return None | ||||
|  | ||||
|  | ||||
| def generate_summary_report(output_dir: str, benchmark_results: dict[str, Any], logger: logging.Logger) -> str: | ||||
|     """Generate a summary report of all benchmark runs.""" | ||||
|     timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") | ||||
|     summary_file = os.path.join(output_dir, f"benchmark_summary_{timestamp}.json") | ||||
|  | ||||
|     summary_data = { | ||||
|         "run_metadata": { | ||||
|             "timestamp": datetime.utcnow().isoformat(), | ||||
|             "total_benchmarks": len(benchmark_results), | ||||
|             "successful_benchmarks": len([r for r in benchmark_results.values() if r is not None]), | ||||
|             "failed_benchmarks": len([r for r in benchmark_results.values() if r is None]), | ||||
|         }, | ||||
|         "benchmark_results": benchmark_results, | ||||
|         "output_directory": output_dir, | ||||
|     } | ||||
|  | ||||
|     with open(summary_file, "w") as f: | ||||
|         json.dump(summary_data, f, indent=2, default=str) | ||||
|  | ||||
|     logger.info(f"Summary report saved to: {summary_file}") | ||||
|     return summary_file | ||||
|  | ||||
|  | ||||
| def main(): | ||||
|     """Main entry point for the benchmarking script.""" | ||||
|     parser = argparse.ArgumentParser(description="Run all benchmarks in the ./benches directory") | ||||
|  | ||||
|     parser.add_argument( | ||||
|         "--output-dir", | ||||
|         type=str, | ||||
|         default="benchmark_results", | ||||
|         help="Base output directory for benchmark results (default: benchmark_results)", | ||||
|     ) | ||||
|  | ||||
|     parser.add_argument( | ||||
|         "--benches-dir", | ||||
|         type=str, | ||||
|         default="./benches", | ||||
|         help="Directory containing benchmark implementations (default: ./benches)", | ||||
|     ) | ||||
|  | ||||
|     parser.add_argument( | ||||
|         "--log-level", | ||||
|         type=str, | ||||
|         choices=["DEBUG", "INFO", "WARNING", "ERROR"], | ||||
|         default="INFO", | ||||
|         help="Logging level (default: INFO)", | ||||
|     ) | ||||
|  | ||||
|     parser.add_argument("--model-id", type=str, help="Specific model ID to benchmark (if supported by benchmarks)") | ||||
|  | ||||
|     parser.add_argument("--warmup-iterations", type=int, default=3, help="Number of warmup iterations (default: 3)") | ||||
|  | ||||
|     parser.add_argument( | ||||
|         "--measurement-iterations", type=int, default=5, help="Number of measurement iterations (default: 5)" | ||||
|     ) | ||||
|  | ||||
|     parser.add_argument( | ||||
|         "--num-tokens-to-generate", | ||||
|         type=int, | ||||
|         default=100, | ||||
|         help="Number of tokens to generate in benchmarks (default: 100)", | ||||
|     ) | ||||
|  | ||||
|     parser.add_argument("--include", type=str, nargs="*", help="Only run benchmarks matching these names") | ||||
|  | ||||
|     parser.add_argument("--exclude", type=str, nargs="*", help="Exclude benchmarks matching these names") | ||||
|  | ||||
|     parser.add_argument("--enable-mock", action="store_true", help="Enable mock benchmark (skipped by default)") | ||||
|  | ||||
|     parser.add_argument("--enable-file-logging", action="store_true", help="Enable file logging (disabled by default)") | ||||
|  | ||||
|     parser.add_argument( | ||||
|         "--commit-id", type=str, help="Git commit ID for metadata (if not provided, will auto-detect from git)" | ||||
|     ) | ||||
|  | ||||
|     args = parser.parse_args() | ||||
|  | ||||
|     # Setup logging | ||||
|     logger = setup_logging(args.log_level, args.enable_file_logging) | ||||
|  | ||||
|     logger.info("Starting benchmark discovery and execution") | ||||
|     logger.info(f"Output directory: {args.output_dir}") | ||||
|     logger.info(f"Benches directory: {args.benches_dir}") | ||||
|  | ||||
|     # Create output directory | ||||
|     os.makedirs(args.output_dir, exist_ok=True) | ||||
|  | ||||
|     try: | ||||
|         # Discover benchmarks | ||||
|         benchmarks = discover_benchmarks(args.benches_dir) | ||||
|         logger.info(f"Discovered {len(benchmarks)} benchmark(s): {[b['name'] for b in benchmarks]}") | ||||
|  | ||||
|         if not benchmarks: | ||||
|             logger.warning("No benchmarks found!") | ||||
|             return 1 | ||||
|  | ||||
|         # Filter benchmarks based on include/exclude | ||||
|         filtered_benchmarks = benchmarks | ||||
|  | ||||
|         if args.include: | ||||
|             filtered_benchmarks = [ | ||||
|                 b for b in filtered_benchmarks if any(pattern in b["name"] for pattern in args.include) | ||||
|             ] | ||||
|             logger.info(f"Filtered to include: {[b['name'] for b in filtered_benchmarks]}") | ||||
|  | ||||
|         if args.exclude: | ||||
|             filtered_benchmarks = [ | ||||
|                 b for b in filtered_benchmarks if not any(pattern in b["name"] for pattern in args.exclude) | ||||
|             ] | ||||
|             logger.info(f"After exclusion: {[b['name'] for b in filtered_benchmarks]}") | ||||
|  | ||||
|         if not filtered_benchmarks: | ||||
|             logger.warning("No benchmarks remaining after filtering!") | ||||
|             return 1 | ||||
|  | ||||
|         # Prepare common kwargs for benchmarks | ||||
|         benchmark_kwargs = { | ||||
|             "warmup_iterations": args.warmup_iterations, | ||||
|             "measurement_iterations": args.measurement_iterations, | ||||
|             "num_tokens_to_generate": args.num_tokens_to_generate, | ||||
|         } | ||||
|  | ||||
|         if args.model_id: | ||||
|             benchmark_kwargs["model_id"] = args.model_id | ||||
|  | ||||
|         # Add enable_mock flag for mock benchmark | ||||
|         benchmark_kwargs["enable_mock"] = args.enable_mock | ||||
|  | ||||
|         # Add commit_id if provided | ||||
|         if args.commit_id: | ||||
|             benchmark_kwargs["commit_id"] = args.commit_id | ||||
|  | ||||
|         # Run benchmarks | ||||
|         benchmark_results = {} | ||||
|         successful_count = 0 | ||||
|  | ||||
|         for benchmark_info in filtered_benchmarks: | ||||
|             result = run_single_benchmark(benchmark_info, args.output_dir, logger, **benchmark_kwargs) | ||||
|  | ||||
|             benchmark_results[benchmark_info["name"]] = result | ||||
|  | ||||
|             if result is not None: | ||||
|                 successful_count += 1 | ||||
|  | ||||
|         # Generate summary report | ||||
|         summary_file = generate_summary_report(args.output_dir, benchmark_results, logger) | ||||
|  | ||||
|         # Final summary | ||||
|         total_benchmarks = len(filtered_benchmarks) | ||||
|         failed_count = total_benchmarks - successful_count | ||||
|  | ||||
|         logger.info("=" * 60) | ||||
|         logger.info("BENCHMARK RUN SUMMARY") | ||||
|         logger.info("=" * 60) | ||||
|         logger.info(f"Total benchmarks: {total_benchmarks}") | ||||
|         logger.info(f"Successful: {successful_count}") | ||||
|         logger.info(f"Failed: {failed_count}") | ||||
|         logger.info(f"Output directory: {args.output_dir}") | ||||
|         logger.info(f"Summary report: {summary_file}") | ||||
|  | ||||
|         if failed_count > 0: | ||||
|             logger.warning(f"{failed_count} benchmark(s) failed. Check logs for details.") | ||||
|             return 1 | ||||
|         else: | ||||
|             logger.info("All benchmarks completed successfully!") | ||||
|             return 0 | ||||
|  | ||||
|     except Exception as e: | ||||
|         logger.error(f"Benchmark run failed: {e}") | ||||
|         import traceback | ||||
|  | ||||
|         logger.debug(traceback.format_exc()) | ||||
|         return 1 | ||||
|  | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     sys.exit(main()) | ||||
							
								
								
									
30  conftest.py
| @ -16,7 +16,6 @@ | ||||
| # by pytest before any tests are run | ||||
|  | ||||
| import doctest | ||||
| import os | ||||
| import sys | ||||
| import warnings | ||||
| from os.path import abspath, dirname, join | ||||
| @ -24,18 +23,12 @@ from os.path import abspath, dirname, join | ||||
| import _pytest | ||||
| import pytest | ||||
|  | ||||
| from transformers.testing_utils import ( | ||||
|     HfDoctestModule, | ||||
|     HfDocTestParser, | ||||
|     is_torch_available, | ||||
|     patch_testing_methods_to_collect_info, | ||||
|     patch_torch_compile_force_graph, | ||||
| ) | ||||
| from transformers.testing_utils import HfDoctestModule, HfDocTestParser | ||||
|  | ||||
|  | ||||
| NOT_DEVICE_TESTS = { | ||||
|     "test_tokenization", | ||||
|     "test_tokenization_mistral_common", | ||||
|     "test_processor", | ||||
|     "test_processing", | ||||
|     "test_beam_constraints", | ||||
|     "test_configuration_utils", | ||||
| @ -73,6 +66,7 @@ NOT_DEVICE_TESTS = { | ||||
|     "ModelTester::test_pipeline_", | ||||
|     "/repo_utils/", | ||||
|     "/utils/", | ||||
|     "/agents/", | ||||
| } | ||||
|  | ||||
| # allow having multiple repository checkouts and not needing to remember to rerun | ||||
| @ -89,9 +83,8 @@ def pytest_configure(config): | ||||
|     config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested") | ||||
|     config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment") | ||||
|     config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate") | ||||
|     config.addinivalue_line("markers", "agent_tests: mark the agent tests that are run on their specific schedule") | ||||
|     config.addinivalue_line("markers", "not_device_test: mark the tests always running on cpu") | ||||
|     config.addinivalue_line("markers", "torch_compile_test: mark test which tests torch compile functionality") | ||||
|     config.addinivalue_line("markers", "torch_export_test: mark test which tests torch export functionality") | ||||
|  | ||||
|  | ||||
| def pytest_collection_modifyitems(items): | ||||
| @ -136,18 +129,3 @@ class CustomOutputChecker(OutputChecker): | ||||
| doctest.OutputChecker = CustomOutputChecker | ||||
| _pytest.doctest.DoctestModule = HfDoctestModule | ||||
| doctest.DocTestParser = HfDocTestParser | ||||
|  | ||||
| if is_torch_available(): | ||||
|     import torch | ||||
|  | ||||
|     # The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True. | ||||
|     # We set it to `False` for CI. See https://github.com/pytorch/pytorch/issues/157274#issuecomment-3090791615 | ||||
|     torch.backends.cudnn.allow_tf32 = False | ||||
|  | ||||
|     # patch `torch.compile`: if `TORCH_COMPILE_FORCE_FULLGRAPH=1` (or values considered as true, e.g. yes, y, etc.), | ||||
|     # the patched version will always run with `fullgraph=True`. | ||||
|     patch_torch_compile_force_graph() | ||||
|  | ||||
|  | ||||
| if os.environ.get("PATCH_TESTING_METHODS_TO_COLLECT_OUTPUTS", "").lower() in ("yes", "true", "on", "y", "1"): | ||||
|     patch_testing_methods_to_collect_info() | ||||
|  | ||||
| @ -4,7 +4,7 @@ USER root | ||||
| ARG REF=main | ||||
| RUN apt-get update && apt-get install -y time git g++ pkg-config make git-lfs | ||||
| ENV UV_PYTHON=/usr/local/bin/python | ||||
| RUN pip install uv && uv pip install --no-cache-dir -U pip setuptools GitPython | ||||
| RUN pip install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools GitPython | ||||
| RUN uv pip install --no-cache-dir --upgrade 'torch' 'torchaudio' 'torchvision' --index-url https://download.pytorch.org/whl/cpu | ||||
| # tensorflow pin matching setup.py | ||||
| RUN uv pip install --no-cache-dir pypi-kenlm | ||||
|  | ||||
| @ -2,9 +2,9 @@ FROM python:3.9-slim | ||||
| ENV PYTHONDONTWRITEBYTECODE=1 | ||||
| ARG REF=main | ||||
| USER root | ||||
| RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake wget xz-utils build-essential g++5 libprotobuf-dev protobuf-compiler git-lfs curl | ||||
| RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake wget xz-utils build-essential g++5 libprotobuf-dev protobuf-compiler | ||||
| ENV UV_PYTHON=/usr/local/bin/python | ||||
| RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools | ||||
|  | ||||
| RUN wget https://github.com/ku-nlp/jumanpp/releases/download/v2.0.0-rc3/jumanpp-2.0.0-rc3.tar.xz | ||||
| RUN tar xvf jumanpp-2.0.0-rc3.tar.xz | ||||
| @ -15,20 +15,12 @@ RUN mv catch.hpp ../libs/ | ||||
| RUN cmake .. -DCMAKE_INSTALL_PREFIX=/usr/local | ||||
| RUN make install -j 10 | ||||
|  | ||||
| WORKDIR / | ||||
|  | ||||
| RUN uv pip install --no-cache --upgrade 'torch' --index-url https://download.pytorch.org/whl/cpu | ||||
| RUN uv pip install --no-cache-dir  --no-deps accelerate --extra-index-url https://download.pytorch.org/whl/cpu | ||||
| RUN uv pip install  --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[ja,testing,sentencepiece,spacy,ftfy,rjieba]" unidic unidic-lite | ||||
| RUN uv pip install  --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[ja,testing,sentencepiece,jieba,spacy,ftfy,rjieba]" unidic unidic-lite | ||||
| # spacy is not used so it is not tested. It causes failures. TODO: fix later | ||||
| RUN uv run python -m unidic download | ||||
|  | ||||
| # fetch test data and hub objects within CircleCI docker images to reduce even more connections | ||||
| # we don't need a full clone of `transformers` to run `fetch_hub_objects_for_ci.py` | ||||
| # the data are downloaded to the directory `/test_data` and during CircleCI's CI runtime, we need to move them to the root of `transformers` | ||||
| RUN mkdir test_data && cd test_data && curl -O https://raw.githubusercontent.com/huggingface/transformers/${REF}/utils/fetch_hub_objects_for_ci.py && python3 fetch_hub_objects_for_ci.py | ||||
|  | ||||
|  | ||||
| RUN python3 -m unidic download | ||||
| RUN uv pip uninstall transformers | ||||
|  | ||||
| RUN apt-get clean && rm -rf /var/lib/apt/lists/* | ||||
|  | ||||
							
								
								
									
13  docker/examples-tf.dockerfile  Normal file
| @ -0,0 +1,13 @@ | ||||
| FROM python:3.9-slim | ||||
| ENV PYTHONDONTWRITEBYTECODE=1 | ||||
| ARG REF=main | ||||
| USER root | ||||
| RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git | ||||
| RUN apt-get install -y g++ cmake | ||||
| ENV UV_PYTHON=/usr/local/bin/python | ||||
| RUN pip --no-cache-dir install uv && uv venv | ||||
| RUN uv pip install --no-cache-dir -U pip setuptools albumentations seqeval | ||||
| RUN uv pip install  --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,testing,sentencepiece,tf-speech,vision]" | ||||
| RUN uv pip install --no-cache-dir  "protobuf==3.20.3" | ||||
| RUN uv pip uninstall transformers | ||||
| RUN apt-get clean && rm -rf /var/lib/apt/lists/* | ||||
| @ -2,18 +2,11 @@ FROM python:3.9-slim | ||||
| ENV PYTHONDONTWRITEBYTECODE=1 | ||||
| ARG REF=main | ||||
| USER root | ||||
| RUN apt-get update &&  apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git-lfs ffmpeg curl | ||||
| RUN apt-get update &&  apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git | ||||
| ENV UV_PYTHON=/usr/local/bin/python | ||||
| RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN uv pip install --no-cache-dir 'torch' 'torchaudio' 'torchvision' 'torchcodec' --index-url https://download.pytorch.org/whl/cpu | ||||
| RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN uv pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu | ||||
| RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu | ||||
| RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing]" seqeval albumentations jiwer | ||||
|  | ||||
| # fetch test data and hub objects within CircleCI docker images to reduce even more connections | ||||
| # we don't need a full clone of `transformers` to run `fetch_hub_objects_for_ci.py` | ||||
| # the data are downloaded to the directory `/test_data` and during CircleCI's CI runtime, we need to move them to the root of `transformers` | ||||
| RUN mkdir test_data && cd test_data && curl -O https://raw.githubusercontent.com/huggingface/transformers/${REF}/utils/fetch_hub_objects_for_ci.py && python3 fetch_hub_objects_for_ci.py | ||||
|  | ||||
|  | ||||
| RUN uv pip uninstall transformers | ||||
| RUN apt-get clean && rm -rf /var/lib/apt/lists/* | ||||
|  | ||||
| @ -2,23 +2,16 @@ FROM python:3.9-slim | ||||
| ENV PYTHONDONTWRITEBYTECODE=1 | ||||
| ARG REF=main | ||||
| USER root | ||||
| RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git libgl1 g++ tesseract-ocr git-lfs curl | ||||
| RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git libgl1-mesa-glx libgl1 g++ tesseract-ocr | ||||
| ENV UV_PYTHON=/usr/local/bin/python | ||||
| RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN uv pip install --no-cache-dir 'torch' 'torchaudio' 'torchvision' --index-url https://download.pytorch.org/whl/cpu | ||||
| RUN pip --no-cache-dir install uv &&  uv venv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN uv pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu | ||||
| RUN uv pip install --no-cache-dir  --no-deps timm accelerate | ||||
| RUN uv pip install -U --no-cache-dir pytesseract python-Levenshtein opencv-python nltk | ||||
| RUN pip install -U --upgrade-strategy eager --no-cache-dir pytesseract python-Levenshtein opencv-python nltk | ||||
| # RUN uv pip install --no-cache-dir natten==0.15.1+torch210cpu -f https://shi-labs.com/natten/wheels | ||||
| RUN uv pip install  --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[testing, vision]" 'scikit-learn' 'torch-stft' 'nose'  'dataset' | ||||
| # RUN git clone https://github.com/facebookresearch/detectron2.git | ||||
| # RUN python3 -m pip install --no-cache-dir -e detectron2 | ||||
| RUN uv pip install 'git+https://github.com/facebookresearch/detectron2.git@92ae9f0b92aba5867824b4f12aa06a22a60a45d3' --no-build-isolation | ||||
|  | ||||
| # fetch test data and hub objects within CircleCI docker images to reduce even more connections | ||||
| # we don't need a full clone of `transformers` to run `fetch_hub_objects_for_ci.py` | ||||
| # the data are downloaded to the directory `/test_data` and during CircleCI's CI runtime, we need to move them to the root of `transformers` | ||||
| RUN mkdir test_data && cd test_data && curl -O https://raw.githubusercontent.com/huggingface/transformers/${REF}/utils/fetch_hub_objects_for_ci.py && python3 fetch_hub_objects_for_ci.py | ||||
|  | ||||
|  | ||||
| RUN uv pip uninstall transformers | ||||
| RUN apt-get clean && rm -rf /var/lib/apt/lists/* | ||||
|  | ||||
							
								
								
									
10  docker/jax-light.dockerfile  Normal file
| @ -0,0 +1,10 @@ | ||||
| FROM python:3.9-slim | ||||
| ENV PYTHONDONTWRITEBYTECODE=1 | ||||
| ARG REF=main | ||||
| USER root | ||||
| RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git g++ cmake | ||||
| ENV UV_PYTHON=/usr/local/bin/python | ||||
| RUN pip --no-cache-dir install uv &&  uv venv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN uv pip install --no-cache-dir "scipy<1.13" "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,testing,sentencepiece,flax-speech,vision]" | ||||
| RUN uv pip uninstall transformers | ||||
| RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean | ||||
							
								
								
									
10  docker/pipeline-tf.dockerfile  Normal file
| @ -0,0 +1,10 @@ | ||||
| FROM python:3.9-slim | ||||
| ENV PYTHONDONTWRITEBYTECODE=1 | ||||
| ARG REF=main | ||||
| USER root | ||||
| RUN apt-get update && apt-get install -y libsndfile1-dev espeak-ng time git cmake g++ | ||||
| ENV UV_PYTHON=/usr/local/bin/python | ||||
| RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN uv pip install --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,tf-cpu,testing,sentencepiece,tf-speech,vision]" | ||||
| RUN uv pip install --no-cache-dir  "protobuf==3.20.3" tensorflow_probability | ||||
| RUN apt-get clean && rm -rf /var/lib/apt/lists/* | ||||
| @ -2,17 +2,10 @@ FROM python:3.9-slim | ||||
| ENV PYTHONDONTWRITEBYTECODE=1 | ||||
| ARG REF=main | ||||
| USER root | ||||
| RUN apt-get update &&  apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git pkg-config openssh-client git ffmpeg curl | ||||
| RUN apt-get update &&  apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git pkg-config openssh-client git | ||||
| ENV UV_PYTHON=/usr/local/bin/python | ||||
| RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN uv pip install --no-cache-dir 'torch' 'torchaudio' 'torchvision' 'torchcodec' --index-url https://download.pytorch.org/whl/cpu | ||||
| RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu | ||||
| RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN uv pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu | ||||
| RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu  | ||||
| RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing]" | ||||
|  | ||||
| # fetch test data and hub objects within CircleCI docker images to reduce even more connections | ||||
| # we don't need a full clone of `transformers` to run `fetch_hub_objects_for_ci.py` | ||||
| # the data are downloaded to the directory `/test_data` and during CircleCI's CI runtime, we need to move them to the root of `transformers` | ||||
| RUN mkdir test_data && cd test_data && curl -O https://raw.githubusercontent.com/huggingface/transformers/${REF}/utils/fetch_hub_objects_for_ci.py && python3 fetch_hub_objects_for_ci.py | ||||
|  | ||||
|  | ||||
| RUN uv pip uninstall transformers | ||||
|  | ||||
| @ -2,8 +2,8 @@ FROM python:3.9-slim | ||||
| ENV PYTHONDONTWRITEBYTECODE=1 | ||||
| ARG REF=main | ||||
| USER root | ||||
| RUN apt-get update && apt-get install -y time git | ||||
| RUN apt-get update && apt-get install -y time git  | ||||
| ENV UV_PYTHON=/usr/local/bin/python | ||||
| RUN pip install uv | ||||
| RUN pip install uv &&  uv venv | ||||
| RUN uv pip install --no-cache-dir -U pip setuptools GitPython "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[ruff]" urllib3 | ||||
| RUN apt-get install -y jq curl && apt-get clean && rm -rf /var/lib/apt/lists/* | ||||
|  | ||||
							
								
								
									
12  docker/tf-light.dockerfile  Normal file
| @ -0,0 +1,12 @@ | ||||
| FROM python:3.9-slim | ||||
| ENV PYTHONDONTWRITEBYTECODE=1 | ||||
| ARG REF=main | ||||
| USER root | ||||
| RUN apt-get update &&  apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ pkg-config openssh-client git | ||||
| RUN apt-get install -y  cmake | ||||
| ENV UV_PYTHON=/usr/local/bin/python | ||||
| RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN uv pip install  --upgrade --no-cache-dir "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,testing,sentencepiece,tf-speech,vision]" | ||||
| RUN uv pip install --no-cache-dir  "protobuf==3.20.3" | ||||
| RUN uv pip uninstall transformers | ||||
| RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean | ||||
							
								
								
									
16  docker/torch-jax-light.dockerfile  Normal file
| @ -0,0 +1,16 @@ | ||||
| FROM python:3.9-slim | ||||
| ENV PYTHONDONTWRITEBYTECODE=1 | ||||
| ARG REF=main | ||||
| USER root | ||||
| RUN apt-get update &&  apt-get install -y libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git | ||||
| ENV UV_PYTHON=/usr/local/bin/python | ||||
| RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN uv pip install --no-deps accelerate | ||||
| RUN uv pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu | ||||
| RUN uv pip install --no-cache-dir "scipy<1.13" "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[flax,audio,sklearn,sentencepiece,vision,testing]" | ||||
|  | ||||
|  | ||||
| # RUN pip install --no-cache-dir "scipy<1.13" "transformers[flax,testing,sentencepiece,flax-speech,vision]" | ||||
|  | ||||
| RUN uv pip uninstall transformers | ||||
| RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean | ||||
| @ -2,16 +2,10 @@ FROM python:3.9-slim | ||||
| ENV PYTHONDONTWRITEBYTECODE=1 | ||||
| ARG REF=main | ||||
| USER root | ||||
| RUN apt-get update &&  apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git-lfs ffmpeg curl | ||||
| RUN apt-get update &&  apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git git-lfs | ||||
| ENV UV_PYTHON=/usr/local/bin/python | ||||
| RUN pip --no-cache-dir install uv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN uv pip install --no-cache-dir 'torch' 'torchaudio' 'torchvision' 'torchcodec' --index-url https://download.pytorch.org/whl/cpu | ||||
| RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN uv pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu | ||||
| RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu | ||||
| RUN uv pip install --no-cache-dir librosa "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[sklearn,sentencepiece,vision,testing,tiktoken,num2words,video]" | ||||
|  | ||||
| # fetch test data and hub objects within CircleCI docker images to reduce even more connections | ||||
| # we don't need a full clone of `transformers` to run `fetch_hub_objects_for_ci.py` | ||||
| # the data are downloaded to the directory `/test_data` and during CircleCI's CI runtime, we need to move them to the root of `transformers` | ||||
| RUN mkdir test_data && cd test_data && curl -O https://raw.githubusercontent.com/huggingface/transformers/${REF}/utils/fetch_hub_objects_for_ci.py && python3 fetch_hub_objects_for_ci.py | ||||
|  | ||||
| RUN uv pip uninstall transformers | ||||
|  | ||||
							
								
								
									
19  docker/torch-tf-light.dockerfile  Normal file
| @ -0,0 +1,19 @@ | ||||
| FROM python:3.9-slim | ||||
| ENV PYTHONDONTWRITEBYTECODE=1 | ||||
| ARG REF=main | ||||
| RUN echo ${REF} | ||||
| USER root | ||||
| RUN apt-get update &&  apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client git git-lfs | ||||
| ENV UV_PYTHON=/usr/local/bin/python | ||||
| RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools | ||||
| RUN uv pip install --no-cache-dir  --no-deps accelerate --extra-index-url https://download.pytorch.org/whl/cpu  | ||||
| RUN uv pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu | ||||
| RUN git lfs install | ||||
|  | ||||
| RUN uv pip install --no-cache-dir pypi-kenlm | ||||
| RUN uv pip install --no-cache-dir  "git+https://github.com/huggingface/transformers.git@${REF}#egg=transformers[tf-cpu,sklearn,sentencepiece,vision,testing]" | ||||
| RUN uv pip install --no-cache-dir  "protobuf==3.20.3" librosa | ||||
|  | ||||
|  | ||||
| RUN uv pip uninstall transformers | ||||
| RUN apt-get clean && rm -rf /var/lib/apt/lists/* && apt-get autoremove && apt-get autoclean | ||||
| @ -1,4 +1,4 @@ | ||||
| FROM nvidia/cuda:12.6.0-cudnn-devel-ubuntu22.04 | ||||
| FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu22.04 | ||||
| LABEL maintainer="Hugging Face" | ||||
|  | ||||
| ARG DEBIAN_FRONTEND=noninteractive | ||||
| @ -9,11 +9,11 @@ SHELL ["sh", "-lc"] | ||||
| # The following `ARG`s are mainly used to specify the versions explicitly & directly in this docker file, and not meant | ||||
| # to be used as arguments for docker build (so far). | ||||
|  | ||||
| ARG PYTORCH='2.8.0' | ||||
| ARG PYTORCH='2.6.0' | ||||
| # (not always a valid torch version) | ||||
| ARG INTEL_TORCH_EXT='2.3.0' | ||||
| # Example: `cu102`, `cu113`, etc. | ||||
| ARG CUDA='cu126' | ||||
| # Disable kernel mapping for now until all tests pass | ||||
| ENV DISABLE_KERNEL_MAPPING=1 | ||||
| ARG CUDA='cu121' | ||||
|  | ||||
| RUN apt update | ||||
| RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg git-lfs | ||||
| @ -26,16 +26,13 @@ RUN git clone https://github.com/huggingface/transformers && cd transformers && | ||||
| # 1. Put several commands in a single `RUN` to avoid image/layer exporting issues. Could be revised in the future. | ||||
| # 2. Regarding the `torch` part, we might need to specify proper versions for `torchvision` and `torchaudio`. | ||||
| #    Currently, let's not bother to specify their versions explicitly (so they are installed with their latest release versions). | ||||
| RUN python3 -m pip install --no-cache-dir -e ./transformers[dev,onnxruntime] && [ ${#PYTORCH} -gt 0 -a "$PYTORCH" != "pre" ] && VERSION='torch=='$PYTORCH'.*' ||  VERSION='torch'; echo "export VERSION='$VERSION'" >> ~/.profile && echo torch=$VERSION && [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio torchcodec --extra-index-url https://download.pytorch.org/whl/$CUDA || python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio torchcodec --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA && python3 -m pip uninstall -y tensorflow tensorflow_text tensorflow_probability | ||||
| RUN python3 -m pip install --no-cache-dir -U tensorflow==2.13 protobuf==3.20.3 "tensorflow_text<2.16" "tensorflow_probability<0.22" && python3 -m pip install --no-cache-dir -e ./transformers[dev,onnxruntime] && [ ${#PYTORCH} -gt 0 -a "$PYTORCH" != "pre" ] && VERSION='torch=='$PYTORCH'.*' ||  VERSION='torch'; echo "export VERSION='$VERSION'" >> ~/.profile && echo torch=$VERSION && [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA || python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA | ||||
|  | ||||
| RUN python3 -m pip uninstall -y flax jax | ||||
|  | ||||
| RUN python3 -m pip install --no-cache-dir -U timm | ||||
|  | ||||
| RUN [ "$PYTORCH" != "pre" ] && python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git || echo "Don't install detectron2 with nightly torch" | ||||
|  | ||||
| RUN python3 -m pip install --no-cache-dir pytesseract | ||||
| RUN python3 -m pip install --no-cache-dir intel_extension_for_pytorch==$INTEL_TORCH_EXT -f https://developer.intel.com/ipex-whl-stable-cpu | ||||
|  | ||||
| RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract | ||||
| RUN python3 -m pip install -U "itsdangerous<2.1.0" | ||||
|  | ||||
| RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate | ||||
| @ -44,11 +41,9 @@ RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/pef | ||||
|  | ||||
| # For bettertransformer | ||||
| RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/optimum@main#egg=optimum | ||||
| # For kernels | ||||
| RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/kernels@main#egg=kernels | ||||
|  | ||||
| # For video model testing | ||||
| RUN python3 -m pip install --no-cache-dir av | ||||
| RUN python3 -m pip install --no-cache-dir av==9.2.0 | ||||
|  | ||||
| # Some slow tests require bnb | ||||
| RUN python3 -m pip install --no-cache-dir bitsandbytes | ||||
| @ -56,14 +51,15 @@ RUN python3 -m pip install --no-cache-dir bitsandbytes | ||||
| # Some tests require quanto | ||||
| RUN python3 -m pip install --no-cache-dir quanto | ||||
|  | ||||
| # After using A10 as CI runner, let's run FA2 tests | ||||
| RUN [ "$PYTORCH" != "pre" ] && python3 -m pip uninstall -y ninja && python3 -m pip install --no-cache-dir ninja && python3 -m pip install flash-attn --no-cache-dir --no-build-isolation || echo "Don't install FA2 with nightly torch" | ||||
|  | ||||
| # TODO (ydshieh): check this again | ||||
| # `quanto` will install `ninja` which leads to many `CUDA error: an illegal memory access ...` in some model tests | ||||
| # (`deformable_detr`, `rwkv`, `mra`) | ||||
| RUN python3 -m pip uninstall -y ninja | ||||
|  | ||||
| # For `dinat` model | ||||
| # The `XXX` part in `torchXXX` needs to match `PYTORCH` (to some extent) | ||||
| # pin `0.17.4` otherwise `cannot import name 'natten2dav' from 'natten.functional'` | ||||
| RUN python3 -m pip install --no-cache-dir natten==0.17.4+torch250cu121 -f https://shi-labs.com/natten/wheels | ||||
|  | ||||
| # For `nougat` tokenizer | ||||
| RUN python3 -m pip install --no-cache-dir python-Levenshtein | ||||
|  | ||||
| @ -73,12 +69,6 @@ RUN python3 -m pip install --no-cache-dir g2p-en | ||||
| # For Some bitsandbytes tests | ||||
| RUN python3 -m pip install --no-cache-dir einops | ||||
|  | ||||
| # For Some tests with `@require_liger_kernel` | ||||
| RUN python3 -m pip install --no-cache-dir liger-kernel | ||||
|  | ||||
| # `kernels` may give different outputs (within 1e-5 range) even with the same model (weights) and the same inputs | ||||
| RUN python3 -m pip uninstall -y kernels | ||||
|  | ||||
| # When installing in editable mode, `transformers` is not recognized as a package. | ||||
| # This line must be added in order for Python to be aware of transformers. | ||||
| RUN cd transformers && python3 setup.py develop | ||||
|  | ||||
| @ -17,7 +17,6 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip && \ | ||||
|     jupyter \ | ||||
|     tensorflow \ | ||||
|     torch | ||||
| RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/kernels@main#egg=kernels | ||||
|  | ||||
| RUN git clone https://github.com/NVIDIA/apex | ||||
| RUN cd apex && \ | ||||
|  | ||||
| @ -1,71 +0,0 @@ | ||||
| FROM intel/deep-learning-essentials:2025.1.3-0-devel-ubuntu24.04 AS base | ||||
| LABEL maintainer="Hugging Face" | ||||
| SHELL ["/bin/bash", "-c"] | ||||
|  | ||||
| ARG PYTHON_VERSION=3.12 | ||||
| ENV DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| RUN apt-get update && \ | ||||
|     apt-get install -y software-properties-common && \ | ||||
|     add-apt-repository -y ppa:deadsnakes/ppa && \ | ||||
|     apt-get update | ||||
|  | ||||
| RUN apt-get update && \ | ||||
|     apt-get -y install \ | ||||
|     apt-utils \ | ||||
|     build-essential \ | ||||
|     ca-certificates \ | ||||
|     clinfo \ | ||||
|     curl \ | ||||
|     git \ | ||||
|     git-lfs \ | ||||
|     vim \ | ||||
|     numactl \ | ||||
|     gnupg2 \ | ||||
|     gpg-agent \ | ||||
|     python3-dev \ | ||||
|     python3-opencv \ | ||||
|     unzip \ | ||||
|     ffmpeg \ | ||||
|     tesseract-ocr \ | ||||
|     espeak-ng \ | ||||
|     wget \ | ||||
|     ncurses-term \ | ||||
|     google-perftools \ | ||||
|     libjemalloc-dev \ | ||||
|     && apt-get clean \ | ||||
|     && rm -rf /var/lib/apt/lists/* | ||||
|  | ||||
| # Use a virtual env because Ubuntu 24.04 does not allow pip installs into the system Python | ||||
| RUN curl -LsSf https://astral.sh/uv/install.sh | sh | ||||
| ENV PATH="/root/.local/bin:$PATH" | ||||
| ENV VIRTUAL_ENV="/opt/venv" | ||||
| ENV UV_PYTHON_INSTALL_DIR=/opt/uv/python | ||||
| RUN uv venv --python ${PYTHON_VERSION} --seed ${VIRTUAL_ENV} | ||||
| ENV PATH="$VIRTUAL_ENV/bin:$PATH" | ||||
|  | ||||
| RUN pip install --upgrade pip wheel | ||||
| RUN pip install torch torchvision torchaudio torchcodec --index-url https://download.pytorch.org/whl/cpu --no-cache-dir | ||||
| RUN pip install av pyctcdecode pytesseract decord galore-torch fire scipy scikit-learn sentencepiece sentence_transformers sacremoses nltk rouge_score librosa soundfile mpi4py pytorch_msssim | ||||
| RUN pip install onnx optimum onnxruntime | ||||
| RUN pip install autoawq | ||||
| RUN pip install gptqmodel --no-build-isolation | ||||
| RUN pip install -U datasets timm transformers accelerate peft diffusers opencv-python kenlm evaluate | ||||
| RUN pip install -U intel-openmp | ||||
|  | ||||
| # install bitsandbytes | ||||
| RUN git clone https://github.com/bitsandbytes-foundation/bitsandbytes.git && cd bitsandbytes/ && \ | ||||
|     cmake -DCOMPUTE_BACKEND=cpu -S . && make && pip install . && cd ../ | ||||
|  | ||||
| # CPU doesn't need triton | ||||
| RUN pip uninstall triton -y | ||||
|  | ||||
| ENV LD_PRELOAD=${LD_PRELOAD}:/opt/venv/lib/libiomp5.so:/usr/lib/x86_64-linux-gnu/libtcmalloc.so.4 | ||||
| ENV KMP_AFFINITY=granularity=fine,compact,1,0 | ||||
|  | ||||
| RUN touch /entrypoint.sh | ||||
| RUN chmod +x /entrypoint.sh | ||||
| RUN echo "#!/bin/bash" >> /entrypoint.sh | ||||
| RUN echo "/bin/bash" >> /entrypoint.sh | ||||
|  | ||||
| ENTRYPOINT ["/entrypoint.sh"] | ||||
| @ -1,4 +1,4 @@ | ||||
| FROM rocm/pytorch:rocm6.4.1_ubuntu24.04_py3.12_pytorch_release_2.7.1 | ||||
| FROM rocm/dev-ubuntu-22.04:6.2.4 | ||||
| LABEL maintainer="Hugging Face" | ||||
|  | ||||
| ARG DEBIAN_FRONTEND=noninteractive | ||||
| @ -11,6 +11,9 @@ RUN apt update && \ | ||||
| RUN git lfs install | ||||
|  | ||||
| RUN python3 -m pip install --no-cache-dir --upgrade pip numpy | ||||
|  | ||||
| RUN python3 -m pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/rocm6.2.4 | ||||
|  | ||||
| RUN python3 -m pip install --no-cache-dir --upgrade importlib-metadata setuptools ninja git+https://github.com/facebookresearch/detectron2.git pytesseract "itsdangerous<2.1.0" | ||||
|  | ||||
| ARG REF=main | ||||
| @ -20,10 +23,8 @@ WORKDIR / | ||||
| ADD https://api.github.com/repos/huggingface/transformers/git/refs/heads/main version.json | ||||
| RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF | ||||
|  | ||||
| # Install transformers | ||||
| RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch,testing,video,audio] | ||||
| RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch,testing,video] | ||||
|  | ||||
| # Remove tensorflow and flax as they are no longer supported by transformers | ||||
| RUN python3 -m pip uninstall -y tensorflow flax | ||||
|  | ||||
| # When installing in editable mode, `transformers` is not recognized as a package. | ||||
| @ -32,9 +33,3 @@ RUN cd transformers && python3 setup.py develop | ||||
|  | ||||
| # Remove nvml and nvidia-ml-py as they are not compatible with ROCm. Apex is not tested on NVIDIA either. | ||||
| RUN python3 -m pip uninstall py3nvml pynvml nvidia-ml-py apex -y | ||||
|  | ||||
| # `kernels` may cause many failing tests | ||||
| RUN python3 -m pip uninstall -y kernels | ||||
|  | ||||
| # On ROCm, torchcodec is required to decode audio files and 0.4 or 0.6 fails | ||||
| RUN python3 -m pip install --no-cache-dir "torchcodec==0.5" | ||||
|  | ||||
| @ -48,6 +48,3 @@ RUN python3 -c "from deepspeed.launcher.runner import main" | ||||
|  | ||||
| # Remove nvml as it is not compatible with ROCm | ||||
| RUN python3 -m pip uninstall py3nvml pynvml nvidia-ml-py apex -y | ||||
|  | ||||
| # `kernels` may cause many failing tests | ||||
| RUN python3 -m pip uninstall -y kernels | ||||
|  | ||||
| @ -1,12 +1,12 @@ | ||||
| # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-24-08.html | ||||
| FROM nvcr.io/nvidia/pytorch:24.08-py3 | ||||
| # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-23-11.html#rel-23-11 | ||||
| FROM nvcr.io/nvidia/pytorch:23.11-py3 | ||||
| LABEL maintainer="Hugging Face" | ||||
|  | ||||
| ARG DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| ARG PYTORCH='2.8.0' | ||||
| ARG PYTORCH='2.2.0' | ||||
| # Example: `cu102`, `cu113`, etc. | ||||
| ARG CUDA='cu126' | ||||
| ARG CUDA='cu121' | ||||
|  | ||||
| RUN apt -y update | ||||
| RUN apt install -y libaio-dev | ||||
| @ -15,13 +15,12 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip | ||||
| ARG REF=main | ||||
| RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF | ||||
|  | ||||
| # `datasets` requires pandas; pandas has some modules compiled with numpy 1.x, causing errors | ||||
| RUN python3 -m pip install --no-cache-dir './transformers[deepspeed-testing]' 'pandas<2' 'numpy<2' | ||||
| RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing] | ||||
|  | ||||
| # Install latest release PyTorch | ||||
| # (PyTorch must be installed before pre-compiling any DeepSpeed c++/cuda ops.) | ||||
| # (https://www.deepspeed.ai/tutorials/advanced-install/#pre-install-deepspeed-ops) | ||||
| RUN python3 -m pip uninstall -y torch torchvision torchaudio && python3 -m pip install --no-cache-dir -U torch==$PYTORCH torchvision torchaudio torchcodec --extra-index-url https://download.pytorch.org/whl/$CUDA | ||||
| RUN python3 -m pip uninstall -y torch torchvision torchaudio && python3 -m pip install --no-cache-dir -U torch==$PYTORCH torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA | ||||
|  | ||||
| RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate | ||||
|  | ||||
| @ -45,9 +44,6 @@ RUN python3 -m pip uninstall -y deepspeed | ||||
| # TODO: Find out why test fail. | ||||
| RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1 | ||||
|  | ||||
| # `kernels` may give different outputs (within 1e-5 range) even with the same model (weights) and the same inputs | ||||
| RUN python3 -m pip uninstall -y kernels | ||||
|  | ||||
| # When installing in editable mode, `transformers` is not recognized as a package. | ||||
| # This line must be added in order for Python to be aware of transformers. | ||||
| RUN cd transformers && python3 setup.py develop | ||||
|  | ||||
| @ -1,11 +1,11 @@ | ||||
| # https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-23-11.html#rel-23-11 | ||||
| FROM nvcr.io/nvidia/pytorch:24.08-py3 | ||||
| FROM nvcr.io/nvidia/pytorch:23.11-py3 | ||||
| LABEL maintainer="Hugging Face" | ||||
|  | ||||
| ARG DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| # Example: `cu102`, `cu113`, etc. | ||||
| ARG CUDA='cu126' | ||||
| ARG CUDA='cu121' | ||||
|  | ||||
| RUN apt -y update | ||||
| RUN apt install -y libaio-dev | ||||
| @ -19,10 +19,9 @@ RUN python3 -m pip uninstall -y torch torchvision torchaudio | ||||
| # Install **nightly** release PyTorch (flag `--pre`) | ||||
| # (PyTorch must be installed before pre-compiling any DeepSpeed c++/cuda ops.) | ||||
| # (https://www.deepspeed.ai/tutorials/advanced-install/#pre-install-deepspeed-ops) | ||||
| RUN python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio torchcodec --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA | ||||
| RUN python3 -m pip install --no-cache-dir -U --pre torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/nightly/$CUDA | ||||
|  | ||||
| # `datasets` requires pandas; pandas has some modules compiled with numpy 1.x, causing errors | ||||
| RUN python3 -m pip install --no-cache-dir './transformers[deepspeed-testing]' 'pandas<2' 'numpy<2' | ||||
| RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing] | ||||
|  | ||||
| RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate | ||||
|  | ||||
| @ -57,9 +56,6 @@ RUN python3 -m pip uninstall -y deepspeed | ||||
| #RUN git clone https://github.com/pytorch/TensorRT.git | ||||
| #RUN cd TensorRT/py && python3 setup.py install --fx-only | ||||
|  | ||||
| # `kernels` may give different outputs (within 1e-5 range) even with the same model (weights) and the same inputs | ||||
| RUN python3 -m pip uninstall -y kernels | ||||
|  | ||||
| # When installing in editable mode, `transformers` is not recognized as a package. | ||||
| # This line must be added in order for Python to be aware of transformers. | ||||
| RUN cd transformers && python3 setup.py develop | ||||
|  | ||||
| @ -1,4 +1,4 @@ | ||||
| FROM nvidia/cuda:12.6.0-cudnn-devel-ubuntu22.04 | ||||
| FROM nvidia/cuda:12.1.0-cudnn8-devel-ubuntu22.04 | ||||
| LABEL maintainer="Hugging Face" | ||||
|  | ||||
| ARG DEBIAN_FRONTEND=noninteractive | ||||
| @ -11,28 +11,23 @@ ARG REF=main | ||||
| RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF | ||||
|  | ||||
| # If set to nothing, will install the latest version | ||||
| ARG PYTORCH='2.8.0' | ||||
| ARG PYTORCH='2.6.0' | ||||
| ARG TORCH_VISION='' | ||||
| ARG TORCH_AUDIO='' | ||||
| # Example: `cu102`, `cu113`, etc. | ||||
| ARG CUDA='cu126' | ||||
| ARG CUDA='cu121' | ||||
|  | ||||
| RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch,testing,video] | ||||
|  | ||||
| # Install torch stuff after ./transformers[dev-torch,testing,video], otherwise torch may be resolved to a previous | ||||
| # version. | ||||
| RUN [ ${#PYTORCH} -gt 0 ] && VERSION='torch=='$PYTORCH'.*' ||  VERSION='torch'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA | ||||
| RUN [ ${#TORCH_VISION} -gt 0 ] && VERSION='torchvision=='$TORCH_VISION'.*' ||  VERSION='torchvision'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA | ||||
| RUN [ ${#TORCH_AUDIO} -gt 0 ] && VERSION='torchaudio=='$TORCH_AUDIO'.*' ||  VERSION='torchaudio'; python3 -m pip install --no-cache-dir -U $VERSION --extra-index-url https://download.pytorch.org/whl/$CUDA | ||||
|  | ||||
| RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch,testing,video] | ||||
|  | ||||
| RUN python3 -m pip uninstall -y tensorflow flax | ||||
|  | ||||
| RUN python3 -m pip install --no-cache-dir git+https://github.com/facebookresearch/detectron2.git pytesseract | ||||
| RUN python3 -m pip install -U "itsdangerous<2.1.0" | ||||
|  | ||||
| # `kernels` may give different outputs (within 1e-5 range) even with the same model (weights) and the same inputs | ||||
| RUN python3 -m pip uninstall -y kernels | ||||
|  | ||||
| # When installing in editable mode, `transformers` is not recognized as a package. | ||||
| # This line must be added in order for Python to be aware of transformers. | ||||
| RUN cd transformers && python3 setup.py develop | ||||
|  | ||||
| @ -1,93 +0,0 @@ | ||||
| FROM intel/deep-learning-essentials:2025.1.3-0-devel-ubuntu22.04 AS base | ||||
| LABEL maintainer="Hugging Face" | ||||
|  | ||||
| SHELL ["/bin/bash", "-c"] | ||||
|  | ||||
| ARG PYTHON_VER=3.11 | ||||
| ENV TORCH_DEVICE_BACKEND_AUTOLOAD=0 | ||||
| ENV DEBIAN_FRONTEND=noninteractive | ||||
|  | ||||
| RUN apt-get remove -y python3.10 && apt-get autoremove -y | ||||
| RUN apt-get update && \ | ||||
|     apt-get install -y software-properties-common && \ | ||||
|     add-apt-repository -y ppa:deadsnakes/ppa && \ | ||||
|     apt-get update && \ | ||||
|     apt-get install -y python$PYTHON_VER python$PYTHON_VER-dev python3-pip && \ | ||||
|     ln -sf /usr/bin/python$PYTHON_VER /usr/bin/python3 && \ | ||||
|     ln -sf /usr/bin/python3 /usr/bin/python && \ | ||||
|     apt-get clean && \ | ||||
|     rm -rf /var/lib/apt/lists/* | ||||
|  | ||||
| RUN apt-get update && \ | ||||
|     apt-get -y install \ | ||||
|         apt-utils \ | ||||
|         build-essential \ | ||||
|         ca-certificates \ | ||||
|         clinfo \ | ||||
|         curl \ | ||||
|         git \ | ||||
|         git-lfs \ | ||||
|         vim \ | ||||
|         numactl \ | ||||
|         gnupg2 \ | ||||
|         gpg-agent \ | ||||
|         zlib1g-dev \ | ||||
|         rsync \ | ||||
|         sudo \ | ||||
|         libnl-genl-3-200 \ | ||||
|         xpu-smi \ | ||||
|         unzip \ | ||||
|         ffmpeg \ | ||||
|         tesseract-ocr \ | ||||
|         espeak-ng \ | ||||
|         wget \ | ||||
|         ncurses-term && \ | ||||
|     apt-get clean && \ | ||||
|     rm -rf /var/lib/apt/lists/* | ||||
|  | ||||
|  | ||||
| RUN apt-get update && \ | ||||
|     apt-get install -y \ | ||||
|         linux-headers-$(uname -r) \ | ||||
|         linux-modules-extra-$(uname -r) \ | ||||
|         flex bison \ | ||||
|         intel-fw-gpu intel-i915-dkms xpu-smi \ | ||||
|         intel-opencl-icd libze-intel-gpu1 libze1 \ | ||||
|         intel-media-va-driver-non-free libmfx-gen1 libvpl2 \ | ||||
|         libegl-mesa0 libegl1-mesa libegl1-mesa-dev libgbm1 libgl1-mesa-dev libgl1-mesa-dri \ | ||||
|         libglapi-mesa libglx-mesa0 libigdgmm12 libxatracker2 mesa-va-drivers \ | ||||
|         mesa-vdpau-drivers mesa-vulkan-drivers va-driver-all vainfo hwinfo clinfo intel-ocloc \ | ||||
|         libigc-dev intel-igc-cm libigdfcl-dev libigfxcmrt-dev libze-dev && \ | ||||
|     apt-get clean && \ | ||||
|     rm -rf  /var/lib/apt/lists/* | ||||
|  | ||||
| RUN pip install --upgrade pip | ||||
| RUN pip install triton==3.3.0 | ||||
|  | ||||
| RUN pip install torch==2.7.0 torchvision==0.22.0 torchaudio==2.7.0 --index-url https://download.pytorch.org/whl/xpu --no-cache-dir | ||||
|  | ||||
| RUN pip install evaluate torchdata pyctcdecode pytesseract decord galore-torch fire scipy scikit-learn sentencepiece sacremoses nltk rouge_score librosa soundfile g2p_en mpi4py requests_mock | ||||
| RUN pip install pretty_midi essentia resampy Levenshtein av sacrebleu phonemizer invisible_watermark schedulefree | ||||
| RUN pip install gguf hqq compressed_tensors gptqmodel mergekit autoawq deepspeed torchao onnx | ||||
| RUN pip install hf_transfer huggingface-hub hf-doc-builder datasets optimum-quanto timm transformers accelerate optimum peft | ||||
|  | ||||
| RUN pip install git+https://github.com/linkedin/Liger-Kernel.git --extra-index-url https://download.pytorch.org/whl/test/xpu | ||||
|  | ||||
| # install bitsandbytes | ||||
| RUN pip install git+https://github.com/bitsandbytes-foundation/bitsandbytes.git | ||||
|  | ||||
| ENV OCL_ICD_VENDORS=/etc/OpenCL/vendors | ||||
| ENV FI_PROVIDER_PATH=${I_MPI_ROOT}/lib/libfabric/prov:/usr/lib/x86_64-linux-gnu/libfabric | ||||
| ENV CCL_ROOT=/usr/local | ||||
| ENV CCL_ATL_TRANSPORT=ofi | ||||
| ENV I_MPI_ROOT=/usr/local | ||||
| ENV CLASSPATH=${I_MPI_ROOT}/lib/mpi.jar | ||||
| ENV PATH=${I_MPI_ROOT}/bin/libfabric:${PATH} | ||||
| ENV LD_LIBRARY_PATH=${I_MPI_ROOT}/lib/libfabric:${LD_LIBRARY_PATH} | ||||
|  | ||||
| RUN touch /entrypoint.sh | ||||
| RUN chmod +x /entrypoint.sh | ||||
| RUN echo "#!/bin/bash" >> /entrypoint.sh | ||||
| RUN echo "source /opt/intel/oneapi/setvars.sh --force && /bin/bash" >> /entrypoint.sh | ||||
|  | ||||
| ENTRYPOINT ["/entrypoint.sh"] | ||||
| @ -12,8 +12,6 @@ SHELL ["sh", "-lc"] | ||||
| ARG PYTORCH='2.6.0' | ||||
| # Example: `cu102`, `cu113`, etc. | ||||
| ARG CUDA='cu121' | ||||
| # Disable kernel mapping for quantization tests | ||||
| ENV DISABLE_KERNEL_MAPPING=1 | ||||
|  | ||||
| RUN apt update | ||||
| RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg | ||||
| @ -26,7 +24,7 @@ RUN [ ${#PYTORCH} -gt 0 ] && VERSION='torch=='$PYTORCH'.*' ||  VERSION='torch'; | ||||
| RUN echo torch=$VERSION | ||||
| # `torchvision` and `torchaudio` should be installed along with `torch`, especially for nightly build. | ||||
| # Currently, let's just use their latest releases (when `torch` is installed with a release version) | ||||
| RUN python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio torchcodec --extra-index-url https://download.pytorch.org/whl/$CUDA | ||||
| RUN python3 -m pip install --no-cache-dir -U $VERSION torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA | ||||
|  | ||||
| RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate | ||||
|  | ||||
| @ -78,28 +76,15 @@ RUN git clone https://github.com/NetEase-FuXi/EETQ.git && cd EETQ/ && git submod | ||||
| # RUN python3 -m pip install --no-cache-dir flute-kernel==0.4.1 | ||||
| # RUN python3 -m pip install --no-cache-dir git+https://github.com/Dao-AILab/fast-hadamard-transform.git | ||||
|  | ||||
| # Add fp-quant for quantization testing | ||||
| # Requires py3.11 but our CI runs on 3.9 | ||||
| # RUN python3 -m pip install --no-cache-dir "fp-quant>=0.1.6" | ||||
|  | ||||
| # Add compressed-tensors for quantization testing | ||||
| RUN python3 -m pip install --no-cache-dir compressed-tensors | ||||
|  | ||||
| # Add AMD Quark for quantization testing | ||||
| RUN python3 -m pip install --no-cache-dir amd-quark | ||||
|  | ||||
| # Add AutoRound for quantization testing | ||||
| RUN python3 -m pip install --no-cache-dir "auto-round>=0.5.0" | ||||
|  | ||||
| # Add transformers in editable mode | ||||
| RUN python3 -m pip install --no-cache-dir -e ./transformers[dev-torch] | ||||
|  | ||||
| # `kernels` may give different outputs (within 1e-5 range) even with the same model (weights) and the same inputs | ||||
| RUN python3 -m pip uninstall -y kernels | ||||
|  | ||||
| # Uninstall flash-attn installed by autoawq; it causes issues here: https://github.com/huggingface/transformers/actions/runs/15915442841/job/44892146131 | ||||
| RUN python3 -m pip uninstall -y flash-attn | ||||
|  | ||||
| # When installing in editable mode, `transformers` is not recognized as a package. | ||||
| # This line must be added in order for Python to be aware of transformers. | ||||
| RUN cd transformers && python3 setup.py develop | ||||
|  | ||||
| @ -20,21 +20,22 @@ To generate the documentation, you first have to build it. Several packages are | ||||
| you can install them with the following command, at the root of the code repository: | ||||
|  | ||||
| ```bash | ||||
| pip install -e ".[dev]" | ||||
| pip install -e ".[docs]" | ||||
| ``` | ||||
|  | ||||
| > [!NOTE] | ||||
| > This command might fail on some OSes that are missing dependencies. Check step 4 in [Create a Pull Request](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md#create-a-pull-request) to work around it. | ||||
|  | ||||
| Then you need to install our special tool that builds the documentation: | ||||
|  | ||||
| ```bash | ||||
| pip install git+https://github.com/huggingface/doc-builder | ||||
| ``` | ||||
|  | ||||
| > [!NOTE] | ||||
| > You only need to generate the documentation to inspect it locally (if you're planning changes and want to | ||||
| > check how they look before committing for instance). You don't have to commit the built documentation. | ||||
| --- | ||||
| **NOTE** | ||||
|  | ||||
| You only need to generate the documentation to inspect it locally (if you're planning changes and want to | ||||
| check how they look before committing for instance). You don't have to commit the built documentation. | ||||
|  | ||||
| --- | ||||
|  | ||||
| ## Building the documentation | ||||
|  | ||||
| @ -71,8 +72,12 @@ doc-builder preview transformers docs/source/en/ | ||||
|  | ||||
| The docs will be viewable at [http://localhost:3000](http://localhost:3000). You can also preview the docs once you have opened a PR. You will see a bot add a comment to a link where the documentation with your changes lives. | ||||
|  | ||||
| > [!NOTE] | ||||
| > The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` and restart the `preview` command (`ctrl-c` to stop it, then call `doc-builder preview ...` again). | ||||
| --- | ||||
| **NOTE** | ||||
|  | ||||
| The `preview` command only works with existing doc files. When you add a completely new file, you need to update `_toctree.yml` and restart the `preview` command (`ctrl-c` to stop it, then call `doc-builder preview ...` again). | ||||
|  | ||||
| --- | ||||
|  | ||||
| ## Adding a new element to the navigation bar | ||||
|  | ||||
| @ -159,9 +164,6 @@ These classes should be added using our Markdown syntax. Usually as follows: | ||||
| [[autodoc]] XXXConfig | ||||
| ``` | ||||
|  | ||||
| > [!IMPORTANT] | ||||
| > Always add a blank line after `[[autodoc]]` to ensure it passes the CI/CD checks. | ||||
|  | ||||
| This will include every public method of the configuration that is documented. If for some reason you wish for a method | ||||
| not to be displayed in the documentation, you can do so by specifying which methods should be in the docs: | ||||
|  | ||||
| @ -276,7 +278,7 @@ Here's an example of a single value return: | ||||
|  | ||||
| ```python | ||||
|     Returns: | ||||
|         `list[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token. | ||||
|         `List[int]`: A list of integers in the range [0, 1] --- 1 for a special token, 0 for a sequence token. | ||||
| ``` | ||||
|  | ||||
| Here's an example of a tuple return, comprising several objects: | ||||
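|  | ||||
| A tuple return documented in the same style might look like the following sketch (the field names here are illustrative, not taken from a specific model): | ||||
|  | ||||
| ```python | ||||
|     Returns: | ||||
|         `tuple(torch.FloatTensor)` comprising various elements depending on the configuration and inputs: | ||||
|  | ||||
|         - **loss** (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided) -- | ||||
|           Classification loss. | ||||
|         - **logits** (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`) -- | ||||
|           Classification scores (before SoftMax). | ||||
| ``` | ||||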
|  | ||||
| @ -23,6 +23,8 @@ | ||||
|     title: تحميل النماذج المخصصة وتدريبها باستخدام 🤗 PEFT | ||||
|   - local: model_sharing | ||||
|     title: مشاركة نموذجك | ||||
|   - local: agents | ||||
|     title: الوكلاء | ||||
|   - local: llm_tutorial | ||||
|     title: التوليد باستخدام LLMs | ||||
|   - local: conversations | ||||
| @ -250,6 +252,8 @@ | ||||
|   title: أطر مفاهيمية | ||||
| # - sections: | ||||
| #   - sections: | ||||
| #     - local: main_classes/agent | ||||
| #       title: الوكلاء والأدوات | ||||
| #     - local: model_doc/auto | ||||
| #       title: فئات يتم إنشاؤها ديناميكيًا | ||||
| #     - local: main_classes/backbones | ||||
|  | ||||
							
								
								
									
docs/source/ar/agents.md (new file, 539 lines)
							| @ -0,0 +1,539 @@ | ||||
| # الوكلاء والأدوات | ||||
|  | ||||
| [[open-in-colab]] | ||||
|  | ||||
| ### ما هو الوكيل؟ | ||||
|  | ||||
| يمكن للنماذج اللغوية الكبيرة (LLMs) التي تم تدريبها على أداء [نمذجة اللغة السببية](./tasks/language_modeling) التعامل مع مجموعة واسعة من المهام، ولكنها غالبًا ما تواجه صعوبات في المهام الأساسية مثل المنطق والحساب والبحث. وعندما يتم استدعاؤها في مجالات لا تؤدي فيها أداءً جيدًا، فإنها غالبًا ما تفشل في توليد الإجابة التي نتوقعها منها. | ||||
|  | ||||
| يتمثل أحد النهج للتغلب على هذا القصور في إنشاء "وكيل". | ||||
|  | ||||
| الوكيل هو نظام يستخدم LLM كمحرك له، ولديه حق الوصول إلى وظائف تسمى "أدوات". | ||||
|  | ||||
| هذه "الأدوات" هي وظائف لأداء مهمة، وتحتوي على جميع الأوصاف اللازمة للوكيل لاستخدامها بشكل صحيح. | ||||
|  | ||||
| يمكن برمجة الوكيل للقيام بما يلي: | ||||
| - وضع سلسلة من الإجراءات/الأدوات وتشغيلها جميعًا في نفس الوقت مثل [`CodeAgent`] على سبيل المثال | ||||
| - التخطيط للاجراءات/الأدوات وتنفيذها واحدة تلو الأخرى والانتظار حتى انتهاء كل إجراء قبل إطلاق التالي مثل [`ReactJsonAgent`] على سبيل المثال | ||||
|  | ||||
| ### أنواع الوكلاء | ||||
|  | ||||
| #### الوكيل البرمجي (Code agent) | ||||
|  | ||||
| يتبع هذا الوكيل خطوات محددة: أولًا، يخطط لسلسلة من الإجراءات التي يريد تنفيذها، ثم يولّد شفرة Python لتنفيذ جميع الإجراءات في نفس الوقت. وهو يتعامل بشكل أصلي مع أنواع مختلفة من المدخلات والمخرجات للأدوات التي يستخدمها، وبالتالي فهو الخيار الموصى به للمهام متعددة الوسائط. | ||||
|  | ||||
| #### وكلاء التفاعل | ||||
|  | ||||
| هذا هو الوكيل الذي يتم اللجوء إليه لحل مهام الاستدلال، حيث يجعل إطار ReAct ([Yao et al.، 2022](https://huggingface.co/papers/2210.03629)) التفكير بناءً على الملاحظات السابقة فعّالًا حقًا. | ||||
|  | ||||
| نقوم بتنفيذ إصدارين من ReactJsonAgent:  | ||||
| - [`ReactJsonAgent`] يقوم بتوليد استدعاءات الأدوات كـ JSON في إخراجها. | ||||
| - [`ReactCodeAgent`] هو نوع جديد من ReactJsonAgent يقوم بتوليد استدعاءات أدواته كمقاطع من التعليمات البرمجية، والتي تعمل بشكل جيد حقًا مع LLMs التي تتمتع بأداء  قوي في البرمجة. | ||||
|  | ||||
| > [!TIP] | ||||
| > اقرأ منشور المدونة [Open-source LLMs as LangChain Agents](https://huggingface.co/blog/open-source-llms-as-agents) لمعرفة المزيد عن وكيل ReAct. | ||||
|  | ||||
|  | ||||
|  | ||||
| على سبيل المثال، إليك كيف يتعامل وكيل ReAct Code خطوة بخطوة مع السؤال التالي. | ||||
|  | ||||
| ```py3 | ||||
| >>> agent.run( | ||||
| ...     "How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need?", | ||||
| ... ) | ||||
| =====New task===== | ||||
| How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need? | ||||
| ====Agent is executing the code below: | ||||
| bert_blocks = search(query="number of blocks in BERT base encoder") | ||||
| print("BERT blocks:", bert_blocks) | ||||
| ==== | ||||
| Print outputs: | ||||
| BERT blocks: twelve encoder blocks | ||||
|  | ||||
| ====Agent is executing the code below: | ||||
| attention_layer = search(query="number of layers in Attention is All You Need") | ||||
| print("Attention layers:", attention_layer) | ||||
| ==== | ||||
| Print outputs: | ||||
| Attention layers: Encoder: The encoder is composed of a stack of N = 6 identical layers. Each layer has two sub-layers. The first is a multi-head self-attention mechanism, and the second is a simple, position- 2 Page 3 Figure 1: The Transformer - model architecture. | ||||
|  | ||||
| ====Agent is executing the code below: | ||||
| bert_blocks = 12 | ||||
| attention_layers = 6 | ||||
| diff = bert_blocks - attention_layers | ||||
| print("Difference in blocks:", diff) | ||||
| final_answer(diff) | ||||
| ==== | ||||
|  | ||||
| Print outputs: | ||||
| Difference in blocks: 6 | ||||
|  | ||||
| Final answer: 6 | ||||
| ``` | ||||
|  | ||||
| ### كيف يمكنني بناء وكيل؟ | ||||
|  | ||||
| لتهيئة وكيل، تحتاج إلى هذه الوسائط: | ||||
|  | ||||
| - نموذج لغوي كبير (LLM) يشكل المحرك الأساسي للوكيل. الوكيل نفسه ليس النموذج اللغوي، بل هو برنامج يستخدم النموذج اللغوي كمحرك له. | ||||
| - موجه النظام (system prompt): هذه هي التعليمات التي يتم إعطاؤها للنموذج اللغوي لإنشاء مخرجاته. | ||||
| - صندوق أدوات (toolbox) يختار الوكيل منه الأدوات لتنفيذها | ||||
| - محلل (parser) لاستخراج الأدوات التي يجب استدعاؤها من مخرجات النموذج اللغوي LLM والأدوات التي يجب استخدامها | ||||
|  | ||||
| عند تهيئة نظام الوكيل، يتم استخدام سمات الأداة لإنشاء وصف للأداة، ثم يتم دمجها في موجه النظام الخاص `system_prompt` للوكيل لإعلامه بالأدوات التي يمكنه استخدامها ولماذا. | ||||
|  | ||||
| للبدء، يرجى تثبيت `agents` الإضافية لتثبيت جميع التبعيات الافتراضية. | ||||
|  | ||||
| ```bash | ||||
| pip install transformers[agents] | ||||
| ``` | ||||
|  | ||||
| قم ببناء محرك LLM الخاص بك من خلال تعريف طريقة `llm_engine` التي تقبل قائمة من [الرسائل](./chat_templating) وتعيد النص. يجب أن تقبل هذه الدالة القابلة للاستدعاء أيضًا معامل `stop` يشير إلى متى يجب التوقف عن التوليد. | ||||
|  | ||||
| ```python | ||||
| from huggingface_hub import login, InferenceClient | ||||
|  | ||||
| login("<YOUR_HUGGINGFACEHUB_API_TOKEN>") | ||||
|  | ||||
| client = InferenceClient(model="meta-llama/Meta-Llama-3-70B-Instruct") | ||||
|  | ||||
| def llm_engine(messages, stop_sequences=["Task"]) -> str: | ||||
|     response = client.chat_completion(messages, stop=stop_sequences, max_tokens=1000) | ||||
|     answer = response.choices[0].message.content | ||||
|     return answer | ||||
| ``` | ||||
|  | ||||
| يمكنك استخدام أي دالة `llm_engine` طالما أنها: | ||||
| 1. تتبع تنسيق [الرسائل](./chat_templating.md) لمدخلاتها (`List[Dict[str, str]]`) وتعيد `str` | ||||
| 2. تتوقف عن توليد المخرجات عند التسلسلات التي تم تمريرها في معامل `stop` | ||||
|  | ||||
| أنت بحاجة أيضًا إلى معامل `tools` الذي يقبل قائمة من الأدوات (كائنات `Tool`). يمكنك توفير قائمة فارغة لـ `tools` والاكتفاء بصندوق الأدوات الافتراضي عبر المعامل الاختياري `add_base_tools=True`. | ||||
|  | ||||
| الآن يمكنك إنشاء وكيل، مثل [`CodeAgent`], وتشغيله. ولتسهيل الأمر، نقدم أيضًا فئة [`HfEngine`] التي تستخدم `huggingface_hub.InferenceClient` خلف الكواليس. | ||||
|  | ||||
| ```python | ||||
| from transformers import CodeAgent, HfEngine | ||||
|  | ||||
| llm_engine = HfEngine(model="meta-llama/Meta-Llama-3-70B-Instruct") | ||||
| agent = CodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True) | ||||
|  | ||||
| agent.run( | ||||
|     "Could you translate this sentence from French, say it out loud and return the audio.", | ||||
|     sentence="Où est la boulangerie la plus proche?", | ||||
| ) | ||||
| ``` | ||||
|  | ||||
| هذه الميزة ستكون مفيدة في حالة الحاجة الملحة! يمكنك حتى ترك معامل `llm_engine` غير محدد، وسيتم إنشاء [`HfEngine`] بشكل تلقائي. | ||||
|  | ||||
| ```python | ||||
| from transformers import CodeAgent | ||||
|  | ||||
| agent = CodeAgent(tools=[], add_base_tools=True) | ||||
|  | ||||
| agent.run( | ||||
|     "Could you translate this sentence from French, say it out loud and give me the audio.", | ||||
|     sentence="Où est la boulangerie la plus proche?", | ||||
| ) | ||||
| ``` | ||||
|  | ||||
| لاحظ أننا استخدمنا معامل "sentence" إضافي: يمكنك تمرير النص كمعامل إضافي إلى النموذج. | ||||
|  | ||||
| يمكنك أيضًا استخدام هذا للإشارة إلى مسار الملفات المحلية أو البعيدة للنموذج لاستخدامها: | ||||
|  | ||||
| ```py | ||||
| from transformers import ReactCodeAgent | ||||
|  | ||||
| agent = ReactCodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True) | ||||
|  | ||||
| agent.run("Why does Mike not know many people in New York?", audio="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/recording.mp3") | ||||
| ``` | ||||
|  | ||||
|  | ||||
| تم تحديد موجه النظام ومحلل المخرجات تلقائيًا، ولكن يمكنك فحصهما بسهولة عن طريق استدعاء `system_prompt_template` على وكيلك. | ||||
|  | ||||
| ```python | ||||
| print(agent.system_prompt_template) | ||||
| ``` | ||||
|  | ||||
| من المهم أن تشرح بأكبر قدر ممكن من الوضوح المهمة التي تريد تنفيذها. | ||||
| كل عملية [`~Agent.run`] مستقلة، وبما أن الوكيل مدعوم من LLM، فقد تؤدي الاختلافات الطفيفة في موجهك إلى نتائج مختلفة تمامًا. | ||||
| يمكنك أيضًا تشغيل وكيل بشكل متتالي لمهام مختلفة: في كل مرة يتم فيها إعادة تهيئة سمتي `agent.task` و`agent.logs`. | ||||
|  | ||||
|  | ||||
| #### تنفيذ التعليمات البرمجية | ||||
|  | ||||
| يقوم مفسر Python بتنفيذ التعليمات البرمجية على مجموعة من المدخلات التي يتم تمريرها جنبًا إلى جنب مع أدواتك. | ||||
| يجب أن يكون هذا الأمر آمنًا لأن الوظائف الوحيدة التي يمكن استدعاؤها هي الأدوات التي قدمتها (خاصة إذا كانت أدوات من Hugging Face فقط) ووظيفة الطباعة، لذا فأنت مقيد بالفعل بما يمكن تنفيذه. | ||||
|  | ||||
| مفسر Python لا يسمح أيضًا باستدعاء دوال بشكل افتراضي خارج قائمة آمنة، لذا فإن جميع الهجمات الأكثر وضوحًا لا ينبغي أن تكون مشكلة. | ||||
| يمكنك أيضًا الإذن باستيرادات إضافية عن طريق تمرير الوحدات النمطية المصرح بها كقائمة من السلاسل في معامل  `additional_authorized_imports` عند تهيئة [`ReactCodeAgent`] أو [`CodeAgent`]: | ||||
|  | ||||
| ```py | ||||
| >>> from transformers import ReactCodeAgent | ||||
|  | ||||
| >>> agent = ReactCodeAgent(tools=[], additional_authorized_imports=['requests', 'bs4']) | ||||
| >>> agent.run("Could you get me the title of the page at url 'https://huggingface.co/blog'?") | ||||
|  | ||||
| (...) | ||||
| 'Hugging Face – Blog' | ||||
| ``` | ||||
|  | ||||
| سيتم إيقاف التنفيذ عند أي رمز يحاول تنفيذ عملية غير قانونية أو إذا كان هناك خطأ Python عادي في التعليمات البرمجية التي تم إنشاؤها بواسطة الوكيل. | ||||
|  | ||||
| > [!WARNING] | ||||
| > يمكن لـ LLM توليد شفرة برمجية عشوائية سيتم تنفيذها بعد ذلك: لا تقم باستدعاء أي دوال غير آمنة! | ||||
|  | ||||
| ### موجه النظام | ||||
|  | ||||
| يولّد الوكيل، أو بالأحرى النموذج اللغوي الذي يقود الوكيل، مخرجاته بناءً على موجه النظام. يمكن تخصيص موجه النظام وتصميمه للمهام المقصودة. على سبيل المثال، تحقق من موجه النظام لـ [`ReactCodeAgent`] (الإصدار أدناه مبسط قليلاً). | ||||
|  | ||||
| ```text | ||||
| You will be given a task to solve as best you can. | ||||
| You have access to the following tools: | ||||
| <<tool_descriptions>> | ||||
|  | ||||
| To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences. | ||||
|  | ||||
| At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task, then the tools that you want to use. | ||||
| Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with '/End code' sequence. | ||||
| During each intermediate step, you can use 'print()' to save whatever important information you will then need. | ||||
| These print outputs will then be available in the 'Observation:' field, for using this information as input for the next step. | ||||
|  | ||||
| In the end you have to return a final answer using the `final_answer` tool. | ||||
|  | ||||
| Here are a few examples using notional tools: | ||||
| --- | ||||
| {examples} | ||||
|  | ||||
| Above example were using notional tools that might not exist for you. You only have access to those tools: | ||||
| <<tool_names>> | ||||
| You also can perform computations in the python code you generate. | ||||
|  | ||||
| Always provide a 'Thought:' and a 'Code:\n```py' sequence ending with '```<end_code>' sequence. You MUST provide at least the 'Code:' sequence to move forward. | ||||
|  | ||||
| Remember to not perform too many operations in a single code block! You should split the task into intermediate code blocks. | ||||
| Print results at the end of each step to save the intermediate results. Then use final_answer() to return the final result. | ||||
|  | ||||
| Remember to make sure that variables you use are all defined. | ||||
|  | ||||
| Now Begin! | ||||
| ``` | ||||
|  | ||||
| يتضمن موجه النظام: | ||||
| - *مقدمة* تشرح كيف يجب أن يتصرف الوكيل والأدوات التي يجب عليه استخدامها. | ||||
| - وصف لجميع الأدوات التي يتم تحديدها بواسطة رمز `<<tool_descriptions>>` الذي يتم استبداله ديناميكيًا في وقت التشغيل بالأدوات التي يحددها المستخدم أو يختارها. | ||||
|     - يأتي وصف الأداة من سمات الأداة، `name`، و`description`، و`inputs` و`output_type`، وقالب `jinja2` بسيط يمكنك تحسينه. | ||||
| - شكل المخرج المتوقع. | ||||
|  | ||||
| يمكنك تحسين موجه النظام، على سبيل المثال، عن طريق إضافة شرح لتنسيق المخرجات. | ||||
|  | ||||
| للحصول على أقصى قدر من المرونة، يمكنك الكتابة فوق قالب موجه النظام بالكامل عن طريق تمرير موجه مخصص كمعامل إلى معلمة `system_prompt`. | ||||
|  | ||||
| ```python | ||||
| from transformers import ReactJsonAgent | ||||
| from transformers.agents import PythonInterpreterTool | ||||
|  | ||||
| agent = ReactJsonAgent(tools=[PythonInterpreterTool()], system_prompt="{your_custom_prompt}") | ||||
| ``` | ||||
|  | ||||
| > [!WARNING] | ||||
| > يرجى التأكد من تحديد سلسلة `<<tool_descriptions>>` في مكان ما في `template` حتى يكون الوكيل على علم بالأدوات المتاحة. | ||||
|  | ||||
|  | ||||
| ### فحص تشغيل الوكيل | ||||
|  | ||||
| فيما يلي بعض السمات المفيدة لفحص ما حدث بعد التشغيل: | ||||
| - تخزن `agent.logs` سجلات مفصلة للوكيل. في كل خطوة من تشغيل الوكيل، يتم تخزين كل شيء في قاموس يُلحق بـ `agent.logs`. | ||||
| - تشغيل `agent.write_inner_memory_from_logs()` يخلق ذاكرة داخلية لسجلات الوكيل للنظام LLM لعرضها، كقائمة من رسائل الدردشة. تنتقل هذه الطريقة عبر كل خطوة من سجل الوكيل ولا تخزن سوى ما يهمها كرسالة: على سبيل المثال، سيحفظ موجه النظام والمهمة في رسائل منفصلة، ثم لكل خطوة سيخزن مخرج LLM كرسالة، ومخرج استدعاء الأداة كرسالة أخرى. استخدم هذا إذا كنت تريد عرضًا عامًا لما حدث - ولكن لن يتم نسخ كل سجل بواسطة هذه الطريقة. | ||||
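|  | ||||
| فيما يلي مثال توضيحي مبسّط لفحص التشغيل (نفترض هنا أن الوكيل `agent` قد أنهى تشغيلًا بالفعل، وأن كل رسالة مُعادة عبارة عن قاموس بالمفتاحين `role` و`content`): | ||||
|  | ||||
| ```python | ||||
| # Each step of the run is stored as a dict appended to `agent.logs` | ||||
| for step_log in agent.logs: | ||||
|     print(step_log.keys()) | ||||
|  | ||||
| # Rebuild the agent's inner memory as a list of chat messages | ||||
| messages = agent.write_inner_memory_from_logs() | ||||
| for message in messages: | ||||
|     print(message["role"], ":", str(message["content"])[:80]) | ||||
| ``` | ||||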
|  | ||||
| ## الأدوات | ||||
|  | ||||
| الأداة هي عبارة عن وظيفة أساسية يستخدمها الوكيل لتنفيذ مهمة محددة. | ||||
|  | ||||
| يمكنك على سبيل المثال التحقق من [`PythonInterpreterTool`]: لديه اسم ووصف ووصف للمدخلات ونوع للمخرج، وطريقة `__call__` التي تقوم بتنفيذ المهمة المطلوبة. | ||||
|  | ||||
| عند تهيئة الوكيل، يتم استخدام سمات الأداة لتوليد وصف للأداة يتم تضمينه في موجه النظام الخاص بالوكيل. يتيح هذا للوكيل معرفة الأدوات التي يمكنه استخدامها ولماذا. | ||||
|  | ||||
| ### صندوق الأدوات الافتراضي | ||||
|  | ||||
| يأتي Transformers مع صندوق أدوات افتراضي لتمكين الوكلاء، والذي يمكنك إضافته إلى وكيلك عند التهيئة باستخدام معامل `add_base_tools = True`: | ||||
|  | ||||
| - **الإجابة على أسئلة المستند**: الإجابة على سؤال حول المستند (مثل ملف PDF) بتنسيق صورة ([Donut](./model_doc/donut)) | ||||
| - **الإجابة على أسئلة الصور**: الإجابة على سؤال حول صورة ([VILT](./model_doc/vilt)) | ||||
| - **تحويل الكلام إلى نص**: تفريغ الكلام إلى نص ([Whisper](./model_doc/whisper)) | ||||
| - **النص إلى كلام**: تحويل النص إلى كلام ([SpeechT5](./model_doc/speecht5)) | ||||
| - **الترجمة**: ترجمة جملة معينة من لغة المصدر إلى لغة الهدف. | ||||
| - **مفسر كود Python**: تشغيل كود Python الذي تم إنشاؤه بواسطة LLM في بيئة آمنة. لن يتم إضافة هذه الأداة إلى [`ReactJsonAgent`] إلا إذا استخدمت `add_base_tools=True`، نظرًا لأن الأدوات المستندة إلى التعليمات البرمجية يمكنها بالفعل تنفيذ كود Python | ||||
|  | ||||
| يمكنك استخدام أداة يدويًا عن طريق استدعاء دالة [`load_tool`] وتحديد مهمة لتنفيذها. | ||||
|  | ||||
| ```python | ||||
| from transformers import load_tool | ||||
|  | ||||
| tool = load_tool("text-to-speech") | ||||
| audio = tool("This is a text to speech tool") | ||||
| ``` | ||||
|  | ||||
| ### إنشاء أداة جديدة | ||||
|  | ||||
| يمكنك إنشاء أداتك الخاصة لتغطية حالات الاستخدام التي لا تغطيها الأدوات الافتراضية من Hugging Face. | ||||
| على سبيل المثال، دعنا نقوم بإنشاء أداة تعرض النموذج الأكثر تنزيلًا لمهمة معينة من Hub. | ||||
|  | ||||
| سوف نبدأ بالكود التالي. | ||||
|  | ||||
| ```python | ||||
| from huggingface_hub import list_models | ||||
|  | ||||
| task = "text-classification" | ||||
|  | ||||
| model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) | ||||
| print(model.id) | ||||
| ``` | ||||
|  | ||||
| يمكن تحويل هذه الشيفرة إلى فئة ترث من الفئة العليا [`Tool`]. | ||||
|  | ||||
| تحتاج الأداة المخصصة إلى: | ||||
|  | ||||
| - اسم `name`، والتي تمثل اسم الأداة نفسها. عادةً ما يصف الاسم وظيفتها. بما أن الكود يعيد النموذج الأكثر تنزيلًا لمهمة ما، فلنسمها `model_download_counter`. | ||||
| - تستخدم خاصية `description` لملء موجه نظام الوكيل. | ||||
| - خاصية `inputs`، والتي هي عبارة عن قاموس بمفاتيح "type" و"description". يحتوي على معلومات تساعد المفسر Python على اتخاذ خيارات مستنيرة بشأن المدخلات. | ||||
| - خاصية `output_type`، والتي تحدد نوع المخرج. | ||||
| - طريقة `forward` والتي تحتوي على الكود الذي سيتم تنفيذه للحصول على النتيجة النهائية. | ||||
|  | ||||
| ```python | ||||
| from transformers import Tool | ||||
| from huggingface_hub import list_models | ||||
|  | ||||
| class HFModelDownloadsTool(Tool): | ||||
|     name = "model_download_counter" | ||||
|     description = ( | ||||
|         "This is a tool that returns the most downloaded model of a given task on the Hugging Face Hub. " | ||||
|         "It returns the name of the checkpoint." | ||||
|     ) | ||||
|  | ||||
|     inputs = { | ||||
|         "task": { | ||||
|             "type": "text", | ||||
|             "description": "the task category (such as text-classification, depth-estimation, etc)", | ||||
|         } | ||||
|     } | ||||
|     output_type = "text" | ||||
|  | ||||
|     def forward(self, task: str): | ||||
|         model = next(iter(list_models(filter=task, sort="downloads", direction=-1))) | ||||
|         return model.id | ||||
| ``` | ||||
|  | ||||
| الآن بعد أن أصبحت فئة `HfModelDownloadsTool` المخصصة جاهزة، يمكنك حفظها في ملف باسم `model_downloads.py` واستيرادها للاستخدام. | ||||
|  | ||||
| ```python | ||||
| from model_downloads import HFModelDownloadsTool | ||||
|  | ||||
| tool = HFModelDownloadsTool() | ||||
| ``` | ||||
|  | ||||
| يمكنك أيضًا مشاركة أداتك المخصصة في Hub عن طريق استدعاء [`~Tool.push_to_hub`] على الأداة. تأكد من أنك قمت بإنشاء مستودع لها على Hub وأنك تستخدم رمز وصول للقراءة. | ||||
|  | ||||
| ```python | ||||
| tool.push_to_hub("{your_username}/hf-model-downloads") | ||||
| ``` | ||||
|  | ||||
| قم بتحميل الأداة باستخدام دالة [`~Tool.load_tool`] ومررها إلى معلمة `tools` في الوكيل الخاص بك. | ||||
|  | ||||
| ```python | ||||
| from transformers import load_tool, CodeAgent | ||||
|  | ||||
| model_download_tool = load_tool("m-ric/hf-model-downloads") | ||||
| agent = CodeAgent(tools=[model_download_tool], llm_engine=llm_engine) | ||||
| agent.run( | ||||
|     "Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub?" | ||||
| ) | ||||
| ``` | ||||
|  | ||||
| ستحصل على ما يلي: | ||||
|  | ||||
| ```text | ||||
| ======== New task ======== | ||||
| Can you give me the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub? | ||||
| ==== Agent is executing the code below: | ||||
| most_downloaded_model = model_download_counter(task="text-to-video") | ||||
| print(f"The most downloaded model for the 'text-to-video' task is {most_downloaded_model}.") | ||||
| ==== | ||||
| ``` | ||||
|  | ||||
| والناتج: | ||||
|  | ||||
| `"النموذج الأكثر تنزيلًا لمهمة `text-to-video` هو ByteDance/AnimateDiff-Lightning."` | ||||
|  | ||||
| ### إدارة صندوق أدوات الوكيل الخاص بك | ||||
|  | ||||
| إذا كنت قد قمت بتهيئة وكيل، فمن غير الملائم إعادة تهيئته من البداية لإضافة أداة جديدة ترغب في استخدامها. باستخدام مكتبة Transformers، يمكنك إدارة صندوق أدوات الوكيل بإضافة أو استبدال أداة موجودة. | ||||
|  | ||||
| دعنا نضيف الأداة `model_download_tool` إلى وكيل تم تهيئته مسبقًا باستخدام صندوق الأدوات الافتراضي. | ||||
|  | ||||
| ```python | ||||
| from transformers import CodeAgent | ||||
|  | ||||
| agent = CodeAgent(tools=[], llm_engine=llm_engine, add_base_tools=True) | ||||
| agent.toolbox.add_tool(model_download_tool) | ||||
| ``` | ||||
|  | ||||
| الآن يمكننا الاستفادة من الأداة الجديدة وأداة تحويل النص إلى كلام السابقة: | ||||
|  | ||||
| ```python | ||||
| agent.run( | ||||
|     "Can you read out loud the name of the model that has the most downloads in the 'text-to-video' task on the Hugging Face Hub and return the audio?" | ||||
| ) | ||||
| ``` | ||||
|  | ||||
| | **Audio**                                                                                                                                            | | ||||
| |------------------------------------------------------------------------------------------------------------------------------------------------------| | ||||
| | <audio controls><source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/damo.wav" type="audio/wav"/> | | ||||
|  | ||||
| > [!WARNING] | ||||
| > احترس عند إضافة أدوات إلى وكيل يعمل جيدًا بالفعل، لأن ذلك قد يرجّح اختيار أداتك على غيرها أو قد يؤدي إلى اختيار أداة أخرى غير الأداة المعرّفة بالفعل. | ||||
|  | ||||
| استخدم طريقة `agent.toolbox.update_tool()` لاستبدال أداة موجودة في صندوق أدوات الوكيل. | ||||
| هذا مفيد إذا كانت أداتك الجديدة بديلاً مباشرًا للأداة الموجودة لأن الوكيل يعرف بالفعل كيفية تنفيذ تلك المهمة المحددة. | ||||
| تأكد فقط من اتباع الأداة الجديدة لنفس واجهة برمجة التطبيقات (API) للأداة المستبدلة أو قم بتكييف قالب موجه النظام لضمان تحديث جميع الأمثلة التي تستخدم الأداة المستبدلة. | ||||
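|  | ||||
| فيما يلي مثال مبسّط للتوضيح (نفترض هنا أن `update_tool` تستبدل الأداة التي تحمل الاسم نفسه، وأن `HFModelDownloadsTool` هي الأداة المخصصة المعرّفة سابقًا): | ||||
|  | ||||
| ```python | ||||
| # A hypothetical drop-in replacement: same `name`, `inputs` and `output_type` as the original tool | ||||
| improved_download_tool = HFModelDownloadsTool() | ||||
|  | ||||
| # Replace the existing tool in the agent's toolbox (assumed to match on the tool's `name`) | ||||
| agent.toolbox.update_tool(improved_download_tool) | ||||
| ``` | ||||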
|  | ||||
| ### استخدام مجموعة من الأدوات | ||||
|  | ||||
| يمكنك الاستفادة من مجموعات الأدوات باستخدام كائن ToolCollection، مع تحديد مجموعة الأدوات التي تريد استخدامها. | ||||
| ثم قم بتمريرها كقائمة لتهيئة الوكيل الخاص بك، وبدء استخدامها! | ||||
|  | ||||
| ```py | ||||
| from transformers import ToolCollection, ReactCodeAgent | ||||
|  | ||||
| image_tool_collection = ToolCollection(collection_slug="huggingface-tools/diffusion-tools-6630bb19a942c2306a2cdb6f") | ||||
| agent = ReactCodeAgent(tools=[*image_tool_collection.tools], add_base_tools=True) | ||||
|  | ||||
| agent.run("Please draw me a picture of rivers and lakes.") | ||||
| ``` | ||||
|  | ||||
| لتسريع البداية، يتم تحميل الأدوات فقط إذا استدعاها الوكيل. | ||||
|  | ||||
| ستحصل على هذه الصورة: | ||||
|  | ||||
| <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rivers_and_lakes.png" /> | ||||
|  | ||||
| ### استخدام gradio-tools | ||||
|  | ||||
| [gradio-tools](https://github.com/freddyaboulton/gradio-tools) هي مكتبة قوية تتيح استخدام Hugging | ||||
| Face Spaces كأدوات. تدعم العديد من المساحات الموجودة بالإضافة إلى مساحات مخصصة. | ||||
|  | ||||
| تدعم مكتبة Transformers `gradio_tools` باستخدام طريقة [`Tool.from_gradio`] في الفئة. على سبيل المثال، دعنا نستخدم [`StableDiffusionPromptGeneratorTool`](https://github.com/freddyaboulton/gradio-tools/blob/main/gradio_tools/tools/prompt_generator.py) من مجموعة أدوات `gradio-tools` لتحسين المطالبات لإنشاء صور أفضل. | ||||
|  | ||||
| استورد وقم بتهيئة الأداة، ثم مررها إلى طريقة `Tool.from_gradio`: | ||||
|  | ||||
| ```python | ||||
| from gradio_tools import StableDiffusionPromptGeneratorTool | ||||
| from transformers import Tool, load_tool, CodeAgent | ||||
|  | ||||
| gradio_prompt_generator_tool = StableDiffusionPromptGeneratorTool() | ||||
| prompt_generator_tool = Tool.from_gradio(gradio_prompt_generator_tool) | ||||
| ``` | ||||
|  | ||||
| الآن يمكنك استخدامه مثل أي أداة أخرى. على سبيل المثال، دعنا نحسن الموجه `a rabbit wearing a space suit`. | ||||
|  | ||||
| ```python | ||||
| image_generation_tool = load_tool('huggingface-tools/text-to-image') | ||||
| agent = CodeAgent(tools=[prompt_generator_tool, image_generation_tool], llm_engine=llm_engine) | ||||
|  | ||||
| agent.run( | ||||
|     "Improve this prompt, then generate an image of it.", prompt='A rabbit wearing a space suit' | ||||
| ) | ||||
| ``` | ||||
|  | ||||
| يستفيد النموذج بشكل كافٍ من الأداة: | ||||
|  | ||||
| ```text | ||||
| ======== New task ======== | ||||
| Improve this prompt, then generate an image of it. | ||||
| You have been provided with these initial arguments: {'prompt': 'A rabbit wearing a space suit'}. | ||||
| ==== Agent is executing the code below: | ||||
| improved_prompt = StableDiffusionPromptGenerator(query=prompt) | ||||
| while improved_prompt == "QUEUE_FULL": | ||||
|     improved_prompt = StableDiffusionPromptGenerator(query=prompt) | ||||
| print(f"The improved prompt is {improved_prompt}.") | ||||
| image = image_generator(prompt=improved_prompt) | ||||
| ==== | ||||
| ``` | ||||
|  | ||||
| قبل إنشاء الصورة أخيرًا: | ||||
|  | ||||
| <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/rabbit_spacesuit_flux.webp" /> | ||||
|  | ||||
| > [!WARNING] | ||||
| > تتطلب gradio-tools إدخالات وإخراجات *نصية* حتى عند العمل مع طرائق مختلفة مثل كائنات الصور والصوت. الإدخالات والإخراجات الصورية والصوتية غير متوافقة حاليًا. | ||||
|  | ||||
| ### استخدام أدوات LangChain | ||||
|  | ||||
| نحن نحب Langchain ونعتقد أنها تحتوي على مجموعة أدوات قوية للغاية. | ||||
| لاستيراد أداة من LangChain، استخدم الطريقة `from_langchain()`. | ||||
|  | ||||
| فيما يلي كيفية استخدامها لإعادة إنشاء نتيجة البحث في المقدمة باستخدام أداة بحث الويب LangChain. | ||||
|  | ||||
| ```python | ||||
| from langchain.agents import load_tools | ||||
| from transformers import Tool, ReactCodeAgent | ||||
|  | ||||
| search_tool = Tool.from_langchain(load_tools(["serpapi"])[0]) | ||||
|  | ||||
| agent = ReactCodeAgent(tools=[search_tool]) | ||||
|  | ||||
| agent.run("How many more blocks (also denoted as layers) in BERT base encoder than the encoder from the architecture proposed in Attention is All You Need?") | ||||
| ``` | ||||
|  | ||||
| ## واجهة Gradio | ||||
|  | ||||
| يمكنك الاستفادة من `gradio.Chatbot` لعرض أفكار الوكيل الخاص بك باستخدام `stream_to_gradio`، إليك مثال: | ||||
|  | ||||
| ```py | ||||
| import gradio as gr | ||||
| from transformers import ( | ||||
|     load_tool, | ||||
|     ReactCodeAgent, | ||||
|     HfEngine, | ||||
|     stream_to_gradio, | ||||
| ) | ||||
|  | ||||
| # Import tool from Hub | ||||
| image_generation_tool = load_tool("m-ric/text-to-image") | ||||
|  | ||||
| llm_engine = HfEngine("meta-llama/Meta-Llama-3-70B-Instruct") | ||||
|  | ||||
| # Initialize the agent with the image generation tool | ||||
| agent = ReactCodeAgent(tools=[image_generation_tool], llm_engine=llm_engine) | ||||
|  | ||||
|  | ||||
| def interact_with_agent(task): | ||||
|     messages = [] | ||||
|     messages.append(gr.ChatMessage(role="user", content=task)) | ||||
|     yield messages | ||||
|     for msg in stream_to_gradio(agent, task): | ||||
|         messages.append(msg) | ||||
|         yield messages + [ | ||||
|             gr.ChatMessage(role="assistant", content="⏳ Task not finished yet!") | ||||
|         ] | ||||
|     yield messages | ||||
|  | ||||
|  | ||||
| with gr.Blocks() as demo: | ||||
|     text_input = gr.Textbox(lines=1, label="Chat Message", value="Make me a picture of the Statue of Liberty.") | ||||
|     submit = gr.Button("Run illustrator agent!") | ||||
|     chatbot = gr.Chatbot( | ||||
|         label="Agent", | ||||
|         type="messages", | ||||
|         avatar_images=( | ||||
|             None, | ||||
|             "https://em-content.zobj.net/source/twitter/53/robot-face_1f916.png", | ||||
|         ), | ||||
|     ) | ||||
|     submit.click(interact_with_agent, [text_input], [chatbot]) | ||||
|  | ||||
| if __name__ == "__main__": | ||||
|     demo.launch() | ||||
| ``` | ||||
| @ -3,16 +3,16 @@ | ||||
| يُشهد في الآونة الأخيرة نمو مجال دراسي يُعنى باستكشاف آلية عمل نماذج المحولات الضخمة مثل BERT (والذي يُطلق عليها البعض اسم "BERTology"). ومن الأمثلة البارزة على هذا المجال ما يلي: | ||||
|  | ||||
| - BERT Rediscovers the Classical NLP Pipeline بواسطة Ian Tenney و Dipanjan Das و Ellie Pavlick: | ||||
|   https://huggingface.co/papers/1905.05950 | ||||
| - Are Sixteen Heads Really Better than One? بواسطة Paul Michel و Omer Levy و Graham Neubig: https://huggingface.co/papers/1905.10650 | ||||
|   https://arxiv.org/abs/1905.05950 | ||||
| - Are Sixteen Heads Really Better than One? بواسطة Paul Michel و Omer Levy و Graham Neubig: https://arxiv.org/abs/1905.10650 | ||||
| - What Does BERT Look At? An Analysis of BERT's Attention بواسطة Kevin Clark و Urvashi Khandelwal و Omer Levy و Christopher D. | ||||
|   Manning: https://huggingface.co/papers/1906.04341 | ||||
| - CAT-probing: A Metric-based Approach to Interpret How Pre-trained Models for Programming Language Attend Code Structure: https://huggingface.co/papers/2210.04633 | ||||
|   Manning: https://arxiv.org/abs/1906.04341 | ||||
| - CAT-probing: A Metric-based Approach to Interpret How Pre-trained Models for Programming Language Attend Code Structure: https://arxiv.org/abs/2210.04633 | ||||
|  | ||||
| لإثراء هذا المجال الناشئ، قمنا بتضمين بعض الميزات الإضافية في نماذج BERT/GPT/GPT-2 للسماح للناس بالوصول إلى التمثيلات الداخلية، والتي تم تكييفها بشكل أساسي من العمل الرائد لـ Paul Michel (https://huggingface.co/papers/1905.10650): | ||||
| لإثراء هذا المجال الناشئ، قمنا بتضمين بعض الميزات الإضافية في نماذج BERT/GPT/GPT-2 للسماح للناس بالوصول إلى التمثيلات الداخلية، والتي تم تكييفها بشكل أساسي من العمل الرائد لـ Paul Michel (https://arxiv.org/abs/1905.10650): | ||||
|  | ||||
| - الوصول إلى جميع الحالات المخفية في BERT/GPT/GPT-2، | ||||
| - الوصول إلى جميع أوزان الانتباه لكل رأس في BERT/GPT/GPT-2، | ||||
| - استرجاع قيم ومشتقات  مخرجات الرأس لحساب درجة أهمية الرأس وحذفه كما هو موضح في https://huggingface.co/papers/1905.10650. | ||||
| - استرجاع قيم ومشتقات  مخرجات الرأس لحساب درجة أهمية الرأس وحذفه كما هو موضح في https://arxiv.org/abs/1905.10650. | ||||
|  | ||||
| ولمساعدتك على فهم واستخدام هذه الميزات بسهولة، أضفنا مثالًا برمجيًا محددًا: [bertology.py](https://github.com/huggingface/transformers-research-projects/tree/main/bertology/run_bertology.py) يقوم باستخراج المعلومات من نموذج تم تدريبه مسبقًا على GLUE وتقليصه. | ||||
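|  | ||||
| وفيما يلي مثال توضيحي مختصر لهذه الميزات (اسم النموذج هنا للتوضيح فقط): | ||||
|  | ||||
| ```python | ||||
| from transformers import AutoModel, AutoTokenizer | ||||
|  | ||||
| tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") | ||||
| model = AutoModel.from_pretrained( | ||||
|     "bert-base-uncased", output_hidden_states=True, output_attentions=True | ||||
| ) | ||||
|  | ||||
| inputs = tokenizer("Hello, world!", return_tensors="pt") | ||||
| outputs = model(**inputs) | ||||
|  | ||||
| print(len(outputs.hidden_states))  # embedding output + one tensor per layer | ||||
| print(len(outputs.attentions))     # one attention tensor per layer | ||||
|  | ||||
| # Prune attention heads 0 and 2 in layer 0 | ||||
| model.prune_heads({0: [0, 2]}) | ||||
| ``` | ||||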
| @ -304,7 +304,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer | ||||
| checkpoint = "NousResearch/Hermes-2-Pro-Llama-3-8B" | ||||
|  | ||||
| tokenizer = AutoTokenizer.from_pretrained(checkpoint) | ||||
| model = AutoModelForCausalLM.from_pretrained(checkpoint, dtype=torch.bfloat16, device_map="auto") | ||||
| model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.bfloat16, device_map="auto") | ||||
|  | ||||
| ```python | ||||
| messages = [ | ||||
|  | ||||
| @ -25,7 +25,7 @@ chat = [ | ||||
| import torch | ||||
| from transformers import pipeline | ||||
|  | ||||
| pipe = pipeline("text-generation", "meta-llama/Meta-Llama-3-8B-Instruct", dtype=torch.bfloat16, device_map="auto") | ||||
| pipe = pipeline("text-generation", "meta-llama/Meta-Llama-3-8B-Instruct", torch_dtype=torch.bfloat16, device_map="auto") | ||||
| response = pipe(chat, max_new_tokens=512) | ||||
| print(response[0]['generated_text'][-1]['content']) | ||||
| ``` | ||||
| @ -126,7 +126,7 @@ chat = [ | ||||
| ] | ||||
|  | ||||
| # 1: تحميل النموذج والمحلل | ||||
| model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto", dtype=torch.bfloat16) | ||||
| model = AutoModelForCausalLM.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct", device_map="auto", torch_dtype=torch.bfloat16) | ||||
| tokenizer = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct") | ||||
|  | ||||
| # 2: تطبيق قالب الدردشة | ||||
| @ -164,7 +164,7 @@ print("Decoded output:\n", decoded_output) | ||||
|  | ||||
| ### اعتبارات الذاكرة | ||||
|  | ||||
| بشكل افتراضي، تقوم فئات Hugging Face مثل [`TextGenerationPipeline`] أو [`AutoModelForCausalLM`] بتحميل النموذج في دقة "float32". وهذا يعني أنه يحتاج إلى 4 بايتات (32 بت) لكل معلمة، لذا فإن نموذج "8B" بحجم 8 مليار معلمة سيحتاج إلى ~32 جيجابايت من الذاكرة. ومع ذلك، يمكن أن يكون هذا مضيعة للموارد! يتم تدريب معظم نماذج اللغة الحديثة في دقة "bfloat16"، والتي تستخدم فقط 2 بايت لكل معلمة. إذا كان عتادك يدعم ذلك (Nvidia 30xx/Axxx أو أحدث)، فيمكنك تحميل النموذج في دقة "bfloat16"، باستخدام معامل "dtype" كما فعلنا أعلاه. | ||||
| بشكل افتراضي، تقوم فئات Hugging Face مثل [`TextGenerationPipeline`] أو [`AutoModelForCausalLM`] بتحميل النموذج في دقة "float32". وهذا يعني أنه يحتاج إلى 4 بايتات (32 بت) لكل معلمة، لذا فإن نموذج "8B" بحجم 8 مليار معلمة سيحتاج إلى ~32 جيجابايت من الذاكرة. ومع ذلك، يمكن أن يكون هذا مضيعة للموارد! يتم تدريب معظم نماذج اللغة الحديثة في دقة "bfloat16"، والتي تستخدم فقط 2 بايت لكل معلمة. إذا كان عتادك يدعم ذلك (Nvidia 30xx/Axxx أو أحدث)، فيمكنك تحميل النموذج في دقة "bfloat16"، باستخدام معامل "torch_dtype" كما فعلنا أعلاه. | ||||
|  | ||||
| ومن الممكن أيضًا النزول إلى أقل من 16 بت باستخدام "التكميم"، وهي طريقة لضغط أوزان النموذج بطريقة تفقد بعض المعلومات. يسمح هذا بضغط كل معلمة إلى 8 بتات أو 4 بتات أو حتى أقل. لاحظ أنه، خاصة في 4 بتات، قد تتأثر جودة ناتج النموذج سلبًا، ولكن غالبًا ما يكون هذا مقايضة تستحق القيام بها لتناسب نموذج محادثة أكبر وأكثر قدرة في الذاكرة. دعنا نرى كيف يمكننا تطبيق ذلك باستخدام مكتبة `bitsandbytes`: | ||||
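|  | ||||
| على سبيل التوضيح، قد يبدو التحميل بدقة 4 بتات باستخدام `bitsandbytes` على النحو التالي (قيم الإعداد واسم النموذج هنا للتوضيح فقط): | ||||
|  | ||||
| ```python | ||||
| import torch | ||||
| from transformers import AutoModelForCausalLM, BitsAndBytesConfig | ||||
|  | ||||
| quantization_config = BitsAndBytesConfig( | ||||
|     load_in_4bit=True, | ||||
|     bnb_4bit_compute_dtype=torch.bfloat16, | ||||
| ) | ||||
|  | ||||
| model = AutoModelForCausalLM.from_pretrained( | ||||
|     "meta-llama/Meta-Llama-3-8B-Instruct", | ||||
|     device_map="auto", | ||||
|     quantization_config=quantization_config, | ||||
| ) | ||||
| ``` | ||||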
|  | ||||
|  | ||||
| @ -30,7 +30,7 @@ class ResnetConfig(PretrainedConfig): | ||||
|     def __init__( | ||||
|         self, | ||||
|         block_type="bottleneck", | ||||
|         layers: list[int] = [3, 4, 6, 3], | ||||
|         layers: List[int] = [3, 4, 6, 3], | ||||
|         num_classes: int = 1000, | ||||
|         input_channels: int = 3, | ||||
|         cardinality: int = 1, | ||||
| @ -280,7 +280,7 @@ resnet50d.model.load_state_dict(pretrained_model.state_dict()) | ||||
| الآن لإرسال النموذج إلى Hub، تأكد من تسجيل الدخول. يمكنك ذلك إما بتشغيل الأمر التالي في المحطة الطرفية الخاصة بك: | ||||
|  | ||||
| ```bash | ||||
| hf auth login | ||||
| huggingface-cli login | ||||
| ``` | ||||
|  | ||||
| أو من دفتر ملاحظات: | ||||
|  | ||||
| @ -77,7 +77,7 @@ model = AutoModelForCausalLM.from_pretrained(model_id, gguf_file=filename) | ||||
|  | ||||
| الآن لديك إمكانية الوصول إلى النسخة الكاملة غير المكممة من النموذج في بيئة PyTorch، حيث يمكنك دمجه مع مجموعة كبيرة من الأدوات الأخرى. | ||||
|  | ||||
| لإعادة التحويل إلى ملف `gguf`، نوصي باستخدام ملف [`convert-hf-to-gguf.py`](https://github.com/ggerganov/llama.cpp/blob/master/convert_hf_to_gguf.py) من llama.cpp. | ||||
| لإعادة التحويل إلى ملف `gguf`، نوصي باستخدام ملف [`convert-hf-to-gguf.py`](https://github.com/ggerganov/llama.cpp/blob/master/convert-hf-to-gguf.py) من llama.cpp. | ||||
|  | ||||
| فيما يلي كيفية إكمال البرنامج النصي أعلاه لحفظ النموذج وإعادة تصديره مرة أخرى إلى `gguf`: | ||||
|  | ||||
|  | ||||
Some files were not shown because too many files have changed in this diff.