Mirror of https://github.com/huggingface/transformers.git (synced 2025-11-03 03:14:36 +08:00)

Compare commits: check_temp...ci_with_torch_version_base (114 commits)
146719af19, b759a9c9b5, 28aa9132cd, 35e3659f22, e1ac2611ce, 1eab9c3e7c, c1600a6c8e, 8e4e94783b, e8b2c60fb7, 0b2f104897,
5927114611, b6f6e04178, bc17525caf, 641efa99c3, b635c723ff, ac1bd87bd2, d63aadf140, 1332a33025, b4295c2947, d9aefd1e94,
04fecff78d, f49dc9e88e, 959a9cd678, 60b73c7b86, db6a65d105, 5bcd773b55, f81ada3019, 6b0e5e6215, 361599d284, 64fea67ca2,
fbf4cc6486, 05a67b11c2, def309661f, 6f53baecba, b793703b5b, ec1004513f, a4bee5ef1e, 347b86bba7, ae28872f89, 09d67319ea,
4863ea6c63, c71d914219, df7573fc7c, e275a3ef0f, 42c5ac4d98, c80cef092f, c309243ea1, f945d07926, ae85eddedd, a468748e28,
de97dc5e61, 249ec520e4, b68f7fde19, 90313201d1, 26dab642be, f35b87c4c9, 24968b35df, 6e4f672c56, 1ab2abb9eb, 42f7f0a453,
1dbb1f718c, 2e28f3edc5, 49688a99ac, 513da05615, 05366c5fa7, 7250f70286, 6502fbcb0d, ca2513ac91, 4861d48b46, a754e1d006,
ede36bc645, 85e00723fd, 7dae38f191, 9202969e66, e1595879b1, 1858d9a568, a84ab00df7, 7bdccbfc2e, a707d53d1e, 850eb0b419,
024abd58bc, 33da39989a, d6e0c325a9, ddbf9be141, 556554db52, 7f82dfc907, 31050ff6b7, 991b486e97, b4b5a93534, 98786d0fbe,
c0c1aba23e, 1c96ee32cd, b33b6e1aaf, d444e3dd92, ad4b39289e, a28acd9ac3, 5a088af755, 4b79de01f8, 59e28a6c70, cb6aecc057,
7f9eea42fc, d9545291d7, 86e8fe47a5, d14c94508b, 35ee0609b4, b4503e2256, 7e9b71ab75, 09d540ed9d, d3d5618789, 7372554c1b,
13d70ce9fa, 58261e0f5e, 4b26b12621, 9ec8c5f1a1
.github/workflows/check_failed_model_tests.yml (vendored, 60 lines changed)
@@ -29,7 +29,7 @@ jobs:
run_models_gpu:
name: " "
runs-on:
group: aws-g4dn-2xlarge-cache
group: aws-g4dn-4xlarge-cache
container:
image: ${{ inputs.docker }}
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/

@@ -39,55 +39,101 @@ jobs:
name: ci_results_run_models_gpu
path: /transformers/ci_results_run_models_gpu

- name: Check file
working-directory: /transformers
run: |
if [ -f ci_results_run_models_gpu/new_model_failures.json ]; then
echo "`ci_results_run_models_gpu/new_model_failures.json` exists, continue ..."
echo "process=true" >> $GITHUB_ENV
else
echo "`ci_results_run_models_gpu/new_model_failures.json` doesn't exist, abort."
echo "process=false" >> $GITHUB_ENV
fi

- uses: actions/download-artifact@v4
if: ${{ env.process == 'true' }}
with:
pattern: setup_values*
path: setup_values
merge-multiple: true

- name: Prepare some setup values
if: ${{ env.process == 'true' }}
run: |
if [ -f setup_values/prev_workflow_run_id.txt ]; then
echo "PREV_WORKFLOW_RUN_ID=$(cat setup_values/prev_workflow_run_id.txt)" >> $GITHUB_ENV
else
echo "PREV_WORKFLOW_RUN_ID=" >> $GITHUB_ENV
fi

if [ -f setup_values/other_workflow_run_id.txt ]; then
echo "OTHER_WORKFLOW_RUN_ID=$(cat setup_values/other_workflow_run_id.txt)" >> $GITHUB_ENV
else
echo "OTHER_WORKFLOW_RUN_ID=" >> $GITHUB_ENV
fi

- name: Update clone
working-directory: /transformers
if: ${{ env.process == 'true' }}
run: git fetch && git checkout ${{ github.sha }}

- name: Get target commit
working-directory: /transformers/utils
if: ${{ env.process == 'true' }}
run: |
echo "END_SHA=$(TOKEN=${{ secrets.ACCESS_REPO_INFO_TOKEN }} python3 -c 'import os; from get_previous_daily_ci import get_last_daily_ci_run_commit; commit=get_last_daily_ci_run_commit(token=os.environ["TOKEN"]); print(commit)')" >> $GITHUB_ENV
echo "END_SHA=$(TOKEN=${{ secrets.ACCESS_REPO_INFO_TOKEN }} python3 -c 'import os; from get_previous_daily_ci import get_last_daily_ci_run_commit; commit=get_last_daily_ci_run_commit(token=os.environ["TOKEN"], workflow_run_id=os.environ["PREV_WORKFLOW_RUN_ID"]); print(commit)')" >> $GITHUB_ENV

- name: Checkout to `start_sha`
working-directory: /transformers
if: ${{ env.process == 'true' }}
run: git fetch && git checkout ${{ inputs.start_sha }}

- name: Reinstall transformers in edit mode (remove the one installed during docker image build)
working-directory: /transformers
if: ${{ env.process == 'true' }}
run: python3 -m pip uninstall -y transformers && python3 -m pip install -e .

- name: NVIDIA-SMI
if: ${{ env.process == 'true' }}
run: |
nvidia-smi

- name: Environment
working-directory: /transformers
if: ${{ env.process == 'true' }}
run: |
python3 utils/print_env.py

- name: Show installed libraries and their versions
working-directory: /transformers
if: ${{ env.process == 'true' }}
run: pip freeze

- name: Check failed tests
working-directory: /transformers
run: python3 utils/check_bad_commit.py --start_commit ${{ inputs.start_sha }} --end_commit ${{ env.END_SHA }} --file ci_results_run_models_gpu/new_model_failures_temp.json --output_file new_model_failures_with_bad_commit_temp.json
if: ${{ env.process == 'true' }}
# how to run multiple ones?
run: python3 utils/check_bad_commit.py --start_commit ${{ inputs.start_sha }} --end_commit ${{ env.END_SHA }} --file ci_results_run_models_gpu/new_model_failures.json --output_file new_model_failures_with_bad_commit.json

- name: Show results
working-directory: /transformers
if: ${{ env.process == 'true' }}
run: |
ls -l new_model_failures_with_bad_commit_temp.json
cat new_model_failures_with_bad_commit_temp.json
ls -l new_model_failures_with_bad_commit.json
cat new_model_failures_with_bad_commit.json

- name: Checkout back
working-directory: /transformers
if: ${{ env.process == 'true' }}
run: |
git checkout ${{ inputs.start_sha }}

- name: Process report
shell: bash
working-directory: /transformers
if: ${{ env.process == 'true' }}
env:
ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN: ${{ secrets.TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN }}
run: |
python3 utils/process_bad_commit_report.py

@@ -95,7 +141,9 @@ jobs:
- name: Process report
shell: bash
working-directory: /transformers
if: ${{ env.process == 'true' }}
env:
ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN: ${{ secrets.TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN }}
run: |
{

@@ -105,7 +153,7 @@ jobs:
} >> "$GITHUB_ENV"

- name: Send processed report
if: ${{ !endsWith(env.REPORT_TEXT, '{}') }}
if: ${{ env.process == 'true' && !endsWith(env.REPORT_TEXT, '{}') }}
uses: slackapi/slack-github-action@6c661ce58804a1a20f6dc5fbee7f0381b469e001
with:
# Slack channel id, channel name, or user id to post message.
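The steps above only run when the earlier report job produced `ci_results_run_models_gpu/new_model_failures.json`, which `check_bad_commit.py` then consumes. As a rough illustration, and assuming the structure assembled by `notification_service.py` later in this diff, such a file maps a model folder to per-device lists of failing tests; the test name below is a placeholder, not real CI output.

```python
# Illustrative sketch of the expected new_model_failures.json shape (placeholder data).
import json

new_model_failures = {
    "vit": {
        "single-gpu": ["tests/models/vit/test_modeling_vit.py::ViTModelTest::test_model"],
        "multi-gpu": [],
    },
}

with open("new_model_failures.json", "w", encoding="UTF-8") as fp:
    json.dump(new_model_failures, fp, ensure_ascii=False, indent=4)
```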
.github/workflows/model_jobs.yml (vendored, 8 lines changed)
@@ -93,9 +93,13 @@ jobs:
run: |
python3 utils/print_env.py

- name: Installed torch 2.7 RC
# - name: Installed torch 2.7.0
# working-directory: /transformers
# run: python3 -m pip install torch==2.7.0 torchvision torchaudio

- name: Installed torch 2.7.1 RC
working-directory: /transformers
run: python3 -m pip install torch==2.7.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/test/cu126
run: python3 -m pip install torch==2.7.1 torchvision torchaudio --index-url https://download.pytorch.org/whl/test/cu126

- name: Show installed libraries and their versions
working-directory: /transformers
.github/workflows/self-scheduled-caller.yml (vendored, 95 lines changed)
@@ -7,72 +7,51 @@ on:
# - cron: "17 2 * * *"
push:
branches:
- check_temp
- ci_with_torch_version_base
workflow_dispatch:
inputs:
prev_workflow_run_id:
description: 'previous workflow run id to compare'
type: string
required: false
default: ""
other_workflow_run_id:
description: 'other workflow run id to compare'
type: string
required: false
default: ""

# Used for `push` to easily modiffy the target workflow runs to compare against
env:
prev_workflow_run_id: ""
other_workflow_run_id: "15084441438"

jobs:
setup:
name: Setup
runs-on: ubuntu-22.04
steps:
- name: Setup
run: |
mkdir "setup_values"
echo "${{ inputs.prev_workflow_run_id || env.prev_workflow_run_id }}" > "setup_values/prev_workflow_run_id.txt"
echo "${{ inputs.other_workflow_run_id || env.other_workflow_run_id }}" > "setup_values/other_workflow_run_id.txt"

- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: setup_values
path: setup_values

model-ci:
name: Model CI
uses: ./.github/workflows/self-scheduled.yml
with:
job: run_models_gpu
slack_report_channel: "#transformers-ci-daily-models"
slack_report_channel: "#transformers-ci-dummy"
runner: daily-ci
docker: huggingface/transformers-all-latest-gpu
ci_event: Daily CI
secrets: inherit
#
# torch-pipeline:
# name: Torch pipeline CI
# uses: ./.github/workflows/self-scheduled.yml
# with:
# job: run_pipelines_torch_gpu
# slack_report_channel: "#transformers-ci-daily-pipeline-torch"
# runner: daily-ci
# docker: huggingface/transformers-pytorch-gpu
# ci_event: Daily CI
# secrets: inherit
#
# tf-pipeline:
# name: TF pipeline CI
# uses: ./.github/workflows/self-scheduled.yml
# with:
# job: run_pipelines_tf_gpu
# slack_report_channel: "#transformers-ci-daily-pipeline-tf"
# runner: daily-ci
# docker: huggingface/transformers-tensorflow-gpu
# ci_event: Daily CI
# secrets: inherit
#
# example-ci:
# name: Example CI
# uses: ./.github/workflows/self-scheduled.yml
# with:
# job: run_examples_gpu
# slack_report_channel: "#transformers-ci-daily-examples"
# runner: daily-ci
# docker: huggingface/transformers-all-latest-gpu
# ci_event: Daily CI
# secrets: inherit
#
# deepspeed-ci:
# name: DeepSpeed CI
# uses: ./.github/workflows/self-scheduled.yml
# with:
# job: run_torch_cuda_extensions_gpu
# slack_report_channel: "#transformers-ci-daily-deepspeed"
# runner: daily-ci
# docker: huggingface/transformers-pytorch-deepspeed-latest-gpu
# ci_event: Daily CI
# working-directory-prefix: /workspace
# secrets: inherit
#
# quantization-ci:
# name: Quantization CI
# uses: ./.github/workflows/self-scheduled.yml
# with:
# job: run_quantization_torch_gpu
# slack_report_channel: "#transformers-ci-daily-quantization"
# runner: daily-ci
# docker: huggingface/transformers-quantization-latest-gpu
# ci_event: Daily CI
# secrets: inherit
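The setup job above writes the two run ids into `setup_values/*.txt` and uploads them as an artifact; downstream jobs (see check_failed_model_tests.yml and slack-report.yml earlier and later in this diff) read them back into environment variables, falling back to an empty string when a file is missing. The workflows do this in shell; the sketch below mirrors that read-or-default logic in Python, with `read_setup_value` being an illustrative helper name, not something in the repo.

```python
# Sketch of the "read setup value or fall back to empty" logic used by the workflow steps.
from pathlib import Path


def read_setup_value(name: str) -> str:
    path = Path("setup_values") / f"{name}.txt"
    return path.read_text().strip() if path.exists() else ""


prev_workflow_run_id = read_setup_value("prev_workflow_run_id")
other_workflow_run_id = read_setup_value("other_workflow_run_id")
```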
.github/workflows/self-scheduled.yml (vendored, 85 lines changed)
@@ -29,6 +29,7 @@ on:
required: false
type: string

env:
HF_HOME: /mnt/cache
TRANSFORMERS_IS_CI: yes

@@ -89,7 +90,7 @@ jobs:
name: Identify quantization method to test
working-directory: /transformers/tests
run: |
echo "quantization_matrix=$(python3 -c 'import os; tests = os.getcwd(); quantization_tests = os.listdir(os.path.join(tests, "quantization")); d = sorted(list(filter(os.path.isdir, [f"quantization/{x}" for x in quantization_tests]))) ; print(d)')" >> $GITHUB_OUTPUT
echo "quantization_matrix=$(python3 -c 'import os; tests = os.getcwd(); quantization_tests = os.listdir(os.path.join(tests, "quantization")); d = sorted(list(filter(os.path.isdir, [f"quantization/{x}" for x in quantization_tests]))) ; print(["quantization/autoawq"])')" >> $GITHUB_OUTPUT

- name: NVIDIA-SMI
run: |

@@ -495,46 +496,46 @@ jobs:
name: ${{ env.machine_type }}_run_quantization_torch_gpu_${{ env.matrix_folders }}_test_reports
path: /transformers/reports/${{ env.machine_type }}_run_quantization_torch_gpu_${{ matrix.folders }}_test_reports

run_extract_warnings:
# Let's only do this for the job `run_models_gpu` to simplify the (already complex) logic.
if: ${{ always() && inputs.job == 'run_models_gpu' }}
name: Extract warnings in CI artifacts
runs-on: ubuntu-22.04
needs: [setup, run_models_gpu]
steps:
- name: Checkout transformers
uses: actions/checkout@v4
with:
fetch-depth: 2

- name: Install transformers
run: pip install transformers

- name: Show installed libraries and their versions
run: pip freeze

- name: Create output directory
run: mkdir warnings_in_ci

- uses: actions/download-artifact@v4
with:
path: warnings_in_ci

- name: Show artifacts
run: echo "$(python3 -c 'import os; d = os.listdir(); print(d)')"
working-directory: warnings_in_ci

- name: Extract warnings in CI artifacts
run: |
python3 utils/extract_warnings.py --workflow_run_id ${{ github.run_id }} --output_dir warnings_in_ci --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} --from_gh
echo "$(python3 -c 'import os; import json; fp = open("warnings_in_ci/selected_warnings.json"); d = json.load(fp); d = "\n".join(d) ;print(d)')"

- name: Upload artifact
if: ${{ always() }}
uses: actions/upload-artifact@v4
with:
name: warnings_in_ci
path: warnings_in_ci/selected_warnings.json
# run_extract_warnings:
# # Let's only do this for the job `run_models_gpu` to simplify the (already complex) logic.
# if: ${{ always() && inputs.job == 'run_models_gpu' }}
# name: Extract warnings in CI artifacts
# runs-on: ubuntu-22.04
# needs: [setup, run_models_gpu]
# steps:
# - name: Checkout transformers
# uses: actions/checkout@v4
# with:
# fetch-depth: 2
#
# - name: Install transformers
# run: pip install transformers
#
# - name: Show installed libraries and their versions
# run: pip freeze
#
# - name: Create output directory
# run: mkdir warnings_in_ci
#
# - uses: actions/download-artifact@v4
# with:
# path: warnings_in_ci
#
# - name: Show artifacts
# run: echo "$(python3 -c 'import os; d = os.listdir(); print(d)')"
# working-directory: warnings_in_ci
#
# - name: Extract warnings in CI artifacts
# run: |
# python3 utils/extract_warnings.py --workflow_run_id ${{ github.run_id }} --output_dir warnings_in_ci --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} --from_gh
# echo "$(python3 -c 'import os; import json; fp = open("warnings_in_ci/selected_warnings.json"); d = json.load(fp); d = "\n".join(d) ;print(d)')"
#
# - name: Upload artifact
# if: ${{ always() }}
# uses: actions/upload-artifact@v4
# with:
# name: warnings_in_ci
# path: warnings_in_ci/selected_warnings.json

send_results:
name: Slack Report

@@ -546,7 +547,7 @@ jobs:
run_examples_gpu,
run_torch_cuda_extensions_gpu,
run_quantization_torch_gpu,
run_extract_warnings
# run_extract_warnings
]
if: ${{ always() }}
uses: ./.github/workflows/slack-report.yml
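For readability, this is roughly what the `quantization_matrix` one-liner above computes; the edited version short-circuits the discovery and prints `["quantization/autoawq"]` instead. The sketch assumes it runs from `/transformers/tests`, as in the job.

```python
# The quantization_matrix one-liner, unrolled: list tests/quantization/<method> folders.
import os

tests = os.getcwd()  # expected to be /transformers/tests inside the job container
quantization_tests = os.listdir(os.path.join(tests, "quantization"))
candidates = [f"quantization/{x}" for x in quantization_tests]
quantization_matrix = sorted(filter(os.path.isdir, candidates))
print(quantization_matrix)
```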
.github/workflows/slack-report.yml (vendored, 22 lines changed)
@@ -39,6 +39,21 @@ jobs:

- uses: actions/checkout@v4
- uses: actions/download-artifact@v4

- name: Prepare some setup values
run: |
if [ -f setup_values/prev_workflow_run_id.txt ]; then
echo "PREV_WORKFLOW_RUN_ID=$(cat setup_values/prev_workflow_run_id.txt)" >> $GITHUB_ENV
else
echo "PREV_WORKFLOW_RUN_ID=" >> $GITHUB_ENV
fi

if [ -f setup_values/other_workflow_run_id.txt ]; then
echo "OTHER_WORKFLOW_RUN_ID=$(cat setup_values/other_workflow_run_id.txt)" >> $GITHUB_ENV
else
echo "OTHER_WORKFLOW_RUN_ID=" >> $GITHUB_ENV
fi

- name: Send message to Slack
if: ${{ inputs.job != 'run_quantization_torch_gpu' }}
env:

@@ -50,7 +65,6 @@ jobs:
ACCESS_REPO_INFO_TOKEN: ${{ secrets.ACCESS_REPO_INFO_TOKEN }}
CI_EVENT: ${{ inputs.ci_event }}
CI_SHA: ${{ github.sha }}
CI_WORKFLOW_REF: ${{ github.workflow_ref }}
CI_TEST_JOB: ${{ inputs.job }}
SETUP_STATUS: ${{ inputs.setup_status }}
# We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change

@@ -58,7 +72,10 @@ jobs:
# For a job that doesn't depend on (i.e. `needs`) `setup`, the value for `inputs.folder_slices` would be an
# empty string, and the called script still get one argument (which is the emtpy string).
run: |
sudo apt-get install -y curl
echo "$PREV_WORKFLOW_RUN_ID"
echo "$OTHER_WORKFLOW_RUN_ID"
echo "$prev_workflow_run_id"
echo "$other_workflow_run_id"
pip install huggingface_hub
pip install slack_sdk
pip show slack_sdk

@@ -86,7 +103,6 @@ jobs:
# We pass `needs.setup.outputs.quantization_matrix` as the argument. A processing in `notification_service_quantization.py` to change
# `quantization/bnb` to `quantization_bnb` is required, as the artifact names use `_` instead of `/`.
run: |
sudo apt-get install -y curl
pip install huggingface_hub
pip install slack_sdk
pip show slack_sdk
@@ -258,7 +258,7 @@ class NewTaskModelForNewTask(NewTaskModelPreTrainedModel, GenerationMixin):
input_tensor,
is_training: bool = False,
):
if self.language_model.config._attn_implementation == "flash_attention_2":
if self.config.text_config._attn_implementation == "flash_attention_2":
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None

@@ -55,3 +55,4 @@ markers = [
]
log_cli = 1
log_cli_level = "WARNING"
asyncio_default_fixture_loop_scope = "function"
@@ -381,7 +381,7 @@ class DepthProImageEncoder(nn.Module):
batch_size, num_channels, height, width = pixel_values.shape

# scale the image for image_encoder
size = self.model.config.image_size
size = self.config.image_model_config.image_size
pixel_values = F.interpolate(
pixel_values,
size=(size, size),

@@ -1213,9 +1213,9 @@ class DPTForDepthEstimation(DPTPreTrainedModel):
hidden_states = backbone_hidden_states

patch_height, patch_width = None, None
if self.backbone is not None and self.config.is_hybrid is False:
if self.config.backbone_config is not None and self.config.is_hybrid is False:
_, _, height, width = pixel_values.shape
patch_size = self.backbone.config.patch_size
patch_size = self.config.backbone_config.patch_size
patch_height = height // patch_size
patch_width = width // patch_size
@@ -1114,7 +1114,7 @@ class Gemma3ForConditionalGeneration(Gemma3PreTrainedModel, GenerationMixin):
input_tensor,
is_training: bool = False,
):
if self.language_model.config._attn_implementation == "flash_attention_2":
if self.config.text_config._attn_implementation == "flash_attention_2":
return attention_mask

if attention_mask is not None and attention_mask.dim() == 4:

@@ -788,7 +788,7 @@ class Gemma3ForConditionalGeneration(PaliGemmaForConditionalGeneration):
input_tensor,
is_training: bool = False,
):
if self.language_model.config._attn_implementation == "flash_attention_2":
if self.config.text_config._attn_implementation == "flash_attention_2":
return attention_mask

if attention_mask is not None and attention_mask.dim() == 4:

@@ -341,7 +341,7 @@ class PaliGemmaForConditionalGeneration(PaliGemmaPreTrainedModel, GenerationMixin):
input_tensor=None,
is_training: Optional[bool] = None,
):
if self.language_model.config._attn_implementation == "flash_attention_2":
if self.config.text_config._attn_implementation == "flash_attention_2":
if attention_mask is not None and 0.0 in attention_mask:
return attention_mask
return None
@@ -314,8 +314,8 @@ class VitPoseForPoseEstimation(VitPosePreTrainedModel):
# Turn output hidden states in tensor of shape (batch_size, num_channels, height, width)
sequence_output = outputs.feature_maps[-1] if return_dict else outputs[0][-1]
batch_size = sequence_output.shape[0]
patch_height = self.backbone.config.image_size[0] // self.backbone.config.patch_size[0]
patch_width = self.backbone.config.image_size[1] // self.backbone.config.patch_size[1]
patch_height = self.config.backbone_config.image_size[0] // self.config.backbone_config.patch_size[0]
patch_width = self.config.backbone_config.image_size[1] // self.config.backbone_config.patch_size[1]
sequence_output = (
sequence_output.permute(0, 2, 1).reshape(batch_size, -1, patch_height, patch_width).contiguous()
)

@@ -193,8 +193,6 @@ class BackboneMixin:
else:
raise ValueError(f"backbone_type {self.backbone_type} not supported.")

self._forward_signature = dict(inspect.signature(self.forward).parameters)

@property
def out_features(self):
return self._out_features

@@ -232,7 +230,8 @@ class BackboneMixin:
return [self.out_feature_channels[name] for name in self.out_features]

def forward_with_filtered_kwargs(self, *args, **kwargs):
filtered_kwargs = {k: v for k, v in kwargs.items() if k in self._forward_signature}
signature = dict(inspect.signature(self.forward).parameters)
filtered_kwargs = {k: v for k, v in kwargs.items() if k in signature}
return self(*args, **filtered_kwargs)

def forward(
@@ -66,9 +66,6 @@ class LogitsProcessorTest(unittest.TestCase):
scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
return scores

def test_foo(self):
assert 1 == 2

def test_min_length_dist_processor(self):
vocab_size = 20
batch_size = 4

@@ -221,6 +221,7 @@ class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
super().test_multi_gpu_data_parallel_forward()

def test_config(self):
# assert 1 == 2
self.config_tester.run_common_tests()

@unittest.skip(reason="ViT does not use inputs_embeds")

@@ -236,7 +237,12 @@ class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
x = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(x, nn.Linear))

def test_foo(self):
pass
# assert 1 == 4

def test_model(self):
assert 1 == 3
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
@@ -38,6 +38,8 @@ def create_script(target_test):
import os
import subprocess

os.system("echo 'asyncio_default_fixture_loop_scope = "function"' >> pyproject.toml")

result = subprocess.run(
["python3", "-m", "pytest", "-v", f"{target_test}"],
capture_output = True,

@@ -45,6 +47,8 @@ result = subprocess.run(
)
print(result.stdout)

os.system("git checkout -- .")

if len(result.stderr) > 0:
if "ERROR: file or directory not found: " in result.stderr:
print("test file or directory not found in this commit")

@@ -52,9 +56,6 @@ if len(result.stderr) > 0:
elif "ERROR: not found: " in result.stderr:
print("test not found in this commit")
exit(0)
elif "ERROR: not found: " in result.stderr:
print("test not found in this commit")
exit(0)
else:
print(f"pytest failed to run: {{result.stderr}}")
exit(-1)
@@ -1,3 +1,4 @@
import json
import os
import zipfile

@@ -5,7 +6,7 @@ import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links

def get_daily_ci_runs(token, num_runs=7):
def get_daily_ci_runs(token, num_runs=7, workflow_id=None):
"""Get the workflow runs of the scheduled (daily) CI.

This only selects the runs triggered by the `schedule` event on the `main` branch.

@@ -18,7 +19,11 @@ def get_daily_ci_runs(token, num_runs=7):
# From a given workflow run (where we have workflow run id), we can get the workflow id by going to
# https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}
# and check the `workflow_id` key.
workflow_id = "90575235"
# workflow_id = "90575235"
if not workflow_id:
workflow_run_id = os.environ['GITHUB_RUN_ID']
workflow_run = requests.get(f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}", headers=headers).json()
workflow_id = workflow_run["workflow_id"]

url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results

@@ -29,34 +34,56 @@ def get_daily_ci_runs(token, num_runs=7):
return result["workflow_runs"]

def get_last_daily_ci_runs(token):
def get_last_daily_ci_run(token, workflow_run_id=None, workflow_id=None, commit_sha=None):
"""Get the last completed workflow run id of the scheduled (daily) CI."""
return "14277576462"
workflow_runs = get_daily_ci_runs(token)
workflow_run_id = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
workflow_run_id = workflow_run["id"]
headers = None
if token is not None:
headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

workflow_run = None
if workflow_run_id is not None and workflow_run_id != "":
workflow_run = requests.get(f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}", headers=headers).json()
return workflow_run

workflow_runs = get_daily_ci_runs(token, workflow_id=workflow_id)
for run in workflow_runs:
if commit_sha in [None, ""] and run["status"] == "completed":
workflow_run = run
break
# if `commit_sha` is specified, and `workflow_run["head_sha"]` matches it, return it.
elif commit_sha not in [None, ""] and run["head_sha"] == commit_sha:
workflow_run = run
break

return workflow_run

def get_last_daily_ci_workflow_run_id(token, workflow_run_id=None, workflow_id=None, commit_sha=None):
"""Get the last completed workflow run id of the scheduled (daily) CI."""
if workflow_run_id is not None and workflow_run_id != "":
return workflow_run_id

workflow_run = get_last_daily_ci_run(token, workflow_id=workflow_id, commit_sha=commit_sha)
workflow_run_id = None
if workflow_run is not None:
workflow_run_id = workflow_run["id"]

return workflow_run_id

def get_last_daily_ci_run_commit(token):
def get_last_daily_ci_run_commit(token, workflow_run_id=None, workflow_id=None, commit_sha=None):
"""Get the commit sha of the last completed scheduled daily CI workflow run."""
workflow_runs = get_daily_ci_runs(token)
head_sha = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
head_sha = workflow_run["head_sha"]
break
workflow_run = get_last_daily_ci_run(token, workflow_run_id=workflow_run_id, workflow_id=workflow_id, commit_sha=commit_sha)
workflow_run_head_sha = None
if workflow_run is not None:
workflow_run_head_sha = workflow_run["head_sha"]

return head_sha
return workflow_run_head_sha

def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
def get_last_daily_ci_artifacts(artifact_names, output_dir, token, workflow_run_id=None, workflow_id=None, commit_sha=None):
"""Get the artifacts of last completed workflow run id of the scheduled (daily) CI."""
workflow_run_id = get_last_daily_ci_runs(token)
workflow_run_id = get_last_daily_ci_workflow_run_id(token, workflow_run_id=workflow_run_id, workflow_id=workflow_id, commit_sha=commit_sha)
if workflow_run_id is not None:
artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
for artifact_name in artifact_names:

@@ -67,9 +94,9 @@ def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
)

def get_last_daily_ci_reports(artifact_names, output_dir, token):
def get_last_daily_ci_reports(artifact_names, output_dir, token, workflow_run_id=None, workflow_id=None, commit_sha=None):
"""Get the artifacts' content of the last completed workflow run id of the scheduled (daily) CI."""
get_last_daily_ci_artifacts(artifact_names, output_dir, token)
get_last_daily_ci_artifacts(artifact_names, output_dir, token, workflow_run_id=workflow_run_id, workflow_id=workflow_id, commit_sha=commit_sha)

results = {}
for artifact_name in artifact_names:
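Assuming the refactored signatures above, here is a hedged sketch of how the `get_previous_daily_ci` helpers compose: an explicit run id wins, otherwise the latest completed run of a workflow is used, and a run can also be matched by commit SHA. The environment variable names follow the workflow files earlier in this diff, and "90575235" is the workflow id mentioned in the script's own comment.

```python
# Usage sketch only; not part of the diff above.
import os

from get_previous_daily_ci import (
    get_last_daily_ci_run,
    get_last_daily_ci_run_commit,
    get_last_daily_ci_workflow_run_id,
)

token = os.environ["ACCESS_REPO_INFO_TOKEN"]

# 1) Resolve a run id: PREV_WORKFLOW_RUN_ID (if set) wins over workflow discovery.
run_id = get_last_daily_ci_workflow_run_id(
    token, workflow_run_id=os.environ.get("PREV_WORKFLOW_RUN_ID", ""), workflow_id="90575235"
)

# 2) Commit of that run; check_failed_model_tests.yml exports this as END_SHA.
end_sha = get_last_daily_ci_run_commit(token, workflow_run_id=run_id)

# 3) Or fetch the run of another workflow that was triggered on the same commit.
matching_run = get_last_daily_ci_run(token, workflow_id="90575235", commit_sha=end_sha)
```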
@@ -26,7 +26,7 @@ from typing import Dict, List, Optional, Union

import requests
from get_ci_error_statistics import get_jobs
from get_previous_daily_ci import get_last_daily_ci_reports
from get_previous_daily_ci import get_last_daily_ci_reports, get_last_daily_ci_workflow_run_id, get_last_daily_ci_run
from huggingface_hub import HfApi
from slack_sdk import WebClient

@@ -109,6 +109,7 @@ class Message:
additional_results: Dict,
selected_warnings: List = None,
prev_ci_artifacts=None,
other_ci_artifacts=None,
):
self.title = title
self.ci_title = ci_title

@@ -159,6 +160,7 @@ class Message:
self.selected_warnings = selected_warnings

self.prev_ci_artifacts = prev_ci_artifacts
self.other_ci_artifacts = other_ci_artifacts

@property
def time(self) -> str:
@@ -515,71 +517,80 @@ class Message:
if len(self.selected_warnings) > 0:
blocks.append(self.warnings)

new_failure_blocks = self.get_new_model_failure_blocks(with_header=False)
if len(new_failure_blocks) > 0:
blocks.extend(new_failure_blocks)
for idx, (prev_workflow_run_id, prev_ci_artifacts) in enumerate([self.prev_ci_artifacts] + self.other_ci_artifacts):

# To save the list of new model failures
extra_blocks = self.get_new_model_failure_blocks(to_truncate=False)
if extra_blocks:
failure_text = extra_blocks[-1]["text"]["text"]
file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures_temp.txt")
with open(file_path, "w", encoding="UTF-8") as fp:
fp.write(failure_text)
if idx == 0:
# This is to show on slack. For now, let's only show this for the same workflow id
new_failure_blocks = self.get_new_model_failure_blocks(prev_ci_artifacts=prev_ci_artifacts, with_header=False)
if len(new_failure_blocks) > 0:
blocks.extend(new_failure_blocks)

# upload results to Hub dataset
file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures_temp.txt")
commit_info = api.upload_file(
path_or_fileobj=file_path,
path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/new_model_failures_temp.txt",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/new_model_failures_temp.txt"
# To save the list of new model failures and uploaed to hub repositories
extra_blocks = self.get_new_model_failure_blocks(prev_ci_artifacts=prev_ci_artifacts, to_truncate=False)
if extra_blocks:

# extra processing to save to json format
new_failed_tests = {}
for line in failure_text.split():
if "https://github.com/huggingface/transformers/actions/runs" in line:
pattern = r"<(https://github.com/huggingface/transformers/actions/runs/.+?/job/.+?)\|(.+?)>"
items = re.findall(pattern, line)
elif "tests/" in line:
if "tests/models/" in line:
filename = "new_model_failures"
if idx > 0:
filename = f"{filename}_against_{prev_workflow_run_id}"

# prepare the file name `new_model_failures` or `new_model_failures_against_{}`

failure_text = extra_blocks[-1]["text"]["text"]
file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/{filename}.txt")
with open(file_path, "w", encoding="UTF-8") as fp:
fp.write(failure_text)

# upload results to Hub dataset
file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/{filename}.txt")
commit_info = api.upload_file(
path_or_fileobj=file_path,
path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/{filename}.txt",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{report_repo_folder}/ci_results_{job_name}/{filename}.txt"

# extra processing to save to json format
new_failed_tests = {}
for line in failure_text.split():
if "https://github.com/huggingface/transformers/actions/runs" in line:
pattern = r"<(https://github.com/huggingface/transformers/actions/runs/.+?/job/.+?)\|(.+?)>"
items = re.findall(pattern, line)
elif "tests/models/" in line:
model = line.split("/")[2]
else:
model = line.split("/")[1]
if model not in new_failed_tests:
new_failed_tests[model] = {"single-gpu": [], "multi-gpu": []}
for url, device in items:
new_failed_tests[model][f"{device}-gpu"].append(line)
file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures_temp.json")
with open(file_path, "w", encoding="UTF-8") as fp:
json.dump(new_failed_tests, fp, ensure_ascii=False, indent=4)
if model not in new_failed_tests:
new_failed_tests[model] = {"single-gpu": [], "multi-gpu": []}
for url, device in items:
new_failed_tests[model][f"{device}-gpu"].append(line)
file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/{filename}.json")
with open(file_path, "w", encoding="UTF-8") as fp:
json.dump(new_failed_tests, fp, ensure_ascii=False, indent=4)

# upload results to Hub dataset
file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/new_model_failures_temp.json")
_ = api.upload_file(
path_or_fileobj=file_path,
path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/new_model_failures_temp.json",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
# upload results to Hub dataset
file_path = os.path.join(os.getcwd(), f"ci_results_{job_name}/{filename}.json")
_ = api.upload_file(
path_or_fileobj=file_path,
path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/{filename}.json",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)

block = {
"type": "section",
"text": {
"type": "plain_text",
"text": " ",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check New model failures"},
"url": url,
},
}
blocks.append(block)
if idx == 0:
block = {
"type": "section",
"text": {
"type": "plain_text",
"text": " ",
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check New model failures"},
"url": url,
},
}
blocks.append(block)

return json.dumps(blocks)
@@ -647,6 +658,8 @@ class Message:

def post(self):
payload = self.payload
return

print("Sending the following payload")
print(json.dumps({"blocks": json.loads(payload)}))

@@ -700,18 +713,18 @@ class Message:
{"type": "section", "text": {"type": "mrkdwn", "text": failure_text}},
]

def get_new_model_failure_blocks(self, with_header=True, to_truncate=True):
if self.prev_ci_artifacts is None:
def get_new_model_failure_blocks(self, prev_ci_artifacts, with_header=True, to_truncate=True):
if prev_ci_artifacts is None:
return []

sorted_dict = sorted(self.model_results.items(), key=lambda t: t[0])

prev_model_results = {}
if (
f"ci_results_{job_name}" in self.prev_ci_artifacts
and "model_results.json" in self.prev_ci_artifacts[f"ci_results_{job_name}"]
f"ci_results_{job_name}" in prev_ci_artifacts
and "model_results.json" in prev_ci_artifacts[f"ci_results_{job_name}"]
):
prev_model_results = json.loads(self.prev_ci_artifacts[f"ci_results_{job_name}"]["model_results.json"])
prev_model_results = json.loads(prev_ci_artifacts[f"ci_results_{job_name}"]["model_results.json"])

all_failure_lines = {}
for job, job_result in sorted_dict:

@@ -812,20 +825,6 @@ class Message:

time.sleep(1)

blocks = self.get_new_model_failure_blocks()
if blocks:
print("Sending the following reply")
print(json.dumps({"blocks": blocks}))

client.chat_postMessage(
channel=SLACK_REPORT_CHANNEL_ID,
text="Results for new failures",
blocks=blocks,
thread_ts=self.thread_ts["ts"],
)

time.sleep(1)

def retrieve_artifact(artifact_path: str, gpu: Optional[str]):
if gpu not in [None, "single", "multi"]:
@@ -1152,6 +1151,21 @@ if __name__ == "__main__":
# Remove some entries in `additional_files` if they are not concerned.
test_name = None
job_name = os.getenv("CI_TEST_JOB")
report_repo_subfolder = ""
if os.getenv("GITHUB_EVENT_NAME") != "schedule":
# use workflow run id (if it is not a scheduled run)
report_repo_subfolder = f"{os.getenv('GITHUB_RUN_NUMBER')}-{os.getenv('GITHUB_RUN_ID')}"
report_repo_subfolder = f"runs/{report_repo_subfolder}"

workflow_run = get_last_daily_ci_run(token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_run_id=os.getenv('GITHUB_RUN_ID'))
workflow_run_created_time = workflow_run["created_at"]
workflow_id = workflow_run["workflow_id"]

report_repo_folder = workflow_run_created_time.split("T")[0]

if report_repo_subfolder:
report_repo_folder = f"{report_repo_folder}/{report_repo_subfolder}"

if job_name in job_to_test_map:
test_name = job_to_test_map[job_name]
additional_files = {k: v for k, v in additional_files.items() if k == test_name}

@@ -1221,8 +1235,10 @@ if __name__ == "__main__":
if not os.path.isdir(os.path.join(os.getcwd(), f"ci_results_{job_name}")):
os.makedirs(os.path.join(os.getcwd(), f"ci_results_{job_name}"))

target_workflow = "huggingface/transformers/.github/workflows/self-scheduled-caller.yml@refs/heads/main"
is_scheduled_ci_run = os.environ.get("CI_WORKFLOW_REF") == target_workflow
nvidia_daily_ci_workflow = "huggingface/transformers/.github/workflows/self-scheduled-caller.yml"
is_nvidia_daily_ci_workflow = os.environ.get("GITHUB_WORKFLOW_REF").startswith(nvidia_daily_ci_workflow)
is_scheduled_ci_run = os.environ.get("GITHUB_EVENT_NAME") == "schedule"
# TODO: remove this one
is_scheduled_ci_run = True

# Only the model testing job is concerned: this condition is to avoid other jobs to upload the empty list as

@@ -1231,15 +1247,13 @@ if __name__ == "__main__":
with open(f"ci_results_{job_name}/model_results.json", "w", encoding="UTF-8") as fp:
json.dump(model_results, fp, indent=4, ensure_ascii=False)

# upload results to Hub dataset (only for the scheduled daily CI run on `main`)
# if is_scheduled_ci_run:
# api.upload_file(
# path_or_fileobj=f"ci_results_{job_name}/model_results.json",
# path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/model_results.json",
# repo_id="hf-internal-testing/transformers_daily_ci",
# repo_type="dataset",
# token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
# )
api.upload_file(
path_or_fileobj=f"ci_results_{job_name}/model_results.json",
path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/model_results.json",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)

# Must have the same keys as in `additional_results`.
# The values are used as the file names where to save the corresponding CI job results.

@@ -1253,26 +1267,66 @@ if __name__ == "__main__":
with open(f"ci_results_{job_name}/{test_to_result_name[job]}_results.json", "w", encoding="UTF-8") as fp:
json.dump(job_result, fp, indent=4, ensure_ascii=False)

# upload results to Hub dataset (only for the scheduled daily CI run on `main`)
# if is_scheduled_ci_run:
# api.upload_file(
# path_or_fileobj=f"ci_results_{job_name}/{test_to_result_name[job]}_results.json",
# path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/{test_to_result_name[job]}_results.json",
# repo_id="hf-internal-testing/transformers_daily_ci",
# repo_type="dataset",
# token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
# )
api.upload_file(
path_or_fileobj=f"ci_results_{job_name}/{test_to_result_name[job]}_results.json",
path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/{test_to_result_name[job]}_results.json",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)

prev_ci_artifacts = None
if is_scheduled_ci_run:
# scheduled daily CI --> compared to the previous run
# - scheduled AMD CI or other CI --> compared to nvidia scheduled daily CI run
# push CI / workflow_dispatch --> need specific workflow runs specified
# - might want to specify multiple runs
# - could be no runs specified

prev_workflow_run_id = None
other_workflow_run_ids = []

if not is_scheduled_ci_run:
# TODO: remove `if job_name == "run_models_gpu"`
if job_name == "run_models_gpu":
# Get the last previously completed CI's failure tables
artifact_names = [f"ci_results_{job_name}"]
output_dir = os.path.join(os.getcwd(), "previous_reports")
os.makedirs(output_dir, exist_ok=True)
prev_ci_artifacts = get_last_daily_ci_reports(
artifact_names=artifact_names, output_dir=output_dir, token=os.environ["ACCESS_REPO_INFO_TOKEN"]
)
# This is the previous completed scheduled run
prev_workflow_run_id = get_last_daily_ci_workflow_run_id(token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_id=workflow_id)
print("11111")
print(prev_workflow_run_id)
# For a scheduled run that is not the Nvidia's scheduled daily CI, let's add Nvidia's scheduled daily CI as a target to compare.
if True or not is_nvidia_daily_ci_workflow:
# The id of the workflow `.github/workflows/self-scheduled-caller.yml` (not of a workflow run of it).
other_workflow_id = "90575235"
# We need to get the Nvidia's scheduled daily CI run that match the current run (i.e. run with the same commit SHA)
other_workflow_run_id = get_last_daily_ci_workflow_run_id(token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_id=other_workflow_id, commit_sha="0f77ca72cae3565632bafd7e06080b2c19920f06")
print("2222")
print(other_workflow_run_id)
other_workflow_run_ids.append(other_workflow_run_id)
else:
prev_workflow_run_id = os.environ["PREV_WORKFLOW_RUN_ID"]
other_workflow_run_id = os.environ["OTHER_WORKFLOW_RUN_ID"]
other_workflow_run_ids.append(other_workflow_run_id)

prev_ci_artifacts = (None, None)
other_ci_artifacts = []

for idx, target_workflow_run_id in enumerate([prev_workflow_run_id] + other_workflow_run_ids):
if target_workflow_run_id is None or target_workflow_run_id == "":
continue
else:
# Get the last previously completed CI's failure tables
artifact_names = [f"ci_results_{job_name}"]
output_dir = os.path.join(os.getcwd(), "previous_reports")
os.makedirs(output_dir, exist_ok=True)
# TODO: better name
ci_artifacts = get_last_daily_ci_reports(
artifact_names=artifact_names,
output_dir=output_dir,
token=os.environ["ACCESS_REPO_INFO_TOKEN"],
workflow_run_id=target_workflow_run_id,
)
if idx == 0:
prev_ci_artifacts = (target_workflow_run_id, ci_artifacts)
else:
other_ci_artifacts.append((target_workflow_run_id, ci_artifacts))

message = Message(
title,

@@ -1281,9 +1335,10 @@ if __name__ == "__main__":
additional_results,
selected_warnings=selected_warnings,
prev_ci_artifacts=prev_ci_artifacts,
other_ci_artifacts=other_ci_artifacts,
)

# send report only if there is any failure (for push CI)
if message.n_failures or (ci_event != "push" and not ci_event.startswith("Push CI (AMD)")):
message.post()
message.post_reply()
# message.post_reply()
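The same report-folder convention appears in notification_service.py, notification_service_quantization.py and process_bad_commit_report.py: scheduled runs land under the run's creation date, while other events get a `runs/<run_number>-<run_id>` subfolder appended. A small sketch of that rule follows; `build_report_repo_folder` is an illustrative name, not a function in the repo.

```python
# Sketch of the report folder layout used when uploading CI results to the Hub dataset.
import os


def build_report_repo_folder(workflow_run_created_at: str) -> str:
    folder = workflow_run_created_at.split("T")[0]  # e.g. "2025-05-20"
    if os.getenv("GITHUB_EVENT_NAME") != "schedule":
        folder = f"{folder}/runs/{os.getenv('GITHUB_RUN_NUMBER')}-{os.getenv('GITHUB_RUN_ID')}"
    return folder
```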
@@ -21,6 +21,7 @@ import time
from typing import Dict

from get_ci_error_statistics import get_jobs
from get_previous_daily_ci import get_last_daily_ci_run
from huggingface_hub import HfApi
from notification_service import (
Message,

@@ -246,20 +247,39 @@ if __name__ == "__main__":
)

job_name = os.getenv("CI_TEST_JOB")

report_repo_subfolder = ""
if os.getenv("GITHUB_EVENT_NAME") != "schedule":
# use workflow run id (if it is not a scheduled run)
report_repo_subfolder = f"{os.getenv('GITHUB_RUN_NUMBER')}-{os.getenv('GITHUB_RUN_ID')}"
report_repo_subfolder = f"runs/{report_repo_subfolder}"

workflow_run = get_last_daily_ci_run(token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_run_id=os.getenv('GITHUB_RUN_ID'))
workflow_run_created_time = workflow_run["created_at"]
workflow_id = workflow_run["workflow_id"]

report_repo_folder = workflow_run_created_time.split("T")[0]

if report_repo_subfolder:
report_repo_folder = f"{report_repo_folder}/{report_repo_subfolder}"

if not os.path.isdir(os.path.join(os.getcwd(), f"ci_results_{job_name}")):
os.makedirs(os.path.join(os.getcwd(), f"ci_results_{job_name}"))

nvidia_daily_ci_workflow = "huggingface/transformers/.github/workflows/self-scheduled-caller.yml"
is_nvidia_daily_ci_workflow = os.environ.get("GITHUB_WORKFLOW_REF").startswith(nvidia_daily_ci_workflow)
is_scheduled_ci_run = os.environ.get("GITHUB_EVENT_NAME") == "schedule"
# TODO: remove this one
is_scheduled_ci_run = True

with open(f"ci_results_{job_name}/quantization_results.json", "w", encoding="UTF-8") as fp:
json.dump(quantization_results, fp, indent=4, ensure_ascii=False)

target_workflow = "huggingface/transformers/.github/workflows/self-scheduled-caller.yml@refs/heads/main"
is_scheduled_ci_run = os.environ.get("CI_WORKFLOW_REF") == target_workflow

# upload results to Hub dataset (only for the scheduled daily CI run on `main`)
if is_scheduled_ci_run:
api.upload_file(
path_or_fileobj=f"ci_results_{job_name}/quantization_results.json",
path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_{job_name}/quantization_results.json",
path_in_repo=f"{report_repo_folder}/ci_results_{job_name}/quantization_results.json",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
@@ -18,13 +18,14 @@ import os
from collections import Counter
from copy import deepcopy

from get_previous_daily_ci import get_last_daily_ci_run
from huggingface_hub import HfApi

if __name__ == "__main__":
api = HfApi()

with open("new_model_failures_with_bad_commit_temp.json") as fp:
with open("new_model_failures_with_bad_commit.json") as fp:
data = json.load(fp)

# TODO: extend

@@ -68,16 +69,30 @@ if __name__ == "__main__":
new_data_full[author] = {k: v for k, v in _data.items() if len(v) > 0}

# Upload to Hub and get the url
with open("new_model_failures_with_bad_commit_grouped_by_authors_temp.json", "w") as fp:
report_repo_subfolder = ""
if os.getenv("GITHUB_EVENT_NAME") != "schedule":
# use workflow run id (if it is not a scheduled run)
report_repo_subfolder = f"{os.getenv('GITHUB_RUN_NUMBER')}-{os.getenv('GITHUB_RUN_ID')}"
report_repo_subfolder = f"runs/{report_repo_subfolder}"

workflow_run = get_last_daily_ci_run(token=os.environ["ACCESS_REPO_INFO_TOKEN"], workflow_run_id=os.getenv('GITHUB_RUN_ID'))
workflow_run_created_time = workflow_run["created_at"]

report_repo_folder = workflow_run_created_time.split("T")[0]

if report_repo_subfolder:
report_repo_folder = f"{report_repo_folder}/{report_repo_subfolder}"

with open("new_model_failures_with_bad_commit_grouped_by_authors.json", "w") as fp:
json.dump(new_data_full, fp, ensure_ascii=False, indent=4)
commit_info = api.upload_file(
path_or_fileobj="new_model_failures_with_bad_commit_grouped_by_authors_temp.json",
path_in_repo=f"{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors_temp.json",
path_or_fileobj="new_model_failures_with_bad_commit_grouped_by_authors.json",
path_in_repo=f"{report_repo_folder}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json",
repo_id="hf-internal-testing/transformers_daily_ci",
repo_type="dataset",
token=os.environ.get("TRANSFORMERS_CI_RESULTS_UPLOAD_TOKEN", None),
)
url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{datetime.datetime.today().strftime('%Y-%m-%d')}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors_temp.json"
url = f"https://huggingface.co/datasets/hf-internal-testing/transformers_daily_ci/raw/{commit_info.oid}/{report_repo_folder}/ci_results_run_models_gpu/new_model_failures_with_bad_commit_grouped_by_authors.json"

# Add `GH_` prefix as keyword mention
output = {}
@@ -49,38 +49,7 @@ if __name__ == "__main__":
tests = os.getcwd()
model_tests = os.listdir(os.path.join(tests, "models"))
d1 = sorted(filter(os.path.isdir, os.listdir(tests)))
d1 = ["generation", "models"]
# d2 = sorted(filter(os.path.isdir, [f"models/{x}" for x in model_tests if x not in ["deformable_detr", "grounding_dino", "omdet_turbo", "rt_detr", "nat", "dinat"]]))
d2 = sorted(filter(os.path.isdir, [f"models/{x}" for x in model_tests if x in [
# "bark",
# "depth_anything",
# "depth_pro",
# "dpt",
# "emu3",
# "gemma3",
# "gpt_neox",
# "granitemoeshared",
"idefics2",
# "idefics3",
# "mpt",
# "paligemma",
# "paligemma2",
# "phi3",
# "prompt_depth_anything",
# "qwen3",
# "qwen3_moe",
# "rag",
# "reformer",
# "smolvlm",
# "superglue",
# "upernet",
# "vitmatte",
# "vitpose",
# "xglm",
# "zamba2",
# "zoedepth",
]
]))
d2 = sorted(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))
d1.remove("models")
d = d2 + d1
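Spelled out, the matrix construction above amounts to the following sketch (assumes it runs from the tests directory): `d2` holds the per-model test folders, `d1` keeps the remaining top-level folders (here just "generation"), and the final list runs model tests first.

```python
# Sketch of the test-folder matrix composition shown in the hunk above.
import os

tests = os.getcwd()
model_tests = os.listdir(os.path.join(tests, "models"))
d1 = ["generation", "models"]
d2 = sorted(filter(os.path.isdir, [f"models/{x}" for x in model_tests]))
d1.remove("models")
d = d2 + d1
print(d[:3], "...", d[-1])
```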