Compare commits

...

9 Commits

SHA1 Message Date
8a4e8372ab ping myself 2024-09-25 15:59:15 +02:00
10a829feea ping myself 2024-09-25 15:47:59 +02:00
a6be9b2ebc ping myself 2024-09-25 15:22:51 +02:00
96e619bed7 ping myself 2024-09-25 15:00:13 +02:00
52ed976f83 ping myself 2024-09-25 14:54:29 +02:00
fe80f82be9 ping myself 2024-09-24 17:14:11 +02:00
7e102cca3b ping myself 2024-09-24 16:51:22 +02:00
aa9bbd6080 ping myself 2024-09-24 16:48:14 +02:00
3c0908f31f ping myself 2024-09-24 16:43:17 +02:00
5 changed files with 53 additions and 106 deletions

View File

@@ -7,7 +7,7 @@ on:
- cron: "17 2 * * *"
push:
branches:
- run_scheduled_ci*
- ping_author
jobs:
model-ci:
@@ -15,64 +15,8 @@ jobs:
uses: ./.github/workflows/self-scheduled.yml
with:
job: run_models_gpu
slack_report_channel: "#transformers-ci-daily-models"
slack_report_channel: "#transformers-ci-feedback-tests"
runner: daily-ci
docker: huggingface/transformers-all-latest-gpu
ci_event: Daily CI
secrets: inherit
torch-pipeline:
name: Torch pipeline CI
uses: ./.github/workflows/self-scheduled.yml
with:
job: run_pipelines_torch_gpu
slack_report_channel: "#transformers-ci-daily-pipeline-torch"
runner: daily-ci
docker: huggingface/transformers-pytorch-gpu
ci_event: Daily CI
secrets: inherit
tf-pipeline:
name: TF pipeline CI
uses: ./.github/workflows/self-scheduled.yml
with:
job: run_pipelines_tf_gpu
slack_report_channel: "#transformers-ci-daily-pipeline-tf"
runner: daily-ci
docker: huggingface/transformers-tensorflow-gpu
ci_event: Daily CI
secrets: inherit
example-ci:
name: Example CI
uses: ./.github/workflows/self-scheduled.yml
with:
job: run_examples_gpu
slack_report_channel: "#transformers-ci-daily-examples"
runner: daily-ci
docker: huggingface/transformers-all-latest-gpu
ci_event: Daily CI
secrets: inherit
deepspeed-ci:
name: DeepSpeed CI
uses: ./.github/workflows/self-scheduled.yml
with:
job: run_torch_cuda_extensions_gpu
slack_report_channel: "#transformers-ci-daily-deepspeed"
runner: daily-ci
docker: huggingface/transformers-pytorch-deepspeed-latest-gpu
ci_event: Daily CI
working-directory-prefix: /workspace
secrets: inherit
quantization-ci:
name: Quantization CI
uses: ./.github/workflows/self-scheduled.yml
with:
job: run_quantization_torch_gpu
slack_report_channel: "#transformers-ci-daily-quantization"
runner: daily-ci
docker: huggingface/transformers-quantization-latest-gpu
ci_event: Daily CI
secrets: inherit

View File

@@ -50,7 +50,7 @@ jobs:
name: Setup
strategy:
matrix:
machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache]
machine_type: [aws-g4dn-2xlarge-cache]
runs-on:
group: '${{ matrix.machine_type }}'
container:
@@ -103,7 +103,7 @@ jobs:
strategy:
fail-fast: false
matrix:
machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache]
machine_type: [aws-g4dn-2xlarge-cache]
slice_id: ${{ fromJSON(needs.setup.outputs.slice_ids) }}
uses: ./.github/workflows/model_jobs.yml
with:
@@ -120,7 +120,7 @@ jobs:
strategy:
fail-fast: false
matrix:
machine_type: [aws-g4dn-2xlarge-cache, aws-g4dn-12xlarge-cache]
machine_type: [aws-g4dn-2xlarge-cache]
runs-on:
group: '${{ matrix.machine_type }}'
container:
@@ -496,46 +496,46 @@ jobs:
name: ${{ env.machine_type }}_run_quantization_torch_gpu_${{ env.matrix_folders }}_test_reports
path: /transformers/reports/${{ env.machine_type }}_run_quantization_torch_gpu_${{ matrix.folders }}_test_reports
run_extract_warnings:
# Let's only do this for the job `run_models_gpu` to simplify the (already complex) logic.
if: ${{ always() && inputs.job == 'run_models_gpu' }}
name: Extract warnings in CI artifacts
runs-on: ubuntu-22.04
needs: [setup, run_models_gpu]
steps:
- name: Checkout transformers
uses: actions/checkout@v4
with:
fetch-depth: 2
- name: Install transformers
run: pip install transformers
- name: Show installed libraries and their versions
run: pip freeze
- name: Create output directory
run: mkdir warnings_in_ci
- uses: actions/download-artifact@v4
with:
path: warnings_in_ci
- name: Show artifacts
run: echo "$(python3 -c 'import os; d = os.listdir(); print(d)')"
working-directory: warnings_in_ci
- name: Extract warnings in CI artifacts
run: |
python3 utils/extract_warnings.py --workflow_run_id ${{ github.run_id }} --output_dir warnings_in_ci --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} --from_gh
echo "$(python3 -c 'import os; import json; fp = open("warnings_in_ci/selected_warnings.json"); d = json.load(fp); d = "\n".join(d) ;print(d)')"
- name: Upload artifact
if: ${{ always() }}
uses: actions/upload-artifact@v4
with:
name: warnings_in_ci
path: warnings_in_ci/selected_warnings.json
# run_extract_warnings:
# # Let's only do this for the job `run_models_gpu` to simplify the (already complex) logic.
# if: ${{ always() && inputs.job == 'run_models_gpu' }}
# name: Extract warnings in CI artifacts
# runs-on: ubuntu-22.04
# needs: [setup, run_models_gpu]
# steps:
# - name: Checkout transformers
# uses: actions/checkout@v4
# with:
# fetch-depth: 2
#
# - name: Install transformers
# run: pip install transformers
#
# - name: Show installed libraries and their versions
# run: pip freeze
#
# - name: Create output directory
# run: mkdir warnings_in_ci
#
# - uses: actions/download-artifact@v4
# with:
# path: warnings_in_ci
#
# - name: Show artifacts
# run: echo "$(python3 -c 'import os; d = os.listdir(); print(d)')"
# working-directory: warnings_in_ci
#
# - name: Extract warnings in CI artifacts
# run: |
# python3 utils/extract_warnings.py --workflow_run_id ${{ github.run_id }} --output_dir warnings_in_ci --token ${{ secrets.ACCESS_REPO_INFO_TOKEN }} --from_gh
# echo "$(python3 -c 'import os; import json; fp = open("warnings_in_ci/selected_warnings.json"); d = json.load(fp); d = "\n".join(d) ;print(d)')"
#
# - name: Upload artifact
# if: ${{ always() }}
# uses: actions/upload-artifact@v4
# with:
# name: warnings_in_ci
# path: warnings_in_ci/selected_warnings.json
send_results:
name: Slack Report
@@ -547,7 +547,7 @@ jobs:
run_examples_gpu,
run_torch_cuda_extensions_gpu,
run_quantization_torch_gpu,
run_extract_warnings
# run_extract_warnings
]
if: ${{ always() }}
uses: ./.github/workflows/slack-report.yml
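
The `python3 -c` one-liners in the `run_extract_warnings` steps above (now commented out) are hard to read inline. A more readable sketch of what they do, assuming the artifacts were already downloaded into warnings_in_ci/:

import json
import os

# "Show artifacts": list what actions/download-artifact placed in warnings_in_ci/.
print(os.listdir("warnings_in_ci"))

# Print the warnings collected by utils/extract_warnings.py, one per line.
with open("warnings_in_ci/selected_warnings.json") as fp:
    selected_warnings = json.load(fp)
print("\n".join(selected_warnings))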

View File

@@ -53,6 +53,7 @@ jobs:
CI_WORKFLOW_REF: ${{ github.workflow_ref }}
CI_TEST_JOB: ${{ inputs.job }}
SETUP_STATUS: ${{ inputs.setup_status }}
TEMP: ${{ secrets[format('{0}_{1}', github.actor, 'SLACK_ID')] }}
# We pass `needs.setup.outputs.matrix` as the argument. A processing in `notification_service.py` to change
# `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
# For a job that doesn't depend on (i.e. `needs`) `setup`, the value for `inputs.folder_slices` would be an
@@ -83,6 +84,7 @@ jobs:
CI_SHA: ${{ github.sha }}
CI_TEST_JOB: ${{ inputs.job }}
SETUP_STATUS: ${{ inputs.setup_status }}
TEMP: ${{ secrets[format('{0}_{1}', github.actor, 'SLACK_ID')] }}
# We pass `needs.setup.outputs.quantization_matrix` as the argument. A processing in `notification_service_quantization.py` to change
# `quantization/bnb` to `quantization_bnb` is required, as the artifact names use `_` instead of `/`.
run: |
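
The `TEMP` variable added in both steps above looks up a per-user secret whose name is built from the triggering actor, and the notification script later reads it back via os.environ["TEMP"]. A minimal Python sketch of the equivalent lookup, assuming a repository secret named `<actor>_SLACK_ID` exists (the actor name and Slack ID below are placeholders):

import os

def resolve_slack_id(actor: str, secrets: dict) -> str:
    # Mirrors secrets[format('{0}_{1}', github.actor, 'SLACK_ID')], e.g. "some_user_SLACK_ID".
    return secrets.get(f"{actor}_SLACK_ID", "")

# Placeholder values for illustration only.
repo_secrets = {"some_user_SLACK_ID": "U0123456789"}
os.environ["TEMP"] = resolve_slack_id("some_user", repo_secrets)
print(os.environ["TEMP"])  # -> U0123456789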

View File

@@ -209,13 +209,13 @@ class Message:
return {
"type": "section",
"text": {
"type": "plain_text",
"type": "mrkdwn",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\n"
f"Number of model failures: {self.n_model_failures}.\n"
f"The suite ran in {self.time}."
f"The suite ran in {self.time}.\n"
f"Try to ping <@{os.environ['TEMP']}>"
),
"emoji": True,
},
"accessory": {
"type": "button",
@@ -622,7 +622,7 @@ class Message:
print("Sending the following payload")
print(json.dumps({"blocks": json.loads(payload)}))
text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
text = f"Try to ping @Yih-Dar SHIEH"
self.thread_ts = client.chat_postMessage(
channel=SLACK_REPORT_CHANNEL_ID,
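
For context, a minimal sketch of how a block like the one above ends up in Slack, assuming `slack_sdk` is available (the token and channel variable names are placeholders); a `<@USER_ID>` mention only renders as a ping when the text type is "mrkdwn", which is why the block switches away from "plain_text":

import json
import os

from slack_sdk import WebClient

client = WebClient(token=os.environ["SLACK_BOT_TOKEN"])  # placeholder token variable

ping_block = {
    "type": "section",
    "text": {
        "type": "mrkdwn",  # required for <@USER_ID> to render as a mention
        "text": f"Try to ping <@{os.environ['TEMP']}>",
    },
}

client.chat_postMessage(
    channel=os.environ["SLACK_REPORT_CHANNEL_ID"],  # placeholder channel variable
    text="CI report",
    blocks=json.dumps([ping_block]),
)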

View File

@@ -62,4 +62,5 @@ if __name__ == "__main__":
start = end
end = start + num_jobs_per_splits + (1 if idx < num_jobs % args.num_splits else 0)
model_splits.append(d[start:end])
model_splits = [["models/vit"], ["models/altclip"]]
print(model_splits)
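
The hard-coded line above overrides whatever the loop computed, so the scheduled run only exercises two one-model slices. A self-contained sketch of the surrounding splitting logic (reconstructed from the visible lines, not copied verbatim):

def split_models(d, num_splits):
    # Distribute the model directories into num_splits near-equal slices.
    model_splits = []
    num_jobs = len(d)
    num_jobs_per_splits = num_jobs // num_splits
    start = end = 0
    for idx in range(num_splits):
        start = end
        end = start + num_jobs_per_splits + (1 if idx < num_jobs % num_splits else 0)
        model_splits.append(d[start:end])
    return model_splits

print(split_models([f"models/model_{i}" for i in range(7)], 3))
# [['models/model_0', 'models/model_1', 'models/model_2'],
#  ['models/model_3', 'models/model_4'],
#  ['models/model_5', 'models/model_6']]

# The override from the diff: pin the result to two one-model slices.
model_splits = [["models/vit"], ["models/altclip"]]
print(model_splits)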