From 85fa66be04b6f78139da4f0ec8f8b1956291e1c5 Mon Sep 17 00:00:00 2001 From: Catherine Lee Date: Wed, 24 Jul 2024 19:56:00 +0000 Subject: [PATCH] Add rerun_disabled_tests for inductor (#131681) Test in prod? This also turns on mem leak check Briefly checked that ``` python3 ".github/scripts/filter_test_configs.py" \ --workflow "inductor" \ --job-name "cuda12.1-py3.10-gcc9-sm86 / build" \ --test-matrix "{ include: [ { config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "inductor_distributed", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" }, { config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "dynamic_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "dynamic_inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "dynamic_inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "dynamic_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "dynamic_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "aot_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "aot_inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "aot_inductor_timm", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" 
}, { config: "aot_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "aot_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" }, { config: "inductor_cpp_wrapper_abi_compatible", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" }, ]} " \ --selected-test-configs "" \ --pr-number "${PR_NUMBER}" \ --tag "${TAG}" \ --event-name "schedule" \ --schedule "29 8 * * *" \ --branch "${HEAD_BRANCH}" ``` has rerun disabled tests option in the test matrix I don't think all these things need to run but I'm not sure which ones (probably just inductor?) Pull Request resolved: https://github.com/pytorch/pytorch/pull/131681 Approved by: https://github.com/zou3519 --- .github/workflows/inductor-cu124.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/inductor-cu124.yml b/.github/workflows/inductor-cu124.yml index ae435aed1f7a..39d1204a4e70 100644 --- a/.github/workflows/inductor-cu124.yml +++ b/.github/workflows/inductor-cu124.yml @@ -9,6 +9,7 @@ on: # Run every 4 hours during the week and every 12 hours on the weekend - cron: 45 0,4,8,12,16,20 * * 1-5 - cron: 45 4,12 * * 0,6 + - cron: 29 8 * * * # about 1:29am PDT, for mem leak check and rerun disabled tests concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}