Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
This pull request adds support for running operator microbenchmarks on ROCm (AMD GPU) environments in the CI workflow. The main change introduces new ROCm build and test jobs in the `.github/workflows/operator_microbenchmark.yml` file.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/164173
Approved by: https://github.com/huydhn
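At its core, the change adds a ROCm build job plus a test job that consumes its outputs, mirroring the existing CUDA build/test pairs. The sketch below is condensed from the full workflow file reproduced further down (the `if:`, `name:`, `timeout-minutes:` and `secrets:` lines are omitted here for brevity):

  opmicrobenchmark-build-rocm:
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-jammy-rocm-py3_10
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks
      test-matrix: |
        { include: [
          { config: "operator_microbenchmark_test", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.gfx942.1" },
        ]}

  opmicrobenchmark-test-rocm:
    uses: ./.github/workflows/_rocm-test.yml
    needs: opmicrobenchmark-build-rocm
    with:
      docker-image: ${{ needs.opmicrobenchmark-build-rocm.outputs.docker-image }}
      test-matrix: ${{ needs.opmicrobenchmark-build-rocm.outputs.test-matrix }}
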
101 lines · 3.5 KiB · YAML
name: operator_microbenchmark

on:
  push:
    tags:
      - ciflow/op-benchmark/*
  workflow_dispatch:
  schedule:
    # Run at 06:00 UTC every day
    - cron: 0 6 * * *

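# Note added for this write-up (not in the original file): the tag filter is a
# glob, so pushing any tag under the prefix, e.g. a hypothetical
# ciflow/op-benchmark/164173, starts the workflow; workflow_dispatch and the
# 06:00 UTC cron are the other two entry points.
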
concurrency:
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref_name }}-${{ github.ref_type == 'branch' && github.sha }}-${{ github.event_name == 'workflow_dispatch' }}-${{ github.event_name == 'schedule' }}
  cancel-in-progress: true

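# Note added for this write-up (not in the original file): the group key combines
# the workflow name, the PR number or ref name, the commit SHA for branch refs,
# and two booleans that isolate workflow_dispatch and schedule runs. Re-pushing
# the same ciflow/op-benchmark tag therefore cancels the in-flight run for that
# tag, while nightly and manually dispatched runs never cancel each other.
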
permissions:
  id-token: write
  contents: read

jobs:
  # H100 A100 runners
  opmicrobenchmark-build:
    if: github.repository_owner == 'pytorch'
    name: opmicrobenchmark-build
    uses: ./.github/workflows/_linux-build.yml
    with:
      runner: linux.12xlarge.memory
      build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
      docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
      cuda-arch-list: '8.0 9.0'
      test-matrix: |
        { include: [
          { config: "operator_microbenchmark_test", shard: 1, num_shards: 1, runner: "linux.aws.h100" },
          { config: "operator_microbenchmark_test", shard: 1, num_shards: 1, runner: "linux.aws.a100" },
        ]}
    secrets: inherit

  opmicrobenchmark-test:
    name: opmicrobenchmark-test
    uses: ./.github/workflows/_linux-test.yml
    needs: opmicrobenchmark-build
    with:
      timeout-minutes: 500
      build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm80
      docker-image: ${{ needs.opmicrobenchmark-build.outputs.docker-image }}
      test-matrix: ${{ needs.opmicrobenchmark-build.outputs.test-matrix }}
    secrets: inherit

  # B200 runner
  opmicrobenchmark-build-b200:
    if: github.repository_owner == 'pytorch'
    name: opmicrobenchmark-build-b200
    uses: ./.github/workflows/_linux-build.yml
    with:
      runner: linux.12xlarge.memory
      build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm100
      docker-image-name: ci-image:pytorch-linux-jammy-cuda12.8-cudnn9-py3-gcc11
      cuda-arch-list: '10.0'
      test-matrix: |
        { include: [
          { config: "operator_microbenchmark_test", shard: 1, num_shards: 1, runner: "linux.dgx.b200" },
        ]}
    secrets: inherit

  opmicrobenchmark-test-b200:
    name: opmicrobenchmark-test-b200
    uses: ./.github/workflows/_linux-test.yml
    needs: opmicrobenchmark-build-b200
    with:
      timeout-minutes: 500
      build-environment: linux-jammy-cuda12.8-py3.10-gcc9-sm100
      docker-image: ${{ needs.opmicrobenchmark-build-b200.outputs.docker-image }}
      test-matrix: ${{ needs.opmicrobenchmark-build-b200.outputs.test-matrix }}
      aws-role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
    secrets: inherit

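  # Note added for this write-up (not in the original file): unlike the other test
  # jobs, the B200 job passes aws-role-to-assume so the runner can assume a
  # read-only IAM role for S3 and ECR access; the id-token: write permission above
  # is what allows that OIDC role assumption.
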
  # ROCM MI300 runner
  opmicrobenchmark-build-rocm:
    if: github.repository_owner == 'pytorch'
    name: opmicrobenchmark-build-rocm
    uses: ./.github/workflows/_linux-build.yml
    with:
      build-environment: linux-jammy-rocm-py3_10
      docker-image-name: ci-image:pytorch-linux-jammy-rocm-n-py3-benchmarks
      test-matrix: |
        { include: [
          { config: "operator_microbenchmark_test", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.gfx942.1" },
        ]}
    secrets: inherit

  opmicrobenchmark-test-rocm:
    name: opmicrobenchmark-test-rocm
    uses: ./.github/workflows/_rocm-test.yml
    needs: opmicrobenchmark-build-rocm
    with:
      timeout-minutes: 500
      build-environment: linux-jammy-rocm-py3_10
      docker-image: ${{ needs.opmicrobenchmark-build-rocm.outputs.docker-image }}
      test-matrix: ${{ needs.opmicrobenchmark-build-rocm.outputs.test-matrix }}
    secrets: inherit