mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
This PR introduces a new "operator microbenchmark" CI workflow and GitHub Actions for operator microbenchmarks, updating test scripts and job matrices to support new parameters, and broadening the operator benchmark tests to include more data types, larger shapes, and gradient tests. The benchmark configurations now focus more on different CUDA hardware and multiple dtypes (bf16, fp16, fp32), for both compile and eager mode. **Benchmark Configuration and Coverage:** * Expanded operator benchmark configurations in `addmm_test.py`, `bmm_test.py`, `matmul_test.py`, and `mm_test.py` to benchmark multiple dtypes on CUDA devices, in eager and compile mode, for forward and backward runs. The configs with tag "long" for the above-mentioned files are being run in CI. * The CI benchmarking runs on various hardware: H100 and A100. * The CI job also uploads the microbenchmarking outputs to a [HUD](https://hud.pytorch.org/benchmark/llms?repoName=pytorch%2Fpytorch&benchmarkName=PyTorch+operator+microbenchmark) dashboard. Pull Request resolved: https://github.com/pytorch/pytorch/pull/162530 Approved by: https://github.com/huydhn Co-authored-by: Huy Do <huydhn@gmail.com>
90 lines
2.4 KiB
Python
90 lines
2.4 KiB
Python
import operator_benchmark as op_bench
|
|
|
|
import torch
|
|
|
|
|
|
"""Microbenchmarks for add_ operator. Supports both Caffe2/PyTorch."""
|
|
|
|
# Configs for the PT add operator: the "long" sweep, a full cross product
# of sizes and devices used for more exhaustive benchmarking runs.
add_long_configs = op_bench.cross_product_configs(
    M=[8, 128],
    N=[32, 64],
    K=[256, 512],
    device=["cpu", "cuda"],
    tags=["long"],
)
|
# The "short" sweep: a small, explicit list of (M, N, K) shapes, each
# crossed with the two devices, for quick smoke-test runs.
add_short_configs = op_bench.config_list(
    attr_names=["M", "N", "K"],
    attrs=[
        [1, 1, 1],
        [64, 64, 64],
        [64, 64, 128],
    ],
    cross_product_configs={"device": ["cpu", "cuda"]},
    tags=["short"],
)
|
class AddBenchmark(op_bench.TorchBenchmarkBase):
    """Microbenchmark for element-wise torch.add on two (M, N, K) tensors."""

    def init(self, M, N, K, device):
        # auto_set() is invoked once per input, in insertion order, so the
        # framework can enable gradients on each input independently when
        # generating the backward test variants.
        self.inputs = {
            name: torch.rand(M, N, K, device=device, requires_grad=self.auto_set())
            for name in ("input_one", "input_two")
        }
        self.set_module_name("add")

    def forward(self, input_one, input_two):
        return torch.add(input_one, input_two)
|
# The generated test names based on add_short_configs will be in the following
# pattern (shown here for the first short config, M=1, N=1, K=1 on cpu):
# add_M1_N1_K1_devicecpu
# add_M1_N1_K1_devicecpu_bwdall
# add_M1_N1_K1_devicecpu_bwd1
# add_M1_N1_K1_devicecpu_bwd2
# ...
# Those names can be used to filter tests.

# Register both forward (pt_test) and backward (pt_gradient_test) benchmarks
# for every long and short config.
op_bench.generate_pt_test(add_long_configs + add_short_configs, AddBenchmark)
op_bench.generate_pt_gradient_test(add_long_configs + add_short_configs, AddBenchmark)
|
"""Mircobenchmark for addr operator."""
|
|
|
|
|
|
class AddrBenchmark(op_bench.TorchBenchmarkBase):
    """Microbenchmark for torch.addr: input_one + outer(vec1, vec2)."""

    def init(self, M, N, device, dtype):
        # One (M, N) matrix plus the two vectors whose outer product is added.
        # Dict insertion order keeps the per-input auto_set() calls in the
        # same sequence as before, so backward variants are generated with
        # gradients toggled on each input in turn.
        shapes = {"input_one": (M, N), "vec1": (M,), "vec2": (N,)}
        self.inputs = {
            name: torch.rand(
                shape, device=device, requires_grad=self.auto_set(), dtype=dtype
            )
            for name, shape in shapes.items()
        }
        self.set_module_name("addr")

    def forward(self, input_one, vec1, vec2):
        return torch.addr(input_one, vec1, vec2)
|
# addr sweep: matrix/vector sizes crossed with devices and two dtypes
# (double and half) to cover both high- and low-precision paths.
addr_configs = op_bench.cross_product_configs(
    M=[8, 256],
    N=[256, 16],
    device=["cpu", "cuda"],
    dtype=[torch.double, torch.half],
    tags=["addr"],
)

# Register forward and backward benchmarks for every addr config.
op_bench.generate_pt_test(addr_configs, AddrBenchmark)
op_bench.generate_pt_gradient_test(addr_configs, AddrBenchmark)
|
# Allow running this benchmark file directly as a script.
if __name__ == "__main__":
    op_bench.benchmark_runner.main()