pytorch/benchmarks/operator_benchmark/c2/quantile_op_test.py
Xuehai Pan 0dae2ba5bd [2/N][Easy] fix typo for usort config in pyproject.toml (kown -> known): sort caffe2 (#127123)
The `usort` config in `pyproject.toml` has no effect due to a typo. Fixing the typo makes `usort` do more and generates the changes in this PR. Except for `pyproject.toml`, all changes were generated by `lintrunner -a --take UFMT --all-files`.
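
For illustration, the sketch below shows the kind of import regrouping this produces in a file like this one: once the `known` setting is spelled correctly, `usort` sorts the `caffe2.python` import after the benchmark-harness imports, as in the file further down. The "before" ordering is a hypothetical example, not taken from the PR diff, and whether blank lines separate the resulting groups depends on the surrounding formatter config.

# Hypothetical pre-fix ordering (illustrative only, not the actual diff):
import benchmark_caffe2 as op_bench_c2
import operator_benchmark as op_bench
from benchmark_caffe2 import Caffe2BenchmarkBase  # noqa: F401
from caffe2.python import core

# After `lintrunner -a --take UFMT --all-files` with the corrected config,
# the block matches the import order of the file below:
import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase  # noqa: F401
import operator_benchmark as op_bench
from caffe2.python import core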

Pull Request resolved: https://github.com/pytorch/pytorch/pull/127123
Approved by: https://github.com/Skylion007
ghstack dependencies: #127122
2024-05-25 18:26:34 +00:00


import benchmark_caffe2 as op_bench_c2
from benchmark_caffe2 import Caffe2BenchmarkBase  # noqa: F401
import operator_benchmark as op_bench
from caffe2.python import core


"""Microbenchmarks for QuantileOp operator."""

# Configs for C2 QuantileOp operator
quantile_op_long_configs = op_bench.cross_product_configs(
    M=[32, 64, 128], N=range(32, 128, 32), dtype=["float", "double"], tags=["long"]
)

quantile_op_short_configs = op_bench.config_list(
    attrs=[
        [16, 16, "float"],
        [16, 16, "double"],
        [64, 64, "float"],
        [64, 64, "double"],
    ],
    attr_names=["M", "N", "dtype"],
    tags=["short"],
)


class QuantileOpBenchmark(op_bench_c2.Caffe2BenchmarkBase):
    def init(self, M, N, dtype):
        # M input tensors of length N, plus a single-element output blob.
        self.data = [self.tensor([N], dtype) for _ in range(M)]
        self.quantile = 0.3
        self.output = self.tensor([1], dtype)
        self.set_module_name("quantile_op")

    def forward(self):
        # Build the Caffe2 Quantile operator; the benchmark harness runs it.
        op = core.CreateOperator(
            "Quantile", inputs=self.data, outputs=self.output, quantile=self.quantile
        )
        return op


# Register the benchmark for both the long and short config lists.
op_bench_c2.generate_c2_test(
    quantile_op_long_configs + quantile_op_short_configs, QuantileOpBenchmark
)


if __name__ == "__main__":
    op_bench.benchmark_runner.main()