Files
pytorch/benchmarks/operator_benchmark/common/repeat_benchmark.py
Xuehai Pan 7763c83af6 [5/N][Easy] fix typo for usort config in pyproject.toml (kown -> known): sort torch (#127126)
The `usort` config in `pyproject.toml` has no effect due to a typo. Fixing the typo makes `usort` do more and generates the changes in the PR. Except for `pyproject.toml`, all changes are generated by `lintrunner -a --take UFMT --all-files`.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/127126
Approved by: https://github.com/kit1980
ghstack dependencies: #127122, #127123, #127124, #127125
2024-05-27 04:22:18 +00:00

64 lines
1.6 KiB
Python

import time
import numpy as np
import torch
"""Microbenchmarks for Tensor repeat operator. Supports PyTorch."""
# Input tensor shapes exercised by the benchmark; paired element-wise
# with ``repeats`` below (both tuples have the same length).
input_shapes = (
    (4, 4, 1),
    (16, 1, 32),
    (64, 64, 1, 1),
    (8, 256, 128),
    (1, 64, 128, 32),
    (512, 512),
)

# Repeat factors applied to the corresponding entry of ``input_shapes``.
repeats = (
    (1, 1, 1, 64),
    (1, 4, 1, 2),
    (1, 2, 2, 15),
    (1, 1, 3, 2),
    (128, 1, 8, 1),
    (1, 1, 2, 16),
)

NUM_WARMUP_ITERS = 5
NUM_BENCHMARK_ITERS = 10
# Bytes per element for the benchmarked dtype (torch.randn -> float32).
DTYPE_TO_BYTES = {"float": 4}


def generate_data_for_repeat():
    """Create one random tensor per entry of ``input_shapes`` and size the traffic.

    Returns a ``(tensors, total_bytes)`` pair, where ``total_bytes`` counts,
    for each (tensor, repeat) pair, the elements read (the input) plus the
    elements written (the repeated output), converted to bytes.
    """
    tensors = [torch.randn(*shape) for shape in input_shapes]
    # One repeat call reads numel() elements and writes numel() * prod(repeat).
    element_count = sum(
        t.numel() + t.numel() * np.prod(rep)
        for t, rep in zip(tensors, repeats)
    )
    return tensors, element_count * DTYPE_TO_BYTES["float"]
# Conversion factor: bytes -> megabytes (decimal, 10^6).
BYTES_TO_MB = 1.0 / 1000.0 / 1000.0

# Materialize the benchmark inputs once at import time so the timed loop
# measures only the repeat operator, not data generation.
input_tensors, total_bytes = generate_data_for_repeat()
def pt_repeat(input_tensor, repeat):
    """Tile ``input_tensor`` along each dimension per ``repeat`` (Tensor.repeat)."""
    tiled = input_tensor.repeat(repeat)
    return tiled
def pt_repeat_n_times(niters):
    """Run the whole suite of repeat calls ``niters`` times, discarding results."""
    for _ in range(niters):
        # Pair each pre-generated tensor with its repeat spec.
        for tensor, rep in zip(input_tensors, repeats):
            pt_repeat(tensor, rep)
if __name__ == "__main__":
    # Warm-up runs so allocators/caches reach steady state before timing.
    pt_repeat_n_times(NUM_WARMUP_ITERS)

    # Use time.perf_counter() rather than time.time(): perf_counter is the
    # monotonic, highest-resolution clock intended for interval timing,
    # while time.time() has coarse resolution and can jump with wall-clock
    # adjustments, corrupting the measurement.
    start = time.perf_counter()
    pt_repeat_n_times(NUM_BENCHMARK_ITERS)
    total_time_s = time.perf_counter() - start

    total_time_per_iter_s = total_time_s / NUM_BENCHMARK_ITERS
    # Effective bandwidth: (bytes read + written) per iteration, in MB/s.
    achieved_bandwidth = (total_bytes * BYTES_TO_MB) / total_time_per_iter_s
    print(f"Time:{total_time_per_iter_s} Achieved Bandwidth:{achieved_bandwidth} MB/s")