pytorch/benchmarks/operator_benchmark/pt/qembeddingbag_test.py
Xuehai Pan 26f4f10ac8 [5/N][Easy] fix typo for usort config in pyproject.toml (kown -> known): sort torch (#127126)
The `usort` config in `pyproject.toml` has no effect due to a typo. Fixing the typo makes `usort` do more and generates the changes in this PR. Except for `pyproject.toml`, all changes are generated by `lintrunner -a --take UFMT --all-files`.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/127126
Approved by: https://github.com/kit1980
2024-05-27 14:49:57 +00:00
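
For context, usort reads its import-categorization settings from a [tool.usort.known] table in pyproject.toml, so a misspelled table name is silently ignored and the configured modules are not treated as first-party when sorting. A minimal sketch of the kind of fix the commit describes (the table contents are illustrative, not the exact PyTorch entries):

    # Before: misspelled table name, so usort ignores the setting
    [tool.usort.kown]
    first_party = ["torch"]

    # After: correct table name, so "torch" is sorted as a first-party module
    [tool.usort.known]
    first_party = ["torch"]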


import numpy
from pt import configs
import operator_benchmark as op_bench
import torch
import torch.ao.nn.quantized as nnq

"""
Microbenchmarks for qEmbeddingBag operators.
"""


class QEmbeddingBagBenchmark(op_bench.TorchBenchmarkBase):
    def init(
        self,
        embeddingbags,
        dim,
        mode,
        input_size,
        offset,
        sparse,  # comes from the shared configs but is unused here
        include_last_offset,
        device,
    ):
        self.embedding = nnq.EmbeddingBag(
            num_embeddings=embeddingbags,
            embedding_dim=dim,
            mode=mode,
            include_last_offset=include_last_offset,
        ).to(device=device)
        # Fixed seed so every run benchmarks the same random indices.
        numpy.random.seed((1 << 32) - 1)
        self.input = torch.tensor(
            numpy.random.randint(0, embeddingbags, input_size), device=device
        ).long()
        # Offsets mark where each bag starts; append the total input length
        # so the final bag is bounded.
        offset = torch.LongTensor([offset], device=device)
        self.offset = torch.cat(
            (offset, torch.tensor([self.input.size(0)], dtype=torch.long)), 0
        )
        self.inputs = {"input": self.input, "offset": self.offset}
        self.set_module_name("qEmbeddingBag")

    def forward(self, input, offset):
        return self.embedding(input, offset)


op_bench.generate_pt_test(configs.embeddingbag_short_configs, QEmbeddingBagBenchmark)


if __name__ == "__main__":
    op_bench.benchmark_runner.main()
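
As a usage note, operator_benchmark scripts are typically run as modules from the benchmarks/operator_benchmark directory so that the `pt` and `operator_benchmark` imports resolve; the exact invocation below is an assumption based on that convention rather than something stated in this file:

    cd benchmarks/operator_benchmark
    python -m pt.qembeddingbag_test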