mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 12:54:11 +08:00
When we call `benchmarker.benchmark(fn, (), {})`, it attempts to infer the device from the args and kwargs, which are both empty. In this case the default behavior is to assume CPU, since `is_cpu_device` is implemented as `all([x.device == "cpu" for x in ... if x is Tensor])`, and `all([]) == True`. I've added a PR that makes this raise an error, but we should fix this one callsite first. Pull Request resolved: https://github.com/pytorch/pytorch/pull/133290 Approved by: https://github.com/eellison
36 lines
986 B
Python
36 lines
986 B
Python
# Owner(s): ["module: inductor"]
|
|
|
|
import functools
|
|
import logging
|
|
|
|
import torch
|
|
from torch._inductor.runtime.benchmarking import benchmarker
|
|
from torch._inductor.test_case import run_tests, TestCase
|
|
from torch._inductor.utils import do_bench_using_profiling
|
|
|
|
|
|
log = logging.getLogger(__name__)
|
|
|
|
|
|
class TestBench(TestCase):
    """Smoke tests for inductor's GPU benchmarking utilities.

    Builds one shared fp16 CUDA linear workload and checks that both
    benchmarking entry points return a positive timing for it.
    """

    @classmethod
    def setUpClass(cls):
        # Construct the benchmarked callable once for the whole class:
        # a half-precision linear layer applied to a random CUDA input.
        super().setUpClass()
        inputs = torch.rand(1024, 10).cuda().half()
        weights = torch.rand(512, 10).cuda().half()
        cls._bench_fn = functools.partial(torch.nn.functional.linear, inputs, weights)

    def test_benchmarker(self):
        # Call benchmark_gpu directly (instead of benchmark() with empty
        # args/kwargs), so device inference cannot fall back to CPU.
        elapsed = benchmarker.benchmark_gpu(self._bench_fn)
        log.warning("do_bench result: %s", elapsed)
        self.assertGreater(elapsed, 0)

    def test_do_bench_using_profiling(self):
        # Profiling-based timing path should likewise report a positive time.
        elapsed = do_bench_using_profiling(self._bench_fn)
        log.warning("do_bench_using_profiling result: %s", elapsed)
        self.assertGreater(elapsed, 0)
|
|
|
|
|
|
if __name__ == "__main__":
    # NOTE(review): "cuda" is presumably a requirements/needs argument that
    # gates these tests on CUDA availability — confirm against
    # torch._inductor.test_case.run_tests.
    run_tests("cuda")