Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00.
Summary: random coredump Pull Request resolved: https://github.com/pytorch/pytorch/pull/17331 Differential Revision: D14162018 Pulled By: bddppq fbshipit-source-id: 3ed15a79b7bca2498c50f6af80cbd6be7229dea8
24 lines
839 B
Python
24 lines
839 B
Python
import unittest
|
|
from caffe2.python import convnet_benchmarks as cb
|
|
from caffe2.python import test_util, workspace
|
|
|
|
|
|
# TODO: investigate why this randomly core dump in ROCM CI
@unittest.skipIf(not workspace.has_cuda_support, "no cuda gpu")
class TestConvnetBenchmarks(test_util.TestCase):
    """Smoke test: run each convnet benchmark model for a single iteration.

    Exercises both the full forward/backward pass and the forward-only
    pass for every model exposed by convnet_benchmarks. Skipped entirely
    when no CUDA GPU is available.
    """

    def testConvnetBenchmarks(self):
        # Two command-line configurations: default (fwd+bwd) and
        # forward-only. Each entry is built via implicit string-literal
        # concatenation across two lines.
        arg_strings = (
            '--batch_size 16 --order NCHW --iterations 1 '
            '--warmup_iterations 1',
            '--batch_size 16 --order NCHW --iterations 1 '
            '--warmup_iterations 1 --forward_only',
        )
        benchmark_models = (cb.AlexNet, cb.OverFeat, cb.VGGA, cb.Inception)
        for benchmark_model in benchmark_models:
            for arg_string in arg_strings:
                # Reuse the benchmark's own CLI parser so the smoke test
                # stays in sync with the real command-line interface.
                parsed = cb.GetArgumentParser().parse_args(
                    arg_string.split(' '))
                cb.Benchmark(benchmark_model, parsed)
|
|
|
|
|
|
# Entry point: run the test suite when executed directly as a script.
if __name__ == '__main__':
    unittest.main()