Remove speech_transformer workaround; torchbench handles it correctly now (#100558)

Signed-off-by: Edward Z. Yang <ezyang@meta.com>

Pull Request resolved: https://github.com/pytorch/pytorch/pull/100558
Approved by: https://github.com/albanD
Edward Z. Yang
2023-05-03 12:40:15 -04:00
committed by PyTorch MergeBot
parent fd841763e1
commit d25c93f919

@@ -291,10 +291,6 @@ class TorchBenchmarkRunner(BenchmarkRunner):
         if self.args.accuracy and model_name in MAX_BATCH_SIZE_FOR_ACCURACY_CHECK:
             batch_size = min(batch_size, MAX_BATCH_SIZE_FOR_ACCURACY_CHECK[model_name])
 
-        # See https://github.com/pytorch/benchmark/issues/1560
-        if model_name == "speech_transformer":
-            batch_size = 10
-
         # workaround "RuntimeError: not allowed to set torch.backends.cudnn flags"
         torch.backends.__allow_nonbracketed_mutation_flag = True
         extra_args = []
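
For context on the workaround that stays behind (the cudnn-flags comment kept as context in the hunk above), here is a minimal sketch, assuming a recent PyTorch build, of the error it suppresses and the bracketed alternative that the error message recommends. All names used are existing PyTorch APIs, except that torch.backends.__allow_nonbracketed_mutation_flag is a private module-level flag, which is exactly the escape hatch the benchmark runner flips.

import torch
import torch.backends
import torch.backends.cudnn

# Test harnesses can freeze global backend flags; after this call, plain
# attribute assignment on torch.backends.cudnn raises the RuntimeError
# quoted in the comment above.
torch.backends.disable_global_flags()

try:
    torch.backends.cudnn.benchmark = True
except RuntimeError as e:
    print(e)  # "not allowed to set torch.backends.cudnn flags ..."

# The supported ("bracketed") way is the flags() context manager, which
# mutates the cudnn flags only for the duration of the block.
with torch.backends.cudnn.flags(enabled=True, benchmark=True):
    pass

# The benchmark runner instead flips the private escape hatch so that plain
# assignments keep working for the whole run.
torch.backends.__allow_nonbracketed_mutation_flag = True
torch.backends.cudnn.benchmark = True  # no longer raises

The flags() context manager is the API the error message points to; the runner sets the module-level flag instead so that cudnn settings can be assigned directly throughout benchmark setup rather than inside a single with-block.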