update the baseline data for the operator benchmark (#162693)

According to the results of the last four operator benchmark runs, five benchmark cases improved by more than 30% over the current baseline. We therefore update the operator benchmark baseline data, using the average execution time of the four runs as the new baseline for those five cases.
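
As a sanity check, here is a minimal sketch of that update rule (the function and variable names are illustrative, not the actual CI tooling), using the add_M1_N1_K1_cpu numbers from the table below:

```python
# Illustrative sketch of the baseline-update rule, not the benchmark tooling.

def new_baseline(runs: list[float]) -> float:
    """New baseline = average execution time over the benchmark runs."""
    return sum(runs) / len(runs)

old = 3.9497                      # previous baseline execution time
runs = [2.57, 2.54, 2.38, 2.31]   # r1..r4 from the last four runs

avg = new_baseline(runs)          # 2.45 from the rounded per-run values;
                                  # the CSV stores the full-precision 2.459
speedup = old / avg               # ~1.61, i.e. a >30% improvement

assert speedup > 1.3              # the threshold used to select the five cases
print(f"avg={avg:.2f}, speedup={speedup:.2f}")
```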

This PR also adds a pull_request trigger to the operator benchmark workflow, so the workflow can run on pull requests.
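
For reference, a pull_request trigger in a GitHub Actions workflow looks roughly like the sketch below; the paths filter is an assumption for illustration, not copied from this PR's diff, and it would sit alongside whatever triggers the workflow already has:

```yaml
# Hedged sketch of a pull_request trigger; the paths filter is illustrative.
on:
  pull_request:
    paths:
      - benchmarks/operator_benchmark/**
```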

Benchmarking Framework | Benchmarking Module Name | Case Name | tag | run_backward | old baseline | r1 | r2 | r3 | r4 | avg | speedup
-- | -- | -- | -- | -- | -- | -- | -- | -- | -- | -- | --
PyTorch | add | add_M1_N1_K1_cpu | short | FALSE | 3.9497 | 2.57 | 2.54 | 2.38 | 2.31 | 2.45 | 1.61
PyTorch | functional.hardtanh | functional.hardtanh_dims(512,512)_contigFalse_inplaceFalse_dtypetorch.quint8 | short | FALSE | 67.118 | 50.02 | 49.80 | 46.78 | 48.94 | 48.88 | 1.37
PyTorch | relu6 | relu6_dims(512,512)_contigFalse_inplaceFalse_dtypetorch.quint8 | short | FALSE | 68.739 | 51.17 | 51.19 | 48.07 | 50.42 | 50.21 | 1.37
PyTorch | relu6 | relu6_dims(256,1024)_contigFalse_inplaceFalse_dtypetorch.quint8 | short | FALSE | 69.1875 | 51.97 | 52.77 | 50.00 | 51.24 | 51.50 | 1.34
PyTorch | functional.hardtanh | functional.hardtanh_dims(256,1024)_contigFalse_inplaceFalse_dtypetorch.quint8 | short | FALSE | 67.436 | 50.98 | 51.69 | 49.06 | 49.87 | 50.40 | 1.34

speedup = old baseline / avg. Run and average values are rounded to two decimal places here; the CSV diff below stores the full-precision averages (e.g. 2.459 for add_M1_N1_K1_cpu).

@chuanqi129 @huydhn @desertfire @jainapurva

Pull Request resolved: https://github.com/pytorch/pytorch/pull/162693
Approved by: https://github.com/huydhn
Author: LifengWang
Date: 2025-09-12 20:53:26 +00:00
Committed by: PyTorch MergeBot
Parent: 65d642d6db
Commit: f7ea4975ab
2 changed files with 10 additions and 6 deletions


@@ -1,5 +1,5 @@
 Benchmarking Framework,Benchmarking Module Name,Case Name,tag,run_backward,Execution Time
-PyTorch,add,add_M1_N1_K1_cpu,short,FALSE,3.9497
+PyTorch,add,add_M1_N1_K1_cpu,short,FALSE,2.459
 PyTorch,add,add_M64_N64_K64_cpu,short,FALSE,14.3181
 PyTorch,add,add_M64_N64_K128_cpu,short,FALSE,14.6826
 PyTorch,add,add_M1_N1_K1_cpu_bwdall_BACKWARD,short,TRUE,58.1449
@@ -376,10 +376,10 @@ PyTorch,relu6,"relu6_dims(3,4,5)_contigFalse_inplaceFalse_dtypetorch.qint32",sho
 PyTorch,relu6,"relu6_dims(2,3,4,5)_contigFalse_inplaceFalse_dtypetorch.quint8",short,FALSE,9.6588
 PyTorch,relu6,"relu6_dims(2,3,4,5)_contigFalse_inplaceFalse_dtypetorch.qint8",short,FALSE,9.5969
 PyTorch,relu6,"relu6_dims(2,3,4,5)_contigFalse_inplaceFalse_dtypetorch.qint32",short,FALSE,9.547
-PyTorch,relu6,"relu6_dims(512,512)_contigFalse_inplaceFalse_dtypetorch.quint8",short,FALSE,68.739
+PyTorch,relu6,"relu6_dims(512,512)_contigFalse_inplaceFalse_dtypetorch.quint8",short,FALSE,50.21375
 PyTorch,relu6,"relu6_dims(512,512)_contigFalse_inplaceFalse_dtypetorch.qint8",short,FALSE,45.14133333
 PyTorch,relu6,"relu6_dims(512,512)_contigFalse_inplaceFalse_dtypetorch.qint32",short,FALSE,52.6664
-PyTorch,relu6,"relu6_dims(256,1024)_contigFalse_inplaceFalse_dtypetorch.quint8",short,FALSE,69.1875
+PyTorch,relu6,"relu6_dims(256,1024)_contigFalse_inplaceFalse_dtypetorch.quint8",short,FALSE,51.49525
 PyTorch,relu6,"relu6_dims(256,1024)_contigFalse_inplaceFalse_dtypetorch.qint8",short,FALSE,48.3458
 PyTorch,relu6,"relu6_dims(256,1024)_contigFalse_inplaceFalse_dtypetorch.qint32",short,FALSE,62.0719
 PyTorch,functional.hardtanh,"functional.hardtanh_dims(3,4,5)_contigFalse_inplaceFalse_dtypetorch.quint8",short,FALSE,7.5728
@@ -388,10 +388,10 @@ PyTorch,functional.hardtanh,"functional.hardtanh_dims(3,4,5)_contigFalse_inplace
 PyTorch,functional.hardtanh,"functional.hardtanh_dims(2,3,4,5)_contigFalse_inplaceFalse_dtypetorch.quint8",short,FALSE,8.1647
 PyTorch,functional.hardtanh,"functional.hardtanh_dims(2,3,4,5)_contigFalse_inplaceFalse_dtypetorch.qint8",short,FALSE,8.1768
 PyTorch,functional.hardtanh,"functional.hardtanh_dims(2,3,4,5)_contigFalse_inplaceFalse_dtypetorch.qint32",short,FALSE,8.0619
-PyTorch,functional.hardtanh,"functional.hardtanh_dims(512,512)_contigFalse_inplaceFalse_dtypetorch.quint8",short,FALSE,67.118
+PyTorch,functional.hardtanh,"functional.hardtanh_dims(512,512)_contigFalse_inplaceFalse_dtypetorch.quint8",short,FALSE,48.88475
 PyTorch,functional.hardtanh,"functional.hardtanh_dims(512,512)_contigFalse_inplaceFalse_dtypetorch.qint8",short,FALSE,43.702
 PyTorch,functional.hardtanh,"functional.hardtanh_dims(512,512)_contigFalse_inplaceFalse_dtypetorch.qint32",short,FALSE,50.3613
-PyTorch,functional.hardtanh,"functional.hardtanh_dims(256,1024)_contigFalse_inplaceFalse_dtypetorch.quint8",short,FALSE,67.436
+PyTorch,functional.hardtanh,"functional.hardtanh_dims(256,1024)_contigFalse_inplaceFalse_dtypetorch.quint8",short,FALSE,50.3995
 PyTorch,functional.hardtanh,"functional.hardtanh_dims(256,1024)_contigFalse_inplaceFalse_dtypetorch.qint8",short,FALSE,46.9813
 PyTorch,functional.hardtanh,"functional.hardtanh_dims(256,1024)_contigFalse_inplaceFalse_dtypetorch.qint32",short,FALSE,59.2295
 PyTorch,functional.hardsigmoid,"functional.hardsigmoid_dims(3,4,5)_contigFalse_inplaceFalse_dtypetorch.quint8",short,FALSE,6.5189
@@ -1316,4 +1316,4 @@ PyTorch,where,"where_cond_shape(8,16,1)_input_shape(1,)_other_shape(1,)_cpu_dtyp
 PyTorch,where,"where_cond_shape(8,16,1)_input_shape(16,1)_other_shape(8,16,1)_cpu_dtypetorch.float32",short,FALSE,5.763
 PyTorch,where,"where_cond_shape(8,16,1)_input_shape(8,1,1)_other_shape(1,)_cpu_dtypetorch.float32",short,FALSE,5.744666667
 PyTorch,clamp,clamp_M512_N512_cpu,short,FALSE,15.26233333
-PyTorch,gelu,gelu_M512_N512_cpu,short,FALSE,31.33166667
\ No newline at end of file
+PyTorch,gelu,gelu_M512_N512_cpu,short,FALSE,31.33166667
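
To show how rows like those above could be rewritten with the new averages, here is a hedged sketch of an update script. The file path is a hypothetical placeholder and the script is illustrative, not part of the PR; the column layout and values come from the diff above:

```python
import csv

# Hypothetical path; the diff above shows the CSV's actual column layout.
PATH = "benchmarks/operator_benchmark/baseline.csv"

# Case name -> new baseline (full-precision average of the four runs).
NEW_BASELINES = {
    "add_M1_N1_K1_cpu": 2.459,
    "relu6_dims(512,512)_contigFalse_inplaceFalse_dtypetorch.quint8": 50.21375,
    "relu6_dims(256,1024)_contigFalse_inplaceFalse_dtypetorch.quint8": 51.49525,
    "functional.hardtanh_dims(512,512)_contigFalse_inplaceFalse_dtypetorch.quint8": 48.88475,
    "functional.hardtanh_dims(256,1024)_contigFalse_inplaceFalse_dtypetorch.quint8": 50.3995,
}

# Read all rows, swap in the new baselines for the five updated cases.
with open(PATH, newline="") as f:
    rows = list(csv.DictReader(f))

for row in rows:
    if row["Case Name"] in NEW_BASELINES:
        row["Execution Time"] = str(NEW_BASELINES[row["Case Name"]])

# Write the CSV back with the same header and column order.
with open(PATH, "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=rows[0].keys())
    writer.writeheader()
    writer.writerows(rows)
```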
