Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-22 22:25:10 +08:00
Compare commits
1329 Commits
@@ -1,5 +1,5 @@
 0.6b
 manylinux_2_17
-rocm6
-04b5df8c8123f90cba3ede7e971e6fbc6040d506
-3db6ecbc915893ff967abd6e1b43bd5f54949868873be60dc802086c3863e648
+rocm6.1
+7f07e8a1cb1f99627eb6d77f5c0e9295c775f3c7
+77c29fa3f3b614e187d7213d745e989a92708cee2bc6020419ab49019af399d1
@@ -373,6 +373,13 @@ case "$image" in
     CONDA_CMAKE=yes
     EXECUTORCH=yes
     ;;
+  pytorch-linux-jammy-py3.12-halide)
+    CUDA_VERSION=12.4
+    ANACONDA_PYTHON_VERSION=3.12
+    GCC_VERSION=11
+    CONDA_CMAKE=yes
+    HALIDE=yes
+    ;;
   pytorch-linux-focal-linter)
     # TODO: Use 3.9 here because of this issue https://github.com/python/mypy/issues/13627.
     # We will need to update mypy version eventually, but that's for another day. The task
@@ -490,6 +497,7 @@ docker build \
   --build-arg "DOCS=${DOCS}" \
   --build-arg "INDUCTOR_BENCHMARKS=${INDUCTOR_BENCHMARKS}" \
   --build-arg "EXECUTORCH=${EXECUTORCH}" \
+  --build-arg "HALIDE=${HALIDE}" \
   --build-arg "XPU_VERSION=${XPU_VERSION}" \
   --build-arg "ACL=${ACL:-}" \
   --build-arg "SKIP_SCCACHE_INSTALL=${SKIP_SCCACHE_INSTALL:-}" \
@@ -1 +1 @@
-d4b3e5cc607e97afdba79dc90f8ef968142f347c
+c572f9e509b5ec5d56f4d218271e36269bba244f
.ci/docker/ci_commit_pins/halide.txt (new file, 1 line)
@@ -0,0 +1 @@
+340136fec6d3ebc73e7a19eba1663e9b0ba8ab2d
@@ -1 +1 @@
-01cbe5045a6898c9a925f01435c8277b2fe6afcc
+21eae954efa5bf584da70324b640288c3ee7aede
@@ -1 +1 @@
-b8c64f64c18d8cac598b3adb355c21e7439c21de
+1b2f15840e0d70eec50d84c7a0575cb835524def
@@ -1 +1 @@
-45fff310c891f5a92d55445adf8cc9d29df5841e
+dedb7bdf339a3546896d4820366ca562c586bfa0
@@ -9,7 +9,7 @@ TARBALL='aotriton.tar.bz2'
 read -d "\n" VER MANYLINUX ROCMBASE PINNED_COMMIT SHA256 < aotriton_version.txt || true
 ARCH=$(uname -m)
 AOTRITON_INSTALL_PREFIX="$1"
-AOTRITON_URL="https://github.com/ROCm/aotriton/releases/download/${VER}/aotriton-${VER}-${MANYLINUX}_${ARCH}-${ROCMBASE}.tar.bz2"
+AOTRITON_URL="https://github.com/ROCm/aotriton/releases/download/${VER}/aotriton-${VER}-${MANYLINUX}_${ARCH}-${ROCMBASE}-shared.tar.bz2"
 
 cd "${AOTRITON_INSTALL_PREFIX}"
 # Must use -L to follow redirects
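For orientation, the fields read from aotriton_version.txt above correspond one-to-one to the values changed in the first hunk of this diff; a sketch of the URL they compose (ARCH assumed to be x86_64 here):

    # VER=0.6b  MANYLINUX=manylinux_2_17  ROCMBASE=rocm6.1  (plus PINNED_COMMIT and SHA256)
    # so the new -shared tarball URL expands to roughly:
    #   https://github.com/ROCm/aotriton/releases/download/0.6b/aotriton-0.6b-manylinux_2_17_x86_64-rocm6.1-shared.tar.bz2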
@@ -85,7 +85,7 @@ fi
 else
   CONDA_COMMON_DEPS="astunparse pyyaml mkl=2021.4.0 mkl-include=2021.4.0 setuptools"
 
-  if [ "$ANACONDA_PYTHON_VERSION" = "3.11" ] || [ "$ANACONDA_PYTHON_VERSION" = "3.12" ]; then
+  if [ "$ANACONDA_PYTHON_VERSION" = "3.11" ] || [ "$ANACONDA_PYTHON_VERSION" = "3.12" ] || [ "$ANACONDA_PYTHON_VERSION" = "3.13" ]; then
     conda_install numpy=1.26.0 ${CONDA_COMMON_DEPS}
   else
     conda_install numpy=1.21.2 ${CONDA_COMMON_DEPS}
@@ -37,6 +37,9 @@ install_conda_dependencies() {
 
 install_pip_dependencies() {
   pushd executorch/.ci/docker
+  # Install PyTorch CPU build beforehand to avoid installing the much bigger CUDA
+  # binaries later, ExecuTorch only needs CPU
+  pip_install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
   # Install all Python dependencies
   pip_install -r requirements-ci.txt
   popd
@@ -44,13 +47,14 @@ install_pip_dependencies() {
 
 setup_executorch() {
   pushd executorch
-  source .ci/scripts/utils.sh
-  # Setup swiftshader and Vulkan SDK which are required to build the Vulkan delegate
-  as_jenkins bash .ci/scripts/setup-vulkan-linux-deps.sh
-
-  install_flatc_from_source
-  pip_install .
+  export PYTHON_EXECUTABLE=python
+  export EXECUTORCH_BUILD_PYBIND=ON
+  export CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"
 
   # Make sure that all the newly generate files are owned by Jenkins
   chown -R jenkins .
+  as_jenkins .ci/scripts/setup-linux.sh cmake
   popd
 }
 
.ci/docker/common/install_halide.sh (new file, 46 lines)
@@ -0,0 +1,46 @@
+#!/bin/bash
+set -ex
+
+source "$(dirname "${BASH_SOURCE[0]}")/common_utils.sh"
+
+COMMIT=$(get_pinned_commit halide)
+test -n "$COMMIT"
+
+# activate conda to populate CONDA_PREFIX
+test -n "$ANACONDA_PYTHON_VERSION"
+eval "$(conda shell.bash hook)"
+conda activate py_$ANACONDA_PYTHON_VERSION
+
+if [ -n "${UBUNTU_VERSION}" ];then
+  apt update
+  apt-get install -y lld liblld-15-dev libpng-dev libjpeg-dev libgl-dev \
+    libopenblas-dev libeigen3-dev libatlas-base-dev libzstd-dev
+fi
+
+conda_install numpy scipy imageio cmake ninja
+
+git clone --depth 1 --branch release/16.x --recursive https://github.com/llvm/llvm-project.git
+cmake -DCMAKE_BUILD_TYPE=Release \
+  -DLLVM_ENABLE_PROJECTS="clang" \
+  -DLLVM_TARGETS_TO_BUILD="X86;NVPTX" \
+  -DLLVM_ENABLE_TERMINFO=OFF -DLLVM_ENABLE_ASSERTIONS=ON \
+  -DLLVM_ENABLE_EH=ON -DLLVM_ENABLE_RTTI=ON -DLLVM_BUILD_32_BITS=OFF \
+  -S llvm-project/llvm -B llvm-build -G Ninja
+cmake --build llvm-build
+cmake --install llvm-build --prefix llvm-install
+export LLVM_ROOT=`pwd`/llvm-install
+export LLVM_CONFIG=$LLVM_ROOT/bin/llvm-config
+
+git clone https://github.com/halide/Halide.git
+pushd Halide
+git checkout ${COMMIT} && git submodule update --init --recursive
+pip_install -r requirements.txt
+cmake -G Ninja -DCMAKE_BUILD_TYPE=Release -S . -B build
+cmake --build build
+test -e ${CONDA_PREFIX}/lib/python3 || ln -s python${ANACONDA_PYTHON_VERSION} ${CONDA_PREFIX}/lib/python3
+cmake --install build --prefix ${CONDA_PREFIX}
+chown -R jenkins ${CONDA_PREFIX}
+popd
+rm -rf Halide llvm-build llvm-project llvm-install
+
+python -c "import halide" # check for errors
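One note on the pin lookup above: get_pinned_commit comes from common_utils.sh, whose body is not part of this diff. Given that the Dockerfile hunks below COPY ci_commit_pins/halide.txt next to the script as halide.txt, a plausible minimal shape is:

    # Hypothetical sketch of the helper; the real common_utils.sh implementation is not shown here.
    get_pinned_commit() {
      cat "${1}.txt"   # "halide" -> halide.txt -> 340136fec6d3ebc73e7a19eba1663e9b0ba8ab2d
    }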
@@ -33,7 +33,9 @@ pip_install coloredlogs packaging
 pip_install onnxruntime==1.18
 pip_install onnx==1.16.0
 # pip_install "onnxscript@git+https://github.com/microsoft/onnxscript@3e869ef8ccf19b5ebd21c10d3e9c267c9a9fa729" --no-deps
-pip_install onnxscript==0.1.0.dev20240523 --no-deps
+pip_install onnxscript==0.1.0.dev20240613 --no-deps
+# required by onnxscript
+pip_install ml_dtypes
 
 # Cache the transformers model to be used later by ONNX tests. We need to run the transformers
 # package to download the model. By default, the model is cached at ~/.cache/huggingface/hub/
@@ -85,10 +85,10 @@ librosa>=0.6.2 ; python_version < "3.11"
 #Pinned versions:
 #test that import:
 
-mypy==1.9.0
+mypy==1.10.0
 # Pin MyPy version because new errors are likely to appear with each release
 #Description: linter
-#Pinned versions: 1.9.0
+#Pinned versions: 1.10.0
 #test that import: test_typing.py, test_type_hints.py
 
 networkx==2.8.8
@@ -306,7 +306,7 @@ pywavelets==1.5.0 ; python_version >= "3.12"
 #Pinned versions: 1.4.1
 #test that import:
 
-lxml==5.0.0.
+lxml==5.0.0
 #Description: This is a requirement of unittest-xml-reporting
 
 # Python-3.9 binaries
@@ -103,6 +103,14 @@ COPY triton_version.txt triton_version.txt
 RUN if [ -n "${TRITON}" ]; then bash ./install_triton.sh; fi
 RUN rm install_triton.sh common_utils.sh triton.txt triton_version.txt
 
+ARG HALIDE
+# Build and install halide
+COPY ./common/install_halide.sh install_halide.sh
+COPY ./common/common_utils.sh common_utils.sh
+COPY ci_commit_pins/halide.txt halide.txt
+RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi
+RUN rm install_halide.sh common_utils.sh halide.txt
+
 # Install ccache/sccache (do this last, so we get priority in PATH)
 COPY ./common/install_cache.sh install_cache.sh
 ENV PATH /opt/cache/bin:$PATH
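The HALIDE build argument above completes a chain that is entirely visible in this diff:

    # .ci/docker/build.sh: the pytorch-linux-jammy-py3.12-halide image sets HALIDE=yes
    # build.sh passes it through:  docker build --build-arg "HALIDE=${HALIDE}" ...
    # Dockerfile:                  RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi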
@@ -155,6 +155,14 @@ COPY ci_commit_pins/executorch.txt executorch.txt
 RUN if [ -n "${EXECUTORCH}" ]; then bash ./install_executorch.sh; fi
 RUN rm install_executorch.sh common_utils.sh executorch.txt
 
+ARG HALIDE
+# Build and install halide
+COPY ./common/install_halide.sh install_halide.sh
+COPY ./common/common_utils.sh common_utils.sh
+COPY ci_commit_pins/halide.txt halide.txt
+RUN if [ -n "${HALIDE}" ]; then bash ./install_halide.sh; fi
+RUN rm install_halide.sh common_utils.sh halide.txt
+
 ARG ONNX
 # Install ONNX dependencies
 COPY ./common/install_onnx.sh ./common/common_utils.sh ./
@@ -230,6 +230,10 @@ if [[ "${BUILD_ENVIRONMENT}" != *android* && "${BUILD_ENVIRONMENT}" != *cuda* ]]
   export BUILD_STATIC_RUNTIME_BENCHMARK=ON
 fi
 
+if [[ "$BUILD_ENVIRONMENT" == *-debug* ]]; then
+  export CMAKE_BUILD_TYPE=RelWithAssert
+fi
+
 # Do not change workspace permissions for ROCm CI jobs
 # as it can leave workspace with bad permissions for cancelled jobs
 if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then
@@ -284,12 +288,26 @@ else
     # Which should be backward compatible with Numpy-1.X
     python -mpip install --pre numpy==2.0.0rc1
   fi
-  WERROR=1 python setup.py bdist_wheel
+
+  WERROR=1 python setup.py clean
+
+  if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
+    BUILD_LIBTORCH_WHL=1 BUILD_PYTHON_ONLY=0 python setup.py bdist_wheel
+    BUILD_LIBTORCH_WHL=0 BUILD_PYTHON_ONLY=1 python setup.py bdist_wheel --cmake
+  else
+    WERROR=1 python setup.py bdist_wheel
+  fi
 else
   python setup.py clean
   if [[ "$BUILD_ENVIRONMENT" == *xla* ]]; then
     source .ci/pytorch/install_cache_xla.sh
   fi
-  python setup.py bdist_wheel
+  if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
+    echo "USE_SPLIT_BUILD cannot be used with xla or rocm"
+    exit 1
+  else
+    python setup.py bdist_wheel
+  fi
 fi
 pip_install_whl "$(echo dist/*.whl)"
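As a reading aid for the split build introduced above: the two bdist_wheel passes yield two wheels, matching the torch_no_python-* and torch-* patterns that the binary-test script later globs from /final_pkgs (exact filenames are illustrative):

    # Pass 1: BUILD_LIBTORCH_WHL=1 BUILD_PYTHON_ONLY=0  ->  dist/torch_no_python-*.whl  (libtorch C++ core)
    # Pass 2: BUILD_LIBTORCH_WHL=0 BUILD_PYTHON_ONLY=1  ->  dist/torch-*.whl            (Python layer)
    # Both are then installed offline via: pip_install_whl "$(echo dist/*.whl)"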
@@ -328,9 +346,10 @@ else
   CUSTOM_OP_TEST="$PWD/test/custom_operator"
   python --version
   SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
+
   mkdir -p "$CUSTOM_OP_BUILD"
   pushd "$CUSTOM_OP_BUILD"
-  cmake "$CUSTOM_OP_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPython_EXECUTABLE="$(which python)" \
+  cmake "$CUSTOM_OP_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch;$SITE_PACKAGES" -DPython_EXECUTABLE="$(which python)" \
     -DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM"
   make VERBOSE=1
   popd
@@ -343,7 +362,7 @@ else
   SITE_PACKAGES="$(python -c 'from distutils.sysconfig import get_python_lib; print(get_python_lib())')"
   mkdir -p "$JIT_HOOK_BUILD"
   pushd "$JIT_HOOK_BUILD"
-  cmake "$JIT_HOOK_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPython_EXECUTABLE="$(which python)" \
+  cmake "$JIT_HOOK_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch;$SITE_PACKAGES" -DPython_EXECUTABLE="$(which python)" \
     -DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM"
   make VERBOSE=1
   popd
@@ -355,7 +374,7 @@ else
   python --version
   mkdir -p "$CUSTOM_BACKEND_BUILD"
   pushd "$CUSTOM_BACKEND_BUILD"
-  cmake "$CUSTOM_BACKEND_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch" -DPython_EXECUTABLE="$(which python)" \
+  cmake "$CUSTOM_BACKEND_TEST" -DCMAKE_PREFIX_PATH="$SITE_PACKAGES/torch;$SITE_PACKAGES" -DPython_EXECUTABLE="$(which python)" \
     -DCMAKE_MODULE_PATH="$CUSTOM_TEST_MODULE_PATH" -DUSE_ROCM="$CUSTOM_TEST_USE_ROCM"
   make VERBOSE=1
   popd
@@ -56,9 +56,29 @@ function assert_git_not_dirty() {
 function pip_install_whl() {
   # This is used to install PyTorch and other build artifacts wheel locally
   # without using any network connection
-  python3 -mpip install --no-index --no-deps "$@"
+
+  # Convert the input arguments into an array
+  local args=("$@")
+
+  # Check if the first argument contains multiple paths separated by spaces
+  if [[ "${args[0]}" == *" "* ]]; then
+    # Split the string by spaces into an array
+    IFS=' ' read -r -a paths <<< "${args[0]}"
+    # Loop through each path and install individually
+    for path in "${paths[@]}"; do
+      echo "Installing $path"
+      python3 -mpip install --no-index --no-deps "$path"
+    done
+  else
+    # Loop through each argument and install individually
+    for path in "${args[@]}"; do
+      echo "Installing $path"
+      python3 -mpip install --no-index --no-deps "$path"
+    done
+  fi
 }
 
 
 function pip_install() {
   # retry 3 times
   # old versions of pip don't have the "--progress-bar" flag
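Why the rewrite above loops: callers pass either one space-joined string or separate arguments, and both must keep installing offline. A usage sketch (paths illustrative):

    pip_install_whl "$(echo dist/*.whl)"        # single space-separated string, as build.sh does
    pip_install_whl dist/torch-*.whl            # or ordinary separate arguments
    # either way, each wheel is installed one at a time with --no-index --no-deps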
@@ -188,28 +208,6 @@ function clone_pytorch_xla() {
   fi
 }
 
-function checkout_install_torchdeploy() {
-  local commit
-  commit=$(get_pinned_commit multipy)
-  pushd ..
-  git clone --recurse-submodules https://github.com/pytorch/multipy.git
-  pushd multipy
-  git checkout "${commit}"
-  python multipy/runtime/example/generate_examples.py
-  BUILD_CUDA_TESTS=1 pip install -e .
-  popd
-  popd
-}
-
-function test_torch_deploy(){
-  pushd ..
-  pushd multipy
-  ./multipy/runtime/build/test_deploy
-  ./multipy/runtime/build/test_deploy_gpu
-  popd
-  popd
-}
-
 function checkout_install_torchbench() {
   local commit
   commit=$(get_pinned_commit torchbench)
@@ -224,6 +222,8 @@ function checkout_install_torchbench() {
     # to install and test other models
     python install.py --continue_on_fail
   fi
+  echo "Print all dependencies after TorchBench is installed"
+  python -mpip freeze
   popd
 }
 
@@ -18,8 +18,9 @@ time python test/run_test.py --verbose -i distributed/test_c10d_gloo
 time python test/run_test.py --verbose -i distributed/test_c10d_nccl
 time python test/run_test.py --verbose -i distributed/test_c10d_spawn_gloo
 time python test/run_test.py --verbose -i distributed/test_c10d_spawn_nccl
-time python test/run_test.py --verbose -i distributed/test_cuda_p2p
+time python test/run_test.py --verbose -i distributed/test_compute_comm_reordering
 time python test/run_test.py --verbose -i distributed/test_store
+time python test/run_test.py --verbose -i distributed/test_symmetric_memory
 time python test/run_test.py --verbose -i distributed/test_pg_wrapper
 time python test/run_test.py --verbose -i distributed/rpc/cuda/test_tensorpipe_agent
 # FSDP tests
@@ -249,9 +249,7 @@ fi
 # This tests that the debug asserts are working correctly.
 if [[ "$BUILD_ENVIRONMENT" == *-debug* ]]; then
   echo "We are in debug mode: $BUILD_ENVIRONMENT. Expect the python assertion to fail"
-  # TODO: Enable the check after we setup the build to run debug asserts without having
-  # to do a full (and slow) debug build
-  # (cd test && ! get_exit_code python -c "import torch; torch._C._crash_if_debug_asserts_fail(424242)")
+  (cd test && ! get_exit_code python -c "import torch; torch._C._crash_if_debug_asserts_fail(424242)")
 elif [[ "$BUILD_ENVIRONMENT" != *-bazel-* ]]; then
   # Noop when debug is disabled. Skip bazel jobs because torch isn't available there yet.
   echo "We are not in debug mode: $BUILD_ENVIRONMENT. Expect the assertion to pass"
@@ -264,18 +262,6 @@ elif [[ $TEST_CONFIG == 'nogpu_AVX512' ]]; then
   export ATEN_CPU_CAPABILITY=avx2
 fi
 
-# temp workarounds for https://github.com/pytorch/pytorch/issues/126692, remove when fixed
-if [[ "$BUILD_ENVIRONMENT" != *-bazel-* ]]; then
-  pushd test
-  CUDA_VERSION=$(python -c "import torch; print(torch.version.cuda)")
-  if [ "$CUDA_VERSION" == "12.4" ]; then
-    ISCUDA124="cu124"
-  else
-    ISCUDA124=""
-  fi
-  popd
-fi
-
 test_python_legacy_jit() {
   time python test/run_test.py --include test_jit_legacy test_jit_fuser_legacy --verbose
   assert_git_not_dirty
@@ -289,6 +275,9 @@ test_python_shard() {
 
   # Bare --include flag is not supported and quoting for lint ends up with flag not being interpreted correctly
   # shellcheck disable=SC2086
+
+  # modify LD_LIBRARY_PATH to ensure it has the conda env.
+  # This set of tests has been shown to be buggy without it for the split-build
   time python test/run_test.py --exclude-jit-executor --exclude-distributed-tests $INCLUDE_CLAUSE --shard "$1" "$NUM_TEST_SHARDS" --verbose $PYTHON_TEST_EXTRA_OPTION
 
   assert_git_not_dirty
@@ -347,17 +336,31 @@ test_inductor_distributed() {
   assert_git_not_dirty
 }
 
-test_inductor() {
-  python tools/dynamo/verify_dynamo.py
-  python test/run_test.py --inductor --include test_modules test_ops test_ops_gradients test_torch --verbose
-  # Do not add --inductor for the following inductor unit tests, otherwise we will fail because of nested dynamo state
-  python test/run_test.py --include inductor/test_torchinductor inductor/test_torchinductor_opinfo inductor/test_aot_inductor --verbose
+test_inductor_shard() {
+  if [[ -z "$NUM_TEST_SHARDS" ]]; then
+    echo "NUM_TEST_SHARDS must be defined to run a Python test shard"
+    exit 1
+  fi
+
+  python tools/dynamo/verify_dynamo.py
+  python test/run_test.py --inductor \
+    --include test_modules test_ops test_ops_gradients test_torch \
+    --shard "$1" "$NUM_TEST_SHARDS" \
+    --verbose
+
+  # Do not add --inductor for the following inductor unit tests, otherwise we will fail because of nested dynamo state
+  python test/run_test.py \
+    --include inductor/test_torchinductor inductor/test_torchinductor_opinfo inductor/test_aot_inductor \
+    --shard "$1" "$NUM_TEST_SHARDS" \
+    --verbose
 }
 
 test_inductor_aoti() {
   # docker build uses bdist_wheel which does not work with test_aot_inductor
   # TODO: need a faster way to build
   if [[ "$BUILD_ENVIRONMENT" != *rocm* ]]; then
     BUILD_AOT_INDUCTOR_TEST=1 python setup.py develop
     CPP_TESTS_DIR="${BUILD_BIN_DIR}" LD_LIBRARY_PATH="${TORCH_LIB_DIR}" python test/run_test.py --cpp --verbose -i cpp/test_aoti_abi_check cpp/test_aoti_inference
   fi
 }
@@ -376,7 +379,7 @@ test_inductor_cpp_wrapper_abi_compatible() {
     --output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_training.csv"
   python benchmarks/dynamo/check_accuracy.py \
     --actual "$TEST_REPORTS_DIR/inductor_cpp_wrapper_training.csv" \
-    --expected "benchmarks/dynamo/ci_expected_accuracy/${ISCUDA124}/inductor_timm_training.csv"
+    --expected "benchmarks/dynamo/ci_expected_accuracy/inductor_timm_training.csv"
 }
 
 # "Global" flags for inductor benchmarking controlled by TEST_CONFIG
@@ -401,7 +404,7 @@ if [[ "${TEST_CONFIG}" == *dynamic* ]]; then
   DYNAMO_BENCHMARK_FLAGS+=(--dynamic-shapes --dynamic-batch-only)
 fi
 
-if [[ "${TEST_CONFIG}" == *cpu_inductor* ]]; then
+if [[ "${TEST_CONFIG}" == *cpu_inductor* || "${TEST_CONFIG}" == *cpu_aot_inductor* ]]; then
   DYNAMO_BENCHMARK_FLAGS+=(--device cpu)
 else
   DYNAMO_BENCHMARK_FLAGS+=(--device cuda)
@@ -526,9 +529,10 @@ test_single_dynamo_benchmark() {
     test_perf_for_dashboard "$suite" \
       "${DYNAMO_BENCHMARK_FLAGS[@]}" "$@" "${partition_flags[@]}"
   else
-    if [[ "${TEST_CONFIG}" == *aot_inductor* ]]; then
+    if [[ "${TEST_CONFIG}" == *aot_inductor* && "${TEST_CONFIG}" != *cpu_aot_inductor* ]]; then
       # Test AOTInductor with the ABI-compatible mode on CI
       # This can be removed once the ABI-compatible mode becomes default.
+      # For CPU device, we perfer non ABI-compatible mode on CI when testing AOTInductor.
       export TORCHINDUCTOR_ABI_COMPATIBLE=1
     fi
     python "benchmarks/dynamo/$suite.py" \
@@ -538,10 +542,10 @@ test_single_dynamo_benchmark() {
       --output "$TEST_REPORTS_DIR/${name}_${suite}.csv"
     python benchmarks/dynamo/check_accuracy.py \
       --actual "$TEST_REPORTS_DIR/${name}_$suite.csv" \
-      --expected "benchmarks/dynamo/ci_expected_accuracy/${ISCUDA124}/${TEST_CONFIG}_${name}.csv"
+      --expected "benchmarks/dynamo/ci_expected_accuracy/${TEST_CONFIG}_${name}.csv"
     python benchmarks/dynamo/check_graph_breaks.py \
       --actual "$TEST_REPORTS_DIR/${name}_$suite.csv" \
-      --expected "benchmarks/dynamo/ci_expected_accuracy/${ISCUDA124}/${TEST_CONFIG}_${name}.csv"
+      --expected "benchmarks/dynamo/ci_expected_accuracy/${TEST_CONFIG}_${name}.csv"
   fi
 }
@@ -550,6 +554,11 @@ test_inductor_micro_benchmark() {
   python benchmarks/gpt_fast/benchmark.py --output "${TEST_REPORTS_DIR}/gpt_fast_benchmark.csv"
 }
 
+test_inductor_halide() {
+  python test/run_test.py --include inductor/test_halide.py --verbose
+  assert_git_not_dirty
+}
+
 test_dynamo_benchmark() {
   # Usage: test_dynamo_benchmark huggingface 0
   TEST_REPORTS_DIR=$(pwd)/test/test-reports
@@ -564,11 +573,15 @@ test_dynamo_benchmark() {
   elif [[ "${TEST_CONFIG}" == *perf* ]]; then
     test_single_dynamo_benchmark "dashboard" "$suite" "$shard_id" "$@"
   else
-    if [[ "${TEST_CONFIG}" == *cpu_inductor* ]]; then
+    if [[ "${TEST_CONFIG}" == *cpu_inductor* || "${TEST_CONFIG}" == *cpu_aot_inductor* ]]; then
+      local dt="float32"
+      if [[ "${TEST_CONFIG}" == *amp* ]]; then
+        dt="amp"
+      fi
       if [[ "${TEST_CONFIG}" == *freezing* ]]; then
-        test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --float32 --freezing "$@"
+        test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --"$dt" --freezing "$@"
      else
-        test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --float32 "$@"
+        test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --"$dt" "$@"
      fi
    elif [[ "${TEST_CONFIG}" == *aot_inductor* ]]; then
      test_single_dynamo_benchmark "inference" "$suite" "$shard_id" --inference --bfloat16 "$@"
@@ -592,7 +605,7 @@ test_inductor_torchbench_smoketest_perf() {
     --bfloat16 --inference --inductor --only moco --output "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv"
   python benchmarks/dynamo/check_accuracy.py \
     --actual "$TEST_REPORTS_DIR/inductor_cpp_wrapper_inference.csv" \
-    --expected "benchmarks/dynamo/ci_expected_accuracy/${ISCUDA124}/inductor_torchbench_inference.csv"
+    --expected "benchmarks/dynamo/ci_expected_accuracy/inductor_torchbench_inference.csv"
 
   python benchmarks/dynamo/torchbench.py --device cuda --performance --backend inductor --float16 --training \
     --batch-size-file "$(realpath benchmarks/dynamo/torchbench_models_list.txt)" --only hf_Bert \
@@ -607,13 +620,8 @@ test_inductor_torchbench_smoketest_perf() {
   # https://github.com/pytorch/pytorch/actions/runs/7158691360/job/19491437314,
   # and thus we lower its threshold to reduce flakiness. If this continues to be a problem,
   # we switch to use some other model.
-  # Use 4.7 for cuda 12.4, change back to 4.9 after fixing https://github.com/pytorch/pytorch/issues/126692
-  if [ "$CUDA_VERSION" == "12.4" ]; then
-    THRESHOLD=4.7
-  else
-    THRESHOLD=4.9
-  fi
-  python benchmarks/dynamo/check_perf_csv.py -f "$TEST_REPORTS_DIR/inductor_inference_smoketest.csv" -t $THRESHOLD
+  # lowering threshold from 4.9 to 4.7 for cu124. Will bump it up after cuda 12.4.0->12.4.1 update
+  python benchmarks/dynamo/check_perf_csv.py -f "$TEST_REPORTS_DIR/inductor_inference_smoketest.csv" -t 4.7
 
   # Check memory compression ratio for a few models
   for test in hf_Albert timm_vision_transformer; do
@@ -632,7 +640,7 @@ test_inductor_torchbench_smoketest_perf() {
       --only $test --output "$TEST_REPORTS_DIR/inductor_warm_start_smoketest_$test.csv"
     python benchmarks/dynamo/check_accuracy.py \
       --actual "$TEST_REPORTS_DIR/inductor_warm_start_smoketest_$test.csv" \
-      --expected "benchmarks/dynamo/ci_expected_accuracy/${ISCUDA124}/inductor_huggingface_training.csv"
+      --expected "benchmarks/dynamo/ci_expected_accuracy/inductor_huggingface_training.csv"
   done
 }
@@ -1169,15 +1177,21 @@ test_executorch() {
 
   pushd /executorch
 
-  # NB: We need to build ExecuTorch runner here and not inside the Docker image
-  # because it depends on PyTorch
+  export PYTHON_EXECUTABLE=python
+  export EXECUTORCH_BUILD_PYBIND=ON
+  export CMAKE_ARGS="-DEXECUTORCH_BUILD_XNNPACK=ON -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON"
+
+  # NB: We need to rebuild ExecuTorch runner here because it depends on PyTorch
+  # from the PR
   # shellcheck disable=SC1091
   source .ci/scripts/utils.sh
-  build_executorch_runner "cmake"
+  source .ci/scripts/setup-linux.sh cmake
 
   echo "Run ExecuTorch unit tests"
   pytest -v -n auto
   # shellcheck disable=SC1091
   LLVM_PROFDATA=llvm-profdata-12 LLVM_COV=llvm-cov-12 bash test/run_oss_cpp_tests.sh
 
   echo "Run ExecuTorch regression tests for some models"
   # NB: This is a sample model, more can be added here
-  export PYTHON_EXECUTABLE=python
   # TODO(huydhn): Add more coverage here using ExecuTorch's gather models script
   # shellcheck disable=SC1091
   source .ci/scripts/test.sh mv3 cmake xnnpack-quantization-delegation ''
@@ -1237,11 +1251,10 @@ elif [[ "$TEST_CONFIG" == distributed ]]; then
   if [[ "${SHARD_NUMBER}" == 1 ]]; then
     test_rpc
   fi
-elif [[ "$TEST_CONFIG" == deploy ]]; then
-  checkout_install_torchdeploy
-  test_torch_deploy
 elif [[ "${TEST_CONFIG}" == *inductor_distributed* ]]; then
   test_inductor_distributed
+elif [[ "${TEST_CONFIG}" == *inductor-halide* ]]; then
+  test_inductor_halide
 elif [[ "${TEST_CONFIG}" == *inductor-micro-benchmark* ]]; then
   test_inductor_micro_benchmark
 elif [[ "${TEST_CONFIG}" == *huggingface* ]]; then
@@ -1253,13 +1266,14 @@ elif [[ "${TEST_CONFIG}" == *timm* ]]; then
   id=$((SHARD_NUMBER-1))
   test_dynamo_benchmark timm_models "$id"
 elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then
-  if [[ "${TEST_CONFIG}" == *cpu_inductor* ]]; then
+  if [[ "${TEST_CONFIG}" == *cpu_inductor* || "${TEST_CONFIG}" == *cpu_aot_inductor* ]]; then
     install_torchaudio cpu
   else
     install_torchaudio cuda
   fi
   install_torchtext
   install_torchvision
+  TORCH_CUDA_ARCH_LIST="8.0;8.6" pip_install git+https://github.com/pytorch/ao.git
   id=$((SHARD_NUMBER-1))
   # https://github.com/opencv/opencv-python/issues/885
   pip_install opencv-python==4.8.0.74
@@ -1278,7 +1292,7 @@ elif [[ "${TEST_CONFIG}" == *torchbench* ]]; then
   checkout_install_torchbench
   # Do this after checkout_install_torchbench to ensure we clobber any
   # nightlies that torchbench may pull in
-  if [[ "${TEST_CONFIG}" != *cpu_inductor* ]]; then
+  if [[ "${TEST_CONFIG}" != *cpu_inductor* && "${TEST_CONFIG}" != *cpu_aot_inductor* ]]; then
     install_torchrec_and_fbgemm
   fi
   PYTHONPATH=$(pwd)/torchbench test_dynamo_benchmark torchbench "$id"
@@ -1286,17 +1300,19 @@
 elif [[ "${TEST_CONFIG}" == *inductor_cpp_wrapper_abi_compatible* ]]; then
   install_torchvision
   test_inductor_cpp_wrapper_abi_compatible
-elif [[ "${TEST_CONFIG}" == *inductor* && "${SHARD_NUMBER}" == 1 ]]; then
+elif [[ "${TEST_CONFIG}" == *inductor* ]]; then
   install_torchvision
-  test_inductor
-  test_inductor_distributed
-elif [[ "${TEST_CONFIG}" == *dynamo* && "${SHARD_NUMBER}" == 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
-  install_torchvision
-  test_dynamo_shard 1
-  test_aten
-elif [[ "${TEST_CONFIG}" == *dynamo* && $SHARD_NUMBER -gt 1 && $NUM_TEST_SHARDS -gt 1 ]]; then
+  test_inductor_shard "${SHARD_NUMBER}"
+  if [[ "${SHARD_NUMBER}" == 1 ]]; then
+    test_inductor_aoti
+    test_inductor_distributed
+  fi
+elif [[ "${TEST_CONFIG}" == *dynamo* ]]; then
   install_torchvision
   test_dynamo_shard "${SHARD_NUMBER}"
+  if [[ "${SHARD_NUMBER}" == 1 ]]; then
+    test_aten
+  fi
 elif [[ "${BUILD_ENVIRONMENT}" == *rocm* && -n "$TESTS_TO_INCLUDE" ]]; then
   install_torchvision
   test_python_shard "$SHARD_NUMBER"
@@ -97,8 +97,16 @@ if [[ "$PACKAGE_TYPE" == conda ]]; then
   )
 elif [[ "$PACKAGE_TYPE" != libtorch ]]; then
   if [[ "\$BUILD_ENVIRONMENT" != *s390x* ]]; then
-    pip install "\$pkg" --index-url "https://download.pytorch.org/whl/\${CHANNEL}/${DESIRED_CUDA}"
-    retry pip install -q numpy protobuf typing-extensions
+    if [[ "$USE_SPLIT_BUILD" == "true" ]]; then
+      pkg_no_python="$(ls -1 /final_pkgs/torch_no_python* | sort |tail -1)"
+      pkg_torch="$(ls -1 /final_pkgs/torch-* | sort |tail -1)"
+      # todo: after folder is populated use the pypi_pkg channel instead
+      pip install "\$pkg_no_python" "\$pkg_torch" --index-url "https://download.pytorch.org/whl/\${CHANNEL}/${DESIRED_CUDA}_pypi_pkg"
+      retry pip install -q numpy protobuf typing-extensions
+    else
+      pip install "\$pkg" --index-url "https://download.pytorch.org/whl/\${CHANNEL}/${DESIRED_CUDA}"
+      retry pip install -q numpy protobuf typing-extensions
+    fi
   else
     pip install "\$pkg"
     retry pip install -q numpy protobuf typing-extensions
@@ -110,6 +118,12 @@ if [[ "$PACKAGE_TYPE" == libtorch ]]; then
   cd /tmp/libtorch
 fi
 
+if [[ "$GPU_ARCH_TYPE" == xpu ]]; then
+  # Workaround for __mkl_tmp_MOD unbound variable issue, refer https://github.com/pytorch/pytorch/issues/130543
+  set +u
+  source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh
+fi
+
 # Test the package
 /builder/check_binary.sh
 
@@ -33,9 +33,9 @@ if [[ -z "$DOCKER_IMAGE" ]]; then
   if [[ "$PACKAGE_TYPE" == conda ]]; then
     export DOCKER_IMAGE="pytorch/conda-cuda"
   elif [[ "$DESIRED_CUDA" == cpu ]]; then
-    export DOCKER_IMAGE="pytorch/manylinux-cpu"
+    export DOCKER_IMAGE="pytorch/manylinux:cpu"
   else
-    export DOCKER_IMAGE="pytorch/manylinux-cuda${DESIRED_CUDA:2}"
+    export DOCKER_IMAGE="pytorch/manylinux-builder:${DESIRED_CUDA:2}"
   fi
 fi
 
@@ -75,9 +75,9 @@ export PYTORCH_BUILD_NUMBER=1
 TRITON_VERSION=$(cat $PYTORCH_ROOT/.ci/docker/triton_version.txt)
 
+# Here PYTORCH_EXTRA_INSTALL_REQUIREMENTS is already set for the all the wheel builds hence append TRITON_CONSTRAINT
+TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64' and python_version < '3.13'"
 if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" ]]; then
-  # Only linux Python < 3.13 are supported wheels for triton
-  TRITON_CONSTRAINT="platform_system == 'Linux' and platform_machine == 'x86_64' and python_version < '3.13'"
   TRITON_REQUIREMENT="triton==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
   if [[ -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*dev.* ]]; then
     TRITON_SHORTHASH=$(cut -c1-10 $PYTORCH_ROOT/.ci/docker/ci_commit_pins/triton.txt)
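Hoisting TRITON_CONSTRAINT out of the wheel-only branch lets the ROCm (and new XPU) blocks below reuse it. The result is a standard PEP 508 requirement string, e.g. (version number illustrative):

    # triton==3.0.0; platform_system == 'Linux' and platform_machine == 'x86_64' and python_version < '3.13'
    # pip silently skips the requirement wherever the environment marker evaluates to false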
@@ -87,11 +87,11 @@ if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:
 fi
 
 # Set triton via PYTORCH_EXTRA_INSTALL_REQUIREMENTS for triton rocm package
-if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*rocm.* && $(uname) == "Linux" && "$DESIRED_PYTHON" != "3.12" ]]; then
-  TRITON_REQUIREMENT="pytorch-triton-rocm==${TRITON_VERSION}"
+if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*rocm.* && $(uname) == "Linux" ]]; then
+  TRITON_REQUIREMENT="pytorch-triton-rocm==${TRITON_VERSION}; ${TRITON_CONSTRAINT}"
   if [[ -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*dev.* ]]; then
     TRITON_SHORTHASH=$(cut -c1-10 $PYTORCH_ROOT/.ci/docker/ci_commit_pins/triton-rocm.txt)
-    TRITON_REQUIREMENT="pytorch-triton-rocm==${TRITON_VERSION}+${TRITON_SHORTHASH}"
+    TRITON_REQUIREMENT="pytorch-triton-rocm==${TRITON_VERSION}+${TRITON_SHORTHASH}; ${TRITON_CONSTRAINT}"
   fi
   if [[ -z "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" ]]; then
     export PYTORCH_EXTRA_INSTALL_REQUIREMENTS="${TRITON_REQUIREMENT}"
@@ -100,30 +100,18 @@ if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_B
   fi
 fi
 
-JAVA_HOME=
-BUILD_JNI=OFF
-if [[ "$PACKAGE_TYPE" == libtorch ]]; then
-  POSSIBLE_JAVA_HOMES=()
-  POSSIBLE_JAVA_HOMES+=(/usr/local)
-  POSSIBLE_JAVA_HOMES+=(/usr/lib/jvm/java-8-openjdk-amd64)
-  POSSIBLE_JAVA_HOMES+=(/Library/Java/JavaVirtualMachines/*.jdk/Contents/Home)
-  # Add the Windows-specific JNI path
-  POSSIBLE_JAVA_HOMES+=("$PWD/pytorch/.circleci/windows-jni/")
-  for JH in "${POSSIBLE_JAVA_HOMES[@]}" ; do
-    if [[ -e "$JH/include/jni.h" ]] ; then
-      # Skip if we're not on Windows but haven't found a JAVA_HOME
-      if [[ "$JH" == "$PWD/pytorch/.circleci/windows-jni/" && "$OSTYPE" != "msys" ]] ; then
-        break
-      fi
-      echo "Found jni.h under $JH"
-      JAVA_HOME="$JH"
-      BUILD_JNI=ON
-      break
-    fi
-  done
-  if [ -z "$JAVA_HOME" ]; then
-    echo "Did not find jni.h"
+# Set triton via PYTORCH_EXTRA_INSTALL_REQUIREMENTS for triton xpu package
+if [[ "$PACKAGE_TYPE" =~ .*wheel.* && -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*xpu.* && $(uname) == "Linux" ]]; then
+  TRITON_REQUIREMENT="pytorch-triton-xpu==${TRITON_VERSION}"
+  if [[ -n "$PYTORCH_BUILD_VERSION" && "$PYTORCH_BUILD_VERSION" =~ .*dev.* ]]; then
+    TRITON_SHORTHASH=$(cut -c1-10 $PYTORCH_ROOT/.ci/docker/ci_commit_pins/triton-xpu.txt)
+    TRITON_REQUIREMENT="pytorch-triton-xpu==${TRITON_VERSION}+${TRITON_SHORTHASH}"
+  fi
+  if [[ -z "${PYTORCH_EXTRA_INSTALL_REQUIREMENTS:-}" ]]; then
+    export PYTORCH_EXTRA_INSTALL_REQUIREMENTS="${TRITON_REQUIREMENT}"
+  else
+    export PYTORCH_EXTRA_INSTALL_REQUIREMENTS="${PYTORCH_EXTRA_INSTALL_REQUIREMENTS} | ${TRITON_REQUIREMENT}"
   fi
 fi
 
 cat >"$envfile" <<EOL
@ -136,6 +124,7 @@ export DESIRED_PYTHON="${DESIRED_PYTHON:-}"
|
||||
export DESIRED_CUDA="$DESIRED_CUDA"
|
||||
export LIBTORCH_VARIANT="${LIBTORCH_VARIANT:-}"
|
||||
export BUILD_PYTHONLESS="${BUILD_PYTHONLESS:-}"
|
||||
export USE_SPLIT_BUILD="${USE_SPLIT_BUILD:-}"
|
||||
if [[ "${OSTYPE}" == "msys" ]]; then
|
||||
export LIBTORCH_CONFIG="${LIBTORCH_CONFIG:-}"
|
||||
if [[ "${LIBTORCH_CONFIG:-}" == 'debug' ]]; then
|
||||
@ -159,8 +148,6 @@ export TORCH_CONDA_BUILD_FOLDER='pytorch-nightly'
|
||||
export ANACONDA_USER='pytorch'
|
||||
|
||||
export USE_FBGEMM=1
|
||||
export JAVA_HOME=$JAVA_HOME
|
||||
export BUILD_JNI=$BUILD_JNI
|
||||
export PIP_UPLOAD_FOLDER="$PIP_UPLOAD_FOLDER"
|
||||
export DOCKER_IMAGE="$DOCKER_IMAGE"
|
||||
|
||||
|
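The `; platform_system == ...` suffix appended to `TRITON_REQUIREMENT` above is a standard PEP 508 environment marker, so pip only installs the triton dependency on matching platforms. As a minimal illustration of how such a marker evaluates, assuming the third-party `packaging` library is available (the marker string is copied from the script; everything else here is illustrative, not part of the repo):

from packaging.markers import Marker

# The same constraint the script appends to TRITON_REQUIREMENT.
marker = Marker(
    "platform_system == 'Linux' and platform_machine == 'x86_64' "
    "and python_version < '3.13'"
)

# True only on x86_64 Linux running Python < 3.13; pip skips the
# requirement entirely when the marker evaluates to False.
print(marker.evaluate())
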
@@ -25,6 +25,15 @@ if [[ "${DRY_RUN}" = "disabled" ]]; then
  AWS_S3_CP="aws s3 cp"
fi

if [[ "${USE_SPLIT_BUILD:-false}" == "true" ]]; then
  UPLOAD_SUBFOLDER="${UPLOAD_SUBFOLDER}_pypi_pkg"
fi

# this is a special build with all dependencies packaged
if [[ ${BUILD_NAME} == *-full* ]]; then
  UPLOAD_SUBFOLDER="${UPLOAD_SUBFOLDER}_full"
fi

# Sleep 5 minutes between retries for conda upload
retry () {
  "$@" || (sleep 5m && "$@") || (sleep 5m && "$@") || (sleep 5m && "$@") || (sleep 5m && "$@")
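For reference, the same retry-with-fixed-sleep pattern as a minimal Python sketch: one initial try plus four retries, sleeping five minutes between attempts. The helper name and parameters are illustrative, not part of this repo.

import time
from typing import Callable, TypeVar

T = TypeVar("T")

def retry(fn: Callable[[], T], attempts: int = 5, delay_s: int = 300) -> T:
    # Mirrors the shell helper above: keep re-running fn until it succeeds,
    # re-raising only after the final attempt fails.
    for i in range(attempts):
        try:
            return fn()
        except Exception:
            if i == attempts - 1:
                raise
            time.sleep(delay_s)
    raise AssertionError("unreachable")
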
@@ -40,3 +40,7 @@ e6ec0efaf87703c5f889cfc20b29be455885d58d
a53cda1ddc15336dc1ff0ce1eff2a49cdc5f882e
# 2024-01-02 clangformat: fused adam #116583
9dc68d1aa9e554d09344a10fff69f7b50b2d23a0
# 2024-06-28 enable UFMT in `torch/storage.py`
d80939e5e9337e8078f11489afefec59fd42f93b
# 2024-06-28 enable UFMT in `torch.utils.data`
7cf0b90e49689d45be91aa539fdf54cf2ea8a9a3
2 changes: .github/actionlint.yaml (vendored)
@@ -47,3 +47,5 @@ self-hosted-runner:
  - macos-latest-xlarge
  - macos-13-xlarge
  - macos-14-xlarge
  # Organization-wide Intel hosted XPU runners
  - linux.idc.xpu
6 changes: .github/actions/diskspace-cleanup/action.yml (vendored)
@@ -14,12 +14,14 @@ runs:
  - name: Cleans up diskspace
    shell: bash
    run: |
      set -ex
      diskspace_cutoff=${{ inputs.diskspace-cutoff }}
      diskspace=$(df -H / --output=pcent | sed -n 2p | sed 's/%//' | sed 's/ //')
      docker_root_dir=$(docker info -f '{{.DockerRootDir}}')
      diskspace=$(df -H --output=pcent ${docker_root_dir} | sed -n 2p | sed 's/%//' | sed 's/ //')
      msg="Please file an issue on pytorch/pytorch reporting the faulty runner. Include a link to the runner logs so the runner can be identified"
      if [[ "$diskspace" -ge "$diskspace_cutoff" ]] ; then
        docker system prune -af
        diskspace_new=$(df -H / --output=pcent | sed -n 2p | sed 's/%//' | sed 's/ //')
        diskspace_new=$(df -H --output=pcent ${docker_root_dir} | sed -n 2p | sed 's/%//' | sed 's/ //')
        if [[ "$diskspace_new" -gt "$diskspace_cutoff" ]] ; then
          echo "Error: Available diskspace is less than $diskspace_cutoff percent. Not enough diskspace."
          echo "$msg"
21 changes: .github/actions/linux-build/action.yml (vendored)
@@ -52,6 +52,13 @@ inputs:
    description: Hugging Face Hub token
    required: false
    default: ""
  use_split_build:
    description: |
      [Experimental] Build a libtorch only wheel and build pytorch such that
      the remaining binaries are built from the libtorch wheel.
    required: false
    type: boolean
    default: false
outputs:
  docker-image:
    value: ${{ steps.calculate-docker-image.outputs.docker-image }}
@@ -144,6 +151,7 @@ runs:
      DEBUG: ${{ inputs.build-with-debug == 'true' && '1' || '0' }}
      OUR_GITHUB_JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
      HUGGING_FACE_HUB_TOKEN: ${{ inputs.HUGGING_FACE_HUB_TOKEN }}
      USE_SPLIT_BUILD: ${{ inputs.use_split_build }}
    shell: bash
    run: |
      # detached container should get cleaned up by teardown_ec2_linux
@@ -163,6 +171,7 @@ runs:
        -e PR_LABELS \
        -e OUR_GITHUB_JOB_ID \
        -e HUGGING_FACE_HUB_TOKEN \
        -e USE_SPLIT_BUILD \
        --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
        --security-opt seccomp=unconfined \
        --cap-add=SYS_PTRACE \
@@ -183,7 +192,7 @@ runs:

  - name: Store PyTorch Build Artifacts on S3
    uses: seemethere/upload-artifact-s3@v5
    if: inputs.build-generates-artifacts == 'true' && steps.build.outcome != 'skipped'
    if: inputs.build-generates-artifacts == 'true' && steps.build.outcome != 'skipped' && inputs.use_split_build != 'true'
    with:
      name: ${{ inputs.build-environment }}
      retention-days: 14
@@ -191,6 +200,16 @@ runs:
      path: artifacts.zip
      s3-bucket: ${{ inputs.s3-bucket }}

  - name: Store PyTorch Build Artifacts on S3 for split build
    uses: seemethere/upload-artifact-s3@v5
    if: inputs.build-generates-artifacts == 'true' && steps.build.outcome != 'skipped' && inputs.use_split_build == 'true'
    with:
      name: ${{ inputs.build-environment }}-experimental-split-build
      retention-days: 14
      if-no-files-found: error
      path: artifacts.zip
      s3-bucket: ${{ inputs.s3-bucket }}

  - name: Upload sccache stats
    if: steps.build.outcome != 'skipped'
    uses: seemethere/upload-artifact-s3@v5
11 changes: .github/actions/test-pytorch-binary/action.yml (vendored)
@@ -26,6 +26,7 @@ runs:
        -e PYTORCH_FINAL_PACKAGE_DIR \
        -e PYTORCH_ROOT \
        -e SKIP_ALL_TESTS \
        -e USE_SPLIT_BUILD \
        --tty \
        --detach \
        -v "${GITHUB_WORKSPACE}/pytorch:/pytorch" \
@@ -35,7 +36,8 @@ runs:
        "${DOCKER_IMAGE}"
      )

      if [[ "${GPU_ARCH_TYPE}" != "rocm" && "${BUILD_ENVIRONMENT}" != "linux-aarch64-binary-manywheel" && "${BUILD_ENVIRONMENT}" != "linux-s390x-binary-manywheel" ]]; then
      echo "CONTAINER_NAME=${container_name}" >> "$GITHUB_ENV"
      if [[ "${GPU_ARCH_TYPE}" != "rocm" && "${BUILD_ENVIRONMENT}" != "linux-aarch64-binary-manywheel" && "${BUILD_ENVIRONMENT}" != "linux-s390x-binary-manywheel" && "${GPU_ARCH_TYPE}" != "xpu" ]]; then
        # Propagate download.pytorch.org IP to container. This is only needed on Linux non aarch64 runner
        grep download.pytorch.org /etc/hosts | docker exec -i "${container_name}" bash -c "/bin/cat >> /etc/hosts"
      fi
@@ -46,10 +48,9 @@ runs:
      docker exec -t "${container_name}" bash -c "source ${BINARY_ENV_FILE} && bash -x /run.sh"

  - name: Cleanup docker
    if: always() && env.BUILD_ENVIRONMENT == 'linux-s390x-binary-manywheel'
    if: always() && (env.BUILD_ENVIRONMENT == 'linux-s390x-binary-manywheel' || env.GPU_ARCH_TYPE == 'xpu')
    shell: bash
    run: |
      # on s390x stop the container for clean worker stop
      # ignore expansion of "docker ps -q" since it could be empty
      # on s390x or xpu stop the container for clean worker stop
      # shellcheck disable=SC2046
      docker stop $(docker ps -q) || true
      docker stop "${{ env.CONTAINER_NAME }}" || true
2 changes: .github/ci_commit_pins/audio.txt (vendored)
@@ -1 +1 @@
b829e936f7cc61b48149f5f957a451a38bf2a178
69b2a0adc2ec03ab99990d7e8be3d4510438c148
2 changes: .github/ci_commit_pins/torchbench.txt (vendored)
@@ -1 +1 @@
d6015d42d9a1834bc7595c4bd6852562fb80b30b
23512dbebd44a11eb84afbf53c3c071dd105297e
33 changes: .github/merge_rules.yaml (vendored)
@@ -27,11 +27,9 @@
  - third_party/onnx
  - caffe2/python/onnx/**
  approved_by:
  - BowenBao
  - justinchuby
  - liqunfu
  - shubhambhokare1
  - thiagocrepaldi
  - titaiwangms
  - wschin
  - xadupre
@@ -244,6 +242,7 @@
  - torch/csrc/xpu/**
  - torch/xpu/**
  - test/xpu/**
  - test/test_xpu.py
  - third_party/xpu.txt
  - .ci/docker/ci_commit_pins/triton-xpu.txt
  approved_by:
@@ -287,6 +286,7 @@
  - test/cpp/dist_autograd/**
  - test/cpp/rpc/**
  approved_by:
  - wconstab
  - mrshenli
  - pritamdamania87
  - zhaojuanmao
@@ -313,6 +313,25 @@
  - Lint
  - pull

- name: DCP
  patterns:
  - torch/distributed/checkpoint/**
  approved_by:
  - LucasLLC
  - fegin
  - wz337
  - saumishr
  - daulet-askarov
  - pradeepdfb
  - kirtiteja
  - mhorowitz
  - saiteja64
  mandatory_checks_name:
  - EasyCLA
  - Lint
  - pull


- name: IDEEP
  patterns:
  - third_party/ideep
@@ -376,13 +395,21 @@

- name: CPU inductor
  patterns:
  - torch/_inductor/mkldnn_ir.py
  - torch/_inductor/mkldnn_lowerings.py
  - torch/_inductor/fx_passes/mkldnn_fusion.py
  - torch/_inductor/fx_passes/quantization.py
  - torch/_inductor/codegen/cpp_prefix.h
  - torch/_inductor/codegen/cpp.py
  - torch/_inductor/codegen/cpp_utils.py
  - torch/_inductor/codegen/cpp_micro_gemm.py
  - torch/_inductor/codegen/cpp_template_kernel.py
  - torch/_inductor/codegen/cpp_template.py
  - torch/_inductor/codegen/cpp_gemm_template.py
  - test/inductor/test_mkldnn_pattern_matcher.py
  - test/inductor/test_cpu_repo.py
  - test/inductor/test_cpu_repro.py
  - test/inductor/test_cpu_cpp_wrapper.py
  - test/inductor/test_cpu_select_algorithm.py
  - aten/src/ATen/cpu/**
  - aten/src/ATen/native/quantized/cpu/**
  - test/quantization/core/test_quantized_op.py
1 change: .github/pytorch-probot.yml (vendored)
@@ -26,3 +26,4 @@ retryable_workflows:
  - windows-binary
labeler_config: labeler.yml
label_to_label_config: label_to_label.yml
mergebot: True
2 changes: .github/scripts/amd/package_triton_wheel.sh (vendored)
@@ -93,6 +93,8 @@ done

# Copy Include Files
cp -r $ROCM_HOME/include/hip $TRITON_ROCM_DIR/include
cp -r $ROCM_HOME/include/roctracer $TRITON_ROCM_DIR/include
cp -r $ROCM_HOME/include/hsa $TRITON_ROCM_DIR/include

# Copy linker
mkdir -p $TRITON_ROCM_DIR/llvm/bin
29 changes: .github/scripts/build_triton_wheel.py (vendored)
@@ -11,8 +11,12 @@ SCRIPT_DIR = Path(__file__).parent
REPO_DIR = SCRIPT_DIR.parent.parent


def read_triton_pin(rocm_hash: bool = False) -> str:
    triton_file = "triton.txt" if not rocm_hash else "triton-rocm.txt"
def read_triton_pin(device: str = "cuda") -> str:
    triton_file = "triton.txt"
    if device == "rocm":
        triton_file = "triton-rocm.txt"
    elif device == "xpu":
        triton_file = "triton-xpu.txt"
    with open(REPO_DIR / ".ci" / "docker" / "ci_commit_pins" / triton_file) as f:
        return f.read().strip()

@@ -49,7 +53,7 @@ def build_triton(
    version: str,
    commit_hash: str,
    build_conda: bool = False,
    build_rocm: bool = False,
    device: str = "cuda",
    py_version: Optional[str] = None,
    release: bool = False,
) -> Path:
@@ -69,11 +73,14 @@ def build_triton(
        triton_basedir = Path(tmpdir) / "triton"
        triton_pythondir = triton_basedir / "python"
        triton_repo = "https://github.com/openai/triton"
        if build_rocm:
        if device == "rocm":
            triton_pkg_name = "pytorch-triton-rocm"
        elif device == "xpu":
            triton_pkg_name = "pytorch-triton-xpu"
            triton_repo = "https://github.com/intel/intel-xpu-backend-for-triton"
        else:
            triton_pkg_name = "pytorch-triton"
        check_call(["git", "clone", triton_repo], cwd=tmpdir)
        check_call(["git", "clone", triton_repo, "triton"], cwd=tmpdir)
        if release:
            ver, rev, patch = version.split(".")
            check_call(
@@ -140,7 +147,7 @@ def build_triton(
            expected_version=None,
        )

        if build_rocm:
        if device == "rocm":
            check_call(
                [f"{SCRIPT_DIR}/amd/package_triton_wheel.sh"],
                cwd=triton_basedir,
@@ -155,7 +162,7 @@ def build_triton(
        whl_path = next(iter((triton_pythondir / "dist").glob("*.whl")))
        shutil.copy(whl_path, Path.cwd())

        if build_rocm:
        if device == "rocm":
            check_call(
                [f"{SCRIPT_DIR}/amd/patch_triton_wheel.sh", Path.cwd()],
                cwd=triton_basedir,
@@ -170,17 +177,19 @@ def main() -> None:
    parser = ArgumentParser("Build Triton binaries")
    parser.add_argument("--release", action="store_true")
    parser.add_argument("--build-conda", action="store_true")
    parser.add_argument("--build-rocm", action="store_true")
    parser.add_argument(
        "--device", type=str, default="cuda", choices=["cuda", "rocm", "xpu"]
    )
    parser.add_argument("--py-version", type=str)
    parser.add_argument("--commit-hash", type=str)
    parser.add_argument("--triton-version", type=str, default=read_triton_version())
    args = parser.parse_args()

    build_triton(
        build_rocm=args.build_rocm,
        device=args.device,
        commit_hash=args.commit_hash
        if args.commit_hash
        else read_triton_pin(args.build_rocm),
        else read_triton_pin(args.device),
        version=args.triton_version,
        build_conda=args.build_conda,
        py_version=args.py_version,
114 changes: .github/scripts/cherry_pick.py (vendored)
@@ -3,11 +3,11 @@
import json
import os
import re
from typing import Any, Optional
from typing import Any, cast, Dict, List, Optional

from urllib.error import HTTPError

from github_utils import gh_fetch_url, gh_post_pr_comment
from github_utils import gh_fetch_url, gh_post_pr_comment, gh_query_issues_by_labels

from gitutils import get_git_remote_name, get_git_repo_dir, GitRepo
from trymerge import get_pr_commit_sha, GitHubPR
@@ -19,6 +19,7 @@ REQUIRES_ISSUE = {
    "critical",
    "fixnewfeature",
}
RELEASE_BRANCH_REGEX = re.compile(r"release/(?P<version>.+)")


def parse_args() -> Any:
@@ -58,6 +59,33 @@ def get_merge_commit_sha(repo: GitRepo, pr: GitHubPR) -> Optional[str]:
    return commit_sha if pr.is_closed() else None


def get_release_version(onto_branch: str) -> Optional[str]:
    """
    Return the release version if the target branch is a release branch
    """
    m = re.match(RELEASE_BRANCH_REGEX, onto_branch)
    return m.group("version") if m else ""


def get_tracker_issues(
    org: str, project: str, onto_branch: str
) -> List[Dict[str, Any]]:
    """
    Find the tracker issue from the repo. The tracker issue needs to have the title
    like [VERSION] Release Tracker following the convention on PyTorch
    """
    version = get_release_version(onto_branch)
    if not version:
        return []

    tracker_issues = gh_query_issues_by_labels(org, project, labels=["release tracker"])
    if not tracker_issues:
        return []

    # Figure out the tracker issue from the list by looking at the title
    return [issue for issue in tracker_issues if version in issue.get("title", "")]


def cherry_pick(
    github_actor: str,
    repo: GitRepo,
@@ -77,17 +105,49 @@ def cherry_pick(
    )

    try:
        org, project = repo.gh_owner_and_name()

        cherry_pick_pr = ""
        if not dry_run:
            org, project = repo.gh_owner_and_name()
            cherry_pick_pr = submit_pr(repo, pr, cherry_pick_branch, onto_branch)

            msg = f"The cherry pick PR is at {cherry_pick_pr}"
            if fixes:
                msg += f" and it is linked with issue {fixes}"
            elif classification in REQUIRES_ISSUE:
                msg += f" and it is recommended to link a {classification} cherry pick PR with an issue"
        tracker_issues_comments = []
        tracker_issues = get_tracker_issues(org, project, onto_branch)
        for issue in tracker_issues:
            issue_number = int(str(issue.get("number", "0")))
            if not issue_number:
                continue

            post_comment(org, project, pr.pr_num, msg)
            res = cast(
                Dict[str, Any],
                post_tracker_issue_comment(
                    org,
                    project,
                    issue_number,
                    pr.pr_num,
                    cherry_pick_pr,
                    classification,
                    fixes,
                    dry_run,
                ),
            )

            comment_url = res.get("html_url", "")
            if comment_url:
                tracker_issues_comments.append(comment_url)

        msg = f"The cherry pick PR is at {cherry_pick_pr}"
        if fixes:
            msg += f" and it is linked with issue {fixes}."
        elif classification in REQUIRES_ISSUE:
            msg += f" and it is recommended to link a {classification} cherry pick PR with an issue."

        if tracker_issues_comments:
            msg += " The following tracker issues are updated:\n"
            for tracker_issues_comment in tracker_issues_comments:
                msg += f"* {tracker_issues_comment}\n"

        post_pr_comment(org, project, pr.pr_num, msg, dry_run)

    finally:
        if current_branch:
@@ -159,7 +219,9 @@ def submit_pr(
        raise RuntimeError(msg) from error


def post_comment(org: str, project: str, pr_num: int, msg: str) -> None:
def post_pr_comment(
    org: str, project: str, pr_num: int, msg: str, dry_run: bool = False
) -> List[Dict[str, Any]]:
    """
    Post a comment on the PR itself to point to the cherry picking PR when success
    or print the error when failure
@@ -182,7 +244,35 @@ def post_comment(org: str, project: str, pr_num: int, msg: str) -> None:
    comment = "\n".join(
        (f"### Cherry picking #{pr_num}", f"{msg}", "", f"{internal_debugging}")
    )
    gh_post_pr_comment(org, project, pr_num, comment)
    return gh_post_pr_comment(org, project, pr_num, comment, dry_run)


def post_tracker_issue_comment(
    org: str,
    project: str,
    issue_num: int,
    pr_num: int,
    cherry_pick_pr: str,
    classification: str,
    fixes: str,
    dry_run: bool = False,
) -> List[Dict[str, Any]]:
    """
    Post a comment on the tracker issue (if any) to record the cherry pick
    """
    comment = "\n".join(
        (
            "Link to landed trunk PR (if applicable):",
            f"* https://github.com/{org}/{project}/pull/{pr_num}",
            "",
            "Link to release branch PR:",
            f"* {cherry_pick_pr}",
            "",
            "Criteria Category:",
            " - ".join((classification.capitalize(), fixes.capitalize())),
        )
    )
    return gh_post_pr_comment(org, project, issue_num, comment, dry_run)


def main() -> None:
@@ -214,7 +304,7 @@ def main() -> None:

    except RuntimeError as error:
        if not args.dry_run:
            post_comment(org, project, pr_num, str(error))
            post_pr_comment(org, project, pr_num, str(error))
        else:
            raise error

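The `RELEASE_BRANCH_REGEX` added above drives tracker-issue discovery: the version captured from the target branch is matched against issue titles. A quick standalone check of the same pattern (the branch names here are made up for illustration):

import re

RELEASE_BRANCH_REGEX = re.compile(r"release/(?P<version>.+)")

for branch in ("release/2.4", "main"):
    m = re.match(RELEASE_BRANCH_REGEX, branch)
    # Prints "2.4" for the release branch and "" otherwise,
    # mirroring get_release_version() above.
    print(branch, "->", m.group("version") if m else "")
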
BIN: .github/scripts/drci_mocks.json.gz (vendored)
Binary file not shown.
74 changes: .github/scripts/generate_binary_build_matrix.py (vendored)
@@ -8,6 +8,7 @@ architectures:
* CPU
* Latest CUDA
* Latest ROCM
* Latest XPU
"""

import os
@@ -24,6 +25,7 @@ CUDA_ARCHES_CUDNN_VERSION = {"11.8": "9", "12.1": "9", "12.4": "9"}

ROCM_ARCHES = ["6.0", "6.1"]

XPU_ARCHES = ["xpu"]

CPU_CXX11_ABI_ARCH = ["cpu-cxx11-abi"]

@@ -48,7 +50,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
        "nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nccl-cu11==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'"
    ),
    "12.1": (
@@ -61,7 +63,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
        "nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'"
    ),
    "12.4": (
@@ -74,7 +76,7 @@ PYTORCH_EXTRA_INSTALL_REQUIREMENTS = {
        "nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | "
        "nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'"
    ),
@@ -132,6 +134,8 @@ def arch_type(arch_version: str) -> str:
        return "cuda"
    elif arch_version in ROCM_ARCHES:
        return "rocm"
    elif arch_version in XPU_ARCHES:
        return "xpu"
    elif arch_version in CPU_CXX11_ABI_ARCH:
        return "cpu-cxx11-abi"
    elif arch_version in CPU_AARCH64_ARCH:
@@ -156,6 +160,7 @@ WHEEL_CONTAINER_IMAGES = {
        gpu_arch: f"pytorch/manylinux-builder:rocm{gpu_arch}-{DEFAULT_TAG}"
        for gpu_arch in ROCM_ARCHES
    },
    "xpu": f"pytorch/manylinux2_28-builder:xpu-{DEFAULT_TAG}",
    "cpu": f"pytorch/manylinux-builder:cpu-{DEFAULT_TAG}",
    "cpu-cxx11-abi": f"pytorch/manylinuxcxx11-abi-builder:cpu-cxx11-abi-{DEFAULT_TAG}",
    "cpu-aarch64": f"pytorch/manylinuxaarch64-builder:cpu-aarch64-{DEFAULT_TAG}",
@@ -221,6 +226,7 @@ def translate_desired_cuda(gpu_arch_type: str, gpu_arch_version: str) -> str:
        "cuda": f"cu{gpu_arch_version.replace('.', '')}",
        "cuda-aarch64": "cu124",
        "rocm": f"rocm{gpu_arch_version}",
        "xpu": "xpu",
    }.get(gpu_arch_type, gpu_arch_version)


@@ -325,13 +331,13 @@ def generate_wheels_matrix(
    package_type = "manywheel"

    if python_versions is None:
        python_versions = FULL_PYTHON_VERSIONS
        python_versions = FULL_PYTHON_VERSIONS + ["3.13"]

    if arches is None:
        # Define default compute architectures
        arches = ["cpu"]
        if os == "linux":
            arches += CPU_CXX11_ABI_ARCH + CUDA_ARCHES + ROCM_ARCHES
            arches += CPU_CXX11_ABI_ARCH + CUDA_ARCHES + ROCM_ARCHES + XPU_ARCHES
        elif os == "windows":
            arches += CUDA_ARCHES
        elif os == "linux-aarch64":
@@ -347,10 +353,6 @@ def generate_wheels_matrix(
    for python_version in python_versions:
        for arch_version in arches:
            gpu_arch_type = arch_type(arch_version)
            # Disable py3.12 builds for ROCm because of triton dependency
            # on llnl-hatchet, which doesn't have py3.12 wheels available
            if gpu_arch_type == "rocm" and python_version == "3.12":
                continue
            gpu_arch_version = (
                ""
                if arch_version == "cpu"
@@ -358,9 +360,16 @@ def generate_wheels_matrix(
                or arch_version == "cpu-aarch64"
                or arch_version == "cpu-s390x"
                or arch_version == "cuda-aarch64"
                or arch_version == "xpu"
                else arch_version
            )

            # TODO: Enable python 3.13 on rocm, xpu, aarch64, windows
            if (
                gpu_arch_type in ["rocm", "xpu"] or os != "linux"
            ) and python_version == "3.13":
                continue

            # 12.1 linux wheels require PYTORCH_EXTRA_INSTALL_REQUIREMENTS to install
            if (
                arch_version in ["12.4", "12.1", "11.8"]
@@ -390,6 +399,49 @@ def generate_wheels_matrix(
                        ),
                    }
                )
                if arch_version != "cuda-aarch64":
                    ret.append(
                        {
                            "python_version": python_version,
                            "gpu_arch_type": gpu_arch_type,
                            "gpu_arch_version": gpu_arch_version,
                            "desired_cuda": translate_desired_cuda(
                                gpu_arch_type, gpu_arch_version
                            ),
                            "use_split_build": "True",
                            "devtoolset": "",
                            "container_image": WHEEL_CONTAINER_IMAGES[arch_version],
                            "package_type": package_type,
                            "pytorch_extra_install_requirements": (
                                PYTORCH_EXTRA_INSTALL_REQUIREMENTS[arch_version]  # fmt: skip
                                if os != "linux-aarch64"
                                else ""
                            ),
                            "build_name": f"{package_type}-py{python_version}-{gpu_arch_type}{gpu_arch_version}-split".replace(  # noqa: B950
                                ".", "_"
                            ),
                        }
                    )
                # Special build to use on Colab. Python 3.10 for CUDA 12.1
                if python_version == "3.10" and arch_version == "12.1":
                    ret.append(
                        {
                            "python_version": python_version,
                            "gpu_arch_type": gpu_arch_type,
                            "gpu_arch_version": gpu_arch_version,
                            "desired_cuda": translate_desired_cuda(
                                gpu_arch_type, gpu_arch_version
                            ),
                            "use_split_build": "False",
                            "devtoolset": "",
                            "container_image": WHEEL_CONTAINER_IMAGES[arch_version],
                            "package_type": package_type,
                            "pytorch_extra_install_requirements": "",
                            "build_name": f"{package_type}-py{python_version}-{gpu_arch_type}{gpu_arch_version}-full".replace(  # noqa: B950
                                ".", "_"
                            ),
                        }
                    )
            else:
                ret.append(
                    {
@@ -400,7 +452,9 @@ def generate_wheels_matrix(
                            gpu_arch_type, gpu_arch_version
                        ),
                        "devtoolset": (
                            "cxx11-abi" if arch_version == "cpu-cxx11-abi" else ""
                            "cxx11-abi"
                            if arch_version in ["cpu-cxx11-abi", "xpu"]
                            else ""
                        ),
                        "container_image": WHEEL_CONTAINER_IMAGES[arch_version],
                        "package_type": package_type,
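For orientation, `translate_desired_cuda` above turns a (gpu_arch_type, gpu_arch_version) pair into the platform tag that ends up in build names. A condensed standalone sketch of the same idea (only a few arch types shown; the inputs are illustrative):

def translate_desired_cuda(gpu_arch_type: str, gpu_arch_version: str) -> str:
    # Same shape as the script's mapping: "cuda" + "12.1" -> "cu121",
    # rocm keeps its version, xpu is a fixed tag, everything else
    # falls through unchanged.
    return {
        "cpu": "cpu",
        "cuda": f"cu{gpu_arch_version.replace('.', '')}",
        "rocm": f"rocm{gpu_arch_version}",
        "xpu": "xpu",
    }.get(gpu_arch_type, gpu_arch_version)

print(translate_desired_cuda("cuda", "12.1"))  # cu121
print(translate_desired_cuda("xpu", ""))       # xpu
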
99 changes: .github/scripts/get_workflow_type.py (vendored)
@@ -1,99 +0,0 @@
import json
from argparse import ArgumentParser
from typing import Any

from github import Auth, Github
from github.Issue import Issue


WORKFLOW_TYPE_LABEL = "label"
WORKFLOW_TYPE_RG = "rg"
WORKFLOW_TYPE_BOTH = "both"


def parse_args() -> Any:
    parser = ArgumentParser("Get dynamic rollout settings")
    parser.add_argument("--github-token", type=str, required=True, help="GitHub token")
    parser.add_argument(
        "--github-repo",
        type=str,
        required=False,
        default="pytorch/test-infra",
        help="GitHub repo to get the issue",
    )
    parser.add_argument(
        "--github-issue", type=int, required=True, help="GitHub issue number"
    )
    parser.add_argument(
        "--github-user", type=str, required=True, help="GitHub username"
    )
    parser.add_argument(
        "--github-branch", type=str, required=True, help="Current GitHub branch"
    )

    return parser.parse_args()


def get_gh_client(github_token: str) -> Github:
    auth = Auth.Token(github_token)
    return Github(auth=auth)


def get_issue(gh: Github, repo: str, issue_num: int) -> Issue:
    repo = gh.get_repo(repo)
    return repo.get_issue(number=issue_num)


def is_exception_branch(branch: str) -> bool:
    return branch.split("/")[0] in {"main", "nightly", "release", "landchecks"}


def get_workflow_type(issue: Issue, username: str) -> str:
    user_list = issue.get_comments()[0].body.split("\r\n")
    try:
        run_option = issue.get_comments()[1].body.split("\r\n")[0]
    except Exception as e:
        run_option = "single"

    if user_list[0] == "!":
        # Use old runners for everyone
        return WORKFLOW_TYPE_LABEL
    elif user_list[1] == "*":
        if run_option == WORKFLOW_TYPE_BOTH:
            # Use ARC runners and old runners for everyone
            return WORKFLOW_TYPE_BOTH
        else:
            # Use only ARC runners for everyone
            return WORKFLOW_TYPE_RG
    elif username in user_list:
        if run_option == WORKFLOW_TYPE_BOTH:
            # Use ARC runners and old runners for a specific user
            return WORKFLOW_TYPE_BOTH
        else:
            # Use only ARC runners for a specific user
            return WORKFLOW_TYPE_RG
    else:
        # Use old runners by default
        return WORKFLOW_TYPE_LABEL


def main() -> None:
    args = parse_args()

    if is_exception_branch(args.github_branch):
        output = {"workflow_type": WORKFLOW_TYPE_LABEL}
    else:
        try:
            gh = get_gh_client(args.github_token)
            issue = get_issue(gh, args.github_repo, args.github_issue)

            output = {"workflow_type": get_workflow_type(issue, args.github_user)}
        except Exception as e:
            output = {"workflow_type": WORKFLOW_TYPE_LABEL}

    json_output = json.dumps(output)
    print(json_output)


if __name__ == "__main__":
    main()
9 changes: .github/scripts/github_utils.py (vendored)
@@ -202,3 +202,12 @@ def gh_update_pr_state(org: str, repo: str, pr_num: int, state: str = "open") ->
        )
    else:
        raise


def gh_query_issues_by_labels(
    org: str, repo: str, labels: List[str], state: str = "open"
) -> List[Dict[str, Any]]:
    url = f"{GITHUB_API_URL}/repos/{org}/{repo}/issues"
    return gh_fetch_json(
        url, method="GET", params={"labels": ",".join(labels), "state": state}
    )
BIN: .github/scripts/gql_mocks.json.gz (vendored)
Binary file not shown.
1 change: .github/scripts/lintrunner.sh (vendored)
@@ -29,6 +29,7 @@ python3 -m tools.pyi.gen_pyi \
  --native-functions-path aten/src/ATen/native/native_functions.yaml \
  --tags-path aten/src/ATen/native/tags.yaml \
  --deprecated-functions-path "tools/autograd/deprecated.yaml"
python3 torch/utils/data/datapipes/gen_pyi.py

RC=0
# Run lintrunner on all files
210 changes: .github/scripts/runner_determinator.py (vendored, new file)
@@ -0,0 +1,210 @@
# flake8: noqa: G004

import logging
import os
from argparse import ArgumentParser
from logging import LogRecord
from typing import Any, Iterable

from github import Auth, Github
from github.Issue import Issue


WORKFLOW_LABEL_META = ""  # use meta runners
WORKFLOW_LABEL_LF = "lf."  # use runners from the linux foundation

GITHUB_OUTPUT = os.getenv("GITHUB_OUTPUT", "")
GH_OUTPUT_KEY_LABEL_TYPE = "label-type"


class ColorFormatter(logging.Formatter):
    """Color codes the log messages based on the log level"""

    COLORS = {
        "WARNING": "\033[33m",  # Yellow
        "ERROR": "\033[31m",  # Red
        "CRITICAL": "\033[31m",  # Red
        "INFO": "\033[0m",  # Reset
        "DEBUG": "\033[0m",  # Reset
    }

    def format(self, record: LogRecord) -> str:
        log_color = self.COLORS.get(record.levelname, "\033[0m")  # Default to reset
        record.msg = f"{log_color}{record.msg}\033[0m"
        return super().format(record)


handler = logging.StreamHandler()
handler.setFormatter(ColorFormatter(fmt="%(levelname)-8s: %(message)s"))

log = logging.getLogger(os.path.basename(__file__))
log.addHandler(handler)
log.setLevel(logging.INFO)


def set_github_output(key: str, value: str) -> None:
    """
    Defines outputs of the github action that invokes this script
    """
    if not GITHUB_OUTPUT:
        # See https://github.blog/changelog/2022-10-11-github-actions-deprecating-save-state-and-set-output-commands/ for deprecation notice
        log.warning(
            "No env var found for GITHUB_OUTPUT, you must be running this code locally. Falling back to the deprecated print method."
        )
        print(f"::set-output name={key}::{value}")
        return

    with open(GITHUB_OUTPUT, "a") as f:
        log.info(f"Setting output: {key}='{value}'")
        f.write(f"{key}={value}\n")


def parse_args() -> Any:
    parser = ArgumentParser("Get dynamic rollout settings")
    parser.add_argument("--github-token", type=str, required=True, help="GitHub token")
    parser.add_argument(
        "--github-issue-repo",
        type=str,
        required=False,
        default="pytorch/test-infra",
        help="GitHub repo to get the issue",
    )
    parser.add_argument(
        "--github-repo",
        type=str,
        required=True,
        help="GitHub repo where CI is running",
    )
    parser.add_argument(
        "--github-issue", type=int, required=True, help="GitHub issue number"
    )
    parser.add_argument(
        "--github-actor", type=str, required=True, help="GitHub triggering_actor"
    )
    parser.add_argument(
        "--github-issue-owner", type=str, required=True, help="GitHub issue owner"
    )
    parser.add_argument(
        "--github-branch", type=str, required=True, help="Current GitHub branch or tag"
    )
    parser.add_argument(
        "--github-ref-type",
        type=str,
        required=True,
        help="Current GitHub ref type, branch or tag",
    )

    return parser.parse_args()


def get_gh_client(github_token: str) -> Github:
    auth = Auth.Token(github_token)
    return Github(auth=auth)


def get_issue(gh: Github, repo: str, issue_num: int) -> Issue:
    repo = gh.get_repo(repo)
    return repo.get_issue(number=issue_num)


def get_potential_pr_author(
    gh: Github, repo: str, username: str, ref_type: str, ref_name: str
) -> str:
    # If the trigger was a new tag added by a bot, this is a ciflow case
    # Fetch the actual username from the original PR. The PR number is
    # embedded in the tag name: ciflow/<name>/<pr-number>
    if username == "pytorch-bot[bot]" and ref_type == "tag":
        split_tag = ref_name.split("/")
        if (
            len(split_tag) == 3
            and split_tag[0] == "ciflow"
            and split_tag[2].isnumeric()
        ):
            pr_number = split_tag[2]
            try:
                repository = gh.get_repo(repo)
                pull = repository.get_pull(number=int(pr_number))
            except Exception as e:
                raise Exception(  # noqa: TRY002
                    f"issue with pull request {pr_number} from repo {repository}"
                ) from e
            return pull.user.login
    # In all other cases, return the original input username
    return username


def is_exception_branch(branch: str) -> bool:
    return branch.split("/")[0] in {"main", "nightly", "release", "landchecks"}


def get_workflow_type(issue: Issue, workflow_requestors: Iterable[str]) -> str:
    try:
        first_comment = issue.get_comments()[0].body.strip("\n\t ")

        if first_comment[0] == "!":
            log.info("LF Workflows are disabled for everyone. Using meta runners.")
            return WORKFLOW_LABEL_META
        elif first_comment[0] == "*":
            log.info("LF Workflows are enabled for everyone. Using LF runners.")
            return WORKFLOW_LABEL_LF
        else:
            all_opted_in_users = {
                usr_raw.strip("\n\t@ ") for usr_raw in first_comment.split()
            }
            opted_in_requestors = {
                usr for usr in workflow_requestors if usr in all_opted_in_users
            }
            if opted_in_requestors:
                log.info(
                    f"LF Workflows are enabled for {', '.join(opted_in_requestors)}. Using LF runners."
                )
                return WORKFLOW_LABEL_LF
            else:
                log.info(
                    f"LF Workflows are disabled for {', '.join(workflow_requestors)}. Using meta runners."
                )
                return WORKFLOW_LABEL_META

    except Exception as e:
        log.error(
            f"Failed to determine workflow type. Falling back to meta runners. Exception: {e}"
        )
        return WORKFLOW_LABEL_META


def main() -> None:
    args = parse_args()

    if args.github_ref_type == "branch" and is_exception_branch(args.github_branch):
        log.info(f"Exception branch: '{args.github_branch}', using meta runners")
        label_type = WORKFLOW_LABEL_META
    else:
        try:
            gh = get_gh_client(args.github_token)
            # The default issue we use - https://github.com/pytorch/test-infra/issues/5132
            issue = get_issue(gh, args.github_issue_repo, args.github_issue)
            username = get_potential_pr_author(
                gh,
                args.github_repo,
                args.github_actor,
                args.github_ref_type,
                args.github_branch,
            )
            label_type = get_workflow_type(
                issue,
                (
                    args.github_issue_owner,
                    username,
                ),
            )
        except Exception as e:
            log.error(
                f"Failed to get issue. Falling back to meta runners. Exception: {e}"
            )
            label_type = WORKFLOW_LABEL_META

    set_github_output(GH_OUTPUT_KEY_LABEL_TYPE, label_type)


if __name__ == "__main__":
    main()
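A quick standalone check of the tag convention `get_potential_pr_author` relies on: ciflow tags look like `ciflow/<workflow>/<pr-number>`, and the PR number is the third path segment. The tag value below is made up for illustration.

# Illustrative tag, not a real trigger.
ref_name = "ciflow/trunk/129013"

parts = ref_name.split("/")
if len(parts) == 3 and parts[0] == "ciflow" and parts[2].isnumeric():
    # Prints "129013"; the script then looks up that PR's author.
    print(f"PR number embedded in tag: {parts[2]}")
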
@@ -2,7 +2,7 @@

set -eoux pipefail

SYNC_BRANCH=fbcode/pytorch-stable-prototype
SYNC_BRANCH=pytorch-stable-prototype

git config user.email "fake@example.com"
git config user.name "PyTorch Stable Bot"
@@ -11,7 +11,9 @@ git fetch origin main
git fetch origin "$SYNC_BRANCH"
git checkout "$SYNC_BRANCH"

for SHA in $(git log 4333e122d4b74cdf84351ed2907045c6a767b4cd..origin/main --pretty="%h" --reverse -- torch/distributed torch/csrc/distributed test/distributed test/cpp/c10d benchmarks/distributed)
# Using a hardcoded SHA here is a massive speedup as we can skip the entire history of the pytorch GitHub repo.
# This specific SHA was chosen as it was before the "branch point" of the stable branch
for SHA in $(git log ba3b05fdf37ddbc3c301294d6a560a816335e717..origin/main --pretty="%h" --reverse -- torch/distributed torch/csrc/distributed test/distributed test/cpp/c10d benchmarks/distributed)
do
  # `git merge-base --is-ancestor` exits with code 0 if the given SHA is an ancestor, and non-0 otherwise
  if git merge-base --is-ancestor $SHA HEAD || [[ $(git log --grep="(cherry picked from commit $SHA") ]]
@@ -20,7 +22,12 @@ do
    continue
  fi
  echo "Copying $SHA"
  git cherry-pick -x "$SHA"
  git cherry-pick -x "$SHA" -X theirs
  git reset --soft HEAD~1
  git add torch/distributed torch/csrc/distributed test/distributed test/cpp/c10d benchmarks/distributed
  git checkout .
  git commit --reuse-message=HEAD@{1}
  git clean -f
done

if [[ "${WITH_PUSH}" == true ]]; then
@@ -41,7 +41,7 @@ def main() -> None:
    )

    options = parser.parse_args()
    tagged_images: Dict[str, bool] = dict()
    tagged_images: Dict[str, bool] = {}
    platform_images = [
        generate_binary_build_matrix.WHEEL_CONTAINER_IMAGES,
        generate_binary_build_matrix.LIBTORCH_CONTAINER_IMAGES,
1 change: .github/scripts/td_llm_indexer.sh (vendored)
@@ -7,6 +7,7 @@ cd llm-target-determinator
pip install -q -r requirements.txt
cd ../codellama
pip install -e .
pip install numpy==1.26.0

# Run indexer
cd ../llm-target-determinator
24 changes: .github/scripts/test_trymerge.py (vendored)
@@ -180,6 +180,9 @@ def mock_gh_get_info() -> Any:
    return {
        "closed": False,
        "isCrossRepository": False,
        "headRefName": "foo",
        "baseRefName": "bar",
        "baseRepository": {"defaultBranchRef": {"name": "bar"}},
        "files": {"nodes": [], "pageInfo": {"hasNextPage": False}},
        "changedFiles": 0,
    }
@@ -394,6 +397,7 @@ class TestTryMerge(TestCase):
        # self.assertGreater(len(pr.get_checkrun_conclusions()), 3)
        self.assertGreater(pr.get_commit_count(), 60)

    @skip("GitHub doesn't keep this data anymore")
    def test_gql_retrieve_checksuites(self, *args: Any) -> None:
        "Fetch comments and conclusions for PR with 60 commits"
        pr = GitHubPR("pytorch", "pytorch", 94787)
@@ -891,6 +895,24 @@ class TestBypassFailures(TestCase):
        self.assertTrue(len(ignorable["FLAKY"]) == 1)
        self.assertTrue(len(ignorable["BROKEN_TRUNK"]) == 0)

    def test_ignore_failures_older_run_same_workflow(self, *args: Any) -> None:
        pr = GitHubPR("pytorch", "pytorch", 129013)
        checks = pr.get_checkrun_conclusions()
        checks = get_classifications(
            pr.pr_num,
            pr.project,
            checks,
            [],
        )
        pending, failed, ignorable = categorize_checks(
            checks,
            list(checks.keys()),
        )
        self.assertTrue(len(pending) == 0)
        self.assertTrue(len(failed) == 0)
        self.assertTrue(len(ignorable["FLAKY"]) == 2)
        self.assertTrue(len(ignorable["UNSTABLE"]) == 13)

    @mock.patch("trymerge.read_merge_rules", side_effect=xla_merge_rules)
    def test_dont_ignore_flaky_failures(self, *args: Any) -> None:
        """
@@ -1019,7 +1041,7 @@ class TestGitHubPRGhstackDependencies(TestCase):
        )

    @skip(
        reason="This test is run against a mutalbe PR that has changed, so it no longer works. The test should be changed"
        reason="This test is run against a mutable PR that has changed, so it no longer works. The test should be changed"
    )
    @mock.patch("trymerge.read_merge_rules")
    @mock.patch("trymerge.GitRepo")
119 changes: .github/scripts/trymerge.py (vendored)
@@ -81,9 +81,10 @@ JobNameToStateDict = Dict[str, JobCheckState]


class WorkflowCheckState:
    def __init__(self, name: str, url: str, status: Optional[str]):
    def __init__(self, name: str, url: str, run_id: int, status: Optional[str]):
        self.name: str = name
        self.url: str = url
        self.run_id: int = run_id
        self.status: Optional[str] = status
        self.jobs: JobNameToStateDict = {}

@@ -122,6 +123,7 @@ fragment PRCheckSuites on CheckSuiteConnection {
      workflowRun {
        workflow {
          name
          databaseId
        }
        databaseId
        url
@@ -512,7 +514,7 @@ def add_workflow_conclusions(
    workflows: Dict[str, WorkflowCheckState] = {}

    # for the jobs that don't have a workflow
    no_workflow_obj: WorkflowCheckState = WorkflowCheckState("", "", None)
    no_workflow_obj: WorkflowCheckState = WorkflowCheckState("", "", 0, None)

    def add_conclusions(edges: Any) -> None:
        for edge_idx, edge in enumerate(edges):
@@ -523,18 +525,30 @@ def add_workflow_conclusions(
            workflow_obj: WorkflowCheckState = no_workflow_obj

            if workflow_run is not None:
                # This is the usual workflow run ID we see on GitHub
                workflow_run_id = workflow_run["databaseId"]
                # While this is the metadata name and ID of the workflow itself
                workflow_name = workflow_run["workflow"]["name"]
                workflow_id = workflow_run["workflow"]["databaseId"]

                workflow_conclusion = node["conclusion"]
                # Do not override existing status with cancelled
                if workflow_conclusion == "CANCELLED" and workflow_name in workflows:
                    continue
                if workflow_name not in workflows:
                    workflows[workflow_name] = WorkflowCheckState(

                # Only keep the latest workflow run for each workflow, heuristically,
                # it's the run with largest run ID
                if (
                    workflow_id not in workflows
                    or workflows[workflow_id].run_id < workflow_run_id
                ):
                    workflows[workflow_id] = WorkflowCheckState(
                        name=workflow_name,
                        status=workflow_conclusion,
                        url=workflow_run["url"],
                        run_id=workflow_run_id,
                    )
                workflow_obj = workflows[workflow_name]
                workflow_obj = workflows[workflow_id]

            while checkruns is not None:
                for checkrun_node in checkruns["nodes"]:
@@ -572,12 +586,12 @@ def add_workflow_conclusions(
    # the jobs in but don't put the workflow in. We care more about the jobs in
    # the workflow that ran than the container workflow.
    res: JobNameToStateDict = {}
    for workflow_name, workflow in workflows.items():
    for workflow in workflows.values():
        if len(workflow.jobs) > 0:
            for job_name, job in workflow.jobs.items():
                res[job_name] = job
        else:
            res[workflow_name] = JobCheckState(
            res[workflow.name] = JobCheckState(
                workflow.name,
                workflow.url,
                workflow.status,
@@ -1163,7 +1177,6 @@ class GitHubPR:
            # Finally, upload the record to Rockset. The list of pending and failed
            # checks are at the time of the merge
            save_merge_record(
                collection=ROCKSET_MERGES_COLLECTION,
                comment_id=comment_id,
                pr_num=self.pr_num,
                owner=self.org,
@@ -1179,10 +1192,8 @@ class GitHubPR:
                merge_base_sha=self.get_merge_base(),
                merge_commit_sha=merge_commit_sha,
                is_failed=False,
                dry_run=dry_run,
                skip_mandatory_checks=skip_mandatory_checks,
                ignore_current=bool(ignore_current_checks),
                workspace=ROCKSET_MERGES_WORKSPACE,
            )
        else:
            print("Missing comment ID or PR number, couldn't upload to Rockset")
@@ -1489,7 +1500,6 @@ def checks_to_markdown_bullets(

@retries_decorator()
def save_merge_record(
    collection: str,
    comment_id: int,
    pr_num: int,
    owner: str,
@@ -1505,59 +1515,44 @@ def save_merge_record(
    merge_base_sha: str,
    merge_commit_sha: str = "",
    is_failed: bool = False,
    dry_run: bool = False,
    skip_mandatory_checks: bool = False,
    ignore_current: bool = False,
    error: str = "",
    workspace: str = "commons",
) -> None:
    """
    This saves the merge records into Rockset, so we can query them (for fun and profit)
    This saves the merge records as a json, which can later be uploaded to s3
    """
    if dry_run:
        # Decide not to save the record to Rockset if dry-run is set to not pollute
        # the collection
        return

    try:
        import rockset  # type: ignore[import]

        # Prepare the record to be written into Rockset
        data = [
            {
                "comment_id": comment_id,
                "pr_num": pr_num,
                "owner": owner,
                "project": project,
                "author": author,
                "pending_checks": pending_checks,
                "failed_checks": failed_checks,
                "ignore_current_checks": ignore_current_checks,
                "broken_trunk_checks": broken_trunk_checks,
                "flaky_checks": flaky_checks,
                "unstable_checks": unstable_checks,
                "last_commit_sha": last_commit_sha,
                "merge_base_sha": merge_base_sha,
                "merge_commit_sha": merge_commit_sha,
                "is_failed": is_failed,
                "skip_mandatory_checks": skip_mandatory_checks,
                "ignore_current": ignore_current,
                "error": error,
                # This is a unique identifier for the record for deduping purposes
                # in rockset. Any unique string would work
                "_id": f"{project}-{pr_num}-{comment_id}-{os.environ.get('GITHUB_RUN_ID')}",
            }
        ]
    repo_root = Path(__file__).resolve().parent.parent.parent

    # Prepare the record to be written into Rockset
    data = [
        {
            "comment_id": comment_id,
            "pr_num": pr_num,
            "owner": owner,
            "project": project,
            "author": author,
            "pending_checks": pending_checks,
            "failed_checks": failed_checks,
            "ignore_current_checks": ignore_current_checks,
            "broken_trunk_checks": broken_trunk_checks,
            "flaky_checks": flaky_checks,
            "unstable_checks": unstable_checks,
            "last_commit_sha": last_commit_sha,
            "merge_base_sha": merge_base_sha,
            "merge_commit_sha": merge_commit_sha,
            "is_failed": is_failed,
            "skip_mandatory_checks": skip_mandatory_checks,
            "ignore_current": ignore_current,
            "error": error,
        }
    ]

        client = rockset.RocksetClient(
            host="api.usw2a1.rockset.com", api_key=os.environ["ROCKSET_API_KEY"]
        )
        client.Documents.add_documents(
            collection=collection,
            data=data,
            workspace=workspace,
        )

    except ModuleNotFoundError:
        print("Rockset is missing, no record will be saved")
        return
    with open(repo_root / "merge_record.json", "w") as f:
        json.dump(data, f)


@retries_decorator(rc=[])
@@ -2330,6 +2325,15 @@ def main() -> None:
            dry_run=args.dry_run,
        )
        return
    if not pr.is_ghstack_pr() and pr.base_ref() != pr.default_branch():
        gh_post_pr_comment(
            org,
            project,
            args.pr_num,
            f"PR targets {pr.base_ref()} rather than {pr.default_branch()}, refusing merge request",
            dry_run=args.dry_run,
        )
        return

    if args.check_mergeability:
        if pr.is_ghstack_pr():
@@ -2365,7 +2369,6 @@ def main() -> None:
        # list of pending and failed checks here, but they are not really
        # needed at the moment
        save_merge_record(
            collection=ROCKSET_MERGES_COLLECTION,
            comment_id=args.comment_id,
            pr_num=args.pr_num,
            owner=org,
@@ -2380,11 +2383,9 @@ def main() -> None:
            last_commit_sha=pr.last_commit().get("oid", ""),
            merge_base_sha=pr.get_merge_base(),
            is_failed=True,
            dry_run=args.dry_run,
            skip_mandatory_checks=args.force,
            ignore_current=args.ignore_current,
            error=str(e),
            workspace=ROCKSET_MERGES_WORKSPACE,
        )
    else:
        print("Missing comment ID or PR number, couldn't upload to Rockset")
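The run-deduplication change above keys workflows by workflow ID and keeps only the run with the largest run ID, so a retried workflow's newest conclusion wins. A compact sketch of that heuristic (the data below is made up):

runs = [
    {"workflow_id": 7, "run_id": 100, "conclusion": "FAILURE"},
    {"workflow_id": 7, "run_id": 101, "conclusion": "SUCCESS"},  # retry of the same workflow
]

latest = {}
for run in runs:
    wf = run["workflow_id"]
    # Keep only the newest run per workflow; a larger run ID means a newer run.
    if wf not in latest or latest[wf]["run_id"] < run["run_id"]:
        latest[wf] = run

print(latest[7]["conclusion"])  # SUCCESS: the retried run wins
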
@@ -81,7 +81,7 @@ jobs:
  !{{ config["build_name"] }}-test:  # Testing
    if: ${{ github.repository_owner == 'pytorch' }}
    needs: !{{ config["build_name"] }}-build
{%- if config["gpu_arch_type"] != "rocm" %}
{%- if config["gpu_arch_type"] not in ["rocm", "xpu"] %}
    uses: ./.github/workflows/_binary-test-linux.yml
    with:!{{ upload.binary_env_as_input(config) }}
      build_name: !{{ config["build_name"] }}
@@ -101,6 +101,40 @@ jobs:
{%- endif %}
    secrets:
      github-token: ${{ secrets.GITHUB_TOKEN }}
{%- elif config["gpu_arch_type"] == "xpu" %}
    runs-on: linux.idc.xpu
    timeout-minutes: !{{ common.timeout_minutes }}
    !{{ upload.binary_env(config) }}
    permissions:
      id-token: write
      contents: read
    steps:
      - name: Setup XPU
        uses: ./.github/actions/setup-xpu
      - name: configure aws credentials
        id: aws_creds
        uses: aws-actions/configure-aws-credentials@v1.7.0
        with:
          role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_s3_and_ecr_read_only
          aws-region: us-east-1
      - name: Login to Amazon ECR
        id: login-ecr
        uses: aws-actions/amazon-ecr-login@v2
      - uses: !{{ common.download_artifact_action }}
        name: Download Build Artifacts
        with:
          name: !{{ config["build_name"] }}
          path: "${{ runner.temp }}/artifacts/"
      !{{ common.checkout(deep_clone=False, directory="pytorch") }}
      !{{ common.checkout(deep_clone=False, directory="builder", repository=common.builder_repo, branch=common.builder_branch) }}
      - name: Pull Docker image
        uses: pytorch/test-infra/.github/actions/pull-docker-image@main
        with:
          docker-image: !{{ config["container_image"] }}
      - name: Test Pytorch binary
        uses: ./pytorch/.github/actions/test-pytorch-binary
      - name: Teardown XPU
        uses: ./.github/actions/teardown-xpu
{%- else %}
    runs-on: linux.rocm.gpu
    timeout-minutes: !{{ common.timeout_minutes }}
4 changes: .github/templates/upload.yml.j2 (vendored)
@@ -30,6 +30,9 @@
{%- if config["devtoolset"] %}
      DESIRED_DEVTOOLSET: !{{ config["devtoolset"] }}
{%- endif %}
{%- if config.use_split_build is defined %}
      use_split_build: !{{ config["use_split_build"] }}
{%- endif %}
{%- endif %}
{%- if config["package_type"] == "libtorch" %}
{%- if config["libtorch_config"] %}
@@ -44,6 +47,7 @@
      # without this value pip does not get installed for some reason
      DESIRED_PYTHON: "3.8"
{%- endif %}

{%- else %}
      DESIRED_PYTHON: "!{{ config["python_version"] }}"
{%- endif %}
.github/workflows/_bazel-build-test.yml (vendored, 7 changes)

@@ -27,6 +27,11 @@ on:
         type: string
         description: |
           A JSON description of what configs to run later on.
+      runner:
+        required: false
+        type: string
+        default: "linux.large"
+        description: Runner type
 
 env:
   GIT_DEFAULT_BRANCH: ${{ github.event.repository.default_branch }}
@@ -34,7 +39,7 @@ env:
 jobs:
   filter:
     if: github.repository_owner == 'pytorch'
-    runs-on: [self-hosted, linux.large]
+    runs-on: ${{ inputs.runner }}
    outputs:
       test-matrix: ${{ steps.filter.outputs.test-matrix }}
       is-test-matrix-empty: ${{ steps.filter.outputs.is-test-matrix-empty }}
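This turns the previously hardcoded filter runner into a caller-supplied knob. A caller would override it along these lines (other required inputs of the reusable workflow are omitted, and the runner label is illustrative):

  jobs:
    bazel-build-test:
      uses: ./.github/workflows/_bazel-build-test.yml
      with:
        # falls back to "linux.large" when omitted
        runner: linux.2xlarge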
.github/workflows/_binary-build-linux.yml (vendored, 10 changes)

@@ -21,6 +21,13 @@ on:
       default: 210
       type: number
       description: timeout for the job
+    use_split_build:
+      description: |
+        [Experimental] Build a libtorch-only wheel, then build pytorch such that
+        its binaries are built from the libtorch wheel.
+      required: false
+      type: boolean
+      default: false
     ALPINE_IMAGE:
       required: false
       type: string
@@ -110,6 +117,7 @@ jobs:
       PR_NUMBER: ${{ github.event.pull_request.number }}
       PYTORCH_FINAL_PACKAGE_DIR: /artifacts
       SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
+      USE_SPLIT_BUILD: ${{ inputs.use_split_build }}
     steps:
       - name: Make the env permanent during this workflow (but not the secrets)
         shell: bash
@@ -137,6 +145,7 @@ jobs:
             echo "PR_NUMBER=${{ env.PR_NUMBER }}"
             echo "PYTORCH_FINAL_PACKAGE_DIR=${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
             echo "SHA1=${{ env.SHA1 }}"
+            echo "USE_SPLIT_BUILD=${{ env.USE_SPLIT_BUILD }}"
           } >> "${GITHUB_ENV}"
 
       - name: List the env
@@ -246,6 +255,7 @@ jobs:
           -e PYTORCH_ROOT \
           -e SKIP_ALL_TESTS \
           -e PYTORCH_EXTRA_INSTALL_REQUIREMENTS \
+          -e USE_SPLIT_BUILD \
           --tty \
           --detach \
           -v "${GITHUB_WORKSPACE}/pytorch:/pytorch" \
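The "Make the env permanent" step relies on GitHub Actions' standard $GITHUB_ENV mechanism: every KEY=value line appended to that file becomes an environment variable for all subsequent steps of the job. A minimal standalone sketch of the pattern (step and variable names here are illustrative, not from the workflow above):

  steps:
    - name: Persist job env for later steps
      shell: bash
      run: |
        {
          echo "PACKAGE_TYPE=${{ env.PACKAGE_TYPE }}"
          echo "USE_SPLIT_BUILD=${{ env.USE_SPLIT_BUILD }}"
        } >> "${GITHUB_ENV}"
    - name: Read it back in a later step
      shell: bash
      run: echo "split build: ${USE_SPLIT_BUILD}"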
.github/workflows/_binary-test-linux.yml (vendored, 9 changes)

@@ -63,6 +63,13 @@ on:
       required: true
       type: string
       description: Hardware to run this job on. Valid values are linux.4xlarge, linux.4xlarge.nvidia.gpu, linux.arm64.2xlarge, and linux.rocm.gpu
+    use_split_build:
+      description: |
+        [Experimental] Build a libtorch-only wheel, then build pytorch such that
+        its binaries are built from the libtorch wheel.
+      required: false
+      type: boolean
+      default: false
   secrets:
     github-token:
       required: true
@@ -97,6 +104,7 @@ jobs:
       PR_NUMBER: ${{ github.event.pull_request.number }}
       PYTORCH_FINAL_PACKAGE_DIR: /artifacts
       SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
+      USE_SPLIT_BUILD: ${{ inputs.use_split_build }}
     steps:
       - name: Make the env permanent during this workflow (but not the secrets)
         shell: bash
@@ -124,6 +132,7 @@ jobs:
             echo "PR_NUMBER=${{ env.PR_NUMBER }}"
             echo "PYTORCH_FINAL_PACKAGE_DIR=${{ env.PYTORCH_FINAL_PACKAGE_DIR }}"
             echo "SHA1=${{ env.SHA1 }}"
+            echo "USE_SPLIT_BUILD=${{ env.USE_SPLIT_BUILD }}"
           } >> "${GITHUB_ENV}"
 
       - name: "[FB EMPLOYEES] Enable SSH (Click me for login details)"
.github/workflows/_binary-upload.yml (vendored, 8 changes)

@@ -55,6 +55,13 @@ on:
       required: false
       type: string
       description: Desired python version
+    use_split_build:
+      description: |
+        [Experimental] Build a libtorch-only wheel, then build pytorch such that
+        its binaries are built from the libtorch wheel.
+      required: false
+      type: boolean
+      default: false
   secrets:
     github-token:
       required: true
@@ -93,6 +100,7 @@ jobs:
       PR_NUMBER: ${{ github.event.pull_request.number }}
       PYTORCH_FINAL_PACKAGE_DIR: /artifacts
       SHA1: ${{ github.event.pull_request.head.sha || github.sha }}
+      USE_SPLIT_BUILD: ${{ inputs.use_split_build }}
     steps:
       - name: Checkout PyTorch
         uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
.github/workflows/_linux-build-label.yml (vendored, 8 changes)

@@ -56,6 +56,13 @@ on:
       required: false
       type: string
       default: ""
+    use_split_build:
+      description: |
+        [Experimental] Build a libtorch-only wheel, then build pytorch such that
+        its binaries are built from the libtorch wheel.
+      required: false
+      type: boolean
+      default: false
   secrets:
     HUGGING_FACE_HUB_TOKEN:
       required: false
@@ -107,3 +114,4 @@ jobs:
       aws-role-to-assume: ${{ inputs.aws-role-to-assume }}
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+      use_split_build: ${{ inputs.use_split_build }}
.github/workflows/_linux-build.yml (vendored, 24 changes)

@@ -39,7 +39,7 @@ on:
         type: string
         default: "linux.2xlarge"
         description: |
-          List of CUDA architectures CI build should target.
+          Label of the runner this job should run on.
     test-matrix:
       required: false
       type: string
@@ -64,6 +64,14 @@ on:
       required: false
       type: string
       default: ""
+    use_split_build:
+      description: |
+        [Experimental] Build a libtorch-only wheel, then build pytorch such that
+        its binaries are built from the libtorch wheel.
+      required: false
+      type: boolean
+      default: false
+
 secrets:
   HUGGING_FACE_HUB_TOKEN:
     required: false
@@ -181,6 +189,7 @@ jobs:
           DEBUG: ${{ inputs.build-with-debug && '1' || '0' }}
           OUR_GITHUB_JOB_ID: ${{ steps.get-job-id.outputs.job-id }}
           HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+          USE_SPLIT_BUILD: ${{ inputs.use_split_build }}
         run: |
           # detached container should get cleaned up by teardown_ec2_linux
           container_name=$(docker run \
@@ -199,6 +208,7 @@ jobs:
             -e PR_LABELS \
             -e OUR_GITHUB_JOB_ID \
             -e HUGGING_FACE_HUB_TOKEN \
+            -e USE_SPLIT_BUILD \
             --env-file="/tmp/github_env_${GITHUB_RUN_ID}" \
             --security-opt seccomp=unconfined \
             --cap-add=SYS_PTRACE \
@@ -218,7 +228,7 @@ jobs:
 
       - name: Store PyTorch Build Artifacts on S3
         uses: seemethere/upload-artifact-s3@v5
-        if: inputs.build-generates-artifacts && steps.build.outcome != 'skipped'
+        if: inputs.build-generates-artifacts && steps.build.outcome != 'skipped' && inputs.use_split_build != 'true'
         with:
           name: ${{ inputs.build-environment }}
           retention-days: 14
@@ -226,6 +236,16 @@ jobs:
           path: artifacts.zip
           s3-bucket: ${{ inputs.s3-bucket }}
 
+      - name: Store PyTorch Build Artifacts on S3
+        uses: seemethere/upload-artifact-s3@v5
+        if: inputs.build-generates-artifacts && steps.build.outcome != 'skipped' && inputs.use_split_build == 'true'
+        with:
+          name: ${{ inputs.build-environment }}-experimental-split-build
+          retention-days: 14
+          if-no-files-found: error
+          path: artifacts.zip
+          s3-bucket: ${{ inputs.s3-bucket }}
+
       - name: Upload sccache stats
         if: steps.build.outcome != 'skipped'
         uses: seemethere/upload-artifact-s3@v5
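The two "Store PyTorch Build Artifacts on S3" steps above are intended to be mutually exclusive on use_split_build. One subtlety worth flagging: the input is declared as type: boolean, while both if: expressions compare it against the string 'true', and GitHub's expression language coerces mismatched types to numbers when comparing. Comparing the boolean directly sidesteps the question entirely; a hedged sketch of that variant (not what this diff does):

  - name: Store PyTorch Build Artifacts on S3 (split build)
    uses: seemethere/upload-artifact-s3@v5
    # truthiness of the boolean input, no string comparison involved
    if: inputs.build-generates-artifacts && steps.build.outcome != 'skipped' && inputs.use_split_build
    with:
      name: ${{ inputs.build-environment }}-experimental-split-build
      retention-days: 14
      if-no-files-found: error
      path: artifacts.zip
      s3-bucket: ${{ inputs.s3-bucket }}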
.github/workflows/_runner-determinator.yml (vendored, 272 changes)

@@ -3,39 +3,272 @@ name: Check whether the workflow owner can use ARC runners
 on:
   workflow_call:
     inputs:
-      user_name:
+      triggering_actor:
         required: true
         type: string
-        description: The name of the workflow owner.
+        description: The triggering_actor for the workflow. Use github.triggering_actor
+      issue_owner:
+        required: true
+        type: string
+        description: The owner of the issue. Use github.event.pull_request.user.login || github.event.issue.user.login
       curr_branch:
         required: true
         type: string
-        description: Current branch.
+        description: Current branch or tag.
+      curr_ref_type:
+        required: false
+        type: string
+        default: branch
+        description: The value of "github.ref_type", "branch" or "tag"
       issue_number:
         required: false
         type: string
         default: "5132"
         description: |
           Fetches the GitHub issue from pytorch/test-infra.
           Example: https://github.com/pytorch/test-infra/issues/5132
 
     outputs:
-      workflow-type:
+      label-type:
         description: Type of runners to use
-        value: ${{ jobs.runner-determinator.outputs.workflow-type }}
+        value: ${{ jobs.runner-determinator.outputs.label-type }}
 
 jobs:
   runner-determinator:
-    runs-on: linux.4xlarge
+    runs-on: ubuntu-latest
     outputs:
-      workflow-type: ${{ steps.set-condition.outputs.workflow-type }}
+      label-type: ${{ steps.set-condition.outputs.label-type }}
     env:
       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       ISSUE_NUMBER: ${{ inputs.issue_number }}
-      USERNAME: ${{ inputs.user_name }}
+      TRIGGERING_ACTOR: ${{ inputs.triggering_actor }}
+      ISSUE_OWNER: ${{ inputs.issue_owner }}
     steps:
-      - name: Checkout PyTorch
-        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
-        with:
-          fetch-depth: 1
-          submodules: true
+      # - name: Checkout PyTorch
+      #   uses: pytorch/pytorch/.github/actions/checkout-pytorch@main
+      #   with:
+      #     fetch-depth: 1
+      #     submodules: true
+
+      # TODO: Remove the hardcoded step below
+      # Hardcoding below is temporary for testing ALI runners
+      # The file below should match the script found in .github/scripts/runner_determinator.py
+      - name: Hardcode runner-determinator script
+        run: |
+          cat <<EOF > runner_determinator.py
+          # flake8: noqa: G004
+
+          import logging
+          import os
+          from argparse import ArgumentParser
+          from logging import LogRecord
+          from typing import Any, Iterable
+
+          from github import Auth, Github
+          from github.Issue import Issue
+
+
+          WORKFLOW_LABEL_META = ""  # use meta runners
+          WORKFLOW_LABEL_LF = "lf."  # use runners from the linux foundation
+
+          GITHUB_OUTPUT = os.getenv("GITHUB_OUTPUT", "")
+          GH_OUTPUT_KEY_LABEL_TYPE = "label-type"
+
+
+          class ColorFormatter(logging.Formatter):
+              """Color codes the log messages based on the log level"""
+
+              COLORS = {
+                  "WARNING": "\033[33m",  # Yellow
+                  "ERROR": "\033[31m",  # Red
+                  "CRITICAL": "\033[31m",  # Red
+                  "INFO": "\033[0m",  # Reset
+                  "DEBUG": "\033[0m",  # Reset
+              }
+
+              def format(self, record: LogRecord) -> str:
+                  log_color = self.COLORS.get(record.levelname, "\033[0m")  # Default to reset
+                  record.msg = f"{log_color}{record.msg}\033[0m"
+                  return super().format(record)
+
+
+          handler = logging.StreamHandler()
+          handler.setFormatter(ColorFormatter(fmt="%(levelname)-8s: %(message)s"))
+
+          log = logging.getLogger(os.path.basename(__file__))
+          log.addHandler(handler)
+          log.setLevel(logging.INFO)
+
+
+          def set_github_output(key: str, value: str) -> None:
+              """
+              Defines outputs of the github action that invokes this script
+              """
+              if not GITHUB_OUTPUT:
+                  # See https://github.blog/changelog/2022-10-11-github-actions-deprecating-save-state-and-set-output-commands/ for deprecation notice
+                  log.warning(
+                      "No env var found for GITHUB_OUTPUT, you must be running this code locally. Falling back to the deprecated print method."
+                  )
+                  print(f"::set-output name={key}::{value}")
+                  return
+
+              with open(GITHUB_OUTPUT, "a") as f:
+                  log.info(f"Setting output: {key}='{value}'")
+                  f.write(f"{key}={value}\n")
+
+
+          def parse_args() -> Any:
+              parser = ArgumentParser("Get dynamic rollout settings")
+              parser.add_argument("--github-token", type=str, required=True, help="GitHub token")
+              parser.add_argument(
+                  "--github-issue-repo",
+                  type=str,
+                  required=False,
+                  default="pytorch/test-infra",
+                  help="GitHub repo to get the issue",
+              )
+              parser.add_argument(
+                  "--github-repo",
+                  type=str,
+                  required=True,
+                  help="GitHub repo where CI is running",
+              )
+              parser.add_argument(
+                  "--github-issue", type=int, required=True, help="GitHub issue number"
+              )
+              parser.add_argument(
+                  "--github-actor", type=str, required=True, help="GitHub triggering_actor"
+              )
+              parser.add_argument(
+                  "--github-issue-owner", type=str, required=True, help="GitHub issue owner"
+              )
+              parser.add_argument(
+                  "--github-branch", type=str, required=True, help="Current GitHub branch or tag"
+              )
+              parser.add_argument(
+                  "--github-ref-type",
+                  type=str,
+                  required=True,
+                  help="Current GitHub ref type, branch or tag",
+              )
+
+              return parser.parse_args()
+
+
+          def get_gh_client(github_token: str) -> Github:
+              auth = Auth.Token(github_token)
+              return Github(auth=auth)
+
+
+          def get_issue(gh: Github, repo: str, issue_num: int) -> Issue:
+              repo = gh.get_repo(repo)
+              return repo.get_issue(number=issue_num)
+
+
+          def get_potential_pr_author(
+              gh: Github, repo: str, username: str, ref_type: str, ref_name: str
+          ) -> str:
+              # If the trigger was a new tag added by a bot, this is a ciflow case
+              # Fetch the actual username from the original PR. The PR number is
+              # embedded in the tag name: ciflow/<name>/<pr-number>
+              if username == "pytorch-bot[bot]" and ref_type == "tag":
+                  split_tag = ref_name.split("/")
+                  if (
+                      len(split_tag) == 3
+                      and split_tag[0] == "ciflow"
+                      and split_tag[2].isnumeric()
+                  ):
+                      pr_number = split_tag[2]
+                      try:
+                          repository = gh.get_repo(repo)
+                          pull = repository.get_pull(number=int(pr_number))
+                      except Exception as e:
+                          raise Exception(  # noqa: TRY002
+                              f"issue with pull request {pr_number} from repo {repository}"
+                          ) from e
+                      return pull.user.login
+              # In all other cases, return the original input username
+              return username
+
+
+          def is_exception_branch(branch: str) -> bool:
+              return branch.split("/")[0] in {"main", "nightly", "release", "landchecks"}
+
+
+          def get_workflow_type(issue: Issue, workflow_requestors: Iterable[str]) -> str:
+              try:
+                  first_comment = issue.get_comments()[0].body.strip("\n\t ")
+
+                  if first_comment[0] == "!":
+                      log.info("LF Workflows are disabled for everyone. Using meta runners.")
+                      return WORKFLOW_LABEL_META
+                  elif first_comment[0] == "*":
+                      log.info("LF Workflows are enabled for everyone. Using LF runners.")
+                      return WORKFLOW_LABEL_LF
+                  else:
+                      all_opted_in_users = {
+                          usr_raw.strip("\n\t@ ") for usr_raw in first_comment.split()
+                      }
+                      opted_in_requestors = {
+                          usr for usr in workflow_requestors if usr in all_opted_in_users
+                      }
+                      if opted_in_requestors:
+                          log.info(
+                              f"LF Workflows are enabled for {', '.join(opted_in_requestors)}. Using LF runners."
+                          )
+                          return WORKFLOW_LABEL_LF
+                      else:
+                          log.info(
+                              f"LF Workflows are disabled for {', '.join(workflow_requestors)}. Using meta runners."
+                          )
+                          return WORKFLOW_LABEL_META
+
+              except Exception as e:
+                  log.error(
+                      f"Failed to determine workflow type. Falling back to meta runners. Exception: {e}"
+                  )
+                  return WORKFLOW_LABEL_META
+
+
+          def main() -> None:
+              args = parse_args()
+
+              if args.github_ref_type == "branch" and is_exception_branch(args.github_branch):
+                  log.info(f"Exception branch: '{args.github_branch}', using meta runners")
+                  label_type = WORKFLOW_LABEL_META
+              else:
+                  try:
+                      gh = get_gh_client(args.github_token)
+                      # The default issue we use - https://github.com/pytorch/test-infra/issues/5132
+                      issue = get_issue(gh, args.github_issue_repo, args.github_issue)
+                      username = get_potential_pr_author(
+                          gh,
+                          args.github_repo,
+                          args.github_actor,
+                          args.github_ref_type,
+                          args.github_branch,
+                      )
+                      label_type = get_workflow_type(
+                          issue,
+                          (
+                              args.github_issue_owner,
+                              username,
+                          ),
+                      )
+                  except Exception as e:
+                      log.error(
+                          f"Failed to get issue. Falling back to meta runners. Exception: {e}"
+                      )
+                      label_type = WORKFLOW_LABEL_META
+
+              set_github_output(GH_OUTPUT_KEY_LABEL_TYPE, label_type)
+
+
+          if __name__ == "__main__":
+              main()
+          EOF
+
+          cat runner_determinator.py
 
       - name: Install dependencies
         run: python3 -m pip install urllib3==1.26.18 PyGithub==2.3.0
@@ -44,15 +277,14 @@ jobs:
         id: set-condition
         run: |
           curr_branch="${{ inputs.curr_branch }}"
+          curr_ref_type="${{ inputs.curr_ref_type }}"
           echo "Current branch is '$curr_branch'"
 
-          output="$(python3 .github/scripts/get_workflow_type.py \
+          python3 runner_determinator.py \
             --github-token "$GITHUB_TOKEN" \
             --github-issue "$ISSUE_NUMBER" \
             --github-branch "$curr_branch" \
-            --github-user "$USERNAME")"
-
-          echo "Output: '${output}'"
-
-          WORKFLOW_TYPE=$(echo "${output}" | jq -r '.workflow_type')
-          echo "workflow-type=$WORKFLOW_TYPE" >> "$GITHUB_OUTPUT"
+            --github-actor "$TRIGGERING_ACTOR" \
+            --github-issue-owner "$ISSUE_OWNER" \
+            --github-ref-type "$curr_ref_type" \
+            --github-repo "$GITHUB_REPOSITORY"
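The script's label-type output is a runner-label prefix: the empty string selects Meta-hosted runners and "lf." selects Linux Foundation runners. A plausible consumer, sketched from the inputs defined above (the job names and the final runner label are illustrative, not from this diff):

  jobs:
    get-label-type:
      uses: ./.github/workflows/_runner-determinator.yml
      with:
        triggering_actor: ${{ github.triggering_actor }}
        issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
        curr_branch: ${{ github.head_ref || github.ref_name }}
        curr_ref_type: ${{ github.ref_type }}

    build:
      needs: get-label-type
      # "" routes to Meta runners, "lf." to Linux Foundation runners
      runs-on: ${{ needs.get-label-type.outputs.label-type }}linux.2xlarge

The opt-in policy itself lives in the first comment of the configured issue: a leading "!" disables LF runners for everyone, a leading "*" enables them for everyone, and otherwise the comment is read as a whitespace-separated list of opted-in usernames matched against the issue owner and the (PR-resolved) triggering actor.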
.github/workflows/_win-build.yml (vendored, 12 changes)

@@ -30,6 +30,12 @@ on:
           An optional JSON description of what test configs to run later on. This
           is moved here from the Linux test workflow so that we can apply filter
           logic using test-config labels earlier and skip unnecessary builds
+      runner:
+        required: false
+        type: string
+        default: "windows.4xlarge.nonephemeral"
+        description: |
+          Label of the runner this job should run on.
 
     outputs:
       test-matrix:
@@ -43,10 +49,13 @@ jobs:
   build:
     # Don't run on forked repos.
     if: github.repository_owner == 'pytorch'
-    runs-on: [self-hosted, windows.4xlarge.nonephemeral]
+    runs-on: ${{ inputs.runner }}
     timeout-minutes: 240
     outputs:
       test-matrix: ${{ steps.filter.outputs.test-matrix }}
+    defaults:
+      run:
+        shell: bash
     steps:
       # Duplicated in win-test because this MUST go before a checkout
       - name: Enable git symlinks on Windows and disable fsmonitor daemon
@@ -89,6 +98,7 @@ jobs:
 
       - name: Parse ref
         id: parse-ref
+        shell: bash
         run: python3 .github/scripts/parse_ref.py
 
       - name: Get workflow job id
.github/workflows/_win-test.yml (vendored, 4 changes)

@@ -41,6 +41,9 @@ jobs:
       fail-fast: false
     runs-on: ${{ matrix.runner }}
     timeout-minutes: ${{ matrix.mem_leak_check == 'mem_leak_check' && 600 || inputs.timeout-minutes }}
+    defaults:
+      run:
+        shell: bash
     steps:
       # Duplicated in win-build because this MUST go before a checkout
       - name: Enable git symlinks on Windows and disable fsmonitor daemon
@@ -224,6 +227,7 @@ jobs:
 
       - name: Parse ref
         id: parse-ref
+        shell: bash
         run: python3 .github/scripts/parse_ref.py
 
       - name: Uninstall PyTorch
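The timeout-minutes expression in the context above uses the && / || chain that GitHub Actions expressions rely on in place of a ternary operator: cond && a || b evaluates to a when cond is true, otherwise b. The caveat is that a falsy a (0, '', null) falls through to b; 600 is truthy, so the idiom is safe here. A minimal illustration (the input name is a made-up placeholder):

  jobs:
    test:
      # "if is-slow-job then 720 else 240"
      timeout-minutes: ${{ inputs.is-slow-job && 720 || 240 }}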
.github/workflows/build-triton-wheel.yml (vendored, 17 changes)

@@ -14,6 +14,7 @@ on:
       - .github/ci_commit_pins/triton.txt
       - .ci/docker/ci_commit_pins/triton.txt
       - .ci/docker/ci_commit_pins/triton-rocm.txt
+      - .ci/docker/ci_commit_pins/triton-xpu.txt
   pull_request:
     paths:
       - .github/workflows/build-triton-wheel.yml
@@ -21,6 +22,7 @@ on:
       - .github/ci_commit_pins/triton.txt
       - .ci/docker/ci_commit_pins/triton.txt
       - .ci/docker/ci_commit_pins/triton-rocm.txt
+      - .ci/docker/ci_commit_pins/triton-xpu.txt
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name == 'workflow_dispatch' }}
@@ -34,7 +36,7 @@ jobs:
       fail-fast: false
       matrix:
         py_vers: [ "3.8", "3.9", "3.10", "3.11", "3.12" ]
-        device: ["cuda", "rocm"]
+        device: ["cuda", "rocm", "xpu"]
         include:
           - device: "rocm"
             rocm_version: "6.1"
@@ -102,11 +104,6 @@ jobs:
             ;;
         esac
 
-        BUILD_ROCM=""
-        if [[ "$BUILD_DEVICE" == "rocm" ]]; then
-          BUILD_ROCM="--build-rocm"
-        fi
-
         RELEASE=""
         if [[ "${IS_RELEASE_TAG}" == true ]]; then
           RELEASE="--release"
@@ -114,7 +111,13 @@ jobs:
 
         docker exec -t "${container_name}" yum install -y zlib-devel zip
         docker exec -t "${container_name}" "${PYTHON_EXECUTABLE}" -m pip install -U setuptools==67.4.0
-        docker exec -t "${container_name}" "${PYTHON_EXECUTABLE}" /pytorch/.github/scripts/build_triton_wheel.py $BUILD_ROCM $RELEASE
+        # The Triton XPU build uses GCC 11
+        if [[ "${BUILD_DEVICE}" == xpu ]]; then
+          docker exec -t "${container_name}" yum install -y devtoolset-11-gcc-c++
+          docker exec -t "${container_name}" bash -c "source /opt/rh/devtoolset-11/enable && ${PYTHON_EXECUTABLE} /pytorch/.github/scripts/build_triton_wheel.py --device=$BUILD_DEVICE $RELEASE"
+        else
+          docker exec -t "${container_name}" bash -c "${PYTHON_EXECUTABLE} /pytorch/.github/scripts/build_triton_wheel.py --device=$BUILD_DEVICE $RELEASE"
+        fi
         docker exec -t "${container_name}" chown -R 1000.1000 /artifacts
 
       - uses: actions/upload-artifact@v3
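With the BUILD_ROCM flag gone, every matrix leg funnels through the single --device argument of build_triton_wheel.py; only the XPU leg first enables the GCC 11 toolchain from devtoolset-11. A condensed sketch of the resulting per-device invocations, with the container name and Python executable standing in for the values computed earlier in the step:

  - name: Build Triton wheel (per-device invocations, sketched)
    run: |
      # cuda / rocm legs: plain invocation with the unified --device flag
      docker exec -t "${container_name}" bash -c "${PYTHON_EXECUTABLE} /pytorch/.github/scripts/build_triton_wheel.py --device=cuda"
      # xpu leg: same entry point, but with devtoolset-11 sourced first
      docker exec -t "${container_name}" yum install -y devtoolset-11-gcc-c++
      docker exec -t "${container_name}" bash -c "source /opt/rh/devtoolset-11/enable && ${PYTHON_EXECUTABLE} /pytorch/.github/scripts/build_triton_wheel.py --device=xpu"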
.github/workflows/create_release.yml (vendored, 42 changes)

@@ -5,6 +5,11 @@ on:
     branches:
       - main
       - release/*
+    tags:
+      # Final Release tags look like: v1.11.0
+      - v[0-9]+.[0-9]+.[0-9]+
+      # Release candidate tags look like: v1.11.0-rc1
+      - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
   release:
     types: [published]
   pull_request:
@@ -18,6 +23,8 @@ jobs:
     # https://github.com/softprops/action-gh-release?tab=readme-ov-file#permissions
     permissions:
       contents: write
+    outputs:
+      pt_release_name: ${{ steps.release_name.outputs.pt_release_name }}
     steps:
       - uses: malfet/checkout@silent-checkout
         with:
@@ -49,11 +56,44 @@ jobs:
           # Create archive
           tar -czf "$PT_RELEASE_FILE" "$PT_RELEASE_NAME"
           echo "Created source archive $PT_RELEASE_FILE with content: $(ls -a "$PT_RELEASE_NAME")"
-      - name: Upload source distribution
+      - name: Upload source distribution for release
+        if: ${{ github.event_name == 'release' }}
         uses: softprops/action-gh-release@v1
         with:
           files: ${{env.PT_RELEASE_FILE}}
+      - name: Upload source distribution to GHA artifacts for release tags
+        if: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') && contains(github.ref, 'rc') }}
+        uses: actions/upload-artifact@v2
+        with:
+          name: ${{ env.PT_RELEASE_FILE }}
+          path: ${{ env.PT_RELEASE_FILE }}
+      - name: Set output
+        id: release_name
+        run: echo "::set-output name=pt_release_name::${{ env.PT_RELEASE_NAME }}.tar.gz"
+
+  upload_source_code_to_s3:
+    if: ${{ github.repository == 'pytorch/pytorch' && github.event_name == 'push' && startsWith(github.ref, 'refs/tags/v') && contains(github.ref, 'rc') }}
+    runs-on: linux.2xlarge
+    environment: sourcecode-upload
+    name: Upload source code to S3 for release tags
+    permissions:
+      id-token: write
+    needs: release
+    steps:
+      - uses: actions/download-artifact@v2
+        with:
+          name: ${{ needs.release.outputs.pt_release_name }}
+      - name: Configure AWS credentials (PyTorch account)
+        uses: aws-actions/configure-aws-credentials@v3
+        with:
+          role-to-assume: arn:aws:iam::749337293305:role/gha_pytorch_source_code_upload_role
+          aws-region: us-east-1
+      - uses: seemethere/upload-artifact-s3@v5
+        with:
+          s3-bucket: pytorch
+          s3-prefix: source_code/test
+          if-no-files-found: warn
+          path: ${{ needs.release.outputs.pt_release_name }}
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.sha }}-${{ github.event_name }}
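The new tag filters and if: gates split the upload path by tag shape: release-candidate tags flow to GHA artifacts and then to S3, while final tags are handled by the release-published event. Worked examples of how the gates evaluate (the refs are illustrative):

  on:
    push:
      tags:
        - v[0-9]+.[0-9]+.[0-9]+
        - v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+
  # refs/tags/v2.4.0-rc1 -> matches the rc pattern; contains(github.ref, 'rc') is true,
  #                         so the GHA-artifact upload and the S3 job both run
  # refs/tags/v2.4.0     -> matches the final pattern; the rc-gated steps are skipped,
  #                         and the asset is published later via the release event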
.github/workflows/docker-builds.yml (vendored, 1 change)

@@ -54,6 +54,7 @@ jobs:
           pytorch-linux-focal-py3-clang9-android-ndk-r21e,
           pytorch-linux-jammy-py3.8-gcc11,
           pytorch-linux-jammy-py3.8-gcc11-inductor-benchmarks,
+          pytorch-linux-jammy-py3.12-halide,
           pytorch-linux-jammy-xpu-2024.0-py3,
           pytorch-linux-jammy-py3-clang15-asan,
           pytorch-linux-focal-py3-clang10-onnx,
.github/workflows/generated-linux-aarch64-binary-manywheel-nightly.yml (generated, vendored, 10 changes)

@@ -54,7 +54,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_8-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cpu-aarch64-test:  # Testing
@@ -162,7 +162,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_9-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same requirement list as above, with nvidia-nccl-cu12==2.20.5)
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same requirement list as above, with nvidia-nccl-cu12==2.21.5)
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_9-cpu-aarch64-test:  # Testing
@@ -270,7 +270,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_10-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same requirement list as above, with nvidia-nccl-cu12==2.20.5)
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same requirement list as above, with nvidia-nccl-cu12==2.21.5)
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_10-cpu-aarch64-test:  # Testing
@@ -378,7 +378,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_11-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same requirement list as above, with nvidia-nccl-cu12==2.20.5)
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same requirement list as above, with nvidia-nccl-cu12==2.21.5)
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_11-cpu-aarch64-test:  # Testing
@@ -486,7 +486,7 @@ jobs:
       ALPINE_IMAGE: "arm64v8/alpine"
       build_name: manywheel-py3_12-cpu-aarch64
       build_environment: linux-aarch64-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same requirement list as above, with nvidia-nccl-cu12==2.20.5)
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same requirement list as above, with nvidia-nccl-cu12==2.21.5)
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_12-cpu-aarch64-test:  # Testing
.github/workflows/generated-linux-binary-manywheel-main.yml (generated, vendored, 132 changes)

@@ -48,7 +48,7 @@ jobs:
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda11_8
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu11==11.8.89; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu11==11.8.87; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu11==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu11==11.11.3.6; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu11==10.9.0.58; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu11==10.3.0.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu11==11.4.1.48; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu11==11.7.5.86; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu11==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu11==11.8.86; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same cu11 requirement list, with nvidia-nccl-cu11==2.21.5)
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cuda11_8-test:  # Testing
@@ -72,6 +72,48 @@ jobs:
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
 
+  manywheel-py3_8-cuda11_8-split-build:
+    if: ${{ github.repository_owner == 'pytorch' }}
+    uses: ./.github/workflows/_binary-build-linux.yml
+    with:
+      PYTORCH_ROOT: /pytorch
+      BUILDER_ROOT: /builder
+      PACKAGE_TYPE: manywheel
+      # TODO: This is a legacy variable that we eventually want to get rid of in
+      #       favor of GPU_ARCH_VERSION
+      DESIRED_CUDA: cu118
+      GPU_ARCH_VERSION: 11.8
+      GPU_ARCH_TYPE: cuda
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
+      use_split_build: True
+      DESIRED_PYTHON: "3.8"
+      build_name: manywheel-py3_8-cuda11_8-split
+      build_environment: linux-binary-manywheel
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same cu11 requirement list, with nvidia-nccl-cu11==2.21.5)
+    secrets:
+      github-token: ${{ secrets.GITHUB_TOKEN }}
+  manywheel-py3_8-cuda11_8-split-test:  # Testing
+    if: ${{ github.repository_owner == 'pytorch' }}
+    needs: manywheel-py3_8-cuda11_8-split-build
+    uses: ./.github/workflows/_binary-test-linux.yml
+    with:
+      PYTORCH_ROOT: /pytorch
+      BUILDER_ROOT: /builder
+      PACKAGE_TYPE: manywheel
+      # TODO: This is a legacy variable that we eventually want to get rid of in
+      #       favor of GPU_ARCH_VERSION
+      DESIRED_CUDA: cu118
+      GPU_ARCH_VERSION: 11.8
+      GPU_ARCH_TYPE: cuda
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda11.8-main
+      use_split_build: True
+      DESIRED_PYTHON: "3.8"
+      build_name: manywheel-py3_8-cuda11_8-split
+      build_environment: linux-binary-manywheel
+      runs_on: linux.4xlarge.nvidia.gpu
+    secrets:
+      github-token: ${{ secrets.GITHUB_TOKEN }}
+
   manywheel-py3_8-cuda12_1-build:
     if: ${{ github.repository_owner == 'pytorch' }}
     uses: ./.github/workflows/_binary-build-linux.yml
@@ -88,7 +130,7 @@ jobs:
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda12_1
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same cu12 12.1 requirement list, with nvidia-nccl-cu12==2.21.5)
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cuda12_1-test:  # Testing
@@ -112,6 +154,48 @@ jobs:
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
 
+  manywheel-py3_8-cuda12_1-split-build:
+    if: ${{ github.repository_owner == 'pytorch' }}
+    uses: ./.github/workflows/_binary-build-linux.yml
+    with:
+      PYTORCH_ROOT: /pytorch
+      BUILDER_ROOT: /builder
+      PACKAGE_TYPE: manywheel
+      # TODO: This is a legacy variable that we eventually want to get rid of in
+      #       favor of GPU_ARCH_VERSION
+      DESIRED_CUDA: cu121
+      GPU_ARCH_VERSION: 12.1
+      GPU_ARCH_TYPE: cuda
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
+      use_split_build: True
+      DESIRED_PYTHON: "3.8"
+      build_name: manywheel-py3_8-cuda12_1-split
+      build_environment: linux-binary-manywheel
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same cu12 12.1 requirement list, with nvidia-nccl-cu12==2.21.5)
+    secrets:
+      github-token: ${{ secrets.GITHUB_TOKEN }}
+  manywheel-py3_8-cuda12_1-split-test:  # Testing
+    if: ${{ github.repository_owner == 'pytorch' }}
+    needs: manywheel-py3_8-cuda12_1-split-build
+    uses: ./.github/workflows/_binary-test-linux.yml
+    with:
+      PYTORCH_ROOT: /pytorch
+      BUILDER_ROOT: /builder
+      PACKAGE_TYPE: manywheel
+      # TODO: This is a legacy variable that we eventually want to get rid of in
+      #       favor of GPU_ARCH_VERSION
+      DESIRED_CUDA: cu121
+      GPU_ARCH_VERSION: 12.1
+      GPU_ARCH_TYPE: cuda
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.1-main
+      use_split_build: True
+      DESIRED_PYTHON: "3.8"
+      build_name: manywheel-py3_8-cuda12_1-split
+      build_environment: linux-binary-manywheel
+      runs_on: linux.4xlarge.nvidia.gpu
+    secrets:
+      github-token: ${{ secrets.GITHUB_TOKEN }}
+
   manywheel-py3_8-cuda12_4-build:
     if: ${{ github.repository_owner == 'pytorch' }}
     uses: ./.github/workflows/_binary-build-linux.yml
@@ -128,7 +212,7 @@ jobs:
       DESIRED_PYTHON: "3.8"
       build_name: manywheel-py3_8-cuda12_4
       build_environment: linux-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.4.2.65; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.2.0.44; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.5.119; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.6.0.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.3.0.142; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvjitlink-cu12==12.4.99; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same cu12 12.4 requirement list, with nvidia-nccl-cu12==2.21.5)
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cuda12_4-test:  # Testing
@@ -151,3 +235,45 @@ jobs:
       runs_on: linux.4xlarge.nvidia.gpu
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
+
+  manywheel-py3_8-cuda12_4-split-build:
+    if: ${{ github.repository_owner == 'pytorch' }}
+    uses: ./.github/workflows/_binary-build-linux.yml
+    with:
+      PYTORCH_ROOT: /pytorch
+      BUILDER_ROOT: /builder
+      PACKAGE_TYPE: manywheel
+      # TODO: This is a legacy variable that we eventually want to get rid of in
+      #       favor of GPU_ARCH_VERSION
+      DESIRED_CUDA: cu124
+      GPU_ARCH_VERSION: 12.4
+      GPU_ARCH_TYPE: cuda
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
+      use_split_build: True
+      DESIRED_PYTHON: "3.8"
+      build_name: manywheel-py3_8-cuda12_4-split
+      build_environment: linux-binary-manywheel
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same cu12 12.4 requirement list, with nvidia-nccl-cu12==2.21.5)
+    secrets:
+      github-token: ${{ secrets.GITHUB_TOKEN }}
+  manywheel-py3_8-cuda12_4-split-test:  # Testing
+    if: ${{ github.repository_owner == 'pytorch' }}
+    needs: manywheel-py3_8-cuda12_4-split-build
+    uses: ./.github/workflows/_binary-test-linux.yml
+    with:
+      PYTORCH_ROOT: /pytorch
+      BUILDER_ROOT: /builder
+      PACKAGE_TYPE: manywheel
+      # TODO: This is a legacy variable that we eventually want to get rid of in
+      #       favor of GPU_ARCH_VERSION
+      DESIRED_CUDA: cu124
+      GPU_ARCH_VERSION: 12.4
+      GPU_ARCH_TYPE: cuda
+      DOCKER_IMAGE: pytorch/manylinux-builder:cuda12.4-main
+      use_split_build: True
+      DESIRED_PYTHON: "3.8"
+      build_name: manywheel-py3_8-cuda12_4-split
+      build_environment: linux-binary-manywheel
+      runs_on: linux.4xlarge.nvidia.gpu
+    secrets:
+      github-token: ${{ secrets.GITHUB_TOKEN }}
.github/workflows/generated-linux-binary-manywheel-nightly.yml (generated, vendored, 2359 changes)

[Diff not shown: file diff suppressed because it is too large.]
.github/workflows/generated-linux-s390x-binary-manywheel-nightly.yml (generated, vendored, 10 changes)

@@ -54,7 +54,7 @@ jobs:
       ALPINE_IMAGE: "docker.io/s390x/alpine"
       build_name: manywheel-py3_8-cpu-s390x
       build_environment: linux-s390x-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same requirement list as above, with nvidia-nccl-cu12==2.21.5)
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_8-cpu-s390x-test:  # Testing
@@ -117,7 +117,7 @@ jobs:
       ALPINE_IMAGE: "docker.io/s390x/alpine"
       build_name: manywheel-py3_9-cpu-s390x
       build_environment: linux-s390x-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same requirement list as above, with nvidia-nccl-cu12==2.20.5)
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same requirement list as above, with nvidia-nccl-cu12==2.21.5)
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
   manywheel-py3_9-cpu-s390x-test:  # Testing
@@ -180,7 +180,7 @@ jobs:
       ALPINE_IMAGE: "docker.io/s390x/alpine"
       build_name: manywheel-py3_10-cpu-s390x
       build_environment: linux-s390x-binary-manywheel
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same requirement list as above, with nvidia-nccl-cu12==2.20.5)
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: (same requirement list as above, with nvidia-nccl-cu12==2.21.5)
     secrets:
       github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_10-cpu-s390x-test: # Testing
|
||||
@ -243,7 +243,7 @@ jobs:
|
||||
ALPINE_IMAGE: "docker.io/s390x/alpine"
|
||||
build_name: manywheel-py3_11-cpu-s390x
|
||||
build_environment: linux-s390x-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_11-cpu-s390x-test: # Testing
|
||||
@ -306,7 +306,7 @@ jobs:
|
||||
ALPINE_IMAGE: "docker.io/s390x/alpine"
|
||||
build_name: manywheel-py3_12-cpu-s390x
|
||||
build_environment: linux-s390x-binary-manywheel
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
secrets:
|
||||
github-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
manywheel-py3_12-cpu-s390x-test: # Testing
|
||||
|
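Note: the PYTORCH_EXTRA_INSTALL_REQUIREMENTS value changed in each hunk above is a single "|"-separated string of PEP 508 requirements, each carrying an environment marker so the CUDA packages only apply on Linux x86_64. The sketch below shows how such a string can be split and marker-filtered; it assumes the build tooling splits on "|" and that the packaging library is used for marker evaluation, and the two-entry sample string is shortened for illustration.

# A minimal sketch, not PyTorch's actual tooling: split the "|"-separated
# requirement string and keep only the entries whose environment marker
# matches the current platform. Requires the `packaging` library.
from packaging.requirements import Requirement

# Shortened two-entry sample of the value set in the workflows above.
extra = (
    "nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64'"
    " | "
    "nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'"
)

applicable = []
for spec in extra.split("|"):
    req = Requirement(spec.strip())
    # marker is None when there is no ";"-guard; evaluate() checks the
    # guard against this interpreter's platform_system/platform_machine.
    if req.marker is None or req.marker.evaluate():
        applicable.append(str(req))

# On Linux/x86_64 both pins survive; on macOS or Windows the list is empty.
print(applicable)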
.github/workflows/generated-macos-arm64-binary-wheel-nightly.yml (generated, vendored): 10 changed lines
@@ -46,7 +46,7 @@ jobs:
       GPU_ARCH_TYPE: cpu
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.8"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
       # For sccache access (only on non-forked PRs)
       AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
       AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
@@ -165,7 +165,7 @@ jobs:
       GPU_ARCH_TYPE: cpu
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.9"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
       # For sccache access (only on non-forked PRs)
       AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
       AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
@@ -284,7 +284,7 @@ jobs:
       GPU_ARCH_TYPE: cpu
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.10"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
       # For sccache access (only on non-forked PRs)
       AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
       AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
@@ -403,7 +403,7 @@ jobs:
       GPU_ARCH_TYPE: cpu
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.11"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
       # For sccache access (only on non-forked PRs)
       AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
       AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
@@ -522,7 +522,7 @@ jobs:
       GPU_ARCH_TYPE: cpu
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.12"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
       # For sccache access (only on non-forked PRs)
       AWS_ACCESS_KEY_ID: ${{ secrets.MACOS_SCCACHE_S3_ACCESS_KEY_ID }}
       AWS_SECRET_ACCESS_KEY: ${{ secrets.MACOS_SCCACHE_S3_SECRET_ACCESS_KEY }}
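All five macOS hunks above, like the s390x and Windows ones, make the same one-token change: the nvidia-nccl-cu12 pin moves from 2.20.5 to 2.21.5. Because these workflow files are generated, a leftover old pin in any of them would indicate a missed regeneration. The following is an illustrative consistency check, not a script from the repository; only the glob pattern and the version strings come from this diff.

# Illustrative consistency check, not part of the repository: after a pin
# bump like 2.20.5 -> 2.21.5, make sure no generated workflow still carries
# the old version.
import pathlib
import sys

OLD_PIN = "nvidia-nccl-cu12==2.20.5"

stale = [
    str(path)
    for path in pathlib.Path(".github/workflows").glob("generated-*.yml")
    if OLD_PIN in path.read_text()
]

if stale:
    sys.exit(f"stale {OLD_PIN} pin in: {', '.join(stale)}")
print("no stale nvidia-nccl-cu12 pins found")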
.github/workflows/generated-windows-binary-wheel-nightly.yml (generated, vendored): 40 changed lines
@@ -46,7 +46,7 @@ jobs:
       GPU_ARCH_TYPE: cpu
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.8"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -290,7 +290,7 @@ jobs:
       GPU_ARCH_TYPE: cuda
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.8"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -536,7 +536,7 @@ jobs:
       GPU_ARCH_TYPE: cuda
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.8"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -782,7 +782,7 @@ jobs:
       GPU_ARCH_TYPE: cuda
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.8"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -1027,7 +1027,7 @@ jobs:
       GPU_ARCH_TYPE: cpu
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.9"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -1271,7 +1271,7 @@ jobs:
       GPU_ARCH_TYPE: cuda
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.9"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -1517,7 +1517,7 @@ jobs:
       GPU_ARCH_TYPE: cuda
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.9"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -1763,7 +1763,7 @@ jobs:
       GPU_ARCH_TYPE: cuda
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.9"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -2008,7 +2008,7 @@ jobs:
       GPU_ARCH_TYPE: cpu
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.10"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -2252,7 +2252,7 @@ jobs:
       GPU_ARCH_TYPE: cuda
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.10"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -2498,7 +2498,7 @@ jobs:
       GPU_ARCH_TYPE: cuda
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.10"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -2744,7 +2744,7 @@ jobs:
       GPU_ARCH_TYPE: cuda
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.10"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -2989,7 +2989,7 @@ jobs:
       GPU_ARCH_TYPE: cpu
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.11"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -3233,7 +3233,7 @@ jobs:
       GPU_ARCH_TYPE: cuda
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.11"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -3479,7 +3479,7 @@ jobs:
       GPU_ARCH_TYPE: cuda
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.11"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -3725,7 +3725,7 @@ jobs:
       GPU_ARCH_TYPE: cuda
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.11"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -3970,7 +3970,7 @@ jobs:
       GPU_ARCH_TYPE: cpu
       SKIP_ALL_TESTS: 1
       DESIRED_PYTHON: "3.12"
-      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
+      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
     steps:
       - name: Display EC2 information
         shell: bash
@@ -4214,7 +4214,7 @@ jobs:
       GPU_ARCH_TYPE: cuda
       SKIP_ALL_TESTS: 1
|
||||
DESIRED_PYTHON: "3.12"
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
steps:
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
@ -4460,7 +4460,7 @@ jobs:
|
||||
GPU_ARCH_TYPE: cuda
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_PYTHON: "3.12"
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
steps:
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
@ -4706,7 +4706,7 @@ jobs:
|
||||
GPU_ARCH_TYPE: cuda
|
||||
SKIP_ALL_TESTS: 1
|
||||
DESIRED_PYTHON: "3.12"
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.20.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
PYTORCH_EXTRA_INSTALL_REQUIREMENTS: nvidia-cuda-nvrtc-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-runtime-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cuda-cupti-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cudnn-cu12==9.1.0.70; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cublas-cu12==12.1.3.1; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cufft-cu12==11.0.2.54; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-curand-cu12==10.3.2.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusolver-cu12==11.4.5.107; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-cusparse-cu12==12.1.0.106; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' | nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'
|
||||
steps:
|
||||
- name: Display EC2 information
|
||||
shell: bash
|
||||
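
Every hunk above is the same one-line change repeated per build job: PYTORCH_EXTRA_INSTALL_REQUIREMENTS is a |-separated list of PEP 508 requirement specifiers, and only the NCCL pin moves from nvidia-nccl-cu12==2.20.5 to 2.21.5. A trimmed sketch of the variable's shape, two entries only for illustration (the generated workflows put the full list on a single line):

    env:
      # each entry is "package==version; environment marker", so the extra
      # wheels are only pulled in on Linux x86_64 installs
      PYTORCH_EXTRA_INSTALL_REQUIREMENTS: >-
        nvidia-nccl-cu12==2.21.5; platform_system == 'Linux' and platform_machine == 'x86_64' |
        nvidia-nvtx-cu12==12.1.105; platform_system == 'Linux' and platform_machine == 'x86_64'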

.github/workflows/inductor-cu124.yml (vendored, 6 changed lines)
@@ -28,7 +28,8 @@ jobs:
  cuda-arch-list: '8.6'
  test-matrix: |
  { include: [
- { config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
+ { config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
+ { config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
  { config: "inductor_distributed", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
  { config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
  { config: "inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
@@ -95,7 +96,8 @@ jobs:
  cuda-arch-list: '8.6'
  test-matrix: |
  { include: [
- { config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
+ { config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
+ { config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
  ]}

  linux-focal-cuda12_4-py3_12-gcc9-inductor-test:
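
Both hunks make the same change: the single-shard inductor config becomes two shards, so shard and num_shards together tell each runner which slice of the suite to execute. A minimal sketch of the test-matrix block these workflows pass to the reusable build workflow (values illustrative):

    test-matrix: |
      { include: [
        # two entries with num_shards: 2 split one config across two runners
        { config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
        { config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
      ]}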

.github/workflows/inductor-periodic.yml (vendored, 26 changed lines)
@@ -56,3 +56,29 @@ jobs:
  test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-periodic-dynamo-benchmarks-build.outputs.test-matrix }}
  secrets:
  HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+
+ linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp:
+ name: cuda12.1-py3.10-gcc9-sm80
+ uses: ./.github/workflows/_linux-build.yml
+ with:
+ build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
+ docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
+ cuda-arch-list: '8.0'
+ test-matrix: |
+ { include: [
+ { config: "inductor_torchbench_smoketest_perf", shard: 1, num_shards: 1, runner: "linux.gcp.a100" },
+ ]}
+ secrets:
+ HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
+
+ linux-focal-cuda12_1-py3_10-gcc9-inductor-test-gcp:
+ name: cuda12.1-py3.10-gcc9-sm80
+ uses: ./.github/workflows/_linux-test.yml
+ needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp
+ with:
+ build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
+ docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.docker-image }}
+ test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.test-matrix }}
+ use-gha: anything-non-empty-to-use-gha
+ secrets:
+ HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
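
The added A100 smoketest jobs follow the build/test pairing used throughout these workflows: the build job calls the reusable _linux-build.yml and exposes docker-image and test-matrix as outputs, which the test job consumes via needs. A generic sketch of the pattern (job names and values here are placeholders, not from the diff):

    example-build:
      uses: ./.github/workflows/_linux-build.yml
      with:
        build-environment: example-env        # placeholder
        docker-image-name: example-image      # placeholder

    example-test:
      uses: ./.github/workflows/_linux-test.yml
      needs: example-build
      with:
        build-environment: example-env
        # outputs published by the reusable build workflow
        docker-image: ${{ needs.example-build.outputs.docker-image }}
        test-matrix: ${{ needs.example-build.outputs.test-matrix }}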

.github/workflows/inductor.yml (vendored, 65 changed lines)
@@ -24,7 +24,8 @@ jobs:
  docker-image-name: pytorch-linux-focal-rocm-n-py3
  test-matrix: |
  { include: [
- { config: "inductor", shard: 1, num_shards: 1, runner: "linux.rocm.gpu.2" },
+ { config: "inductor", shard: 1, num_shards: 2, runner: "linux.rocm.gpu.2" },
+ { config: "inductor", shard: 2, num_shards: 2, runner: "linux.rocm.gpu.2" },
  ]}

  linux-focal-rocm6_1-py3_8-inductor-test:
@@ -48,7 +49,8 @@ jobs:
  cuda-arch-list: '8.6'
  test-matrix: |
  { include: [
- { config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
+ { config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
+ { config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
  { config: "inductor_distributed", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
  { config: "inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
  { config: "inductor_timm", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
@@ -81,32 +83,6 @@ jobs:
  secrets:
  HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-build.yml
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9-inductor-benchmarks
- cuda-arch-list: '8.0'
- test-matrix: |
- { include: [
- { config: "inductor_torchbench_smoketest_perf", shard: 1, num_shards: 1, runner: "linux.gcp.a100" },
- ]}
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}
-
- linux-focal-cuda12_1-py3_10-gcc9-inductor-test-gcp:
- name: cuda12.1-py3.10-gcc9-sm80
- uses: ./.github/workflows/_linux-test.yml
- needs: linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp
- with:
- build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm80
- docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.docker-image }}
- test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-inductor-build-gcp.outputs.test-matrix }}
- use-gha: anything-non-empty-to-use-gha
- secrets:
- HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}

  linux-focal-cuda12_1-py3_12-gcc9-inductor-build:
  name: cuda12.1-py3.12-gcc9-sm86
  uses: ./.github/workflows/_linux-build.yml
@@ -116,7 +92,8 @@ jobs:
  cuda-arch-list: '8.6'
  test-matrix: |
  { include: [
- { config: "inductor", shard: 1, num_shards: 1, runner: "linux.g5.4xlarge.nvidia.gpu" },
+ { config: "inductor", shard: 1, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
+ { config: "inductor", shard: 2, num_shards: 2, runner: "linux.g5.4xlarge.nvidia.gpu" },
  ]}

  linux-focal-cuda12_1-py3_12-gcc9-inductor-test:
@@ -128,6 +105,26 @@ jobs:
  docker-image: ${{ needs.linux-focal-cuda12_1-py3_12-gcc9-inductor-build.outputs.docker-image }}
  test-matrix: ${{ needs.linux-focal-cuda12_1-py3_12-gcc9-inductor-build.outputs.test-matrix }}

+ linux-jammy-cpu-py3_12-inductor-halide-build:
+ name: linux-jammy-cpu-py3.12-gcc11-inductor-halide
+ uses: ./.github/workflows/_linux-build.yml
+ with:
+ build-environment: linux-jammy-py3.12-gcc11
+ docker-image-name: pytorch-linux-jammy-py3.12-halide
+ test-matrix: |
+ { include: [
+ { config: "inductor-halide", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
+ ]}
+
+ linux-jammy-cpu-py3_12-inductor-halide-test:
+ name: linux-jammy-cpu-py3.12-gcc11-inductor-halide
+ uses: ./.github/workflows/_linux-test.yml
+ needs: linux-jammy-cpu-py3_12-inductor-halide-build
+ with:
+ build-environment: linux-jammy-py3.12-gcc11
+ docker-image: ${{ needs.linux-jammy-cpu-py3_12-inductor-halide-build.outputs.docker-image }}
+ test-matrix: ${{ needs.linux-jammy-cpu-py3_12-inductor-halide-build.outputs.test-matrix }}
+
  linux-focal-cuda12_4-py3_10-gcc9-inductor-build:
  # Should be synced with the one in inductor-periodic.yml but this only runs inductor_timm
  name: cuda12.4-py3.10-gcc9-sm86
@@ -175,11 +172,21 @@ jobs:
  { config: "cpu_inductor_timm_freezing", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
  { config: "cpu_inductor_torchbench_freezing", shard: 1, num_shards: 2, runner: "linux.12xlarge" },
  { config: "cpu_inductor_torchbench_freezing", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
+ { config: "cpu_inductor_huggingface_amp_freezing", shard: 1, num_shards: 1, runner: "linux.16xlarge.spr" },
+ { config: "cpu_inductor_timm_amp_freezing", shard: 1, num_shards: 2, runner: "linux.16xlarge.spr" },
+ { config: "cpu_inductor_timm_amp_freezing", shard: 2, num_shards: 2, runner: "linux.16xlarge.spr" },
+ { config: "cpu_inductor_torchbench_amp_freezing", shard: 1, num_shards: 2, runner: "linux.16xlarge.spr" },
+ { config: "cpu_inductor_torchbench_amp_freezing", shard: 2, num_shards: 2, runner: "linux.16xlarge.spr" },
  { config: "dynamic_cpu_inductor_huggingface", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
  { config: "dynamic_cpu_inductor_timm", shard: 1, num_shards: 2, runner: "linux.12xlarge" },
  { config: "dynamic_cpu_inductor_timm", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
  { config: "dynamic_cpu_inductor_torchbench", shard: 1, num_shards: 2, runner: "linux.12xlarge" },
  { config: "dynamic_cpu_inductor_torchbench", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
+ { config: "cpu_aot_inductor_huggingface_freezing", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
+ { config: "cpu_aot_inductor_timm_freezing", shard: 1, num_shards: 2, runner: "linux.12xlarge" },
+ { config: "cpu_aot_inductor_timm_freezing", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
+ { config: "cpu_aot_inductor_torchbench_freezing", shard: 1, num_shards: 2, runner: "linux.12xlarge" },
+ { config: "cpu_aot_inductor_torchbench_freezing", shard: 2, num_shards: 2, runner: "linux.12xlarge" },
  { config: "inductor_torchbench_cpu_smoketest_perf", shard: 1, num_shards: 1, runner: "linux.24xl.spr-metal" },
  ]}
  secrets:
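
Note that HUGGING_FACE_HUB_TOKEN is forwarded on every benchmark job here: a called reusable workflow does not see the caller's secrets unless they are passed explicitly. A minimal sketch, assuming the called workflow declares a secret of the same name (job names are placeholders):

    example-benchmark:
      uses: ./.github/workflows/_linux-test.yml
      needs: example-build                    # placeholder build job
      with:
        build-environment: example-env
        docker-image: ${{ needs.example-build.outputs.docker-image }}
        test-matrix: ${{ needs.example-build.outputs.test-matrix }}
      secrets:
        HUGGING_FACE_HUB_TOKEN: ${{ secrets.HUGGING_FACE_HUB_TOKEN }}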

.github/workflows/llm_td_retrieval.yml (vendored, 30 changed lines)
@@ -36,33 +36,24 @@ jobs:
  ref: v0.0.2
  path: llm-target-determinator

- - name: Setup Conda
- uses: conda-incubator/setup-miniconda@v2.1.1
+ - name: Setup miniconda
+ uses: pytorch/test-infra/.github/actions/setup-miniconda@main
  with:
- miniconda-version: "py39_4.12.0"
- python-version: 3.9
+ python-version: "3.9"

- - name: Install Requirements
+ - name: Install requirements
  shell: bash -l {0}
  run: |
  set -euxo pipefail
- conda create \
- --yes \
- --quiet \
- --name "tdenv" \
- "python=3.9"
- conda activate tdenv
- cd "${GITHUB_WORKSPACE}/llm-target-determinator"
- pip install -r requirements.txt
- cd ../codellama
- pip install -e .
+ ${CONDA_RUN} pip install -r llm-target-determinator/requirements.txt
+ cd "${GITHUB_WORKSPACE}/codellama"
+ ${CONDA_RUN} pip install -e .

  - name: Fetch CodeLlama Checkpoint
  shell: bash -l {0}
  run: |
  set -euxo pipefail
- conda activate tdenv
- cd codellama/
+ cd "${GITHUB_WORKSPACE}/codellama"
  mkdir "CodeLlama-7b-Python"
  aws s3 cp "s3://target-determinator-assets/CodeLlama-7b-Python" "CodeLlama-7b-Python" --recursive --no-progress

@@ -75,7 +66,7 @@ jobs:
  shell: bash
  command: |
  set -euxo pipefail
- python3 -m pip install awscli==1.29.40
+ ${CONDA_RUN} python -m pip install awscli==1.29.40
  cd "${GITHUB_WORKSPACE}"/llm-target-determinator/assets
  aws s3 cp "s3://target-determinator-assets/indexes/latest" . --recursive

@@ -88,9 +79,8 @@ jobs:
  shell: bash -l {0}
  run: |
  set -euxo pipefail
- conda activate tdenv
  cd "${GITHUB_WORKSPACE}"/llm-target-determinator
- torchrun \
+ ${CONDA_RUN} torchrun \
  --standalone \
  --nnodes=1 \
  --nproc-per-node=1 \
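
The rewrite replaces a hand-rolled conda create/conda activate environment with the repo's setup-miniconda action; judging by the new steps, that action provides a CONDA_RUN wrapper that runs a command inside the environment it manages. A condensed sketch of the new step shape, drawn from the diff with unrelated steps omitted:

    - name: Setup miniconda
      uses: pytorch/test-infra/.github/actions/setup-miniconda@main
      with:
        python-version: "3.9"

    - name: Install requirements
      shell: bash -l {0}
      run: |
        set -euxo pipefail
        # CONDA_RUN executes the command in the environment the action set up
        ${CONDA_RUN} pip install -r llm-target-determinator/requirements.txt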

.github/workflows/periodic.yml (vendored, 51 changed lines)
@@ -73,7 +73,6 @@ jobs:
  { config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
  { config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
  { config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "deploy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
  { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
  { config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
  { config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
@@ -295,3 +294,53 @@ jobs:
  build-environment: linux-focal-rocm6.1-py3.8
  docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.docker-image }}
  test-matrix: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.test-matrix }}
+
+ linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build:
+ name: linux-focal-cuda12.1-py3.10-gcc9-experimental-split-build
+ uses: ./.github/workflows/_linux-build-label.yml
+ with:
+ use_split_build: true
+ build-environment: linux-focal-cuda12.1-py3.10-gcc9
+ docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
+ test-matrix: |
+ { include: [
+ { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
+ { config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
+ { config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
+ ]}
+ linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build-test:
+ name: linux-focal-cuda12.1-py3.10-gcc9-experimental-split-build
+ uses: ./.github/workflows/_linux-test.yml
+ needs:
+ - linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build
+ - target-determination
+ with:
+ build-environment: linux-focal-cuda12.1-py3.10-gcc9-experimental-split-build
+ docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build.outputs.docker-image }}
+ test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build.outputs.test-matrix }}
+
+
+ linux-focal-cuda11_8-py3_9-gcc9-experimental-split-build:
+ name: linux-focal-cuda11.8-py3.9-gcc9-experimental-split-build
+ uses: ./.github/workflows/_linux-build-label.yml
+ with:
+ use_split_build: true
+ build-environment: linux-focal-cuda11.8-py3.9-gcc9
+ docker-image-name: pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9
+ cuda-arch-list: 8.6
+ test-matrix: |
+ { include: [
+ { config: "multigpu", shard: 1, num_shards: 1, runner: "linux.g5.12xlarge.nvidia.gpu" },
+ ]}
+ build-with-debug: false
+
+ linux-focal-cuda11_8-py3_9-gcc9-experimental-split-build-test:
+ name: linux-focal-cuda11.8-py3.9-gcc9-experimental-split-build-test
+ uses: ./.github/workflows/_linux-test.yml
+ needs:
+ - linux-focal-cuda11_8-py3_9-gcc9-experimental-split-build
+ - target-determination
+ with:
+ build-environment: linux-focal-cuda11.8-py3.9-gcc9-experimental-split-build
+ docker-image: ${{ needs.linux-focal-cuda11_8-py3_9-gcc9-experimental-split-build.outputs.docker-image }}
+ test-matrix: ${{ needs.linux-focal-cuda11_8-py3_9-gcc9-experimental-split-build.outputs.test-matrix }}
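
Both new periodic jobs opt into the experimental split build purely through the use_split_build input on the reusable build workflow; the surrounding build/test plumbing is unchanged. A trimmed sketch of the opt-in (job name is a placeholder):

    example-split-build:
      uses: ./.github/workflows/_linux-build-label.yml
      with:
        use_split_build: true                 # the experimental split-build switch
        build-environment: linux-focal-cuda12.1-py3.10-gcc9
        docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9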

.github/workflows/pull.yml (vendored, 232 changed lines)
@@ -35,22 +35,33 @@ jobs:
  id-token: write
  contents: read

+ get-label-type:
+ name: get-label-type
+ uses: ./.github/workflows/_runner-determinator.yml
+ with:
+ triggering_actor: ${{ github.triggering_actor }}
+ issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
+ curr_branch: ${{ github.head_ref || github.ref_name }}
+
  linux-jammy-py3_8-gcc11-build:
  name: linux-jammy-py3.8-gcc11
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-jammy-py3.8-gcc11
  docker-image-name: pytorch-linux-jammy-py3.8-gcc11
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
- { config: "docs_test", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- { config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- { config: "backwards_compat", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
- { config: "distributed", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
- { config: "distributed", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
+ { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "docs_test", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "jit_legacy", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "backwards_compat", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "distributed", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "distributed", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
  ]}

  linux-jammy-py3_8-gcc11-test:
@@ -75,7 +86,9 @@ jobs:
  linux-jammy-py3_8-gcc11-no-ops:
  name: linux-jammy-py3.8-gcc11-no-ops
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-jammy-py3.8-gcc11-no-ops
  docker-image-name: pytorch-linux-jammy-py3.8-gcc11
  test-matrix: |
@@ -86,7 +99,9 @@ jobs:
  linux-jammy-py3_8-gcc11-pch:
  name: linux-jammy-py3.8-gcc11-pch
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-jammy-py3.8-gcc11-pch
  docker-image-name: pytorch-linux-jammy-py3.8-gcc11
  test-matrix: |
@@ -98,17 +113,19 @@ jobs:
  linux-jammy-py3_10-clang15-asan-build:
  name: linux-jammy-py3.10-clang15-asan
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-jammy-py3.10-clang15-asan
  docker-image-name: pytorch-linux-jammy-py3-clang15-asan
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 2, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 3, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 4, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 5, num_shards: 6, runner: "linux.4xlarge" },
- { config: "default", shard: 6, num_shards: 6, runner: "linux.4xlarge" },
+ { config: "default", shard: 1, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+ { config: "default", shard: 2, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+ { config: "default", shard: 3, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+ { config: "default", shard: 4, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+ { config: "default", shard: 5, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+ { config: "default", shard: 6, num_shards: 6, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
  ]}
  sync-tag: asan-build

@@ -128,13 +145,15 @@ jobs:
  linux-focal-py3_8-clang10-onnx-build:
  name: linux-focal-py3.8-clang10-onnx
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-focal-py3.8-clang10-onnx
  docker-image-name: pytorch-linux-focal-py3-clang10-onnx
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
- { config: "default", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
+ { config: "default", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "default", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
  ]}

  linux-focal-py3_8-clang10-onnx-test:
@@ -151,19 +170,22 @@ jobs:
  linux-focal-py3_8-clang10-build:
  name: linux-focal-py3.8-clang10
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-focal-py3.8-clang10
  docker-image-name: pytorch-linux-focal-py3.8-clang10
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
- { config: "crossref", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
- { config: "crossref", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "crossref", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "crossref", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "dynamo", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "dynamo", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "dynamo", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
  ]}
  linux-focal-py3_8-clang10-test:
  name: linux-focal-py3.8-clang10
@@ -179,22 +201,24 @@ jobs:
  linux-focal-py3_11-clang10-build:
  name: linux-focal-py3.11-clang10
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-focal-py3.11-clang10
  docker-image-name: pytorch-linux-focal-py3.11-clang10
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
- { config: "crossref", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
- { config: "crossref", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "crossref", shard: 1, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "crossref", shard: 2, num_shards: 2, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "dynamo", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "dynamo", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "dynamo", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
  ]}


  linux-focal-py3_11-clang10-test:
  name: linux-focal-py3.11-clang10
  uses: ./.github/workflows/_linux-test.yml
@@ -209,17 +233,20 @@ jobs:
  linux-focal-py3_12-clang10-build:
  name: linux-focal-py3.12-clang10
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-focal-py3.12-clang10
  docker-image-name: pytorch-linux-focal-py3.12-clang10
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
- { config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "default", shard: 1, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "default", shard: 2, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "default", shard: 3, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "default", shard: 4, num_shards: 4, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "dynamo", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "dynamo", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
+ { config: "dynamo", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
  ]}

  linux-focal-py3_12-clang10-test:
@@ -235,14 +262,16 @@ jobs:
  linux-focal-cuda11_8-py3_10-gcc9-build:
  name: linux-focal-cuda11.8-py3.10-gcc9
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-focal-cuda11.8-py3.10-gcc9
  docker-image-name: pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9
  test-matrix: |
  { include: [
- { config: "distributed", shard: 1, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
- { config: "distributed", shard: 2, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
- { config: "distributed", shard: 3, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
+ { config: "distributed", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.nvidia.gpu" },
+ { config: "distributed", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.nvidia.gpu" },
+ { config: "distributed", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.8xlarge.nvidia.gpu" },
  ]}

  linux-focal-cuda11_8-py3_10-gcc9-test:
@@ -260,17 +289,18 @@ jobs:
  linux-focal-cuda12_1-py3_10-gcc9-build:
  name: linux-focal-cuda12.1-py3.10-gcc9
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-focal-cuda12.1-py3.10-gcc9
  docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "default", shard: 2, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
- { config: "deploy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
  ]}

  linux-focal-cuda12_1-py3_10-gcc9-test:
@@ -288,7 +318,9 @@ jobs:
  linux-jammy-py3-clang12-mobile-build:
  name: linux-jammy-py3-clang12-mobile-build
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-jammy-py3-clang12-mobile-build
  docker-image-name: pytorch-linux-jammy-py3-clang15-asan
  build-generates-artifacts: false
@@ -300,7 +332,9 @@ jobs:
  linux-jammy-cuda-11_8-cudnn9-py3_8-clang12-build:
  name: linux-jammy-cuda11.8-cudnn9-py3.8-clang12
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-jammy-cuda11.8-cudnn9-py3.8-clang12
  docker-image-name: pytorch-linux-jammy-cuda11.8-cudnn9-py3.8-clang12
  test-matrix: |
@@ -311,7 +345,9 @@ jobs:
  linux-focal-py3-clang9-mobile-custom-build-static:
  name: linux-focal-py3-clang9-mobile-custom-build-static
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-focal-py3-clang9-mobile-custom-build-static
  docker-image-name: pytorch-linux-focal-py3-clang9-android-ndk-r21e
  build-generates-artifacts: false
@@ -323,12 +359,14 @@ jobs:
  linux-focal-py3_8-clang9-xla-build:
  name: linux-focal-py3_8-clang9-xla
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-focal-py3.8-clang9-xla
  docker-image-name: 308535385114.dkr.ecr.us-east-1.amazonaws.com/pytorch/xla_base:v1.1-lite
  test-matrix: |
  { include: [
- { config: "xla", shard: 1, num_shards: 1, runner: "linux.12xlarge" },
+ { config: "xla", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.12xlarge" },
  ]}

  linux-focal-py3_8-clang9-xla-test:
@@ -345,51 +383,59 @@ jobs:
  if: github.event_name == 'pull_request'
  name: win-vs2019-cpu-py3
  uses: ./.github/workflows/_win-build.yml
+ needs: get-label-type
  with:
  build-environment: win-vs2019-cpu-py3
  cuda-version: cpu
  sync-tag: win-cpu-build
+ runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
- { config: "default", shard: 2, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
- { config: "default", shard: 3, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
+ { config: "default", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
+ { config: "default", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
+ { config: "default", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
  ]}

  linux-focal-cpu-py3_10-gcc9-bazel-test:
  name: linux-focal-cpu-py3.10-gcc9-bazel-test
  uses: ./.github/workflows/_bazel-build-test.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.large"
  build-environment: linux-focal-cuda12.1-py3.10-gcc9-bazel-test
  docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
  cuda-version: cpu
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 1, runner: "linux.4xlarge" },
+ { config: "default", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
  ]}

  linux-focal-cuda12_1-py3_10-gcc9-bazel-test:
  name: linux-focal-cuda12.1-py3.10-gcc9-bazel-test
  uses: ./.github/workflows/_bazel-build-test.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.large"
  build-environment: linux-focal-cuda12.1-py3.10-gcc9-bazel-test
  docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
  cuda-version: "12.1"
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
  ]}

  linux-focal-cuda12_4-py3_10-gcc9-bazel-test:
  name: linux-focal-cuda12.4-py3.10-gcc9-bazel-test
  uses: ./.github/workflows/_bazel-build-test.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.large"
  build-environment: linux-focal-cuda12.4-py3.10-gcc9-bazel-test
  docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9
  cuda-version: "12.4"
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
  ]}

  linux-focal-py3-clang9-android-ndk-r21e-gradle-custom-build-single:
@@ -417,7 +463,9 @@ jobs:
  linux-jammy-py3_8-gcc11-mobile-lightweight-dispatch-build:
  name: linux-jammy-py3.8-gcc11-mobile-lightweight-dispatch-build
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-jammy-py3.8-gcc111-mobile-lightweight-dispatch-build
  docker-image-name: pytorch-linux-jammy-py3.8-gcc11
  build-generates-artifacts: false
@@ -431,7 +479,9 @@ jobs:
  if: github.event_name == 'pull_request'
  name: linux-focal-rocm6.1-py3.8
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-focal-rocm6.1-py3.8
  docker-image-name: pytorch-linux-focal-rocm-n-py3
  sync-tag: rocm-build
@@ -445,17 +495,19 @@ jobs:
  linux-focal-cuda12_1-py3_10-gcc9-sm86-build:
  name: linux-focal-cuda12.1-py3.10-gcc9-sm86
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-focal-cuda12.1-py3.10-gcc9-sm86
  docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
  cuda-arch-list: 8.6
  test-matrix: |
  { include: [
- { config: "default", shard: 1, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 2, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 3, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 4, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
- { config: "default", shard: 5, num_shards: 5, runner: "linux.g5.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.g5.4xlarge.nvidia.gpu" },
  ]}

  linux-focal-cuda12_1-py3_10-gcc9-sm86-test:
@@ -472,12 +524,14 @@ jobs:
  linux-jammy-py3-clang12-executorch-build:
  name: linux-jammy-py3-clang12-executorch
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-jammy-py3-clang12-executorch
  docker-image-name: pytorch-linux-jammy-py3-clang12-executorch
  test-matrix: |
  { include: [
- { config: "executorch", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
+ { config: "executorch", shard: 1, num_shards: 1, runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge" },
  ]}

  linux-jammy-py3-clang12-executorch-test:
@@ -488,3 +542,59 @@ jobs:
  build-environment: linux-jammy-py3-clang12-executorch
  docker-image: ${{ needs.linux-jammy-py3-clang12-executorch-build.outputs.docker-image }}
  test-matrix: ${{ needs.linux-jammy-py3-clang12-executorch-build.outputs.test-matrix }}
+
+ linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build:
+ name: linux-focal-cuda12.1-py3.10-gcc9-experimental-split-build
+ uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
+ with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
+ use_split_build: true
+ build-environment: linux-focal-cuda12.1-py3.10-gcc9
+ docker-image-name: pytorch-linux-focal-cuda12.1-cudnn9-py3-gcc9
+ test-matrix: |
+ { include: [
+ { config: "default", shard: 1, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 2, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 3, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 4, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
+ { config: "default", shard: 5, num_shards: 5, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge.nvidia.gpu" },
+ ]}
+
+ linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build-test:
+ name: linux-focal-cuda12.1-py3.10-gcc9-experimental-split-build
+ uses: ./.github/workflows/_linux-test.yml
+ needs:
+ - linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build
+ - target-determination
+ with:
+ timeout-minutes: 360
+ build-environment: linux-focal-cuda12.1-py3.10-gcc9-experimental-split-build
+ docker-image: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build.outputs.docker-image }}
+ test-matrix: ${{ needs.linux-focal-cuda12_1-py3_10-gcc9-experimental-split-build.outputs.test-matrix }}
+
+ linux-focal-py3_12-clang10-experimental-split-build:
+ name: linux-focal-py3.12-clang10-experimental-split-build
+ uses: ./.github/workflows/_linux-build-label.yml
+ with:
+ use_split_build: True
+ build-environment: linux-focal-py3.12-clang10
+ docker-image-name: pytorch-linux-focal-py3.12-clang10
+ test-matrix: |
+ { include: [
+ { config: "default", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "default", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "default", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "dynamo", shard: 1, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "dynamo", shard: 2, num_shards: 3, runner: "linux.2xlarge" },
+ { config: "dynamo", shard: 3, num_shards: 3, runner: "linux.2xlarge" },
+ ]}
+ linux-focal-py3_12-clang10-experimental-split-build-test:
+ name: linux-focal-py3.12-clang10-experimental-split-build
+ uses: ./.github/workflows/_linux-test.yml
+ needs: linux-focal-py3_12-clang10-experimental-split-build
+ with:
+ build-environment: linux-focal-py3.12-clang10-experimental-split-build
+ docker-image: ${{ needs.linux-focal-py3_12-clang10-experimental-split-build.outputs.docker-image }}
+ test-matrix: ${{ needs.linux-focal-py3_12-clang10-experimental-split-build.outputs.test-matrix }}
+ timeout-minutes: 600
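
Most of this file's churn is one mechanical pattern: a new get-label-type job calls the _runner-determinator.yml reusable workflow, each build job gains needs: get-label-type, and every hard-coded runner label is re-prefixed with the job's label-type output, so the runner fleet can be chosen at runtime per actor and branch. The recurring shape, reduced to one representative job (the second job name is a placeholder):

    get-label-type:
      uses: ./.github/workflows/_runner-determinator.yml
      with:
        triggering_actor: ${{ github.triggering_actor }}
        issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
        curr_branch: ${{ github.head_ref || github.ref_name }}

    example-build:
      uses: ./.github/workflows/_linux-build-label.yml
      needs: get-label-type
      with:
        # the output is a prefix; an empty prefix keeps the default fleet
        runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"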

.github/workflows/slow.yml (vendored, 23 changed lines)
@@ -36,6 +36,15 @@ jobs:
  id-token: write
  contents: read

+ get-label-type:
+ name: get-label-type
+ uses: ./.github/workflows/_runner-determinator.yml
+ with:
+ triggering_actor: ${{ github.triggering_actor }}
+ issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
+ curr_branch: ${{ github.head_ref || github.ref_name }}
+ curr_ref_type: ${{ github.ref_type }}
+
  linux-focal-cuda12_1-py3-gcc9-slow-gradcheck-build:
  name: linux-focal-cuda12.1-py3-gcc9-slow-gradcheck
  uses: ./.github/workflows/_linux-build.yml
@@ -97,7 +106,8 @@ jobs:
  docker-image-name: pytorch-linux-focal-py3.8-clang10
  test-matrix: |
  { include: [
- { config: "slow", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
+ { config: "slow", shard: 1, num_shards: 2, runner: "linux.2xlarge" },
+ { config: "slow", shard: 2, num_shards: 2, runner: "linux.2xlarge" },
  ]}

  linux-focal-py3_8-clang10-test:
@@ -119,7 +129,8 @@ jobs:
  docker-image-name: pytorch-linux-focal-rocm-n-py3
  test-matrix: |
  { include: [
- { config: "slow", shard: 1, num_shards: 1, runner: "linux.rocm.gpu" },
+ { config: "slow", shard: 1, num_shards: 2, runner: "linux.rocm.gpu" },
+ { config: "slow", shard: 2, num_shards: 2, runner: "linux.rocm.gpu" },
  ]}

  linux-focal-rocm6_1-py3_8-test:
@@ -139,14 +150,16 @@ jobs:
  linux-jammy-py3_10-clang15-asan-build:
  name: linux-jammy-py3.10-clang15-asan
  uses: ./.github/workflows/_linux-build-label.yml
+ needs: get-label-type
  with:
+ runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
  build-environment: linux-jammy-py3.10-clang15-asan
  docker-image-name: pytorch-linux-jammy-py3-clang15-asan
  test-matrix: |
  { include: [
- { config: "slow", shard: 1, num_shards: 3, runner: "linux.4xlarge" },
- { config: "slow", shard: 2, num_shards: 3, runner: "linux.4xlarge" },
- { config: "slow", shard: 3, num_shards: 3, runner: "linux.4xlarge" },
+ { config: "slow", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+ { config: "slow", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
+ { config: "slow", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}linux.4xlarge" },
  ]}
  sync-tag: asan-build

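
The asan build here carries the same sync-tag: asan-build marker as its counterpart in pull.yml, presumably so CI tooling can check that jobs sharing a tag stay defined identically across workflow files. A sketch of the marker in context (abbreviated):

    linux-jammy-py3_10-clang15-asan-build:
      uses: ./.github/workflows/_linux-build-label.yml
      with:
        build-environment: linux-jammy-py3.10-clang15-asan
        sync-tag: asan-build   # shared with the copy of this job in pull.yml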

.github/workflows/trunk.yml (vendored, 96 changed lines)
@@ -34,6 +34,15 @@ jobs:
      id-token: write
      contents: read

  get-label-type:
    name: get-label-type
    uses: ./.github/workflows/_runner-determinator.yml
    with:
      triggering_actor: ${{ github.triggering_actor }}
      issue_owner: ${{ github.event.pull_request.user.login || github.event.issue.user.login }}
      curr_branch: ${{ github.head_ref || github.ref_name }}
      curr_ref_type: ${{ github.ref_type }}

  linux-focal-cuda12_4-py3_10-gcc9-sm86-build:
    name: linux-focal-cuda12.4-py3.10-gcc9-sm86
    uses: ./.github/workflows/_linux-build-label.yml
@@ -170,15 +179,17 @@ jobs:
  win-vs2019-cpu-py3-build:
    name: win-vs2019-cpu-py3
    uses: ./.github/workflows/_win-build.yml
    needs: get-label-type
    with:
      build-environment: win-vs2019-cpu-py3
      cuda-version: cpu
      sync-tag: win-cpu-build
      runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
          { config: "default", shard: 2, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
          { config: "default", shard: 3, num_shards: 3, runner: "windows.4xlarge.nonephemeral" },
          { config: "default", shard: 1, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
          { config: "default", shard: 2, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
          { config: "default", shard: 3, num_shards: 3, runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral" },
        ]}

  win-vs2019-cpu-py3-test:
@@ -192,28 +203,21 @@ jobs:
      cuda-version: cpu
      test-matrix: ${{ needs.win-vs2019-cpu-py3-build.outputs.test-matrix }}

  win-vs2019-cuda11_8-py3-build:
    name: win-vs2019-cuda11.8-py3
  win-vs2019-cuda12_1-py3-build:
    name: win-vs2019-cuda12.1-py3
    uses: ./.github/workflows/_win-build.yml
    needs: get-label-type
    with:
      build-environment: win-vs2019-cuda11.8-py3
      cuda-version: "11.8"
      sync-tag: win-cuda-build
      test-matrix: |
        { include: [
          { config: "default", shard: 1, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
          { config: "default", shard: 2, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
          { config: "default", shard: 3, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
          { config: "default", shard: 4, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
          { config: "default", shard: 5, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
          { config: "default", shard: 6, num_shards: 6, runner: "windows.g5.4xlarge.nvidia.gpu" },
          { config: "force_on_cpu", shard: 1, num_shards: 1, runner: "windows.4xlarge.nonephemeral" },
        ]}
      build-environment: win-vs2019-cuda12.1-py3
      cuda-version: "12.1"
      runner: "${{ needs.get-label-type.outputs.label-type }}windows.4xlarge.nonephemeral"

  linux-focal-rocm6_1-py3_8-build:
    name: linux-focal-rocm6.1-py3.8
    uses: ./.github/workflows/_linux-build-label.yml
    needs: get-label-type
    with:
      runner: "${{ needs.get-label-type.outputs.label-type }}linux.2xlarge"
      build-environment: linux-focal-rocm6.1-py3.8
      docker-image-name: pytorch-linux-focal-rocm-n-py3
      sync-tag: rocm-build
@@ -238,3 +242,59 @@ jobs:
      docker-image: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-focal-rocm6_1-py3_8-build.outputs.test-matrix }}
      tests-to-include: "test_nn test_torch test_cuda test_ops test_unary_ufuncs test_binary_ufuncs test_autograd inductor/test_torchinductor distributed/test_c10d_common distributed/test_c10d_nccl"

  linux-focal-cuda12_4-py3_10-gcc9-experimental-split-build:
    name: linux-focal-cuda12.4-py3.10-gcc9-experimental-split-build
    uses: ./.github/workflows/_linux-build-label.yml
    with:
      use_split_build: true
      build-environment: linux-focal-cuda12.4-py3.10-gcc9
      docker-image-name: pytorch-linux-focal-cuda12.4-cudnn9-py3-gcc9
      test-matrix: |
        { include: [
          { config: "nogpu_AVX512", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
          { config: "nogpu_NO_AVX2", shard: 1, num_shards: 1, runner: "linux.2xlarge" },
          { config: "jit_legacy", shard: 1, num_shards: 1, runner: "linux.4xlarge.nvidia.gpu" },
          { config: "default", shard: 1, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
          { config: "default", shard: 2, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
          { config: "default", shard: 3, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
          { config: "default", shard: 4, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
          { config: "default", shard: 5, num_shards: 5, runner: "linux.4xlarge.nvidia.gpu" },
        ]}

  linux-focal-cuda12_4-py3_10-gcc9-experimental-split-build-test:
    name: linux-focal-cuda12.4-py3.10-gcc9-experimental-split-build-test
    uses: ./.github/workflows/_linux-test.yml
    needs:
      - linux-focal-cuda12_4-py3_10-gcc9-experimental-split-build
      - target-determination
    with:
      build-environment: linux-focal-cuda12.4-py3.10-gcc9-experimental-split-build
      docker-image: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-experimental-split-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-focal-cuda12_4-py3_10-gcc9-experimental-split-build.outputs.test-matrix }}

  linux-focal-cuda11_8-py3_10-gcc9-experimental-split-build:
    name: linux-focal-cuda11.8-py3.10-gcc9-experimental-split-build
    uses: ./.github/workflows/_linux-build-label.yml
    with:
      use_split_build: true
      build-environment: linux-focal-cuda11.8-py3.10-gcc9
      docker-image-name: pytorch-linux-focal-cuda11.8-cudnn9-py3-gcc9
      test-matrix: |
        { include: [
          { config: "distributed", shard: 1, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
          { config: "distributed", shard: 2, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
          { config: "distributed", shard: 3, num_shards: 3, runner: "linux.8xlarge.nvidia.gpu" },
        ]}

  linux-focal-cuda11_8-py3_10-gcc9-experimental-split-build-test:
    name: linux-focal-cuda11.8-py3.10-gcc9-experimental-split-build-test
    uses: ./.github/workflows/_linux-test.yml
    needs:
      - linux-focal-cuda11_8-py3_10-gcc9-experimental-split-build
      - target-determination
    with:
      timeout-minutes: 360
      build-environment: linux-focal-cuda11.8-py3.10-gcc9-experimental-split-build
      docker-image: ${{ needs.linux-focal-cuda11_8-py3_10-gcc9-experimental-split-build.outputs.docker-image }}
      test-matrix: ${{ needs.linux-focal-cuda11_8-py3_10-gcc9-experimental-split-build.outputs.test-matrix }}
19  .github/workflows/trymerge.yml  vendored
@@ -9,6 +9,8 @@ jobs:
    name: try_merge_pr_${{ github.event.client_payload.pr_num }}
    runs-on: linux.20_04.4x
    environment: mergebot
    permissions:
      id-token: write
    env:
      GH_RUN_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
    steps:
@@ -43,6 +45,7 @@ jobs:
          IGNORE_CURRENT: ${{ github.event.client_payload.ignore_current }}
          ROCKSET_API_KEY: ${{ secrets.ROCKSET_API_KEY }}
          DRCI_BOT_KEY: ${{ secrets.DRCI_BOT_KEY }}
          GITHUB_RUN_ID: ${{ github.run_id }}
        run: |
          set -x
          if [ -n "${REBASE}" ]; then
@@ -84,6 +87,22 @@ jobs:
          set -x
          python3 .github/scripts/comment_on_pr.py "${PR_NUM}" "merge"

      - name: configure aws credentials
        uses: aws-actions/configure-aws-credentials@v3
        continue-on-error: true
        with:
          role-to-assume: arn:aws:iam::308535385114:role/upload_to_ossci_raw_job_status
          aws-region: us-east-1

      - name: Upload merge record to s3
        if: always()
        continue-on-error: true
        uses: seemethere/upload-artifact-s3@v5
        with:
          s3-bucket: ossci-raw-job-status
          s3-prefix: merges/${{ github.repository }}/${{ github.event.client_payload.pr_num }}/${{ github.event.client_payload.comment_id }}/${{ github.run_id }}
          path: merge_record.json

# We want newer merge commands to supercede old ones
concurrency:
  group: try-merge-${{ github.event.client_payload.pr_num }}
24  .github/workflows/upload-test-stats.yml  vendored
@@ -25,12 +25,11 @@ jobs:

  upload-test-stats:
    needs: get_workflow_conclusion
    if:
      github.repository_owner == 'pytorch' &&
      (github.event.workflow_run.conclusion == 'success' || github.event.workflow_run.conclusion == 'failure' ||
      needs.get_workflow_conclusion.outputs.conclusion == 'success' || needs.get_workflow_conclusion.outputs.conclusion == 'failure')
    if: github.repository_owner == 'pytorch'
    runs-on: ubuntu-22.04
    environment: upload-stats
    permissions:
      id-token: write
    name: Upload test stats for ${{ github.event.workflow_run.id }}, attempt ${{ github.event.workflow_run.run_attempt }}
    steps:
      - name: Print workflow information
@@ -41,6 +40,13 @@ jobs:
      - name: Checkout PyTorch
        uses: pytorch/pytorch/.github/actions/checkout-pytorch@main

      - name: Configure aws credentials
        uses: aws-actions/configure-aws-credentials@v3
        continue-on-error: true
        with:
          role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_upload-torch-test-stats
          aws-region: us-east-1

      - uses: actions/setup-python@v4
        with:
          python-version: '3.11'
@@ -52,8 +58,6 @@ jobs:
      - name: Upload test artifacts
        id: upload-s3
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          WORKFLOW_ARTIFACTS_URL: ${{ github.event.workflow_run.artifacts_url }}
          WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }}
@@ -69,8 +73,6 @@ jobs:
      - name: Upload test stats
        env:
          ROCKSET_API_KEY: ${{ secrets.ROCKSET_API_KEY }}
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }}
          WORKFLOW_RUN_ATTEMPT: ${{ github.event.workflow_run.run_attempt }}
@@ -84,8 +86,6 @@ jobs:

      - name: Analyze disabled tests rerun
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          WORKFLOW_ARTIFACTS_URL: ${{ github.event.workflow_run.artifacts_url }}
          WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }}
@@ -99,14 +99,12 @@ jobs:
        if: steps.upload-s3.outcome && steps.upload-s3.outcome == 'success' && github.event.workflow_run.name == 'inductor-micro-benchmark'
        env:
          ROCKSET_API_KEY: ${{ secrets.ROCKSET_API_KEY }}
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }}
          WORKFLOW_RUN_ATTEMPT: ${{ github.event.workflow_run.run_attempt }}
          REPO_FULLNAME: ${{ github.event.workflow_run.repository.full_name }}
          HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
        run: |
          python3 -m tools.stats.upload_dynamo_perf_stats --workflow-run-id "${WORKFLOW_RUN_ID}" --workflow-run-attempt "${WORKFLOW_RUN_ATTEMPT}" --repo "${REPO_FULLNAME}" --head-branch "${HEAD_BRANCH}" --rockset-collection oss_ci_benchmark --rockset-workspace benchmarks --match-filename "^gpt_fast_benchmark"
          python3 -m tools.stats.upload_dynamo_perf_stats --workflow-run-id "${WORKFLOW_RUN_ID}" --workflow-run-attempt "${WORKFLOW_RUN_ATTEMPT}" --repo "${REPO_FULLNAME}" --head-branch "${HEAD_BRANCH}" --rockset-collection oss_ci_benchmark --rockset-workspace benchmarks --dynamodb-table torchci-oss-ci-benchmark --match-filename "^gpt_fast_benchmark"

  check-api-rate:
    if: ${{ always() && github.repository_owner == 'pytorch' }}
@@ -26,6 +26,8 @@ jobs:
      github.event.workflow_run.conclusion == 'failure' || needs.get-conclusion.outputs.conclusion == 'failure'
    runs-on: ubuntu-22.04
    environment: upload-stats
    permissions:
      id-token: write
    name: Upload dynamo performance stats for ${{ github.event.workflow_run.id }}, attempt ${{ github.event.workflow_run.run_attempt }}
    steps:
      - name: Checkout PyTorch
@@ -34,6 +36,13 @@ jobs:
          submodules: false
          fetch-depth: 1

      - name: Configure aws credentials
        uses: aws-actions/configure-aws-credentials@v3
        continue-on-error: true
        with:
          role-to-assume: arn:aws:iam::308535385114:role/gha_workflow_upload-torch-test-stats
          aws-region: us-east-1

      - uses: actions/setup-python@v4
        with:
          python-version: '3.11'
@@ -45,8 +54,6 @@ jobs:
      - name: Upload torch dynamo performance stats to S3
        id: upload-s3
        env:
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          WORKFLOW_ARTIFACTS_URL: ${{ github.event.workflow_run.artifacts_url }}
          WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }}
@@ -61,11 +68,9 @@ jobs:
        if: steps.upload-s3.outcome && steps.upload-s3.outcome == 'success'
        env:
          ROCKSET_API_KEY: ${{ secrets.ROCKSET_API_KEY }}
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          WORKFLOW_RUN_ID: ${{ github.event.workflow_run.id }}
          WORKFLOW_RUN_ATTEMPT: ${{ github.event.workflow_run.run_attempt }}
          REPO_FULLNAME: ${{ github.event.workflow_run.repository.full_name }}
          HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
        run: |
          python3 -m tools.stats.upload_dynamo_perf_stats --workflow-run-id "${WORKFLOW_RUN_ID}" --workflow-run-attempt "${WORKFLOW_RUN_ATTEMPT}" --repo "${REPO_FULLNAME}" --head-branch "${HEAD_BRANCH}" --rockset-collection torch_dynamo_perf_stats --rockset-workspace inductor --match-filename "^inductor_"
          python3 -m tools.stats.upload_dynamo_perf_stats --workflow-run-id "${WORKFLOW_RUN_ID}" --workflow-run-attempt "${WORKFLOW_RUN_ATTEMPT}" --repo "${REPO_FULLNAME}" --head-branch "${HEAD_BRANCH}" --rockset-collection torch_dynamo_perf_stats --rockset-workspace inductor --dynamodb-table torchci-dynamo-perf-stats --match-filename "^inductor_"
1  .gitignore  vendored
@@ -129,6 +129,7 @@ env
scripts/release_notes/*.json
sccache-stats*.json
lint.json
merge_record.json

# These files get copied over on invoking setup.py
torchgen/packaged/*
316  .lintrunner.toml
@@ -68,6 +68,8 @@ include_patterns = [
    'aten/src/ATen/native/cudnn/*.cpp',
    'c10/**/*.h',
    'c10/**/*.cpp',
    'distributed/c10d/*DMAConnectivity.*',
    'distributed/c10d/*SymmetricMemory.*',
    'torch/csrc/**/*.h',
    'torch/csrc/**/*.hpp',
    'torch/csrc/**/*.cpp',
@@ -136,7 +138,7 @@ init_command = [
    'numpy==1.24.3 ; python_version == "3.8"',
    'numpy==1.26.0 ; python_version >= "3.9"',
    'expecttest==0.1.6',
    'mypy==1.9.0',
    'mypy==1.10.0',
    'sympy==1.11.1',
    'types-requests==2.27.25',
    'types-PyYAML==6.0.7',
@@ -202,6 +204,8 @@ include_patterns = [
    'torch/csrc/*.cpp',
    'torch/csrc/**/*.h',
    'torch/csrc/**/*.cpp',
    'torch/csrc/jit/serialization/*.h',
    'torch/csrc/jit/serialization/*.cpp',
]
exclude_patterns = [
    # The negative filters below are to exclude files that include onnx_pb.h or
@@ -216,7 +220,6 @@ exclude_patterns = [
    'c10/util/complex_math.h',
    'c10/util/complex_utils.h',
    'c10/util/flat_hash_map.h',
    'c10/util/Float8*.h',
    'c10/util/logging*.h',
    'c10/util/hash.h',
    'c10/util/strong_type.h',
@@ -224,7 +227,6 @@ exclude_patterns = [
    'c10/util/win32-headers.h',
    'c10/util/*inl.h',
    'c10/test/**/*.h',
    'aten/src/ATen/core/TensorImpl_test.cpp',
    'third_party/**/*',
    'torch/csrc/api/**',
    'torch/csrc/autograd/generated/**',
@@ -232,10 +234,8 @@ exclude_patterns = [
    'torch/csrc/dynamo/eval_frame.h',
    'torch/csrc/inductor/**/*',
    'torch/csrc/jit/**/*',
    'torch/csrc/jit/serialization/import_legacy.cpp',
    'torch/csrc/jit/serialization/export.cpp',
    'torch/csrc/jit/serialization/mobile_bytecode_generated.h',
    'torch/csrc/lazy/**/*',
    'torch/csrc/mps/**/*',
]
init_command = [
    'python3',
@@ -999,7 +999,6 @@ command = [
]
exclude_patterns = [
    'tools/gen_vulkan_spv.py',
    'torch/__init__.py', # Skip this file to format because it's part of the public API
    # We don't care too much about files in this directory, don't enforce
    # formatting on them
    'caffe2/**/*.py',
@@ -1099,14 +1098,12 @@ exclude_patterns = [
    'test/test_namedtuple_return_api.py',
    'test/test_native_functions.py',
    'test/test_native_mha.py',
    'test/test_nestedtensor.py',
    'test/test_nn.py',
    'test/test_out_dtype_op.py',
    'test/test_overrides.py',
    'test/test_prims.py',
    'test/test_proxy_tensor.py',
    'test/test_pruning_op.py',
    'test/test_public_bindings.py',
    'test/test_quantization.py',
    'test/test_reductions.py',
    'test/test_scatter_gather_ops.py',
@@ -1132,8 +1129,6 @@ exclude_patterns = [
    'test/test_type_promotion.py',
    'test/test_unary_ufuncs.py',
    'test/test_vulkan.py',
    'test/test_xnnpack_integration.py',
    'test/torch_np/numpy_test/**/*.py',
    'torch/_awaits/__init__.py',
    'torch/_custom_op/__init__.py',
    'torch/_custom_op/autograd.py',
@@ -1194,9 +1189,6 @@ exclude_patterns = [
    'torch/_export/serde/upgrade.py',
    'torch/_export/trace.py',
    'torch/_export/verifier.py',
    'torch/_higher_order_ops/__init__.py',
    'torch/_higher_order_ops/out_dtype.py',
    'torch/_higher_order_ops/wrap.py',
    'torch/_vendor/**',
    'torch/ao/__init__.py',
    'torch/ao/nn/__init__.py',
@@ -1393,172 +1385,8 @@ exclude_patterns = [
    'torch/contrib/_tensorboard_vis.py',
    "torch/cuda/_gpu_trace.py",
    'torch/cuda/_memory_viz.py', # mypy: Value of type "object" is not indexable
    'torch/distributed/__init__.py',
    'torch/distributed/_composable_state.py',
    'torch/distributed/_shard/__init__.py',
    'torch/distributed/_shard/_utils.py',
    'torch/distributed/_shard/api.py',
    'torch/distributed/_shard/checkpoint/__init__.py',
    'torch/distributed/_shard/common_op_utils.py',
    'torch/distributed/_shard/metadata.py',
    'torch/distributed/_shard/op_registry_utils.py',
    'torch/distributed/_shard/sharded_optim/__init__.py',
    'torch/distributed/_shard/sharded_optim/api.py',
    'torch/distributed/_shard/sharded_tensor/__init__.py',
    'torch/distributed/_shard/sharded_tensor/_ops/__init__.py',
    'torch/distributed/_shard/sharded_tensor/_ops/_common.py',
    'torch/distributed/_shard/sharded_tensor/_ops/binary_cmp.py',
    'torch/distributed/_shard/sharded_tensor/_ops/init.py',
    'torch/distributed/_shard/sharded_tensor/_ops/misc_ops.py',
    'torch/distributed/_shard/sharded_tensor/_ops/tensor_ops.py',
    'torch/distributed/_shard/sharded_tensor/api.py',
    'torch/distributed/_shard/sharded_tensor/logger.py',
    'torch/distributed/_shard/sharded_tensor/logging_handlers.py',
    'torch/distributed/_shard/sharded_tensor/metadata.py',
    'torch/distributed/_shard/sharded_tensor/reshard.py',
    'torch/distributed/_shard/sharded_tensor/shard.py',
    'torch/distributed/_shard/sharded_tensor/utils.py',
    'torch/distributed/_shard/sharder.py',
    'torch/distributed/_shard/sharding_plan/__init__.py',
    'torch/distributed/_shard/sharding_plan/api.py',
    'torch/distributed/_shard/sharding_spec/__init__.py',
    'torch/distributed/_shard/sharding_spec/_internals.py',
    'torch/distributed/_shard/sharding_spec/api.py',
    'torch/distributed/_shard/sharding_spec/chunk_sharding_spec.py',
    'torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/__init__.py',
    'torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/_common.py',
    'torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding.py',
    'torch/distributed/_shard/sharding_spec/chunk_sharding_spec_ops/embedding_bag.py',
    'torch/distributed/_sharded_tensor/__init__.py',
    'torch/distributed/_sharding_spec/__init__.py',
    'torch/distributed/_tools/__init__.py',
    'torch/distributed/_tools/memory_tracker.py',
    'torch/distributed/algorithms/__init__.py',
    'torch/distributed/algorithms/_checkpoint/__init__.py',
    'torch/distributed/algorithms/_checkpoint/checkpoint_wrapper.py',
    'torch/distributed/algorithms/_comm_hooks/__init__.py',
    'torch/distributed/algorithms/_comm_hooks/default_hooks.py',
    'torch/distributed/algorithms/_optimizer_overlap/__init__.py',
    'torch/distributed/algorithms/_optimizer_overlap/optimizer_overlap.py',
    'torch/distributed/algorithms/_quantization/__init__.py',
    'torch/distributed/algorithms/_quantization/quantization.py',
    'torch/distributed/algorithms/ddp_comm_hooks/__init__.py',
    'torch/distributed/algorithms/ddp_comm_hooks/ddp_zero_hook.py',
    'torch/distributed/algorithms/ddp_comm_hooks/debugging_hooks.py',
    'torch/distributed/algorithms/ddp_comm_hooks/default_hooks.py',
    'torch/distributed/algorithms/ddp_comm_hooks/mixed_precision_hooks.py',
    'torch/distributed/algorithms/ddp_comm_hooks/optimizer_overlap_hooks.py',
    'torch/distributed/algorithms/ddp_comm_hooks/post_localSGD_hook.py',
    'torch/distributed/algorithms/ddp_comm_hooks/powerSGD_hook.py',
    'torch/distributed/algorithms/ddp_comm_hooks/quantization_hooks.py',
    'torch/distributed/algorithms/join.py',
    'torch/distributed/algorithms/model_averaging/__init__.py',
    'torch/distributed/algorithms/model_averaging/averagers.py',
    'torch/distributed/algorithms/model_averaging/hierarchical_model_averager.py',
    'torch/distributed/algorithms/model_averaging/utils.py',
    'torch/distributed/argparse_util.py',
    'torch/distributed/autograd/__init__.py',
    'torch/distributed/benchmarks/benchmark_ddp_rpc.py',
    'torch/distributed/c10d_logger.py',
    'torch/distributed/collective_utils.py',
    'torch/distributed/constants.py',
    'torch/distributed/distributed_c10d.py',
    'torch/distributed/elastic/__init__.py',
    'torch/distributed/elastic/agent/__init__.py',
    'torch/distributed/elastic/agent/server/__init__.py',
    'torch/distributed/elastic/agent/server/api.py',
    'torch/distributed/elastic/agent/server/local_elastic_agent.py',
    'torch/distributed/elastic/events/__init__.py',
    'torch/distributed/elastic/events/api.py',
    'torch/distributed/elastic/events/handlers.py',
    'torch/distributed/elastic/metrics/__init__.py',
    'torch/distributed/elastic/metrics/api.py',
    'torch/distributed/elastic/multiprocessing/__init__.py',
    'torch/distributed/elastic/multiprocessing/api.py',
    'torch/distributed/elastic/multiprocessing/errors/__init__.py',
    'torch/distributed/elastic/multiprocessing/errors/error_handler.py',
    'torch/distributed/elastic/multiprocessing/errors/handlers.py',
    'torch/distributed/elastic/multiprocessing/redirects.py',
    'torch/distributed/elastic/multiprocessing/tail_log.py',
    'torch/distributed/elastic/rendezvous/__init__.py',
    'torch/distributed/elastic/rendezvous/api.py',
    'torch/distributed/elastic/rendezvous/c10d_rendezvous_backend.py',
    'torch/distributed/elastic/rendezvous/dynamic_rendezvous.py',
    'torch/distributed/elastic/rendezvous/etcd_rendezvous.py',
    'torch/distributed/elastic/rendezvous/etcd_rendezvous_backend.py',
    'torch/distributed/elastic/rendezvous/etcd_server.py',
    'torch/distributed/elastic/rendezvous/etcd_store.py',
    'torch/distributed/elastic/rendezvous/registry.py',
    'torch/distributed/elastic/rendezvous/static_tcp_rendezvous.py',
    'torch/distributed/elastic/rendezvous/utils.py',
    'torch/distributed/elastic/timer/__init__.py',
    'torch/distributed/elastic/timer/api.py',
    'torch/distributed/elastic/timer/file_based_local_timer.py',
    'torch/distributed/elastic/timer/local_timer.py',
    'torch/distributed/elastic/utils/__init__.py',
    'torch/distributed/elastic/utils/api.py',
    'torch/distributed/elastic/utils/data/__init__.py',
    'torch/distributed/elastic/utils/data/cycling_iterator.py',
    'torch/distributed/elastic/utils/data/elastic_distributed_sampler.py',
    'torch/distributed/elastic/utils/distributed.py',
    'torch/distributed/elastic/utils/log_level.py',
    'torch/distributed/elastic/utils/logging.py',
    'torch/distributed/elastic/utils/store.py',
    'torch/distributed/examples/memory_tracker_example.py',
    'torch/distributed/launch.py',
    'torch/distributed/launcher/__init__.py',
    'torch/distributed/launcher/api.py',
    'torch/distributed/logging_handlers.py',
    'torch/distributed/nn/__init__.py',
    'torch/distributed/nn/api/__init__.py',
    'torch/distributed/nn/api/remote_module.py',
    'torch/distributed/nn/functional.py',
    'torch/distributed/nn/jit/__init__.py',
    'torch/distributed/nn/jit/instantiator.py',
    'torch/distributed/nn/jit/templates/__init__.py',
    'torch/distributed/nn/jit/templates/remote_module_template.py',
    'torch/distributed/optim/__init__.py',
    'torch/distributed/optim/apply_optimizer_in_backward.py',
    'torch/distributed/optim/functional_adadelta.py',
    'torch/distributed/optim/functional_adagrad.py',
    'torch/distributed/optim/functional_adam.py',
    'torch/distributed/optim/functional_adamax.py',
    'torch/distributed/optim/functional_adamw.py',
    'torch/distributed/optim/functional_rmsprop.py',
    'torch/distributed/optim/functional_rprop.py',
    'torch/distributed/optim/functional_sgd.py',
    'torch/distributed/optim/named_optimizer.py',
    'torch/distributed/optim/optimizer.py',
    'torch/distributed/optim/post_localSGD_optimizer.py',
    'torch/distributed/optim/utils.py',
    'torch/distributed/optim/zero_redundancy_optimizer.py',
    'torch/distributed/remote_device.py',
    'torch/distributed/rendezvous.py',
    'torch/distributed/rpc/__init__.py',
    'torch/distributed/rpc/_testing/__init__.py',
    'torch/distributed/rpc/_testing/faulty_agent_backend_registry.py',
    'torch/distributed/rpc/_utils.py',
    'torch/distributed/rpc/api.py',
    'torch/distributed/rpc/backend_registry.py',
    'torch/distributed/rpc/constants.py',
    'torch/distributed/rpc/functions.py',
    'torch/distributed/rpc/internal.py',
    'torch/distributed/rpc/options.py',
    'torch/distributed/rpc/rref_proxy.py',
    'torch/distributed/rpc/server_process_global_profiler.py',
    'torch/distributed/run.py',
    'torch/distributed/tensor/__init__.py',
    'torch/distributed/tensor/parallel/__init__.py',
    'torch/distributed/tensor/parallel/_utils.py',
    'torch/distributed/tensor/parallel/_view_with_dim_change.py',
    'torch/distributed/tensor/parallel/api.py',
    'torch/distributed/tensor/parallel/fsdp.py',
    'torch/distributed/tensor/parallel/input_reshard.py',
    'torch/distributed/tensor/parallel/multihead_attention_tp.py',
    'torch/distributed/tensor/parallel/style.py',
    'torch/fft/__init__.py',
    'torch/func/__init__.py',
    'torch/functional.py',
    'torch/futures/__init__.py',
    'torch/fx/__init__.py',
    'torch/fx/_compatibility.py',
@@ -1644,20 +1472,9 @@ exclude_patterns = [
    'torch/fx/subgraph_rewriter.py',
    'torch/fx/tensor_type.py',
    'torch/fx/traceback.py',
    'torch/hub.py',
    'torch/library.py',
    'torch/linalg/__init__.py',
    'torch/monitor/__init__.py',
    'torch/nested/__init__.py',
    'torch/nn/__init__.py',
    'torch/nn/_reduction.py',
    'torch/nn/backends/__init__.py',
    'torch/nn/backends/thnn.py',
    'torch/nn/common_types.py',
    'torch/nn/cpp.py',
    'torch/nn/functional.py',
    'torch/nn/grad.py',
    'torch/nn/init.py',
    'torch/nn/intrinsic/__init__.py',
    'torch/nn/intrinsic/modules/__init__.py',
    'torch/nn/intrinsic/modules/fused.py',
@@ -1674,40 +1491,6 @@ exclude_patterns = [
    'torch/nn/intrinsic/quantized/modules/bn_relu.py',
    'torch/nn/intrinsic/quantized/modules/conv_relu.py',
    'torch/nn/intrinsic/quantized/modules/linear_relu.py',
    'torch/nn/modules/__init__.py',
    'torch/nn/modules/_functions.py',
    'torch/nn/modules/activation.py',
    'torch/nn/modules/adaptive.py',
    'torch/nn/modules/batchnorm.py',
    'torch/nn/modules/channelshuffle.py',
    'torch/nn/modules/container.py',
    'torch/nn/modules/conv.py',
    'torch/nn/modules/distance.py',
    'torch/nn/modules/dropout.py',
    'torch/nn/modules/flatten.py',
    'torch/nn/modules/fold.py',
    'torch/nn/modules/instancenorm.py',
    'torch/nn/modules/lazy.py',
    'torch/nn/modules/linear.py',
    'torch/nn/modules/loss.py',
    'torch/nn/modules/module.py',
    'torch/nn/modules/normalization.py',
    'torch/nn/modules/padding.py',
    'torch/nn/modules/pixelshuffle.py',
    'torch/nn/modules/pooling.py',
    'torch/nn/modules/rnn.py',
    'torch/nn/modules/sparse.py',
    'torch/nn/modules/transformer.py',
    'torch/nn/modules/upsampling.py',
    'torch/nn/modules/utils.py',
    'torch/nn/parallel/__init__.py',
    'torch/nn/parallel/_functions.py',
    'torch/nn/parallel/comm.py',
    'torch/nn/parallel/data_parallel.py',
    'torch/nn/parallel/parallel_apply.py',
    'torch/nn/parallel/replicate.py',
    'torch/nn/parallel/scatter_gather.py',
    'torch/nn/parameter.py',
    'torch/nn/qat/__init__.py',
    'torch/nn/qat/dynamic/__init__.py',
    'torch/nn/qat/dynamic/modules/__init__.py',
@@ -1745,35 +1528,6 @@ exclude_patterns = [
    'torch/nn/quantized/modules/normalization.py',
    'torch/nn/quantized/modules/rnn.py',
    'torch/nn/quantized/modules/utils.py',
    'torch/nn/utils/__init__.py',
    'torch/nn/utils/_deprecation_utils.py',
    'torch/nn/utils/_expanded_weights/__init__.py',
    'torch/nn/utils/_expanded_weights/conv_expanded_weights.py',
    'torch/nn/utils/_expanded_weights/conv_utils.py',
    'torch/nn/utils/_expanded_weights/embedding_expanded_weights.py',
    'torch/nn/utils/_expanded_weights/expanded_weights_impl.py',
    'torch/nn/utils/_expanded_weights/expanded_weights_utils.py',
    'torch/nn/utils/_expanded_weights/group_norm_expanded_weights.py',
    'torch/nn/utils/_expanded_weights/instance_norm_expanded_weights.py',
    'torch/nn/utils/_expanded_weights/layer_norm_expanded_weights.py',
    'torch/nn/utils/_expanded_weights/linear_expanded_weights.py',
    'torch/nn/utils/_per_sample_grad.py',
    'torch/nn/utils/clip_grad.py',
    'torch/nn/utils/convert_parameters.py',
    'torch/nn/utils/fusion.py',
    'torch/nn/utils/init.py',
    'torch/nn/utils/memory_format.py',
    'torch/nn/utils/parametrizations.py',
    'torch/nn/utils/parametrize.py',
    'torch/nn/utils/prune.py',
    'torch/nn/utils/rnn.py',
    'torch/nn/utils/spectral_norm.py',
    'torch/nn/utils/weight_norm.py',
    'torch/overrides.py',
    'torch/quasirandom.py',
    'torch/random.py',
    'torch/return_types.py',
    'torch/serialization.py',
    'torch/signal/__init__.py',
    'torch/signal/windows/__init__.py',
    'torch/signal/windows/windows.py',
@@ -1782,7 +1536,6 @@ exclude_patterns = [
    'torch/sparse/_triton_ops.py',
    'torch/sparse/semi_structured.py',
    'torch/special/__init__.py',
    'torch/storage.py',
    'torch/testing/_internal/__init__.py',
    'torch/testing/_internal/autocast_test_lists.py',
    'torch/testing/_internal/autograd_function_db.py',
@@ -1790,9 +1543,7 @@ exclude_patterns = [
    'torch/testing/_internal/codegen/__init__.py',
    'torch/testing/_internal/codegen/random_topo_test.py',
    'torch/testing/_internal/common_cuda.py',
    'torch/testing/_internal/common_device_type.py',
    'torch/testing/_internal/common_distributed.py',
    'torch/testing/_internal/common_dtype.py',
    'torch/testing/_internal/common_jit.py',
    'torch/testing/_internal/common_methods_invocations.py',
    'torch/testing/_internal/common_modules.py',
@@ -1857,7 +1608,6 @@ exclude_patterns = [
    'torch/testing/_internal/test_module/__init__.py',
    'torch/testing/_internal/test_module/future_div.py',
    'torch/testing/_internal/test_module/no_future_div.py',
    'torch/utils/__init__.py',
    'torch/utils/_contextlib.py',
    'torch/utils/_cpp_extension_versioner.py',
    'torch/utils/_crash_handler.py',
@@ -1908,53 +1658,6 @@ exclude_patterns = [
    'torch/utils/collect_env.py',
    'torch/utils/cpp_backtrace.py',
    'torch/utils/cpp_extension.py',
    'torch/utils/data/__init__.py',
    'torch/utils/data/_utils/__init__.py',
    'torch/utils/data/_utils/collate.py',
    'torch/utils/data/_utils/fetch.py',
    'torch/utils/data/_utils/pin_memory.py',
    'torch/utils/data/_utils/serialization.py',
    'torch/utils/data/_utils/signal_handling.py',
    'torch/utils/data/_utils/worker.py',
    'torch/utils/data/backward_compatibility.py',
    'torch/utils/data/dataloader.py',
    'torch/utils/data/datapipes/__init__.py',
    'torch/utils/data/datapipes/_decorator.py',
    'torch/utils/data/datapipes/_hook_iterator.py',
    'torch/utils/data/datapipes/_typing.py',
    'torch/utils/data/datapipes/dataframe/__init__.py',
    'torch/utils/data/datapipes/dataframe/dataframe_wrapper.py',
    'torch/utils/data/datapipes/dataframe/dataframes.py',
    'torch/utils/data/datapipes/dataframe/datapipes.py',
    'torch/utils/data/datapipes/dataframe/structures.py',
    'torch/utils/data/datapipes/datapipe.py',
    'torch/utils/data/datapipes/gen_pyi.py',
    'torch/utils/data/datapipes/iter/__init__.py',
    'torch/utils/data/datapipes/iter/callable.py',
    'torch/utils/data/datapipes/iter/combinatorics.py',
    'torch/utils/data/datapipes/iter/combining.py',
    'torch/utils/data/datapipes/iter/filelister.py',
    'torch/utils/data/datapipes/iter/fileopener.py',
    'torch/utils/data/datapipes/iter/grouping.py',
    'torch/utils/data/datapipes/iter/routeddecoder.py',
    'torch/utils/data/datapipes/iter/selecting.py',
    'torch/utils/data/datapipes/iter/sharding.py',
    'torch/utils/data/datapipes/iter/streamreader.py',
    'torch/utils/data/datapipes/iter/utils.py',
    'torch/utils/data/datapipes/map/__init__.py',
    'torch/utils/data/datapipes/map/callable.py',
    'torch/utils/data/datapipes/map/combinatorics.py',
    'torch/utils/data/datapipes/map/combining.py',
    'torch/utils/data/datapipes/map/grouping.py',
    'torch/utils/data/datapipes/map/utils.py',
    'torch/utils/data/datapipes/utils/__init__.py',
    'torch/utils/data/datapipes/utils/common.py',
    'torch/utils/data/datapipes/utils/decoder.py',
    'torch/utils/data/datapipes/utils/snapshot.py',
    'torch/utils/data/distributed.py',
    'torch/utils/data/graph.py',
    'torch/utils/data/graph_settings.py',
    'torch/utils/data/sampler.py',
    'torch/utils/dlpack.py',
    'torch/utils/file_baton.py',
    'torch/utils/flop_counter.py',
@@ -1994,8 +1697,9 @@ init_command = [
    '--dry-run={{DRYRUN}}',
    '--no-black-binary',
    'black==23.12.1',
    'ufmt==2.1.0',
    'usort==1.0.6',
    'ufmt==2.7.0',
    'usort==1.0.8.post1',
    'isort==5.13.2',
]
is_formatter = true

@@ -2079,7 +1783,7 @@ init_command = [
    'python3',
    'tools/linter/adapters/pip_init.py',
    '--dry-run={{DRYRUN}}',
    'ruff==0.4.8',
    'ruff==0.5.0',
]
is_formatter = true
@@ -461,7 +461,6 @@ filegroup(
filegroup(
    name = "caffe2_perfkernels_srcs",
    srcs = [
        "caffe2/perfkernels/embedding_lookup.cc",
        "caffe2/perfkernels/embedding_lookup_idx.cc",
    ],
)
@@ -499,7 +498,6 @@ cc_library(
    hdrs = [
        "caffe2/core/common.h",
        "caffe2/perfkernels/common.h",
        "caffe2/perfkernels/embedding_lookup.h",
        "caffe2/perfkernels/embedding_lookup_idx.h",
        "caffe2/utils/fixed_divisor.h",
    ] + glob([
@@ -746,6 +744,7 @@ cc_library(
        "torch/csrc/cuda/python_nccl.cpp",
        "torch/csrc/cuda/nccl.cpp",
        "torch/csrc/distributed/c10d/intra_node_comm.cu",
        "torch/csrc/distributed/c10d/CUDASymmetricMemory.cu",
        "torch/csrc/distributed/c10d/Utils.cu",
        "torch/csrc/distributed/c10d/quantization/quantization_gpu.cu",
    ],
@@ -763,6 +762,7 @@ cc_library(
        ":torch_headers",
        "@kineto",
        "@cpp-httplib",
        "@nlohmann",
    ] + if_cuda([
        "@cuda//:nvToolsExt",
        "@cutlass",
@@ -208,7 +208,6 @@ endif()
include(CMakeDependentOption)
option(ATEN_NO_TEST "Do not build ATen test binaries" OFF)
option(BUILD_BINARY "Build C++ binaries" OFF)
option(BUILD_DOCS "Build Caffe2 documentation" OFF)
option(BUILD_CUSTOM_PROTOBUF
    "Build and use Caffe2's own protobuf under third_party" ON)
option(BUILD_PYTHON "Build Python binaries" ON)
@@ -750,7 +749,6 @@ if(NOT TORCH_BUILD_VERSION)
      CACHE STRING "Torch build version" FORCE)
endif()
caffe2_parse_version_str(TORCH ${TORCH_BUILD_VERSION})
caffe2_parse_version_str(CAFFE2 ${TORCH_BUILD_VERSION})
set(TORCH_SOVERSION "${TORCH_VERSION_MAJOR}.${TORCH_VERSION_MINOR}")

# ---[ CMake scripts + modules
@@ -1223,45 +1221,6 @@ endif()
add_subdirectory(c10)
add_subdirectory(caffe2)

# --[ Documentation
if(BUILD_DOCS)
  # check if Doxygen is installed
  find_package(Doxygen)
  if(DOXYGEN_FOUND)
    message("Generating documentation")

    set(DOXYGEN_C_IN ${CMAKE_CURRENT_SOURCE_DIR}/docs/caffe2/.Doxyfile-c)
    set(DOXYGEN_C_OUT ${CMAKE_CURRENT_SOURCE_DIR}/docs/caffe2/Doxyfile-c)
    set(DOXYGEN_P_IN ${CMAKE_CURRENT_SOURCE_DIR}/docs/caffe2/.Doxyfile-python)
    set(DOXYGEN_P_OUT ${CMAKE_CURRENT_SOURCE_DIR}/docs/caffe2/Doxyfile-python)

    if(EXISTS ${CMAKE_CURRENT_BINARY_DIR}/docs)
      file(REMOVE_RECURSE ${CMAKE_CURRENT_BINARY_DIR}/docs)
    endif()

    file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/docs)
    configure_file(${DOXYGEN_C_IN} ${DOXYGEN_C_OUT} @ONLY)
    configure_file(${DOXYGEN_P_IN} ${DOXYGEN_P_OUT} @ONLY)

    add_custom_target(
      doc_doxygen_c ALL
      COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_C_OUT}
      WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
      COMMENT "Generating C++ API documentation with Doxygen"
      VERBATIM)

    add_custom_target(
      doc_doxygen_python ALL
      COMMAND ${DOXYGEN_EXECUTABLE} ${DOXYGEN_P_OUT}
      WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
      COMMENT "Generating Python API documentation with Doxygen"
      VERBATIM)
  else()
    message(
      FATAL_ERROR "Doxygen needs to be installed to generate the documentation")
  endif()
endif()

# ---[ CMake related files Uninistall option.
if(NOT TARGET caffe2_uninstall)
  configure_file(
30  CODEOWNERS
@@ -43,12 +43,12 @@ nn/qat/ @jerryzh168
/torch/csrc/distributed/rpc/tensorpipe_agent.h @jiayisuse @osalpekar @lw

# ONNX Export
/torch/_dynamo/backends/onnxrt.py @bowenbao @thiagocrepaldi @wschin
/torch/csrc/jit/passes/onnx.h @bowenbao @thiagocrepaldi
/torch/csrc/jit/passes/onnx.cpp @bowenbao @thiagocrepaldi
/torch/csrc/jit/passes/onnx/ @bowenbao @thiagocrepaldi
/torch/onnx/ @bowenbao @thiagocrepaldi @wschin
/test/onnx/ @bowenbao @thiagocrepaldi @wschin
/torch/_dynamo/backends/onnxrt.py @wschin @xadupre
/torch/csrc/jit/passes/onnx.h @titaiwangms @shubhambhokare1 @xadupre
/torch/csrc/jit/passes/onnx.cpp @titaiwangms @shubhambhokare1 @xadupre
/torch/csrc/jit/passes/onnx/ @titaiwangms @shubhambhokare1 @xadupre
/torch/onnx/ @titaiwangms @shubhambhokare1 @justinchuby @wschin @xadupre
/test/onnx/ @titaiwangms @shubhambhokare1 @justinchuby @wschin @xadupre

# CI
/.ci @pytorch/pytorch-dev-infra
@@ -57,6 +57,7 @@ nn/qat/ @jerryzh168
/.ci/docker/ @jeffdaily
/.ci/docker/ci_commit_pins/triton.txt @desertfire @Chillee @eellison @shunting314 @bertmaher @jeffdaily @jataylo @jithunnair-amd @pruthvistony
/.ci/docker/ci_commit_pins/triton-rocm.txt @jeffdaily @jataylo @jithunnair-amd @pruthvistony
/.ci/docker/ci_commit_pins/triton-xpu.txt @EikanWang @gujinghui

# Github Actions
# This list is for people wanting to be notified every time there's a change
@@ -107,10 +108,10 @@ aten/src/ATen/detail/MTIAHooksInterface.h @egienvalue
torch/csrc/mtia/ @egienvalue

# Profiler
torch/csrc/autograd/profiler* @aaronenyeshi
torch/autograd/profiler* @aaronenyeshi
torch/csrc/profiler/ @aaronenyeshi
torch/profiler/ @aaronenyeshi
torch/csrc/autograd/profiler* @aaronenyeshi @sraikund16
torch/autograd/profiler* @aaronenyeshi @sraikund16
torch/csrc/profiler/ @aaronenyeshi @sraikund16
torch/profiler/ @aaronenyeshi @sraikund16

# AOTDispatch tests
test/functorch/test_aotdispatch.py @ezyang @Chillee
@@ -132,6 +133,15 @@ caffe2/operators/hip @jeffdaily @jithunnair-amd
caffe2/operators/rnn/hip @jeffdaily @jithunnair-amd
caffe2/utils/hip @jeffdaily @jithunnair-amd

# XPU-specific files
/aten/src/ATen/xpu/ @EikanWang @gujinghui
/c10/xpu/ @EikanWang @gujinghui
/torch/csrc/xpu/ @EikanWang @gujinghui
/torch/xpu/ @EikanWang @gujinghui
/test/xpu/ @EikanWang @gujinghui
/test/test_xpu.py @EikanWang @gujinghui
/third_party/xpu.txt @EikanWang @gujinghui

# torch.export
/torch/export/ @avikchaudhuri @gmagogsfm @tugsbayasgalan @zhxchen17
/torch/_export/ @avikchaudhuri @gmagogsfm @tugsbayasgalan @zhxchen17
@@ -77,6 +77,11 @@ RUN case ${TARGETPLATFORM} in \
    esac && \
    /opt/conda/bin/conda clean -ya
RUN /opt/conda/bin/pip install torchelastic
RUN IS_CUDA=$(python -c 'import torch ; print(torch.cuda._is_compiled())'); \
    echo "Is torch compiled with cuda: ${IS_CUDA}"; \
    if test "${IS_CUDA}" != "True" -a ! -z "${CUDA_VERSION}"; then \
        exit 1; \
    fi

FROM ${BASE_IMAGE} as official
ARG PYTORCH_VERSION
17  RELEASE.md
@@ -51,6 +51,7 @@ Following is the Release Compatibility Matrix for PyTorch releases:

| PyTorch version | Python | Stable CUDA | Experimental CUDA | Stable ROCm |
| --- | --- | --- | --- | --- |
| 2.4 | >=3.8, <=3.12 | CUDA 11.8, CUDA 12.1, CUDNN 9.1.0.70 | CUDA 12.4, CUDNN 9.1.0.70 | ROCm 6.1 |
| 2.3 | >=3.8, <=3.11, (3.12 experimental) | CUDA 11.8, CUDNN 8.7.0.84 | CUDA 12.1, CUDNN 8.9.2.26 | ROCm 6.0 |
| 2.2 | >=3.8, <=3.11, (3.12 experimental) | CUDA 11.8, CUDNN 8.7.0.84 | CUDA 12.1, CUDNN 8.9.2.26 | ROCm 5.7 |
| 2.1 | >=3.8, <=3.11 | CUDA 11.8, CUDNN 8.7.0.84 | CUDA 12.1, CUDNN 8.9.2.26 | ROCm 5.6 |
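Editor's note: the matrix above can be checked against a local build with a few standard `torch` attributes. A minimal sketch (not part of RELEASE.md; assumes a stock pip/conda install):

```python
# Prints the version information that the compatibility matrix is keyed on.
import platform

import torch

print("python:", platform.python_version())  # "Python" column
print("torch :", torch.__version__)          # "PyTorch version" column
print("cuda  :", torch.version.cuda)         # e.g. "12.1"; None on CPU/ROCm builds
print("rocm  :", torch.version.hip)          # e.g. "6.1"; None on CPU/CUDA builds
if torch.backends.cudnn.is_available():
    # cuDNN version as an integer, e.g. 90100 for cuDNN 9.1.0
    print("cudnn :", torch.backends.cudnn.version())
```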
@@ -60,15 +61,19 @@ Following is the Release Compatibility Matrix for PyTorch releases:

## Release Cadence

Following is the release cadence for year 2023/2024. All dates below are tentative, for latest updates on the release scheduled please follow [dev discuss](https://dev-discuss.pytorch.org/c/release-announcements/27).
Following is the release cadence for year 2023/2024. All dates below are tentative, for latest updates on the release scheduled please follow [dev discuss](https://dev-discuss.pytorch.org/c/release-announcements/27). Please note: Patch Releases are optional.

| Minor Version | Release branch cut | Release date | First patch release date | Second patch release date|
| --- | --- | --- | --- | --- |
| 2.1 | Aug 2023 | Oct 2023 | Nov 2023 | Dec 2023 |
| 2.2 | Dec 2023 | Jan 2024 | Feb 2024 | Mar 2024 |
| 2.3 | Mar 2024 | Apr 2024 | Jun 2024 | Not planned |
| 2.4 | Jun 2024 | Jul 2024 | Aug 2024 | Sep 2024 |
| 2.5 | Aug 2024 | Oct 2024 | Nov 2024 | Dec 2024 |
| 2.4 | Jun 2024 | Jul 2024 | (Sept 2024) | Not planned |
| 2.5 | Aug 2024 | Oct 2024 | (Nov 2024) | (Dec 2024) |
| 2.6 | Dec 2024 | Jan 2025 | (Feb 2025) | (Mar 2025) |
| 2.7 | Mar 2025 | Apr 2025 | (May 2025) | (Jun 2025) |
| 2.8 | Jun 2025 | Jul 2025 | (Aug 2025) | (Sept 2025) |
| 2.9 | Aug 2025 | Oct 2025 | (Nov 2025) | (Dec 2025) |

## General Overview

@@ -290,7 +295,7 @@ After the final RC is created. The following tasks should be performed :

* Create validation issue for the release, see for example [Validations for 2.1.2 release](https://github.com/pytorch/pytorch/issues/114904) and perform required validations.

* Run performance tests in [benchmark repository](https://github.com/pytorch/benchmark). Make sure there are no prerformance regressions.
* Run performance tests in [benchmark repository](https://github.com/pytorch/benchmark). Make sure there are no performance regressions.

* Prepare and stage PyPI binaries for promotion. This is done with this script:
[`pytorch/builder:release/pypi/promote_pypi_to_staging.sh`](https://github.com/pytorch/builder/blob/main/release/pypi/promote_pypi_to_staging.sh)
@@ -429,12 +434,12 @@ need to support these particular versions of software.

## Operating Systems
Supported OS flavors are summarized in the table below:
| Operating System family | Architectrue | Notes |
| Operating System family | Architecture | Notes |
| --- | --- | --- |
| Linux | aarch64, x86_64 | Wheels are manylinux2014 compatible, i.e. they should be runnable on any Linux system with glibc-2.17 or above. |
| MacOS | arm64 | Builds should be compatible with MacOS 11 (Big Sur) or newer, but are actively tested against MacOS 14 (Sonoma). |
| MacOS | x86_64 | Requires MacOS Catalina or above, not supported after 2.2, see https://github.com/pytorch/pytorch/issues/114602 |
| Windows | x86_64 | Buils are compatible with Windows-10 or newer. |
| Windows | x86_64 | Builds are compatible with Windows-10 or newer. |

# Submitting Tutorials
26  SECURITY.md
@@ -6,7 +6,7 @@
- [Untrusted inputs](#untrusted-inputs)
- [Data privacy](#data-privacy)
- [Using distributed features](#using-distributed-features)

- [**CI/CD security principles**](#cicd-security-principles)
## Reporting Security Issues

Beware that none of the topics under [Using Pytorch Securely](#using-pytorch-securely) are considered vulnerabilities of Pytorch.
@@ -61,3 +61,27 @@ If applicable, prepare your model against bad inputs and prompt injections. Some
PyTorch can be used for distributed computing, and as such there is a `torch.distributed` package. PyTorch Distributed features are intended for internal communication only. They are not built for use in untrusted environments or networks.

For performance reasons, none of the PyTorch Distributed primitives (including c10d, RPC, and TCPStore) include any authorization protocol and will send messages unencrypted. They accept connections from anywhere, and execute the workload sent without performing any checks. Therefore, if you run a PyTorch Distributed program on your network, anybody with access to the network can execute arbitrary code with the privileges of the user running PyTorch.
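Editor's note: a minimal sketch (not part of SECURITY.md) of what the paragraph above means in practice for `TCPStore`. The host, port, and key name are arbitrary placeholders; run this only on a trusted network.

```python
# Demonstrates that a c10d TCPStore performs no authentication or encryption:
# any process that can reach the host/port can read and overwrite keys.
from datetime import timedelta

from torch.distributed import TCPStore

# "Server" side: binds the store on an arbitrary local port.
server = TCPStore("127.0.0.1", 29500, is_master=True,
                  wait_for_workers=False, timeout=timedelta(seconds=30))

# "Client" side: connects with no credentials of any kind.
client = TCPStore("127.0.0.1", 29500, is_master=False,
                  timeout=timedelta(seconds=30))
client.set("job_state", "overwritten by an unauthenticated peer")
print(server.get("job_state"))  # b'overwritten by an unauthenticated peer'
```

The same trust model applies to the other c10d primitives; isolation has to come from the network layer, not from PyTorch.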
## CI/CD security principles
_Audience_: Contributors and reviewers, especially if modifying the workflow files/build system.

PyTorch CI/CD security philosophy is based on finding a balance between open and transparent CI pipelines while keeping the environment efficient and safe.

PyTorch testing requirements are complex, and a large part of the code base can only be tested on specialized powerful hardware, such as GPU, making it a lucrative target for resource misuse. To prevent this, we require workflow run approval for PRs from non-member contributors. To keep the volume of those approvals relatively low, we easily extend write permissions to the repository to regular contributors.

More widespread write access to the repo presents challenges when it comes to reviewing changes, merging code into trunk, and creating releases. [Protected branches](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/about-protected-branches) are used to restrict the ability to merge to the trunk/release branches only to the repository administrators and merge bot. The merge bot is responsible for mechanistically merging the change and validating reviews against the path-based rules defined in [merge_rules.yml](https://github.com/pytorch/pytorch/blob/main/.github/merge_rules.yaml). Once a PR has been reviewed by person(s) mentioned in these rules, leaving a `@pytorchbot merge` comment on the PR will initiate the merge process. To protect merge bot credentials from leaking, merge actions must be executed only on ephemeral runners (see definition below) using a specialized deployment environment.

To speed up the CI system, build steps of the workflow rely on the distributed caching mechanism backed by [sccache](https://github.com/mozilla/sccache), making them susceptible to cache corruption compromises. For that reason binary artifacts generated during CI should not be executed in an environment that contains an access to any sensitive/non-public information and should not be published for use by general audience. One should not have any expectation about the lifetime of those artifacts, although in practice they likely remain accessible for about two weeks after the PR has been closed.

To speed up CI system setup, PyTorch relies heavily on Docker to pre-build and pre-install the dependencies. To prevent a potentially malicious PR from altering ones that were published in the past, ECR has been configured to use immutable tags.

To improve runner availability and more efficient resource utilization, some of the CI runners are non-ephemeral, i.e., workflow steps from completely unrelated PRs could be scheduled sequentially on the same runner, making them susceptible to reverse shell attacks. For that reason, PyTorch does not rely on the repository secrets mechanism, as these can easily be compromised in such attacks.

### Release pipelines security

To ensure safe binary releases, PyTorch release pipelines are built on the following principles:
- All binary builds/upload jobs must be run on ephemeral runners, i.e., on a machine that is allocated from the cloud to do the build and released back to the cloud after the build is finished. This protects those builds from interference from external actors, who potentially can get reverse shell access to a non-ephemeral runner and wait there for a binary build.
- All binary builds are cold-start builds, i.e., distributed caching/incremental builds are not permitted. This renders builds much slower than incremental CI builds but isolates them from potential compromises of the intermediate artifacts caching systems.
- All upload jobs are executed in a [deployment environments](https://docs.github.com/en/actions/deployment/targeting-different-environments/using-environments-for-deployment) that are restricted to protected branches
- Security credentials needed to upload binaries to PyPI/conda or stable indexes `download.pytorch.org/whl` are never uploaded to repo secrets storage/environment. This requires an extra manual step to publish the release but ensures that access to those would not be compromised by deliberate/accidental leaks of secrets stored in the cloud.
- No binary artifacts should be published to GitHub releases pages, as these are overwritable by anyone with write permission to the repo.
@@ -174,6 +174,12 @@ new_local_repository(
    path = "third_party/cpp-httplib",
)

new_local_repository(
    name = "nlohmann",
    build_file = "//third_party:nlohmann.BUILD",
    path = "third_party/nlohmann",
)

new_local_repository(
    name = "tensorpipe",
    build_file = "//third_party:tensorpipe.BUILD",
@@ -53,11 +53,6 @@ if(NOT BUILD_LITE_INTERPRETER)
  file(GLOB_RECURSE ATen_CORE_TEST_SRCS "core/*_test.cpp")
endif()
EXCLUDE(ATen_CORE_SRCS "${ATen_CORE_SRCS}" ${ATen_CORE_TEST_SRCS})
# Exclude TensorImpl_test.cpp if compiling without Caffe2
if(NOT BUILD_LITE_INTERPRETER)
  file(GLOB_RECURSE ATen_CORE_EXCLUDED_TEST_SRCS "core/TensorImpl_test.cpp")
  EXCLUDE(ATen_CORE_TEST_SRCS "${ATen_CORE_TEST_SRCS}" ${ATen_CORE_EXCLUDED_TEST_SRCS})
endif()

file(GLOB base_h "*.h" "detail/*.h" "cpu/*.h" "cpu/vec/vec512/*.h" "cpu/vec/vec256/*.h" "cpu/vec/vec256/vsx/*.h" "cpu/vec/vec256/zarch/*.h" "cpu/vec/*.h" "quantized/*.h" "functorch/*.h")
file(GLOB base_cpp "*.cpp" "detail/*.cpp" "cpu/*.cpp" "functorch/*.cpp")
@@ -473,6 +468,7 @@ endif()

if(USE_CUDA AND NOT USE_ROCM)
  list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/include)
  list(APPEND ATen_CUDA_INCLUDE ${CMAKE_CURRENT_SOURCE_DIR}/../../../third_party/cutlass/tools/util/include)
  if($ENV{ATEN_STATIC_CUDA})
    list(APPEND ATen_CUDA_DEPENDENCY_LIBS
      ${CUDA_LIBRARIES}
@@ -222,7 +222,7 @@ c10::intrusive_ptr<c10::TensorImpl> CPUGeneratorImpl::get_state() const {
  static const size_t size = sizeof(CPUGeneratorImplState);
  static_assert(std::is_standard_layout_v<CPUGeneratorImplState>, "CPUGeneratorImplState is not a PODType");

  auto state_tensor = at::detail::empty_cpu({(int64_t)size}, ScalarType::Byte, c10::nullopt, c10::nullopt, c10::nullopt, c10::nullopt);
  auto state_tensor = at::detail::empty_cpu({(int64_t)size}, ScalarType::Byte, std::nullopt, std::nullopt, std::nullopt, std::nullopt);
  auto rng_state = state_tensor.data_ptr();

  // accumulate generator data to be copied into byte tensor
```diff
@@ -3,7 +3,7 @@
 #include <ATen/core/Generator.h>
 #include <ATen/core/MT19937RNGEngine.h>
 #include <c10/core/GeneratorImpl.h>
-#include <c10/util/Optional.h>
+#include <optional>
 
 namespace at {
 
```
```diff
@@ -56,6 +56,14 @@ void Context::setDeterministicCuDNN(bool b) {
   deterministic_cudnn = b;
 }
 
+bool Context::deterministicMkldnn() const {
+  return deterministic_mkldnn;
+}
+
+void Context::setDeterministicMkldnn(bool b) {
+  deterministic_mkldnn = b;
+}
+
 bool Context::deterministicAlgorithms() const {
   return _deterministic_algorithms;
 }
```
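A brief usage sketch of the getter/setter pair this hunk adds, driven through the global context the same way the existing cuDNN determinism flag is:

```cpp
#include <ATen/Context.h>

int main() {
  // Flip the new MKLDNN determinism flag and read it back.
  at::globalContext().setDeterministicMkldnn(true);
  return at::globalContext().deterministicMkldnn() ? 0 : 1;
}
```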
```diff
@@ -145,6 +153,13 @@ void Context::setSDPUseCuDNN(bool e) {
   enabled_cudnnSDP = e;
 }
 
+void Context::setSDPUseOverrideable(bool e) {
+  enabled_overrideable = e;
+}
+
+bool Context::userEnabledOverrideableSDP() const {
+  return enabled_overrideable;
+}
+
 // NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays)
 static const char cublas_config_var_name[] = "CUBLAS_WORKSPACE_CONFIG";
```
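The same context-flag pattern applies to the new "overrideable" SDP backend toggle; a minimal sketch (per the member default added further below, the flag starts out enabled):

```cpp
#include <ATen/Context.h>

int main() {
  // Disable the overrideable SDP backend and verify the readback.
  at::globalContext().setSDPUseOverrideable(false);
  return at::globalContext().userEnabledOverrideableSDP() ? 1 : 0;
}
```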
```diff
@@ -263,7 +278,24 @@ void Context::setLinalgPreferredBackend(at::LinalgBackend b) {
   }
 }
 
-at::BlasBackend Context::blasPreferredBackend() const {
+at::BlasBackend Context::blasPreferredBackend() {
+#ifdef USE_ROCM
+  if (blas_preferred_backend == at::BlasBackend::Cublaslt) {
+    static const bool hipblaslt_unsupported = []() {
+      static const std::vector<std::string> archs = {"gfx90a", "gfx940", "gfx941", "gfx942"};
+      for (auto index: c10::irange(getNumGPUs())) {
+        if (!detail::getCUDAHooks().isGPUArch(index, archs)) {
+          TORCH_WARN_ONCE(
+            "Attempting to use hipBLASLt on an unsupported architecture! "
+            "Overriding blas backend to hipblas");
+          return true;
+        }
+      }
+      return false;
+    }();
+    if (hipblaslt_unsupported) blas_preferred_backend = at::BlasBackend::Cublas;
+  }
+#endif
   return blas_preferred_backend;
 }
 
```
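The hunk leans on a run-once idiom: a function-local `static const` initialized by an immediately invoked lambda, so the potentially expensive architecture probe executes at most once per process. A standalone sketch of just that idiom, with hypothetical names:

```cpp
#include <cstdio>

// Stands in for the per-GPU architecture probe in the diff above.
static bool expensive_probe() {
  std::puts("probing...");  // observable side effect: printed exactly once
  return false;
}

static bool is_unsupported() {
  // Thread-safe since C++11: initialization of a local static runs once,
  // and all later calls reuse the cached result.
  static const bool cached = []() { return expensive_probe(); }();
  return cached;
}

int main() {
  is_unsupported();
  is_unsupported();  // served from the cached value; no second probe
  return 0;
}
```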
```diff
@@ -59,12 +59,14 @@ class TORCH_API Context {
     }
   }
   const AcceleratorHooksInterface& getAcceleratorHooksInterface(
-      std::optional<c10::DeviceType> opt_device_type = c10::nullopt) {
+      std::optional<c10::DeviceType> opt_device_type = std::nullopt) {
     c10::DeviceType device_type = opt_device_type.has_value()
         ? opt_device_type.value()
         : at::getAccelerator(true).value();
     if (device_type == at::kCUDA) {
       return at::detail::getCUDAHooks();
     } else if (device_type == at::kXPU) {
       return at::detail::getXPUHooks();
+    } else if (device_type == at::kMPS) {
+      return at::detail::getMPSHooks();
     } else if (device_type == at::kPrivateUse1) {
```
```diff
@@ -188,6 +190,8 @@ class TORCH_API Context {
   void setBenchmarkLimitCuDNN(int);
   bool deterministicCuDNN() const;
   void setDeterministicCuDNN(bool);
+  bool deterministicMkldnn() const;
+  void setDeterministicMkldnn(bool);
   bool userEnabledNNPACK() const;
   void setUserEnabledNNPACK(bool e);
 
```
```diff
@@ -214,10 +218,13 @@ class TORCH_API Context {
   void setSDPUseCuDNN(bool);
   bool userEnabledCuDNNSDP() const;
 
+  void setSDPUseOverrideable(bool);
+  bool userEnabledOverrideableSDP() const;
+
   at::LinalgBackend linalgPreferredBackend() const;
   void setLinalgPreferredBackend(at::LinalgBackend);
 
-  at::BlasBackend blasPreferredBackend() const;
+  at::BlasBackend blasPreferredBackend();
   void setBlasPreferredBackend(at::BlasBackend);
 
   // Note [Enabling Deterministic Operations]
```
```diff
@@ -358,6 +365,7 @@ class TORCH_API Context {
   c10::once_flag thp_init;
   bool enabled_cudnn = true;
   bool deterministic_cudnn = false;
+  bool deterministic_mkldnn = false;
   bool _deterministic_algorithms = false;
   bool _deterministic_algorithms_warn_only = false;
   bool _deterministic_fill_uninitialized_memory = true;
```
```diff
@@ -365,6 +373,7 @@ class TORCH_API Context {
   bool enabled_mem_efficientSDP = true;
   bool enabled_mathSDP = true;
   bool enabled_cudnnSDP = true;
+  bool enabled_overrideable = true;
 #ifdef USE_ROCM
   bool benchmark_cudnn = true;
 #else
```
```diff
@@ -398,7 +407,7 @@ class TORCH_API Context {
   bool release_original_weights = false;
 #endif
   bool display_vmap_fallback_warnings_ = false;
-  std::optional<at::QEngine> quantized_engine = c10::nullopt;
+  std::optional<at::QEngine> quantized_engine = std::nullopt;
   bool enable_sparse_tensor_invariant_checks = false;
   bool allow_fp16_reduction_cpu = false;
 
```
```diff
@@ -407,73 +416,73 @@ class TORCH_API Context {
 
 TORCH_API Context& globalContext();
 
-static inline void init() {
+inline void init() {
   globalContext();
 }
 
 TORCH_API Allocator* getCPUAllocator();
 
-static inline DeprecatedTypeProperties& getDeprecatedTypeProperties(
+inline DeprecatedTypeProperties& getDeprecatedTypeProperties(
     Backend p,
     ScalarType s) {
   return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
       p, s);
 }
 
-static inline DeprecatedTypeProperties& CPU(ScalarType s) {
+inline DeprecatedTypeProperties& CPU(ScalarType s) {
   return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
       Backend::CPU, s);
 }
 
-static inline DeprecatedTypeProperties& CUDA(ScalarType s) {
+inline DeprecatedTypeProperties& CUDA(ScalarType s) {
   return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
       Backend::CUDA, s);
 }
 
-static inline DeprecatedTypeProperties& HIP(ScalarType s) {
+inline DeprecatedTypeProperties& HIP(ScalarType s) {
   return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
       Backend::HIP, s);
 }
 
-static inline DeprecatedTypeProperties& MPS(ScalarType s) {
+inline DeprecatedTypeProperties& MPS(ScalarType s) {
   return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
       Backend::MPS, s);
 }
 
-static inline bool hasCUDA() {
+inline bool hasCUDA() {
   return globalContext().hasCUDA();
 }
 
-static inline bool hasMTIA() {
+inline bool hasMTIA() {
   return globalContext().hasMTIA();
 }
 
-static inline bool hasHIP() {
+inline bool hasHIP() {
   return globalContext().hasHIP();
 }
 
-static inline bool hasIPU() {
+inline bool hasIPU() {
   return globalContext().hasIPU();
 }
 
-static inline bool hasXLA() {
+inline bool hasXLA() {
   return globalContext().hasXLA();
 }
 
-static inline bool hasMPS() {
+inline bool hasMPS() {
   return globalContext().hasMPS();
 }
 
-static inline bool hasMAIA() {
+inline bool hasMAIA() {
   return globalContext().hasMAIA();
 }
 
-static inline bool hasXPU() {
+inline bool hasXPU() {
   return globalContext().hasXPU();
 }
 
 // Despite its name, this function returns the number of *CUDA* GPUs.
-static inline size_t getNumGPUs() {
+inline size_t getNumGPUs() {
   // WARNING: DO NOT ADD LOGIC TO HANDLE OTHER DEVICE TYPES TO THIS
   // FUNCTION. If you are interested in interrogating the number of
   // devices for a specific device type, add that function to the
@@ -492,27 +501,27 @@ static inline size_t getNumGPUs() {
   }
 }
 
-static inline bool hasOpenMP() {
+inline bool hasOpenMP() {
   return globalContext().hasOpenMP();
 }
 
-static inline bool hasMKL() {
+inline bool hasMKL() {
   return globalContext().hasMKL();
 }
 
-static inline bool hasLAPACK() {
+inline bool hasLAPACK() {
   return globalContext().hasLAPACK();
 }
 
-static inline bool hasMAGMA() {
+inline bool hasMAGMA() {
   return globalContext().hasMAGMA();
 }
 
-static inline bool hasMKLDNN() {
+inline bool hasMKLDNN() {
   return globalContext().hasMKLDNN();
 }
 
-static inline void manual_seed(uint64_t seed) {
+inline void manual_seed(uint64_t seed) {
   auto gen = globalContext().defaultGenerator(c10::DeviceType::CPU);
   {
     // See Note [Acquire lock when using random generators]
```
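The `static inline` to `inline` sweep above is a linkage change, not a behavioral one. A small sketch of the distinction, assuming a header included from many translation units:

```cpp
// In a header: `static inline` would give this function internal linkage, so
// every translation unit that includes the header carries its own private
// copy (distinct addresses, duplicated object code). Plain `inline` keeps
// external linkage; the linker deduplicates all the definitions into one.
inline bool has_everything() {
  return true;
}

int main() {
  return has_everything() ? 0 : 1;
}
```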
```diff
@@ -115,6 +115,9 @@ static DLDevice getDLDevice(const Tensor& tensor, c10::DeviceIndex device_id) {
       ctx.device_id =
           at::detail::getXPUHooks().getGlobalIdxFromDevice(tensor.device());
       break;
+    case DeviceType::MAIA:
+      ctx.device_type = DLDeviceType::kDLMAIA;
+      break;
     default:
       TORCH_CHECK(false, "Cannot pack tensors on " + tensor.device().str());
   }
```
```diff
@@ -141,6 +144,8 @@ static Device getATenDevice(const DLDevice& ctx, void* data) {
 #endif
     case DLDeviceType::kDLOneAPI:
       return at::detail::getXPUHooks().getDeviceFromPtr(data);
+    case DLDeviceType::kDLMAIA:
+      return at::Device(DeviceType::MAIA, ctx.device_id);
     default:
       TORCH_CHECK(
           false, "Unsupported device_type: ", std::to_string(ctx.device_type));
```
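For context, a hedged round-trip through the DLPack converters these two hunks extend; with the new cases, a MAIA tensor would travel through `kDLMAIA` the same way the CPU tensor below travels through its device type:

```cpp
#include <ATen/ATen.h>
#include <ATen/DLConvertor.h>

int main() {
  at::Tensor t = at::ones({2, 3});
  // getDLDevice() (patched above) fills in the DLDevice for t's device type.
  DLManagedTensor* managed = at::toDLPack(t);
  // getATenDevice() (also patched above) maps the DLDevice back to ATen.
  at::Tensor back = at::fromDLPack(managed);
  return back.equal(t) ? 0 : 1;
}
```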
```diff
@@ -1,39 +1,37 @@
+#include <ATen/DeviceAccelerator.h>
 #include <ATen/Context.h>
-
-#include <ATen/DeviceAccelerator.h>
 namespace at {
 
 C10_API std::optional<DeviceType> getAccelerator(bool checked) {
-#define CHECK_NO_CUDA \
-  TORCH_CHECK(!at::hasCUDA(), "Cannot have both CUDA and PrivateUse1");
-
-#define CHECK_NO_PU1 \
-  TORCH_CHECK(!is_privateuse1_backend_registered(), "Cannot have both CUDA and PrivateUse1");
-
-#define CHECK_NO_MTIA \
-  TORCH_CHECK(!at::hasMTIA(), "Cannot have MTIA with other devices");
+#define DETECT_AND_ASSIGN_ACCELERATOR(device_name) \
+  if (at::has##device_name()) {                    \
+    device_type = k##device_name;                  \
+    TORCH_CHECK(                                   \
+        !is_accelerator_detected,                  \
+        "Cannot have ",                            \
+        device_type.value(),                       \
+        " with other accelerators.");              \
+    is_accelerator_detected = true;                \
+  }
 
   if (is_privateuse1_backend_registered()) {
-    // We explicitly allow PrivateUse1 and another device at the same time
-    // as we use this for testing.
-    // Whenever a PrivateUse1 device is registered, use it first.
+    // We explicitly allow PrivateUse1 and another device at the same time as we
+    // use this for testing. Whenever a PrivateUse1 device is registered, use it
+    // first.
     return kPrivateUse1;
-  } else if (at::hasCUDA()) {
-    CHECK_NO_PU1
-    CHECK_NO_MTIA
-    return kCUDA;
-  } else if (at::hasMTIA()) {
-    CHECK_NO_CUDA
-    CHECK_NO_PU1
-    return kMTIA;
-  } else {
-    TORCH_CHECK(!checked, "Cannot access accelerator device when none is available.")
-    return std::nullopt;
   }
+  std::optional<DeviceType> device_type = std::nullopt;
+  bool is_accelerator_detected = false;
+  DETECT_AND_ASSIGN_ACCELERATOR(CUDA)
+  DETECT_AND_ASSIGN_ACCELERATOR(MTIA)
+  DETECT_AND_ASSIGN_ACCELERATOR(XPU)
+  if (checked) {
+    TORCH_CHECK(
+        device_type, "Cannot access accelerator device when none is available.")
+  }
+  return device_type;
 
-#undef CHECK_NO_CUDA
-#undef CHECK_NO_PU1
+#undef DETECT_AND_ASSIGN_ACCELERATOR
 }
 
 } // namespace at
```
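A brief usage sketch of the refactored entry point; per the new code, `checked=false` returns `std::nullopt` when no accelerator is present, while `checked=true` turns that case into a hard error:

```cpp
#include <ATen/DeviceAccelerator.h>

#include <optional>

int main() {
  std::optional<c10::DeviceType> acc = at::getAccelerator(/*checked=*/false);
  // With checked=true, the TORCH_CHECK in the diff would throw instead of
  // returning an empty optional.
  return acc.has_value() ? 0 : 1;
}
```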
```diff
@@ -13,9 +13,9 @@
 // - It provides a set of common APIs as defined by AcceleratorHooksInterface
 //
 // As of today, accelerator devices are (in no particular order):
-// CUDA, MTIA, PrivateUse1
+// CUDA, MTIA, XPU, PrivateUse1
 // We want to add once all the proper APIs are supported and tested:
-// HIP, MPS, XPU
+// HIP, MPS
 
 namespace at {
 
```
```diff
@@ -17,14 +17,14 @@ namespace at {
 /// Return the Device of a Tensor, if the Tensor is defined.
 inline std::optional<Device> device_of(const Tensor& t) {
   if (t.defined()) {
-    return c10::make_optional(t.device());
+    return std::make_optional(t.device());
   } else {
-    return c10::nullopt;
+    return std::nullopt;
   }
 }
 
 inline std::optional<Device> device_of(const std::optional<Tensor>& t) {
-  return t.has_value() ? device_of(t.value()) : c10::nullopt;
+  return t.has_value() ? device_of(t.value()) : std::nullopt;
 }
 
 /// Return the Device of a TensorList, if the list is non-empty and
@@ -34,7 +34,7 @@ inline std::optional<Device> device_of(ITensorListRef t) {
   if (!t.empty()) {
     return device_of(t.front());
   } else {
-    return c10::nullopt;
+    return std::nullopt;
   }
 }
 
```
```diff
@@ -29,6 +29,7 @@ c10::Allocator* GetCPUAllocatorMaybePinned(bool pin_memory) {
   return c10::GetCPUAllocator();
 }
 
+#ifndef C10_MOBILE
 constexpr uint64_t storage_max() {
   // int64_t and size_t are used somewhat inconsistently throughout ATen.
   // To be safe, storage size calculations must fit in both types.
@@ -38,6 +39,7 @@ constexpr uint64_t storage_max() {
       std::numeric_limits<size_t>::max());
   return std::min(int64_max, size_max);
 }
+#endif
 
 inline void raise_warning_for_complex_half(ScalarType dtype) {
   if (dtype == kComplexHalf) {
```
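A standalone restatement of the `storage_max()` computation wrapped by the new `#ifndef C10_MOBILE` guard, to make the clamp explicit (illustrative, not taken from the diff):

```cpp
#include <algorithm>
#include <cstdint>
#include <limits>

// Largest byte count representable in BOTH int64_t and size_t: on 64-bit
// targets int64_t is the binding constraint, on 32-bit targets size_t is.
constexpr uint64_t storage_max() {
  constexpr auto int64_max =
      static_cast<uint64_t>(std::numeric_limits<int64_t>::max());
  constexpr auto size_max =
      static_cast<uint64_t>(std::numeric_limits<size_t>::max());
  return std::min(int64_max, size_max);
}

static_assert(
    storage_max() <= static_cast<uint64_t>(std::numeric_limits<int64_t>::max()),
    "storage sizes must fit in int64_t");

int main() { return 0; }
```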
```diff
@@ -76,7 +76,7 @@ TORCH_API TensorBase empty_cpu(
     IntArrayRef size,
     ScalarType dtype,
     bool pin_memory = false,
-    std::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);
+    std::optional<c10::MemoryFormat> memory_format_opt = std::nullopt);
 
 TORCH_API TensorBase empty_cpu(
     IntArrayRef size,
@@ -110,7 +110,7 @@ TORCH_API TensorBase empty_strided_cpu(
 TORCH_API TensorBase empty_meta(
     IntArrayRef size,
     ScalarType dtype,
-    std::optional<c10::MemoryFormat> memory_format_opt = c10::nullopt);
+    std::optional<c10::MemoryFormat> memory_format_opt = std::nullopt);
 
 TORCH_API TensorBase empty_meta(
     IntArrayRef size,
```
```diff
@@ -462,7 +462,7 @@ inline Tensor _sum_to(
       reduce_dims.push_back(i);
   }
   for (int64_t i = leading_dims; i < static_cast<int64_t>(sizes.size()); ++i) {
-    if (shape[i - leading_dims] == 1 &&
+    if (TORCH_GUARD_SIZE_OBLIVIOUS(sym_eq(shape[i - leading_dims], 1)) &&
         TORCH_GUARD_SIZE_OBLIVIOUS(sym_ne(sizes[i], 1))) {
       reduce_dims.push_back(i);
     }
@@ -499,7 +499,7 @@ inline Tensor sum_to(
   return _sum_to(std::move(tensor), shape, always_return_non_view);
 }
 
-static inline bool is_expandable_to(
+inline bool is_expandable_to(
     SymIntArrayRef shape,
     c10::SymIntArrayRef desired) {
   size_t ndim = shape.size();
@@ -517,7 +517,7 @@ static inline bool is_expandable_to(
   return true;
 }
 
-static inline bool is_expandable_to(IntArrayRef shape, IntArrayRef desired) {
+inline bool is_expandable_to(IntArrayRef shape, IntArrayRef desired) {
   auto sym_shape = c10::SymIntArrayRef(
       reinterpret_cast<const c10::SymInt*>(shape.data()), shape.size());
   auto sym_desired = c10::SymIntArrayRef(
```
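A hedged usage sketch of `at::is_expandable_to` (now plain `inline`): trailing dimensions are aligned, a source dimension must be 1 or equal to the target, and missing leading dimensions count as 1:

```cpp
#include <ATen/ExpandUtils.h>

int main() {
  // {3, 1} broadcasts against the trailing {3, 4} of the target.
  bool ok  = at::is_expandable_to({3, 1}, {2, 3, 4});  // expected: true
  // 3 vs 4 mismatches and 3 != 1, so expansion is impossible.
  bool bad = at::is_expandable_to({2, 3}, {3, 4});     // expected: false
  return (ok && !bad) ? 0 : 1;
}
```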
```diff
@@ -303,7 +303,7 @@ Tensor FunctionalInverses::_nested_view_from_buffer_inverse(const Tensor& base,
   return Tensor();
 }
 
-Tensor FunctionalInverses::_nested_view_from_jagged_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, const Tensor& offsets, const Tensor& dummy, const std::optional<Tensor>& lengths, int64_t ragged_idx) {
+Tensor FunctionalInverses::_nested_view_from_jagged_inverse(const Tensor& base, const Tensor& mutated_view, InverseReturnMode inverse_return_mode, const Tensor& offsets, const Tensor& dummy, const std::optional<Tensor>& lengths, int64_t ragged_idx, const std::optional<Tensor>& min_seqlen, const std::optional<Tensor>& max_seqlen) {
   auto values = at::_nested_get_values(mutated_view);
   if (inverse_return_mode != InverseReturnMode::NeverView) {
     return values;
@@ -317,7 +317,12 @@ Tensor FunctionalInverses::_nested_get_values_inverse(const Tensor& base, const
   auto lengths = at::_nested_get_lengths(base);
   auto ragged_idx = at::_nested_get_ragged_idx(base);
   auto dummy = at::_nested_get_jagged_dummy(base);
-  auto nt = at::_nested_view_from_jagged(mutated_view, offsets, dummy, lengths, ragged_idx);
+  auto min_seqlen = at::_nested_get_min_seqlen(base);
+  auto max_seqlen = at::_nested_get_max_seqlen(base);
+  auto nt = at::_nested_view_from_jagged(
+      mutated_view, offsets, dummy, lengths, ragged_idx,
+      (min_seqlen.defined() ? std::optional<Tensor>(min_seqlen) : std::nullopt),
+      (max_seqlen.defined() ? std::optional<Tensor>(max_seqlen) : std::nullopt));
 
   if (inverse_return_mode != InverseReturnMode::NeverView) {
     return nt;
```
```diff
@@ -514,6 +514,9 @@ c10::SymInt FunctionalTensorWrapper::sym_size_custom(int64_t d) const {
 c10::SymInt FunctionalTensorWrapper::sym_storage_offset_custom() const {
   return value_.unsafeGetTensorImpl()->sym_storage_offset();
 }
+c10::Layout FunctionalTensorWrapper::layout_impl() const {
+  return value_.unsafeGetTensorImpl()->layout();
+}
 
 namespace functionalization {
 namespace impl {
@@ -528,9 +531,9 @@ Tensor to_functional_tensor(const Tensor& tensor) {
 }
 std::optional<Tensor> to_functional_tensor(const std::optional<Tensor>& tensor) {
   if (tensor.has_value()) {
-    return c10::make_optional<Tensor>(to_functional_tensor(*tensor));
+    return std::make_optional<Tensor>(to_functional_tensor(*tensor));
   }
-  return c10::nullopt;
+  return std::nullopt;
 }
 c10::List<::std::optional<Tensor>> to_functional_tensor(const c10::List<::std::optional<Tensor>>& t_list) {
   c10::List<::std::optional<Tensor>> outputs;
@@ -566,9 +569,9 @@ Tensor from_functional_tensor(const Tensor& tensor, bool assert_functional) {
 }
 std::optional<Tensor> from_functional_tensor(const std::optional<Tensor>& t, bool assert_functional) {
   if (t.has_value()) {
-    return c10::make_optional<Tensor>(from_functional_tensor(*t, assert_functional));
+    return std::make_optional<Tensor>(from_functional_tensor(*t, assert_functional));
   }
-  return c10::nullopt;
+  return std::nullopt;
 }
 std::vector<Tensor> from_functional_tensor(ITensorListRef t_list) {
   std::vector<Tensor> outputs;
```
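The last two hunks repeat a small optional-lifting pattern: map a function over `std::optional`, producing `std::nullopt` when the input is empty. A generic standalone sketch of that pattern (names are illustrative, not from the diff):

```cpp
#include <optional>
#include <string>

// Lift a plain transformation over std::optional, the same shape that
// to_functional_tensor / from_functional_tensor use for at::Tensor above.
std::optional<std::string> describe(const std::optional<int>& v) {
  if (v.has_value()) {
    return std::make_optional<std::string>(std::to_string(*v));
  }
  return std::nullopt;
}

int main() {
  return describe(42).has_value() ? 0 : 1;
}
```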
Some files were not shown because too many files have changed in this diff.