Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-22 22:25:10 +08:00)
Compare commits
1736 Commits
.gitignore (vendored), 18 changes
@@ -2,16 +2,34 @@ build/
dist/
torch.egg-info/
*/**/__pycache__
torch/version.py
torch/csrc/generic/TensorMethods.cpp
torch/lib/*.so*
torch/lib/*.dylib*
torch/lib/*.h
torch/lib/build
torch/lib/tmp_install
torch/lib/include
torch/lib/torch_shm_manager
torch/csrc/cudnn/cuDNN.cpp
torch/csrc/nn/THNN.cwrap
torch/csrc/nn/THNN.cpp
torch/csrc/nn/THCUNN.cwrap
torch/csrc/nn/THCUNN.cpp
torch/csrc/nn/THNN_generic.cwrap
torch/csrc/nn/THNN_generic.cpp
torch/csrc/nn/THNN_generic.h
docs/src/**/*
test/data/legacy_modules.t7
test/data/gpu_tensors.pt
test/htmlcov
test/.coverage
*/*.pyc
*/**/*.pyc
*/**/**/*.pyc
*/**/**/**/*.pyc
*/**/**/**/**/*.pyc
*/*.so*
*/**/*.so*
*/**/*.dylib*
test/data/legacy_serialized.pt

.travis.yml, 31 changes
@@ -3,22 +3,27 @@ language: python
python:
- 2.7.8
- 2.7
- 3.3
- 3.4
- 3.5
- 3.6
- nightly

cache:
- ccache
- directories:
  - $HOME/.ccache

install:
- export CC="gcc-4.8"
- export CXX="g++-4.8"
- travis_retry pip install -r requirements.txt
- travis_retry pip install .
- unset CCACHE_DISABLE
- export CCACHE_DIR=$HOME/.ccache
- export CC="ccache gcc-4.8"
- export CXX="ccache g++-4.8"
- ccache --show-stats
- travis_retry pip install --upgrade pip setuptools wheel
- travis_retry pip install -r requirements.txt --only-binary=scipy
- python setup.py install

script:
- python test/test_torch.py
- python test/test_legacy_nn.py
- python test/test_nn.py
- python test/test_autograd.py
- OMP_NUM_THREADS=2 ./test/run_test.sh

addons:
  apt:
@@ -35,3 +40,9 @@ sudo: false

matrix:
  fast_finish: true
  include:
    env: LINT_CHECK
    python: "2.7"
    addons: true
    install: pip install flake8
    script: flake8

CONTRIBUTING.md (new file), 74 changes
@@ -0,0 +1,74 @@
## Contributing to PyTorch

If you are interested in contributing to PyTorch, your contributions will fall
into two categories:
1. You want to propose a new feature and implement it
    - Post about your intended feature, and we shall discuss the design and
      implementation. Once we agree that the plan looks good, go ahead and implement it.
2. You want to implement a feature or bug-fix for an outstanding issue
    - Look at the outstanding issues here: https://github.com/pytorch/pytorch/issues
    - Especially look at the Low Priority and Medium Priority issues
    - Pick an issue and comment on it to say that you want to work on that feature
    - If you need more context on a particular issue, please ask and we shall provide.

Once you finish implementing a feature or bug-fix, please send a Pull Request to
https://github.com/pytorch/pytorch

If you are not familiar with creating a Pull Request, here are some guides:
- http://stackoverflow.com/questions/14680711/how-to-do-a-github-pull-request
- https://help.github.com/articles/creating-a-pull-request/


## Developing locally with PyTorch

To develop locally with PyTorch, here are some tips:

1. Uninstall all existing pytorch installs:
```
conda uninstall pytorch
pip uninstall torch
pip uninstall torch # run this command twice
```

2. Locally clone a copy of PyTorch from source:

```
git clone https://github.com/pytorch/pytorch
cd pytorch
```

3. Install PyTorch in `build develop` mode:

A full set of instructions on installing PyTorch from source is here:
https://github.com/pytorch/pytorch#from-source

The change you have to make is to replace

`python setup.py install`

with

```
python setup.py build develop
```

This is especially useful if you are only changing Python files.

This mode will symlink the Python files from the current local source tree into the
Python install.

Hence, if you modify a Python file, you do not need to reinstall pytorch again and again.

For example:
- Install local pytorch in `build develop` mode
- modify your Python file torch/__init__.py (for example)
- test functionality
- modify your Python file torch/__init__.py
- test functionality
- modify your Python file torch/__init__.py
- test functionality

You do not need to repeatedly install after modifying Python files.


Hope this helps, and thanks for considering contributing.
Dockerfile (new file), 38 changes
@@ -0,0 +1,38 @@
FROM nvidia/cuda:8.0-devel-ubuntu16.04

RUN echo "deb http://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64 /" > /etc/apt/sources.list.d/nvidia-ml.list

ENV CUDNN_VERSION 6.0.20
RUN apt-get update && apt-get install -y --no-install-recommends \
    build-essential \
    cmake \
    git \
    curl \
    ca-certificates \
    libjpeg-dev \
    libpng-dev \
    libcudnn6=$CUDNN_VERSION-1+cuda8.0 \
    libcudnn6-dev=$CUDNN_VERSION-1+cuda8.0 && \
    rm -rf /var/lib/apt/lists/*

RUN curl -o ~/miniconda.sh -O https://repo.continuum.io/miniconda/Miniconda3-4.2.12-Linux-x86_64.sh && \
    chmod +x ~/miniconda.sh && \
    ~/miniconda.sh -b -p /opt/conda && \
    rm ~/miniconda.sh && \
    /opt/conda/bin/conda install conda-build && \
    /opt/conda/bin/conda create -y --name pytorch-py35 python=3.5.2 numpy scipy ipython mkl && \
    /opt/conda/bin/conda clean -ya
ENV PATH /opt/conda/envs/pytorch-py35/bin:$PATH
RUN conda install --name pytorch-py35 -c soumith magma-cuda80
# This must be done before pip so that requirements.txt is available
WORKDIR /opt/pytorch
COPY . .

RUN cat requirements.txt | xargs -n1 pip install --no-cache-dir && \
    TORCH_CUDA_ARCH_LIST="3.5 5.2 6.0 6.1+PTX" TORCH_NVCC_FLAGS="-Xfatbin -compress-all" \
    CMAKE_LIBRARY_PATH=/opt/conda/envs/pytorch-py35/lib \
    CMAKE_INCLUDE_PATH=/opt/conda/envs/pytorch-py35/include \
    pip install -v .

WORKDIR /workspace
RUN chmod -R a+w /workspace
LICENSE (new file), 38 changes
@@ -0,0 +1,38 @@
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
Copyright (c) 2011-2013 NYU (Clement Farabet)
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)

All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
   and IDIAP Research Institute nor the names of its contributors may be
   used to endorse or promote products derived from this software without
   specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
Makefile (deleted), 22 changes
@@ -1,22 +0,0 @@
# Add main target here - setup.py doesn't understand the need to recompile
# after generic files change
.PHONY: all clean torch

all: install

torch:
	python3 setup.py build

install:
	python3 setup.py install

clean:
	@rm -rf build
	@rm -rf dist
	@rm -rf torch.egg-info
	@rm -rf tools/__pycache__
	@rm -rf torch/csrc/generic/TensorMethods.cpp
	@rm -rf torch/lib/tmp_install
	@rm -rf torch/lib/build
	@rm -rf torch/lib/*.so*
	@rm -rf torch/lib/*.h
README.md, 470 changes
@@ -1,271 +1,243 @@
# pytorch [alpha-1]
<p align="center"><img width="40%" src="docs/source/_static/img/pytorch-logo-dark.png" /></p>

The project is still under active development and is likely to change drastically over short periods of time.
We will announce API changes and important developments via a newsletter and GitHub issues, and will post a link to the issues on Slack.
Please remember that, at this stage, this is an invite-only closed alpha; please don't distribute the code further.
This is done so that we can control development tightly and rapidly during the initial phases, with feedback from you.

--------------------------------------------------------------------------------

PyTorch is a Python package that provides two high-level features:
- Tensor computation (like numpy) with strong GPU acceleration
- Deep neural networks built on a tape-based autograd system

You can reuse your favorite Python packages such as numpy, scipy and Cython to extend PyTorch when needed.

We are in an early-release Beta. Expect some adventures and rough edges.

- [More About PyTorch](#more-about-pytorch)
- [Installation](#installation)
  - [Binaries](#binaries)
  - [From source](#from-source)
  - [Docker image](#docker-image)
- [Getting Started](#getting-started)
- [Communication](#communication)
- [Releases and Contributing](#releases-and-contributing)
- [The Team](#the-team)

| System | Python | Status |
| --- | --- | --- |
| Linux CPU | 2.7.8, 2.7, 3.5, nightly | [Build Status](https://travis-ci.org/pytorch/pytorch) |
| Linux GPU | 2.7 | [Build Status](https://build.pytorch.org/job/pytorch-master-py2) |
| Linux GPU | 3.5 | [Build Status](https://build.pytorch.org/job/pytorch-master-py3) |

## More about PyTorch

At a granular level, PyTorch is a library that consists of the following components:

<table>
 <tr>
    <td><b> torch </b></td>
    <td> a Tensor library like NumPy, with strong GPU support </td>
 </tr>
 <tr>
    <td><b> torch.autograd </b></td>
    <td> a tape-based automatic differentiation library that supports all differentiable Tensor operations in torch </td>
 </tr>
 <tr>
    <td><b> torch.nn </b></td>
    <td> a neural networks library deeply integrated with autograd, designed for maximum flexibility </td>
 </tr>
 <tr>
    <td><b> torch.multiprocessing </b></td>
    <td> Python multiprocessing, but with magical memory sharing of torch Tensors across processes. Useful for data loading and hogwild training. </td>
 </tr>
 <tr>
    <td><b> torch.utils </b></td>
    <td> DataLoader, Trainer and other utility functions for convenience </td>
 </tr>
 <tr>
    <td><b> torch.legacy(.nn/.optim) </b></td>
    <td> legacy code that has been ported over from torch for backward-compatibility reasons </td>
 </tr>
</table>
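To make the torch.multiprocessing row concrete, here is a minimal sketch (not from the README itself) of a tensor shared with a child process; it assumes a CPU tensor and the standard `share_memory_()` / `Process` API:

```python
import torch
import torch.multiprocessing as mp  # drop-in for the stdlib module, plus tensor sharing

def worker(t):
    t.add_(1)  # in-place update, visible to the parent through shared memory

if __name__ == '__main__':
    t = torch.zeros(3)
    t.share_memory_()  # move the underlying storage into shared memory
    p = mp.Process(target=worker, args=(t,))
    p.start()
    p.join()
    print(t)  # reflects the child's update: all three elements are now 1
```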
Usually, one uses PyTorch either as:

- a replacement for numpy, to use the power of GPUs
- a deep learning research platform that provides maximum flexibility and speed

Elaborating further:

### A GPU-ready Tensor library

If you use numpy, then you have used Tensors (a.k.a. ndarray).

<p align=center><img width="30%" src="docs/source/_static/img/tensor_illustration.png" /></p>

PyTorch provides Tensors that can live either on the CPU or the GPU, and accelerates
compute by a huge amount.

We provide a wide variety of tensor routines to accelerate and fit your scientific computation needs,
such as slicing, indexing, math operations, linear algebra and reductions.
And they are fast!
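As a minimal sketch of that claim (the `is_available()` guard keeps it runnable on CPU-only machines; the shapes and arithmetic are arbitrary):

```python
import torch

a = torch.rand(5, 3)    # CPU tensor, much like a numpy ndarray
b = a * 2 + 1           # math ops run on the CPU here

if torch.cuda.is_available():
    c = a.cuda()        # same data, now resident on the GPU
    d = c * 2 + 1       # the same expression, now computed on the GPU
```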
### Dynamic Neural Networks: Tape-based Autograd

PyTorch has a unique way of building neural networks: using and replaying a tape recorder.

Most frameworks, such as `TensorFlow`, `Theano`, `Caffe` and `CNTK`, have a static view of the world.
One has to build a neural network and reuse the same structure again and again.
Changing the way the network behaves means that one has to start from scratch.

With PyTorch, we use a technique called reverse-mode auto-differentiation, which allows you to
change the way your network behaves arbitrarily, with zero lag or overhead. Our inspiration comes
from several research papers on this topic, as well as current and past work such as
[autograd](https://github.com/twitter/torch-autograd),
[autograd](https://github.com/HIPS/autograd),
[Chainer](http://chainer.org), etc.

While this technique is not unique to PyTorch, it's one of the fastest implementations of it to date.
You get the best of speed and flexibility for your crazy research.

<p align=center><img width="80%" src="docs/source/_static/img/dynamic_graph.gif" /></p>
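Here is a minimal sketch of what "replaying a tape" looks like in practice, written against the `Variable` wrapper of this era; the loop bound and values are arbitrary, and the point is that ordinary Python control flow decides the graph shape on every run:

```python
import torch
from torch.autograd import Variable  # records operations as they execute

x = Variable(torch.ones(2, 2), requires_grad=True)
y = x * 3
while y.data.norm() < 100:    # a plain Python loop; the tape simply grows
    y = y * 2
y.backward(torch.ones(2, 2))  # replay the tape in reverse to get gradients
print(x.grad)
```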
### Python first
|
||||
|
||||
PyTorch is not a Python binding into a monolothic C++ framework.
|
||||
It is built to be deeply integrated into Python.
|
||||
You can use it naturally like you would use numpy / scipy / scikit-learn etc.
|
||||
You can write your new neural network layers in Python itself, using your favorite libraries
|
||||
and use packages such as Cython and Numba.
|
||||
Our goal is to not reinvent the wheel where appropriate.
|
||||
|
||||
### Imperative experiences
|
||||
|
||||
PyTorch is designed to be intuitive, linear in thought and easy to use.
|
||||
When you execute a line of code, it gets executed. There isn't an asynchronous view of the world.
|
||||
When you drop into a debugger, or receive error messages and stack traces, understanding them is straight-forward.
|
||||
The stack-trace points to exactly where your code was defined.
|
||||
We hope you never spend hours debugging your code because of bad stack traces or asynchronous and opaque execution engines.
|
||||
|
||||
### Fast and Lean
|
||||
|
||||
PyTorch has minimal framework overhead. We integrate acceleration libraries
|
||||
such as Intel MKL and NVIDIA (CuDNN, NCCL) to maximize speed.
|
||||
At the core, its CPU and GPU Tensor and Neural Network backends
|
||||
(TH, THC, THNN, THCUNN) are written as independent libraries with a C99 API.
|
||||
They are mature and have been tested for years.
|
||||
|
||||
Hence, PyTorch is quite fast -- whether you run small or large neural networks.
|
||||
|
||||
The memory usage in PyTorch is extremely efficient compared to Torch or some of the alternatives.
|
||||
We've written custom memory allocators for the GPU to make sure that
|
||||
your deep learning models are maximally memory efficient.
|
||||
This enables you to train bigger deep learning models than before.
|
||||
|
||||
### Extensions without pain
|
||||
|
||||
Writing new neural network modules, or interfacing with PyTorch's Tensor API was designed to be straight-forward
|
||||
and with minimal abstractions.
|
||||
|
||||
You can write new neural network layers in Python using the torch API
|
||||
[or your favorite numpy based libraries such as SciPy](http://pytorch.org/tutorials/advanced/numpy_extensions_tutorial.html).
|
||||
|
||||
If you want to write your layers in C/C++, we provide an extension API based on
|
||||
[cffi](http://cffi.readthedocs.io/en/latest/) that is efficient and with minimal boilerplate.
|
||||
There is no wrapper code that needs to be written. You can see [a tutorial here](http://pytorch.org/tutorials/advanced/c_extension.html) and [an example here](https://github.com/pytorch/extension-ffi).
|
||||
|
||||
|
||||
## Installation
|
||||
|
||||
### Binaries
|
||||
Commands to install from binaries via Conda or pip wheels are on our website:
|
||||
|
||||
[http://pytorch.org](http://pytorch.org)
|
||||
|
||||
### From source
|
||||
|
||||
If you are installing from source, we highly recommend installing an [Anaconda](https://www.continuum.io/downloads) environment.
|
||||
You will get a high-quality BLAS library (MKL) and you get a controlled compiler version regardless of your Linux distro.
|
||||
|
||||
Once you have [anaconda](https://www.continuum.io/downloads) installed, here are the instructions.
|
||||
|
||||
If you want to compile with CUDA support, install
|
||||
- [NVIDIA CUDA](https://developer.nvidia.com/cuda-downloads) 7.5 or above
|
||||
- [NVIDIA CuDNN](https://developer.nvidia.com/cudnn) v5.x
|
||||
|
||||
If you want to disable CUDA support, export environment variable `NO_CUDA=1`.
|
||||
|
||||
#### Install optional dependencies
|
||||
|
||||
On Linux
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
pip install .
|
||||
export CMAKE_PREFIX_PATH=[anaconda root directory]
|
||||
|
||||
# Install basic dependencies
|
||||
conda install numpy mkl setuptools cmake gcc cffi
|
||||
|
||||
# Add LAPACK support for the GPU
|
||||
conda install -c soumith magma-cuda75 # or magma-cuda80 if CUDA 8.0
|
||||
```
|
||||
|
||||
To install with CUDA support change `WITH_CUDA = False` to `WITH_CUDA = True` in `setup.py`.
|
||||
On OSX
|
||||
```bash
|
||||
export CMAKE_PREFIX_PATH=[anaconda root directory]
|
||||
conda install numpy setuptools cmake cffi
|
||||
```
|
||||
|
||||
#### Install PyTorch
|
||||
```bash
|
||||
export MACOSX_DEPLOYMENT_TARGET=10.9 # if OSX
|
||||
pip install -r requirements.txt
|
||||
python setup.py install
|
||||
```
|
||||
|
||||
### Docker image
|
||||
|
||||
Dockerfile is supplied to build images with cuda support and cudnn v6. Build as usual
|
||||
```
|
||||
docker build -t pytorch-cudnnv6 .
|
||||
```
|
||||
and run with nvidia-docker:
|
||||
```
|
||||
nvidia-docker run --rm -ti --ipc=host pytorch-cudnnv5
|
||||
```
|
||||
Please note that pytorch uses shared memory to share data between processes, so if torch multiprocessing is used (e.g.
|
||||
for multithreaded data loaders) the default shared memory segment size that container runs with is not enough, and you
|
||||
should increase shared memory size either with --ipc=host or --shm-size command line options to nvidia-docker run.
|
||||
|
||||
|
||||
## Getting Started
|
||||
|
||||
Three pointers to get you started:
|
||||
- [Tutorials: get you started with understanding and using PyTorch](http://pytorch.org/tutorials/)
|
||||
- [Examples: easy to understand pytorch code across all domains](https://github.com/pytorch/examples)
|
||||
- The API Reference: [http://pytorch.org/docs/](http://pytorch.org/docs/)
|
||||
|
||||
## Communication
|
||||
* forums: discuss implementations, research, etc. http://discuss.pytorch.org
|
||||
* github issues: bug reports, feature requests, install issues, RFCs, thoughts, etc.
|
||||
* slack: general chat, online discussions, collaboration etc. https://pytorch.slack.com/ . If you need a slack invite, ping me at soumith@pytorch.org
|
||||
* slack: general chat, online discussions, collaboration etc. https://pytorch.slack.com/ . If you need a slack invite, ping us at soumith@pytorch.org
|
||||
* newsletter: no-noise, one-way email newsletter with important announcements about pytorch. You can sign-up here: http://eepurl.com/cbG0rv
|
||||
|
||||
## Timeline
|
||||
## Releases and Contributing
|
||||
|
||||
We will run the alpha releases weekly for 6 weeks.
|
||||
After that, we will reevaluate progress, and if we are ready, we will hit beta-0. If not, we will do another two weeks of alpha.
|
||||
PyTorch has a 90 day release cycle (major releases).
|
||||
It's current state is Beta (v0.1.6), we expect no obvious bugs. Please let us know if you encounter a bug by [filing an issue](https://github.com/pytorch/pytorch/issues).
|
||||
|
||||
* alpha-0: Working versions of torch, cutorch, nn, cunn, optim fully unit tested with seamless numpy conversions
|
||||
* alpha-1: Serialization to/from disk with sharing intact. initial release of the new neuralnets package based on a Chainer-like design
|
||||
* alpha-2: sharing tensors across processes for hogwild training or data-loading processes. a rewritten optim package for this new nn.
|
||||
* alpha-3: binary installs (prob will take @alexbw 's help here), contbuilds, etc.
|
||||
* alpha-4: a ton of examples across vision, nlp, speech, RL -- this phase might make us rethink parts of the APIs, and hence want to do this in alpha than beta
|
||||
* alpha-5: Putting a simple and efficient story around multi-machine training. Probably simplistic like torch-distlearn. Building the website, release scripts, more documentation, etc.
|
||||
* alpha-6: [no plan yet]
|
||||
We appreciate all contributions. If you are planning to contribute back bug-fixes, please do so without any further discussion.
|
||||
|
||||
The beta phases will be leaning more towards working with all of you, convering your use-cases, active development on non-core aspects.
|
||||
If you plan to contribute new features, utility functions or extensions to the core, please first open an issue and discuss the feature with us.
|
||||
Sending a PR without discussion might end up resulting in a rejected PR, because we might be taking the core in a different direction than you might be aware of.
|
||||
|
||||
## pytorch vs torch: important changes

We've decided that it's time to rewrite/update parts of the old torch API, even if it means losing some backward compatibility (we can hack up a model converter that converts models correctly). This section lists the biggest changes, and suggests how to shift from torch to pytorch.

For now there is no pytorch documentation. Since all currently implemented modules are very similar to the old ones, it's best to use the torch7 docs for now (keeping in mind the several differences described below).

**For the next release cycle, these are the 3 big features we are planning to add:**

1. [Distributed PyTorch](https://github.com/pytorch/pytorch/issues/241) (a draft implementation is present in this [branch](https://github.com/apaszke/pytorch-dist))
2. Backward of Backward - backpropagating through the optimization process itself. Some past and recent papers, such as [Double Backprop](http://yann.lecun.com/exdb/publis/pdf/drucker-lecun-91.pdf) and [Unrolled GANs](https://arxiv.org/abs/1611.02163), need this.
3. Lazy Execution Engine for autograd - this will enable us to optionally introduce caching and JIT compilers to optimize autograd code.

### Library structure

All core modules are merged into a single repository. Most of them will be rewritten and will be completely new (more on this below), but we're providing a Python version of the old packages under the torch.legacy namespace:

* torch (torch)
* cutorch (torch.cuda)
* nn (torch.legacy.nn)
* cunn (torch.legacy.cunn)
* optim (torch.legacy.optim)
* nngraph (torch.legacy.nngraph - not implemented yet)

### 0-based indexing

pytorch uses 0-based indexing everywhere. This includes arguments to `index*` functions and nn criterion weights.

Under the hood, on the C side, we've changed the logic in TH / THC / THNN / THCUNN, introducing a TH_INDEX_BASE compile-time definition that switches between 0- and 1-based indexing logic.

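For example, a minimal sketch of what 0-based indexing means in practice (values and shapes are illustrative):

```python
import torch

a = torch.randn(2, 3)
first_row = a[0]                 # 0-based: selects the first row

idx = torch.LongTensor([0, 2])   # 0-based indices
picked = a.index_select(1, idx)  # selects columns 0 and 2
```
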
### New Tensor API

**All methods operating on tensors are now out-of-place by default.**

This means that although `a.add(b)` used to have the side effect of mutating the elements of `a`, it now returns a new Tensor holding the result. All methods that mutate the Tensor/Storage are now marked with a trailing underscore (including `copy` -> `copy_`, `fill` -> `fill_`, `set` -> `set_`, etc.). Most math methods have in-place counterparts, so the equivalent of Lua's `a.add(b)` is now `a.add_(b)` (or `torch.add(a, a, b)`, which is not recommended in this case).

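A minimal sketch of the distinction:

```python
import torch

a = torch.ones(3)
b = torch.ones(3)

c = a.add(b)   # out-of-place: a is unchanged, c holds the result
a.add_(b)      # in-place: mutates a itself
```
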
### CUDA module

All tensors have CUDA counterparts in the torch.cuda module.

There is no `torch.cuda.setDevice` anymore. By default the 0th device is always selected, but code can be placed in a `with` statement to change it:

```python
with torch.cuda.device(1):
    a = torch.cuda.FloatTensor(10)  # a is allocated on GPU1
```

Calling `.cuda()` on a tensor no longer converts it to a GPU float tensor, but to a CUDA tensor of the same type located on the currently selected device. So, for example, `a = torch.LongTensor(10).cuda()` makes `a` a `torch.cuda.LongTensor`.

Calling `.cuda(3)` will send it to device 3 (devices are zero-indexed, so this is the fourth GPU). `.cuda()` can also be used to transfer CUDA tensors between devices: calling it on a GPU tensor with a different device selected will copy it onto the current device.

```python
a = torch.LongTensor(10)
b = a.cuda()        # b is a torch.cuda.LongTensor placed on GPU0
c = a.cuda(2)       # c is a torch.cuda.LongTensor placed on GPU2
with torch.cuda.device(1):
    d = b.cuda()    # d is a copy of b, but on GPU1
    e = d.cuda()    # a no-op, d is already on the current GPU, so e is d
```

Also, setting a device is now only important for specifying where to allocate new Tensors. You can perform operations on CUDA tensors irrespective of the currently selected device (but all arguments have to be on the same device), and the result will be allocated on that same device. See below for an example:

```python
a = torch.randn(2, 2).cuda()
b = torch.randn(2, 2).cuda()
with torch.cuda.device(1):
    c = a + b                     # c is on GPU0, like a and b
    d = torch.randn(2, 2).cuda()  # d is on GPU1
```

In the near future we also plan to use a caching CUDA allocator, which alleviates the problem of cudaMalloc/cudaFree being synchronization points. This will let us avoid keeping buffers for every intermediate computation in a module when doing multi-GPU training, for example. See: https://github.com/torch/cutorch/pull/443

### Numpy integration

Because numpy is a core numerical package in Python, and is used by many other libraries like matplotlib, we've implemented a two-way bridge between pytorch and numpy.

```python
import numpy
import torch

a = torch.randn(2, 2)
b = a.numpy()              # b is a numpy array of the type corresponding to a;
                           # no memory copy is performed, they share the same storage

c = numpy.zeros((5, 5))
d = torch.DoubleTensor(c)  # it's possible to construct Tensors from numpy arrays;
                           # d shares memory with c - there's no copy
```

### New neural network module

After looking at several framework designs, at the current design of `nn`, and thinking through a few original design ideas, this is what we've converged on:

* Adopt a Chainer-like design
* Makes it extremely natural to express recurrent nets and weight sharing
* Each module can operate in-place, but marks used variables as dirty - errors will be raised if they're used again
* RNN example:

```python
class Network(nn.Container):
    def __init__(self):
        super(Network, self).__init__(
            conv1=nn.SpatialConvolution(3, 16, 3, 3, 1, 1),
            relu1=nn.ReLU(True),
            lstm=nn.LSTM(),
        )

    def __call__(self, input):
        y = self.conv1(input)
        y = self.relu1(y)
        y = self.lstm(y)
        return y

model = Network()
input = nn.Variable(torch.zeros(256, 3, 224, 224))

output = model(input)

loss = 0
for i in range(ITERS):
    input, target = ...
    # That's all you need for an RNN - just accumulate the loss over timesteps
    for t in range(TIMESTEPS):
        loss += loss_fn(model(input), target)
    loss.backward()
```

* Here, nn.Variable will have a complete tape-based automatic differentiation implemented
* To access state, there are hooks for forward / backward (this also makes multi-GPU easier to implement)
* This has the advantage of not having to worry about in-place / out-of-place operators when accessing .output or .gradInput
* When writing a module, make sure debuggability is straightforward: dropping into pdb and inspecting things should be natural, especially when going over the backward graph
* Pulling handles to a module after constructing a chain should be very natural (apart from having a handle at construction)
    * It's easy, since modules are assigned as Container properties
* Drop overly verbose names. Examples:
    * SpatialConvolution → conv2d
    * VolumetricConvolution → conv3d

#### Some notes on new nn implementation

As shown above, the structure of a network is fully defined by the control flow embedded in the code. There are no rigid containers like the ones known from Lua: you can put an `if` in the middle of your model and freely branch depending on any condition you can come up with, and all operations are registered in the computational-graph history as they run.

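As a hedged sketch of what this enables (module names follow the RNN example above; the `.data` attribute access is an assumption):

```python
class BranchyNetwork(nn.Container):
    def __init__(self):
        super(BranchyNetwork, self).__init__(
            conv1=nn.SpatialConvolution(3, 16, 3, 3, 1, 1),
            relu1=nn.ReLU(True),
        )

    def __call__(self, input):
        y = self.conv1(input)
        if y.data.sum() > 0:   # plain Python control flow; the graph
            y = self.relu1(y)  # records whichever branch actually ran
        return y
```
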
There are two main objects that make this possible - variables and functions. They will be denoted as squares and circles respectively.

![Variable and function symbols](http://students.mimuw.edu.pl/~ap360585/__torch_img/variable_function.png)

Variables are objects that hold a reference to a tensor (and optionally to a gradient w.r.t. that tensor), and to the function in the computational graph that created it. Variables created explicitly by the user (`Variable(tensor)`) have a Leaf function node associated with them.

![Variable and leaf](http://students.mimuw.edu.pl/~ap360585/__torch_img/variable_leaf.png)

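A minimal sketch (assuming `mv` mirrors the tensor API on variables, as the description above suggests):

```python
x = nn.Variable(torch.randn(5))    # leaf variables, created by the user
W = nn.Variable(torch.randn(5, 5))

y = W.mv(x)  # y holds a reference to the result tensor and to the
             # matrix-multiply function node that created it
```
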
Functions are simple classes that define a function from a tuple of inputs to a tuple of outputs, together with a formula for computing the gradient w.r.t. its inputs. Function objects are instantiated to hold references to other functions, and these references make it possible to reconstruct the history of a computation. An example graph for a linear layer (`Wx + b`) is shown below.

![Linear layer graph](http://students.mimuw.edu.pl/~ap360585/__torch_img/linear.png)

Please note that Function objects never hold references to Variable objects, except when they're necessary for the backward pass. This allows freeing all the unnecessary intermediate values. A good example of this is addition, e.g. when computing `y = Wx + My`:

![Freeing intermediate values](http://students.mimuw.edu.pl/~ap360585/__torch_img/intermediate_free.png)

The matrix multiplication operation keeps references to its inputs because it will need them in the backward pass, but addition doesn't need `Wx` and `My` after it computes the result, so they are freed as soon as they go out of scope. To access intermediate values in the forward pass, you can either copy them while you still have a reference, or use a system of hooks that can be attached to any function. Hooks also make it possible to access and inspect gradients inside the graph.

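A hedged sketch of what such a hook could look like (the `register_hook` name and the callback signature are assumptions, not a finalized API):

```python
def inspect_grad(grad):
    # called during backward with the gradient flowing through this node
    print('grad norm:', grad.norm())

y = model(input)
y.register_hook(inspect_grad)  # hypothetical: attach a hook to a graph node
```
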
Another nice thing about this is that a single layer doesn't hold any state other than its parameters (all intermediate values are alive only as long as the graph references them), so it can be used multiple times before calling backward. This is especially convenient when training RNNs: you can use the same network for all timesteps, and the gradients will sum up automatically.

To compute the backward pass you can call `.backward()` on a variable if it's a scalar (a 1-element Variable), or you can provide a gradient tensor of matching shape if it's not. This creates an execution engine object that manages the whole backward pass. It was introduced so that the code for analyzing the graph and scheduling node processing is decoupled from the other parts and can be easily replaced. Right now it simply processes the nodes in topological order, without any prioritization, but in the future we can implement algorithms and heuristics for scheduling independent nodes on different GPU streams, deciding which branches to compute first, etc.

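For example (a minimal sketch, assuming arithmetic operators and `sum` are available on variables as they are on tensors):

```python
x = nn.Variable(torch.randn(2, 2))

y = x * 2                     # y is not a scalar, so a gradient
y.backward(torch.ones(2, 2))  # tensor of matching shape is passed

z = (x * x).sum()             # z is a 1-element Variable
z.backward()                  # no gradient argument needed
```
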
### Serialization

Pickling tensors is supported, but it requires making a temporary copy of all the data and breaks sharing. For this reason we're providing `torch.load` and `torch.save`, which are free of these problems. They have the same interfaces as `pickle.load` (file object) and `pickle.dump` (serialized object, file object) respectively. For now, the only requirement is that the file should have a `fileno` method which returns a file descriptor number (this is already implemented by objects returned by `open`).

Objects are serialized in a tar archive consisting of four files:

* `sys_info` - protocol version, byte order, long size, etc.
* `pickle` - the pickled object
* `tensors` - tensor metadata
* `storages` - the serialized data

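A minimal usage sketch (the file name is arbitrary):

```python
import torch

x = torch.randn(10)

with open('tensor.pt', 'wb') as f:  # objects returned by open() have fileno()
    torch.save(x, f)

with open('tensor.pt', 'rb') as f:
    y = torch.load(f)
```
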
### Multi-GPU

Proposed solutions need to address:

* Kernel launch latency
    * without affecting the user's code
* Implementation should be as transparent as possible
* Should we expose DPT as:
    * Split
    * ParallelApply (scheduling kernels in breadth-first order, to address launch latency)
    * Join
* In the backward phase, sending parameters as soon as a module finishes its computation

**Rough solution:**

```python
# This is an example of a network that has a data-parallel part inside
#
# B is data parallel
#    +->A+-->B+-+
# +--+          +->D
#    +->C+------+
class Network(nn.Container):
    def __init__(self):
        super(Network, self).__init__(
            A = ...,
            B = GPUReplicate(B, [0, 1, 2, 3]),  # copies the module onto a list of GPUs
            C = ...,
            D = ...
        )

    def __call__(self, x):
        a = self.A(x)
        c = self.C(x)
        a_split = Split(a)                  # a_split is a list of Tensors placed on different devices
        b = ParallelApply(self.B, a_split)  # self.B is a list-like object containing copies of B
        d_input = Join(b + [c])             # gathers Tensors onto a single GPU
        return self.D(d_input)
```

Each module is assigned to a single GPU.

For kernel launch latency, the options are:

* Python threading
* Generators

For parameter reductions as soon as possible:

* In the forward pass, register a hook on every parameter; each hook is evaluated as soon as the last backward step for that parameter has executed, and the hook will then "all-reduce" that parameter across GPUs (a hedged sketch follows below)
* Problem with multiple forward calls - how do you know that a parameter won't be used anymore?
    * Well, the last usage in the backward graph equals the first usage in the forward graph, so this should be straightforward

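A heavily hedged sketch of the idea (`allreduce_`, `register_backward_hook`, and the hook signature are all placeholders here, not an existing API):

```python
def make_allreduce_hook(param):
    def hook(*args):
        # The last backward step for this parameter has run, so its
        # gradient is final - reduce it across all GPU replicas now.
        allreduce_(param.grad)  # placeholder for a cross-GPU all-reduce
    return hook

for param in model.parameters():
    param.register_backward_hook(make_allreduce_hook(param))  # placeholder API
```
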
#### Multiprocessing

We plan to make it as straightforward as possible to use pytorch in a multiprocessing environment. To that end, we plan to implement a `.share()` method for tensors that will enable them to be shared across processes seamlessly, so that [Python multiprocessing](https://docs.python.org/2/library/multiprocessing.html) can be used as usual.

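A hedged sketch of what that could look like once `.share()` lands (the method is only planned at this point, so this is illustrative rather than working code):

```python
import torch
from multiprocessing import Process

def worker(t):
    t.add_(1)  # mutations are visible to the parent through shared memory

t = torch.zeros(10).share()  # planned API: move the tensor into shared memory
p = Process(target=worker, args=(t,))
p.start()
p.join()
```
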
## The Team

PyTorch is a community-driven project with several skillful engineers and researchers contributing to it. It is currently maintained by [Adam Paszke](https://apaszke.github.io/), [Sam Gross](https://github.com/colesbury) and [Soumith Chintala](http://soumith.ch), with major contributions coming from tens of talented individuals in various forms and means. A non-exhaustive but growing list needs to mention: Sergey Zagoruyko, Adam Lerer, Francisco Massa, Andreas Kopf, James Bradbury, Zeming Lin, Yuandong Tian, Guillaume Lample, Marat Dukhan, Natalia Gimelshein.

Note: this project is unrelated to [hughperkins/pytorch](https://github.com/hughperkins/pytorch) of the same name. Hugh is a valuable contributor to the Torch community and has helped with many things Torch and PyTorch.

```diff
@@ -1,2 +0,0 @@
-* `split` and `chunk` no longer accept a list (table in Lua) as optional first argument
```

```diff
@@ -685,17 +685,21 @@ endif()
 
 # CUDA_NVCC_EXECUTABLE
-cuda_find_host_program(CUDA_NVCC_EXECUTABLE
-  NAMES nvcc
-  PATHS "${CUDA_TOOLKIT_ROOT_DIR}"
-  ENV CUDA_PATH
-  ENV CUDA_BIN_PATH
-  PATH_SUFFIXES bin bin64
-  NO_DEFAULT_PATH
-  )
-# Search default search paths, after we search our own set of paths.
-cuda_find_host_program(CUDA_NVCC_EXECUTABLE nvcc)
-mark_as_advanced(CUDA_NVCC_EXECUTABLE)
+if(DEFINED ENV{CUDA_NVCC_EXECUTABLE})
+  SET(CUDA_NVCC_EXECUTABLE "$ENV{CUDA_NVCC_EXECUTABLE}")
+else(DEFINED ENV{CUDA_NVCC_EXECUTABLE})
+  cuda_find_host_program(CUDA_NVCC_EXECUTABLE
+    NAMES nvcc
+    PATHS "${CUDA_TOOLKIT_ROOT_DIR}"
+    ENV CUDA_PATH
+    ENV CUDA_BIN_PATH
+    PATH_SUFFIXES bin bin64
+    NO_DEFAULT_PATH
+    )
+  # Search default search paths, after we search our own set of paths.
+  cuda_find_host_program(CUDA_NVCC_EXECUTABLE nvcc)
+  mark_as_advanced(CUDA_NVCC_EXECUTABLE)
+endif(DEFINED ENV{CUDA_NVCC_EXECUTABLE})
 
 if(CUDA_NVCC_EXECUTABLE AND NOT CUDA_VERSION)
   # Compute the version.

@@ -63,11 +63,16 @@ function(CUDA_DETECT_INSTALLED_GPUS OUT_VARIABLE)
     "}\n")
 
   execute_process(COMMAND "${CUDA_NVCC_EXECUTABLE}" "--run" "${cufile}"
+                  "-ccbin" ${CMAKE_CXX_COMPILER}
                   WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/CMakeFiles/"
                   RESULT_VARIABLE nvcc_res OUTPUT_VARIABLE nvcc_out
                   ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE)
 
   if(nvcc_res EQUAL 0)
+    # only keep the last line of nvcc_out
+    STRING(REGEX REPLACE ";" "\\\\;" nvcc_out "${nvcc_out}")
+    STRING(REGEX REPLACE "\n" ";" nvcc_out "${nvcc_out}")
+    list(GET nvcc_out -1 nvcc_out)
     string(REPLACE "2.1" "2.1(2.0)" nvcc_out "${nvcc_out}")
     set(CUDA_GPU_DETECT_OUTPUT ${nvcc_out} CACHE INTERNAL "Returned GPU architetures from detect_gpus tool" FORCE)
   endif()

@@ -116,13 +121,13 @@ function(CUDA_SELECT_NVCC_ARCH_FLAGS out_variable)
       set(add_ptx TRUE)
       set(arch_name ${CMAKE_MATCH_1})
     endif()
-    if(arch_name MATCHES "([0-9]\\.[0-9])$")
+    if(arch_name MATCHES "(^[0-9]\\.[0-9](\\([0-9]\\.[0-9]\\))?)$")
       set(arch_bin ${CMAKE_MATCH_1})
       set(arch_ptx ${arch_bin})
     else()
       # Look for it in our list of known architectures
       if(${arch_name} STREQUAL "Fermi")
-        set(arch_bin 2.0 "2.1(2.0)")
+        set(arch_bin "2.0 2.1(2.0)")
       elseif(${arch_name} STREQUAL "Kepler+Tegra")
         set(arch_bin 3.2)
       elseif(${arch_name} STREQUAL "Kepler+Tesla")

@@ -173,11 +178,11 @@ function(CUDA_SELECT_NVCC_ARCH_FLAGS out_variable)
   # Tell NVCC to add binaries for the specified GPUs
   foreach(arch ${cuda_arch_bin})
     if(arch MATCHES "([0-9]+)\\(([0-9]+)\\)")
-      # User explicitly specified PTX for the concrete BIN
+      # User explicitly specified ARCH for the concrete CODE
       list(APPEND nvcc_flags -gencode arch=compute_${CMAKE_MATCH_2},code=sm_${CMAKE_MATCH_1})
       list(APPEND nvcc_archs_readable sm_${CMAKE_MATCH_1})
     else()
-      # User didn't explicitly specify PTX for the concrete BIN, we assume PTX=BIN
+      # User didn't explicitly specify ARCH for the concrete CODE, we assume ARCH=CODE
      list(APPEND nvcc_flags -gencode arch=compute_${arch},code=sm_${arch})
      list(APPEND nvcc_archs_readable sm_${arch})
     endif()
```

**docs/Makefile** (new file) @@ -0,0 +1,20 @@

```makefile
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
SPHINXOPTS   =
SPHINXBUILD  = sphinx-build
SPHINXPROJ   = PyTorch
SOURCEDIR    = source
BUILDDIR     = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
```

**docs/make.bat** (new file) @@ -0,0 +1,36 @@

```bat
@ECHO OFF

pushd %~dp0

REM Command file for Sphinx documentation

if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=sphinx-build
)
set SOURCEDIR=source
set BUILDDIR=build
set SPHINXPROJ=PyTorch

if "%1" == "" goto help

%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
	echo.installed, then set the SPHINXBUILD environment variable to point
	echo.to the full path of the 'sphinx-build' executable. Alternatively you
	echo.may add the Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%

:end
popd
```

**docs/requirements.txt** (new file) @@ -0,0 +1,2 @@

```
sphinx
-e git://github.com/snide/sphinx_rtd_theme.git#egg=sphinx_rtd_theme
```

**docs/source/_static/css/pytorch_theme.css** (new file) @@ -0,0 +1,114 @@

```css
body {
    font-family: "Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;
}

/* Default header fonts are ugly */
h1, h2, .rst-content .toctree-wrapper p.caption, h3, h4, h5, h6, legend, p.caption {
    font-family: "Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;
}

/* Use white for docs background */
.wy-side-nav-search {
    background-color: #fff;
}

.wy-nav-content-wrap, .wy-menu li.current > a {
    background-color: #fff;
}

@media screen and (min-width: 1400px) {
    .wy-nav-content-wrap {
        background-color: rgba(0, 0, 0, 0.0470588);
    }

    .wy-nav-content {
        background-color: #fff;
    }
}

/* Fixes for mobile */
.wy-nav-top {
    background-color: #fff;
    background-image: url('../img/pytorch-logo-dark.svg');
    background-repeat: no-repeat;
    background-position: center;
    padding: 0;
    margin: 0.4045em 0.809em;
    color: #333;
}

.wy-nav-top > a {
    display: none;
}

@media screen and (max-width: 768px) {
    .wy-side-nav-search>a img.logo {
        height: 60px;
    }
}

/* This is needed to ensure that logo above search scales properly */
.wy-side-nav-search a {
    display: block;
}

/* This ensures that multiple constructors will remain in separate lines. */
.rst-content dl:not(.docutils) dt {
    display: table;
}

/* Use our red for literals (it's very similar to the original color) */
.rst-content tt.literal, .rst-content code.literal {
    color: #F05732;
}

.rst-content tt.xref, a .rst-content tt, .rst-content code.xref, a .rst-content code {
    color: #404040;
}

/* Change link colors (except for the menu) */
a {
    color: #F05732;
}

a:hover {
    color: #F05732;
}

a:visited {
    color: #D44D2C;
}

.wy-menu a {
    color: #b3b3b3;
}

.wy-menu a:hover {
    color: #b3b3b3;
}

/* Default footer text is quite big */
footer {
    font-size: 80%;
}

footer .rst-footer-buttons {
    font-size: 125%; /* revert footer settings - 1/80% = 125% */
}

footer p {
    font-size: 100%;
}

/* For hidden headers that appear in TOC tree */
/* see http://stackoverflow.com/a/32363545/3343043 */
.rst-content .hidden-section {
    display: none;
}

nav .hidden-section {
    display: inherit;
}
```

**docs/source/_static/img/dynamic_graph.gif** (new binary file, 258 KiB; not shown)

**docs/source/_static/img/pytorch-logo-dark.png** (new binary file, 27 KiB; not shown)

**docs/source/_static/img/pytorch-logo-dark.svg** (new file, 1.7 KiB) @@ -0,0 +1,24 @@

```xml
<?xml version="1.0" encoding="utf-8"?>
<!-- Generator: Adobe Illustrator 21.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) -->
<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px"
	viewBox="0 0 199.7 40.2" style="enable-background:new 0 0 199.7 40.2;" xml:space="preserve">
<style type="text/css">
	.st0{fill:#F05732;}
	.st1{fill:#9E529F;}
	.st2{fill:#333333;}
</style>
<path class="st0" d="M102.7,12.2c-1.3-1-1.8,3.9-4.4,3.9c-3,0-4-13-6.3-13c-0.7,0-0.8-0.4-7.9,21.3c-2.9,9,4.4,15.8,11.8,15.8
	c4.6,0,12.3-3,12.3-12.6C108.2,20.5,104.7,13.7,102.7,12.2z M95.8,35.3c-3.7,0-6.7-3.1-6.7-7c0-3.9,3-7,6.7-7s6.7,3.1,6.7,7
	C102.5,32.1,99.5,35.3,95.8,35.3z"/>
<path class="st1" d="M99.8,0c-0.5,0-1.8,2.5-1.8,3.6c0,1.5,1,2,1.8,2c0.8,0,1.8-0.5,1.8-2C101.5,2.5,100.2,0,99.8,0z"/>
<path class="st2" d="M0,39.5V14.9h11.5c5.3,0,8.3,3.6,8.3,7.9c0,4.3-3,7.9-8.3,7.9H5.2v8.8H0z M14.4,22.8c0-2.1-1.6-3.3-3.7-3.3H5.2
	v6.6h5.5C12.8,26.1,14.4,24.8,14.4,22.8z"/>
<path class="st2" d="M35.2,39.5V29.4l-9.4-14.5h6l6.1,9.8l6.1-9.8h5.9l-9.4,14.5v10.1H35.2z"/>
<path class="st2" d="M63.3,39.5v-20h-7.2v-4.6h19.6v4.6h-7.2v20H63.3z"/>
<path class="st2" d="M131.4,39.5l-4.8-8.7h-3.8v8.7h-5.2V14.9H129c5.1,0,8.3,3.4,8.3,7.9c0,4.3-2.8,6.7-5.4,7.3l5.6,9.4H131.4z
	M131.9,22.8c0-2-1.6-3.3-3.7-3.3h-5.5v6.6h5.5C130.3,26.1,131.9,24.9,131.9,22.8z"/>
<path class="st2" d="M145.6,27.2c0-7.6,5.7-12.7,13.1-12.7c5.4,0,8.5,2.9,10.3,6l-4.5,2.2c-1-2-3.2-3.6-5.8-3.6
	c-4.5,0-7.7,3.4-7.7,8.1c0,4.6,3.2,8.1,7.7,8.1c2.5,0,4.7-1.6,5.8-3.6l4.5,2.2c-1.7,3.1-4.9,6-10.3,6
	C151.3,39.9,145.6,34.7,145.6,27.2z"/>
<path class="st2" d="M194.5,39.5V29.1h-11.6v10.4h-5.2V14.9h5.2v9.7h11.6v-9.7h5.3v24.6H194.5z"/>
</svg>
```

**docs/source/_static/img/tensor_illustration.png** (new binary file, 18 KiB; not shown)

**docs/source/autograd.rst** (new file) @@ -0,0 +1,53 @@

```rst
.. role:: hidden
    :class: hidden-section

Automatic differentiation package - torch.autograd
==================================================

.. automodule:: torch.autograd
.. currentmodule:: torch.autograd

.. autofunction:: backward

Variable
--------

API compatibility
^^^^^^^^^^^^^^^^^

The Variable API is nearly the same as the regular Tensor API (with the
exception of a couple of in-place methods that would overwrite inputs
required for gradient computation). In most cases Tensors can be safely
replaced with Variables and the code will keep working just fine. Because
of this, we're not documenting all the operations on variables, and you
should refer to :class:`torch.Tensor` docs for this purpose.

In-place operations on Variables
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Supporting in-place operations in autograd is a hard matter, and we discourage
their use in most cases. Autograd's aggressive buffer freeing and reuse makes
it very efficient, and there are very few occasions when in-place operations
actually lower memory usage by any significant amount. Unless you're operating
under heavy memory pressure, you might never need to use them.

In-place correctness checks
^^^^^^^^^^^^^^^^^^^^^^^^^^^

All :class:`Variable` s keep track of in-place operations applied to them, and
if the implementation detects that a variable was saved for backward in one of
the functions, but it was modified in-place afterwards, an error will be raised
once the backward pass is started. This ensures that if you're using in-place
functions and not seeing any errors, you can be sure that the computed
gradients are correct.


.. autoclass:: Variable
    :members:

:hidden:`Function`
------------------

.. autoclass:: Function
    :members:
```

**docs/source/conf.py** (new file) @@ -0,0 +1,246 @@

#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# PyTorch documentation build configuration file, created by
|
||||
# sphinx-quickstart on Fri Dec 23 13:31:47 2016.
|
||||
#
|
||||
# This file is execfile()d with the current directory set to its
|
||||
# containing dir.
|
||||
#
|
||||
# Note that not all possible configuration values are present in this
|
||||
# autogenerated file.
|
||||
#
|
||||
# All configuration values have a default; values that are commented out
|
||||
# serve to show the default.
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#
|
||||
# import os
|
||||
# import sys
|
||||
# sys.path.insert(0, os.path.abspath('.'))
|
||||
import torch
|
||||
try:
|
||||
import torchvision
|
||||
except ImportError:
|
||||
import warnings
|
||||
warnings.warn('unable to load "torchvision" package')
|
||||
import sphinx_rtd_theme
|
||||
|
||||
|
||||
# -- General configuration ------------------------------------------------
|
||||
|
||||
# If your documentation needs a minimal Sphinx version, state it here.
|
||||
#
|
||||
# needs_sphinx = '1.0'
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
'sphinx.ext.autodoc',
|
||||
'sphinx.ext.autosummary',
|
||||
'sphinx.ext.doctest',
|
||||
'sphinx.ext.intersphinx',
|
||||
'sphinx.ext.todo',
|
||||
'sphinx.ext.coverage',
|
||||
'sphinx.ext.mathjax',
|
||||
'sphinx.ext.napoleon',
|
||||
'sphinx.ext.viewcode',
|
||||
]
|
||||
|
||||
napoleon_use_ivar = True
|
||||
|
||||
# Add any paths that contain templates here, relative to this directory.
|
||||
templates_path = ['_templates']
|
||||
|
||||
# The suffix(es) of source filenames.
|
||||
# You can specify multiple suffix as a list of string:
|
||||
#
|
||||
# source_suffix = ['.rst', '.md']
|
||||
source_suffix = '.rst'
|
||||
|
||||
# The master toctree document.
|
||||
master_doc = 'index'
|
||||
|
||||
# General information about the project.
|
||||
project = 'PyTorch'
|
||||
copyright = '2017, Torch Contributors'
|
||||
author = 'Torch Contributors'
|
||||
|
||||
# The version info for the project you're documenting, acts as replacement for
|
||||
# |version| and |release|, also used in various other places throughout the
|
||||
# built documents.
|
||||
#
|
||||
# The short X.Y version.
|
||||
# TODO: change to [:2] at v1.0
|
||||
version = '.'.join(torch.__version__.split('+')[0].split('.')[:3])
|
||||
# The full version, including alpha/beta/rc tags.
|
||||
# TODO: verify this works as expected
|
||||
release = torch.__version__.split('+')[0]
|
||||
|
||||
# The language for content autogenerated by Sphinx. Refer to documentation
|
||||
# for a list of supported languages.
|
||||
#
|
||||
# This is also used if you do content translation via gettext catalogs.
|
||||
# Usually you set "language" from the command line for these cases.
|
||||
language = None
|
||||
|
||||
# List of patterns, relative to source directory, that match files and
|
||||
# directories to ignore when looking for source files.
|
||||
# This patterns also effect to html_static_path and html_extra_path
|
||||
exclude_patterns = []
|
||||
|
||||
# The name of the Pygments (syntax highlighting) style to use.
|
||||
pygments_style = 'sphinx'
|
||||
|
||||
# If true, `todo` and `todoList` produce output, else they produce nothing.
|
||||
todo_include_todos = True
|
||||
|
||||
|
||||
# -- Options for HTML output ----------------------------------------------
|
||||
|
||||
# The theme to use for HTML and HTML Help pages. See the documentation for
|
||||
# a list of builtin themes.
|
||||
#
|
||||
html_theme = 'sphinx_rtd_theme'
|
||||
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
|
||||
|
||||
# Theme options are theme-specific and customize the look and feel of a theme
|
||||
# further. For a list of options available for each theme, see the
|
||||
# documentation.
|
||||
#
|
||||
html_theme_options = {
|
||||
'collapse_navigation': False,
|
||||
'display_version': False,
|
||||
'logo_only': True,
|
||||
}
|
||||
|
||||
html_logo = '_static/img/pytorch-logo-dark.svg'
|
||||
|
||||
# Add any paths that contain custom static files (such as style sheets) here,
|
||||
# relative to this directory. They are copied after the builtin static files,
|
||||
# so a file named "default.css" will overwrite the builtin "default.css".
|
||||
html_static_path = ['_static']
|
||||
|
||||
# html_style_path = 'css/pytorch_theme.css'
|
||||
html_context = {
|
||||
'css_files': [
|
||||
'https://fonts.googleapis.com/css?family=Lato',
|
||||
'_static/css/pytorch_theme.css'
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
# -- Options for HTMLHelp output ------------------------------------------
|
||||
|
||||
# Output file base name for HTML help builder.
|
||||
htmlhelp_basename = 'PyTorchdoc'
|
||||
|
||||
|
||||
# -- Options for LaTeX output ---------------------------------------------
|
||||
|
||||
latex_elements = {
|
||||
# The paper size ('letterpaper' or 'a4paper').
|
||||
#
|
||||
# 'papersize': 'letterpaper',
|
||||
|
||||
# The font size ('10pt', '11pt' or '12pt').
|
||||
#
|
||||
# 'pointsize': '10pt',
|
||||
|
||||
# Additional stuff for the LaTeX preamble.
|
||||
#
|
||||
# 'preamble': '',
|
||||
|
||||
# Latex figure (float) alignment
|
||||
#
|
||||
# 'figure_align': 'htbp',
|
||||
}
|
||||
|
||||
# Grouping the document tree into LaTeX files. List of tuples
|
||||
# (source start file, target name, title,
|
||||
# author, documentclass [howto, manual, or own class]).
|
||||
latex_documents = [
|
||||
(master_doc, 'pytorch.tex', 'PyTorch Documentation',
|
||||
'Torch Contributors', 'manual'),
|
||||
]
|
||||
|
||||
|
||||
# -- Options for manual page output ---------------------------------------
|
||||
|
||||
# One entry per manual page. List of tuples
|
||||
# (source start file, name, description, authors, manual section).
|
||||
man_pages = [
|
||||
(master_doc, 'PyTorch', 'PyTorch Documentation',
|
||||
[author], 1)
|
||||
]
|
||||
|
||||
|
||||
# -- Options for Texinfo output -------------------------------------------
|
||||
|
||||
# Grouping the document tree into Texinfo files. List of tuples
|
||||
# (source start file, target name, title, author,
|
||||
# dir menu entry, description, category)
|
||||
texinfo_documents = [
|
||||
(master_doc, 'PyTorch', 'PyTorch Documentation',
|
||||
author, 'PyTorch', 'One line description of project.',
|
||||
'Miscellaneous'),
|
||||
]
|
||||
|
||||
|
||||
# Example configuration for intersphinx: refer to the Python standard library.
|
||||
intersphinx_mapping = {
|
||||
'python': ('https://docs.python.org/', None),
|
||||
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
|
||||
}
|
||||
|
||||
# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
|
||||
# See http://stackoverflow.com/a/41184353/3343043
|
||||
|
||||
from docutils import nodes
|
||||
from sphinx.util.docfields import TypedField
|
||||
from sphinx import addnodes
|
||||
|
||||
|
||||
def patched_make_field(self, types, domain, items):
|
||||
# type: (List, unicode, Tuple) -> nodes.field
|
||||
def handle_item(fieldarg, content):
|
||||
par = nodes.paragraph()
|
||||
par += addnodes.literal_strong('', fieldarg) # Patch: this line added
|
||||
# par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
|
||||
# addnodes.literal_strong))
|
||||
if fieldarg in types:
|
||||
par += nodes.Text(' (')
|
||||
# NOTE: using .pop() here to prevent a single type node to be
|
||||
# inserted twice into the doctree, which leads to
|
||||
# inconsistencies later when references are resolved
|
||||
fieldtype = types.pop(fieldarg)
|
||||
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
|
||||
typename = u''.join(n.astext() for n in fieldtype)
|
||||
typename = typename.replace('int', 'python:int')
|
||||
typename = typename.replace('long', 'python:long')
|
||||
typename = typename.replace('float', 'python:float')
|
||||
typename = typename.replace('type', 'python:type')
|
||||
par.extend(self.make_xrefs(self.typerolename, domain, typename,
|
||||
addnodes.literal_emphasis))
|
||||
else:
|
||||
par += fieldtype
|
||||
par += nodes.Text(')')
|
||||
par += nodes.Text(' -- ')
|
||||
par += content
|
||||
return par
|
||||
|
||||
fieldname = nodes.field_name('', self.label)
|
||||
if len(items) == 1 and self.can_collapse:
|
||||
fieldarg, content = items[0]
|
||||
bodynode = handle_item(fieldarg, content)
|
||||
else:
|
||||
bodynode = self.list_type()
|
||||
for fieldarg, content in items:
|
||||
bodynode += nodes.list_item('', handle_item(fieldarg, content))
|
||||
fieldbody = nodes.field_body('', bodynode)
|
||||
return nodes.field('', fieldname, fieldbody)
|
||||
|
||||
TypedField.make_field = patched_make_field
|
||||
**docs/source/cuda.rst** (new file) @@ -0,0 +1,27 @@

```rst
torch.cuda
===================================

.. currentmodule:: torch.cuda

.. automodule:: torch.cuda
   :members:

Communication collectives
-------------------------

.. autofunction:: torch.cuda.comm.broadcast

.. autofunction:: torch.cuda.comm.reduce_add

.. autofunction:: torch.cuda.comm.scatter

.. autofunction:: torch.cuda.comm.gather

Streams and events
------------------

.. autoclass:: Stream
   :members:

.. autoclass:: Event
   :members:
```

**docs/source/data.rst** (new file) @@ -0,0 +1,12 @@

```rst
torch.utils.data
===================================

.. automodule:: torch.utils.data
.. autoclass:: Dataset
.. autoclass:: TensorDataset
.. autoclass:: DataLoader
.. autoclass:: torch.utils.data.sampler.Sampler
.. autoclass:: torch.utils.data.sampler.SequentialSampler
.. autoclass:: torch.utils.data.sampler.RandomSampler
.. autoclass:: torch.utils.data.sampler.SubsetRandomSampler
.. autoclass:: torch.utils.data.sampler.WeightedRandomSampler
```

**docs/source/ffi.rst** (new file) @@ -0,0 +1,6 @@

```rst
torch.utils.ffi
===============

.. currentmodule:: torch.utils.ffi
.. autofunction:: create_extension
```

**docs/source/index.rst** (new file) @@ -0,0 +1,54 @@

```rst
.. PyTorch documentation master file, created by
   sphinx-quickstart on Fri Dec 23 13:31:47 2016.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.

:github_url: https://github.com/pytorch/pytorch

PyTorch documentation
===================================

PyTorch is an optimized tensor library for deep learning using GPUs and CPUs.

.. toctree::
   :glob:
   :maxdepth: 1
   :caption: Notes

   notes/*


.. toctree::
   :maxdepth: 1
   :caption: Package Reference

   torch
   tensors
   storage
   nn
   optim
   torch.autograd <autograd>
   torch.multiprocessing <multiprocessing>
   torch.legacy <legacy>
   cuda
   ffi
   data
   model_zoo

.. toctree::
   :glob:
   :maxdepth: 1
   :caption: torchvision Reference

   torchvision/torchvision
   torchvision/datasets
   torchvision/models
   torchvision/transforms
   torchvision/utils


Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
```

**docs/source/legacy.rst** (new file) @@ -0,0 +1,4 @@

```rst
Legacy package - torch.legacy
===================================

.. automodule:: torch.legacy
```

**docs/source/model_zoo.rst** (new file) @@ -0,0 +1,5 @@

```rst
torch.utils.model_zoo
===================================

.. automodule:: torch.utils.model_zoo
.. autofunction:: load_url
```

**docs/source/multiprocessing.rst** (new file) @@ -0,0 +1,88 @@

```rst
Multiprocessing package - torch.multiprocessing
===============================================

.. automodule:: torch.multiprocessing
.. currentmodule:: torch.multiprocessing

.. warning::

    If the main process exits abruptly (e.g. because of an incoming signal),
    Python's ``multiprocessing`` sometimes fails to clean up its children.
    It's a known caveat, so if you're seeing any resource leaks after
    interrupting the interpreter, it probably means that this has just
    happened to you.

Strategy management
-------------------

.. autofunction:: get_all_sharing_strategies
.. autofunction:: get_sharing_strategy
.. autofunction:: set_sharing_strategy

Sharing CUDA tensors
--------------------

Sharing CUDA tensors between processes is supported only in Python 3, using
the ``spawn`` or ``forkserver`` start methods. :mod:`python:multiprocessing`
in Python 2 can only create subprocesses using ``fork``, and that's not
supported by the CUDA runtime.

.. warning::

    The CUDA API requires that an allocation exported to other processes
    remains valid as long as they use it. You should be careful and ensure
    that CUDA tensors you share don't go out of scope for as long as
    necessary. This shouldn't be a problem for sharing model parameters, but
    passing other kinds of data should be done with care. Note that this
    restriction doesn't apply to shared CPU memory.

Sharing strategies
------------------

This section provides a brief overview of how the different sharing
strategies work. Note that it applies only to CPU tensors - CUDA tensors
will always use the CUDA API, as that's the only way they can be shared.

File descriptor - ``file_descriptor``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. note::

    This is the default strategy (except for macOS, where it's not
    supported).

This strategy will use file descriptors as shared memory handles. Whenever a
storage is moved to shared memory, a file descriptor obtained from
``shm_open`` is cached with the object, and when it's going to be sent to
other processes, the file descriptor will be transferred (e.g. via UNIX
sockets) to them. The receiver will also cache the file descriptor and
``mmap`` it, to obtain a shared view onto the storage data.

Note that if a lot of tensors are shared, this strategy will keep a large
number of file descriptors open most of the time. If your system has low
limits for the number of open file descriptors, and you can't raise them,
you should use the ``file_system`` strategy.

File system - ``file_system``
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This strategy will use file names given to ``shm_open`` to identify the
shared memory regions. This has the benefit of not requiring the
implementation to cache the file descriptors obtained from it, but at the
same time it is prone to shared memory leaks. A file can't be deleted right
after its creation, because other processes need to access it to open their
views. If the processes fatally crash, or are killed, and don't call the
storage destructors, the files will remain in the system. This is very
serious, because they keep using up memory until the system is restarted,
or they're freed manually.

To counter the problem of shared memory file leaks, :mod:`torch.multiprocessing`
will spawn a daemon named ``torch_shm_manager`` that will isolate itself from
the current process group, and will keep track of all shared memory
allocations. Once all processes connected to it exit, it will wait a moment
to ensure there will be no new connections, and will iterate over all shared
memory files allocated by the group. If it finds that any of them still
exist, they will be deallocated. We've tested this method and it proved to be
robust to various failures. Still, if your system has high enough limits, and
``file_descriptor`` is a supported strategy, we do not recommend switching to
this one.
```

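A minimal usage sketch for the strategy-management functions documented in the file above (the strategy names follow its sections):

```python
import torch.multiprocessing as mp

print(mp.get_sharing_strategy())        # e.g. 'file_descriptor'
mp.set_sharing_strategy('file_system')  # opt in to the file_system strategy
```
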
**docs/source/nn.rst** (new file) @@ -0,0 +1,849 @@

.. role:: hidden
|
||||
:class: hidden-section
|
||||
|
||||
torch.nn
|
||||
===================================
|
||||
|
||||
.. automodule:: torch.nn
|
||||
.. currentmodule:: torch.nn
|
||||
|
||||
Parameters
|
||||
----------
|
||||
|
||||
.. autoclass:: Parameter
|
||||
:members:
|
||||
|
||||
Containers
|
||||
----------------------------------
|
||||
|
||||
:hidden:`Module`
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Module
|
||||
:members:
|
||||
|
||||
:hidden:`Sequential`
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Sequential
|
||||
:members:
|
||||
|
||||
:hidden:`ModuleList`
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: ModuleList
|
||||
:members:
|
||||
|
||||
:hidden:`ParameterList`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: ParameterList
|
||||
:members:
|
||||
|
||||
Convolution Layers
|
||||
----------------------------------
|
||||
|
||||
:hidden:`Conv1d`
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Conv1d
|
||||
:members:
|
||||
|
||||
:hidden:`Conv2d`
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Conv2d
|
||||
:members:
|
||||
|
||||
:hidden:`Conv3d`
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Conv3d
|
||||
:members:
|
||||
|
||||
:hidden:`ConvTranspose1d`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: ConvTranspose1d
|
||||
:members:
|
||||
|
||||
:hidden:`ConvTranspose2d`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
|
||||
.. autoclass:: ConvTranspose2d
|
||||
:members:
|
||||
|
||||
:hidden:`ConvTranspose3d`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: ConvTranspose3d
|
||||
:members:
|
||||
|
||||
|
||||
Pooling Layers
|
||||
----------------------------------
|
||||
|
||||
:hidden:`MaxPool1d`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: MaxPool1d
|
||||
:members:
|
||||
|
||||
:hidden:`MaxPool2d`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: MaxPool2d
|
||||
:members:
|
||||
|
||||
:hidden:`MaxPool3d`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: MaxPool3d
|
||||
:members:
|
||||
|
||||
:hidden:`MaxUnpool1d`
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: MaxUnpool1d
|
||||
:members:
|
||||
|
||||
:hidden:`MaxUnpool2d`
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: MaxUnpool2d
|
||||
:members:
|
||||
|
||||
:hidden:`MaxUnpool3d`
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: MaxUnpool3d
|
||||
:members:
|
||||
|
||||
:hidden:`AvgPool1d`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: AvgPool1d
|
||||
:members:
|
||||
|
||||
:hidden:`AvgPool2d`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: AvgPool2d
|
||||
:members:
|
||||
|
||||
:hidden:`AvgPool3d`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: AvgPool3d
|
||||
:members:
|
||||
|
||||
:hidden:`FractionalMaxPool2d`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: FractionalMaxPool2d
|
||||
:members:
|
||||
|
||||
:hidden:`LPPool2d`
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: LPPool2d
|
||||
:members:
|
||||
|
||||
:hidden:`AdaptiveMaxPool1d`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: AdaptiveMaxPool1d
|
||||
:members:
|
||||
|
||||
:hidden:`AdaptiveMaxPool2d`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: AdaptiveMaxPool2d
|
||||
:members:
|
||||
|
||||
:hidden:`AdaptiveAvgPool1d`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: AdaptiveAvgPool1d
|
||||
:members:
|
||||
|
||||
:hidden:`AdaptiveAvgPool2d`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: AdaptiveAvgPool2d
|
||||
:members:
|
||||
|
||||
|
||||
Non-linear Activations
|
||||
----------------------------------
|
||||
|
||||
:hidden:`ReLU`
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: ReLU
|
||||
:members:
|
||||
|
||||
:hidden:`ReLU6`
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: ReLU6
|
||||
:members:
|
||||
|
||||
:hidden:`ELU`
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: ELU
|
||||
:members:
|
||||
|
||||
:hidden:`PReLU`
|
||||
~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: PReLU
|
||||
:members:
|
||||
|
||||
:hidden:`LeakyReLU`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: LeakyReLU
|
||||
:members:
|
||||
|
||||
:hidden:`Threshold`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Threshold
|
||||
:members:
|
||||
|
||||
:hidden:`Hardtanh`
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Hardtanh
|
||||
:members:
|
||||
|
||||
:hidden:`Sigmoid`
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Sigmoid
|
||||
:members:
|
||||
|
||||
:hidden:`Tanh`
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Tanh
|
||||
:members:
|
||||
|
||||
:hidden:`LogSigmoid`
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: LogSigmoid
|
||||
:members:
|
||||
|
||||
:hidden:`Softplus`
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Softplus
|
||||
:members:
|
||||
|
||||
:hidden:`Softshrink`
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Softshrink
|
||||
:members:
|
||||
|
||||
:hidden:`Softsign`
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Softsign
|
||||
:members:
|
||||
|
||||
:hidden:`Tanhshrink`
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Tanhshrink
|
||||
:members:
|
||||
|
||||
:hidden:`Softmin`
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Softmin
|
||||
:members:
|
||||
|
||||
:hidden:`Softmax`
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Softmax
|
||||
:members:
|
||||
|
||||
:hidden:`LogSoftmax`
|
||||
~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: LogSoftmax
|
||||
:members:
|
||||
|
||||
|
||||
Normalization layers
|
||||
----------------------------------
|
||||
|
||||
:hidden:`BatchNorm1d`
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: BatchNorm1d
|
||||
:members:
|
||||
|
||||
:hidden:`BatchNorm2d`
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: BatchNorm2d
|
||||
:members:
|
||||
|
||||
:hidden:`BatchNorm3d`
|
||||
~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: BatchNorm3d
|
||||
:members:
|
||||
|
||||
|
||||
Recurrent layers
|
||||
----------------------------------
|
||||
|
||||
:hidden:`RNN`
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: RNN
|
||||
:members:
|
||||
|
||||
:hidden:`LSTM`
|
||||
~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: LSTM
|
||||
:members:
|
||||
|
||||
:hidden:`GRU`
|
||||
~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: GRU
|
||||
:members:
|
||||
|
||||
:hidden:`RNNCell`
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: RNNCell
|
||||
:members:
|
||||
|
||||
:hidden:`LSTMCell`
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: LSTMCell
|
||||
:members:
|
||||
|
||||
:hidden:`GRUCell`
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: GRUCell
|
||||
:members:
|
||||
|
||||
Linear layers
|
||||
----------------------------------
|
||||
|
||||
:hidden:`Linear`
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Linear
|
||||
:members:
|
||||
|
||||
|
||||
Dropout layers
|
||||
----------------------------------
|
||||
|
||||
:hidden:`Dropout`
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Dropout
|
||||
:members:
|
||||
|
||||
:hidden:`Dropout2d`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Dropout2d
|
||||
:members:
|
||||
|
||||
:hidden:`Dropout3d`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Dropout3d
|
||||
:members:
|
||||
|
||||
|
||||
Sparse layers
|
||||
----------------------------------
|
||||
|
||||
:hidden:`Embedding`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: Embedding
|
||||
:members:
|
||||
|
||||
Distance functions
|
||||
----------------------------------
|
||||
|
||||
:hidden:`PairwiseDistance`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: PairwiseDistance
|
||||
:members:
|
||||
|
||||
|
||||
Loss functions
|
||||
----------------------------------
|
||||
|
||||
:hidden:`L1Loss`
|
||||
~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: L1Loss
|
||||
:members:
|
||||
|
||||
:hidden:`MSELoss`
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: MSELoss
|
||||
:members:
|
||||
|
||||
:hidden:`CrossEntropyLoss`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: CrossEntropyLoss
|
||||
:members:
|
||||
|
||||
:hidden:`NLLLoss`
|
||||
~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: NLLLoss
|
||||
:members:
|
||||
|
||||
:hidden:`NLLLoss2d`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: NLLLoss2d
|
||||
:members:
|
||||
|
||||
:hidden:`KLDivLoss`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: KLDivLoss
|
||||
:members:
|
||||
|
||||
:hidden:`BCELoss`
|
||||
~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: BCELoss
|
||||
:members:
|
||||
|
||||
:hidden:`MarginRankingLoss`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: MarginRankingLoss
|
||||
:members:
|
||||
|
||||
:hidden:`HingeEmbeddingLoss`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: HingeEmbeddingLoss
|
||||
:members:
|
||||
|
||||
:hidden:`MultiLabelMarginLoss`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: MultiLabelMarginLoss
|
||||
:members:
|
||||
|
||||
:hidden:`SmoothL1Loss`
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: SmoothL1Loss
|
||||
:members:
|
||||
|
||||
:hidden:`SoftMarginLoss`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: SoftMarginLoss
|
||||
:members:
|
||||
|
||||
:hidden:`MultiLabelSoftMarginLoss`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: MultiLabelSoftMarginLoss
|
||||
:members:
|
||||
|
||||
:hidden:`CosineEmbeddingLoss`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: CosineEmbeddingLoss
|
||||
:members:
|
||||
|
||||
:hidden:`MultiMarginLoss`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: MultiMarginLoss
|
||||
:members:
|
||||
|
||||
|
||||
Vision layers
|
||||
----------------
|
||||
|
||||
:hidden:`PixelShuffle`
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: PixelShuffle
|
||||
:members:
|
||||
|
||||
:hidden:`UpsamplingNearest2d`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: UpsamplingNearest2d
|
||||
:members:
|
||||
|
||||
:hidden:`UpsamplingBilinear2d`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: UpsamplingBilinear2d
|
||||
:members:
|
||||
|
||||
|
||||
Multi-GPU layers
|
||||
----------------
|
||||
|
||||
:hidden:`DataParallel`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autoclass:: DataParallel
|
||||
:members:
|
||||
|
||||
|
||||
Utilities
|
||||
---------
|
||||
|
||||
:hidden:`clip_grad_norm`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autofunction:: torch.nn.utils.clip_grad_norm
|
||||
|
||||
|
||||
.. currentmodule:: torch.nn.utils.rnn
|
||||
|
||||
:hidden:`PackedSequence`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autofunction:: torch.nn.utils.rnn.PackedSequence
|
||||
|
||||
|
||||
:hidden:`pack_padded_sequence`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autofunction:: torch.nn.utils.rnn.pack_padded_sequence
|
||||
|
||||
|
||||
:hidden:`pad_packed_sequence`
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
.. autofunction:: torch.nn.utils.rnn.pad_packed_sequence
|
||||
|
||||
|
||||

torch.nn.functional
===================

.. currentmodule:: torch.nn.functional

Convolution functions
---------------------

:hidden:`conv1d`
~~~~~~~~~~~~~~~~

.. autofunction:: conv1d

:hidden:`conv2d`
~~~~~~~~~~~~~~~~

.. autofunction:: conv2d

:hidden:`conv3d`
~~~~~~~~~~~~~~~~

.. autofunction:: conv3d

:hidden:`conv_transpose1d`
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: conv_transpose1d

:hidden:`conv_transpose2d`
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: conv_transpose2d

:hidden:`conv_transpose3d`
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: conv_transpose3d

Pooling functions
-----------------

:hidden:`avg_pool1d`
~~~~~~~~~~~~~~~~~~~~

.. autofunction:: avg_pool1d

:hidden:`avg_pool2d`
~~~~~~~~~~~~~~~~~~~~

.. autofunction:: avg_pool2d

:hidden:`avg_pool3d`
~~~~~~~~~~~~~~~~~~~~

.. autofunction:: avg_pool3d

:hidden:`max_pool1d`
~~~~~~~~~~~~~~~~~~~~

.. autofunction:: max_pool1d

:hidden:`max_pool2d`
~~~~~~~~~~~~~~~~~~~~

.. autofunction:: max_pool2d

:hidden:`max_pool3d`
~~~~~~~~~~~~~~~~~~~~

.. autofunction:: max_pool3d

:hidden:`max_unpool1d`
~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: max_unpool1d

:hidden:`max_unpool2d`
~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: max_unpool2d

:hidden:`max_unpool3d`
~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: max_unpool3d

:hidden:`lp_pool2d`
~~~~~~~~~~~~~~~~~~~

.. autofunction:: lp_pool2d

:hidden:`adaptive_max_pool1d`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: adaptive_max_pool1d

:hidden:`adaptive_max_pool2d`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: adaptive_max_pool2d

:hidden:`adaptive_avg_pool1d`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: adaptive_avg_pool1d

:hidden:`adaptive_avg_pool2d`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: adaptive_avg_pool2d


Non-linear activation functions
-------------------------------

:hidden:`threshold`
~~~~~~~~~~~~~~~~~~~

.. autofunction:: threshold

:hidden:`relu`
~~~~~~~~~~~~~~

.. autofunction:: relu

:hidden:`hardtanh`
~~~~~~~~~~~~~~~~~~

.. autofunction:: hardtanh

:hidden:`relu6`
~~~~~~~~~~~~~~~

.. autofunction:: relu6

:hidden:`elu`
~~~~~~~~~~~~~

.. autofunction:: elu

:hidden:`leaky_relu`
~~~~~~~~~~~~~~~~~~~~

.. autofunction:: leaky_relu

:hidden:`prelu`
~~~~~~~~~~~~~~~

.. autofunction:: prelu

:hidden:`rrelu`
~~~~~~~~~~~~~~~

.. autofunction:: rrelu

:hidden:`logsigmoid`
~~~~~~~~~~~~~~~~~~~~

.. autofunction:: logsigmoid

:hidden:`hardshrink`
~~~~~~~~~~~~~~~~~~~~

.. autofunction:: hardshrink

:hidden:`tanhshrink`
~~~~~~~~~~~~~~~~~~~~

.. autofunction:: tanhshrink

:hidden:`softsign`
~~~~~~~~~~~~~~~~~~

.. autofunction:: softsign

:hidden:`softplus`
~~~~~~~~~~~~~~~~~~

.. autofunction:: softplus

:hidden:`softmin`
~~~~~~~~~~~~~~~~~

.. autofunction:: softmin

:hidden:`softmax`
~~~~~~~~~~~~~~~~~

.. autofunction:: softmax

:hidden:`softshrink`
~~~~~~~~~~~~~~~~~~~~

.. autofunction:: softshrink

:hidden:`log_softmax`
~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: log_softmax

:hidden:`tanh`
~~~~~~~~~~~~~~

.. autofunction:: tanh

:hidden:`sigmoid`
~~~~~~~~~~~~~~~~~

.. autofunction:: sigmoid

Normalization functions
-----------------------

:hidden:`batch_norm`
~~~~~~~~~~~~~~~~~~~~

.. autofunction:: batch_norm

Linear functions
----------------

:hidden:`linear`
~~~~~~~~~~~~~~~~

.. autofunction:: linear

Dropout functions
-----------------

:hidden:`dropout`
~~~~~~~~~~~~~~~~~

.. autofunction:: dropout

Distance functions
------------------

:hidden:`pairwise_distance`
~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: pairwise_distance


Loss functions
--------------

:hidden:`nll_loss`
~~~~~~~~~~~~~~~~~~

.. autofunction:: nll_loss

:hidden:`kl_div`
~~~~~~~~~~~~~~~~

.. autofunction:: kl_div

:hidden:`cross_entropy`
~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: cross_entropy

:hidden:`binary_cross_entropy`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: binary_cross_entropy

:hidden:`smooth_l1_loss`
~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: smooth_l1_loss

Vision functions
----------------

:hidden:`pixel_shuffle`
~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: pixel_shuffle

:hidden:`pad`
~~~~~~~~~~~~~

.. autofunction:: pad

torch.nn.init
=============

.. currentmodule:: torch.nn.init
.. autofunction:: uniform
.. autofunction:: normal
.. autofunction:: constant
.. autofunction:: xavier_uniform
.. autofunction:: xavier_normal
.. autofunction:: kaiming_uniform
.. autofunction:: kaiming_normal
.. autofunction:: orthogonal
.. autofunction:: sparse

docs/source/notes/autograd.rst (new file, 144 lines)

Autograd mechanics
==================

This note will present an overview of how autograd works and records the
operations. It's not strictly necessary to understand all of this, but we
recommend getting familiar with it, as it will help you write more efficient,
cleaner programs, and can aid you in debugging.

.. _excluding-subgraphs:

Excluding subgraphs from backward
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Every Variable has two flags: :attr:`requires_grad` and :attr:`volatile`.
They both allow for fine-grained exclusion of subgraphs from gradient
computation, and can increase efficiency.

.. _excluding-requires_grad:

``requires_grad``
~~~~~~~~~~~~~~~~~

If there's a single input to an operation that requires gradient, its output
will also require gradient. Conversely, the output won't require gradient only
if no input does. Backward computation is never performed in subgraphs where
no Variable required gradients.

.. code::

    >>> x = Variable(torch.randn(5, 5))
    >>> y = Variable(torch.randn(5, 5))
    >>> z = Variable(torch.randn(5, 5), requires_grad=True)
    >>> a = x + y
    >>> a.requires_grad
    False
    >>> b = a + z
    >>> b.requires_grad
    True

This is especially useful when you want to freeze part of your model, or when
you know in advance that you're not going to use gradients w.r.t. some
parameters. For example, if you want to finetune a pretrained CNN, it's enough
to switch the :attr:`requires_grad` flags in the frozen base: no intermediate
buffers will be saved until the computation reaches the last layer, where the
affine transform uses weights that require gradient, so the output of the
network will also require them.

.. code::

    model = torchvision.models.resnet18(pretrained=True)
    for param in model.parameters():
        param.requires_grad = False
    # Replace the last fully-connected layer
    # Parameters of newly constructed modules have requires_grad=True by default
    model.fc = nn.Linear(512, 100)

    # Optimize only the classifier
    optimizer = optim.SGD(model.fc.parameters(), lr=1e-2, momentum=0.9)

``volatile``
~~~~~~~~~~~~

Volatile is recommended for purely inference mode, when you're sure you won't
even be calling `.backward()`. It's more efficient than any other autograd
setting - it will use the absolute minimal amount of memory to evaluate the
model. ``volatile`` also implies that ``requires_grad`` is ``False``.

Volatile differs from :ref:`excluding-requires_grad` in how the flag
propagates. If there's even a single volatile input to an operation, its
output is also going to be volatile. Volatility spreads across the graph much
more easily than not requiring gradient - you only need a **single** volatile
leaf to get a volatile output, while you need **all** leaves to not require
gradient to get an output that doesn't require gradient. Using the volatile
flag, you don't need to change any settings of your model parameters to use it
for inference. It's enough to create a volatile input, and this will ensure
that no intermediate states are saved.

.. code::

    >>> regular_input = Variable(torch.randn(5, 5))
    >>> volatile_input = Variable(torch.randn(5, 5), volatile=True)
    >>> model = torchvision.models.resnet18(pretrained=True)
    >>> model(regular_input).requires_grad
    True
    >>> model(volatile_input).requires_grad
    False
    >>> model(volatile_input).volatile
    True
    >>> model(volatile_input).creator is None
    True

How autograd encodes the history
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Each Variable has a ``.creator`` attribute that points to the function of
which it is an output. This is an entry point to a directed acyclic graph
(DAG) consisting of :class:`Function` objects as nodes, with the references
between them being the edges. Every time an operation is performed, a new
:class:`Function` representing it is instantiated, its
:meth:`~torch.autograd.Function.forward` method is called, and the creator of
each of its output :class:`Variable` s is set to it. Then, by following the
path from any :class:`Variable` to the leaves, it is possible to reconstruct
the sequence of operations that has created the data, and automatically
compute the gradients.

An important thing to note is that the graph is recreated from scratch at
every iteration, and this is exactly what allows for using arbitrary Python
control flow statements, which can change the overall shape and size of the
graph at every iteration. You don't have to encode all possible paths before
you launch the training - what you run is what you differentiate.
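
For example, here is a minimal sketch of poking at the recorded graph (the
exact type of the creator object will vary with the operation):

.. code::

    >>> x = Variable(torch.randn(2, 2), requires_grad=True)
    >>> y = (x + 2).mean()
    >>> y.creator is None   # y was produced by an operation, so it has a creator
    False
    >>> x.creator is None   # leaf Variables have no creator
    True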

In-place operations on Variables
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Supporting in-place operations in autograd is a hard matter, and we discourage
their use in most cases. Autograd's aggressive buffer freeing and reuse makes
it very efficient, and there are very few occasions when in-place operations
actually lower memory usage by any significant amount. Unless you're operating
under heavy memory pressure, you might never need to use them.

There are two main reasons that limit the applicability of in-place operations:

1. Overwriting values required to compute gradients. This is why variables don't
   support ``log_``. Its gradient formula requires the original input, and while
   it is possible to recreate it by computing the inverse operation, it is
   numerically unstable, and requires additional work that often defeats the
   purpose of using these functions.

2. Every in-place operation actually requires the implementation to rewrite the
   computational graph. Out-of-place versions simply allocate new objects and
   keep references to the old graph, while in-place operations require
   changing the creator of all inputs to the :class:`Function` representing
   this operation. This can be tricky, especially if there are many Variables
   that reference the same storage (e.g. created by indexing or transposing),
   and in-place functions will actually raise an error if the storage of
   modified inputs is referenced by any other :class:`Variable`.

In-place correctness checks
^^^^^^^^^^^^^^^^^^^^^^^^^^^

Every variable keeps a version counter, which is incremented every time it's
marked dirty in any operation. When a Function saves any tensors for backward,
a version counter of their containing Variable is saved as well. Every time
you access ``self.saved_tensors``, the counter is checked, and if it's greater
than the saved value, an error is raised.
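
For example, here is a minimal sketch of how such a check can trigger (the
exact error message may differ between versions):

.. code::

    >>> x = Variable(torch.randn(5), requires_grad=True)
    >>> y = x.tanh()    # tanh saves its output to compute the gradient
    >>> y.add_(1)       # marks y dirty and increments its version counter
    >>> y.backward(torch.ones(5))  # raises an error: the saved output changed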

docs/source/notes/cuda.rst (new file, 83 lines)

.. _cuda-semantics:

CUDA semantics
==============

:mod:`torch.cuda` keeps track of the currently selected GPU, and all CUDA
tensors you allocate will be created on it. The selected device can be changed
with a :any:`torch.cuda.device` context manager.

However, once a tensor is allocated, you can perform operations on it
irrespective of the selected device, and the results will always be placed on
the same device as the tensor.

Cross-GPU operations are not allowed by default, with the only exception of
:meth:`~torch.Tensor.copy_`. Unless you enable peer-to-peer memory access,
any attempt to launch ops on tensors spread across different devices will
raise an error.

Below you can find a small example showcasing this::

    x = torch.cuda.FloatTensor(1)
    # x.get_device() == 0
    y = torch.FloatTensor(1).cuda()
    # y.get_device() == 0

    with torch.cuda.device(1):
        # allocates a tensor on GPU 1
        a = torch.cuda.FloatTensor(1)

        # transfers a tensor from CPU to GPU 1
        b = torch.FloatTensor(1).cuda()
        # a.get_device() == b.get_device() == 1

        c = a + b
        # c.get_device() == 1

        z = x + y
        # z.get_device() == 0

        # even within a context, you can give a GPU id to the .cuda call
        d = torch.randn(2).cuda(2)
        # d.get_device() == 2

Best practices
--------------

Use pinned memory buffers
^^^^^^^^^^^^^^^^^^^^^^^^^

.. warning::

    This is an advanced tip. Overuse of pinned memory can cause serious
    problems when you're running low on RAM, and you should be aware that
    pinning is often an expensive operation.

Host to GPU copies are much faster when they originate from pinned
(page-locked) memory. CPU tensors and storages expose a
:meth:`~torch.Tensor.pin_memory` method, that returns a copy of the object,
with its data put in a pinned region.

Also, once you pin a tensor or storage, you can use asynchronous GPU copies.
Just pass an additional ``async=True`` argument to the
:meth:`~torch.Tensor.cuda` call. This can be used to overlap data transfers
with computation.

You can make the :class:`~torch.utils.data.DataLoader` return batches placed in
pinned memory by passing ``pin_memory=True`` to its constructor.
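
Putting these together, a minimal sketch (``dataset`` here stands in for
whatever dataset object you already have)::

    # pin a CPU tensor, then copy it to the GPU asynchronously
    x = torch.randn(100, 100).pin_memory()
    y = x.cuda(async=True)  # returns quickly; the copy overlaps with compute

    # ask the DataLoader to collate batches directly into pinned memory
    loader = torch.utils.data.DataLoader(dataset, batch_size=32,
                                         pin_memory=True)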

.. _cuda-nn-dataparallel-instead:

Use nn.DataParallel instead of multiprocessing
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Most use cases involving batched input and multiple GPUs should default to
using :class:`~torch.nn.DataParallel` to utilize more than one GPU. Even with
the GIL, a single Python process can saturate multiple GPUs.

As of version 0.1.9, large numbers of GPUs (8+) might not be fully utilized.
However, this is a known issue that is under active development. As always,
test your use case.

There are significant caveats to using CUDA models with
:mod:`~torch.multiprocessing`; unless care is taken to meet the data handling
requirements exactly, it is likely that your program will have incorrect or
undefined behavior.
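
As a minimal sketch of the recommended approach (assuming ``model`` and
``input`` are already defined)::

    import torch.nn as nn

    model = nn.DataParallel(model).cuda()  # replicate across available GPUs
    output = model(input)                  # the batch is split along dim 0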

docs/source/notes/extending.rst (new file, 169 lines)

Extending PyTorch
=================

In this note we'll cover ways of extending :mod:`torch.nn`,
:mod:`torch.autograd`, and writing custom C extensions utilizing our C
libraries.

Extending :mod:`torch.autograd`
-------------------------------

.. currentmodule:: torch.autograd

Adding operations to :mod:`~torch.autograd` requires implementing a new
:class:`Function` subclass for each operation. Recall that :class:`Function` s
are what :mod:`~torch.autograd` uses to compute the results and gradients, and
encode the operation history. Every new function requires you to implement
three methods:

- ``__init__`` (*optional*) - if your operation is parametrized by/uses
  objects different than :class:`Variable` s, you should pass them as arguments
  to ``__init__``. For example, the ``AddConstant`` function takes a scalar to
  add, while ``Transpose`` requires specifying which two dimensions to swap. If
  your function doesn't require any additional parameters, you can skip it.
- :meth:`~Function.forward` - the code that performs the operation. It can take
  as many arguments as you want, with some of them being optional, if you
  specify default values. Keep in mind that only :class:`Variable` s will be
  passed in here. You can return either a single :class:`Variable` output, or a
  :class:`tuple` of :class:`Variable` s if there are multiple. Also, please
  refer to the docs of :class:`Function` to find descriptions of useful methods
  that can be called only from :meth:`~Function.forward`.
- :meth:`~Function.backward` - gradient formula. It will be given as many
  arguments as there were outputs, with each of them representing the gradient
  w.r.t. that output. It should return as many :class:`Tensor` s as there were
  inputs, with each of them containing the gradient w.r.t. the corresponding
  input. If your inputs didn't require gradient (see
  :attr:`~Function.needs_input_grad`), or were non-differentiable, you can
  return :class:`None`. Also, if you have optional arguments to
  :meth:`~Function.forward`, you can return more gradients than there were
  inputs, as long as they're all :any:`python:None`.

Below you can find code for a ``Linear`` function from :mod:`torch.nn`, with
additional comments::

    # Inherit from Function
    class Linear(Function):

        # bias is an optional argument
        def forward(self, input, weight, bias=None):
            self.save_for_backward(input, weight, bias)
            output = input.mm(weight.t())
            if bias is not None:
                output += bias.unsqueeze(0).expand_as(output)
            return output

        # This function has only a single output, so it gets only one gradient
        def backward(self, grad_output):
            # This is a pattern that is very convenient - at the top of backward
            # unpack saved_tensors and initialize all gradients w.r.t. inputs to
            # None. Thanks to the fact that additional trailing Nones are
            # ignored, the return statement is simple even when the function has
            # optional inputs.
            input, weight, bias = self.saved_tensors
            grad_input = grad_weight = grad_bias = None

            # These needs_input_grad checks are optional and there only to
            # improve efficiency. If you want to make your code simpler, you can
            # skip them. Returning gradients for inputs that don't require it is
            # not an error.
            if self.needs_input_grad[0]:
                grad_input = grad_output.mm(weight)
            if self.needs_input_grad[1]:
                grad_weight = grad_output.t().mm(input)
            if bias is not None and self.needs_input_grad[2]:
                grad_bias = grad_output.sum(0).squeeze(0)

            return grad_input, grad_weight, grad_bias

Now, to make it easier to use these custom ops, we recommend wrapping them in
small helper functions::

    def linear(input, weight, bias=None):
        # First braces create a Function object. Any arguments given here
        # will be passed to __init__. Second braces will invoke the __call__
        # operator, that will then use forward() to compute the result and
        # return it.
        return Linear()(input, weight, bias)

You probably want to check if the backward method you implemented actually
computes the derivatives of your function. You can do that by comparing it
with numerical approximations using small finite differences::

    from torch.autograd import gradcheck

    # gradcheck takes a tuple of tensors as input, checks if your gradient
    # evaluated with these tensors is close enough to the numerical
    # approximations, and returns True if they all verify this condition.
    input = (Variable(torch.randn(20, 20).double(), requires_grad=True),)
    test = gradcheck.gradcheck(Linear(), input, eps=1e-6, atol=1e-4)
    print(test)

Extending :mod:`torch.nn`
-------------------------

.. currentmodule:: torch.nn

:mod:`~torch.nn` exports two kinds of interfaces - modules and their functional
versions. You can extend it in both ways, but we recommend using modules for
all kinds of layers that hold any parameters or buffers, and using the
functional form for parameter-less operations like activation functions,
pooling, etc.

Adding a functional version of an operation is already fully covered in the
section above.

Adding a :class:`Module`
^^^^^^^^^^^^^^^^^^^^^^^^

Since :mod:`~torch.nn` heavily utilizes :mod:`~torch.autograd`, adding a new
:class:`Module` requires implementing a :class:`~torch.autograd.Function`
that performs the operation and can compute the gradient. From now on let's
assume that we want to implement a ``Linear`` module, and we have the function
implemented as in the listing above. There's very little code required to
add this. Two methods need to be implemented:

- ``__init__`` (*optional*) - takes in arguments such as kernel sizes, numbers
  of features, etc. and initializes parameters and buffers.
- :meth:`~Module.forward` - instantiates a :class:`~torch.autograd.Function`
  and uses it to perform the operation. It's very similar to the functional
  wrapper shown above.

This is how a ``Linear`` module can be implemented::

    class Linear(nn.Module):
        def __init__(self, input_features, output_features, bias=True):
            super(Linear, self).__init__()
            self.input_features = input_features
            self.output_features = output_features

            # nn.Parameter is a special kind of Variable, that will get
            # automatically registered as Module's parameter once it's assigned
            # as an attribute. Parameters and buffers need to be registered, or
            # they won't appear in .parameters() (doesn't apply to buffers), and
            # won't be converted when e.g. .cuda() is called. You can use
            # .register_buffer() to register buffers.
            # nn.Parameters can never be volatile and, different than Variables,
            # they require gradients by default.
            # The weight is laid out as (output_features, input_features), so
            # that forward() can compute input.mm(weight.t()).
            self.weight = nn.Parameter(torch.Tensor(output_features, input_features))
            if bias:
                self.bias = nn.Parameter(torch.Tensor(output_features))
            else:
                # You should always register all possible parameters, but the
                # optional ones can be None if you want.
                self.register_parameter('bias', None)

            # Not a very smart way to initialize weights
            self.weight.data.uniform_(-0.1, 0.1)
            if self.bias is not None:
                self.bias.data.uniform_(-0.1, 0.1)

        def forward(self, input):
            # See the autograd section for explanation of what happens here.
            return Linear()(input, self.weight, self.bias)


Writing custom C extensions
---------------------------

Coming soon. For now you can find an example at
`GitHub <https://github.com/pytorch/extension-ffi>`_.

docs/source/notes/multiprocessing.rst (new file, 124 lines)

Multiprocessing best practices
==============================

:mod:`torch.multiprocessing` is a drop-in replacement for Python's
:mod:`python:multiprocessing` module. It supports the exact same operations,
but extends it, so that all tensors sent through a
:class:`python:multiprocessing.Queue` will have their data moved into shared
memory, and only a handle will be sent to the other process.

.. note::

    When a :class:`~torch.autograd.Variable` is sent to another process, both
    the :attr:`Variable.data` and :attr:`Variable.grad.data` are going to be
    shared.

This makes it possible to implement various training methods, like Hogwild,
A3C, or any others that require asynchronous operation.

Sharing CUDA tensors
--------------------

Sharing CUDA tensors between processes is supported only in Python 3, using
the ``spawn`` or ``forkserver`` start methods. :mod:`python:multiprocessing`
in Python 2 can only create subprocesses using ``fork``, and that's not
supported by the CUDA runtime.

.. warning::

    The CUDA API requires that allocations exported to other processes remain
    valid for as long as they're used by them. You should be careful and
    ensure that the CUDA tensors you share don't go out of scope for as long
    as necessary. This shouldn't be a problem for sharing model parameters,
    but passing other kinds of data should be done with care. Note that this
    restriction doesn't apply to shared CPU memory.

See also: :ref:`cuda-nn-dataparallel-instead`


Best practices and tips
-----------------------

Avoiding and fighting deadlocks
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

There are a lot of things that can go wrong when a new process is spawned, with
the most common cause of deadlocks being background threads. If there's any
thread that holds a lock or imports a module, and ``fork`` is called, it's very
likely that the subprocess will be in a corrupted state and will deadlock or
fail in a different way. Note that even if you don't do this, Python's built-in
libraries do - no need to look further than :mod:`python:multiprocessing`.
:class:`python:multiprocessing.Queue` is actually a very complex class that
spawns multiple threads used to serialize, send and receive objects, and they
can cause the aforementioned problems too. If you find yourself in such a
situation, try using a :class:`~python:multiprocessing.queues.SimpleQueue`,
which doesn't use any additional threads.

We're trying our best to make it easy for you and ensure these deadlocks don't
happen, but some things are out of our control. If you have any issues you
can't cope with for a while, try reaching out on the forums, and we'll see if
it's an issue we can fix.

Reuse buffers passed through a Queue
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Remember that each time you put a :class:`~torch.Tensor` into a
:class:`python:multiprocessing.Queue`, it has to be moved into shared memory.
If it's already shared, this is a no-op; otherwise it will incur an additional
memory copy that can slow down the whole process. Even if you have a pool of
processes sending data to a single one, make it send the buffers back - this
is nearly free and will let you avoid a copy when sending the next batch.

Asynchronous multiprocess training (e.g. Hogwild)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Using :mod:`torch.multiprocessing`, it is possible to train a model
asynchronously, with parameters either shared all the time, or periodically
synchronized. In the first case, we recommend sending over the whole model
object, while in the latter, we advise sending only the
:meth:`~torch.nn.Module.state_dict`.

We recommend using :class:`python:multiprocessing.Queue` for passing all kinds
of PyTorch objects between processes. It is possible to e.g. inherit tensors
and storages that are already in shared memory when using the ``fork`` start
method, however this is very bug-prone and should be used with care, and only
by advanced users. Queues, even though they're sometimes a less elegant
solution, will work properly in all cases.

.. warning::

    You should be careful about having global statements that are not guarded
    with an ``if __name__ == '__main__'`` block. If a start method different
    than ``fork`` is used, they will be executed in all subprocesses.

Hogwild
~~~~~~~

A concrete Hogwild implementation can be found in the `examples repository`__,
but to showcase the overall structure of the code, there's also a minimal
example below::

    import torch.multiprocessing as mp
    from model import MyModel

    def train(model):
        # Construct data_loader, optimizer, etc.
        for data, labels in data_loader:
            optimizer.zero_grad()
            loss_fn(model(data), labels).backward()
            optimizer.step()  # This will update the shared parameters

    if __name__ == '__main__':
        num_processes = 4
        model = MyModel()
        # NOTE: this is required for the ``fork`` method to work
        model.share_memory()
        processes = []
        for rank in range(num_processes):
            p = mp.Process(target=train, args=(model,))
            p.start()
            processes.append(p)
        for p in processes:
            p.join()

.. __: https://github.com/pytorch/examples/tree/master/mnist_hogwild

docs/source/notes/serialization.rst (new file, 34 lines)

Serialization semantics
=======================

Best practices
--------------

.. _recommend-saving-models:

Recommended approach for saving a model
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

There are two main approaches for serializing and restoring a model.

The first (recommended) saves and loads only the model parameters::

    torch.save(the_model.state_dict(), PATH)

Then later::

    the_model = TheModelClass(*args, **kwargs)
    the_model.load_state_dict(torch.load(PATH))

The second saves and loads the entire model::

    torch.save(the_model, PATH)

Then later::

    the_model = torch.load(PATH)

However, in this case the serialized data is bound to the specific classes
and the exact directory structure used, so it can break in various ways when
used in other projects, or after some serious refactors.

docs/source/optim.rst (new file, 116 lines)

torch.optim
===========

.. automodule:: torch.optim

How to use an optimizer
-----------------------

To use :mod:`torch.optim` you have to construct an optimizer object that will
hold the current state and will update the parameters based on the computed
gradients.

Constructing it
^^^^^^^^^^^^^^^

To construct an :class:`Optimizer` you have to give it an iterable containing
the parameters (all should be :class:`~torch.autograd.Variable` s) to
optimize. Then, you can specify optimizer-specific options such as the
learning rate, weight decay, etc.

Example::

    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    optimizer = optim.Adam([var1, var2], lr=0.0001)

Per-parameter options
^^^^^^^^^^^^^^^^^^^^^

:class:`Optimizer` s also support specifying per-parameter options. To do
this, instead of passing an iterable of :class:`~torch.autograd.Variable` s,
pass in an iterable of :class:`dict` s. Each of them will define a separate
parameter group, and should contain a ``params`` key, containing a list of
parameters belonging to it. Other keys should match the keyword arguments
accepted by the optimizers, and will be used as optimization options for this
group.

.. note::

    You can still pass options as keyword arguments. They will be used as
    defaults, in the groups that didn't override them. This is useful when you
    only want to vary a single option, while keeping all others consistent
    between parameter groups.

For example, this is very useful when one wants to specify per-layer learning
rates::

    optim.SGD([
        {'params': model.base.parameters()},
        {'params': model.classifier.parameters(), 'lr': 1e-3}
    ], lr=1e-2, momentum=0.9)

This means that ``model.base``'s parameters will use the default learning rate
of ``1e-2``, ``model.classifier``'s parameters will use a learning rate of
``1e-3``, and a momentum of ``0.9`` will be used for all parameters.

Taking an optimization step
^^^^^^^^^^^^^^^^^^^^^^^^^^^

All optimizers implement a :func:`~Optimizer.step` method that updates the
parameters. It can be used in two ways:

``optimizer.step()``
~~~~~~~~~~~~~~~~~~~~

This is a simplified version supported by most optimizers. The function can be
called once the gradients are computed using e.g.
:func:`~torch.autograd.Variable.backward`.

Example::

    for input, target in dataset:
        optimizer.zero_grad()
        output = model(input)
        loss = loss_fn(output, target)
        loss.backward()
        optimizer.step()

``optimizer.step(closure)``
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Some optimization algorithms such as Conjugate Gradient and LBFGS need to
reevaluate the function multiple times, so you have to pass in a closure that
allows them to recompute your model. The closure should clear the gradients,
compute the loss, and return it.

Example::

    for input, target in dataset:
        def closure():
            optimizer.zero_grad()
            output = model(input)
            loss = loss_fn(output, target)
            loss.backward()
            return loss
        optimizer.step(closure)

Algorithms
----------

.. autoclass:: Optimizer
    :members:
.. autoclass:: Adadelta
    :members:
.. autoclass:: Adagrad
    :members:
.. autoclass:: Adam
    :members:
.. autoclass:: Adamax
    :members:
.. autoclass:: ASGD
    :members:
.. autoclass:: LBFGS
    :members:
.. autoclass:: RMSprop
    :members:
.. autoclass:: Rprop
    :members:
.. autoclass:: SGD
    :members:

docs/source/storage.rst (new file, 12 lines)

torch.Storage
=============

A :class:`torch.Storage` is a contiguous, one-dimensional array of a single
data type.

Every :class:`torch.Tensor` has a corresponding storage of the same data type.
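
For example, a minimal sketch of the tensor/storage relationship::

    >>> x = torch.FloatTensor(2, 3).zero_()
    >>> s = x.storage()   # the underlying 1-D FloatStorage
    >>> len(s)            # 2 * 3 elements, laid out contiguously
    6
    >>> s[0] = 42         # mutating the storage mutates the tensor
    >>> x[0][0]
    42.0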

.. autoclass:: torch.FloatStorage
    :members:
    :undoc-members:
    :inherited-members:

docs/source/tensors.rst (new file, 308 lines)

.. currentmodule:: torch

torch.Tensor
============

A :class:`torch.Tensor` is a multi-dimensional matrix containing elements of
a single data type.

Torch defines seven CPU tensor types and eight GPU tensor types:

======================== =========================== ================================
Data type                CPU tensor                  GPU tensor
======================== =========================== ================================
32-bit floating point    :class:`torch.FloatTensor`  :class:`torch.cuda.FloatTensor`
64-bit floating point    :class:`torch.DoubleTensor` :class:`torch.cuda.DoubleTensor`
16-bit floating point    N/A                         :class:`torch.cuda.HalfTensor`
8-bit integer (unsigned) :class:`torch.ByteTensor`   :class:`torch.cuda.ByteTensor`
8-bit integer (signed)   :class:`torch.CharTensor`   :class:`torch.cuda.CharTensor`
16-bit integer (signed)  :class:`torch.ShortTensor`  :class:`torch.cuda.ShortTensor`
32-bit integer (signed)  :class:`torch.IntTensor`    :class:`torch.cuda.IntTensor`
64-bit integer (signed)  :class:`torch.LongTensor`   :class:`torch.cuda.LongTensor`
======================== =========================== ================================

The :class:`torch.Tensor` constructor is an alias for the default tensor type
(:class:`torch.FloatTensor`).

A tensor can be constructed from a Python :class:`list` or sequence:

::

    >>> torch.FloatTensor([[1, 2, 3], [4, 5, 6]])
    1 2 3
    4 5 6
    [torch.FloatTensor of size 2x3]

An empty tensor can be constructed by specifying its size:

::

    >>> torch.IntTensor(2, 4).zero_()
    0 0 0 0
    0 0 0 0
    [torch.IntTensor of size 2x4]

The contents of a tensor can be accessed and modified using Python's indexing
and slicing notation:

::

    >>> x = torch.FloatTensor([[1, 2, 3], [4, 5, 6]])
    >>> print(x[1][2])
    6.0
    >>> x[0][1] = 8
    >>> print(x)
    1 8 3
    4 5 6
    [torch.FloatTensor of size 2x3]

Each tensor has an associated :class:`torch.Storage`, which holds its data.
The tensor class provides a multi-dimensional, `strided <https://en.wikipedia.org/wiki/Stride_of_an_array>`_
view of a storage and defines numeric operations on it.

.. note::
    Methods which mutate a tensor are marked with an underscore suffix.
    For example, :func:`torch.FloatTensor.abs_` computes the absolute value
    in-place and returns the modified tensor, while :func:`torch.FloatTensor.abs`
    computes the result in a new tensor.
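
For example::

    >>> x = torch.FloatTensor([-1, -2, 3])
    >>> y = x.abs()   # out-of-place: x is unchanged, y holds the result
    >>> x.abs_()      # in-place: modifies x and returns it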

.. class:: Tensor()
           Tensor(*sizes)
           Tensor(size)
           Tensor(sequence)
           Tensor(ndarray)
           Tensor(tensor)
           Tensor(storage)

   Creates a new tensor from an optional size or data.

   If no arguments are given, an empty zero-dimensional tensor is returned.
   If a :class:`numpy.ndarray`, :class:`torch.Tensor`, or :class:`torch.Storage`
   is given, a new tensor that shares the same data is returned. If a Python
   sequence is given, a new tensor is created from a copy of the sequence.

   .. automethod:: abs
   .. automethod:: abs_
   .. automethod:: acos
   .. automethod:: acos_
   .. automethod:: add
   .. automethod:: add_
   .. automethod:: addbmm
   .. automethod:: addbmm_
   .. automethod:: addcdiv
   .. automethod:: addcdiv_
   .. automethod:: addcmul
   .. automethod:: addcmul_
   .. automethod:: addmm
   .. automethod:: addmm_
   .. automethod:: addmv
   .. automethod:: addmv_
   .. automethod:: addr
   .. automethod:: addr_
   .. automethod:: apply_
   .. automethod:: asin
   .. automethod:: asin_
   .. automethod:: atan
   .. automethod:: atan2
   .. automethod:: atan2_
   .. automethod:: atan_
   .. automethod:: baddbmm
   .. automethod:: baddbmm_
   .. automethod:: bernoulli
   .. automethod:: bernoulli_
   .. automethod:: bmm
   .. automethod:: byte
   .. automethod:: cauchy_
   .. automethod:: ceil
   .. automethod:: ceil_
   .. automethod:: char
   .. automethod:: chunk
   .. automethod:: clamp
   .. automethod:: clamp_
   .. automethod:: clone
   .. automethod:: contiguous
   .. automethod:: copy_
   .. automethod:: cos
   .. automethod:: cos_
   .. automethod:: cosh
   .. automethod:: cosh_
   .. automethod:: cpu
   .. automethod:: cross
   .. automethod:: cuda
   .. automethod:: cumprod
   .. automethod:: cumsum
   .. automethod:: data_ptr
   .. automethod:: diag
   .. automethod:: dim
   .. automethod:: dist
   .. automethod:: div
   .. automethod:: div_
   .. automethod:: dot
   .. automethod:: double
   .. automethod:: eig
   .. automethod:: element_size
   .. automethod:: eq
   .. automethod:: eq_
   .. automethod:: equal
   .. automethod:: exp
   .. automethod:: exp_
   .. automethod:: expand
   .. automethod:: expand_as
   .. automethod:: exponential_
   .. automethod:: fill_
   .. automethod:: float
   .. automethod:: floor
   .. automethod:: floor_
   .. automethod:: fmod
   .. automethod:: fmod_
   .. automethod:: frac
   .. automethod:: frac_
   .. automethod:: gather
   .. automethod:: ge
   .. automethod:: ge_
   .. automethod:: gels
   .. automethod:: geometric_
   .. automethod:: geqrf
   .. automethod:: ger
   .. automethod:: gesv
   .. automethod:: gt
   .. automethod:: gt_
   .. automethod:: half
   .. automethod:: histc
   .. automethod:: index
   .. automethod:: index_add_
   .. automethod:: index_copy_
   .. automethod:: index_fill_
   .. automethod:: index_select
   .. automethod:: int
   .. automethod:: inverse
   .. automethod:: is_contiguous
   .. autoattribute:: is_cuda
      :annotation:
   .. automethod:: is_pinned
   .. automethod:: is_set_to
   .. automethod:: is_signed
   .. automethod:: kthvalue
   .. automethod:: le
   .. automethod:: le_
   .. automethod:: lerp
   .. automethod:: lerp_
   .. automethod:: log
   .. automethod:: log1p
   .. automethod:: log1p_
   .. automethod:: log_
   .. automethod:: log_normal_
   .. automethod:: long
   .. automethod:: lt
   .. automethod:: lt_
   .. automethod:: map_
   .. automethod:: masked_copy_
   .. automethod:: masked_fill_
   .. automethod:: masked_select
   .. automethod:: max
   .. automethod:: mean
   .. automethod:: median
   .. automethod:: min
   .. automethod:: mm
   .. automethod:: mode
   .. automethod:: mul
   .. automethod:: mul_
   .. automethod:: multinomial
   .. automethod:: mv
   .. automethod:: narrow
   .. automethod:: ndimension
   .. automethod:: ne
   .. automethod:: ne_
   .. automethod:: neg
   .. automethod:: neg_
   .. automethod:: nelement
   .. automethod:: new
   .. automethod:: nonzero
   .. automethod:: norm
   .. automethod:: normal_
   .. automethod:: numel
   .. automethod:: numpy
   .. automethod:: orgqr
   .. automethod:: ormqr
   .. automethod:: permute
   .. automethod:: pin_memory
   .. automethod:: potrf
   .. automethod:: potri
   .. automethod:: potrs
   .. automethod:: pow
   .. automethod:: pow_
   .. automethod:: prod
   .. automethod:: pstrf
   .. automethod:: qr
   .. automethod:: random_
   .. automethod:: reciprocal
   .. automethod:: reciprocal_
   .. automethod:: remainder
   .. automethod:: remainder_
   .. automethod:: renorm
   .. automethod:: renorm_
   .. automethod:: repeat
   .. automethod:: resize_
   .. automethod:: resize_as_
   .. automethod:: round
   .. automethod:: round_
   .. automethod:: rsqrt
   .. automethod:: rsqrt_
   .. automethod:: scatter_
   .. automethod:: select
   .. automethod:: set_
   .. automethod:: share_memory_
   .. automethod:: short
   .. automethod:: sigmoid
   .. automethod:: sigmoid_
   .. automethod:: sign
   .. automethod:: sign_
   .. automethod:: sin
   .. automethod:: sin_
   .. automethod:: sinh
   .. automethod:: sinh_
   .. automethod:: size
   .. automethod:: sort
   .. automethod:: split
   .. automethod:: sqrt
   .. automethod:: sqrt_
   .. automethod:: squeeze
   .. automethod:: squeeze_
   .. automethod:: std
   .. automethod:: storage
   .. automethod:: storage_offset
   .. automethod:: storage_type
   .. automethod:: stride
   .. automethod:: sub
   .. automethod:: sub_
   .. automethod:: sum
   .. automethod:: svd
   .. automethod:: symeig
   .. automethod:: t
   .. automethod:: t_
   .. automethod:: tan
   .. automethod:: tan_
   .. automethod:: tanh
   .. automethod:: tanh_
   .. automethod:: tolist
   .. automethod:: topk
   .. automethod:: trace
   .. automethod:: transpose
   .. automethod:: transpose_
   .. automethod:: tril
   .. automethod:: tril_
   .. automethod:: triu
   .. automethod:: triu_
   .. automethod:: trtrs
   .. automethod:: trunc
   .. automethod:: trunc_
   .. automethod:: type
   .. automethod:: type_as
   .. automethod:: unfold
   .. automethod:: uniform_
   .. automethod:: unsqueeze
   .. automethod:: unsqueeze_
   .. automethod:: var
   .. automethod:: view
   .. automethod:: view_as
   .. automethod:: zero_

docs/source/torch.rst (new file, 184 lines)

torch
=====
.. automodule:: torch

Tensors
-------
.. autofunction:: is_tensor
.. autofunction:: is_storage
.. autofunction:: set_default_tensor_type
.. autofunction:: numel
.. autofunction:: set_printoptions


Creation Ops
~~~~~~~~~~~~
.. autofunction:: eye
.. autofunction:: from_numpy
.. autofunction:: linspace
.. autofunction:: logspace
.. autofunction:: ones
.. autofunction:: rand
.. autofunction:: randn
.. autofunction:: randperm
.. autofunction:: range
.. autofunction:: zeros


Indexing, Slicing, Joining, Mutating Ops
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autofunction:: cat
.. autofunction:: chunk
.. autofunction:: gather
.. autofunction:: index_select
.. autofunction:: masked_select
.. autofunction:: nonzero
.. autofunction:: split
.. autofunction:: squeeze
.. autofunction:: stack
.. autofunction:: t
.. autofunction:: transpose
.. autofunction:: unbind
.. autofunction:: unsqueeze


Random sampling
---------------
.. autofunction:: manual_seed
.. autofunction:: initial_seed
.. autofunction:: get_rng_state
.. autofunction:: set_rng_state
.. autodata:: default_generator
.. autofunction:: bernoulli
.. autofunction:: multinomial
.. autofunction:: normal


Serialization
-------------
.. autofunction:: save
.. autofunction:: load


Parallelism
-----------
.. autofunction:: get_num_threads
.. autofunction:: set_num_threads


Math operations
---------------

Pointwise Ops
~~~~~~~~~~~~~

.. autofunction:: abs
.. autofunction:: acos
.. autofunction:: add
.. autofunction:: addcdiv
.. autofunction:: addcmul
.. autofunction:: asin
.. autofunction:: atan
.. autofunction:: atan2
.. autofunction:: ceil
.. autofunction:: clamp
.. autofunction:: cos
.. autofunction:: cosh
.. autofunction:: div
.. autofunction:: exp
.. autofunction:: floor
.. autofunction:: fmod
.. autofunction:: frac
.. autofunction:: lerp
.. autofunction:: log
.. autofunction:: log1p
.. autofunction:: mul
.. autofunction:: neg
.. autofunction:: pow
.. autofunction:: reciprocal
.. autofunction:: remainder
.. autofunction:: round
.. autofunction:: rsqrt
.. autofunction:: sigmoid
.. autofunction:: sign
.. autofunction:: sin
.. autofunction:: sinh
.. autofunction:: sqrt
.. autofunction:: tan
.. autofunction:: tanh
.. autofunction:: trunc


Reduction Ops
~~~~~~~~~~~~~
.. autofunction:: cumprod
.. autofunction:: cumsum
.. autofunction:: dist
.. autofunction:: mean
.. autofunction:: median
.. autofunction:: mode
.. autofunction:: norm
.. autofunction:: prod
.. autofunction:: std
.. autofunction:: sum
.. autofunction:: var


Comparison Ops
~~~~~~~~~~~~~~
.. autofunction:: eq
.. autofunction:: equal
.. autofunction:: ge
.. autofunction:: gt
.. autofunction:: kthvalue
.. autofunction:: le
.. autofunction:: lt
.. autofunction:: max
.. autofunction:: min
.. autofunction:: ne
.. autofunction:: sort
.. autofunction:: topk


Other Operations
~~~~~~~~~~~~~~~~
.. autofunction:: cross
.. autofunction:: diag
.. autofunction:: histc
.. autofunction:: renorm
.. autofunction:: trace
.. autofunction:: tril
.. autofunction:: triu


BLAS and LAPACK Operations
~~~~~~~~~~~~~~~~~~~~~~~~~~

.. autofunction:: addbmm
.. autofunction:: addmm
.. autofunction:: addmv
.. autofunction:: addr
.. autofunction:: baddbmm
.. autofunction:: bmm
.. autofunction:: btrifact
.. autofunction:: btrisolve
.. autofunction:: dot
.. autofunction:: eig
.. autofunction:: gels
.. autofunction:: geqrf
.. autofunction:: ger
.. autofunction:: gesv
.. autofunction:: inverse
.. autofunction:: mm
.. autofunction:: mv
.. autofunction:: orgqr
.. autofunction:: ormqr
.. autofunction:: potrf
.. autofunction:: potri
.. autofunction:: potrs
.. autofunction:: pstrf
.. autofunction:: qr
.. autofunction:: svd
.. autofunction:: symeig
.. autofunction:: trtrs

docs/source/torchvision/datasets.rst (new file, 162 lines)

torchvision.datasets
====================

The following dataset loaders are available:

- `MNIST`_
- `COCO (Captioning and Detection)`_
- `LSUN Classification`_
- `ImageFolder`_
- `Imagenet-12`_
- `CIFAR10 and CIFAR100`_
- `STL10`_

Datasets have the API:

- ``__getitem__``
- ``__len__``

They all subclass from ``torch.utils.data.Dataset``. Hence, they can all be
loaded in parallel (using Python multiprocessing) with a standard
``torch.utils.data.DataLoader``.

For example:

``torch.utils.data.DataLoader(coco_cap, batch_size=args.batchSize, shuffle=True, num_workers=args.nThreads)``

In the constructor, each dataset has a slightly different API as needed,
but they all take the keyword args:

- ``transform`` - a function that takes in an image and returns a
  transformed version - common stuff like ``ToTensor``, ``RandomCrop``, etc.
  These can be composed together with ``transforms.Compose`` (see the
  transforms section below)
- ``target_transform`` - a function that takes in the target and
  transforms it. For example, take in the caption string and return a
  tensor of word indices.

MNIST
~~~~~

``dset.MNIST(root, train=True, transform=None, target_transform=None, download=False)``

- ``root`` : root directory of dataset where ``processed/training.pt`` and ``processed/test.pt`` exist.
- ``train`` : ``True`` = Training set, ``False`` = Test set
- ``download`` : ``True`` = downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, place the processed dataset (a function for this is available in ``mnist.py``) in the ``processed`` folder.

COCO
~~~~

This requires the `COCO API to be installed`_.

Captions:
^^^^^^^^^

``dset.CocoCaptions(root="dir where images are", annFile="json annotation file", [transform, target_transform])``

Example:

.. code:: python

    import torchvision.datasets as dset
    import torchvision.transforms as transforms
    cap = dset.CocoCaptions(root = 'dir where images are',
                            annFile = 'json annotation file',
                            transform=transforms.ToTensor())

    print('Number of samples: ', len(cap))
    img, target = cap[3] # load 4th sample

    print("Image Size: ", img.size())
    print(target)

Output:

::

    Number of samples: 82783
    Image Size: (3L, 427L, 640L)
    [u'A plane emitting smoke stream flying over a mountain.',
    u'A plane darts across a bright blue sky behind a mountain covered in snow',
    u'A plane leaves a contrail above the snowy mountain top.',
    u'A mountain that has a plane flying overheard in the distance.',
    u'A mountain view with a plume of smoke in the background']

Detection:
^^^^^^^^^^

``dset.CocoDetection(root="dir where images are", annFile="json annotation file", [transform, target_transform])``

LSUN
~~~~

``dset.LSUN(db_path, classes='train', [transform, target_transform])``

- ``db_path`` - root directory for the database files
- ``classes`` = ``'train'`` (all categories, training set), ``'val'`` (all categories, validation set), ``'test'`` (all categories, test set)
- [``'bedroom_train'``, ``'church_train'``, ...] : a list of categories to load

ImageFolder
~~~~~~~~~~~

A generic data loader where the images are arranged in this way:

::

    root/dog/xxx.png
    root/dog/xxy.png
    root/dog/xxz.png

    root/cat/123.png
    root/cat/nsdf3.png
    root/cat/asd932_.png

``dset.ImageFolder(root="root folder path", [transform, target_transform])``

It has the members:

- ``self.classes`` - The class names as a list
- ``self.class_to_idx`` - Corresponding class indices
- ``self.imgs`` - The list of (image path, class-index) tuples
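
For example, a minimal sketch (the root path is a stand-in for your own
folder layout):

.. code:: python

    import torchvision.datasets as dset
    import torchvision.transforms as transforms

    data = dset.ImageFolder(root='root folder path',
                            transform=transforms.ToTensor())
    print(data.classes)        # e.g. ['cat', 'dog']
    img, class_idx = data[0]   # the first (image, class-index) sample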
|
||||
|
||||
Imagenet-12
~~~~~~~~~~~

This is simply implemented with an ImageFolder dataset.

The data is preprocessed `as described
here <https://github.com/facebook/fb.resnet.torch/blob/master/INSTALL.md#download-the-imagenet-dataset>`__.

`Here is an
example <https://github.com/pytorch/examples/blob/27e2a46c1d1505324032b1d94fc6ce24d5b67e97/imagenet/main.py#L48-L62>`__.
CIFAR
~~~~~

``dset.CIFAR10(root, train=True, transform=None, target_transform=None, download=False)``

``dset.CIFAR100(root, train=True, transform=None, target_transform=None, download=False)``

- ``root``: root directory of the dataset where the ``cifar-10-batches-py`` folder exists.
- ``train``: ``True`` = training set, ``False`` = test set
- ``download``: ``True`` = downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it doesn't do anything.
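
A minimal sketch (``./data`` is a placeholder root):

.. code:: python

    import torchvision.datasets as dset
    import torchvision.transforms as transforms

    train_set = dset.CIFAR10(root='./data', train=True,
                             transform=transforms.ToTensor(), download=True)
    img, label = train_set[0]  # img is a 3x32x32 tensor
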
STL10
~~~~~

``dset.STL10(root, split='train', transform=None, target_transform=None, download=False)``

- ``root``: root directory of the dataset where the ``stl10_binary`` folder exists.
- ``split``: ``'train'`` = training set, ``'test'`` = test set, ``'unlabeled'`` = unlabeled set, ``'train+unlabeled'`` = training + unlabeled set (missing labels are marked as ``-1``)
- ``download``: ``True`` = downloads the dataset from the internet and puts it in the root directory. If the dataset is already downloaded, it doesn't do anything.
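
A sketch of the semi-supervised split (``./data`` is a placeholder root);
unlabeled images come back with the label ``-1``:

.. code:: python

    import torchvision.datasets as dset

    stl = dset.STL10(root='./data', split='train+unlabeled', download=True)
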
.. _MNIST: #mnist
.. _COCO (Captioning and Detection): #coco
.. _LSUN Classification: #lsun
.. _ImageFolder: #imagefolder
.. _Imagenet-12: #imagenet-12
.. _CIFAR10 and CIFAR100: #cifar
.. _STL10: #stl10
.. _COCO API to be installed: https://github.com/pdollar/coco/tree/master/PythonAPI
docs/source/torchvision/models.rst (new file, 11 lines)
@@ -0,0 +1,11 @@
torchvision.models
==================

.. currentmodule:: torchvision.models


.. automodule:: torchvision.models
   :members: alexnet, resnet18, resnet34, resnet50, resnet101, resnet152,
             vgg11, vgg11_bn, vgg13, vgg13_bn, vgg16, vgg16_bn, vgg19,
             vgg19_bn
   :undoc-members:
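
For illustration, a hedged sketch of constructing one of the listed
architectures (weights are randomly initialized here):

.. code:: python

    import torch
    from torch.autograd import Variable
    import torchvision.models as models

    resnet = models.resnet18()
    out = resnet(Variable(torch.randn(1, 3, 224, 224)))  # 1x1000 scores
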
docs/source/torchvision/torchvision.rst (new file, 5 lines)
@@ -0,0 +1,5 @@
torchvision
===========

The :mod:`torchvision` package consists of popular datasets, model
architectures, and common image transformations for computer vision.
docs/source/torchvision/transforms.rst (new file, 40 lines)
@@ -0,0 +1,40 @@
torchvision.transforms
======================

.. currentmodule:: torchvision.transforms

.. autoclass:: Compose

Transforms on PIL.Image
-----------------------

.. autoclass:: Scale

.. autoclass:: CenterCrop

.. autoclass:: RandomCrop

.. autoclass:: RandomHorizontalFlip

.. autoclass:: RandomSizedCrop

.. autoclass:: Pad

Transforms on torch.\*Tensor
----------------------------

.. autoclass:: Normalize


Conversion Transforms
---------------------

.. autoclass:: ToTensor

.. autoclass:: ToPILImage

Generic Transforms
------------------

.. autoclass:: Lambda
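
For illustration, a typical pipeline composed from the classes above (the
mean/std values are the usual ImageNet statistics, shown here only as an
assumption for the example):

.. code:: python

    import torchvision.transforms as transforms

    # PIL.Image -> resized, cropped, normalized FloatTensor
    preprocess = transforms.Compose([
        transforms.Scale(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
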
docs/source/torchvision/utils.rst (new file, 9 lines)
@@ -0,0 +1,9 @@
torchvision.utils
=================

.. currentmodule:: torchvision.utils

.. autofunction:: make_grid

.. autofunction:: save_image
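
A short sketch of tiling a batch into a single image grid (``nrow`` sets
how many images go in each row):

.. code:: python

    import torch
    import torchvision.utils as vutils

    batch = torch.randn(16, 3, 32, 32)      # stand-in for real images
    grid = vutils.make_grid(batch, nrow=4)  # one 3xHxW tensor
    vutils.save_image(batch, 'samples.png', nrow=4)
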
setup.py (340 changed lines)
@@ -1,6 +1,9 @@
from setuptools import setup, Extension, distutils, Command, find_packages
import setuptools.command.build_ext
import setuptools.command.install
import setuptools.command.develop
import setuptools.command.build_py
import distutils.unixccompiler
import distutils.command.build
import distutils.command.clean
import platform
@@ -9,21 +12,32 @@ import shutil
import sys
import os

# TODO: make this more robust
WITH_CUDA = os.path.exists('/Developer/NVIDIA/CUDA-7.5/include') or os.path.exists('/usr/local/cuda/include')
DEBUG = False
from tools.setup_helpers.env import check_env_flag
from tools.setup_helpers.cuda import WITH_CUDA, CUDA_HOME
from tools.setup_helpers.cudnn import WITH_CUDNN, CUDNN_LIB_DIR, CUDNN_INCLUDE_DIR
DEBUG = check_env_flag('DEBUG')
WITH_DISTRIBUTED = check_env_flag('WITH_DISTRIBUTED')
WITH_DISTRIBUTED_MW = WITH_DISTRIBUTED and check_env_flag('WITH_DISTRIBUTED_MW')
WITH_NCCL = WITH_CUDA and platform.system() != 'Darwin'
SYSTEM_NCCL = False

################################################################################
# Monkey-patch setuptools to compile in parallel
################################################################################
original_link = distutils.unixccompiler.UnixCCompiler.link


def parallelCCompile(self, sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
def parallelCCompile(self, sources, output_dir=None, macros=None,
                     include_dirs=None, debug=0, extra_preargs=None,
                     extra_postargs=None, depends=None):
    # those lines are copied from distutils.ccompiler.CCompiler directly
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
    macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
        output_dir, macros, include_dirs, sources, depends, extra_postargs)
    cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)

    # compile using a thread pool
    import multiprocessing.pool

    def _single_compile(obj):
        src, ext = build[obj]
        self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
@@ -32,12 +46,23 @@ def parallelCCompile(self, sources, output_dir=None, macros=None, include_dirs=N

    return objects


def patched_link(self, *args, **kwargs):
    _cxx = self.compiler_cxx
    self.compiler_cxx = None
    result = original_link(self, *args, **kwargs)
    self.compiler_cxx = _cxx
    return result


distutils.ccompiler.CCompiler.compile = parallelCCompile
distutils.unixccompiler.UnixCCompiler.link = patched_link

################################################################################
# Custom build commands
################################################################################


class build_deps(Command):
    user_options = []
@@ -52,6 +77,10 @@ class build_deps(Command):
        build_all_cmd = ['bash', 'torch/lib/build_all.sh']
        if WITH_CUDA:
            build_all_cmd += ['--with-cuda']
        if WITH_NCCL and not SYSTEM_NCCL:
            build_all_cmd += ['--with-nccl']
        if WITH_DISTRIBUTED:
            build_all_cmd += ['--with-distributed']
        if subprocess.call(build_all_cmd) != 0:
            sys.exit(1)
        generate_nn_wrappers()
@@ -71,22 +100,72 @@ class build_module(Command):
        self.run_command('build_ext')


class build_ext(setuptools.command.build_ext.build_ext):
class build_py(setuptools.command.build_py.build_py):

    def run(self):
        self.create_version_file()
        setuptools.command.build_py.build_py.run(self)

    @staticmethod
    def create_version_file():
        global version, cwd
        print('-- Building version ' + version)
        version_path = os.path.join(cwd, 'torch', 'version.py')
        with open(version_path, 'w') as f:
            f.write("__version__ = '{}'\n".format(version))


class develop(setuptools.command.develop.develop):

    def run(self):
        build_py.create_version_file()
        setuptools.command.develop.develop.run(self)


class build_ext(setuptools.command.build_ext.build_ext):

    def run(self):
        # Print build options
        if WITH_NUMPY:
            print('-- Building with NumPy bindings')
        else:
            print('-- NumPy not found')
        if WITH_CUDNN:
            print('-- Detected cuDNN at ' + CUDNN_LIB_DIR + ', ' + CUDNN_INCLUDE_DIR)
        else:
            print('-- Not using cuDNN')
        if WITH_CUDA:
            print('-- Detected CUDA at ' + CUDA_HOME)
        else:
            print('-- Not using CUDA')
        if WITH_NCCL and SYSTEM_NCCL:
            print('-- Using system provided NCCL library')
        elif WITH_NCCL:
            print('-- Building NCCL library')
        else:
            print('-- Not using NCCL')

        # cwrap depends on pyyaml, so we can't import it earlier
        from tools.cwrap import cwrap
        from tools.cwrap.plugins.THPPlugin import THPPlugin
        from tools.cwrap.plugins.THPLongArgsPlugin import THPLongArgsPlugin
        from tools.cwrap.plugins.ArgcountSortPlugin import ArgcountSortPlugin
        from tools.cwrap.plugins.AutoGPU import AutoGPU
        from tools.cwrap.plugins.BoolOption import BoolOption
        from tools.cwrap.plugins.KwargsPlugin import KwargsPlugin
        from tools.cwrap.plugins.NullableArguments import NullableArguments
        from tools.cwrap.plugins.CuDNNPlugin import CuDNNPlugin
        thp_plugin = THPPlugin()
        cwrap('torch/csrc/generic/TensorMethods.cwrap', plugins=[
            THPLongArgsPlugin(), THPPlugin(), ArgcountSortPlugin(), AutoGPU()
            BoolOption(), thp_plugin, AutoGPU(condition='IS_CUDA'),
            ArgcountSortPlugin(), KwargsPlugin()
        ])
        cwrap('torch/csrc/cudnn/cuDNN.cwrap', plugins=[
            CuDNNPlugin(), NullableArguments()
        ])
        # It's an old-style class in Python 2.7...
        setuptools.command.build_ext.build_ext.run(self)


class build(distutils.command.build.build):
    sub_commands = [
        ('build_deps', lambda self: True),
@@ -94,6 +173,7 @@ class build(distutils.command.build.build):


class install(setuptools.command.install.install):

    def run(self):
        if not self.skip_build:
            self.run_command('build_deps')
@@ -101,23 +181,34 @@ class install(setuptools.command.install.install):


class clean(distutils.command.clean.clean):

    def run(self):
        import glob
        with open('.gitignore', 'r') as f:
            ignores = f.read()
            for glob in filter(bool, ignores.split('\n')):
                shutil.rmtree(glob, ignore_errors=True)
            for wildcard in filter(bool, ignores.split('\n')):
                for filename in glob.glob(wildcard):
                    try:
                        os.remove(filename)
                    except OSError:
                        shutil.rmtree(filename, ignore_errors=True)

        # It's an old-style class in Python 2.7...
        distutils.command.clean.clean.run(self)


################################################################################
# Configure compile flags
################################################################################

include_dirs = []
library_dirs = []
extra_link_args = []
extra_compile_args = ['-std=c++11']
extra_compile_args = ['-std=c++11', '-Wno-write-strings']
if os.getenv('PYTORCH_BINARY_BUILD') and platform.system() == 'Linux':
    print('PYTORCH_BINARY_BUILD found. Static linking libstdc++ on Linux')
    extra_compile_args += ['-static-libstdc++']
    extra_link_args += ['-static-libstdc++']

cwd = os.path.dirname(os.path.abspath(__file__))
lib_path = os.path.join(cwd, "torch", "lib")
@@ -128,50 +219,137 @@ include_dirs += [
    os.path.join(cwd, "torch", "csrc"),
    tmp_install_path + "/include",
    tmp_install_path + "/include/TH",
    tmp_install_path + "/include/THPP",
    tmp_install_path + "/include/THNN",
]

extra_link_args.append('-L' + lib_path)
library_dirs.append(lib_path)

main_libraries = ['TH']
# we specify exact lib names to avoid conflict with lua-torch installs
TH_LIB = os.path.join(lib_path, 'libTH.so.1')
THS_LIB = os.path.join(lib_path, 'libTHS.so.1')
THC_LIB = os.path.join(lib_path, 'libTHC.so.1')
THCS_LIB = os.path.join(lib_path, 'libTHCS.so.1')
THNN_LIB = os.path.join(lib_path, 'libTHNN.so.1')
THCUNN_LIB = os.path.join(lib_path, 'libTHCUNN.so.1')
THPP_LIB = os.path.join(lib_path, 'libTHPP.so.1')
THD_LIB = os.path.join(lib_path, 'libTHD.so.1')
NCCL_LIB = os.path.join(lib_path, 'libnccl.so.1')
if platform.system() == 'Darwin':
    TH_LIB = os.path.join(lib_path, 'libTH.1.dylib')
    THS_LIB = os.path.join(lib_path, 'libTHS.1.dylib')
    THC_LIB = os.path.join(lib_path, 'libTHC.1.dylib')
    THCS_LIB = os.path.join(lib_path, 'libTHCS.1.dylib')
    THNN_LIB = os.path.join(lib_path, 'libTHNN.1.dylib')
    THCUNN_LIB = os.path.join(lib_path, 'libTHCUNN.1.dylib')
    THPP_LIB = os.path.join(lib_path, 'libTHPP.1.dylib')
    THD_LIB = os.path.join(lib_path, 'libTHD.1.dylib')
    NCCL_LIB = os.path.join(lib_path, 'libnccl.1.dylib')

if WITH_NCCL and subprocess.call('ldconfig -p | grep libnccl >/dev/null', shell=True) == 0:
    SYSTEM_NCCL = True

main_compile_args = ['-D_THP_CORE']
main_libraries = ['shm']
main_link_args = [TH_LIB, THS_LIB, THPP_LIB, THNN_LIB]
main_sources = [
    "torch/csrc/PtrWrapper.cpp",
    "torch/csrc/Module.cpp",
    "torch/csrc/Generator.cpp",
    "torch/csrc/Size.cpp",
    "torch/csrc/Exceptions.cpp",
    "torch/csrc/Tensor.cpp",
    "torch/csrc/Storage.cpp",
    "torch/csrc/DynamicTypes.cpp",
    "torch/csrc/byte_order.cpp",
    "torch/csrc/utils.cpp",
    "torch/csrc/utils/object_ptr.cpp",
    "torch/csrc/utils/tuple_parser.cpp",
    "torch/csrc/allocators.cpp",
    "torch/csrc/serialization.cpp",
    "torch/csrc/autograd/init.cpp",
    "torch/csrc/autograd/engine.cpp",
    "torch/csrc/autograd/function.cpp",
    "torch/csrc/autograd/variable.cpp",
    "torch/csrc/autograd/grad_buffer.cpp",
    "torch/csrc/autograd/python_function.cpp",
    "torch/csrc/autograd/python_cpp_function.cpp",
    "torch/csrc/autograd/python_variable.cpp",
    "torch/csrc/autograd/python_engine.cpp",
    "torch/csrc/autograd/python_hook.cpp",
    "torch/csrc/autograd/functions/batch_normalization.cpp",
    "torch/csrc/autograd/functions/convolution.cpp",
    "torch/csrc/autograd/functions/init.cpp",
    "torch/csrc/nn/THNN_generic.cpp",
]

try:
    import numpy as np
    include_dirs += [np.get_include()]
    main_sources += ["torch/csrc/numpy.cpp"]
    extra_compile_args += ['-DWITH_NUMPY']
    WITH_NUMPY = True
except ImportError:
    pass
    WITH_NUMPY = False

if WITH_DISTRIBUTED:
    extra_compile_args += ['-DWITH_DISTRIBUTED']
    main_sources += [
        "torch/csrc/distributed/Module.cpp",
        "torch/csrc/distributed/utils.cpp",
    ]
    if WITH_DISTRIBUTED_MW:
        main_sources += [
            "torch/csrc/distributed/Tensor.cpp",
            "torch/csrc/distributed/Storage.cpp",
        ]
    include_dirs += [tmp_install_path + "/include/THD"]
    main_link_args += [THD_LIB]

if WITH_CUDA:
    if platform.system() == 'Darwin':
        cuda_path = '/Developer/NVIDIA/CUDA-7.5'
        cuda_include_path = cuda_path + '/include'
        cuda_lib_path = cuda_path + '/lib'
    else:
        cuda_path = '/usr/local/cuda'
        cuda_include_path = cuda_path + '/include'
        cuda_lib_path = cuda_path + '/lib64'
    cuda_lib_dirs = ['lib64', 'lib']
    cuda_include_path = os.path.join(CUDA_HOME, 'include')
    for lib_dir in cuda_lib_dirs:
        cuda_lib_path = os.path.join(CUDA_HOME, lib_dir)
        if os.path.exists(cuda_lib_path):
            break
    include_dirs.append(cuda_include_path)
    extra_link_args.append('-L' + cuda_lib_path)
    include_dirs.append(tmp_install_path + "/include/THCUNN")
    library_dirs.append(cuda_lib_path)
    extra_link_args.append('-Wl,-rpath,' + cuda_lib_path)
    extra_compile_args += ['-DWITH_CUDA']
    main_libraries += ['THC']
    extra_compile_args += ['-DCUDA_LIB_PATH=' + cuda_lib_path]
    main_libraries += ['cudart']
    main_link_args += [THC_LIB, THCS_LIB, THCUNN_LIB]
    main_sources += [
        "torch/csrc/cuda/Module.cpp",
        "torch/csrc/cuda/Storage.cpp",
        "torch/csrc/cuda/Stream.cpp",
        "torch/csrc/cuda/Tensor.cpp",
        "torch/csrc/cuda/AutoGPU.cpp",
        "torch/csrc/cuda/utils.cpp",
        "torch/csrc/cuda/serialization.cpp",
    ]

if WITH_NCCL:
    if SYSTEM_NCCL:
        main_libraries += ['nccl']
    else:
        main_link_args += [NCCL_LIB]
    extra_compile_args += ['-DWITH_NCCL']

if WITH_CUDNN:
    main_libraries += ['cudnn']
    include_dirs.append(CUDNN_INCLUDE_DIR)
    library_dirs.append(CUDNN_LIB_DIR)
    main_sources += [
        "torch/csrc/cudnn/BatchNorm.cpp",
        "torch/csrc/cudnn/Conv.cpp",
        "torch/csrc/cudnn/cuDNN.cpp",
        "torch/csrc/cudnn/Types.cpp",
        "torch/csrc/cudnn/Handles.cpp",
    ]
    extra_compile_args += ['-DWITH_CUDNN']

if DEBUG:
    extra_compile_args += ['-O0', '-g']
    extra_link_args += ['-O0', '-g']
@@ -188,51 +366,85 @@ def make_relative_rpath(path):
################################################################################

extensions = []
packages = find_packages(exclude=('tools.*', 'torch.cuda', 'torch.legacy.cunn'))
packages = find_packages(exclude=('tools.*',))

C = Extension("torch._C",
              libraries=main_libraries,
              sources=main_sources,
              language='c++',
              extra_compile_args=extra_compile_args,
              include_dirs=include_dirs,
              extra_link_args=extra_link_args + [make_relative_rpath('lib')]
              )
              libraries=main_libraries,
              sources=main_sources,
              language='c++',
              extra_compile_args=main_compile_args + extra_compile_args,
              include_dirs=include_dirs,
              library_dirs=library_dirs,
              extra_link_args=extra_link_args + main_link_args + [make_relative_rpath('lib')],
              )
extensions.append(C)

DL = Extension("torch._dl",
               sources=["torch/csrc/dl.c"],
               language='c',
               )
extensions.append(DL)

THNN = Extension("torch._thnn._THNN",
                 libraries=['TH', 'THNN'],
                 sources=['torch/csrc/nn/THNN.cpp'],
                 language='c++',
                 extra_compile_args=extra_compile_args,
                 include_dirs=include_dirs,
                 extra_link_args=extra_link_args + [make_relative_rpath('../lib')]
                 )
                 sources=['torch/csrc/nn/THNN.cpp'],
                 language='c++',
                 extra_compile_args=extra_compile_args,
                 include_dirs=include_dirs,
                 extra_link_args=extra_link_args + [
                     TH_LIB,
                     THNN_LIB,
                     make_relative_rpath('../lib'),
                 ]
                 )
extensions.append(THNN)

if WITH_CUDA:
    THCUNN = Extension("torch._thnn._THCUNN",
                       libraries=['TH', 'THC', 'THCUNN'],
                       sources=['torch/csrc/nn/THCUNN.cpp'],
                       language='c++',
                       extra_compile_args=extra_compile_args,
                       include_dirs=include_dirs,
                       extra_link_args=extra_link_args + [make_relative_rpath('../lib')]
                       )
                       sources=['torch/csrc/nn/THCUNN.cpp'],
                       language='c++',
                       extra_compile_args=extra_compile_args,
                       include_dirs=include_dirs,
                       extra_link_args=extra_link_args + [
                           TH_LIB,
                           THC_LIB,
                           THCUNN_LIB,
                           make_relative_rpath('../lib'),
                       ]
                       )
    extensions.append(THCUNN)
    packages += ['torch.cuda', 'torch.legacy.cunn']

setup(name="torch", version="0.1",
      ext_modules=extensions,
      cmdclass = {
          'build': build,
          'build_ext': build_ext,
          'build_deps': build_deps,
          'build_module': build_module,
          'install': install,
          'clean': clean,
      },
      packages=packages,
      package_data={'torch': ['lib/*.so*', 'lib/*.h']},
      install_requires=['pyyaml'],
      )
version = '0.1.11'
if os.getenv('PYTORCH_BUILD_VERSION'):
    assert os.getenv('PYTORCH_BUILD_NUMBER') is not None
    version = os.getenv('PYTORCH_BUILD_VERSION') \
        + '_' + os.getenv('PYTORCH_BUILD_NUMBER')
else:
    try:
        sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip()
        version += '+' + sha[:7]
    except subprocess.CalledProcessError:
        pass


setup(name="torch", version=version,
      description="Tensors and Dynamic neural networks in Python with strong GPU acceleration",
      ext_modules=extensions,
      cmdclass={
          'build': build,
          'build_py': build_py,
          'build_ext': build_ext,
          'build_deps': build_deps,
          'build_module': build_module,
          'develop': develop,
          'install': install,
          'clean': clean,
      },
      packages=packages,
      package_data={'torch': [
          'lib/*.so*', 'lib/*.dylib*',
          'lib/torch_shm_manager',
          'lib/*.h',
          'lib/include/TH/*.h', 'lib/include/TH/generic/*.h',
          'lib/include/THC/*.h', 'lib/include/THC/generic/*.h']},
      install_requires=['pyyaml'],
      )
test/common.py (210 changed lines)
@@ -1,10 +1,55 @@
import sys
import os
import argparse
import unittest
import contextlib
from functools import wraps
from itertools import product
from copy import deepcopy

import torch
import torch.cuda
from torch.autograd import Variable
from torch.autograd.leaf import Leaf


torch.set_default_tensor_type('torch.DoubleTensor')


def run_tests():
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument('--seed', type=int, default=123)
    args, remaining = parser.parse_known_args()
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    remaining = [sys.argv[0]] + remaining
    unittest.main(argv=remaining)


TEST_NUMPY = True
try:
    import numpy
except ImportError:
    TEST_NUMPY = False

TEST_SCIPY = True
try:
    import scipy
except ImportError:
    TEST_SCIPY = False


def skipIfNoLapack(fn):
    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            fn(*args, **kwargs)
        except Exception as e:
            if 'Lapack library not found' in e.args[0]:
                raise unittest.SkipTest('Compiled without Lapack')
            raise
    return wrapper


def get_cpu_type(t):
    assert t.__module__ == 'torch.cuda'
@@ -16,23 +61,38 @@ def get_gpu_type(t):
    return getattr(torch.cuda, t.__name__)


def to_gpu(obj, tensor_type=None):
    if torch.isTensor(obj):
        if tensor_type:
            return tensor_type(obj.size()).copy_(obj)
        return get_gpu_type(type(obj))(obj.size()).copy_(obj)
    elif torch.isStorage(obj):
def to_gpu(obj, type_map={}):
    if torch.is_tensor(obj):
        t = type_map.get(type(obj), get_gpu_type(type(obj)))
        return obj.clone().type(t)
    elif torch.is_storage(obj):
        return obj.new().resize_(obj.size()).copy_(obj)
    elif isinstance(obj, Variable):
        assert type(obj.creator) == Leaf
        return Variable(obj.data.clone().type(tensor_type))
        assert obj.creator is None
        t = type_map.get(type(obj.data), get_gpu_type(type(obj.data)))
        return Variable(obj.data.clone().type(t), requires_grad=obj.requires_grad)
    elif isinstance(obj, list):
        return [to_gpu(o, tensor_type) for o in obj]
        return [to_gpu(o, type_map) for o in obj]
    elif isinstance(obj, tuple):
        return tuple(to_gpu(o, type_map) for o in obj)
    else:
        return deepcopy(obj)


@contextlib.contextmanager
def freeze_rng_state():
    rng_state = torch.get_rng_state()
    if torch.cuda.is_available():
        cuda_rng_state = torch.cuda.get_rng_state()
    yield
    if torch.cuda.is_available():
        torch.cuda.set_rng_state(cuda_rng_state)
    torch.set_rng_state(rng_state)


def iter_indices(tensor):
    if tensor.dim() == 0:
        return range(0)
    if tensor.dim() == 1:
        return range(tensor.size(0))
    return product(*(range(s) for s in tensor.size()))
@@ -57,12 +117,19 @@ class TestCase(unittest.TestCase):
            x = x.data
            y = y.data

        if torch.isTensor(x) and torch.isTensor(y):
            max_err = 0
            super(TestCase, self).assertEqual(x.size().tolist(), y.size().tolist())
            for index in iter_indices(x):
                max_err = max(max_err, abs(x[index] - y[index]))
            self.assertLessEqual(max_err, prec)
        if torch.is_tensor(x) and torch.is_tensor(y):
            def assertTensorsEqual(a, b):
                max_err = 0
                super(TestCase, self).assertEqual(a.size(), b.size())
                for index in iter_indices(a):
                    max_err = max(max_err, abs(a[index] - b[index]))
                self.assertLessEqual(max_err, prec, message)
            self.assertEqual(x.is_sparse, y.is_sparse, message)
            if x.is_sparse:
                assertTensorsEqual(x.indices(), y.indices())
                assertTensorsEqual(x.values(), y.values())
            else:
                assertTensorsEqual(x, y)
        elif type(x) == str and type(y) == str:
            super(TestCase, self).assertEqual(x, y)
        elif is_iterable(x) and is_iterable(y):
@@ -70,68 +137,63 @@ class TestCase(unittest.TestCase):
                self.assertEqual(x_, y_, prec, message)
        else:
            try:
                self.assertLessEqual(abs(x - y), prec)
                self.assertLessEqual(abs(x - y), prec, message)
                return
            except:
                pass
            super(TestCase, self).assertEqual(x, y)
            super(TestCase, self).assertEqual(x, y, message)

    def assertNotEqual(self, x, y, prec=None, message=''):
        if prec is None:
            prec = self.precision

        if isinstance(x, Variable) and isinstance(y, Variable):
            x = x.data
            y = y.data

        if torch.is_tensor(x) and torch.is_tensor(y):
            max_err = 0
            if x.size() != y.size():
                super(TestCase, self).assertNotEqual(x.size(), y.size())
            for index in iter_indices(x):
                max_err = max(max_err, abs(x[index] - y[index]))
            self.assertGreaterEqual(max_err, prec, message)
        elif type(x) == str and type(y) == str:
            super(TestCase, self).assertNotEqual(x, y)
        elif is_iterable(x) and is_iterable(y):
            super(TestCase, self).assertNotEqual(x, y)
        else:
            try:
                self.assertGreaterEqual(abs(x - y), prec, message)
                return
            except:
                pass
            super(TestCase, self).assertNotEqual(x, y, message)

    def assertObjectIn(self, obj, iterable):
        for elem in iterable:
            if id(obj) == id(elem):
                return
        raise AssertionError("object not found in iterable")


def make_jacobian(input, num_out):
    if torch.isTensor(input) or isinstance(input, Variable):
        return torch.zeros(input.nElement(), num_out)
def download_file(url, path, binary=True):
    if sys.version_info < (3,):
        import urllib2
        request = urllib2
        error = urllib2
    else:
        return type(input)(make_jacobian(elem, num_out) for elem in input)
        import urllib.request
        import urllib.error
        request = urllib.request
        error = urllib.error


def iter_tensors(x):
    if torch.isTensor(x):
        yield x
    elif isinstance(x, Variable):
        yield x.data
    else:
        for elem in x:
            for result in iter_tensors(elem):
                yield result


def contiguous(input):
    if torch.isTensor(input):
        return input.contiguous()
    elif isinstance(input, Variable):
        return input.contiguous_()
    else:
        return type(input)(contiguous(e) for e in input)


def get_numerical_jacobian(fn, input, target):
    perturbation = 1e-6
    # To be able to use .view(-1) input must be contiguous
    input = contiguous(input)
    output_size = fn(input).numel()
    jacobian = make_jacobian(target, output_size)

    # It's much easier to iterate over flattened lists of tensors.
    # These are reference to the same objects in jacobian, so any changes
    # will be reflected in it as well.
    x_tensors = [t for t in iter_tensors(target)]
    j_tensors = [t for t in iter_tensors(jacobian)]

    outa = torch.Tensor(output_size)
    outb = torch.Tensor(output_size)

    # TODO: compare structure
    for x_tensor, d_tensor in zip(x_tensors, j_tensors):
        flat_tensor = x_tensor.view(-1)
        for i in range(flat_tensor.nElement()):
            orig = flat_tensor[i]
            flat_tensor[i] = orig - perturbation
            outa.copy_(fn(input))
            flat_tensor[i] = orig + perturbation
            outb.copy_(fn(input))
            flat_tensor[i] = orig

            outb.add_(-1, outa).div_(2 * perturbation)
            d_tensor[i] = outb

    return jacobian
    if os.path.exists(path):
        return True
    try:
        data = request.urlopen(url, timeout=15).read()
        with open(path, 'wb' if binary else 'w') as f:
            f.write(data)
        return True
    except error.URLError as e:
        return False
@@ -1,17 +1,26 @@
import sys
import tempfile
import unittest
from copy import deepcopy
from itertools import product

import torch
import torch.cuda
from torch.autograd import Variable
from common import TestCase, to_gpu, get_numerical_jacobian, iter_tensors, contiguous
from common import TestCase, to_gpu, freeze_rng_state
from torch.autograd.gradcheck import get_numerical_jacobian, iter_tensors, contiguous
import torch.backends.cudnn

try:
    import torch.cuda
    import torch.legacy.cunn
    TEST_CUDA = True
except ImportError:
    TEST_CUDA = False
# tarfile module tries to obtain a file object name in python 3.3
if sys.version_info[:2] == (3, 3):
    TemporaryFile = tempfile.NamedTemporaryFile
else:
    TemporaryFile = tempfile.TemporaryFile

TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
TEST_CUDNN = TEST_CUDA and torch.backends.cudnn.is_acceptable(torch.cuda.FloatTensor(1))
TEST_CUDNN_VERSION = TEST_CUDNN and torch.backends.cudnn.version()
PRECISION = 1e-5

module_tests = [
@@ -19,24 +28,14 @@ module_tests = [
        module_name='Linear',
        constructor_args=(10, 8),
        input_size=(4, 10),
        reference_fn=lambda i,p: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8)
        reference_fn=lambda i, p: torch.mm(i, p[0].t()) + p[1].view(1, -1).expand(4, 8)
    ),
    dict(
        module_name='Conv2d',
        constructor_args=(3, 4, 3, 3),
        input_size=(2, 3, 6, 6)
    ),
    dict(
        module_name='Conv2d',
        constructor_args=(3, 4, 3, 3, 2, 2),
        input_size=(2, 3, 6, 6),
        desc='strided'
    ),
    dict(
        module_name='Conv2d',
        constructor_args=(3, 4, 3, 3, 2, 2, 1, 1),
        input_size=(2, 3, 6, 6),
        desc='padding'
        module_name='Linear',
        constructor_args=(10, 8, False),
        input_size=(4, 10),
        desc='no_bias',
        reference_fn=lambda i, p: torch.mm(i, p[0].t())
    ),
    dict(
        module_name='Threshold',
@@ -62,9 +61,21 @@ module_tests = [
        check_inplace=True
    ),
    dict(
        module_name='HardTanh',
        module_name='RReLU',
        input_size=(1, 2, 2),
        test_cuda=False
    ),
    dict(
        module_name='RReLU',
        constructor_args=(0.1, 0.9),
        input_size=(4, 4, 5),
        desc='with_up_down',
        test_cuda=False
    ),
    dict(
        module_name='Hardtanh',
        input_size=(3, 2, 5),
        reference_fn=lambda i,_: i.clamp(-1, 1)
        reference_fn=lambda i, _: i.clamp(-1, 1)
    ),
    dict(
        module_name='Sigmoid',
@@ -74,60 +85,272 @@ module_tests = [
        module_name='Tanh',
        input_size=(2, 3, 4, 5)
    ),
    dict(
        module_name='MaxPooling2d',
        constructor_args=(3, 3, 2, 2, 1, 1),
        input_size=(1, 3, 7, 7)
    ),
    dict(
        module_name='Softmax',
        input_size=(10, 20),
        reference_fn=lambda i,_: torch.exp(i).div(torch.exp(i).sum(1).expand(10, 20))
        reference_fn=lambda i, _: torch.exp(i).div(torch.exp(i).sum(1).expand(10, 20))
    ),
    dict(
        module_name='Softmax2d',
        input_size=(1, 3, 10, 20),
        reference_fn=lambda i,_: torch.exp(i).div(torch.exp(i).sum(1).expandAs(i))
    ),
    dict(
        module_name='BatchNorm',
        constructor_args=(10,),
        input_size=(4, 10),
        desc='affine'
    ),
    dict(
        module_name='BatchNorm',
        constructor_args=(10, 1e-3, 0.3, False),
        input_size=(4, 10),
        desc='not_affine'
        reference_fn=lambda i, _: torch.exp(i).div(torch.exp(i).sum(1).expand_as(i))
    ),
    dict(
        module_name='LogSoftmax',
        input_size=(10, 20),
        reference_fn=lambda i,_: torch.exp(i).div_(torch.exp(i).sum(1).expand(10, 20)).log_()
        reference_fn=lambda i, _: torch.exp(i).div_(torch.exp(i).sum(1).expand(10, 20)).log_()
    ),
    dict(
        module_name='LogSoftmax',
        input_size=(1, 3, 10, 20),
        reference_fn=lambda i, _: torch.exp(i).div_(torch.exp(i).sum(1).expand_as(i)).log_(),
        desc='multiparam'
    ),
    dict(
        module_name='ELU',
        constructor_args=(2.,),
        input_size=(3, 2, 5),
        check_inplace=True
    ),
    # TODO: reference function
    dict(
        module_name='Hardshrink',
        constructor_args=(2.,),
        input_size=(4, 3, 2, 4)
    ),
    dict(
        module_name='LeakyReLU',
        input_size=(3, 2, 5),
        check_inplace=True
    ),
    dict(
        module_name='LeakyReLU',
        constructor_args=(0.5,),
        input_size=(3, 2, 5),
        check_inplace=True,
        desc='with_negval'
    ),
    dict(
        module_name='LogSigmoid',
        input_size=(2, 3, 4),
        reference_fn=lambda i, _: i.sigmoid().log()
    ),
    dict(
        module_name='Softplus',
        input_size=(10, 20),
        reference_fn=lambda i, _: torch.log(1 + torch.exp(i))
    ),
    dict(
        module_name='Softplus',
        constructor_args=(2,),
        input_size=(10, 20),
        reference_fn=lambda i, _: 1. / 2. * torch.log(1 + torch.exp(2 * i)),
        desc='beta'
    ),
    dict(
        module_name='Softshrink',
        input_size=(3, 2, 5)
    ),
    dict(
        module_name='Softshrink',
        constructor_args=(1,),
        input_size=(3, 2, 5),
        desc='lambda'
    ),
    dict(
        module_name='CrossMapLRN2d',
        constructor_args=(5, 5e-3, 1e-3, 2),
        input_size=(2, 3, 6, 6)
    ),
    dict(
        module_name='PReLU',
        input_size=(2, 3, 4),
        reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
        desc='1d',
    ),
    dict(
        module_name='PReLU',
        constructor_args=(3,),
        input_size=(2, 3, 4),
        desc='1d_multiparam',
        reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
    ),
    dict(
        module_name='PReLU',
        input_size=(2, 3, 4, 5),
        desc='2d',
        reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
    ),
    dict(
        module_name='PReLU',
        constructor_args=(3,),
        input_size=(2, 3, 4, 5),
        desc='2d_multiparam',
        reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
    ),
    dict(
        module_name='PReLU',
        input_size=(2, 3, 4, 5, 6),
        reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
        desc='3d',
    ),
    dict(
        module_name='PReLU',
        constructor_args=(3,),
        input_size=(2, 3, 4, 5, 6),
        desc='3d_multiparam',
        reference_fn=lambda i, p: torch.clamp(i, min=0) + torch.clamp(i, max=0) * p[0][0],
    ),
    dict(
        module_name='Softsign',
        input_size=(3, 2, 5),
        reference_fn=lambda i, _: i.div(1 + torch.abs(i))
    ),
    dict(
        module_name='Softmin',
        input_size=(10, 20)
    ),
    dict(
        module_name='Tanhshrink',
        input_size=(2, 3, 4, 5)
    ),
]


criterion_tests = [
    dict(module_name='AbsCriterion',
         input_size=(2, 3, 4),
         target=torch.randn(2, 3, 4),
         reference_fn=lambda i,t,_: 1./i.numel() * \
             sum((a-b).abs().sum() for a,b in zip(i, t))
         ),
    dict(module_name='L1Loss',
         input_size=(2, 3, 4),
         target=torch.randn(2, 3, 4),
         reference_fn=lambda i, t, _: 1. / i.numel() *
         sum((a - b).abs().sum() for a, b in zip(i, t))
         ),
    dict(
        module_name='ClassNLLCriterion',
        module_name='NLLLoss',
        input=torch.rand(15, 10).log(),
        target=torch.Tensor(15).uniform_().mul(10).floor().long(),
    ),
    dict(
        module_name='ClassNLLCriterion',
        module_name='NLLLoss',
        constructor_args=(torch.rand(10),),
        input=torch.rand(15, 10).add(1e-2).log(),
        target=torch.Tensor(15).uniform_().mul(10).floor().long(),
        desc='weights',
    ),
    dict(
        module_name='KLDivLoss',
        input=torch.rand(10, 10).log(),
        target=torch.rand(10, 10)
    ),
    dict(
        module_name='MSELoss',
        input=torch.randn(2, 3, 4, 5),
        target=torch.randn(2, 3, 4, 5),
        reference_fn=lambda i, t, _: (i - t).abs().pow(2).sum() / i.numel()
    ),
    dict(
        module_name='BCELoss',
        input=torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),
        target=torch.randn(15, 10).gt(0).double()
    ),
    dict(
        module_name='BCELoss',
        constructor_args=(torch.rand(10),),
        input=torch.rand(15, 10).clamp_(1e-2, 1 - 1e-2),
        target=torch.randn(15, 10).gt(0).double(),
        desc='weights'
    ),
    dict(
        module_name='CrossEntropyLoss',
        input=torch.randn(15, 10),
        target=torch.Tensor(15).uniform_().mul(10).floor().long()
    ),
    dict(
        module_name='CrossEntropyLoss',
        constructor_args=(torch.rand(10),),
        input=torch.randn(15, 10),
        target=torch.Tensor(15).uniform_().mul(10).floor().long(),
        desc='weights'
    ),
    dict(
        module_name='NLLLoss2d',
        input_size=(2, 3, 5, 5),
        target=torch.rand(2, 5, 5).mul(3).floor().long()
    ),
    dict(
        module_name='NLLLoss2d',
        constructor_args=(torch.rand(3),),
        input_size=(2, 3, 5, 5),
        target=torch.rand(2, 5, 5).mul(3).floor().long(),
        desc='weights'
    ),
    dict(
        module_name='HingeEmbeddingLoss',
        input=torch.rand(10),
        target=torch.randn(10).gt(0).double().mul_(2).sub(1)
    ),
    dict(
        module_name='HingeEmbeddingLoss',
        constructor_args=(0.5,),
        input=torch.rand(10),
        target=torch.randn(10).gt(0).double().mul_(2).sub(1),
        desc='margin'
    ),
    dict(
        module_name='MultiLabelMarginLoss',
        input_size=(5, 10),
        target=torch.rand(5, 10).mul(10).floor().long()
    ),
    dict(
        module_name='MultiLabelSoftMarginLoss',
        input_size=(5, 10),
        target=torch.rand(5, 10).mul(2).floor()
    ),
    dict(
        module_name='MultiLabelSoftMarginLoss',
        constructor_args=(torch.rand(10),),
        input_size=(5, 10),
        target=torch.rand(5, 10).mul(2).floor(),
        desc='weights'
    ),
    dict(
        module_name='MultiMarginLoss',
        input_size=(5, 10),
        target=torch.rand(5).mul(8).floor().long()
    ),
    dict(
        module_name='SmoothL1Loss',
        input_size=(5, 10),
        target=torch.randn(5, 10)
    ),
    dict(
        module_name='SoftMarginLoss',
        input_size=(5, 5),
        target=torch.randn(5, 5).sign()
    ),
    dict(
        module_name='CosineEmbeddingLoss',
        input=(torch.rand(15, 10), torch.rand(15, 10)),
        target=torch.randn(15).sign()
    ),
    dict(
        module_name='CosineEmbeddingLoss',
        constructor_args=(0.7,),
        input=(torch.rand(15, 10), torch.rand(15, 10)),
        target=torch.randn(15).sign(),
        desc='margin'
    ),
    dict(
        module_name='MarginRankingLoss',
        input=(torch.randn(50).mul(10), torch.randn(50).mul(10)),
        target=torch.randn(50).sign()
    ),
    dict(
        module_name='MarginRankingLoss',
        constructor_args=(2,),
        input=(torch.randn(50).mul(10), torch.randn(50).mul(10)),
        target=torch.randn(50).sign(),
        desc='margin'
    ),
]
@@ -139,20 +362,24 @@ class NNTestCase(TestCase):
        elif isinstance(input, list):
            return [self._jacobian(elem, num_out) for elem in input]
        else:
            return torch.zeros(input.nElement(), num_out)
            return torch.zeros(input.nelement(), num_out)

    def _flatten_tensors(self, x):
        if torch.isTensor(x):
            return x.view(-1)
        if torch.is_tensor(x):
            if x.is_sparse:
                return x.to_dense().view(-1)
            else:
                return x.view(-1)
        elif isinstance(x, Variable):
            return x.data.view(-1)
            return self._flatten_tensors(x.data)
        else:
            return tuple(self._flatten_tensors(a) for a in x)

    def _zero_grad_input(self, input):
        if isinstance(input, Variable):
            input.grad.zero_()
        elif torch.isTensor(input):
            if input.requires_grad and input.grad is not None:
                input.grad.data.zero_()
        elif torch.is_tensor(input):
            return
        else:
            for i in input:
@@ -165,33 +392,34 @@ class NNTestCase(TestCase):
        flat_d_out = d_out.view(-1)

        if jacobian_input:
            jacobian_input = self._jacobian(input, d_out.nElement())
            flat_jacobian_input = list(iter_tensors(jacobian_input))
            jacobian_inp = self._jacobian(input, d_out.nelement())
            flat_jacobian_input = list(iter_tensors(jacobian_inp))

        if jacobian_parameters:
            param, d_param = self._get_parameters(module)
            num_param = sum(p.numel() for p in param)
            jacobian_param = torch.zeros(num_param, d_out.nElement())
            jacobian_param = torch.zeros(num_param, d_out.nelement())

        for i in range(flat_d_out.nElement()):
        for i in range(flat_d_out.nelement()):
            d_out.zero_()
            flat_d_out[i] = 1

            if jacobian_parameters:
                self._zero_grad_parameters(module)
            # Variables will accumulate gradient from multiple steps
            self._zero_grad_input(input)
            if jacobian_input:
                self._zero_grad_input(input)
            d_input = self._backward(module, input, output, d_out)

            if jacobian_input:
                for jacobian_x, d_x in zip(flat_jacobian_input, iter_tensors(d_input)):
                    jacobian_x[:,i] = d_x
                    jacobian_x[:, i] = d_x
            if jacobian_parameters:
                jacobian_param[:,i] = torch.cat(self._flatten_tensors(d_param), 0)
                jacobian_param[:, i] = torch.cat(self._flatten_tensors(d_param), 0)

        res = tuple()
        if jacobian_input:
            res += jacobian_input,
            res += jacobian_inp,
        if jacobian_parameters:
            res += jacobian_param,
@@ -199,7 +427,7 @@ class NNTestCase(TestCase):

    def _numerical_jacobian(self, module, input, jacobian_input=True, jacobian_parameters=True):
        output = self._forward(module, input)
        output_size = output.nElement()
        output_size = output.nelement()

        if jacobian_parameters:
            param, d_param = self._get_parameters(module)
@@ -214,9 +442,9 @@ class NNTestCase(TestCase):
        # TODO: enable non-contig tests
        input = contiguous(input)
        if jacobian_input:
            res += get_numerical_jacobian(fw, input, input),
            res += get_numerical_jacobian(fw, input, input, eps=1e-6),
        if jacobian_parameters:
            res += torch.cat(list(get_numerical_jacobian(fw, input, p) for p in param), 0),
            res += torch.cat(list(get_numerical_jacobian(fw, input, p, eps=1e-6) for p in param), 0),
        return res

    def check_jacobian(self, module, input, jacobian_input=True):
@@ -237,19 +465,18 @@ class NNTestCase(TestCase):
        analytical_d_x = self._backward_criterion(criterion, input, target)
        numerical_d_x = deepcopy(analytical_d_x)


        input_t = iter_tensors(input)
        numerical_t = iter_tensors(numerical_d_x)
        for x, d_x in zip(input_t, numerical_t):
            x = x.view(-1)
            d_x = d_x.view(-1)
            for i in range(x.nElement()):
            for i in range(x.nelement()):
                original = x[i]
                x[i] = original + eps
                fx1 = self._forward_criterion(criterion, input, target)
                x[i] = original - eps
                fx2 = self._forward_criterion(criterion, input, target)
                deriv = (fx1 - fx2) / (2.*eps)
                deriv = (fx1 - fx2) / (2. * eps)
                d_x[i] = deriv
                x[i] = original
@@ -263,8 +490,9 @@ class NNTestCase(TestCase):


class TestBase(object):

    def __init__(self, constructor, constructor_args=tuple(), input_size=None,
            input=None, desc='', reference_fn=None, fullname=None, **kwargs):
                 input=None, desc='', reference_fn=None, fullname=None, **kwargs):
        if input_size is None and input is None:
            raise RuntimeError("Specify either an input tensor, or it's size!")
        self.constructor = constructor
@@ -287,7 +515,7 @@ class TestBase(object):
    def _unpack_input(self, input):
        if isinstance(input, Variable):
            return input.data
        elif torch.isTensor(input):
        elif torch.is_tensor(input):
            return input
        else:
            return type(input)(self._unpack_input(i) for i in input)
@@ -299,8 +527,8 @@ class TestBase(object):
        def map_input_sizes(sizes):
            if isinstance(sizes, list):
                return [map_input_sizes(s) for s in sizes]
            elif torch.isTensor(sizes):
                return sizes
            elif torch.is_tensor(sizes):
                return sizes.double()
            else:
                return torch.randn(*sizes)
@@ -312,6 +540,7 @@ class TestBase(object):


class ModuleTest(TestBase):

    def __init__(self, *args, **kwargs):
        super(ModuleTest, self).__init__(*args, **kwargs)
        self.jacobian_input = kwargs.get('jacobian_input', True)
@@ -329,19 +558,73 @@ class ModuleTest(TestBase):
            expected_out = self.reference_fn(ref_input, test_case._get_parameters(module)[0])
            test_case.assertEqual(out, expected_out)

        self.test_noncontig(test_case, module, input)

        # TODO: do this with in-memory files as soon as torch.save will support it
        with TemporaryFile() as f:
            test_case._forward(module, input)
            torch.save(module, f)
            f.seek(0)
            module_copy = torch.load(f)
            test_case.assertEqual(test_case._forward(module, input), test_case._forward(module_copy, input))

        self._do_test(test_case, module, input)

    def noncontiguize(self, obj):
        if isinstance(obj, list):
            return [self.noncontiguize(o) for o in obj]
        tensor = obj.data if isinstance(obj, Variable) else obj
        ndim = tensor.dim()
        noncontig = torch.stack([tensor.clone().zero_(), tensor], ndim).select(ndim, 1)
        assert noncontig.numel() == 1 or not noncontig.is_contiguous()
        if isinstance(obj, Variable):
            return Variable(noncontig, requires_grad=obj.requires_grad)
        return noncontig

    def test_noncontig(self, test_case, module, input):
        test_case._zero_grad_parameters(module)
        test_case._zero_grad_input(input)
        with freeze_rng_state():
            output = test_case._forward(module, input)
            grad_output = output
            if isinstance(grad_output, Variable):
                grad_output = grad_output.data.clone()
            else:
                grad_output = grad_output.clone()
                output = output.clone()
            grad_output.normal_()
            d_input = deepcopy(test_case._backward(module, input, output, grad_output))
            d_param = deepcopy(test_case._get_parameters(module)[1])

        nc_input = self.noncontiguize(input)
        nc_grad_output = self.noncontiguize(grad_output)
        for contig_i, contig_g in product((True, False), repeat=2):
            i = input if contig_i else nc_input
            go = grad_output if contig_g else nc_grad_output
            test_case._zero_grad_parameters(module)
            test_case._zero_grad_input(i)
            with freeze_rng_state():
                try:
                    out = test_case._forward(module, i)
                except Exception:
                    # Some modules will fail because of non contiguous inputs and we're ok with that
                    continue
                grad = test_case._backward(module, i, out, go)

                test_case.assertEqual(out, output)
                test_case.assertEqual(grad, d_input, 1e-4)
                test_case.assertEqual(test_case._get_parameters(module)[1], d_param)

    def test_cuda(self, test_case):
        if not TEST_CUDA or not self.should_test_cuda:
            raise unittest.SkipTest('Excluded from CUDA tests')
        try:
            cpu_input = self._get_input()
            gpu_input = to_gpu(cpu_input, tensor_type=torch.cuda.FloatTensor)
            type_map = {torch.DoubleTensor: torch.cuda.FloatTensor}
            gpu_input = to_gpu(cpu_input, type_map=type_map)

            cpu_module = self.constructor(*self.constructor_args)
            gpu_module = self.constructor(*self.constructor_args).cuda()
            test_case._zero_grad_parameters(cpu_module)
            test_case._zero_grad_parameters(gpu_module)
            gpu_module = self.constructor(*self.constructor_args).float().cuda()
            cpu_param = test_case._get_parameters(cpu_module)
            gpu_param = test_case._get_parameters(gpu_module)
            for cpu_p, gpu_p in zip(cpu_param[0], gpu_param[0]):
@@ -351,6 +634,10 @@ class ModuleTest(TestBase):
                gpu_p = gpu_p.data
                gpu_p.copy_(cpu_p)

            test_case._zero_grad_input(cpu_input)
            test_case._zero_grad_input(gpu_input)
            test_case._zero_grad_parameters(cpu_module)
            test_case._zero_grad_parameters(gpu_module)
            cpu_output = test_case._forward(cpu_module, cpu_input)
            gpu_output = test_case._forward(gpu_module, gpu_input)
            test_case.assertEqual(cpu_output, gpu_output, 2e-4)
@@ -364,6 +651,8 @@ class ModuleTest(TestBase):
            test_case.assertEqual(cpu_gradInput, gpu_gradInput, 2e-4)
            for cpu_d_p, gpu_d_p in zip(cpu_param[1], gpu_param[1]):
                test_case.assertEqual(cpu_d_p, gpu_d_p, 2e-4)

            self.test_noncontig(test_case, gpu_module, gpu_input)
        except NotImplementedError:
            pass
        # TODO: remove this after CUDA scatter_ is implemented
@@ -375,19 +664,30 @@ class ModuleTest(TestBase):


class CriterionTest(TestBase):

    def __init__(self, *args, **kwargs):
        super(CriterionTest, self).__init__(*args, **kwargs)
        self.target = kwargs.get('target', None)
        self.target = self._get_target(kwargs['target'])
        self.should_test_cuda = kwargs.get('test_cuda', True)

    def _get_target(self, target):
        return target

    def __call__(self, test_case):
        module = self.constructor(*self.constructor_args)
        input = self._get_input()

        # Check that these methods don't raise errors
        module.__repr__()
        str(module)

        if self.reference_fn is not None:
            out = test_case._forward_criterion(module, input, self.target)
            target = self.target
            if isinstance(target, Variable):
                target = target.data
            expected_out = self.reference_fn(deepcopy(self._unpack_input(input)),
                                             deepcopy(self.target), module)
                                             deepcopy(target), module)
            test_case.assertEqual(out, expected_out)

        test_case.check_criterion_jacobian(module, input, self.target)
@@ -397,21 +697,23 @@ class CriterionTest(TestBase):
            raise unittest.SkipTest('Excluded from CUDA tests')
        try:
            cpu_input = self._get_input()
            gpu_input = to_gpu(cpu_input, tensor_type=torch.cuda.FloatTensor)
            type_map = {
                torch.DoubleTensor: torch.cuda.FloatTensor,
            }
            gpu_input = to_gpu(cpu_input, type_map=type_map)

            cpu_target = self.target
            gpu_target = to_gpu(self.target, tensor_type=torch.cuda.FloatTensor)
            gpu_target = to_gpu(self.target, type_map=type_map)

            cpu_module = self.constructor(*self.constructor_args)
            gpu_module = self.constructor(*self.constructor_args).cuda()
            gpu_module = self.constructor(*self.constructor_args).float().cuda()

            cpu_output = test_case._forward_criterion(cpu_module, cpu_input, cpu_target)
            gpu_output = test_case._forward_criterion(gpu_module, gpu_input, gpu_target)
            test_case.assertEqual(cpu_output, gpu_output, 2e-4)
            test_case.assertEqual(cpu_output, gpu_output, 4e-4)

            cpu_gradInput = test_case._backward_criterion(cpu_module, cpu_input, cpu_target)
            gpu_gradInput = test_case._backward_criterion(gpu_module, gpu_input, gpu_target)
            test_case.assertEqual(cpu_gradInput, gpu_gradInput, 2e-4)
            test_case.assertEqual(cpu_gradInput, gpu_gradInput, 4e-4)
        except NotImplementedError:
            pass
test/data/network1.py (new file, 8 lines)
@@ -0,0 +1,8 @@
import torch.nn as nn


class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.linear = nn.Linear(10, 20)
test/data/network2.py (new file, 9 lines)
@@ -0,0 +1,9 @@
import torch.nn as nn


class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.linear = nn.Linear(10, 20)
        self.relu = nn.ReLU()
test/error_messages/storage.py (new file, 71 lines)
@@ -0,0 +1,71 @@
|
||||
import torch
|
||||
|
||||
|
||||
def check_error(desc, fn, *required_substrings):
|
||||
try:
|
||||
fn()
|
||||
except Exception as e:
|
||||
error_message = e.args[0]
|
||||
print('=' * 80)
|
||||
print(desc)
|
||||
print('-' * 80)
|
||||
print(error_message)
|
||||
print('')
|
||||
for sub in required_substrings:
|
||||
assert sub in error_message
|
||||
return
|
||||
assert False, "given function ({}) didn't raise an error".format(desc)
|
||||
|
||||
check_error(
|
||||
'Wrong argument types',
|
||||
lambda: torch.FloatStorage(object()),
|
||||
'object')
|
||||
|
||||
check_error('Unknown keyword argument',
|
||||
lambda: torch.FloatStorage(content=1234.),
|
||||
'keyword')
|
||||
|
||||
check_error('Invalid types inside a sequence',
|
||||
lambda: torch.FloatStorage(['a', 'b']),
|
||||
'list', 'str')
|
||||
|
||||
check_error('Invalid size type',
|
||||
lambda: torch.FloatStorage(1.5),
|
||||
'float')
|
||||
|
||||
check_error('Invalid offset',
|
||||
lambda: torch.FloatStorage(torch.FloatStorage(2), 4),
|
||||
'2', '4')
|
||||
|
||||
check_error('Negative offset',
|
||||
lambda: torch.FloatStorage(torch.FloatStorage(2), -1),
|
||||
'2', '-1')
|
||||
|
||||
check_error('Invalid size',
|
||||
lambda: torch.FloatStorage(torch.FloatStorage(3), 1, 5),
|
||||
'2', '1', '5')
|
||||
|
||||
check_error('Negative size',
|
||||
lambda: torch.FloatStorage(torch.FloatStorage(3), 1, -5),
|
||||
'2', '1', '-5')
|
||||
|
||||
check_error('Invalid index type',
|
||||
lambda: torch.FloatStorage(10)['first item'],
|
||||
'str')
|
||||
|
||||
|
||||
def assign():
|
||||
torch.FloatStorage(10)[1:-1] = '1'
|
||||
check_error('Invalid value type',
|
||||
assign,
|
||||
'str')
|
||||
|
||||
check_error('resize_ with invalid type',
|
||||
lambda: torch.FloatStorage(10).resize_(1.5),
|
||||
'float')
|
||||
|
||||
check_error('fill_ with invalid type',
|
||||
lambda: torch.IntStorage(10).fill_('asdf'),
|
||||
'str')
|
||||
|
||||
# TODO: frombuffer
|
||||
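check_error is a small hand-rolled assertion helper: it runs fn, requires it to raise, and requires every given substring to appear in the message. The same check could be phrased with the standard library; a rough equivalent of the first call above, illustrative only and assuming Python 3's unittest:

import re
import unittest

import torch


class StorageErrors(unittest.TestCase):
    def test_wrong_argument_types(self):
        # Mirrors check_error('Wrong argument types', ...): the error
        # message must mention the offending type name.
        with self.assertRaisesRegex(Exception, re.escape('object')):
            torch.FloatStorage(object())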
test/ffi/src/cpu/lib.h (new file, 6 lines)
@@ -0,0 +1,6 @@

void good_func(THFloatTensor *tensor, int a, float b);
void bad_func(THFloatTensor *tensor, int a, float b);
THFloatTensor * new_tensor(int a);
float int_to_float(int a);
test/ffi/src/cpu/lib1.c (new file, 19 lines)
@@ -0,0 +1,19 @@
#include <TH/TH.h>

void good_func(THFloatTensor *tensor, int a, float b)
{
  THFloatTensor_mul(tensor, tensor, a);
  THFloatTensor_add(tensor, tensor, b);
}

THFloatTensor * new_tensor(int a)
{
  THFloatTensor *t = THFloatTensor_newWithSize2d(a, a);
  THFloatTensor_fill(t, a);
  return t;
}

float int_to_float(int a)
{
  return a;
}
test/ffi/src/cpu/lib2.c (new file, 8 lines)
@@ -0,0 +1,8 @@
#include <TH/TH.h>

void bad_func(THFloatTensor *tensor, int a, float b)
{
  THFloatTensor_mul(tensor, tensor, a);
  THFloatTensor_add(tensor, tensor, b);
  THFloatTensor_addbmm(tensor, 1, tensor, 1, tensor, tensor);
}
test/ffi/src/cuda/cudalib.c (new file, 12 lines)
@@ -0,0 +1,12 @@
#include <TH/TH.h>
#include <THC/THC.h>

extern THCState *state;

#include "../cpu/lib1.c"

void cuda_func(THCudaTensor *tensor, int a, float b)
{
  THCudaTensor_mul(state, tensor, tensor, a);
  THCudaTensor_add(state, tensor, tensor, b);
}
test/ffi/src/cuda/cudalib.h (new file, 5 lines)
@@ -0,0 +1,5 @@

void good_func(THFloatTensor *tensor, int a, float b);
void cuda_func(THCudaTensor *tensor, int a, float b);
THFloatTensor * new_tensor(int a);
float int_to_float(int a);
test/ffi/src/lib.h (new file, 5 lines)
@@ -0,0 +1,5 @@

void my_func(THFloatTensor *tensor, int a, float b);
void my_cuda_func(THCudaTensor *tensor, int a, float b);
THFloatTensor * new_t(int a);
float new_int(int a);
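These C sources and headers are fixtures for the FFI tests: they are compiled at test time rather than shipped. A hedged sketch of how the CPU pieces might be built through the torch.utils.ffi wrapper of this era follows; the extension name '_ext.cpulib' and the exact keyword arguments are assumptions for illustration, not taken from this diff:

from torch.utils.ffi import create_extension

# Compile lib1.c/lib2.c against the declarations in lib.h (paths are
# relative to the repository root; API details are assumed).
ffi = create_extension(
    '_ext.cpulib',
    headers=['test/ffi/src/cpu/lib.h'],
    sources=['test/ffi/src/cpu/lib1.c', 'test/ffi/src/cpu/lib2.c'],
    with_cuda=False,
)
ffi.build()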
@@ -1,5 +1,5 @@

-# th test.lua > lua.out
+th test.lua > lua.out
 python3 test.py > python.out

 diff lua.out python.out >/dev/null 2>&1
test/optim/lua.out (5060 lines; file diff suppressed because it is too large)
(deleted file, 39 lines)
@@ -1,39 +0,0 @@
assert(arg[1])
funcs = {
   'resizeAs', 'add', 'zero', 'mul', 'div', 'abs',
   'addcmul', 'addcdiv', 'copy', 'sqrt', 'fill',
   {'cmul', 'mul'},
   {'cdiv', 'div'},
}
for _, val in pairs(funcs) do
   local name, newname
   if type(val) == 'table' then
      name = val[1]
      newname = val[2]
   else
      name = val
      newname = val .. '_'
   end

   command = "sed -i -r "
      .. "'/torch\\." .. name .. "\\(/b; " -- short-circuits
      .. "s/([a-zA-Z]*)\\." .. name .. "\\(" -- substitution
      .. "/"
      .. "\\1\\." .. newname .. "\\(/g' " .. arg[1]
   print(command)
   os.execute(command)
   command = "sed -i 's/math\\." .. newname
      .. "/math\\." .. name .. "/' " .. arg[1]
   print(command)
   os.execute(command)
end

funcs = {
   {'torch\.cmul', 'torch\.mul'},
   {'torch\.cdiv', 'torch\.div'},
}
for _, val in pairs(funcs) do
   command = "sed -i 's/" .. val[1] .. "/" .. val[2] .. "/' " .. arg[1]
   print(command)
   os.execute(command)
end
test/optim/test.lua (new file, 33 lines)
@@ -0,0 +1,33 @@
local cjson = require 'cjson'
require 'optim'

function rosenbrock(t)
   x, y = t[1], t[2]
   return (1 - x) ^ 2 + 100 * (y - x^2)^2
end

function drosenbrock(t)
   x, y = t[1], t[2]
   return torch.DoubleTensor({-400 * x * (y - x^2) - 2 * (1 - x), 200 * x * (y - x^2)})
end

local fd = io.open('tests.json', 'r')
local tests = cjson.decode(fd:read('*a'))
fd:close()

for i, test in ipairs(tests) do
   print(test.algorithm)
   algorithm = optim[test.algorithm]
   for i, config in ipairs(test.config) do
      print('================================================================================')
      params = torch.DoubleTensor({1.5, 1.5})
      for i = 1, 100 do
         function closure(x)
            return rosenbrock(x), drosenbrock(x)
         end
         algorithm(closure, params, config)
         print(string.format('%.8f\t%.8f', params[1], params[2]))
      end
   end
end
@@ -3,13 +3,15 @@ import torch
 import torch.legacy.optim as optim
 from pprint import pprint


 def rosenbrock(tensor):
     x, y = tensor
-    return (1 - x)**2 + 100 * (y - x**2)**2
+    return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2


 def drosenbrock(tensor):
     x, y = tensor
-    return torch.DoubleTensor((-400 * x * (y - x**2) - 2 * (1 - x), 200 * x * (y - x**2)))
+    return torch.DoubleTensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * x * (y - x ** 2)))

 algorithms = {
     'adadelta': optim.adadelta,
@@ -22,6 +24,7 @@ algorithms = {
     'rmsprop': optim.rmsprop,
     'rprop': optim.rprop,
     'sgd': optim.sgd,
+    'lbfgs': optim.lbfgs,
 }

 with open('tests.json', 'r') as f:
@@ -35,4 +38,4 @@ for test in tests:
         params = torch.DoubleTensor((1.5, 1.5))
         for i in range(100):
             algorithm(lambda x: (rosenbrock(x), drosenbrock(x)), params, config)
-            print('{:.12f}\t{:.12f}\t'.format(params[0], params[1]))
+            print('{:.8f}\t{:.8f}\t'.format(params[0], params[1]))
@@ -98,5 +98,12 @@
         {"learningRate": 1e-4, "nesterov": true, "momentum": 0.95, "dampening": 0},
         {"weightDecay": 0.2}
     ]
+},
+{
+    "algorithm": "lbfgs",
+    "config": [
+        {},
+        {"learningRate": 1e-1}
+    ]
 }
 ]
test/run_test.sh (new executable file, 90 lines)
@@ -0,0 +1,90 @@
#!/usr/bin/env bash
set -e

PYCMD=${PYCMD:="python"}
COVERAGE=0
while [[ "$#" -gt 0 ]]; do
    case "$1" in
        -p|--python) PYCMD=$2; shift 2 ;;
        -c|--coverage) COVERAGE=1; shift 1;;
        --) shift; break ;;
        *) echo "Invalid argument: $1!" ; exit 1 ;;
    esac
done

if [[ $COVERAGE -eq 1 ]]; then
    coverage erase
    PYCMD="coverage run --parallel-mode --source torch "
    echo "coverage flag found. Setting python command to: \"$PYCMD\""
fi

pushd "$(dirname "$0")"

echo "Running torch tests"
$PYCMD test_torch.py $@

echo "Running autograd tests"
$PYCMD test_autograd.py $@

echo "Running sparse tests"
$PYCMD test_sparse.py $@

echo "Running nn tests"
$PYCMD test_nn.py $@

echo "Running legacy nn tests"
$PYCMD test_legacy_nn.py $@

echo "Running optim tests"
$PYCMD test_optim.py $@

echo "Running multiprocessing tests"
$PYCMD test_multiprocessing.py $@
MULTIPROCESSING_METHOD=spawn $PYCMD test_multiprocessing.py $@
MULTIPROCESSING_METHOD=forkserver $PYCMD test_multiprocessing.py $@

echo "Running util tests"
$PYCMD test_utils.py $@

echo "Running dataloader tests"
$PYCMD test_dataloader.py $@

echo "Running cuda tests"
$PYCMD test_cuda.py $@

echo "Running NCCL tests"
$PYCMD test_nccl.py $@

################################################################################
if [[ "$TEST_DISTRIBUTED" -eq 1 ]]; then
    distributed_set_up() {
        export TEMP_DIR="$(mktemp -d)"
        rm -rf "$TEMP_DIR/"*
        mkdir "$TEMP_DIR/barrier"
        mkdir "$TEMP_DIR/test_dir"
    }

    distributed_tear_down() {
        rm -rf "$TEMP_DIR"
    }

    trap distributed_tear_down EXIT SIGHUP SIGINT SIGTERM

    echo "Running distributed tests for the TCP backend"
    distributed_set_up
    BACKEND=tcp WORLD_SIZE=3 $PYCMD ./test_distributed.py
    distributed_tear_down

    echo "Running distributed tests for the MPI backend"
    distributed_set_up
    BACKEND=mpi mpiexec -n 3 $PYCMD ./test_distributed.py
    distributed_tear_down
fi
################################################################################

if [[ $COVERAGE -eq 1 ]]; then
    coverage combine
    coverage html
fi

popd
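A usage note on the script above: it defaults to whatever `python` is on PATH, so `./test/run_test.sh -p python3` runs the whole suite under Python 3, `-c` swaps the interpreter for `coverage run` and builds an HTML report at the end, and the distributed block only runs when `TEST_DISTRIBUTED=1` is exported in the environment.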
(file diff suppressed because it is too large)
@@ -1,10 +1,21 @@
 import math
 import tempfile
 import unittest
+from itertools import repeat

 import torch
 import torch.cuda
+import torch.cuda.comm as comm

+from test_torch import TestTorch
+from common import TestCase, get_gpu_type, to_gpu, freeze_rng_state, run_tests

+HAS_CUDA = True
+if not torch.cuda.is_available():
+    print('CUDA not available, skipping tests')
+    TestCase = object  # noqa: F811
+    HAS_CUDA = False

-from common import TestCase, get_gpu_type, to_gpu

 def is_floating(t):
     return type(t) in [torch.FloatTensor, torch.DoubleTensor,
@@ -19,35 +30,88 @@ types = [
     torch.CharTensor,
     torch.ByteTensor,
 ]

+float_types = [
+    torch.FloatTensor,
+    torch.DoubleTensor
+]  # TODO: add half...


+def number(floating, integer, t):
+    name = type(t).__name__
+    if 'Double' in name or 'Float' in name or 'Half' in name:
+        return floating
+    else:
+        return integer
+# TODO: check HalfTensor

 S = 10
 M = 50


 def make_tensor(t, *sizes):
     return t(*sizes).copy_(torch.randn(*sizes))


 def small_2d(t):
     return make_tensor(t, S, S)


+def small_2d_scaled(t, scale=10):
+    return make_tensor(t, S, S).mul(scale)


+def small_2d_oneish(t):
+    if is_floating(t):
+        return make_tensor(t, S, S).clamp(min=0.99, max=1.01)
+    else:
+        return t(S, S).fill_(1)


 def small_3d(t):
     return make_tensor(t, S, S, S)


 def medium_1d(t):
     return make_tensor(t, M)


 def medium_2d(t):
     return make_tensor(t, M, M)


+def medium_2d_scaled(t, scale=10):
+    return make_tensor(t, M, M).mul(scale)


 def small_3d_ones(t):
     return t(S, S, S).copy_(torch.ones(S, S, S))


 def small_3d_positive(t):
     min_val = 1e-3 if is_floating(t) else 2
     return make_tensor(t, S, S, S).clamp_(min_val, 120)


 def small_3d_unique(t):
-    return t(S, S, S).copy_(torch.range(1, S*S*S))
+    return t(S, S, S).copy_(torch.range(1, S * S * S))


+def small_1d_lapack(t):
+    return t(1, 3).copy_(torch.range(1, 3).view(3))


+def small_2d_lapack(t):
+    return t(3, 3).copy_(torch.range(1, 9).view(3, 3))


+def small_2d_lapack_skinny(t):
+    return t(3, 4).copy_(torch.range(1, 12).view(3, 4))


+def small_2d_lapack_fat(t):
+    return t(4, 3).copy_(torch.range(1, 12).view(4, 3))


 def new_t(*sizes):
     def tmp(t):
@@ -55,169 +119,207 @@ def new_t(*sizes):
     return tmp

 tests = [
-    ('add', small_3d, lambda t: [3.14]),
-    ('add', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
-    ('add', small_3d, lambda t: [0.2, small_3d_positive(t)], 'scalar_tensor'),
-    ('sub', small_3d, lambda t: [3.14],),
-    ('sub', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
-    ('mul', small_3d, lambda t: [3.14],),
-    ('mul', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
-    ('div', small_3d, lambda t: [3.14],),
-    ('div', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
-    ('pow', small_3d, lambda t: [3.14],),
-    ('pow', small_3d, lambda t: [small_3d(t).abs_()], 'tensor'),
-    ('addbmm', small_2d, lambda t: [small_3d(t), small_3d(t)],),
-    ('addbmm', small_2d, lambda t: [0.2, small_3d(t), small_3d(t)], 'scalar'),
-    ('addbmm', small_2d, lambda t: [0.5, 0.2, small_3d(t), small_3d(t)], 'two_scalars'),
-    ('baddbmm', small_3d, lambda t: [small_3d(t), small_3d(t)],),
-    ('baddbmm', small_3d, lambda t: [0.2, small_3d(t), small_3d(t)], 'scalar'),
-    ('baddbmm', small_3d, lambda t: [0.5, 0.2, small_3d(t), small_3d(t)], 'two_scalars'),
-    ('addcdiv', small_3d, lambda t: [small_3d(t), small_3d(t)],),
-    ('addcdiv', small_3d, lambda t: [0.2, small_3d(t), small_3d(t)], 'scalar'),
-    ('addcmul', small_3d, lambda t: [small_3d(t), small_3d(t)],),
-    ('addcmul', small_3d, lambda t: [0.2, small_3d(t), small_3d(t)], 'scalar'),
-    ('addmm', medium_2d, lambda t: [medium_2d(t), medium_2d(t)],),
-    ('addmm', medium_2d, lambda t: [0.2, medium_2d(t), medium_2d(t)], 'scalar'),
-    ('addmm', medium_2d, lambda t: [0.5, 0.2, medium_2d(t), medium_2d(t)], 'two_scalars'),
-    ('addmv', medium_1d, lambda t: [medium_2d(t), medium_1d(t)],),
-    ('addmv', medium_1d, lambda t: [0.2, medium_2d(t), medium_1d(t)], 'scalar'),
-    ('addmv', medium_1d, lambda t: [0.5, 0.2, medium_2d(t), medium_1d(t)], 'two_scalars'),
-    ('addmv', medium_1d, lambda t: [medium_2d(t), medium_1d(t)],),
-    ('addmv', medium_1d, lambda t: [0.2, medium_2d(t), medium_1d(t)], 'scalar'),
-    ('addmv', medium_1d, lambda t: [0.5, 0.2, medium_2d(t), medium_1d(t)], 'two_scalars'),
-    ('addr', medium_2d, lambda t: [medium_1d(t), medium_1d(t)],),
-    ('addr', medium_2d, lambda t: [0.2, medium_1d(t), medium_1d(t)], 'scalar'),
-    ('addr', medium_2d, lambda t: [0.5, 0.2, medium_1d(t), medium_1d(t)], 'two_scalars'),
-    ('addr', medium_2d, lambda t: [0.5, 0.2, medium_1d(t), medium_1d(t)], 'two_scalars'),
-    ('atan2', medium_2d, lambda t: [medium_2d(t)],),
-    ('chunk', medium_2d, lambda t: [4],),
-    ('chunk', medium_2d, lambda t: [4, 1], 'dim'),
-    ('clamp', medium_2d, lambda t: [-0.1, 0.5],),
-    ('clone', medium_2d, lambda t: [],),
-    ('cmax', medium_2d, lambda t: [medium_2d(t)],),
-    ('cmin', medium_2d, lambda t: [medium_2d(t)],),
-    ('contiguous', medium_2d, lambda t: [],),
-    ('cross', new_t(M, 3, M), lambda t: [new_t(M, 3, M)(t)],),
-    ('cumprod', small_3d, lambda t: [1],),
-    ('cumsum', small_3d, lambda t: [1],),
-    ('dim', small_3d, lambda t: [],),
-    ('dist', small_2d, lambda t: [small_2d(t)],),
-    ('dist', small_2d, lambda t: [small_2d(t), 3], '3_norm'),
-    ('dist', small_2d, lambda t: [small_2d(t), 2.5], '2.5_norm'),
-    ('dot', medium_1d, lambda t: [medium_1d(t)],),
-    ('elementSize', medium_1d, lambda t: [],),
-    ('eq', small_3d_ones, lambda t: [small_3d(t)],),
-    ('eq', small_3d_ones, lambda t: [small_3d_ones(t)], 'equal'),
-    ('ne', small_3d_ones, lambda t: [small_3d(t)],),
-    ('ne', small_3d_ones, lambda t: [small_3d_ones(t)], 'equal'),
-    ('equal', small_3d_ones, lambda t: [small_3d_ones(t)],),
-    ('equal', small_3d_ones, lambda t: [small_3d(t)],),
-    ('expand', new_t(M, 1, M), lambda t: [M, 4, M],),
-    ('expandAs', new_t(M, 1, M), lambda t: [new_t(M, 4, M)(t)],),
-    ('fill', medium_2d, lambda t: [3.14],),
-    ('ge', medium_2d, lambda t: [medium_2d(t)],),
-    ('le', medium_2d, lambda t: [medium_2d(t)],),
-    ('gt', medium_2d, lambda t: [medium_2d(t)],),
-    ('lt', medium_2d, lambda t: [medium_2d(t)],),
-    ('isContiguous', medium_2d, lambda t: [],),
+    ('add', small_3d, lambda t: [number(3.14, 3, t)]),
+    ('add', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
+    ('add', small_3d, lambda t: [number(0.2, 2, t), small_3d_positive(t)], 'scalar_tensor'),
+    ('sub', small_3d, lambda t: [number(3.14, 3, t)],),
+    ('sub', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
+    ('mul', small_3d, lambda t: [number(3.14, 3, t)],),
+    ('mul', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
+    ('div', small_3d, lambda t: [number(3.14, 3, t)],),
+    ('div', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
+    ('pow', small_3d, lambda t: [number(3.14, 3, t)], None, float_types),
+    ('pow', small_3d, lambda t: [small_3d(t).abs_()], 'tensor', float_types),
+    ('addbmm', small_2d, lambda t: [small_3d(t), small_3d(t)], None, float_types),
+    ('addbmm', small_2d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar'),
+    ('addbmm', small_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), small_3d(t), small_3d(t)], 'two_scalars'),
+    ('baddbmm', small_3d, lambda t: [small_3d(t), small_3d(t)],),
+    ('baddbmm', small_3d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar'),
+    ('baddbmm', small_3d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), small_3d(t), small_3d(t)], 'two_scalars'),
+    ('addcdiv', small_2d_lapack, lambda t: [small_2d_lapack(t).mul(2), small_2d_lapack(t)],),
+    ('addcdiv', small_2d_lapack, lambda t: [number(2.8, 1, t),
+                                            small_2d_lapack(t).mul(2), small_2d_lapack(t)], 'scalar'),
+    ('addcmul', small_3d, lambda t: [small_3d(t), small_3d(t)],),
+    ('addcmul', small_3d, lambda t: [number(0.4, 2, t), small_3d(t), small_3d(t)], 'scalar'),
+    ('addmm', medium_2d, lambda t: [medium_2d(t), medium_2d(t)],),
+    ('addmm', medium_2d, lambda t: [number(0.4, 2, t), medium_2d(t), medium_2d(t)], 'scalar'),
+    ('addmm', medium_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_2d(t), medium_2d(t)], 'two_scalars'),
+    ('addmv', medium_1d, lambda t: [medium_2d(t), medium_1d(t)],),
+    ('addmv', medium_1d, lambda t: [number(0.4, 2, t), medium_2d(t), medium_1d(t)], 'scalar'),
+    ('addmv', medium_1d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_2d(t), medium_1d(t)], 'two_scalars'),
+    ('addr', medium_2d, lambda t: [medium_1d(t), medium_1d(t)],),
+    ('addr', medium_2d, lambda t: [number(0.4, 2, t), medium_1d(t), medium_1d(t)], 'scalar'),
+    ('addr', medium_2d, lambda t: [number(0.5, 3, t), number(0.4, 2, t), medium_1d(t), medium_1d(t)], 'two_scalars'),
+    ('atan2', medium_2d, lambda t: [medium_2d(t)], None, float_types),
+    ('fmod', small_3d, lambda t: [3], 'value'),
+    ('fmod', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
+    ('chunk', medium_2d, lambda t: [4],),
+    ('chunk', medium_2d, lambda t: [4, 1], 'dim'),
+    ('clamp', medium_2d_scaled, lambda t: [-1, 5],),
+    ('clone', medium_2d, lambda t: [],),
+    ('contiguous', medium_2d, lambda t: [],),
+    ('cross', new_t(M, 3, M), lambda t: [new_t(M, 3, M)(t)],),
+    ('cumprod', small_3d, lambda t: [1],),
+    ('cumsum', small_3d, lambda t: [1],),
+    ('dim', small_3d, lambda t: [],),
+    ('dist', small_2d, lambda t: [small_2d(t)],),
+    ('dist', small_2d, lambda t: [small_2d(t), 3], '3_norm'),
+    ('dist', small_2d, lambda t: [small_2d(t), 2.5], '2_5_norm'),
+    ('dot', medium_1d, lambda t: [medium_1d(t)],),
+    ('element_size', medium_1d, lambda t: [],),
+    ('eq', small_3d_ones, lambda t: [small_3d(t)],),
+    ('eq', small_3d_ones, lambda t: [small_3d_ones(t)], 'equal'),
+    ('ne', small_3d_ones, lambda t: [small_3d(t)],),
+    ('ne', small_3d_ones, lambda t: [small_3d_ones(t)], 'equal'),
+    ('equal', small_3d_ones, lambda t: [small_3d_ones(t)], 'equal'),
+    ('equal', small_3d_ones, lambda t: [small_3d(t)],),
+    ('expand', new_t(M, 1, M), lambda t: [M, 4, M],),
+    ('expand_as', new_t(M, 1, M), lambda t: [new_t(M, 4, M)(t)],),
+    ('fill', medium_2d, lambda t: [number(3.14, 3, t)],),
+    ('ge', medium_2d, lambda t: [medium_2d(t)],),
+    ('le', medium_2d, lambda t: [medium_2d(t)],),
+    ('gt', medium_2d, lambda t: [medium_2d(t)],),
+    ('lt', medium_2d, lambda t: [medium_2d(t)],),
+    ('is_contiguous', medium_2d, lambda t: [],),
     # TODO: can't check negative case - GPU copy will be contiguous
-    ('isSameSizeAs', medium_2d, lambda t: [small_3d(t)], 'negative'),
-    ('isSameSizeAs', medium_2d, lambda t: [medium_2d(t)], 'positive'),
-    ('isSetTo', medium_2d, lambda t: [medium_2d(t)],),
+    ('is_same_size', medium_2d, lambda t: [small_3d(t)], 'negative'),
+    ('is_same_size', medium_2d, lambda t: [medium_2d(t)], 'positive'),
+    ('is_set_to', medium_2d, lambda t: [medium_2d(t)],),
     # TODO: positive case
-    ('isSize', medium_2d, lambda t: [torch.LongStorage((M, M))],),
-    ('kthvalue', small_3d_unique, lambda t: [3],),
-    ('kthvalue', small_3d_unique, lambda t: [3, 1], 'dim'),
-    ('lerp', small_3d, lambda t: [small_3d(t), 0.3],),
-    ('max', small_3d_unique, lambda t: [],),
-    ('max', small_3d_unique, lambda t: [1], 'dim'),
-    ('min', small_3d_unique, lambda t: [],),
-    ('min', small_3d_unique, lambda t: [1], 'dim'),
-    ('mean', small_3d, lambda t: [],),
-    ('mean', small_3d, lambda t: [1], 'dim'),
-    ('mode', small_3d, lambda t: [],),
-    ('mode', small_3d, lambda t: [1], 'dim'),
-    ('std', small_3d, lambda t: [],),
-    ('std', small_3d, lambda t: [1], 'dim'),
-    ('var', small_3d, lambda t: [],),
-    ('var', small_3d, lambda t: [1], 'dim'),
-    ('nDimension', small_3d, lambda t: [],),
-    ('nElement', small_3d, lambda t: [],),
-    ('numel', small_3d, lambda t: [],),
-    ('narrow', small_3d, lambda t: [1, 3, 2],),
-    ('nonzero', small_3d, lambda t: [],),
-    ('norm', small_3d, lambda t: [],),
-    ('norm', small_3d, lambda t: [3], '3_norm'),
-    ('norm', small_3d, lambda t: [3, 0], '3_norm_dim'),
-    ('ones', small_3d, lambda t: [1, 2, 3, 4, 5],),
-    ('permute', new_t(1, 2, 3, 4), lambda t: [2, 1, 3, 0],),
-    ('prod', small_3d, lambda t: [],),
-    ('prod', small_3d, lambda t: [1], 'dim'),
-    ('sum', small_2d, lambda t: [],),
-    ('sum', small_3d, lambda t: [1], 'dim'),
-    ('renorm', small_3d, lambda t: [2, 1, 1], '2_norm'),
-    ('renorm', small_3d, lambda t: [1.5, 1, 1], '1.5_norm'),
-    ('repeatTensor', small_2d, lambda t: [2, 2, 2],),
-    ('size', new_t(1, 2, 3, 4), lambda t: [],),
-    ('sort', small_3d_unique, lambda t: [],),
-    ('sort', small_3d_unique, lambda t: [1], 'dim'),
-    ('sort', small_3d_unique, lambda t: [1, True], 'dim_descending'),
-    ('split', small_3d, lambda t: [2],),
-    ('split', small_3d, lambda t: [2, 1], 'dim'),
-    ('squeeze', new_t(1, 2, 1, 4), lambda t: [],),
-    ('squeeze', new_t(1, 2, 1, 4), lambda t: [2], 'dim'),
-    ('t', new_t(1, 2), lambda t: [],),
-    ('transpose', new_t(1, 2, 3, 4), lambda t: [1, 2],),
-    ('to_list', small_3d, lambda t: [],),
-    ('topk', small_3d, lambda t: [2, 1, False, True], 'dim_sort'),
-    ('topk', small_3d, lambda t: [2, 1, True, True], 'dim_desc_sort'),
-    ('trace', medium_2d, lambda t: [],),
-    ('tril', medium_2d, lambda t: [],),
-    ('tril', medium_2d, lambda t: [2], 'positive'),
-    ('tril', medium_2d, lambda t: [-2], 'negative'),
-    ('triu', medium_2d, lambda t: [],),
-    ('triu', medium_2d, lambda t: [2], 'positive'),
-    ('triu', medium_2d, lambda t: [-2], 'negative'),
-    ('view', small_3d, lambda t: [100, 10],),
-    ('viewAs', small_3d, lambda t: [t(100, 10)],),
-    ('zero', small_3d, lambda t: [],),
-    ('zeros', small_3d, lambda t: [1, 2, 3, 4],),
-    ('rsqrt', lambda t: small_3d(t) + 1, lambda t: [],),
-    ('sinh', lambda t: small_3d(t).clamp(-1, 1), lambda t: [],),
-    ('tan', lambda t: small_3d(t).clamp(-1, 1), lambda t: [],),
+    ('kthvalue', small_3d_unique, lambda t: [3],),
+    ('kthvalue', small_3d_unique, lambda t: [3, 1], 'dim'),
+    ('lerp', small_3d, lambda t: [small_3d(t), 0.3],),
+    ('max', small_3d_unique, lambda t: [],),
+    ('max', small_3d_unique, lambda t: [1], 'dim'),
+    ('max', medium_2d, lambda t: [medium_2d(t)], 'elementwise'),
+    ('min', small_3d_unique, lambda t: [],),
+    ('min', small_3d_unique, lambda t: [1], 'dim'),
+    ('min', medium_2d, lambda t: [medium_2d(t)], 'elementwise'),
+    ('mean', small_3d, lambda t: [],),
+    ('mean', small_3d, lambda t: [1], 'dim'),
+    ('mode', small_3d, lambda t: [],),
+    ('mode', small_3d, lambda t: [1], 'dim'),
+    ('remainder', small_3d, lambda t: [3], 'value'),
+    ('remainder', small_3d, lambda t: [small_3d_positive(t)], 'tensor'),
+    ('std', small_3d, lambda t: [],),
+    ('std', small_3d, lambda t: [1], 'dim'),
+    ('var', small_3d, lambda t: [],),
+    ('var', small_3d, lambda t: [1], 'dim'),
+    ('ndimension', small_3d, lambda t: [],),
+    ('nelement', small_3d, lambda t: [],),
+    ('numel', small_3d, lambda t: [],),
+    ('narrow', small_3d, lambda t: [1, 3, 2],),
+    ('nonzero', small_3d, lambda t: [],),
+    ('norm', small_3d, lambda t: [],),
+    ('norm', small_3d, lambda t: [3], '3_norm'),
+    ('norm', small_3d, lambda t: [3, 0], '3_norm_dim'),
+    ('ones', small_3d, lambda t: [1, 2, 3, 4, 5],),
+    ('permute', new_t(1, 2, 3, 4), lambda t: [2, 1, 3, 0],),
+    ('prod', small_2d_oneish, lambda t: [],),
+    ('prod', small_3d, lambda t: [1], 'dim'),
+    ('sum', small_2d, lambda t: [],),
+    ('sum', small_3d, lambda t: [1], 'dim'),
+    ('renorm', small_3d, lambda t: [2, 1, 1], '2_norm'),
+    ('renorm', small_3d, lambda t: [1.5, 1, 1], '1_5_norm'),
+    ('repeat', small_2d, lambda t: [2, 2, 2],),
+    ('size', new_t(1, 2, 3, 4), lambda t: [],),
+    ('sort', small_3d_unique, lambda t: [],),
+    ('sort', small_3d_unique, lambda t: [1], 'dim'),
+    ('sort', small_3d_unique, lambda t: [1, True], 'dim_descending'),
+    ('split', small_3d, lambda t: [2],),
+    ('split', small_3d, lambda t: [2, 1], 'dim'),
+    ('squeeze', new_t(1, 2, 1, 4), lambda t: [],),
+    ('squeeze', new_t(1, 2, 1, 4), lambda t: [2], 'dim'),
+    ('t', new_t(1, 2), lambda t: [],),
+    ('transpose', new_t(1, 2, 3, 4), lambda t: [1, 2],),
+    ('to_list', small_3d, lambda t: [],),
+    ('topk', small_3d, lambda t: [2, 1, False, True], 'dim_sort'),
+    ('topk', small_3d, lambda t: [2, 1, True, True], 'dim_desc_sort'),
+    ('trace', medium_2d, lambda t: [],),
+    ('tril', medium_2d, lambda t: [],),
+    ('tril', medium_2d, lambda t: [2], 'positive'),
+    ('tril', medium_2d, lambda t: [-2], 'negative'),
+    ('triu', medium_2d, lambda t: [],),
+    ('triu', medium_2d, lambda t: [2], 'positive'),
+    ('triu', medium_2d, lambda t: [-2], 'negative'),
+    ('unsqueeze', new_t(2, 3, 4), lambda t: [2],),
+    ('view', small_3d, lambda t: [100, 10],),
+    ('view_as', small_3d, lambda t: [t(100, 10)],),
+    ('zero', small_3d, lambda t: [],),
+    ('zeros', small_3d, lambda t: [1, 2, 3, 4],),
+    ('rsqrt', lambda t: small_3d(t) + 1, lambda t: [], None, float_types),
+    ('sinh', lambda t: small_3d(t).clamp(-1, 1), lambda t: [], None, float_types),
+    ('tan', lambda t: small_3d(t).clamp(-1, 1), lambda t: [], None, float_types),
+    # lapack tests
+    ('qr', small_2d_lapack, lambda t: [], 'square', float_types),
+    ('qr', small_2d_lapack_skinny, lambda t: [], 'skinny', float_types),
+    ('qr', small_2d_lapack_fat, lambda t: [], 'fat', float_types),

 ]
-# TODO: random functions, cat, gather, scatter, index*, masked*, resize, resizeAs, storageOffset, storage, stride, unfold
+# TODO: random functions, cat, gather, scatter, index*, masked*,
+# resize, resizeAs, storage_offset, storage, stride, unfold

+custom_precision = {
+    'addbmm': 1e-4,
+    'addmm': 1e-4,
+    'addmv': 1e-4,
+    'addr': 1e-4,
+    'baddbmm': 1e-4,
+    'rsqrt': 1e-4,
+    'cumprod': 1e-4,
+}

-simple_pointwise = [
-    'abs',
-    'acos',
-    'asin',
-    'atan',
-    'ceil',
-    'cinv',
-    'cos',
-    'cosh',
-    'exp',
-    'floor',
-    'fmod',
-    'frac',
-    'log',
-    'log1p',
-    'neg',
-    'remainder',
-    'round',
-    'sigmoid',
-    'sign',
-    'sin',
-    'sqrt',
-    'tanh',
-    'trunc',
-]
-for fn in simple_pointwise:
-    tests.append((fn, small_3d, lambda t: []))

+simple_pointwise_float = [
+    'log',
+    'log1p',
+    'sigmoid',
+    'sin',
+    'sqrt',
+    'tanh',
+    'acos',
+    'asin',
+    'atan',
+    'cos',
+    'cosh',
+    'exp',
+    'reciprocal',
+    'floor',
+    'frac',
+    'neg',
+    'round',
+    'trunc',
+    'ceil',
+]

+for fn in simple_pointwise_float:
+    tests.append((fn, small_3d, lambda t: [], None, float_types))

+_cycles_per_ms = None


+def get_cycles_per_ms():
+    """Approximate number of cycles per millisecond for torch.cuda._sleep"""
+    global _cycles_per_ms
+    if _cycles_per_ms is None:
+        start = torch.cuda.Event(enable_timing=True)
+        end = torch.cuda.Event(enable_timing=True)
+        start.record()
+        torch.cuda._sleep(1000000)
+        end.record()
+        end.synchronize()
+        _cycles_per_ms = 1000000 / start.elapsed_time(end)
+    return _cycles_per_ms


-def compare_cpu_gpu(tensor_constructor, arg_constructor, fn, t):
+def compare_cpu_gpu(tensor_constructor, arg_constructor, fn, t, precision=1e-5):
     def tmp(self):
         cpu_tensor = tensor_constructor(t)
         gpu_tensor = to_gpu(cpu_tensor)
@@ -231,32 +333,58 @@ def compare_cpu_gpu(tensor_constructor, arg_constructor, fn, t):
             if 'unimplemented data type' in reason:
                 raise unittest.SkipTest('unimplemented data type')
             raise
+        except AttributeError as e:
+            reason = e.args[0]
+            if 'object has no attribute' in reason:
+                raise unittest.SkipTest('unimplemented data type')
+            raise
         # If one changes, another should change as well
-        self.assertEqual(cpu_tensor, gpu_tensor)
-        self.assertEqual(cpu_args, gpu_args)
+        self.assertEqual(cpu_tensor, gpu_tensor, precision)
+        self.assertEqual(cpu_args, gpu_args, precision)
         # Compare results
-        self.assertEqual(cpu_result, gpu_result)
+        self.assertEqual(cpu_result, gpu_result, precision)
     return tmp

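Two helpers above carry most of this file's strategy: get_cycles_per_ms calibrates torch.cuda._sleep by timing a fixed one-million-cycle spin with CUDA events (cycles per ms = 1000000 / elapsed ms), and compare_cpu_gpu builds one test closure per (constructor, op, type) tuple. Stripped of the harness, the comparison amounts to the following sketch; the op name, sizes, and tolerance here are arbitrary examples, not the harness itself:

import torch

def check_cpu_gpu_match(op_name, cpu_tensor, precision=1e-5):
    # Apply the same method to a CPU tensor and its GPU copy, then require
    # the results to agree to within `precision`.
    gpu_tensor = cpu_tensor.cuda()
    cpu_result = getattr(cpu_tensor, op_name)()
    gpu_result = getattr(gpu_tensor, op_name)()
    max_diff = (cpu_result - gpu_result.cpu()).abs().max()
    assert max_diff <= precision, '{}: max diff {}'.format(op_name, max_diff)

# e.g. check_cpu_gpu_match('abs', torch.randn(10, 10))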
 class TestCuda(TestCase):

+    @unittest.skipIf(torch.cuda.device_count() < 2, "only one GPU detected")
     def test_autogpu(self):
-        if torch.cuda.deviceCount() > 1:
-            x = torch.randn(5, 5).cuda()
-            y = torch.randn(5, 5).cuda()
-            self.assertEqual(x.getDevice(), 0)
-            self.assertEqual(x.getDevice(), 0)
-            with torch.cuda.device(1):
-                z = torch.randn(5, 5).cuda()
-                self.assertEqual(z.getDevice(), 1)
-                q = x.add(y)
-                self.assertEqual(q.getDevice(), 0)
-                w = torch.randn(5, 5).cuda()
-                self.assertEqual(w.getDevice(), 1)
-            z = z.cuda()
-            self.assertEqual(z.getDevice(), 0)
+        x = torch.randn(5, 5).cuda()
+        y = torch.randn(5, 5).cuda()
+        self.assertEqual(x.get_device(), 0)
+        self.assertEqual(x.get_device(), 0)
+        with torch.cuda.device(1):
+            z = torch.randn(5, 5).cuda()
+            self.assertEqual(z.get_device(), 1)
+            q = x.add(y)
+            self.assertEqual(q.get_device(), 0)
+            w = torch.randn(5, 5).cuda()
+            self.assertEqual(w.get_device(), 1)
+        z = z.cuda()
+        self.assertEqual(z.get_device(), 0)

-    def test_serialization(self):
+    @unittest.skipIf(torch.cuda.device_count() < 2, "only one GPU detected")
+    def test_copy_device(self):
+        x = torch.randn(5, 5).cuda()
+        with torch.cuda.device(1):
+            y = x.cuda()
+            self.assertEqual(y.get_device(), 1)
+            self.assertIs(y.cuda(), y)
+            z = y.cuda(0)
+            self.assertEqual(z.get_device(), 0)
+            self.assertIs(z.cuda(0), z)
+
+        x = torch.randn(5, 5)
+        with torch.cuda.device(1):
+            y = x.cuda()
+            self.assertEqual(y.get_device(), 1)
+            self.assertIs(y.cuda(), y)
+            z = y.cuda(0)
+            self.assertEqual(z.get_device(), 0)
+            self.assertIs(z.cuda(0), z)
+
+    def test_serialization_array_with_storage(self):
         x = torch.randn(5, 5).cuda()
         y = torch.IntTensor(2, 5).fill_(0).cuda()
         q = [x, y, x, y.storage()]
@@ -274,30 +402,385 @@ class TestCuda(TestCase):
         q_copy[1].fill_(10)
         self.assertTrue(q_copy[3], torch.cuda.IntStorage(10).fill_(10))

-for decl in tests:
-    for t in types:
-        tensor = t()
-        gpu_tensor = get_gpu_type(t)()
-        for inplace in (True, False):
-            if inplace:
-                name = name + '_'
-            if not hasattr(tensor, name):
-                continue
-            if not hasattr(gpu_tensor, name):
-                print("Ignoring {}, because it's not implemented by torch.cuda.{}".format(name, gpu_tensor.__class__.__name__))
-                continue
-            test_name = 'test_' + t.__name__ + '_' + name
-            if desc:
-                test_name += '_' + desc
-            assert not hasattr(TestCase, test_name)
-            setattr(TestCuda, test_name, compare_cpu_gpu(constr, arg_constr, name, t))
+    def test_type_conversions(self):
+        x = torch.randn(5, 5)
+        self.assertIs(type(x.float()), torch.FloatTensor)
+        self.assertIs(type(x.cuda()), torch.cuda.DoubleTensor)
+        self.assertIs(type(x.cuda().float()), torch.cuda.FloatTensor)
+        self.assertIs(type(x.cuda().float().cpu()), torch.FloatTensor)
+        self.assertIs(type(x.cuda().float().cpu().int()), torch.IntTensor)
+
+        y = x.storage()
+        self.assertIs(type(y.float()), torch.FloatStorage)
+        self.assertIs(type(y.cuda()), torch.cuda.DoubleStorage)
+        self.assertIs(type(y.cuda().float()), torch.cuda.FloatStorage)
+        self.assertIs(type(y.cuda().float().cpu()), torch.FloatStorage)
+        self.assertIs(type(y.cuda().float().cpu().int()), torch.IntStorage)
+
+    @unittest.skipIf(torch.cuda.device_count() < 2, "only one GPU detected")
+    def test_type_conversions_same_gpu(self):
+        x = torch.randn(5, 5).cuda(1)
+        self.assertEqual(x.int().get_device(), 1)
+
+    def _test_broadcast(self, input):
+        if torch.cuda.device_count() < 2:
+            raise unittest.SkipTest("only one GPU detected")
+        result = comm.broadcast(input, (0, 1))
+        for i, t in enumerate(result):
+            self.assertEqual(t.get_device(), i)
+            self.assertEqual(t, input)
+
+    def test_broadcast_cpu(self):
+        self._test_broadcast(torch.randn(5, 5))
+
+    def test_broadcast_gpu(self):
+        self._test_broadcast(torch.randn(5, 5))
+
+    @unittest.skipIf(torch.cuda.device_count() < 2, "only one GPU detected")
+    def test_reduce_add(self):
+        x = torch.randn(5, 5)
+        y = torch.randn(5, 5)
+        x_cuda = x.cuda(0)
+        y_cuda = y.cuda(1)
+        result = comm.reduce_add((x_cuda, y_cuda))
+        self.assertEqual(result.get_device(), 0)
+        self.assertEqual(result.cpu(), x + y)
+
+    def _test_scatter(self, input, chunk_sizes=None, dim=0):
+        if torch.cuda.device_count() < 2:
+            raise unittest.SkipTest("only one GPU detected")
+        result = comm.scatter(input, (0, 1), chunk_sizes, dim)
+        self.assertEqual(len(result), 2)
+        if chunk_sizes is None:
+            chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
+        chunk_start = 0
+        for i, r in enumerate(result):
+            chunk_end = chunk_start + chunk_sizes[i]
+            index = [slice(None, None), slice(None, None)]
+            index[dim] = slice(chunk_start, chunk_end)
+            self.assertEqual(r, input[tuple(index)], 0)
+            chunk_start = chunk_end
+
+    def test_scatter_cpu(self):
+        self._test_scatter(torch.randn(4, 4), dim=0)
+
+    def test_scatter_cpu_dim(self):
+        self._test_scatter(torch.randn(4, 4), dim=1)
+
+    def test_scatter_cpu_sizes(self):
+        self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
+
+    def test_scatter_gpu(self):
+        self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
+
+    def test_scatter_gpu_dim(self):
+        self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
+
+    def test_scatter_gpu_sizes(self):
+        self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
+
+    def _test_gather(self, dim):
+        if torch.cuda.device_count() < 2:
+            raise unittest.SkipTest("only one GPU detected")
+        x = torch.randn(2, 5).cuda(0)
+        y = torch.randn(2, 5).cuda(1)
+        result = comm.gather((x, y), dim)
+
+        expected_size = list(x.size())
+        expected_size[dim] += y.size(dim)
+        expected_size = torch.Size(expected_size)
+        self.assertEqual(result.get_device(), 0)
+        self.assertEqual(result.size(), expected_size)
+
+        index = [slice(None, None), slice(None, None)]
+        index[dim] = slice(0, x.size(dim))
+        self.assertEqual(result[tuple(index)], x)
+        index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
+        self.assertEqual(result[tuple(index)], y)
+
+    def test_gather(self):
+        self._test_gather(0)
+
+    def test_gather_dim(self):
+        self._test_gather(1)
+
+    def test_from_sequence(self):
+        seq = [list(range(i * 4, i * 4 + 4)) for i in range(5)]
+        reference = torch.range(0, 19).resize_(5, 4)
+        for t in types:
+            cuda_type = get_gpu_type(t)
+            self.assertEqual(cuda_type(seq), reference)
+
+    def test_manual_seed(self):
+        with freeze_rng_state():
+            x = torch.zeros(4, 4).float().cuda()
+            torch.cuda.manual_seed(2)
+            self.assertEqual(torch.cuda.initial_seed(), 2)
+            x.uniform_()
+            torch.cuda.manual_seed(2)
+            y = x.clone().uniform_()
+            self.assertEqual(x, y)
+            self.assertEqual(torch.cuda.initial_seed(), 2)
+
+    @unittest.skipIf(torch.cuda.device_count() < 2, "only one GPU detected")
+    def test_cat_autogpu(self):
+        x = torch.randn(4, 4).cuda(1)
+        y = torch.randn(4, 4).cuda(1)
+        z = torch.cat([x, y], 0)
+        self.assertEqual(z.get_device(), x.get_device())
+
+    def test_serialization(self):
+        x = torch.randn(4, 4).cuda()
+        with tempfile.NamedTemporaryFile() as f:
+            torch.save(x, f)
+            f.seek(0)
+            x_copy = torch.load(f)
+        self.assertEqual(x_copy, x)
+        self.assertIs(type(x_copy), type(x))
+        self.assertEqual(x_copy.get_device(), x.get_device())
+
+    def test_serialization_array_with_empty(self):
+        x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
+        with tempfile.NamedTemporaryFile() as f:
+            torch.save(x, f)
+            f.seek(0)
+            x_copy = torch.load(f)
+        for original, copy in zip(x, x_copy):
+            self.assertEqual(copy, original)
+            self.assertIs(type(copy), type(original))
+            self.assertEqual(copy.get_device(), original.get_device())
+
+    @unittest.skipIf(torch.cuda.device_count() < 2, "detected only one GPU")
+    def test_multigpu_serialization(self):
+        x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
+        with tempfile.NamedTemporaryFile() as f:
+            torch.save(x, f)
+            f.seek(0)
+            x_copy = torch.load(f)
+        for original, copy in zip(x, x_copy):
+            self.assertEqual(copy, original)
+            self.assertIs(type(copy), type(original))
+            self.assertEqual(copy.get_device(), original.get_device())
+
+    @unittest.skipIf(torch.cuda.device_count() < 2, "detected only one GPU")
+    def test_multigpu_serialization_remap(self):
+        x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
+
+        def gpu_remap(storage, location):
+            if location == 'cuda:1':
+                return storage.cuda(0)
+
+        with tempfile.NamedTemporaryFile() as f:
+            torch.save(x, f)
+            f.seek(0)
+            x_copy = torch.load(f, map_location=gpu_remap)
+
+        for original, copy in zip(x, x_copy):
+            self.assertEqual(copy, original)
+            self.assertIs(type(copy), type(original))
+            self.assertEqual(copy.get_device(), 0)
+
+    @unittest.skipIf(torch.cuda.device_count() < 2, "detected only one GPU")
+    def test_multigpu_serialization_remap_dict(self):
+        x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
+        with tempfile.NamedTemporaryFile() as f:
+            torch.save(x, f)
+            f.seek(0)
+            x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
+        for original, copy in zip(x, x_copy):
+            self.assertEqual(copy, original)
+            self.assertIs(type(copy), type(original))
+            self.assertEqual(copy.get_device(), 0)
+
+    @unittest.skipIf(torch.cuda.device_count() < 2, "detected only one GPU")
+    def test_cuda_set_device(self):
+        x = torch.randn(5, 5)
+        with torch.cuda.device(1):
+            self.assertEqual(x.cuda().get_device(), 1)
+            torch.cuda.set_device(0)
+            self.assertEqual(x.cuda().get_device(), 0)
+            with torch.cuda.device(1):
+                self.assertEqual(x.cuda().get_device(), 1)
+            self.assertEqual(x.cuda().get_device(), 0)
+            torch.cuda.set_device(1)
+        self.assertEqual(x.cuda().get_device(), 0)
+
+    def test_is_tensor(self):
+        for t in types:
+            tensor = get_gpu_type(t)()
+            self.assertTrue(torch.is_tensor(tensor))
+        self.assertTrue(torch.is_tensor(torch.cuda.HalfTensor()))
+
+    def test_cuda_synchronize(self):
+        torch.cuda.synchronize()
+
+    def test_streams(self):
+        default_stream = torch.cuda.current_stream()
+        user_stream = torch.cuda.Stream()
+        self.assertEqual(torch.cuda.current_stream(), default_stream)
+        self.assertNotEqual(default_stream, user_stream)
+        self.assertEqual(default_stream.cuda_stream, 0)
+        self.assertNotEqual(user_stream.cuda_stream, 0)
+        with torch.cuda.stream(user_stream):
+            self.assertEqual(torch.cuda.current_stream(), user_stream)
+        self.assertTrue(user_stream.query())
+        # copy 10 MB tensor from CPU-GPU which should take some time
+        tensor1 = torch.ByteTensor(10000000).pin_memory()
+        tensor2 = tensor1.cuda(async=True)
+        self.assertFalse(default_stream.query())
+        default_stream.synchronize()
+        self.assertTrue(default_stream.query())
+
+    @unittest.skipIf(torch.cuda.device_count() < 2, "detected only one GPU")
+    def test_streams_multi_gpu(self):
+        default_stream = torch.cuda.current_stream()
+        self.assertEqual(default_stream.device, 0)
+        stream = torch.cuda.Stream(device=1)
+        self.assertEqual(stream.device, 1)
+        with torch.cuda.device(1):
+            self.assertEqual(torch.cuda.current_stream().device, 1)
+            self.assertNotEqual(torch.cuda.current_stream(), default_stream)
+
+    @unittest.skipIf(torch.cuda.device_count() < 2, "multi-GPU not supported")
+    def test_tensor_device(self):
+        self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
+        self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
+        with torch.cuda.device(1):
+            self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
+            self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
+            self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
+
+    def test_events(self):
+        stream = torch.cuda.current_stream()
+        event = torch.cuda.Event(enable_timing=True)
+        self.assertTrue(event.query())
+        start_event = torch.cuda.Event(enable_timing=True)
+        stream.record_event(start_event)
+        torch.cuda._sleep(int(50 * get_cycles_per_ms()))
+        stream.record_event(event)
+        self.assertFalse(event.query())
+        event.synchronize()
+        self.assertTrue(event.query())
+        self.assertGreater(start_event.elapsed_time(event), 0)
+
+    def test_record_stream(self):
+        cycles_per_ms = get_cycles_per_ms()
+
+        t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
+        result = torch.cuda.FloatTensor(t.size())
+        stream = torch.cuda.Stream()
+        ptr = [None]
+
+        # Performs the CPU->GPU copy in a background stream
+        def perform_copy():
+            with torch.cuda.stream(stream):
+                tmp = t.cuda(async=True)
+                ptr[0] = tmp.data_ptr()
+            torch.cuda.current_stream().wait_stream(stream)
+            tmp.record_stream(torch.cuda.current_stream())
+            torch.cuda._sleep(int(50 * cycles_per_ms))  # delay the copy
+            result.copy_(tmp)
+
+        perform_copy()
+        with torch.cuda.stream(stream):
+            tmp2 = torch.cuda.FloatTensor(t.size())
+            tmp2.zero_()
+            self.assertNotEqual(tmp2.data_ptr(), ptr[0], 'allocation re-used too soon')
+
+        self.assertEqual(result.tolist(), [1, 2, 3, 4])
+
+        # Check that the block will be re-used after the main stream finishes
+        torch.cuda.current_stream().synchronize()
+        with torch.cuda.stream(stream):
+            tmp3 = torch.cuda.FloatTensor(t.size())
+            self.assertEqual(tmp3.data_ptr(), ptr[0], 'allocation not re-used')
+
+    def test_caching_pinned_memory(self):
+        cycles_per_ms = get_cycles_per_ms()
+
+        # check that allocations are re-used after deletion
+        t = torch.FloatTensor([1]).pin_memory()
+        ptr = t.data_ptr()
+        del t
+        t = torch.FloatTensor([1]).pin_memory()
+        self.assertEqual(t.data_ptr(), ptr, 'allocation not reused')
+
+        # check that the allocation is not re-used if it's in-use by a copy
+        gpu_tensor = torch.cuda.FloatTensor([0])
+        torch.cuda._sleep(int(50 * cycles_per_ms))  # delay the copy
+        gpu_tensor.copy_(t, async=True)
+        del t
+        t = torch.FloatTensor([1]).pin_memory()
+        self.assertNotEqual(t.data_ptr(), ptr, 'allocation re-used too soon')
+        self.assertEqual(list(gpu_tensor), [1])
+
+    @unittest.skipIf(torch.cuda.device_count() < 2, "only one GPU detected")
+    def test_caching_pinned_memory_multi_gpu(self):
+        # checks that the events preventing pinned memory from being re-used
+        # too early are recorded on the correct GPU
+        cycles_per_ms = get_cycles_per_ms()
+
+        t = torch.FloatTensor([1]).pin_memory()
+        ptr = t.data_ptr()
+        gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
+        gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)
+
+        with torch.cuda.device(1):
+            torch.cuda._sleep(int(50 * cycles_per_ms))  # delay the copy
+            gpu_tensor1.copy_(t, async=True)
+
+        del t
+        t = torch.FloatTensor([2]).pin_memory()
+        self.assertNotEqual(t.data_ptr(), ptr, 'allocation re-used too soon')
+
+        with torch.cuda.device(0):
+            gpu_tensor0.copy_(t, async=True)
+
+        self.assertEqual(gpu_tensor1[0], 1)
+        self.assertEqual(gpu_tensor0[0], 2)
+
+    def test_btrifact(self):
+        TestTorch._test_btrifact(self, lambda t: t.cuda())
+
+    def test_btrisolve(self):
+        TestTorch._test_btrisolve(self, lambda t: t.cuda())
+
+
+if HAS_CUDA:
+    for decl in tests:
+        for t in types:
+            tensor = t()
+            gpu_tensor = get_gpu_type(t)()
+            if len(decl) == 3:
+                name, constr, arg_constr = decl
+                desc = ''
+            elif len(decl) == 4:
+                name, constr, arg_constr, desc = decl
+            elif len(decl) == 5:
+                name, constr, arg_constr, desc, type_subset = decl
+                if t not in type_subset:
+                    continue
+
+            precision = custom_precision.get(name, TestCuda.precision)
+            for inplace in (True, False):
+                if inplace:
+                    name_inner = name + '_'
+                else:
+                    name_inner = name
+                if not hasattr(tensor, name_inner):
+                    continue
+                if not hasattr(gpu_tensor, name_inner):
+                    print("Ignoring {}, because it's not implemented by torch.cuda.{}".format(
+                        name_inner, gpu_tensor.__class__.__name__))
+                    continue
+
+                test_name = 'test_' + t.__name__ + '_' + name_inner
+                if desc:
+                    test_name += '_' + desc
+
+                assert not hasattr(TestCuda, test_name), "Duplicated test name: " + test_name
+                setattr(TestCuda, test_name, compare_cpu_gpu(constr, arg_constr, name_inner, t, precision))

 if __name__ == '__main__':
-    unittest.main()
+    run_tests()

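The generation loop above is the standard trick for parameterizing unittest in this era, before subTest existed: build one closure per case and attach it to the class with setattr so the runner discovers each as a separate, individually named test. In miniature, self-contained and with illustrative names only:

import unittest


class GeneratedTests(unittest.TestCase):
    pass


def make_test(n):
    # Bind `n` in a closure so each generated test checks its own case.
    def test(self):
        self.assertEqual(n + n, 2 * n)
    return test

for n in range(3):
    setattr(GeneratedTests, 'test_double_{}'.format(n), make_test(n))

# The runner now reports test_double_0 through test_double_2 separately.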
test/test_dataloader.py (new file, 201 lines)
@@ -0,0 +1,201 @@
import math
import sys
import torch
import traceback
import unittest
from torch.utils.data import Dataset, TensorDataset, DataLoader
from common import TestCase, run_tests, TEST_NUMPY
from common_nn import TEST_CUDA


class TestTensorDataset(TestCase):

    def test_len(self):
        source = TensorDataset(torch.randn(15, 10, 2, 3, 4, 5), torch.randperm(15))
        self.assertEqual(len(source), 15)

    def test_getitem(self):
        t = torch.randn(15, 10, 2, 3, 4, 5)
        l = torch.randn(15, 10)
        source = TensorDataset(t, l)
        for i in range(15):
            self.assertEqual(t[i], source[i][0])
            self.assertEqual(l[i], source[i][1])

    def test_getitem_1d(self):
        t = torch.randn(15)
        l = torch.randn(15)
        source = TensorDataset(t, l)
        for i in range(15):
            self.assertEqual(t[i], source[i][0])
            self.assertEqual(l[i], source[i][1])


class ErrorDataset(Dataset):

    def __init__(self, size):
        self.size = size

    def __len__(self):
        return self.size


class TestDataLoader(TestCase):

    def setUp(self):
        self.data = torch.randn(100, 2, 3, 5)
        self.labels = torch.randperm(50).repeat(2)
        self.dataset = TensorDataset(self.data, self.labels)

    def _test_sequential(self, loader):
        batch_size = loader.batch_size
        for i, (sample, target) in enumerate(loader):
            idx = i * batch_size
            self.assertEqual(sample, self.data[idx:idx + batch_size])
            self.assertEqual(target, self.labels[idx:idx + batch_size])
        self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))

    def _test_shuffle(self, loader):
        found_data = {i: 0 for i in range(self.data.size(0))}
        found_labels = {i: 0 for i in range(self.labels.size(0))}
        batch_size = loader.batch_size
        for i, (batch_samples, batch_targets) in enumerate(loader):
            for sample, target in zip(batch_samples, batch_targets):
                for data_point_idx, data_point in enumerate(self.data):
                    if data_point.eq(sample).all():
                        self.assertFalse(found_data[data_point_idx])
                        found_data[data_point_idx] += 1
                        break
                self.assertEqual(target, self.labels[data_point_idx])
                found_labels[data_point_idx] += 1
            self.assertEqual(sum(found_data.values()), (i + 1) * batch_size)
            self.assertEqual(sum(found_labels.values()), (i + 1) * batch_size)
        self.assertEqual(i, math.floor((len(self.dataset) - 1) / batch_size))

    def _test_error(self, loader):
        it = iter(loader)
        errors = 0
        while True:
            try:
                it.next()
            except NotImplementedError:
                errors += 1
            except StopIteration:
                self.assertEqual(errors,
                                 math.ceil(float(len(loader.dataset)) / loader.batch_size))
                return

    def test_sequential(self):
        self._test_sequential(DataLoader(self.dataset))

    def test_sequential_batch(self):
        self._test_sequential(DataLoader(self.dataset, batch_size=2))

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_sequential_pin_memory(self):
        loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
        for input, target in loader:
            self.assertTrue(input.is_pinned())
            self.assertTrue(target.is_pinned())

    def test_shuffle(self):
        self._test_shuffle(DataLoader(self.dataset, shuffle=True))

    def test_shuffle_batch(self):
        self._test_shuffle(DataLoader(self.dataset, batch_size=2, shuffle=True))

    def test_sequential_workers(self):
        self._test_sequential(DataLoader(self.dataset, num_workers=4))

    def test_seqential_batch_workers(self):
        self._test_sequential(DataLoader(self.dataset, batch_size=2, num_workers=4))

    def test_shuffle_workers(self):
        self._test_shuffle(DataLoader(self.dataset, shuffle=True, num_workers=4))

    def test_shuffle_batch_workers(self):
        self._test_shuffle(DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4))

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_shuffle_pin_memory(self):
        loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
        for input, target in loader:
            self.assertTrue(input.is_pinned())
            self.assertTrue(target.is_pinned())

    @unittest.skipIf(not TEST_NUMPY, "numpy unavailable")
    def test_numpy(self):
        import numpy as np

        class TestDataset(torch.utils.data.Dataset):
            def __getitem__(self, i):
                return np.ones((2, 3, 4)) * i

            def __len__(self):
                return 1000

        loader = DataLoader(TestDataset(), batch_size=12)
        batch = next(iter(loader))
        self.assertIsInstance(batch, torch.DoubleTensor)
        self.assertEqual(batch.size(), torch.Size([12, 2, 3, 4]))

    def test_error(self):
        self._test_error(DataLoader(ErrorDataset(100), batch_size=2, shuffle=True))

    def test_error_workers(self):
        self._test_error(DataLoader(ErrorDataset(41), batch_size=2, shuffle=True, num_workers=4))

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_partial_workers(self):
        "check that workers exit even if the iterator is not exhausted"
        loader = iter(DataLoader(self.dataset, batch_size=2, num_workers=4, pin_memory=True))
        workers = loader.workers
        pin_thread = loader.pin_thread
        for i, sample in enumerate(loader):
            if i == 3:
                break
        del loader
        for w in workers:
            w.join(1.0)  # timeout of one second
            self.assertFalse(w.is_alive(), 'subprocess not terminated')
            self.assertEqual(w.exitcode, 0)
        pin_thread.join(1.0)
        self.assertFalse(pin_thread.is_alive())

    def test_len(self):
        def check_len(dl, expected):
            self.assertEqual(len(dl), expected)
            n = 0
            for sample in dl:
                n += 1
            self.assertEqual(n, expected)
        check_len(self.dataset, 100)
        check_len(DataLoader(self.dataset, batch_size=2), 50)
        check_len(DataLoader(self.dataset, batch_size=3), 34)


class StringDataset(Dataset):
    def __init__(self):
        self.s = '12345'

    def __len__(self):
        return len(self.s)

    def __getitem__(self, ndx):
        return (self.s[ndx], ndx)


class TestStringDataLoader(TestCase):
    def setUp(self):
        self.dataset = StringDataset()

    @unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
    def test_shuffle_pin_memory(self):
        loader = DataLoader(self.dataset, batch_size=2, shuffle=True, num_workers=4, pin_memory=True)
        for batch_ndx, (s, n) in enumerate(loader):
            self.assertIsInstance(s[0], str)
            self.assertTrue(n.is_pinned())


if __name__ == '__main__':
    run_tests()
508 test/test_distributed.py Normal file
@@ -0,0 +1,508 @@
import fcntl
import multiprocessing
import os
import sys
import time
import unittest
from functools import wraps, reduce
from contextlib import contextmanager

import torch
import torch.distributed as dist
from common import TestCase

BACKEND = os.environ['BACKEND']
TEMP_DIR = os.environ['TEMP_DIR']
MASTER_PORT = '29500'
MASTER_ADDR = '127.0.0.1:' + MASTER_PORT


@contextmanager
def _lock():
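    # Advisory file lock (flock) that serializes access to the shared
    # barrier directory across the spawned test processes.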
    lockfile = os.path.join(TEMP_DIR, 'lockfile')
    with open(lockfile, 'w') as lf:
        try:
            fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
            yield
        finally:
            fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
            lf.close()


def _build_tensor(size, value=None):
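    # Returns a size x size x size cube filled with `value` (defaulting to
    # `size` itself), so tensors exchanged between ranks are easy to tell apart.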
    if value is None:
        value = size
    return torch.FloatTensor(size, size, size).fill_(value)


class Barrier(object):
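    # File-system barrier: each process writes its current barrier_id to a
    # file named after its pid, then polls the directory until every
    # participating process has caught up.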
    barrier_id = 0

    @classmethod
    def init(cls):
        cls.barrier_id = 0
        barrier_dir = os.path.join(TEMP_DIR, 'barrier')
        for f_name in os.listdir(barrier_dir):
            os.unlink(os.path.join(barrier_dir, f_name))

    @classmethod
    def sync(cls, timeout=5):
        cls.barrier_id += 1
        barrier_dir = os.path.join(TEMP_DIR, 'barrier')
        pid = str(os.getpid())
        barrier_file = os.path.join(barrier_dir, pid)
        with _lock():
            with open(barrier_file, 'w') as f:
                f.write(str(cls.barrier_id))

        start_time = time.time()
        while True:
            arrived = 0
            with _lock():
                for f_name in os.listdir(barrier_dir):
                    with open(os.path.join(barrier_dir, f_name), 'r') as f:
                        data = f.read()
                        if int(data) >= cls.barrier_id:
                            arrived += 1
            if arrived == dist.get_num_processes():
                break

            if time.time() - start_time > timeout:
                raise RuntimeError("barrier timeout")
            time.sleep(0.1)


class _DistTestBase(object):

    def _barrier(self, *args, **kwargs):
        Barrier.sync(*args, **kwargs)

    def _init_group_test(self):
        group = [1, 2]
        group_id = dist.new_group(group)
        rank = dist.get_rank()
        if rank not in group:
            return ([], None, rank)

        return (group, group_id, rank)

    def _init_global_test(self):
        group = [i for i in range(0, dist.get_num_processes())]
        group_id = dist.group.WORLD
        rank = dist.get_rank()
        return (group, group_id, rank)

    # GET RANK
    def test_get_rank(self):
        test_dir = os.path.join(TEMP_DIR, 'test_dir')
        pid = str(os.getpid())
        num_processes = dist.get_num_processes()
        with open(os.path.join(test_dir, pid), 'w') as f:
            f.write(str(dist.get_rank()))

        self._barrier()

        all_ranks = set()
        for f_name in os.listdir(test_dir):
            with open(os.path.join(test_dir, f_name), 'r') as f:
                all_ranks.add(int(f.read()))
        self.assertEqual(len(all_ranks), num_processes)

        self._barrier()

        if dist.get_rank() == 0:
            for f_name in os.listdir(test_dir):
                os.unlink(os.path.join(test_dir, f_name))

        self._barrier()

    # SEND RECV
    def test_send_recv(self):
        rank = dist.get_rank()
        tensor = _build_tensor(rank + 1)
        for dest in range(0, dist.get_num_processes()):
            if dest == rank:
                continue
            dist.send(tensor, dest)

        for src in range(0, dist.get_num_processes()):
            if src == rank:
                continue
            tensor = _build_tensor(src + 1, value=-1)
            expected_tensor = _build_tensor(src + 1)
            dist.recv(tensor, src)
            self.assertEqual(tensor, expected_tensor)

        self._barrier()

    # SEND RECV ANY SOURCE
    def test_send_recv_any_source(self):
        rank = dist.get_rank()
        tensor = _build_tensor(10, rank)
        for dest in range(0, dist.get_num_processes()):
            if dest == rank:
                continue
            dist.send(tensor, dest)

        recv_ranks = set()
        for src in range(0, dist.get_num_processes()):
            if src == rank:
                continue
            tensor = _build_tensor(10, value=-1)
            dist.recv(tensor)
            recv_ranks.add(tensor.resize_(1)[0])

        self.assertEqual(len(recv_ranks), dist.get_num_processes() - 1)
        self._barrier()

    # ISEND
    def test_isend(self):
        rank = dist.get_rank()
        world_size = dist.get_num_processes()

        if rank == 0:
            requests = [
                dist.isend(_build_tensor(dest, 10), dest) for dest in range(1, world_size)
            ]
            for request in requests:
                request.wait()
                self.assertTrue(request.is_completed())
        else:
            tensor = _build_tensor(rank, -1)
            dist.recv(tensor, 0)
            self.assertEqual(tensor, _build_tensor(rank, 10))

        self._barrier()

    # IRECV
    def test_irecv(self):
        rank = dist.get_rank()
        world_size = dist.get_num_processes()

        if rank == 0:
            expected_tensors = [_build_tensor(src, -1) for src in range(1, world_size)]
            requests = [
                dist.irecv(expected_tensors[src - 1], src) for src in range(1, world_size)
            ]

            for src in range(1, world_size):
                requests[src - 1].wait()
                self.assertTrue(requests[src - 1].is_completed())
                self.assertEqual(expected_tensors[src - 1], _build_tensor(src, 10))
        else:
            tensor = _build_tensor(rank, 10)
            dist.send(tensor, 0)

        self._barrier()

    # BROADCAST
    def _test_broadcast_helper(self, group, group_id, rank):
        for src in group:
            expected_tensor = _build_tensor(src + 1)
            if rank == src:
                dist.broadcast(expected_tensor, src, group_id)
            else:
                tensor = _build_tensor(src + 1, -1)
                dist.broadcast(tensor, src, group_id)
                self.assertEqual(tensor, expected_tensor)

        self._barrier()

    def test_broadcast(self):
        group, group_id, rank = self._init_global_test()
        self._test_broadcast_helper(group, group_id, rank)

    def test_broadcast_group(self):
        group, group_id, rank = self._init_group_test()
        self._test_broadcast_helper(group, group_id, rank)

    # REDUCE
    def _test_reduce_helper(self, group, group_id, rank, op, master_value, worker_value, expected_value):
        for src in group:
            if rank == src:
                tensor = _build_tensor(src + 1).fill_(master_value)
                dist.reduce(tensor, src, op, group_id)
                self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
            else:
                tensor = _build_tensor(src + 1).fill_(worker_value)
                dist.reduce(tensor, src, op, group_id)

        self._barrier()

    def test_reduce_sum(self):
        group, group_id, rank = self._init_global_test()
        self._test_reduce_helper(
            group, group_id, rank, dist.reduce_op.SUM, 2, 10, 2 + (10 * (len(group) - 1))
        )

    def test_reduce_product(self):
        group, group_id, rank = self._init_global_test()
        self._test_reduce_helper(
            group, group_id, rank, dist.reduce_op.PRODUCT,
            2, 10, reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2)
        )

    def test_reduce_min(self):
        group, group_id, rank = self._init_global_test()
        self._test_reduce_helper(
            group, group_id, rank, dist.reduce_op.MIN, 1010, 1, 1
        )

    def test_reduce_max(self):
        group, group_id, rank = self._init_global_test()
        self._test_reduce_helper(
            group, group_id, rank, dist.reduce_op.MAX, -1, 10, 10
        )

    def test_reduce_group_sum(self):
        group, group_id, rank = self._init_group_test()
        self._test_reduce_helper(
            group, group_id, rank, dist.reduce_op.SUM, 2, 10, 2 + (10 * (len(group) - 1))
        )

    def test_reduce_group_product(self):
        group, group_id, rank = self._init_group_test()
        self._test_reduce_helper(
            group, group_id, rank, dist.reduce_op.PRODUCT,
            2, 10, reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2)
        )

    def test_reduce_group_min(self):
        group, group_id, rank = self._init_group_test()
        self._test_reduce_helper(
            group, group_id, rank, dist.reduce_op.MIN, 1010, 1, 1
        )

    def test_reduce_group_max(self):
        group, group_id, rank = self._init_group_test()
        self._test_reduce_helper(
            group, group_id, rank, dist.reduce_op.MAX, -1, 10, 10
        )

    # ALL REDUCE
    def _test_all_reduce_helper(self, group, group_id, rank, op, master_value, worker_value, expected_value):
        for src in group:
            if rank == src:
                tensor = _build_tensor(src + 1).fill_(master_value)
                dist.all_reduce(tensor, op, group_id)
                self.assertEqual(tensor, _build_tensor(src + 1, expected_value))
            else:
                tensor = _build_tensor(src + 1).fill_(worker_value)
                dist.all_reduce(tensor, op, group_id)
                self.assertEqual(tensor, _build_tensor(src + 1, expected_value))

        self._barrier()

    def test_all_reduce_sum(self):
        group, group_id, rank = self._init_global_test()
        self._test_all_reduce_helper(
            group, group_id, rank, dist.reduce_op.SUM, 2, 10, 2 + (10 * (len(group) - 1))
        )

    def test_all_reduce_product(self):
        group, group_id, rank = self._init_global_test()
        self._test_all_reduce_helper(
            group, group_id, rank, dist.reduce_op.PRODUCT,
            2, 10, reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2)
        )

    def test_all_reduce_min(self):
        group, group_id, rank = self._init_global_test()
        self._test_all_reduce_helper(
            group, group_id, rank, dist.reduce_op.MIN, 1010, 1, 1
        )

    def test_all_reduce_max(self):
        group, group_id, rank = self._init_global_test()
        self._test_all_reduce_helper(
            group, group_id, rank, dist.reduce_op.MAX, -1, 10, 10
        )

    def test_all_reduce_group_sum(self):
        group, group_id, rank = self._init_group_test()
        self._test_all_reduce_helper(
            group, group_id, rank, dist.reduce_op.SUM, 2, 10, 2 + (10 * (len(group) - 1))
        )

    def test_all_reduce_group_product(self):
        group, group_id, rank = self._init_group_test()
        self._test_all_reduce_helper(
            group, group_id, rank, dist.reduce_op.PRODUCT,
            2, 10, reduce((lambda x, y: x * y), [10] * (len(group) - 1), 2)
        )

    def test_all_reduce_group_min(self):
        group, group_id, rank = self._init_group_test()
        self._test_all_reduce_helper(
            group, group_id, rank, dist.reduce_op.MIN, 1010, 1, 1
        )

    def test_all_reduce_group_max(self):
        group, group_id, rank = self._init_group_test()
        self._test_all_reduce_helper(
            group, group_id, rank, dist.reduce_op.MAX, -1, 10, 10
        )

    # SCATTER
    def _test_scatter_helper(self, group, group_id, rank):
        for dest in group:
            tensor = _build_tensor(dest + 1, -1)
            expected_tensor = _build_tensor(dest + 1, rank)
            if rank == dest:
                tensors = [_build_tensor(dest + 1, i) for i in group]
                dist.scatter_send(tensors, tensor, group_id)
                self.assertEqual(tensor, expected_tensor)
            else:
                dist.scatter_recv(tensor, dest, group_id)
                self.assertEqual(tensor, expected_tensor)

        self._barrier()

    def test_scatter(self):
        group, group_id, rank = self._init_global_test()
        self._test_scatter_helper(group, group_id, rank)

    def test_scatter_group(self):
        group, group_id, rank = self._init_group_test()
        self._test_scatter_helper(group, group_id, rank)

    # GATHER
    def _test_gather_helper(self, group, group_id, rank):
        for dest in group:
            tensor = _build_tensor(dest + 1, rank)
            if rank == dest:
                tensors = [_build_tensor(dest + 1, -1) for i in group]
                dist.gather_recv(tensors, tensor, group_id)

                expected_tensors = [_build_tensor(dest + 1, i) for i in group]
                for t1, t2 in zip(tensors, expected_tensors):
                    self.assertEqual(t1, t2)
            else:
                dist.gather_send(tensor, dest, group_id)

        self._barrier()

    def test_gather(self):
        group, group_id, rank = self._init_global_test()
        self._test_gather_helper(group, group_id, rank)

    def test_gather_group(self):
        group, group_id, rank = self._init_group_test()
        self._test_gather_helper(group, group_id, rank)

    # ALL GATHER
    def _test_all_gather_helper(self, group, group_id, rank):
        for dest in group:
            tensor = _build_tensor(dest + 1, rank)
            tensors = [_build_tensor(dest + 1, -1) for i in group]
            dist.all_gather(tensors, tensor, group_id)

            expected_tensors = [_build_tensor(dest + 1, i) for i in group]
            for t1, t2 in zip(tensors, expected_tensors):
                self.assertEqual(t1, t2)

        self._barrier()

    def test_all_gather(self):
        group, group_id, rank = self._init_global_test()
        self._test_all_gather_helper(group, group_id, rank)

    def test_all_gather_group(self):
        group, group_id, rank = self._init_group_test()
        self._test_all_gather_helper(group, group_id, rank)

    # BARRIER
    def _test_barrier_helper(self, group, group_id, rank):
        WAIT_TIME = 0.3  # seconds

        for dest in group:
            expected_time = torch.DoubleTensor(1).fill_(0.0)
            if dest == rank:
                expected_time.fill_(time.time() + WAIT_TIME)
                dist.broadcast(expected_time, dest, group_id)
                time.sleep(WAIT_TIME + 0.1)  # sleep a little bit longer
                dist.barrier(group_id)
            else:
                dist.broadcast(expected_time, dest, group_id)
                dist.barrier(group_id)
                self.assertGreaterEqual(time.time(), expected_time[0])

        self._barrier()

    def test_barrier(self):
        group, group_id, rank = self._init_global_test()
        self._test_barrier_helper(group, group_id, rank)

    def test_barrier_group(self):
        group, group_id, rank = self._init_group_test()
        self._test_barrier_helper(group, group_id, rank)


if BACKEND == 'tcp':
    WORLD_SIZE = os.environ['WORLD_SIZE']

    class TestTCP(TestCase, _DistTestBase):

        MANAGER_PROCESS_RANK = -1
        JOIN_TIMEOUT = 5

        @staticmethod
        def manager_join(fn):
            @wraps(fn)
            def wrapper(self):
                if self.rank == self.MANAGER_PROCESS_RANK:
                    self._join_and_reduce()
                else:
                    fn(self)
            return wrapper

        @classmethod
        def setUpClass(cls):
            os.environ['MASTER_ADDR'] = MASTER_ADDR
            os.environ['MASTER_PORT'] = MASTER_PORT
            os.environ['WORLD_SIZE'] = WORLD_SIZE
            for attr in dir(cls):
                if attr.startswith('test'):
                    fn = getattr(cls, attr)
                    setattr(cls, attr, cls.manager_join(fn))

        def setUp(self):
            self.processes = []
            self.rank = self.MANAGER_PROCESS_RANK
            Barrier.init()
            for rank in range(int(WORLD_SIZE)):
                self.processes.append(self._spawn_process(rank))

        def tearDown(self):
            for p in self.processes:
                p.terminate()

        def _spawn_process(self, rank):
            os.environ['RANK'] = str(rank)
            name = 'process ' + str(rank)
            process = multiprocessing.Process(target=self._run, name=name,
                                              args=(rank,))
            process.start()
            return process

        def _run(self, rank):
            self.rank = rank
            dist.init_process_group(backend=BACKEND)
            # self.id() == e.g. '__main__.TestDistributed.test_get_rank'
            # We're retrieving the corresponding test and executing it.
            getattr(self, self.id().split(".")[2])()
            sys.exit(0)

        def _join_and_reduce(self):
            for p in self.processes:
                p.join(self.JOIN_TIMEOUT)
                self.assertEqual(p.exitcode, 0)

elif BACKEND == 'mpi':
    dist.init_process_group(backend='mpi')

    class TestMPI(TestCase, _DistTestBase):
        pass


if __name__ == '__main__':
    unittest.main()
File diff suppressed because it is too large
415 test/test_multiprocessing.py Normal file
@@ -0,0 +1,415 @@
import contextlib
import gc
import os
import sys
import time
import unittest
from sys import platform

import torch
import torch.cuda
import torch.multiprocessing as mp
from torch.autograd import Variable
from torch.nn import Parameter
from common import TestCase, run_tests


TEST_REPEATS = 30
HAS_SHM_FILES = os.path.isdir('/dev/shm')
TEST_CUDA_IPC = torch.cuda.is_available() and \
    sys.version_info[0] == 3 and \
    sys.platform != 'darwin'
TEST_MULTIGPU = TEST_CUDA_IPC and torch.cuda.device_count() > 1


def simple_fill(queue, event):
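    # Worker: the tensor arriving over the queue shares memory with the
    # parent, so writing to it here is visible in the parent process.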
    data = queue.get()
    data[0][:] = 4
    event.set()


def simple_pool_fill(tensor):
    tensor.fill_(4)
    return tensor.add(1)


def send_tensor(queue, event, tp):
    t = torch.ones(5, 5).type(tp)
    queue.put(t)
    queue.put(t)
    event.wait()


def sum_tensors(inq, outq):
    with torch.cuda.device(1):
        tensors = inq.get()
        for tensor in tensors:
            outq.put((tensor.sum(), tensor.get_device(),
                      tensor.numel(), tensor.storage().size()))


def queue_get_exception(inqueue, outqueue):
    os.close(2)  # hide expected error message
    try:
        torch.zeros(5, 5).cuda()
    except Exception as e:
        outqueue.put(e)
    else:
        outqueue.put('no exception')


# Multiply by two in a separate stream
def cuda_multiply_two(queue, ready, done):
    ready.set()
    with torch.cuda.stream(torch.cuda.Stream()):
        cuda_event, tensor = queue.get()
        cuda_event.wait()
        tensor.mul_(2)
        cuda_event.record()
        done.set()
        del cuda_event


def autograd_sharing(queue, ready, master_modified):
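    # Worker side of the autograd-sharing test: receive a shared Variable,
    # wait for the parent to mutate it, verify the change, then write new
    # data and grad back for the parent to check.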
    var = queue.get()
    ready.set()
    master_modified.wait()

    expected_var = torch.range(1, 25).view(5, 5)
    expected_var[0, 0] = 1000
    is_ok = var.data.equal(expected_var)
    var.data[:] = torch.ones(5, 5)

    is_ok &= var.grad is None
    var._grad = Variable(torch.ones(5, 5), requires_grad=False)

    queue.put(is_ok)


@contextlib.contextmanager
def fs_sharing():
    prev_strategy = mp.get_sharing_strategy()
    mp.set_sharing_strategy('file_system')
    try:
        yield
    finally:
        mp.set_sharing_strategy(prev_strategy)


class leak_checker(object):
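    # Context manager that watches for leaked file descriptors and leftover
    # /dev/shm segments created by the processes registered via check_pid().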

    def __init__(self, test_case):
        self.checked_pids = [os.getpid()]
        self.test_case = test_case

    def __enter__(self):
        self.next_fds = self._get_next_fds(10)
        return self

    def __exit__(self, *args):
        if args[0] is None:
            # Check that the 10th available file-descriptor at the end of the
            # test is no more than 4 higher than the 10th available at the
            # start. This attempts to catch file descriptor leaks, but allows
            # one-off initialization that may use up a file descriptor
            available_fds = self._get_next_fds(10)
            self.test_case.assertLessEqual(
                available_fds[-1] - self.next_fds[-1], 5)
            self.test_case.assertFalse(self.has_shm_files())
        return False

    def check_pid(self, pid):
        self.checked_pids.append(pid)

    def _get_next_fds(self, n=1):
        # dup uses the lowest-numbered unused descriptor for the new descriptor
        fds = [os.dup(0) for i in range(n)]
        for fd in fds:
            os.close(fd)
        return fds

    def has_shm_files(self, wait=True):
        if not HAS_SHM_FILES:
            return False
        result = self._has_shm_files()
        if result and mp.get_sharing_strategy() == 'file_system' and wait:
            time.sleep(0.5)
            return self._has_shm_files()
        return result

    def _has_shm_files(self):
        gc.collect()
        names = list('torch_' + str(pid) for pid in self.checked_pids)
        for filename in os.listdir('/dev/shm'):
            for name in names:
                if filename.startswith(name):
                    return True
        return False


class TestMultiprocessing(TestCase):

    def _test_sharing(self, ctx=mp, type=torch.FloatTensor, repeat=1):
        def test_fill():
            x = torch.zeros(5, 5).type(type)
            q = ctx.Queue()
            e = ctx.Event()
            data = [x, x[:, 1]]
            q.put(data)
            p = ctx.Process(target=simple_fill, args=(q, e))
            p.daemon = True
            lc.check_pid(p.pid)
            p.start()
            e.wait(10)
            self.assertTrue(e.is_set())
            self.assertTrue(data[0].eq(4).all())
            self.assertTrue(data[1].eq(4).all())
            p.join(1)
            self.assertFalse(p.is_alive())

        def test_receive():
            q = ctx.Queue()
            e = ctx.Event()
            p = ctx.Process(target=send_tensor, args=(q, e, type))
            p.daemon = True
            lc.check_pid(p.pid)
            p.start()
            t1 = q.get()
            t2 = q.get()
            self.assertTrue(t1.eq(1).all())
            self.assertTrue(id(t1.storage()) == id(t2.storage()))
            e.set()
            p.join(1)
            self.assertFalse(p.is_alive())

        with leak_checker(self) as lc:
            for _ in range(repeat):
                test_fill()
                test_receive()

    def _test_preserve_sharing(self, ctx=mp, repeat=1):
        def do_test():
            x = torch.randn(5, 5)
            data = [x.storage(), x.storage()[1:4], x, x[2], x[:, 1]]
            q = ctx.Queue()
            q.put(data)
            new_data = q.get(timeout=1)
            self.assertEqual(new_data, data, 0)
            storage_cdata = data[0]._cdata
            self.assertEqual(new_data[0]._cdata, storage_cdata)
            for t in new_data[2:]:
                self.assertEqual(t.storage()._cdata, storage_cdata)
            # TODO: enable after fixing #46
            # new_data[0].fill_(10)
            # self.assertEqual(new_data[1], new_data[0][1:4], 0)

        with leak_checker(self):
            for i in range(repeat):
                do_test()

    def _test_pool(self, ctx=mp, repeat=1):
        def do_test():
            p = ctx.Pool(2)
            for proc in p._pool:
                lc.check_pid(proc.pid)

            buffers = [torch.zeros(2, 2) for i in range(4)]
            results = p.map(simple_pool_fill, buffers, 1)
            self.assertEqual(len(results), len(buffers))
            for r in results:
                self.assertEqual(r, torch.ones(2, 2) * 5, 0)
            for b in buffers:
                self.assertEqual(b, torch.ones(2, 2) * 4, 0)

            p.close()
            p.join()

        with leak_checker(self) as lc:
            for i in range(repeat):
                do_test()

    @unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on OS X")
    def test_fd_sharing(self):
        self._test_sharing(repeat=TEST_REPEATS)

    @unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on OS X")
    def test_fd_preserve_sharing(self):
        self._test_preserve_sharing(repeat=TEST_REPEATS)

    @unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on OS X")
    def test_fd_pool(self):
        self._test_pool(repeat=TEST_REPEATS)

    def test_fs_sharing(self):
        with fs_sharing():
            self._test_sharing(repeat=TEST_REPEATS)

    def test_fs_preserve_sharing(self):
        with fs_sharing():
            self._test_preserve_sharing(repeat=TEST_REPEATS)

    def test_fs_pool(self):
        with fs_sharing():
            self._test_pool(repeat=TEST_REPEATS)

    @unittest.skipIf(not HAS_SHM_FILES, "don't know how to check if shm files exist")
    def test_fs(self):
        def queue_put():
            x = torch.DoubleStorage(4)
            q = mp.Queue()
            self.assertFalse(lc.has_shm_files())
            q.put(x)
            time.sleep(0.05)  # queue serializes asynchronously
            self.assertTrue(lc.has_shm_files(wait=False))
            q.get()

        with fs_sharing(), leak_checker(self) as lc:
            for _ in range(TEST_REPEATS):
                queue_put()

    def test_inherit_tensor(self):
        class SubProcess(mp.Process):
            def __init__(self, tensor):
                super(SubProcess, self).__init__()
                self.tensor = tensor
                self.daemon = True

            def run(self):
                self.tensor.add_(3)

        t = torch.zeros(5, 5)
        p = SubProcess(t.share_memory_())
        p.start()
        p.join(1)
        self.assertEqual(t, torch.ones(5, 5) * 3, 0)

    @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
    def test_cuda(self):
        torch.cuda.FloatTensor([1])  # initialize CUDA outside of leak checker
        self._test_sharing(mp.get_context('spawn'), torch.cuda.FloatTensor)

    @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
    @unittest.skipIf(not TEST_MULTIGPU, 'found only 1 GPU')
    def test_cuda_small_tensors(self):
        # Check multiple small tensors which will likely use the same
        # underlying cached allocation
        ctx = mp.get_context('spawn')
        tensors = []
        for i in range(5):
            tensors += [torch.range(i * 5, (i * 5) + 4).cuda()]

        inq = ctx.Queue()
        outq = ctx.Queue()
        inq.put(tensors)
        p = ctx.Process(target=sum_tensors, args=(inq, outq))
        p.start()

        results = []
        for i in range(5):
            results.append(outq.get())
        p.join()

        for i, tensor in enumerate(tensors):
            v, device, tensor_size, storage_size = results[i]
            self.assertEqual(v, torch.range(i * 5, (i * 5) + 4).sum())
            self.assertEqual(device, 0)
            self.assertEqual(tensor_size, 5)
            self.assertEqual(storage_size, 5)

    @unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
    def test_cuda_bad_call(self):
        # Initialize CUDA
        t = torch.zeros(5, 5).cuda().cpu()
        inq = mp.Queue()
        outq = mp.Queue()
        p = mp.Process(target=queue_get_exception, args=(inq, outq))
        p.start()
        inq.put(t)
        p.join()
        self.assertIsInstance(outq.get(), RuntimeError)

    @unittest.skipIf(not TEST_CUDA_IPC, 'CUDA IPC not available')
    def test_event(self):
        ctx = mp.get_context('spawn')
        queue = ctx.Queue()
        ready = ctx.Event()
        done = ctx.Event()
        p = ctx.Process(target=cuda_multiply_two, args=(queue, ready, done))
        p.start()

        ready.wait()
        with torch.cuda.stream(torch.cuda.Stream()):
            tensor = torch.cuda.FloatTensor([1, 1, 1, 1])
            # Use a sleep kernel to test events. Without the event, the
            # multiply happens before the add.
            event = torch.cuda.Event(interprocess=True)
            torch.cuda._sleep(20000000)  # about 30 ms
            tensor.add_(1)
            event.record()
            queue.put((event, tensor))
            done.wait()  # must wait until subprocess records event
            event.synchronize()
            self.assertEqual(list(tensor), [4, 4, 4, 4])
        p.join()

    def _test_autograd_sharing(self, var):
        ready = mp.Event()
        master_modified = mp.Event()
        queue = mp.Queue()
        p = mp.Process(target=autograd_sharing, args=(queue, ready, master_modified))
        p.daemon = True
        p.start()
        var._grad = Variable(torch.zeros(5, 5), requires_grad=False)
        queue.put(var)

        ready.wait()
        var.data[0, 0] = 1000
        var.grad.data[:] = torch.ones(5, 5) * 4
        master_modified.set()

        worker_ok = queue.get()
        self.assertTrue(worker_ok)

        self.assertEqual(var.data, torch.ones(5, 5))
        self.assertEqual(var.grad.data, torch.ones(5, 5) * 4)
        p.join(1)
        self.assertFalse(p.is_alive())

    def test_variable_sharing(self):
        configs = [
            (True, False),
            (False, False),
            (False, True),
        ]
        for requires_grad, volatile in configs:
            var = Variable(torch.range(1, 25).view(5, 5),
                           requires_grad=requires_grad,
                           volatile=volatile)
            self._test_autograd_sharing(var)

    def test_parameter_sharing(self):
        param = Parameter(torch.range(1, 25).view(5, 5))
        self._test_autograd_sharing(param)

    def _test_is_shared(self):
        t = torch.randn(5, 5)
        self.assertFalse(t.is_shared())
        t.share_memory_()
        self.assertTrue(t.is_shared())

    @unittest.skipIf(platform == 'darwin', "file descriptor strategy is not supported on OS X")
    def test_is_shared(self):
        self._test_is_shared()

    def test_fs_is_shared(self):
        with fs_sharing():
            self._test_is_shared()

    @unittest.skipIf(not torch.cuda.is_available(), 'CUDA not available')
    def test_is_shared_cuda(self):
        t = torch.randn(5, 5).cuda()
        self.assertTrue(t.is_shared())


if __name__ == '__main__':
    run_tests()
88 test/test_nccl.py Normal file
@@ -0,0 +1,88 @@
import unittest

import torch
import torch.cuda.nccl as nccl
import torch.cuda

from common import TestCase, run_tests

nGPUs = torch.cuda.device_count()
if nGPUs == 0:
    print('CUDA not available, skipping tests')
    TestCase = object  # noqa: F811


class TestNCCL(TestCase):

    @unittest.skipIf(nGPUs < 2, "only one GPU detected")
    def test_broadcast(self):
        expected = torch.FloatTensor(128).uniform_()
        tensors = [expected.cuda()]
        for device in range(1, torch.cuda.device_count()):
            with torch.cuda.device(device):
                tensors.append(torch.cuda.FloatTensor(128))

        nccl.broadcast(tensors)
        for i in range(torch.cuda.device_count()):
            self.assertEqual(tensors[i], expected)

    @unittest.skipIf(nGPUs < 2, "only one GPU detected")
    def test_reduce(self):
        tensors = [torch.FloatTensor(128).uniform_() for i in range(nGPUs)]
        expected = torch.FloatTensor(128).zero_()
        for t in tensors:
            expected.add_(t)

        tensors = [tensors[i].cuda(i) for i in range(nGPUs)]
        nccl.reduce(tensors)

        self.assertEqual(tensors[0], expected)

    @unittest.skipIf(nGPUs < 2, "only one GPU detected")
    def test_all_reduce(self):
        tensors = [torch.FloatTensor(128).uniform_() for i in range(nGPUs)]
        expected = torch.FloatTensor(128).zero_()
        for t in tensors:
            expected.add_(t)

        tensors = [tensors[i].cuda(i) for i in range(nGPUs)]
        nccl.all_reduce(tensors)

        for tensor in tensors:
            self.assertEqual(tensor, expected)

    @unittest.skipIf(nGPUs < 2, "only one GPU detected")
    def test_all_gather(self):
        inputs = [torch.FloatTensor(128).uniform_() for i in range(nGPUs)]
        expected = torch.cat(inputs, 0)

        inputs = [inputs[i].cuda(i) for i in range(nGPUs)]
        outputs = [torch.cuda.FloatTensor(128 * nGPUs, device=i)
                   for i in range(nGPUs)]
        nccl.all_gather(inputs, outputs)

        for tensor in outputs:
            self.assertEqual(tensor, expected)

    @unittest.skipIf(nGPUs < 2, "only one GPU detected")
    def test_reduce_scatter(self):
        in_size = 32 * nGPUs
        out_size = 32

        inputs = [torch.FloatTensor(in_size).uniform_() for i in range(nGPUs)]
        expected = torch.FloatTensor(in_size).zero_()
        for t in inputs:
            expected.add_(t)
        expected = expected.view(nGPUs, 32)

        inputs = [inputs[i].cuda(i) for i in range(nGPUs)]
        outputs = [torch.cuda.FloatTensor(out_size, device=i)
                   for i in range(nGPUs)]
        nccl.reduce_scatter(inputs, outputs)

        for i in range(nGPUs):
            self.assertEqual(outputs[i], expected[i])


if __name__ == '__main__':
    run_tests()
2487 test/test_nn.py
File diff suppressed because it is too large
347 test/test_optim.py Normal file
@@ -0,0 +1,347 @@
import unittest
import functools
from copy import deepcopy
import torch
import torch.optim as optim
import torch.legacy.optim as old_optim
from torch.autograd import Variable

from common import TestCase, run_tests


def rosenbrock(tensor):
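    # 2-D Rosenbrock function, minimized at (1, 1); a standard stress test
    # for optimizers because of its curved, narrow valley.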
    x, y = tensor
    return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2


def drosenbrock(tensor):
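    # Analytic gradient of the Rosenbrock function above.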
    x, y = tensor
    return torch.DoubleTensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2)))


def wrap_old_fn(old_fn, **config):
    def wrapper(closure, params, state):
        return old_fn(closure, params, config, state)
    return wrapper


class TestOptim(TestCase):

    def _test_rosenbrock(self, constructor, old_fn):
        params_t = torch.Tensor([1.5, 1.5])
        state = {}

        params = Variable(torch.Tensor([1.5, 1.5]), requires_grad=True)
        optimizer = constructor([params])

        solution = torch.Tensor([1, 1])
        initial_dist = params.data.dist(solution)

        def eval():
            optimizer.zero_grad()
            loss = rosenbrock(params)
            loss.backward()
            # loss.backward() will give slightly different gradients than
            # drosenbrock, because of a different ordering of floating point
            # operations. In most cases it doesn't matter, but some optimizers
            # are so sensitive that they can temporarily diverge up to 1e-4,
            # just to converge again. This makes the comparison more stable.
            params.grad.data.copy_(drosenbrock(params.data))
            return loss

        for i in range(2000):
            optimizer.step(eval)
            old_fn(lambda _: (rosenbrock(params_t), drosenbrock(params_t)),
                   params_t, state)
            self.assertEqual(params.data, params_t)

        self.assertLessEqual(params.data.dist(solution), initial_dist)

    def _test_basic_cases_template(self, weight, bias, input, constructor):
        weight = Variable(weight, requires_grad=True)
        bias = Variable(bias, requires_grad=True)
        input = Variable(input)
        optimizer = constructor(weight, bias)

        def fn():
            optimizer.zero_grad()
            y = weight.mv(input)
            if y.is_cuda and bias.is_cuda and y.get_device() != bias.get_device():
                y = y.cuda(bias.get_device())
            loss = (y + bias).pow(2).sum()
            loss.backward()
            return loss

        initial_value = fn().data[0]
        for i in range(200):
            optimizer.step(fn)
        self.assertLess(fn().data[0], initial_value)

    def _test_state_dict(self, weight, bias, input, constructor):
        weight = Variable(weight, requires_grad=True)
        bias = Variable(bias, requires_grad=True)
        input = Variable(input)

        def fn_base(optimizer, weight, bias):
            optimizer.zero_grad()
            loss = (weight.mv(input) + bias).pow(2).sum()
            loss.backward()
            return loss

        optimizer = constructor(weight, bias)
        fn = functools.partial(fn_base, optimizer, weight, bias)

        # Prime the optimizer
        for i in range(20):
            optimizer.step(fn)
        # Clone the weights and construct new optimizer for them
        weight_c = Variable(weight.data.clone(), requires_grad=True)
        bias_c = Variable(bias.data.clone(), requires_grad=True)
        optimizer_c = constructor(weight_c, bias_c)
        fn_c = functools.partial(fn_base, optimizer_c, weight_c, bias_c)
        # Load state dict
        state_dict = deepcopy(optimizer.state_dict())
        state_dict_c = deepcopy(optimizer.state_dict())
        optimizer_c.load_state_dict(state_dict_c)
        # Run both optimizations in parallel
        for i in range(20):
            optimizer.step(fn)
            optimizer_c.step(fn_c)
            self.assertEqual(weight, weight_c)
            self.assertEqual(bias, bias_c)
        # Make sure state dict wasn't modified
        self.assertEqual(state_dict, state_dict_c)

    def _test_basic_cases(self, constructor, ignore_multidevice=False):
        self._test_state_dict(
            torch.randn(10, 5),
            torch.randn(10),
            torch.randn(5),
            constructor
        )
        self._test_basic_cases_template(
            torch.randn(10, 5),
            torch.randn(10),
            torch.randn(5),
            constructor
        )
        # non-contiguous parameters
        self._test_basic_cases_template(
            torch.randn(10, 5, 2)[..., 0],
            torch.randn(10, 2)[..., 0],
            torch.randn(5),
            constructor
        )
        # CUDA
        if not torch.cuda.is_available():
            return
        self._test_basic_cases_template(
            torch.randn(10, 5).cuda(),
            torch.randn(10).cuda(),
            torch.randn(5).cuda(),
            constructor
        )
        # Multi-GPU
        if not torch.cuda.device_count() > 1 or ignore_multidevice:
            return
        self._test_basic_cases_template(
            torch.randn(10, 5).cuda(0),
            torch.randn(10).cuda(1),
            torch.randn(5).cuda(0),
            constructor
        )

    def _build_params_dict(self, weight, bias, **kwargs):
        return [dict(params=[weight]), dict(params=[bias], **kwargs)]

    def test_sgd(self):
        self._test_rosenbrock(
            lambda params: optim.SGD(params, lr=1e-3),
            wrap_old_fn(old_optim.sgd, learningRate=1e-3)
        )
        self._test_rosenbrock(
            lambda params: optim.SGD(params, lr=1e-3, momentum=0.9,
                                     dampening=0, weight_decay=1e-4),
            wrap_old_fn(old_optim.sgd, learningRate=1e-3, momentum=0.9,
                        dampening=0, weightDecay=1e-4)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.SGD([weight, bias], lr=1e-3)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.SGD(
                self._build_params_dict(weight, bias, lr=1e-2),
                lr=1e-3)
        )

    def test_adam(self):
        self._test_rosenbrock(
            lambda params: optim.Adam(params, lr=1e-2),
            wrap_old_fn(old_optim.adam, learningRate=1e-2)
        )
        self._test_rosenbrock(
            lambda params: optim.Adam(params, lr=1e-2, weight_decay=1e-2),
            wrap_old_fn(old_optim.adam, learningRate=1e-2, weightDecay=1e-2)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adam([weight, bias], lr=1e-3)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adam(
                self._build_params_dict(weight, bias, lr=1e-2),
                lr=1e-3)
        )

    def test_adadelta(self):
        self._test_rosenbrock(
            lambda params: optim.Adadelta(params),
            wrap_old_fn(old_optim.adadelta)
        )
        self._test_rosenbrock(
            lambda params: optim.Adadelta(params, rho=0.95),
            wrap_old_fn(old_optim.adadelta, rho=0.95)
        )
        self._test_rosenbrock(
            lambda params: optim.Adadelta(params, weight_decay=1e-2),
            wrap_old_fn(old_optim.adadelta, weightDecay=1e-2)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adadelta([weight, bias])
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adadelta(
                self._build_params_dict(weight, bias, rho=0.95))
        )

    def test_adagrad(self):
        self._test_rosenbrock(
            lambda params: optim.Adagrad(params, lr=1e-1),
            wrap_old_fn(old_optim.adagrad, learningRate=1e-1)
        )
        self._test_rosenbrock(
            lambda params: optim.Adagrad(params, lr=1e-1, lr_decay=1e-3),
            wrap_old_fn(old_optim.adagrad, learningRate=1e-1, learningRateDecay=1e-3)
        )
        self._test_rosenbrock(
            lambda params: optim.Adagrad(params, lr=1e-1, weight_decay=1e-2),
            wrap_old_fn(old_optim.adagrad, learningRate=1e-1, weightDecay=1e-2)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adagrad([weight, bias], lr=1e-1)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adagrad(
                self._build_params_dict(weight, bias, lr=1e-2),
                lr=1e-1)
        )

    def test_adamax(self):
        self._test_rosenbrock(
            lambda params: optim.Adamax(params, lr=1e-1),
            wrap_old_fn(old_optim.adamax, learningRate=1e-1)
        )
        self._test_rosenbrock(
            lambda params: optim.Adamax(params, lr=1e-1, weight_decay=1e-2),
            wrap_old_fn(old_optim.adamax, learningRate=1e-1, weightDecay=1e-2)
        )
        self._test_rosenbrock(
            lambda params: optim.Adamax(params, lr=1e-1, betas=(0.95, 0.998)),
            wrap_old_fn(old_optim.adamax, learningRate=1e-1, beta1=0.95, beta2=0.998)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adamax([weight, bias], lr=1e-1)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Adamax(
                self._build_params_dict(weight, bias, lr=1e-2),
                lr=1e-1)
        )

    def test_rmsprop(self):
        self._test_rosenbrock(
            lambda params: optim.RMSprop(params, lr=1e-2),
            wrap_old_fn(old_optim.rmsprop, learningRate=1e-2)
        )
        self._test_rosenbrock(
            lambda params: optim.RMSprop(params, lr=1e-2, weight_decay=1e-2),
            wrap_old_fn(old_optim.rmsprop, learningRate=1e-2, weightDecay=1e-2)
        )
        self._test_rosenbrock(
            lambda params: optim.RMSprop(params, lr=1e-2, alpha=0.95),
            wrap_old_fn(old_optim.rmsprop, learningRate=1e-2, alpha=0.95)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.RMSprop([weight, bias], lr=1e-2)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.RMSprop(
                self._build_params_dict(weight, bias, lr=1e-3),
                lr=1e-2)
        )

    def test_asgd(self):
        self._test_rosenbrock(
            lambda params: optim.ASGD(params, lr=1e-3),
            wrap_old_fn(old_optim.asgd, eta0=1e-3)
        )
        self._test_rosenbrock(
            lambda params: optim.ASGD(params, lr=1e-3, alpha=0.8),
            wrap_old_fn(old_optim.asgd, eta0=1e-3, alpha=0.8)
        )
        self._test_rosenbrock(
            lambda params: optim.ASGD(params, lr=1e-3, t0=1e3),
            wrap_old_fn(old_optim.asgd, eta0=1e-3, t0=1e3)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.ASGD([weight, bias], lr=1e-3, t0=100)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.ASGD(
                self._build_params_dict(weight, bias, lr=1e-2),
                lr=1e-3, t0=100)
        )

    def test_rprop(self):
        self._test_rosenbrock(
            lambda params: optim.Rprop(params, lr=1e-3),
            wrap_old_fn(old_optim.rprop, stepsize=1e-3)
        )
        self._test_rosenbrock(
            lambda params: optim.Rprop(params, lr=1e-3, etas=(0.6, 1.1)),
            wrap_old_fn(old_optim.rprop, stepsize=1e-3, etaminus=0.6, etaplus=1.1)
        )
        self._test_rosenbrock(
            lambda params: optim.Rprop(params, lr=1e-3, step_sizes=(1e-4, 3)),
            wrap_old_fn(old_optim.rprop, stepsize=1e-3, stepsizemin=1e-4, stepsizemax=3)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Rprop([weight, bias], lr=1e-3)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.Rprop(
                self._build_params_dict(weight, bias, lr=1e-2),
                lr=1e-3)
        )

    def test_lbfgs(self):
        self._test_rosenbrock(
            lambda params: optim.LBFGS(params),
            wrap_old_fn(old_optim.lbfgs)
        )
        self._test_rosenbrock(
            lambda params: optim.LBFGS(params, lr=5e-2, max_iter=5),
            wrap_old_fn(old_optim.lbfgs, learningRate=5e-2, maxIter=5)
        )
        self._test_basic_cases(
            lambda weight, bias: optim.LBFGS([weight, bias]),
            ignore_multidevice=True
        )

    def test_invalid_param_type(self):
        with self.assertRaises(TypeError):
            optim.SGD(Variable(torch.randn(5, 5)), lr=3)


if __name__ == '__main__':
    run_tests()
372 test/test_sparse.py Normal file
@@ -0,0 +1,372 @@
import torch
from torch import sparse

import itertools
import random
import unittest
from common import TestCase, run_tests
from numbers import Number

SparseTensor = sparse.DoubleTensor


class TestSparse(TestCase):
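    # _gen_sparse builds a random d-dimensional sparse tensor with nnz entries;
    # when with_size is a list longer than d, the trailing dimensions are dense
    # (a "hybrid" tensor whose values are themselves tensors).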

    @staticmethod
    def _gen_sparse(d, nnz, with_size):
        if isinstance(with_size, Number):
            v = torch.randn(nnz)
            i = (torch.rand(d, nnz) * with_size).type(torch.LongTensor)
            x = SparseTensor(i, v)
        else:
            v_size = [nnz] + list(with_size[d:])
            v = torch.randn(*v_size)
            i = torch.rand(d, nnz) * \
                torch.Tensor(with_size[:d]).repeat(nnz, 1).transpose(0, 1)
            i = i.type(torch.LongTensor)
            x = SparseTensor(i, v, torch.Size(with_size))

        return x, i, v

    def test_basic(self):
        x, i, v = self._gen_sparse(3, 10, 100)

        self.assertEqual(i, x.indices())
        self.assertEqual(v, x.values())

        x, i, v = self._gen_sparse(3, 10, [100, 100, 100])
        self.assertEqual(i, x.indices())
        self.assertEqual(v, x.values())
        self.assertEqual(x.ndimension(), 3)
        self.assertEqual(x.nnz(), 10)
        for i in range(3):
            self.assertEqual(x.size(i), 100)

        # Make sure we can access empty indices / values
        x = SparseTensor()
        self.assertEqual(x.indices().numel(), 0)
        self.assertEqual(x.values().numel(), 0)

    def test_to_dense(self):
        i = torch.LongTensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
            [0, 0, 1, 4],
        ])
        v = torch.Tensor([2, 1, 3, 4])
        x = SparseTensor(i, v, torch.Size([3, 4, 5]))
        res = torch.Tensor([
            [[2, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]],
            [[1, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]],
            [[0, 3, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 4]],
        ])

        x.to_dense()  # Tests double to_dense for memory corruption
        x.to_dense()
        x.to_dense()
        self.assertEqual(res, x.to_dense())

    def test_to_dense_hybrid(self):
        i = torch.LongTensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
        ])
        v = torch.Tensor([[2, 3], [1, 2], [3, 4], [4, 5]])
        x = SparseTensor(i, v, torch.Size([3, 4, 2]))
        res = torch.Tensor([
            [[2, 3],
             [0, 0],
             [0, 0],
             [0, 0]],
            [[1, 2],
             [0, 0],
             [0, 0],
             [0, 0]],
            [[3, 4],
             [0, 0],
             [0, 0],
             [4, 5]],
        ])

        x.to_dense()  # Tests double to_dense for memory corruption
        x.to_dense()
        x.to_dense()
        self.assertEqual(res, x.to_dense())

    def test_contig(self):
        i = torch.LongTensor([
            [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
            [92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
        ])
        v = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
        x = SparseTensor(i, v, torch.Size([100, 100]))
        exp_i = torch.LongTensor([
            [0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
            [31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
        ])
        exp_v = torch.Tensor([2, 1, 6, 4, 10, 3, 5, 9, 8, 7])
        x.contiguous()
        self.assertEqual(exp_i, x.indices())
        self.assertEqual(exp_v, x.values())

        i = torch.LongTensor([
            [2, 0, 2, 1],
            [0, 0, 3, 0],
            [1, 0, 4, 0],
        ])
        v = torch.Tensor([3, 2, 4, 1])
        x = SparseTensor(i, v, torch.Size([3, 4, 5]))
        exp_i = torch.LongTensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
            [0, 0, 1, 4],
        ])
        exp_v = torch.Tensor([2, 1, 3, 4])

        x.contiguous()
        self.assertEqual(exp_i, x.indices())
        self.assertEqual(exp_v, x.values())

        # Duplicate indices
        i = torch.LongTensor([
            [0, 0, 2, 0],
            [0, 0, 3, 0],
            [0, 0, 4, 0],
        ])
        v = torch.Tensor([3, 2, 4, 1])
        x = SparseTensor(i, v, torch.Size([3, 4, 5]))
        exp_i = torch.LongTensor([
            [0, 2],
            [0, 3],
            [0, 4],
        ])
        exp_v = torch.Tensor([6, 4])

        x.contiguous()
        self.assertEqual(exp_i, x.indices())
        self.assertEqual(exp_v, x.values())

    def test_contig_hybrid(self):
        i = torch.LongTensor([
            [1, 0, 35, 14, 39, 6, 71, 66, 40, 27],
            [92, 31, 62, 50, 22, 65, 89, 74, 56, 34],
        ])
        v = torch.Tensor([
            [1, 2], [2, 3], [3, 4], [4, 5], [5, 6],
            [6, 7], [7, 8], [8, 9], [9, 10], [10, 11],
        ])
        x = SparseTensor(i, v, torch.Size([100, 100, 2]))
        exp_i = torch.LongTensor([
            [0, 1, 6, 14, 27, 35, 39, 40, 66, 71],
            [31, 92, 65, 50, 34, 62, 22, 56, 74, 89],
        ])
        exp_v = torch.Tensor([
            [2, 3], [1, 2], [6, 7], [4, 5], [10, 11],
            [3, 4], [5, 6], [9, 10], [8, 9], [7, 8],
        ])
        x.contiguous()
        self.assertEqual(exp_i, x.indices())
        self.assertEqual(exp_v, x.values())

        i = torch.LongTensor([
            [2, 0, 2, 1],
            [0, 0, 3, 0],
            [1, 0, 4, 0],
        ])
        v = torch.Tensor([[3, 3, 3], [2, 2, 2], [4, 4, 4], [1, 1, 1]])
        x = SparseTensor(i, v, torch.Size([3, 4, 5, 3]))
        exp_i = torch.LongTensor([
            [0, 1, 2, 2],
            [0, 0, 0, 3],
            [0, 0, 1, 4],
        ])
        exp_v = torch.Tensor([[2, 2, 2], [1, 1, 1], [3, 3, 3], [4, 4, 4]])

        x.contiguous()
        self.assertEqual(exp_i, x.indices())
        self.assertEqual(exp_v, x.values())

        # Duplicate indices
        i = torch.LongTensor([
            [0, 0, 2, 0],
            [0, 0, 3, 0],
            [0, 0, 4, 0],
        ])
        v = torch.Tensor([[3, 2, 3], [2, 1, 1], [4, 3, 4], [1, 1, 1]])
        x = SparseTensor(i, v, torch.Size([3, 4, 5, 3]))
        exp_i = torch.LongTensor([
            [0, 2],
            [0, 3],
            [0, 4],
        ])
        exp_v = torch.Tensor([[6, 4, 5], [4, 3, 4]])

        x.contiguous()
        self.assertEqual(exp_i, x.indices())
        self.assertEqual(exp_v, x.values())

    def test_transpose(self):
        x = self._gen_sparse(4, 20, 5)[0]
        y = x.to_dense()

        for i, j in itertools.combinations(range(4), 2):
            x = x.transpose_(i, j)
            y = y.transpose(i, j)
            self.assertEqual(x.to_dense(), y)

            x = x.transpose(i, j)
            y = y.transpose(i, j)
            self.assertEqual(x.to_dense(), y)

    def test_mm(self):
        def test_shape(di, dj, dk):
            x, _, _ = self._gen_sparse(2, 20, [di, dj])
            t = torch.randn(di, dk)
            y = torch.randn(dj, dk)
            alpha = random.random()
            beta = random.random()

            expected = torch.addmm(alpha, t, beta, x.to_dense(), y)
            res = torch.addmm(alpha, t, beta, x, y)
            self.assertEqual(res, expected)

            expected = torch.addmm(t, x.to_dense(), y)
            res = torch.addmm(t, x, y)
            self.assertEqual(res, expected)

            expected = torch.mm(x.to_dense(), y)
            res = torch.mm(x, y)
            self.assertEqual(res, expected)

        test_shape(10, 100, 100)
        test_shape(100, 1000, 200)
        test_shape(64, 10000, 300)

    def test_saddmm(self):
        def test_shape(di, dj, dk):
            x = self._gen_sparse(2, 20, [di, dj])[0]
            t = self._gen_sparse(2, 20, [di, dk])[0]
            y = torch.randn(dj, dk)
            alpha = random.random()
            beta = random.random()

            expected = torch.addmm(alpha, t.to_dense(), beta, x.to_dense(), y)
            res = torch.saddmm(alpha, t, beta, x, y)
            self.assertEqual(res.to_dense(), expected)

            expected = torch.addmm(t.to_dense(), x.to_dense(), y)
            res = torch.saddmm(t, x, y)
            self.assertEqual(res.to_dense(), expected)

            expected = torch.mm(x.to_dense(), y)
            res = torch.smm(x, y)
            self.assertEqual(res.to_dense(), expected)

        test_shape(7, 5, 3)
        test_shape(1000, 100, 100)
        test_shape(3000, 64, 300)

    def _test_spadd_shape(self, shape_i, shape_v=None):
        shape = shape_i + (shape_v or [])
        x, _, _ = self._gen_sparse(len(shape_i), 10, shape)
        y = torch.randn(*shape)
        r = random.random()

        expected = y + r * x.to_dense()
        res = torch.add(y, r, x)

        self.assertEqual(res, expected)

        # Non contiguous dense tensor
        s = list(shape)
        s[0] = shape[-1]
        s[-1] = shape[0]
        y = torch.randn(*s).transpose_(0, len(s) - 1)
        r = random.random()

        expected = y + r * x.to_dense()
        res = torch.add(y, r, x)

        self.assertEqual(res, expected)

    def test_spadd(self):
        self._test_spadd_shape([5, 6])
        self._test_spadd_shape([10, 10, 10])
        self._test_spadd_shape([50, 30, 20])
        self._test_spadd_shape([5, 5, 5, 5, 5, 5])

    def test_spadd_hybrid(self):
        self._test_spadd_shape([5, 6], [2, 3])
        self._test_spadd_shape([10, 10, 10], [3])
        self._test_spadd_shape([50, 30, 20], [2])
        self._test_spadd_shape([5, 5, 5, 5, 5, 5], [2])

    def _test_basic_ops_shape(self, shape_i, shape_v=None):
        shape = shape_i + (shape_v or [])
        x1, _, _ = self._gen_sparse(len(shape_i), 9, shape)
        x2, _, _ = self._gen_sparse(len(shape_i), 12, shape)

        y1 = x1 + x2
        y2 = x1.clone()
        y2.add_(x2)
        expected = x1.to_dense() + x2.to_dense()
        self.assertEqual(y1.to_dense(), expected)
        self.assertEqual(y2.to_dense(), expected)

        y1 = x1 - x2
        y2 = x1.clone()
        y2.sub_(x2)
        expected = x1.to_dense() - x2.to_dense()
        self.assertEqual(y1.to_dense(), expected)
        self.assertEqual(y2.to_dense(), expected)

        y1 = x1 * x2
        y2 = x1.clone()
        y2.mul_(x2)
        expected = x1.to_dense() * x2.to_dense()
        self.assertEqual(y1.to_dense(), expected)
        self.assertEqual(y2.to_dense(), expected)

        y1 = x1 * 37.5
        y2 = x1.clone()
        y2.mul_(37.5)
        expected = x1.to_dense() * 37.5
        self.assertEqual(y1.to_dense(), expected)
        self.assertEqual(y2.to_dense(), expected)

        y1 = x1 / 37.5
        y2 = x1.clone()
        y2.div_(37.5)
        expected = x1.to_dense() / 37.5
        self.assertEqual(y1.to_dense(), expected)
        self.assertEqual(y2.to_dense(), expected)

        y = x1.clone()
        y.zero_()
        expected = torch.zeros(x1.size())
        self.assertEqual(y.to_dense(), expected)

    def test_basic_ops(self):
        self._test_basic_ops_shape([5, 6])
        self._test_basic_ops_shape([10, 10, 10])
        self._test_basic_ops_shape([50, 30, 20])
        self._test_basic_ops_shape([5, 5, 5, 5, 5, 5])

    def test_basic_ops_hybrid(self):
        self._test_basic_ops_shape([5, 6], [2, 3])
        self._test_basic_ops_shape([10, 10, 10], [3])
        self._test_basic_ops_shape([50, 30, 20], [2])
        self._test_basic_ops_shape([5, 5, 5, 5, 5, 5], [2])


if __name__ == '__main__':
    run_tests()
1955 test/test_torch.py
File diff suppressed because it is too large
350 test/test_utils.py Normal file
@@ -0,0 +1,350 @@
from __future__ import print_function
import sys
import os
import math
import shutil
import random
import tempfile
import unittest
import traceback
import torch
import torch.cuda
import warnings
from torch.autograd import Variable
from torch.utils.trainer import Trainer
from torch.utils.trainer.plugins import *
from torch.utils.trainer.plugins.plugin import Plugin
from torch.utils.serialization import load_lua

HAS_CUDA = torch.cuda.is_available()

from common import TestCase, run_tests, download_file

try:
    import cffi
    from torch.utils.ffi import compile_extension
    HAS_CFFI = True
except ImportError:
    HAS_CFFI = False


class SimplePlugin(Plugin):
|
||||
|
||||
def __init__(self, interval):
|
||||
super(SimplePlugin, self).__init__(interval)
|
||||
self.trainer = None
|
||||
self.num_iteration = 0
|
||||
self.num_epoch = 0
|
||||
self.num_batch = 0
|
||||
self.num_update = 0
|
||||
|
||||
def register(self, trainer):
|
||||
self.trainer = trainer
|
||||
|
||||
def iteration(self, *args):
|
||||
self.iteration_args = args
|
||||
self.num_iteration += 1
|
||||
|
||||
def epoch(self, *args):
|
||||
self.epoch_args = args
|
||||
self.num_epoch += 1
|
||||
|
||||
def batch(self, *args):
|
||||
self.batch_args = args
|
||||
self.num_batch += 1
|
||||
|
||||
def update(self, *args):
|
||||
self.update_args = args
|
||||
self.num_update += 1
|
||||
|
||||
|
||||
class ModelMock(object):
|
||||
|
||||
def __init__(self):
|
||||
self.num_calls = 0
|
||||
self.output = Variable(torch.ones(1, 1), requires_grad=True)
|
||||
|
||||
def __call__(self, i):
|
||||
self.num_calls += 1
|
||||
return self.output * 2
|
||||
|
||||
|
||||
class CriterionMock(object):
|
||||
|
||||
def __init__(self):
|
||||
self.num_calls = 0
|
||||
|
||||
def __call__(self, out, target):
|
||||
self.num_calls += 1
|
||||
return out
|
||||
|
||||
|
||||
class OptimizerMock(object):
|
||||
max_evals = 5
|
||||
min_evals = 1
|
||||
|
||||
def __init__(self):
|
||||
self.num_steps = 0
|
||||
self.num_evals = 0
|
||||
|
||||
def step(self, closure):
|
||||
for i in range(random.randint(self.min_evals, self.max_evals)):
|
||||
loss = closure()
|
||||
self.num_evals += 1
|
||||
self.num_steps += 1
|
||||
|
||||
def zero_grad(self):
|
||||
pass
|
||||
|
||||
|
||||
class DatasetMock(object):
|
||||
|
||||
def __iter__(self):
|
||||
for i in range(10):
|
||||
yield torch.randn(2, 10), torch.randperm(10)[:2]
|
||||
|
||||
def __len__(self):
|
||||
return 10
|
||||
|
||||
|
||||
class TestTrainer(TestCase):
|
||||
|
||||
intervals = [
|
||||
[(1, 'iteration')],
|
||||
[(1, 'epoch')],
|
||||
[(1, 'batch')],
|
||||
[(1, 'update')],
|
||||
[(5, 'iteration')],
|
||||
[(5, 'epoch')],
|
||||
[(5, 'batch')],
|
||||
[(5, 'update')],
|
||||
[(1, 'iteration'), (1, 'epoch')],
|
||||
[(5, 'update'), (1, 'iteration')],
|
||||
[(2, 'epoch'), (1, 'batch')],
|
||||
]
|
||||
|
||||
def setUp(self):
|
||||
self.optimizer = OptimizerMock()
|
||||
self.trainer = Trainer(ModelMock(), CriterionMock(),
|
||||
self.optimizer, DatasetMock())
|
||||
self.num_epochs = 3
|
||||
self.dataset_size = len(self.trainer.dataset)
|
||||
self.num_iters = self.num_epochs * self.dataset_size
|
||||
|
||||
def test_register_plugin(self):
|
||||
for interval in self.intervals:
|
||||
simple_plugin = SimplePlugin(interval)
|
||||
self.trainer.register_plugin(simple_plugin)
|
||||
self.assertEqual(simple_plugin.trainer, self.trainer)
|
||||
|
||||
def test_optimizer_step(self):
|
||||
self.trainer.run(epochs=1)
|
||||
self.assertEqual(self.trainer.optimizer.num_steps, 10)
|
||||
|
||||
def test_plugin_interval(self):
|
||||
for interval in self.intervals:
|
||||
self.setUp()
|
||||
simple_plugin = SimplePlugin(interval)
|
||||
self.trainer.register_plugin(simple_plugin)
|
||||
self.trainer.run(epochs=self.num_epochs)
|
||||
units = {
|
||||
('iteration', self.num_iters),
|
||||
('epoch', self.num_epochs),
|
||||
('batch', self.num_iters),
|
||||
('update', self.num_iters)
|
||||
}
|
||||
for unit, num_triggers in units:
|
||||
call_every = None
|
||||
for i, i_unit in interval:
|
||||
if i_unit == unit:
|
||||
call_every = i
|
||||
break
|
||||
if call_every:
|
||||
expected_num_calls = math.floor(num_triggers / call_every)
|
||||
else:
|
||||
expected_num_calls = 0
|
||||
num_calls = getattr(simple_plugin, 'num_' + unit)
|
||||
self.assertEqual(num_calls, expected_num_calls, 0)
|
||||
|
||||
def test_model_called(self):
|
||||
self.trainer.run(epochs=self.num_epochs)
|
||||
num_model_calls = self.trainer.model.num_calls
|
||||
num_crit_calls = self.trainer.criterion.num_calls
|
||||
self.assertEqual(num_model_calls, num_crit_calls)
|
||||
for num_calls in [num_model_calls, num_crit_calls]:
|
||||
lower_bound = OptimizerMock.min_evals * self.num_iters
|
||||
upper_bound = OptimizerMock.max_evals * self.num_iters
|
||||
self.assertEqual(num_calls, self.trainer.optimizer.num_evals)
|
||||
self.assertLessEqual(lower_bound, num_calls)
|
||||
self.assertLessEqual(num_calls, upper_bound)
|
||||
|
||||
def test_model_gradient(self):
|
||||
self.trainer.run(epochs=self.num_epochs)
|
||||
output_var = self.trainer.model.output
|
||||
expected_grad = torch.ones(1, 1) * 2 * self.optimizer.num_evals
|
||||
self.assertEqual(output_var.grad.data, expected_grad)
|
||||
|
||||
|
||||
test_dir = os.path.abspath(os.path.dirname(str(__file__)))
|
||||
|
||||
|
||||
class TestFFI(TestCase):
|
||||
|
||||
def setUp(self):
|
||||
self.tmpdir = tempfile.mkdtemp()
|
||||
os.chdir(self.tmpdir)
|
||||
sys.path.append(self.tmpdir)
|
||||
|
||||
def tearDown(self):
|
||||
shutil.rmtree(self.tmpdir)
|
||||
|
||||
@unittest.skipIf(not HAS_CFFI, "ffi tests require cffi package")
|
||||
def test_cpu(self):
|
||||
compile_extension(
|
||||
name='test_extensions.cpulib',
|
||||
header=test_dir + '/ffi/src/cpu/lib.h',
|
||||
sources=[
|
||||
test_dir + '/ffi/src/cpu/lib1.c',
|
||||
test_dir + '/ffi/src/cpu/lib2.c',
|
||||
],
|
||||
verbose=False,
|
||||
)
|
||||
from test_extensions import cpulib
|
||||
tensor = torch.ones(2, 2).float()
|
||||
|
||||
cpulib.good_func(tensor, 2, 1.5)
|
||||
self.assertEqual(tensor, torch.ones(2, 2) * 2 + 1.5)
|
||||
|
||||
new_tensor = cpulib.new_tensor(4)
|
||||
self.assertEqual(new_tensor, torch.ones(4, 4) * 4)
|
||||
|
||||
f = cpulib.int_to_float(5)
|
||||
self.assertIs(type(f), float)
|
||||
|
||||
self.assertRaises(TypeError,
|
||||
lambda: cpulib.good_func(tensor.double(), 2, 1.5))
|
||||
self.assertRaises(torch.FatalError,
|
||||
lambda: cpulib.bad_func(tensor, 2, 1.5))
|
||||
|
||||
@unittest.skipIf(not HAS_CFFI or not HAS_CUDA, "ffi tests require cffi package")
|
||||
def test_gpu(self):
|
||||
compile_extension(
|
||||
name='gpulib',
|
||||
header=test_dir + '/ffi/src/cuda/cudalib.h',
|
||||
sources=[
|
||||
test_dir + '/ffi/src/cuda/cudalib.c',
|
||||
],
|
||||
with_cuda=True,
|
||||
verbose=False,
|
||||
)
|
||||
import gpulib
|
||||
tensor = torch.ones(2, 2).float()
|
||||
|
||||
gpulib.good_func(tensor, 2, 1.5)
|
||||
self.assertEqual(tensor, torch.ones(2, 2) * 2 + 1.5)
|
||||
|
||||
ctensor = tensor.cuda().fill_(1)
|
||||
gpulib.cuda_func(ctensor, 2, 1.5)
|
||||
self.assertEqual(ctensor, torch.ones(2, 2) * 2 + 1.5)
|
||||
|
||||
self.assertRaises(TypeError,
|
||||
lambda: gpulib.cuda_func(tensor, 2, 1.5))
|
||||
self.assertRaises(TypeError,
|
||||
lambda: gpulib.cuda_func(ctensor.storage(), 2, 1.5))
|
||||
|
||||
|
||||
class TestLuaReader(TestCase):
|
||||
|
||||
@staticmethod
|
||||
def _module_test(name, test):
|
||||
def do_test(self):
|
||||
module = test['module']
|
||||
input = test['input']
|
||||
grad_output = test['grad_output']
|
||||
if hasattr(self, '_transform_' + name):
|
||||
input = getattr(self, '_transform_' + name)(input)
|
||||
output = module.forward(input)
|
||||
module.zeroGradParameters()
|
||||
grad_input = module.backward(input, grad_output)
|
||||
self.assertEqual(output, test['output'])
|
||||
self.assertEqual(grad_input, test['grad_input'])
|
||||
if module.parameters() is not None:
|
||||
params, d_params = module.parameters()
|
||||
self.assertEqual(params, test['params'])
|
||||
self.assertEqual(d_params, test['d_params'])
|
||||
else:
|
||||
self.assertFalse('params' in test and test['params'])
|
||||
self.assertFalse('params' in test and test['d_params'])
|
||||
return do_test
|
||||
|
||||
@staticmethod
|
||||
def _criterion_test(name, test):
|
||||
def do_test(self):
|
||||
module = test['module']
|
||||
input = test['input']
|
||||
if name == 'L1Cost':
|
||||
target = None
|
||||
else:
|
||||
target = test['target']
|
||||
if hasattr(self, '_transform_' + name):
|
||||
input, target = getattr(self, '_transform_' + name)(input, target)
|
||||
|
||||
output = module.forward(input, target)
|
||||
grad_input = module.backward(input, target)
|
||||
self.assertEqual(output, test['loss'])
|
||||
self.assertEqual(grad_input, test['grad_input'])
|
||||
return do_test
|
||||
|
||||
@classmethod
|
||||
def init(cls):
|
||||
DATA_URL = 'https://download.pytorch.org/test_data/legacy_modules.t7'
|
||||
data_dir = os.path.join(os.path.dirname(__file__), 'data')
|
||||
test_file_path = os.path.join(data_dir, 'legacy_modules.t7')
|
||||
succ = download_file(DATA_URL, test_file_path)
|
||||
if not succ:
|
||||
warnings.warn(("Couldn't download the test file for TestLuaReader! "
|
||||
"Tests will be incomplete!"), RuntimeWarning)
|
||||
return
|
||||
|
||||
tests = load_lua(test_file_path)
|
||||
for name, test in tests['modules'].items():
|
||||
test_name = 'test_' + name.replace('nn.', '')
|
||||
setattr(cls, test_name, cls._module_test(name, test))
|
||||
for name, test in tests['criterions'].items():
|
||||
test_name = 'test_' + name.replace('nn.', '')
|
||||
setattr(cls, test_name, cls._criterion_test(name, test))
|
||||
|
||||
def _transform_Index(self, input):
|
||||
return [input[0], input[1].sub(1)]
|
||||
|
||||
def _transform_LookupTable(self, input):
|
||||
return input.sub(1)
|
||||
|
||||
def _transform_MultiLabelMarginCriterion(self, input, target):
|
||||
return input, target.sub(1)
|
||||
|
||||
def _transform_ClassNLLCriterion(self, input, target):
|
||||
return input, target.sub(1)
|
||||
|
||||
def _transform_SpatialClassNLLCriterion(self, input, target):
|
||||
return input, target.sub(1)
|
||||
|
||||
def _transform_ClassSimplexCriterion(self, input, target):
|
||||
return input, target.sub(1)
|
||||
|
||||
def _transform_CrossEntropyCriterion(self, input, target):
|
||||
return input, target.sub(1)
|
||||
|
||||
def _transform_ParallelCriterion(self, input, target):
|
||||
return input, [target[0].sub(1), target[1]]
|
||||
|
||||
def _transform_MultiCriterion(self, input, target):
|
||||
return input, target.sub(1)
|
||||
|
||||
def _transform_MultiMarginCriterion(self, input, target):
|
||||
return input, target.sub(1)
|
||||
|
||||
|
||||
TestLuaReader.init()
|
||||
if __name__ == '__main__':
|
||||
run_tests()
|
||||
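The interval lists above drive how often each plugin hook fires; test_plugin_interval checks that a hook registered with interval n is triggered floor(num_triggers / n) times. A minimal sketch of that contract (editor's illustration, not part of the diff; it assumes the Trainer semantics these tests assert, with DatasetMock yielding 10 batches per epoch):

trainer = Trainer(ModelMock(), CriterionMock(), OptimizerMock(), DatasetMock())
plugin = SimplePlugin([(5, 'update'), (1, 'iteration')])
trainer.register_plugin(plugin)
trainer.run(epochs=2)        # 2 epochs x 10 batches = 20 iterations
print(plugin.num_iteration)  # 20: iteration() fires every iteration
print(plugin.num_update)     # 4:  update() fires every 5th update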
@@ -2,45 +2,38 @@ import os
import yaml
from string import Template
from copy import deepcopy
from .plugins import ArgcountChecker, OptionalArguments, ArgumentReferences, BeforeCall, ConstantArguments, ReturnArguments
from .plugins import ArgcountChecker, OptionalArguments, ArgumentReferences, \
    BeforeAfterCall, ConstantArguments, ReturnArguments, GILRelease


class cwrap(object):
    BASE_INDENT_SIZE = 6

    RETURN_WRAPPERS = {
        'void': Template('$call;\n Py_RETURN_NONE;'),
        'long': Template('return PyLong_FromLong($call);'),
        'bool': Template('return PyBool_FromLong($call);'),
    }

    TYPE_CHECK = {
        'void*': Template('PyLong_Check($arg)'),
        'bool': Template('PyLong_Check($arg)'),
        'float': Template('PyFloat_Check($arg)'),
        'double': Template('PyFloat_Check($arg)'),
        # TODO: this will only work for python3
        'int': Template('PyLong_Check($arg)'),
        'long': Template('PyLong_Check($arg)'),
    }

    TYPE_UNPACK = {
        'void*': Template('PyLong_AsVoidPtr($arg)'),
        'bool': Template('PyLong_AsLong($arg)'),
        'float': Template('(float)PyFloat_AsDouble($arg)'),
        'double': Template('PyFloat_AsDouble($arg)'),
        # TODO: this will only work for python3
        'int': Template('PyLong_AsLong($arg)'),
        'long': Template('PyLong_AsLong($arg)'),
        'void': Template('Py_RETURN_NONE;'),
        'long': Template('return PyLong_FromLong($result);'),
        'bool': Template('return PyBool_FromLong($result);'),
        'void*': Template('return PyLong_FromVoidPtr($result);'),
    }

    OPTION_TEMPLATE = Template("""
    ${els}if ($arg_check) {

      $call
      $pre_arg_assign
      $arg_assign
      $code
    """)

    CALL_TEMPLATE = Template("$cname($arg_unpack)")
    ARG_ASSIGN_TEMPLATE = Template("""${type} ${name} = ${unpack};""")

    DEFAULT_PLUGIN_CLASSES = [ArgcountChecker, ConstantArguments, OptionalArguments, ArgumentReferences, BeforeCall, ReturnArguments]
    OPTION_CODE_TEMPLATE = [
        '$call',
        '$return_result',
    ]

    FUNCTION_CALL_TEMPLATE = Template("$capture_result$cname($call_arg);")

    DEFAULT_PLUGIN_CLASSES = [ArgcountChecker, ConstantArguments, OptionalArguments,
                              ArgumentReferences, BeforeAfterCall, ReturnArguments, GILRelease]

    def __init__(self, source, destination=None, plugins=[], default_plugins=True):
        if destination is None:
@@ -54,6 +47,7 @@ class cwrap(object):
        for plugin in self.plugins:
            plugin.initialize(self)

        self.base_path = os.path.dirname(os.path.abspath(source))
        with open(source, 'r') as f:
            declarations = f.read()

@@ -69,8 +63,10 @@ class cwrap(object):
        declaration_lines = []
        output = []
        in_declaration = False
        i = 0

        for line in lines:
        while i < len(lines):
            line = lines[i]
            if line == '[[':
                declaration_lines = []
                in_declaration = True
@@ -93,18 +89,25 @@ class cwrap(object):
                output.append(wrapper)
            elif in_declaration:
                declaration_lines.append(line)
            elif '!!inc ' == line[:6]:
                fname = os.path.join(self.base_path, line[6:].strip())
                with open(fname, 'r') as f:
                    included = f.read().split('\n')
                # insert it into lines at position i+1
                lines[i + 1:i + 1] = included
            else:
                output.append(line)
            i += 1

        return '\n'.join(output)

    def set_declaration_defaults(self, declaration):
        declaration.setdefault('arguments', [])
        declaration.setdefault('return', 'void')
        if not 'cname' in declaration:
        if 'cname' not in declaration:
            declaration['cname'] = declaration['name']
        # Simulate multiple dispatch, even if it's not necessary
        if not 'options' in declaration:
        if 'options' not in declaration:
            declaration['options'] = [{'arguments': declaration['arguments']}]
            del declaration['arguments']
        # Parse arguments (some of them can be strings)
@@ -140,19 +143,28 @@ class cwrap(object):
            return fallback(*args)

    def get_type_check(self, arg, option):
        return self.search_plugins('get_type_check', (arg, option), lambda arg, _: self.TYPE_CHECK[arg['type']])
        return self.search_plugins('get_type_check', (arg, option), lambda arg, _: None)

    def get_type_unpack(self, arg, option):
        return self.search_plugins('get_type_unpack', (arg, option), lambda arg, _: self.TYPE_UNPACK[arg['type']])
        return self.search_plugins('get_type_unpack', (arg, option), lambda arg, _: None)

    def get_return_wrapper(self, option):
        return self.search_plugins('get_return_wrapper', (option,), lambda t: self.RETURN_WRAPPERS[option['return']])
        return self.search_plugins('get_return_wrapper', (option,), lambda _: self.RETURN_WRAPPERS[option['return']])

    def get_wrapper_template(self, declaration):
        return self.search_plugins('get_wrapper_template', (declaration,), lambda _: None)

    def get_assign_args(self, arguments):
        return self.search_plugins('get_assign_args', (arguments,), lambda _: arguments)

    def get_arg_accessor(self, arg, option):
        return self.search_plugins('get_arg_accessor', (arg, option), lambda arg, _: 'PyTuple_GET_ITEM(args, {})'.format(arg['idx']))
        def wrap_accessor(arg, _):
            if arg.get('idx') is None:
                raise RuntimeError("Missing accessor for '{} {}'".format(
                    arg['type'], arg['name']))
            return 'PyTuple_GET_ITEM(args, {})'.format(arg['idx'])

        return self.search_plugins('get_arg_accessor', (arg, option), wrap_accessor)

    def generate_wrapper(self, declaration):
        wrapper = ''
@@ -167,45 +179,109 @@ class cwrap(object):
        result = []
        for arg in arguments:
            accessor = self.get_arg_accessor(arg, option)
            res = getattr(self, base_fn_name)(arg, option).substitute(arg=accessor)
            tmpl = getattr(self, base_fn_name)(arg, option)
            if tmpl is None:
                fn = 'check' if base_fn_name == 'get_type_check' else 'unpack'
                raise RuntimeError("Missing type {} for '{} {}'".format(
                    fn, arg['type'], arg['name']))
            res = tmpl.substitute(arg=accessor, idx=arg.get('idx'))
            for plugin in self.plugins:
                res = getattr(plugin, plugin_fn_name)(res, arg, accessor)

            result.append(res)
        return result

    def build_option_args(self, arguments, arg_unpack):
        assignement = []
        call_arg = []
        # If types or names needs to be changed
        arguments = self.get_assign_args(arguments)
        for arg, unpack in zip(arguments, arg_unpack):
            if arg['type'] == 'CONSTANT':
                call_arg.append(str(arg['name']))
            else:
                var_name = "arg_" + str(arg.get('assign_name', arg['name']))
                res = self.ARG_ASSIGN_TEMPLATE.substitute(
                    type=arg['type'],
                    name=var_name,
                    unpack=unpack)

                if var_name not in call_arg:
                    assignement.append(res)
                call_arg.append(var_name)
        return assignement, call_arg

    def indent_code(self, code):
        if code == '':
            return code
        code_lines = map(lambda s: s.strip(), code.split('\n'))
        code = '\n'
        depth = self.BASE_INDENT_SIZE
        for line in code_lines:
            depth -= line.count('}') * 2
            code += ' ' * depth + line + '\n'
            depth += line.count('{') * 2
            depth += line.count('(') * 4
            depth -= line.count(')') * 4
        return code[:-1]

    def generate_option(self, option, is_first):
        checked_args = list(filter(
            lambda arg: not 'ignore_check' in arg or not arg['ignore_check'],
            lambda arg: 'ignore_check' not in arg or not arg['ignore_check'],
            option['arguments']))
        option['num_checked_args'] = len(checked_args)
        for i, arg in enumerate(checked_args):
        idx_args = list(filter(
            lambda arg: not arg.get('ignore_check') and not arg.get('no_idx'),
            option['arguments']))
        for i, arg in enumerate(idx_args):
            arg['idx'] = i

        # Generate checks
        arg_checks = self.map_selected_arguments('get_type_check',
            'process_single_check', option, checked_args)
                                                 'process_single_check', option, checked_args)
        arg_checks = ' &&\n '.join(arg_checks)
        for plugin in self.plugins:
            arg_checks = plugin.process_all_checks(arg_checks, option)

        # Generate unpacks
        arg_unpack = self.map_selected_arguments('get_type_unpack',
            'process_single_unpack', option, option['arguments'])
        arg_unpack = ', '.join(arg_unpack)
        # Generate pre_arg assign
        pre_arg_assign = []
        for plugin in self.plugins:
            arg_unpack = plugin.process_all_unpacks(arg_unpack, option)
            pre_arg_assign = plugin.process_pre_arg_assign(pre_arg_assign, option)

        # Generate arg assignment and call arguments
        arg_unpack = self.map_selected_arguments('get_type_unpack',
                                                 'process_single_unpack', option, option['arguments'])
        arg_assign, call_arg = self.build_option_args(option['arguments'], arg_unpack)

        call_arg = ', '.join(call_arg)
        for plugin in self.plugins:
            call_arg = plugin.process_all_call_arg(call_arg, option)

        # Generate call
        raw_call = self.CALL_TEMPLATE.substitute(cname=option['cname'], arg_unpack=arg_unpack)
        call = self.get_return_wrapper(option).substitute(call=raw_call)
        try:
            return_result = self.get_return_wrapper(option).substitute()
            call = self.FUNCTION_CALL_TEMPLATE.substitute(capture_result='',
                                                          cname=option['cname'], call_arg=call_arg)
        except KeyError:
            return_result = self.get_return_wrapper(option).substitute(result='__result')
            call = self.FUNCTION_CALL_TEMPLATE.substitute(capture_result=(option['return'] + ' __result = '),
                                                          cname=option['cname'], call_arg=call_arg)

        code_template = deepcopy(self.OPTION_CODE_TEMPLATE)
        for plugin in self.plugins:
            call = plugin.process_call(call, option)
            call = '\n '.join(map(lambda s: s.strip(), call.split('\n')))
            code_template = plugin.process_option_code_template(code_template,
                                                                option)
        code_template = Template('\n'.join(code_template))
        code = code_template.substitute(call=call, return_result=return_result)
        code = self.indent_code(code)
        pre_arg_assign = self.indent_code('\n'.join(pre_arg_assign))
        arg_assign = self.indent_code('\n'.join(arg_assign))

        # Put everything together
        return self.OPTION_TEMPLATE.substitute(
            els=('} else ' if not is_first else ''),
            arg_check=arg_checks,
            call=call
            pre_arg_assign=pre_arg_assign,
            arg_assign=arg_assign,
            code=code,
        )
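The reworked generate_option above decides whether to capture the C call's result by probing the return wrapper: substitute() with no arguments raises KeyError exactly when the wrapper references $result, in which case a '__result' capture is generated. A minimal sketch of that dispatch (editor's illustration, not part of the diff; the cnames are hypothetical):

from string import Template

RETURN_WRAPPERS = {
    'void': Template('Py_RETURN_NONE;'),
    'long': Template('return PyLong_FromLong($result);'),
}
FUNCTION_CALL_TEMPLATE = Template('$capture_result$cname($call_arg);')

def render(ret, cname, call_arg):
    try:
        # No $result in the wrapper: call for side effects only.
        return_result = RETURN_WRAPPERS[ret].substitute()
        call = FUNCTION_CALL_TEMPLATE.substitute(capture_result='',
                                                 cname=cname, call_arg=call_arg)
    except KeyError:
        # Wrapper needs $result: capture the call into __result first.
        return_result = RETURN_WRAPPERS[ret].substitute(result='__result')
        call = FUNCTION_CALL_TEMPLATE.substitute(capture_result=ret + ' __result = ',
                                                 cname=cname, call_arg=call_arg)
    return call + '\n' + return_result

print(render('void', 'THTensor_free', 'arg_self'))
# THTensor_free(arg_self);
# Py_RETURN_NONE;
print(render('long', 'THTensor_nElement', 'arg_self'))
# long __result = THTensor_nElement(arg_self);
# return PyLong_FromLong(__result);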
@@ -1,5 +1,6 @@
from . import CWrapPlugin


class ArgcountChecker(CWrapPlugin):

    def process_all_checks(self, checks, option):
@@ -7,6 +8,6 @@ class ArgcountChecker(CWrapPlugin):
            checks = '__argcount == 0'
        else:
            indent = '\n '
            checks = '__argcount == {} &&'.format(option['num_checked_args']) + \
                indent + checks
            argcount = option['num_checked_args'] + option.get('argcount_offset', 0)
            checks = '__argcount == {} &&'.format(str(argcount)) + indent + checks
        return checks
@@ -1,5 +1,6 @@
from . import CWrapPlugin


class ArgcountSortPlugin(CWrapPlugin):

    def __init__(self, descending=True):
@@ -11,4 +12,3 @@ class ArgcountSortPlugin(CWrapPlugin):
        for declaration in declarations:
            declaration['options'].sort(key=num_checked_args, reverse=self.descending)
        return declarations
@@ -1,6 +1,7 @@
from . import CWrapPlugin
from string import Template


class ArgumentReferences(CWrapPlugin):

    def initialize(self, cwrap):
@@ -1,7 +1,12 @@
from . import CWrapPlugin


class AutoGPU(CWrapPlugin):

    def __init__(self, has_self=True, condition=None):
        self.has_self = has_self
        self.condition = condition

    DEFINES = """
#ifdef THC_GENERIC_FILE
#define THCP_AUTO_GPU 1
@@ -10,16 +15,16 @@ class AutoGPU(CWrapPlugin):
#endif
"""

    BEFORE_CALL = """
#if IS_CUDA
  THCPAutoGPU __autogpu_guard = THCPAutoGPU(args, (PyObject*)self);
#endif
"""
    def process_pre_arg_assign(self, template, option):
        if not option.get('auto_gpu', True):
            return template
        call = 'THCPAutoGPU __autogpu_guard = THCPAutoGPU(args{});'.format(
            ', (PyObject*)self' if self.has_self else '')

    def process_call(self, code, option):
        return self.BEFORE_CALL + code
        if self.condition is not None:
            call = "#if {0}\n {1}\n#endif\n".format(self.condition, call)

        return [call] + template

    def process_full_file(self, code):
        return self.DEFINES + code
28  tools/cwrap/plugins/BeforeAfterCall.py  Normal file
@@ -0,0 +1,28 @@
from . import CWrapPlugin
from string import Template


class BeforeAfterCall(CWrapPlugin):

    def initialize(self, cwrap):
        self.cwrap = cwrap

    def insert_snippet(self, template, option, offset, name):
        prepend_str = option.get(name)
        if prepend_str is None:
            return
        if '$' in prepend_str:
            before_call_template = Template(option[name])
            args = {'arg' + str(i): self.cwrap.get_arg_accessor(arg, option) for i, arg
                    in enumerate(option['arguments'])}
            prepend_str = before_call_template.substitute(args)
        template.insert(offset, prepend_str)

    def process_option_code_template(self, template, option):
        if option.get('before_call') or option.get('after_call'):
            call_idx = template.index('$call')
            self.insert_snippet(template, option, call_idx, 'before_call')
            # call position might have changed
            call_idx = template.index('$call')
            self.insert_snippet(template, option, call_idx + 1, 'after_call')
        return template
@@ -1,18 +0,0 @@
from . import CWrapPlugin
from string import Template

class BeforeCall(CWrapPlugin):

    def initialize(self, cwrap):
        self.cwrap = cwrap

    def process_call(self, code, option):
        if option.get('before_call', False):
            if '$' in option['before_call']:
                template = Template(option['before_call'])
                args = {'arg' + str(i): self.cwrap.get_arg_accessor(arg, option) for i, arg
                        in enumerate(option['arguments'])}
                return template.substitute(args) + code
            else:
                return option['before_call'] + code
        return code
28  tools/cwrap/plugins/BoolOption.py  Normal file
@@ -0,0 +1,28 @@
from . import CWrapPlugin
from string import Template


class BoolOption(CWrapPlugin):

    UNPACK_TEMPLATE = Template('$arg == Py_True ? $if_true : $if_false')

    def is_bool_option(self, arg):
        return arg['type'] == 'bool' and 'if_true' in arg and 'if_false' in arg

    def process_declarations(self, declarations):
        for declaration in declarations:
            for option in declaration['options']:
                for arg in option['arguments']:
                    if self.is_bool_option(arg):
                        arg['is_bool_option'] = True
                        arg['type'] = 'const char*'
        return declarations

    def get_type_check(self, arg, option):
        if arg.get('is_bool_option', False):
            return Template('PyBool_Check($arg)')

    def get_type_unpack(self, arg, option):
        if arg.get('is_bool_option', False):
            return Template(self.UNPACK_TEMPLATE.safe_substitute(
                if_true=arg['if_true'], if_false=arg['if_false']))
@@ -1,6 +1,7 @@
from . import CWrapPlugin
from string import Template


class ConstantArguments(CWrapPlugin):

    def process_declarations(self, declarations):
@@ -18,5 +19,3 @@ class ConstantArguments(CWrapPlugin):
    def get_arg_accessor(self, arg, option):
        if arg['type'] == 'CONSTANT':
            return arg['name']
179  tools/cwrap/plugins/CuDNNPlugin.py  Normal file
@@ -0,0 +1,179 @@
from string import Template
import copy
from copy import deepcopy
from . import CWrapPlugin
from itertools import product


class CuDNNPlugin(CWrapPlugin):

    TYPE_UNPACK = {
        'THTensor*': Template('((THPVoidTensor*)$arg)->cdata'),
        'int': Template('THPUtils_unpackLong($arg)'),
        'std::vector<int>': Template('THPUtils_unpackIntTuple($arg)'),
        'cudnnDataType_t': Template('$arg'),
        'cudnnHandle_t': Template('$arg'),
        'Convolution*': Template('(Convolution*)THPWrapper_get($arg)'),
        'bool': Template('$arg == Py_True'),
        'double': Template('THPDoubleUtils_unpackReal($arg)'),
    }

    INPUT_ARGUMENT_MAP = {
        'THTensor*': 'THVoidTensor*',
    }

    TYPE_CHECK = {
        'Convolution*': Template('THPWrapper_check($arg)'),
        'THTensor*': Template('(PyObject*)Py_TYPE($arg) == tensorClass'),
        'int': Template('THPUtils_checkLong($arg)'),
        'std::vector<int>': Template('THPUtils_checkIntTuple($arg)'),
        'bool': Template('PyBool_Check($arg)'),
        'double': Template('THPDoubleUtils_checkReal($arg)'),
    }

    RETURN_WRAPPER = {
        'Convolution*': Template('return THPWrapper_New($result, [](void* arg) { delete (Convolution*)arg; });'),
    }

    METHODS_DECLARATION = Template("""
static PyMethodDef _THCUDNN_methods[] = {
$methods
  {NULL}
};

PyMethodDef* THCUDNN_methods()
{
  return _THCUDNN_methods;
}
""")

    WRAPPER_TEMPLATE = Template("""\
static PyObject * $name(PyObject *self, PyObject *args, PyObject *kwargs)
{
  HANDLE_TH_ERRORS
  int __tuplecount = args ? PyTuple_Size(args) : 0;
  int __dictcount = kwargs ? PyDict_Size(kwargs) : 0;
  int __argcount = __tuplecount + __dictcount;
  PyObject* tensorClass = getTensorClass(args);
  THCPAutoGPU __autogpu_guard = THCPAutoGPU(args);

  $options
  }

  THPUtils_invalidArguments(args, kwargs, "$readable_name", $num_options, $expected_args);
  return NULL;
  END_HANDLE_TH_ERRORS
}
""")

    RELEASE_ARG = Template("_${name}_guard.release();")

    TYPE_NAMES = {
        'THTensor*': '" THPTensorStr "',
        'long': 'int',
        'bool': 'bool',
        'int': 'int',
    }

    def __init__(self):
        self.declarations = []

    def get_type_unpack(self, arg, option):
        return self.TYPE_UNPACK.get(arg['type'], None)

    def get_type_check(self, arg, option):
        return self.TYPE_CHECK.get(arg['type'], None)

    def get_assign_args(self, arguments):
        assign_args = []
        for arg in arguments:
            arg = copy.copy(arg)
            new_type = self.INPUT_ARGUMENT_MAP.get(arg['type'])
            if new_type is not None:
                arg['type'] = new_type
            assign_args.append(arg)
        return assign_args

    def get_wrapper_template(self, declaration):
        arg_desc = []
        for option in declaration['options']:
            option_desc = [self.TYPE_NAMES.get(arg['type'], arg['type']) + ' ' + arg['name']
                           for arg in option['arguments']
                           if not arg.get('ignore_check', False)]
            # TODO: this should probably go to THPLongArgsPlugin
            if option_desc:
                arg_desc.append('({})'.format(', '.join(option_desc)))
            else:
                arg_desc.append('no arguments')
        arg_desc.sort(key=len)
        arg_desc = ['"' + desc + '"' for desc in arg_desc]
        arg_str = ', '.join(arg_desc)
        readable_name = declaration['python_name']
        return Template(self.WRAPPER_TEMPLATE.safe_substitute(
            readable_name=readable_name, num_options=len(arg_desc),
            expected_args=arg_str))

    def get_return_wrapper(self, option):
        return self.RETURN_WRAPPER.get(option['return'], None)

    def get_arg_accessor(self, arg, option):
        name = arg['name']
        if name == 'self':
            return 'self'
        elif name == 'dataType':
            return 'getCudnnDataType(tensorClass)'
        elif name == 'handle':
            return 'getCudnnHandle()'

    def process_declarations(self, declarations):
        for declaration in declarations:
            declaration.setdefault('python_name', '_{}'.format(declaration['name']))
            declaration['name'] = 'THCUDNN_{}'.format(declaration['name'])
            self.declarations.append(declaration)
            for option in declaration['options']:
                for arg in option['arguments']:
                    if arg['name'] in ['self', 'state', 'dataType', 'handle']:
                        arg['ignore_check'] = True
            declaration['options'] = self.filter_unique_options(declaration['options'])
        return declarations

    def filter_unique_options(self, options):
        def signature(option):
            return '#'.join(arg['type'] for arg in option['arguments']
                            if 'ignore_check' not in arg or not arg['ignore_check'])
        seen_signatures = set()
        unique = []
        for option in options:
            sig = signature(option)
            if sig not in seen_signatures:
                unique.append(option)
                seen_signatures.add(sig)
        return unique

    def preprocessor_guard(self, code, condition):
        return '#if ' + condition + '\n' + code + '#endif\n'

    def process_wrapper(self, code, declaration):
        if 'defined_if' in declaration:
            return self.preprocessor_guard(code, declaration['defined_if'])
        return code

    def process_all_call_arg(self, code, option):
        return 'state, ' + code

    def declare_methods(self):
        methods = ''
        for declaration in self.declarations:
            extra_flags = ' | ' + declaration.get('method_flags') if 'method_flags' in declaration else ''
            if not declaration.get('only_register'):
                extra_flags += ' | METH_KEYWORDS'
            entry = Template('  {"$python_name", (PyCFunction)$name, METH_VARARGS$extra_flags, NULL},\n').substitute(
                python_name=declaration['python_name'], name=declaration['name'], extra_flags=extra_flags
            )
            if 'defined_if' in declaration:
                entry = self.preprocessor_guard(entry, declaration['defined_if'])
            methods += entry
        return self.METHODS_DECLARATION.substitute(methods=methods)

    def process_full_file(self, code):
        return code + self.declare_methods()
29  tools/cwrap/plugins/GILRelease.py  Normal file
@@ -0,0 +1,29 @@
from . import CWrapPlugin
from string import Template


class GILRelease(CWrapPlugin):

    OPTION_START = [
        'PyThreadState *_save = NULL;',
        'try {',
    ]

    BEFORE_CALL = 'Py_UNBLOCK_THREADS;'

    AFTER_CALL = 'Py_BLOCK_THREADS;'

    OPTION_END = [
        '} catch (...) {',
        'if (_save) {',
        'Py_BLOCK_THREADS;',
        '}',
        'throw;',
        '}',
    ]

    def process_option_code_template(self, template, option):
        call_idx = template.index('$call')
        template.insert(call_idx, self.BEFORE_CALL)
        template.insert(call_idx + 2, self.AFTER_CALL)
        return self.OPTION_START + template + self.OPTION_END
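The net effect of process_option_code_template above is easiest to see on the default ['$call', '$return_result'] template from cwrap.py: the call is bracketed by GIL release/reacquire, and the whole option body is wrapped in a try/catch that restores the GIL if the wrapped call throws. A minimal sketch (editor's illustration, not part of the diff):

template = GILRelease().process_option_code_template(['$call', '$return_result'], {})
print('\n'.join(template))
# PyThreadState *_save = NULL;
# try {
# Py_UNBLOCK_THREADS;
# $call
# Py_BLOCK_THREADS;
# $return_result
# } catch (...) {
# if (_save) {
# Py_BLOCK_THREADS;
# }
# throw;
# }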
223  tools/cwrap/plugins/GenericNN.py  Normal file
@@ -0,0 +1,223 @@
import copy
from string import Template
from . import CWrapPlugin


class GenericNN(CWrapPlugin):
    INPUT_TYPE_CHECK = Template("checkTypes(is_cuda, $type, $tensor_args);")

    HEADER_TEMPLATE = Template("void $name($args);")

    WRAPPER_TEMPLATE = Template("""\
void $name($args)
{
  bool is_cuda = $input->isCuda();
  auto type = $input->type();
  $type_check
  $options
  } else {
    throw std::runtime_error("invalid arguments");
  }
}
""")

    THNN_TEMPLATE = Template("""\
    if (type == thpp::Type::FLOAT) {
        THNN_Float$name(
            NULL,
            $float_args);
    } else if (type == thpp::Type::DOUBLE) {
        THNN_Double$name(
            NULL,
            $double_args);
    } else {
        throw std::runtime_error("unsupported tensor type");
    }""")

    THCUNN_TEMPLATE = Template("""\
#ifdef WITH_CUDA
    if (type == thpp::Type::FLOAT) {
        THNN_Cuda$name(
            state,
            $float_args);
    } else if (type == thpp::Type::DOUBLE) {
        THNN_CudaDouble$name(
            state,
            $double_args);
    } else if (type == thpp::Type::HALF) {
        THNN_CudaHalf$name(
            state,
            $half_args);
    } else {
        throw std::runtime_error("unsupported tensor type");
    }
#endif
""")

    INDEX_TENSOR_TYPES = {'THIndexTensor*', 'THCIndexTensor*'}

    REAL_TENSOR_TYPES = {'THTensor*', 'THCTensor*'}

    INPUT_ARGUMENT_MAP = {
        'THNNState*': 'void*',
        'THCState*': 'void*',
        'THTensor*': 'thpp::Tensor*',
        'THCTensor*': 'thpp::Tensor*',
        'THIndexTensor*': 'thpp::Tensor*',
        'THCIndexTensor*': 'thpp::Tensor*',
        'THIndex_t': 'long',
        'accreal': 'double',
    }

    def __init__(self, header=False):
        self.header = header
        self.declarations = []

    def process_full_file(self, base_wrapper):
        if self.header:
            wrapper = '#pragma once\n\n'
            wrapper += '#include <THPP/Tensor.hpp>\n\n'
        else:
            wrapper = '#include "THNN_generic.h"\n'
            wrapper = '#include "THNN_generic.inc.h"\n\n'
        wrapper += 'namespace torch { namespace nn {\n\n'
        wrapper += base_wrapper
        wrapper += '}} // namespace torch::nn\n'
        return wrapper

    def process_declarations(self, declarations):
        for declaration in declarations:
            base_args = declaration['options'][0]['arguments']
            for option in declaration['options']:
                for idx, arg in enumerate(option['arguments']):
                    arg['assign_name'] = base_args[idx]['name']
                    arg['assign_type'] = base_args[idx]['type']
                    if idx != 1:
                        arg['ignore_check'] = True
        return declarations

    def get_arg_accessor(self, arg, option):
        return self.get_type_unpack(arg, option)

    def process_pre_arg_assign(self, pre_arg_assign, option):
        if option['backend'] == 'cunn':
            # Enclose arg_assign with CUDA guard
            pre_arg_assign.append('#ifdef WITH_CUDA')
        return pre_arg_assign

    def process_option_code_template(self, template, option):
        template = []
        if option['backend'] == 'cunn':
            template.append('#endif')

        def base_cast(arg, CReal, real):
            name = 'arg_' + arg['assign_name']
            type = arg['type']
            if type in self.REAL_TENSOR_TYPES:
                return ('(TH{CReal}Tensor*){name}->cdata()'
                        .format(CReal=CReal, name=name))
            elif type in self.INDEX_TENSOR_TYPES:
                return '({type}){name}->cdata()'.format(type=type, name=name)
            elif type == 'THCState*':
                return '({}){}'.format(type, name)
            elif type == 'real':
                if real == 'half':
                    return 'THC_float2half({})'.format(name)
                return '({real}){name}'.format(real=real, name=name)
            return name

        def cast(arg, CReal, real):
            expr = base_cast(arg, CReal, real)
            if arg.get('optional', False):
                name = 'arg_' + arg['assign_name']
                return '{name} ? {expr} : NULL'.format(name=name, expr=expr)
            return expr

        if option['backend'] == 'nn':
            float_args = []
            double_args = []
            for idx, arg in enumerate(option['arguments']):
                float_args.append(cast(arg, 'Float', 'float'))
                double_args.append(cast(arg, 'Double', 'double'))

            code = self.THNN_TEMPLATE.substitute(
                name=option['cname'],
                float_args=',\n'.join(float_args),
                double_args=',\n'.join(double_args))
            template.append(code)

        elif option['backend'] == 'cunn':
            float_args = []
            double_args = []
            half_args = []
            for idx, arg in enumerate(option['arguments']):
                float_args.append(cast(arg, 'Cuda', 'float'))
                double_args.append(cast(arg, 'CudaDouble', 'double'))
                half_args.append(cast(arg, 'CudaHalf', 'half'))

            code = self.THCUNN_TEMPLATE.substitute(
                name=option['cname'],
                float_args=',\n'.join(float_args),
                double_args=',\n'.join(double_args),
                half_args=',\n'.join(half_args))
            template.append(code)

        template.append('')
        return template

    def get_type_unpack(self, arg, option):
        return Template(arg.get('assign_name', arg['name']))

    def get_type_check(self, arg, option):
        if option['backend'] == 'cunn':
            return Template('is_cuda')
        else:
            return Template('!is_cuda')

    def get_assign_args(self, arguments):
        assign_args = []
        for arg in arguments:
            arg = copy.copy(arg)
            new_type = self.INPUT_ARGUMENT_MAP.get(arg['type'])
            if new_type is not None:
                arg['type'] = new_type
            assign_args.append(arg)
        return assign_args

    def get_wrapper_template(self, declaration):
        # get assign arguments string
        base_arguments = declaration['options'][0]['arguments']
        args = self.get_assign_args(base_arguments)
        arg_str = ', '.join([arg['type'] + ' ' + arg['name'] for arg in args])

        if self.header:
            return Template(self.HEADER_TEMPLATE.safe_substitute(args=arg_str))

        def get_checked_args(tensor_types):
            checked_args = []
            for arg in base_arguments:
                if arg['type'] in tensor_types:
                    name = arg.get('assign_name', arg['name'])
                    name_str = name
                    if arg.get('optional', False):
                        name_str = '?' + name_str
                    checked_args += ['"' + name_str + '"', name]
            checked_args += ['NULL']
            return checked_args

        real_args = get_checked_args(self.REAL_TENSOR_TYPES)
        long_args = get_checked_args(self.INDEX_TENSOR_TYPES)

        # check input types
        types_checks = []
        if len(real_args) > 1:
            types_checks.append(self.INPUT_TYPE_CHECK.substitute(
                type='type', tensor_args=', '.join(real_args)))
        if len(long_args) > 1:
            types_checks.append(self.INPUT_TYPE_CHECK.substitute(
                type='thpp::Type::LONG', tensor_args=', '.join(long_args)))

        return Template(self.WRAPPER_TEMPLATE.safe_substitute(
            input=args[0]['name'],
            args=arg_str,
            type_check='\n '.join(types_checks)))
71  tools/cwrap/plugins/KwargsPlugin.py  Normal file
@@ -0,0 +1,71 @@
from . import CWrapPlugin
from string import Template


class KwargsPlugin(CWrapPlugin):

    ACCESSOR_TEMPLATE = Template('(__tuplecount > $idx ? PyTuple_GET_ITEM(args, $idx) : __kw_$name)')
    KWARG_ONLY_ACCESSOR_TEMPLATE = Template('__kw_$name')
    CHECK_TEMPLATE = Template('(__tuplecount > $idx || __kw_$name) && $code')
    KWARG_ONLY_CHECK_TEMPLATE = Template('__kw_$name && $code')
    WRAPPER_TEMPLATE = Template("""
    $declarations
    if (kwargs) {
      $lookups
    }
""")

    def process_declarations(self, declarations):
        # We don't have access to declaration or options in get_arg_accessor
        # and process_single_check, so we have to push the flag down to
        # the args.
        for declaration in declarations:
            if declaration.get('no_kwargs'):
                for option in declaration['options']:
                    for arg in option['arguments']:
                        arg['no_kwargs'] = True
        # we need to use offsets for arg position in *arg if kwarg_only args
        # are not at the end
        for declaration in declarations:
            for option in declaration['options']:
                offset = 0
                for arg in option['arguments']:
                    if arg.get('kwarg_only') and not arg.get('ignore_check', False):
                        offset += 1
                    else:
                        arg['kwarg_offset'] = offset
        return declarations

    def get_arg_accessor(self, arg, option):
        if arg.get('no_kwargs'):
            return
        if arg.get('kwarg_only'):
            return self.KWARG_ONLY_ACCESSOR_TEMPLATE.substitute(name=arg['name'])
        return self.ACCESSOR_TEMPLATE.substitute(idx=arg['idx'] - arg['kwarg_offset'], name=arg['name'])

    def process_single_check(self, code, arg, arg_accessor):
        if arg.get('no_kwargs'):
            return code
        if arg.get('kwarg_only'):
            return self.KWARG_ONLY_CHECK_TEMPLATE.substitute(name=arg['name'], code=code)
        return self.CHECK_TEMPLATE.substitute(idx=arg['idx'] - arg['kwarg_offset'], name=arg['name'], code=code)

    def process_wrapper(self, code, declaration):
        if declaration.get('no_kwargs'):
            return code
        seen_args = set()
        args = []
        for option in declaration['options']:
            for arg in option['arguments']:
                name = arg['name']
                if (not arg.get('ignore_check') and
                        not arg.get('no_kwargs') and
                        name not in seen_args):
                    seen_args.add(name)
                    args.append(name)
        declarations = '\n '.join(['PyObject *__kw_{} = NULL;'.format(a) for a in args])
        lookups = '\n '.join(
            ['__kw_{name} = PyDict_GetItemString(kwargs, "{name}");'.format(name=a) for a in args])
        start_idx = code.find('{') + 1
        new_code = self.WRAPPER_TEMPLATE.substitute(declarations=declarations, lookups=lookups)
        return code[:start_idx] + new_code + code[start_idx:]
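The two accessor templates above are the crux of the plugin: a positional-or-keyword argument falls back to its __kw_ lookup when too few positional arguments were passed, while a keyword-only argument reads the lookup directly. A minimal sketch of the expansion (editor's illustration, not part of the diff; the argument names are hypothetical):

from string import Template

ACCESSOR = Template('(__tuplecount > $idx ? PyTuple_GET_ITEM(args, $idx) : __kw_$name)')
KWARG_ONLY = Template('__kw_$name')

print(ACCESSOR.substitute(idx=0, name='input'))
# (__tuplecount > 0 ? PyTuple_GET_ITEM(args, 0) : __kw_input)
print(KWARG_ONLY.substitute(name='out'))
# __kw_out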
@@ -1,6 +1,8 @@
from . import CWrapPlugin


class NullableArguments(CWrapPlugin):

    def process_single_check(self, code, arg, arg_accessor):
        if 'nullable' in arg and arg['nullable']:
            return '({} || {} == Py_None)'.format(code, arg_accessor)
@@ -10,5 +12,3 @@ class NullableArguments(CWrapPlugin):
        if 'nullable' in arg and arg['nullable']:
            return '({} == Py_None ? NULL : {})'.format(arg_accessor, code)
        return code
@@ -2,6 +2,7 @@ from copy import deepcopy
from . import CWrapPlugin
from itertools import product


class OptionalArguments(CWrapPlugin):

    def process_declarations(self, declarations):
@@ -22,18 +23,36 @@ class OptionalArguments(CWrapPlugin):
                    # PyYAML interprets NULL as None...
                    arg['name'] = 'NULL' if arg['default'] is None else arg['default']
                new_options.append(option_copy)
            declaration['options'] = self.filter_unique_options(declaration['options'] + new_options)
            declaration['options'] = self.filter_unique_options(new_options)
        return declarations

    def filter_unique_options(self, options):
        def signature(option):
            return '#'.join(arg['type'] for arg in option['arguments'] if not 'ignore_check' in arg or not arg['ignore_check'])
        def signature(option, kwarg_only_count):
            if kwarg_only_count == 0:
                kwarg_only_count = None
            else:
                kwarg_only_count = -kwarg_only_count
            arg_signature = '#'.join(
                arg['type']
                for arg in option['arguments'][:kwarg_only_count]
                if not arg.get('ignore_check'))
            if kwarg_only_count is None:
                return arg_signature
            kwarg_only_signature = '#'.join(
                arg['name'] + '#' + arg['type']
                for arg in option['arguments'][kwarg_only_count:]
                if not arg.get('ignore_check'))
            return arg_signature + "#-#" + kwarg_only_signature
        seen_signatures = set()
        unique = []
        for option in options:
            sig = signature(option)
            if sig not in seen_signatures:
                unique.append(option)
                seen_signatures.add(sig)
            for num_kwarg_only in range(0, len(option['arguments']) + 1):
                sig = signature(option, num_kwarg_only)
                if sig not in seen_signatures:
                    if num_kwarg_only > 0:
                        for arg in option['arguments'][-num_kwarg_only:]:
                            arg['kwarg_only'] = True
                    unique.append(option)
                    seen_signatures.add(sig)
                    break
        return unique
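The extended signature above keys the kwarg-only tail by name as well as type, so two overloads with identical positional types stop colliding once enough trailing arguments are made keyword-only. A standalone sketch of the function (editor's illustration, not part of the diff; the option dict is hypothetical):

def signature(option, kwarg_only_count):
    if kwarg_only_count == 0:
        kwarg_only_count = None
    else:
        kwarg_only_count = -kwarg_only_count
    arg_signature = '#'.join(
        arg['type'] for arg in option['arguments'][:kwarg_only_count]
        if not arg.get('ignore_check'))
    if kwarg_only_count is None:
        return arg_signature
    kwarg_only_signature = '#'.join(
        arg['name'] + '#' + arg['type']
        for arg in option['arguments'][kwarg_only_count:]
        if not arg.get('ignore_check'))
    return arg_signature + '#-#' + kwarg_only_signature

opt = {'arguments': [{'name': 'self', 'type': 'THTensor*'},
                     {'name': 'alpha', 'type': 'real'}]}
print(signature(opt, 0))  # THTensor*#real
print(signature(opt, 1))  # THTensor*#-#alpha#real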
@@ -1,9 +1,10 @@
from . import CWrapPlugin
from string import Template


class ReturnArguments(CWrapPlugin):
    ARGUMENT_RETURN_TEMPLATE = Template("$call;Py_INCREF($arg);\nreturn (PyObject*)($arg);")
    TUPLE_RETURN_TEMPLATE = Template("$call;\nreturn PyTuple_Pack($num_args, $args);")
    ARGUMENT_RETURN_TEMPLATE = Template("Py_INCREF($arg);\nreturn (PyObject*)($arg);")
    TUPLE_RETURN_TEMPLATE = Template("return PyTuple_Pack($num_args, $args);")

    def initialize(self, cwrap):
        self.cwrap = cwrap
@@ -16,4 +17,5 @@ class ReturnArguments(CWrapPlugin):
        if len(args) == 1:
            return Template(self.ARGUMENT_RETURN_TEMPLATE.safe_substitute(arg=accessors[0]))
        else:
            return Template(self.TUPLE_RETURN_TEMPLATE.safe_substitute(num_args=len(args), args=', '.join(accessors)))
            return Template(self.TUPLE_RETURN_TEMPLATE.safe_substitute(num_args=len(args),
                                                                       args=', '.join(accessors)))
@@ -3,8 +3,13 @@ from string import Template
from . import CWrapPlugin


with open(os.path.join(os.path.dirname(__file__), 'templates', 'module_head.cpp'), 'r') as f:
    MODULE_HEAD = Template(f.read())
MODULE_HEAD = """
#include <Python.h>
#include <exception>

#include "THP_API.h"

"""
with open(os.path.join(os.path.dirname(__file__), 'templates', 'module_tail.cpp'), 'r') as f:
    MODULE_TAIL = Template(f.read())

@@ -21,64 +26,88 @@ $METHODS
class StandaloneExtension(CWrapPlugin):

    TYPE_UNPACK = {
        'THFloatTensor*': Template('(THFloatTensor*)(((Tensor*)$arg)->cdata)'),
        'THDoubleTensor*': Template('(THDoubleTensor*)(((Tensor*)$arg)->cdata)'),
        'THLongTensor*': Template('(THLongTensor*)(((Tensor*)$arg)->cdata)'),
        'THIntTensor*': Template('(THIntTensor*)(((Tensor*)$arg)->cdata)'),
        'THCudaTensor*': Template('(THCudaTensor*)(((Tensor*)$arg)->cdata)'),
        'THCudaLongTensor*': Template('(THCudaLongTensor*)(((Tensor*)$arg)->cdata)'),
        'float': Template('__getFloat($arg)'),
        'double': Template('__getFloat($arg)'),
        'bool': Template('__getLong($arg)'),
        'int': Template('__getLong($arg)'),
        'long': Template('__getLong($arg)'),
        'void*': Template('(void*)__getLong($arg)'),
        # TODO: implement this
        'THGenerator*': Template('NULL'),
        'THFloatTensor*': Template('THPFloatTensor_CData((THPFloatTensor*)$arg)'),
        'THDoubleTensor*': Template('THPDoubleTensor_CData((THPDoubleTensor*)$arg)'),
        'THLongTensor*': Template('THPLongTensor_CData((THPLongTensor*)$arg)'),
        'THIntTensor*': Template('THPIntTensor_CData((THPIntTensor*)$arg)'),
        'THCudaHalfTensor*': Template('THCPHalfTensor_CData((THCPHalfTensor*)$arg)'),
        'THCudaTensor*': Template('THCPFloatTensor_CData((THCPFloatTensor*)$arg)'),
        'THCudaDoubleTensor*': Template('THCPDoubleTensor_CData((THCPDoubleTensor*)$arg)'),
        'THCudaLongTensor*': Template('THCPLongTensor_CData((THCPLongTensor*)$arg)'),
        'half': Template('THPHalfUtils_unpackReal($arg)'),
        'float': Template('THPFloatUtils_unpackReal($arg)'),
        'double': Template('THPDoubleUtils_unpackReal($arg)'),
        'bool': Template('($arg == Py_True ? true : false)'),
        'int': Template('THPUtils_unpackLong($arg)'),
        'long': Template('THPUtils_unpackLong($arg)'),
        'void*': Template('(void*)THPUtils_unpackLong($arg)'),
        'THGenerator*': Template('THPGenerator_CData((THPGenerator*)$arg)'),
    }

    TYPE_CHECK = {
        'THDoubleTensor*': Template('(PyObject*)Py_TYPE($arg) == THPDoubleTensorClass'),
        'THFloatTensor*': Template('(PyObject*)Py_TYPE($arg) == THPFloatTensorClass'),
        'THLongTensor*': Template('(PyObject*)Py_TYPE($arg) == THPLongTensorClass'),
        'THIntTensor*': Template('(PyObject*)Py_TYPE($arg) == THPIntTensorClass'),
        'THCudaTensor*': Template('(PyObject*)Py_TYPE($arg) == THCPFloatTensorClass'),
        'THDoubleTensor*': Template('(PyObject*)Py_TYPE($arg) == THPDoubleTensorClass'),
        'THFloatTensor*': Template('(PyObject*)Py_TYPE($arg) == THPFloatTensorClass'),
        'THLongTensor*': Template('(PyObject*)Py_TYPE($arg) == THPLongTensorClass'),
        'THIntTensor*': Template('(PyObject*)Py_TYPE($arg) == THPIntTensorClass'),
        'THCudaHalfTensor*': Template('THCPHalfTensor_Check($arg)'),
        'THCudaTensor*': Template('(PyObject*)Py_TYPE($arg) == THCPFloatTensorClass'),
        'THCudaDoubleTensor*': Template('THCPDoubleTensor_Check($arg)'),
        'THCudaLongTensor*': Template('(PyObject*)Py_TYPE($arg) == THCPLongTensorClass'),
        'float': Template('__checkFloat($arg)'),
        'double': Template('__checkFloat($arg)'),
        'bool': Template('__checkLong($arg)'),
        'int': Template('__checkLong($arg)'),
        'long': Template('__checkLong($arg)'),
        'void*': Template('__checkLong($arg)'),
        # TODO: implement this
        'THGenerator*': Template('false'),
        'half': Template('THPHalfUtils_checkReal($arg)'),
        'float': Template('THPFloatUtils_checkReal($arg)'),
        'double': Template('THPDoubleUtils_checkReal($arg)'),
        'bool': Template('PyBool_Check($arg)'),
        'int': Template('THPUtils_checkLong($arg)'),
        'long': Template('THPUtils_checkLong($arg)'),
        'void*': Template('THPUtils_checkLong($arg)'),
        'THGenerator*': Template('(PyObject*)Py_TYPE($arg) == THPGeneratorClass'),
    }

    WRAPPER_TEMPLATE = Template("""
PyObject * $name(PyObject *_unused, PyObject *args)
{
  HANDLE_TH_ERRORS
  int __argcount = args ? PyTuple_Size(args) : 0;
  try {
    $options
    } else {
      __invalidArgs(args, "");
      return NULL;
    }
  } catch (std::exception &e) {
    PyErr_SetString(PyExc_RuntimeError, e.what());
    return NULL;
  $options
  } else {
    THPUtils_invalidArguments(args, NULL, "$name", 1, $expected_args);
    return NULL;
  }
  END_HANDLE_TH_ERRORS
}
""")

    def __init__(self, module_name, with_cuda=False):
    TYPE_NAMES = {
        'THGenerator*': 'Generator',
        'THCudaHalfTensor*': 'torch.cuda.HalfTensor',
        'THCudaTensor*': 'torch.cuda.FloatTensor',
        'THCudaDoubleTensor*': 'torch.cuda.DoubleTensor',
        'THCudaLongTensor*': 'torch.cuda.LongTensor',
        'THDoubleTensor*': 'torch.DoubleTensor',
        'THFloatTensor*': 'torch.FloatTensor',
        'THBoolTensor*': 'torch.ByteTensor',
        'THLongTensor*': 'torch.LongTensor',
        'THIndexTensor*': 'torch.LongTensor',
        'THIntTensor*': 'torch.IntTensor',
        'THLongStorage*': 'torch.LongStorage',
        'long': 'int',
        'int': 'int',
        'real': 'float',
        'half': 'float',
        'double': 'float',
        'float': 'float',
        'accreal': 'float',
        'bool': 'bool',
        'void*': 'int',
    }

    def __init__(self, module_name):
        self.module_name = module_name
        self.with_cuda = with_cuda
        self.declarations = []

    def process_full_file(self, code):
        short_name = self.module_name.split('.')[-1]
        new_code = MODULE_HEAD.substitute(requres_cuda=('1' if self.with_cuda else '0'))
        new_code = MODULE_HEAD
        new_code += code
        new_code += self.declare_module_methods()
        new_code += MODULE_TAIL.substitute(full_name=self.module_name, short_name=short_name)
@@ -101,4 +130,22 @@ PyObject * $name(PyObject *_unused, PyObject *args)
        return self.TYPE_CHECK.get(arg['type'], None)

    def get_wrapper_template(self, declaration):
        return self.WRAPPER_TEMPLATE
        arg_desc = []

        def describe_arg(arg):
            desc = self.TYPE_NAMES[arg['type']] + ' ' + arg['name']
            if arg.get('nullable'):
                return '[{} or None]'.format(desc)
            return desc
        for option in declaration['options']:
            option_desc = [describe_arg(arg)
                           for arg in option['arguments']
                           if not arg.get('ignore_check', False)]
            if option_desc:
                arg_desc.append('({})'.format(', '.join(option_desc)))
            else:
                arg_desc.append('no arguments')
        arg_desc.sort(key=len)
        arg_desc = ['"' + desc + '"' for desc in arg_desc]
        arg_str = ', '.join(arg_desc)
        return Template(self.WRAPPER_TEMPLATE.safe_substitute(expected_args=arg_str))
@ -1,44 +0,0 @@
|
||||
from string import Template
from . import CWrapPlugin

class THPLongArgsPlugin(CWrapPlugin):
    PARSE_LONG_ARGS = Template("""\
      THLongStoragePtr __long_args_guard = THPUtils_getLongStorage(args, $num_checked);
      THLongStorage* __long_args = __long_args_guard.get();
""")

    def get_arg_accessor(self, arg, option):
        if 'long_args' in option and option['long_args'] and arg['name'] == 'long_args':
            return '__long_args'

    def get_type_unpack(self, arg, option):
        if option.get('long_args', False) and arg['name'] == 'long_args':
            return Template('$arg')

    def process_declarations(self, declarations):
        for declaration in declarations:
            for option in declaration['options']:
                if not 'long_args' in option or not option['long_args']:
                    continue
                for arg in option['arguments']:
                    if arg['name'] == 'long_args':
                        arg['ignore_check'] = True
        return declarations

    def process_all_checks(self, code, option):
        if 'long_args' in option and option['long_args']:
            code = code.replace('__argcount ==', '__argcount >')
        return code

    def process_option_code(self, code, option):
        if 'long_args' in option and option['long_args']:
            lines = code.split('\n')
            end_checks = 0
            for i, line in enumerate(lines):
                if ') {' in line:
                    end_checks = i
                    break
            lines = lines[:end_checks+1] + [self.PARSE_LONG_ARGS.substitute(num_checked=option['num_checked_args'])] + lines[end_checks+1:]
            code = '\n'.join(lines)
        return code

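The '__argcount ==' to '__argcount >' rewrite above relaxes the generated arity guard so that a trailing run of integer varargs may follow the fixed arguments. A sketch of the effect on a generated C check (the check string is hypothetical):

    code = 'if (__argcount == 2 &&\n      THPUtils_checkLong(PyTuple_GET_ITEM(args, 0))) {'
    print(code.replace('__argcount ==', '__argcount >'))
    # -> if (__argcount > 2 && ...
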
@@ -1,148 +1,255 @@
from string import Template
from copy import deepcopy
from . import CWrapPlugin
from itertools import product
from itertools import product, chain
from collections import OrderedDict


class THPPlugin(CWrapPlugin):

    TYPE_UNPACK = {
        'THFloatTensor*': Template('((THPFloatTensor*)$arg)->cdata'),
        'THDoubleTensor*': Template('((THPDoubleTensor*)$arg)->cdata'),
        'THLongTensor*': Template('((THPLongTensor*)$arg)->cdata'),
        'THIntTensor*': Template('((THPIntTensor*)$arg)->cdata'),
        'THTensor*': Template('((THPTensor*)$arg)->cdata'),
        'THBoolTensor*': Template('((THPBoolTensor*)$arg)->cdata'),
        'THIndexTensor*': Template('((THPIndexTensor*)$arg)->cdata'),
        'THLongStorage*': Template('((THPLongStorage*)$arg)->cdata'),
        'THStorage*': Template('((THPStorage*)$arg)->cdata'),
        'THGenerator*': Template('((THPGenerator*)$arg)->cdata'),
        'void*': Template('THPUtils_unpackLong($arg)'),
        'long': Template('THPUtils_unpackLong($arg)'),
        'int': Template('THPUtils_unpackLong($arg)'),
        'bool': Template('THPUtils_unpackLong($arg)'),
        'float': Template('THPFloatUtils_unpackReal($arg)'),
        'double': Template('THPDoubleUtils_unpackReal($arg)'),
        'real': Template('THPUtils_(unpackReal)($arg)'),
        'accreal': Template('THPUtils_(unpackAccreal)($arg)'),
        'THFloatTensor*': Template('((THPFloatTensor*)$arg)->cdata'),
        'THDoubleTensor*': Template('((THPDoubleTensor*)$arg)->cdata'),
        'THLongTensor*': Template('((THPLongTensor*)$arg)->cdata'),
        'THIntTensor*': Template('((THPIntTensor*)$arg)->cdata'),
        'THTensor*': Template('((THPTensor*)$arg)->cdata'),
        'THBoolTensor*': Template('((THPBoolTensor*)$arg)->cdata'),
        'THIndexTensor*': Template('((THPIndexTensor*)$arg)->cdata'),
        'THIntegerTensor*': Template('((THPIntegerTensor*)$arg)->cdata'),

        'THCudaTensor*': Template('((THCPFloatTensor*)$arg)->cdata'),
        'THCudaDoubleTensor*': Template('((THCPDoubleTensor*)$arg)->cdata'),
        'THCudaLongTensor*': Template('((THCPLongTensor*)$arg)->cdata'),

        'THSFloatTensor*': Template('((THSPFloatTensor*)$arg)->cdata'),
        'THSDoubleTensor*': Template('((THSPDoubleTensor*)$arg)->cdata'),
        'THSLongTensor*': Template('((THSPLongTensor*)$arg)->cdata'),
        'THSIntTensor*': Template('((THSPIntTensor*)$arg)->cdata'),
        'THSTensor*': Template('((THSPTensor*)$arg)->cdata'),
        'THSBoolTensor*': Template('((THSPBoolTensor*)$arg)->cdata'),
        'THSIndexTensor*': Template('((THSPIndexTensor*)$arg)->cdata'),

        'THLongStorage*': Template('((THPLongStorage*)$arg)->cdata'),
        'THStorage*': Template('((THPStorage*)$arg)->cdata'),
        'THGenerator*': Template('((THPGenerator*)$arg)->cdata'),
        'THSize*': Template('__size.get()'),
        'THStride*': Template('__stride.get()'),
        'void*': Template('THPUtils_unpackLong($arg)'),
        'long': Template('THPUtils_unpackLong($arg)'),
        'int': Template('THPUtils_unpackLong($arg)'),
        'bool': Template('($arg == Py_True ? true : false)'),
        'float': Template('THPFloatUtils_unpackReal($arg)'),
        'double': Template('THPDoubleUtils_unpackReal($arg)'),
        'real': Template('THPUtils_(unpackReal)($arg)'),
        'accreal': Template('THPUtils_(unpackAccreal)($arg)'),
    }

    TYPE_CHECK = {
        'THDoubleTensor*': Template('(PyObject*)Py_TYPE($arg) == THPDoubleTensorClass'),
        'THFloatTensor*': Template('(PyObject*)Py_TYPE($arg) == THPFloatTensorClass'),
        'THLongTensor*': Template('(PyObject*)Py_TYPE($arg) == THPLongTensorClass'),
        'THIntTensor*': Template('(PyObject*)Py_TYPE($arg) == THPIntTensorClass'),
        'THCudaTensor*': Template('(PyObject*)Py_TYPE($arg) == THCPFloatTensorClass'),
        'THTensor*': Template('(PyObject*)Py_TYPE($arg) == THPTensorClass'),
        'THBoolTensor*': Template('(PyObject*)Py_TYPE($arg) == THPBoolTensorClass'),
        'THIndexTensor*': Template('(PyObject*)Py_TYPE($arg) == THPIndexTensorClass'),
        'THLongStorage*': Template('(PyObject*)Py_TYPE($arg) == THPLongStorageClass'),
        'THStorage*': Template('(PyObject*)Py_TYPE($arg) == THPStorageClass'),
        'THGenerator*': Template('Py_TYPE($arg) == &THPGeneratorType'),
        'void*': Template('THPUtils_checkLong($arg)'),
        'long': Template('THPUtils_checkLong($arg)'),
        'int': Template('THPUtils_checkLong($arg)'),
        'bool': Template('THPUtils_checkLong($arg)'),
        'float': Template('THPFloatUtils_checkReal($arg)'),
        'double': Template('THPDoubleUtils_checkReal($arg)'),
        'real': Template('THPUtils_(checkReal)($arg)'),
        # TODO
        'accreal': Template('THPUtils_(checkReal)($arg)'),
        'THDoubleTensor*': Template('(PyObject*)Py_TYPE($arg) == THPDoubleTensorClass'),
        'THFloatTensor*': Template('(PyObject*)Py_TYPE($arg) == THPFloatTensorClass'),
        'THLongTensor*': Template('(PyObject*)Py_TYPE($arg) == THPLongTensorClass'),
        'THIntTensor*': Template('(PyObject*)Py_TYPE($arg) == THPIntTensorClass'),
        'THTensor*': Template('(PyObject*)Py_TYPE($arg) == THPTensorClass'),
        'THBoolTensor*': Template('(PyObject*)Py_TYPE($arg) == THPBoolTensorClass'),
        'THIndexTensor*': Template('(PyObject*)Py_TYPE($arg) == THPIndexTensorClass'),
        'THIntegerTensor*': Template('(PyObject*)Py_TYPE($arg) == THPIntegerTensorClass'),

        'THCudaTensor*': Template('(PyObject*)Py_TYPE($arg) == THCPFloatTensorClass'),
        'THCudaDoubleTensor*': Template('(PyObject*)Py_TYPE($arg) == THCPDoubleTensorClass'),
        'THCudaLongTensor*': Template('(PyObject*)Py_TYPE($arg) == THCPLongTensorClass'),

        'THSDoubleTensor*': Template('(PyObject*)Py_TYPE($arg) == THSPDoubleTensorClass'),
        'THSFloatTensor*': Template('(PyObject*)Py_TYPE($arg) == THSPFloatTensorClass'),
        'THSLongTensor*': Template('(PyObject*)Py_TYPE($arg) == THSPLongTensorClass'),
        'THSIntTensor*': Template('(PyObject*)Py_TYPE($arg) == THSPIntTensorClass'),
        'THSTensor*': Template('(PyObject*)Py_TYPE($arg) == THSPTensorClass'),
        'THSBoolTensor*': Template('(PyObject*)Py_TYPE($arg) == THSPBoolTensorClass'),
        'THSIndexTensor*': Template('(PyObject*)Py_TYPE($arg) == THSPIndexTensorClass'),

        'THLongStorage*': Template('(PyObject*)Py_TYPE($arg) == THPLongStorageClass'),
        'THStorage*': Template('(PyObject*)Py_TYPE($arg) == THPStorageClass'),
        'THGenerator*': Template('(PyObject*)Py_TYPE($arg) == THPGeneratorClass'),
        'THSize*': Template('THPUtils_tryUnpackLongs($arg, __size)'),
        'THStride*': Template('THPUtils_tryUnpackLongs($arg, __stride)'),
        'void*': Template('THPUtils_checkLong($arg)'),
        'long': Template('THPUtils_checkLong($arg)'),
        'int': Template('THPUtils_checkLong($arg)'),
        'bool': Template('PyBool_Check($arg)'),
        'float': Template('THPFloatUtils_checkReal($arg)'),
        'double': Template('THPDoubleUtils_checkReal($arg)'),
        'real': Template('THPUtils_(checkReal)($arg)'),
        'accreal': Template('THPUtils_(checkAccreal)($arg)'),
    }

    SIZE_VARARG_CHECK = Template('THPUtils_tryUnpackLongVarArgs(args, $idx, __size)')

    RETURN_WRAPPER = {
        'THTensor*': Template('return THPTensor_(newObject)($call);'),
        'THLongStorage*': Template('return THPLongStorage_newObject($call);'),
        'THTensor*': Template('return THPTensor_(New)($result);'),
        'THSTensor*': Template('return THSPTensor_(New)($result);'),
        'THLongTensor*': Template('return THPLongTensor_New($result);'),
        'THLongStorage*': Template('return THPLongStorage_New($result);'),
        'THCudaLongTensor*': Template('return THCPLongTensor_New($result);'),
        # TODO: make it smarter - it should return python long if result doesn't fit into an int
        'long': Template('return PyInt_FromLong($call);'),
        # TODO
        'accreal': Template('return PyFloat_FromDouble($call);'),
        'self': Template('$call;\nPy_INCREF(self);\nreturn (PyObject*)self;'),
        'real': Template('return THPUtils_(newReal)($call);'),
        'long': Template('return PyInt_FromLong($result);'),
        'accreal': Template('return THPUtils_(newAccreal)($result);'),
        'self': Template('Py_INCREF(self);\nreturn (PyObject*)self;'),
        'real': Template('return THPUtils_(newReal)($result);'),
    }

    TENSOR_METHODS_DECLARATION = Template("""
static PyMethodDef THPTensor_$stateless(methods)[] = {
  $methods
  {NULL}
static PyMethodDef TH${sparse}PTensor_$stateless(methods)[] = {
  $methods
  {NULL}
};
""")

    WRAPPER_TEMPLATE = Template("""\
PyObject * $name(PyObject *self, PyObject *args)
PyObject * $name(PyObject *self, PyObject *args, PyObject *kwargs)
{
    HANDLE_TH_ERRORS
    int __argcount = args ? PyTuple_Size(args) : 0;
    int __tuplecount = args ? PyTuple_Size(args) : 0;
    int __dictcount = kwargs ? PyDict_Size(kwargs) : 0;
    int __argcount = __tuplecount + __dictcount;
    $variables
    $init

    $options
    } else {
      THPUtils_invalidArguments(args, $expected_args);
      return NULL;
    }

    THPUtils_invalidArguments(args, kwargs, "$readable_name", $num_options, $expected_args);
    return NULL;
    END_HANDLE_TH_ERRORS
}
""")

    ALLOCATE_TMPL = Template("""\
THP${type}TensorPtr _${name}_guard = (THP${type}Tensor*) THP${type}Tensor_NewEmpty();
if (!_${name}_guard.get()) return NULL;
THP${type}Tensor* $name = _${name}_guard.get();
""")

    ALLOCATE_CUDA = Template("""\
#if IS_CUDA
${cuda}
#else
${cpu}
#endif
""")

    def _allocate(typename, tmpl, cuda_tmpl=None, sparse=False):
        code = tmpl.safe_substitute(type=typename)
        if typename == '':
            code = code.replace('NewEmpty', '(NewEmpty)')
        if cuda_tmpl:
            cuda_code = code.replace('THP', 'THCP')
            code = cuda_tmpl.substitute(cuda=cuda_code, cpu=code)
        if sparse:
            code = code.replace('THP', 'THSP')
            code = code.replace('THCP', 'THCSP')
        return Template(code)

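What _allocate produces, step by step: safe_substitute fills in $type but leaves $name for a later per-option pass. A runnable sketch using the ALLOCATE_TMPL shape above (the argument name is hypothetical):

    from string import Template

    ALLOCATE_TMPL = Template("""\
THP${type}TensorPtr _${name}_guard = (THP${type}Tensor*) THP${type}Tensor_NewEmpty();
if (!_${name}_guard.get()) return NULL;
THP${type}Tensor* $name = _${name}_guard.get();
""")

    stage1 = Template(ALLOCATE_TMPL.safe_substitute(type='Long'))  # $name survives this pass
    print(stage1.substitute(name='indices'))
    # -> THPLongTensorPtr _indices_guard = (THPLongTensor*) THPLongTensor_NewEmpty();
    #    if (!_indices_guard.get()) return NULL;
    #    THPLongTensor* indices = _indices_guard.get();
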
    ALLOCATE_TYPE = {
        'THTensor*': Template("""\
      THTensorPtr _th_$name = THTensor_(new)(LIBRARY_STATE_NOARGS);
      THPTensorPtr _${name}_guard = (THPTensor*)THPTensor_(newObject)(_th_$name.get());
      THPTensor* $name = _${name}_guard.get();
      _th_$name.release();
"""),
        'THLongTensor*': Template("""\
      THLongTensorPtr _th_$name = THLongTensor_new(LIBRARY_STATE_NOARGS);
      THPLongTensorPtr _${name}_guard = (THPLongTensor*)THPLongTensor_newObject(_th_$name.get());
      THPLongTensor* $name = _${name}_guard.get();
      _th_$name.release();
"""),
        'THBoolTensor*': Template("""
#if IS_CUDA
      THCByteTensorPtr _t_$name = THCudaByteTensor_new(LIBRARY_STATE_NOARGS);
      THCPByteTensorPtr _${name}_guard = (THCPByteTensor*)THCPByteTensor_newObject(_t_$name);
      THCPByteTensor *$name = _${name}_guard.get();
#else
      THByteTensorPtr _t_$name = THByteTensor_new();
      THPByteTensorPtr _${name}_guard = (THPByteTensor*)THPByteTensor_newObject(_t_$name);
      THPByteTensor *$name = _${name}_guard.get();
#endif
      _t_$name.release();
"""),
        'THIndexTensor*': Template("""
#if IS_CUDA
      THCLongTensorPtr _t_$name = THCudaLongTensor_new(LIBRARY_STATE_NOARGS);
      THCPLongTensorPtr _${name}_guard = (THCPLongTensor*)THCPLongTensor_newObject(_t_$name);
      THCPLongTensor *$name = _${name}_guard.get();
#else
      THLongTensorPtr _t_$name = THLongTensor_new();
      THPLongTensorPtr _${name}_guard = (THPLongTensor*)THPLongTensor_newObject(_t_$name);
      THPLongTensor *$name = _${name}_guard.get();
#endif
      _t_$name.release();
"""),
        'THTensor*': _allocate('', ALLOCATE_TMPL),
        'THLongTensor*': _allocate('Long', ALLOCATE_TMPL),
        'THIntTensor*': _allocate('Int', ALLOCATE_TMPL),
        'THBoolTensor*': _allocate('Byte', ALLOCATE_TMPL, ALLOCATE_CUDA),
        'THIndexTensor*': _allocate('Long', ALLOCATE_TMPL, ALLOCATE_CUDA),
        'THIntegerTensor*': _allocate('Int', ALLOCATE_TMPL, ALLOCATE_CUDA),

        'THSTensor*': _allocate('', ALLOCATE_TMPL, sparse=True),
    }

    RELEASE_ARG = Template("_${name}_guard.release();")
    TYPE_NAMES = {
        'THTensor*': '" THPTensorStr "',
        'THSTensor*': '" THSPTensorStr "',
        'THStorage*': '" THPStorageStr "',
        'THGenerator*': 'torch.Generator',
        'THLongStorage*': '" THPModuleStr "LongStorage',
        'THLongTensor*': '" THPModuleStr "LongTensor',
        'THIntTensor*': '" THPModuleStr "IntTensor',
        'THBoolTensor*': '" THPModuleStr "ByteTensor',
        'THIndexTensor*': '" THPModuleStr "LongTensor',
        'THIntegerTensor*': '" THPModuleStr "IntTensor',
        'THFloatTensor*': '" THPModuleStr "FloatTensor',
        'THDoubleTensor*': '" THPModuleStr "DoubleTensor',
        'THCudaTensor*': 'torch.cuda.FloatTensor',
        'THCudaDoubleTensor*': 'torch.cuda.DoubleTensor',
        'THCudaLongTensor*': 'torch.cuda.LongTensor',
        'THSize*': 'torch.Size',
        'THStride*': 'tuple',
        'long': 'int',
        'real': '" RealStr "',
        'double': 'float',
        'accreal': '" RealStr "',
        'bool': 'bool',
        'const char*': 'bool',  # Can come only from bool option.
    }

    OUT_INIT = """
      __out = kwargs ? PyDict_GetItemString(kwargs, "out") : NULL;
    """

    def __init__(self):
        self.declarations = []
        self.stateless_declarations = []
        self.docstrings = []

    def get_type_unpack(self, arg, option):
        return self.TYPE_UNPACK.get(arg['type'], None)

    def get_type_check(self, arg, option):
        if arg['type'] == 'THSize*' and arg.get('long_args', False):
            return self.SIZE_VARARG_CHECK
        return self.TYPE_CHECK.get(arg['type'], None)

    # TODO: argument descriptions shouldn't be part of THP, but rather a general cwrap thing
    def get_wrapper_template(self, declaration):
        arg_desc = []
        for option in declaration['options']:
            option_desc = [arg['type'] + ' ' + arg['name'] for arg in option['arguments'] if not arg.get('ignore_check', False)]
            if option_desc:
                arg_desc.append('({})'.format(', '.join(option_desc)))
        arg_desc = OrderedDict()

        def format_arg(arg, var_args=False):
            if var_args and arg.get('long_args', False):
                return 'int ... ' + arg['name']
            else:
                arg_desc.append('no arguments')
        arg_str = '"' + ' or '.join(arg_desc) + '"'
        return Template(self.WRAPPER_TEMPLATE.safe_substitute(expected_args=arg_str))
                return self.TYPE_NAMES[arg['type']] + ' ' + arg['name']

        def format_args(args, var_args=False):
            option_desc = [format_arg(arg, var_args)
                           for arg in args
                           if not arg.get('ignore_check', False) and
                           not arg.get('output')]
            output_args = list(filter(lambda a: a.get('output'), args))
            if output_args:
                if len(output_args) > 1:
                    out_type = 'tuple['
                    out_type += ', '.join(
                        self.TYPE_NAMES[arg['type']] for arg in output_args)
                    out_type += ']'
                    option_desc += ['#' + out_type + ' out']
                else:
                    arg = output_args[0]
                    option_desc += ['#' + self.TYPE_NAMES[arg['type']] + ' out']

            if option_desc:
                return '({})'.format(', '.join(option_desc))
            else:
                return 'no arguments'

        for option in declaration['options']:
            arg_desc[format_args(option['arguments'], False)] = True
            arg_desc[format_args(option['arguments'], True)] = True

        arg_desc = sorted(list(arg_desc.keys()), key=len)
        arg_desc = ['"' + desc + '"' for desc in arg_desc]
        arg_str = ', '.join(arg_desc)
        variables_str = '\n'.join(declaration.get('variables', []))
        init_str = '\n'.join(declaration.get('init', []))
        if 'stateless' in declaration['name']:
            readable_name = 'torch.' + declaration['python_name']
        else:
            readable_name = declaration['python_name']
        return Template(self.WRAPPER_TEMPLATE.safe_substitute(
            readable_name=readable_name, num_options=len(arg_desc),
            expected_args=arg_str, variables=variables_str, init=init_str))

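For a hypothetical option (source, value) returning one tensor, the expected-args strings built above come out as C string fragments: '#' marks the keyword-only out argument, and the quoted pieces splice preprocessor string constants into the final literal. A reduced sketch:

    TYPE_NAMES = {'THTensor*': '" THPTensorStr "', 'real': '" RealStr "'}
    option = [{'type': 'THTensor*', 'name': 'source'},
              {'type': 'real', 'name': 'value'},
              {'type': 'THTensor*', 'name': 'result', 'output': True}]
    desc = [TYPE_NAMES[a['type']] + ' ' + a['name'] for a in option if not a.get('output')]
    desc += ['#' + TYPE_NAMES[a['type']] + ' out' for a in option if a.get('output')]
    print('({})'.format(', '.join(desc)))
    # -> (" THPTensorStr " source, " RealStr " value, #" THPTensorStr " out)
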
    def get_return_wrapper(self, option):
        return self.RETURN_WRAPPER.get(option['return'], None)
@@ -150,105 +257,227 @@ PyObject * $name(PyObject *self, PyObject *args)
    def get_arg_accessor(self, arg, option):
        if arg['name'] == 'self':
            return 'self'
        if 'allocate' in arg and arg['allocate']:
            return arg['name']
        if arg.get('output'):
            if not option['output_provided']:
                return arg['name']
            if option['output_count'] == 1:
                return '__out'
            else:
                return 'PyTuple_GET_ITEM(__out, {})'.format(arg['output_idx'])

    def process_docstrings(self):
        for declaration in self.declarations:
            docstr = declaration.get('docstring_method')
            if docstr is None:
                continue
            declaration['docstring_content'] = docstr.replace('\n', '\\n')
            declaration['docstring_var'] = 'docstr_' + declaration['python_name']
        for declaration in self.stateless_declarations:
            docstr = declaration.get('docstring_stateless')
            if docstr is None:
                continue
            declaration['docstring_content'] = docstr.replace('\n', '\\n')
            declaration['docstring_var'] = 'stateless_docstr_' + declaration['python_name']

    def generate_out_options(self, declaration):
        new_options = []
        declaration.setdefault('init', [])
        declaration['init'] += [self.OUT_INIT]
        for option in declaration['options']:
            out_idx = []
            for i, arg in enumerate(option['arguments']):
                if arg.get('output'):
                    out_idx.append(i)
            if not out_idx:
                option['has_output'] = True
                option['output_provided'] = False
                new_options.append(option)
                continue
            for output_provided in (True, False):
                option_copy = deepcopy(option)
                option_copy['has_output'] = True
                option_copy['output_provided'] = output_provided
                option_copy['output_count'] = len(out_idx)
                for i, idx in enumerate(out_idx):
                    arg = option_copy['arguments'][idx]
                    arg['output_idx'] = i
                    if not output_provided:
                        arg['ignore_check'] = True
                    else:
                        option_copy['argcount_offset'] = -len(out_idx) + 1
                        arg['no_kwargs'] = True
                        arg['no_idx'] = True
                new_options.append(option_copy)
        declaration['options'] = new_options

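In other words, every option that has output arguments is fanned out into an "out provided" variant (checked against the out= keyword) and an "out allocated" variant (output checks ignored). A reduced sketch of that duplication, with a made-up option:

    from copy import deepcopy

    option = {'arguments': [{'name': 'result', 'output': True}, {'name': 'self'}]}
    variants = []
    for output_provided in (True, False):
        v = deepcopy(option)
        v['output_provided'] = output_provided
        if not output_provided:
            v['arguments'][0]['ignore_check'] = True  # will be allocated instead
        variants.append(v)
    print(len(variants))  # -> 2
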
    def process_declarations(self, declarations):
        new_declarations = []
        register_only = [d for d in declarations if d.get('only_register', False)]
        declarations = [d for d in declarations if not d.get('only_register', False)]

        def has_arg_type(declaration, type_name):
            return any(arg['type'] == type_name
                       for option in declaration['options']
                       for arg in option['arguments'])

        def has_long_args(declaration):
            return any(arg.get('long_args', False)
                       for option in declaration['options']
                       for arg in option['arguments'])

        def has_output_args(declaration):
            return any(arg.get('output')
                       for option in declaration['options']
                       for arg in option['arguments'])

        for declaration in declarations:
            # Disable all methods for THHalfTensor, unless cpu_half is True
            if not declaration.get('cpu_half', False):
                defined_if = '!defined(TH_REAL_IS_HALF)'
                if 'defined_if' in declaration:
                    defined_if += ' && (' + declaration['defined_if'] + ')'
                declaration['defined_if'] = defined_if

            if declaration.get('only_register', False):
                continue
            declaration['python_name'] = declaration['name']

            declaration.setdefault('python_name', declaration['name'])
            declaration.setdefault('variables', [])
            if has_arg_type(declaration, 'THSize*'):
                declaration['variables'] += ['THLongStoragePtr __size;']
            if has_arg_type(declaration, 'THStride*'):
                declaration['variables'] += ['THLongStoragePtr __stride;']
            if has_output_args(declaration):
                declaration['variables'] += ['PyObject *__out;']
                self.generate_out_options(declaration)
            if has_long_args(declaration):
                declaration['no_kwargs'] = True
            for option in declaration['options']:
                option['cname'] = 'TH{}Tensor_({})'.format(
                    'S' if option.get('sparse', False) else '', option['cname'])
            if declaration.get('with_stateless', False) or declaration.get('only_stateless', False):
                stateless_declaration = self.make_stateless(deepcopy(declaration))
                stateless_declaration = self.make_stateless(declaration)
                new_declarations.append(stateless_declaration)
                self.stateless_declarations.append(stateless_declaration)
            if declaration.get('only_stateless', False):
                continue

            self.declarations.append(declaration)
            declaration['name'] = 'THPTensor_({})'.format(declaration['name'])
            declaration['name'] = 'TH{}PTensor_({})'.format(
                'S' if declaration.get('sparse', False) else '', declaration['name'])
            for option in declaration['options']:
                option['cname'] = 'THTensor_({})'.format(option['cname'])
                for arg in option['arguments']:
                    if arg['name'] == 'self':
                        arg['ignore_check'] = True
                    if 'allocate' in arg and arg['allocate']:
                        arg['ignore_check'] = True
        declarations = [d for d in declarations if not d.get('only_stateless', False)]

        register_only = [d for d in declarations if d.get('only_register', False)]
        declarations = [d for d in declarations
                        if (not d.get('only_stateless', False)) and (not d.get('only_register', False))]
        self.declarations.extend(filter(lambda x: not x.get('only_stateless', False), register_only))
        self.stateless_declarations.extend(filter(lambda x: x.get('only_stateless', False), register_only))
        return declarations + new_declarations

        self.process_docstrings()

        all_declarations = declarations + new_declarations
        return all_declarations

    def make_stateless(self, declaration):
        declaration['name'] = 'THPTensor_stateless_({})'.format(declaration['name'])
        new_options = []
        declaration = deepcopy(declaration)
        declaration['name'] = 'TH{}PTensor_stateless_({})'.format(
            'S' if declaration.get('sparse', False) else '', declaration['name'])
        for option in declaration['options']:
            option['cname'] = 'THTensor_({})'.format(option['cname'])
            allocated = []
            for i, arg in enumerate(option['arguments']):
                if 'allocate' in arg and arg['allocate']:
                    arg['ignore_check'] = True
                    allocated.append(i)
            for arg in option['arguments']:
                if arg['name'] == 'self':
                    arg['assign_name'] = 'self'
                    arg['name'] = 'source'
            for permutation in product((True, False), repeat=len(allocated)):
                option_copy = deepcopy(option)
                for i, bit in zip(allocated, permutation):
                    arg = option_copy['arguments'][i]
                    # By default everything is allocated, so we don't have to do anything
                    if not bit:
                        del arg['allocate']
                        del arg['ignore_check']
                new_options.append(option_copy)
        declaration['options'] = self.filter_unique_options(declaration['options'] + new_options)
        return declaration

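The product((True, False), repeat=len(allocated)) loop above enumerates every subset of allocatable arguments, so an option with two of them yields four stateless variants. For example (indices are hypothetical):

    from itertools import product

    allocated = [0, 3]  # indices of allocatable arguments
    for permutation in product((True, False), repeat=len(allocated)):
        print(dict(zip(allocated, permutation)))
    # -> {0: True, 3: True}, {0: True, 3: False}, {0: False, 3: True}, {0: False, 3: False}
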
    def filter_unique_options(self, options):
        def signature(option):
            return '#'.join(arg['type'] for arg in option['arguments'] if not 'ignore_check' in arg or not arg['ignore_check'])
        seen_signatures = set()
        unique = []
        for option in options:
            sig = signature(option)
            if sig not in seen_signatures:
                unique.append(option)
                seen_signatures.add(sig)
        return unique

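The signature key is just the '#'-joined types of the visible (non-ignored) arguments, so variants that differ only in ignored arguments collapse into one. For instance, with a made-up option:

    def signature(option):
        return '#'.join(arg['type'] for arg in option['arguments']
                        if not arg.get('ignore_check', False))

    opt = {'arguments': [{'type': 'THTensor*', 'ignore_check': True},
                         {'type': 'THTensor*'}, {'type': 'real'}]}
    print(signature(opt))  # -> THTensor*#real
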
    def declare_methods(self, stateless):
    def declare_methods(self, stateless, sparse):
        tensor_methods = ''
        for declaration in (self.declarations if not stateless else self.stateless_declarations):
            extra_flags = ' | ' + declaration.get('method_flags') if 'method_flags' in declaration else ''
            entry = Template(' {"$python_name", (PyCFunction)$name, METH_VARARGS$extra_flags, NULL},\n').substitute(
                python_name=declaration['python_name'], name=declaration['name'], extra_flags=extra_flags
            )
            if declaration.get('sparse', False) != sparse:
                continue
            flags = 'METH_VARARGS'
            flags += ' | ' + declaration.get('method_flags') if 'method_flags' in declaration else ''
            if not declaration.get('only_register'):
                flags += ' | METH_KEYWORDS'
            if declaration.get('override_method_flags'):
                flags = declaration['override_method_flags']
            entry = Template(' {"$python_name", (PyCFunction)$name, $flags, $docstring},\n').substitute(
                python_name=declaration['python_name'], name=declaration['name'], flags=flags,
                docstring=declaration.get('docstring_var', 'NULL')
            )
            if 'defined_if' in declaration:
                entry = self.preprocessor_guard(entry, declaration['defined_if'])
            tensor_methods += entry
        return self.TENSOR_METHODS_DECLARATION.substitute(methods=tensor_methods, stateless=('' if not stateless else 'stateless_'))
        generated = self.TENSOR_METHODS_DECLARATION.substitute(
            methods=tensor_methods,
            stateless=('' if not stateless else 'stateless_'),
            sparse=('' if not sparse else 'S'),
        )
        if sparse:
            generated = '#ifndef TH_REAL_IS_HALF\n' + generated + '\n#endif\n\n'
        return generated

    def process_full_file(self, code):
        # We have to find a place before all undefs
        idx = code.find('// PUT DEFINITIONS IN HERE PLEASE')
        return code[:idx] + self.declare_methods(False) + self.declare_methods(True) + code[idx:]
        return (code[:idx] +
                self.declare_methods(False, False) +
                self.declare_methods(True, False) +
                self.declare_methods(False, True) +
                self.declare_methods(True, True) +
                code[idx:]
                )

    def preprocessor_guard(self, code, condition):
        return '#if ' + condition + '\n' + code + '#endif\n'

    def process_wrapper(self, code, declaration):
        if 'defined_if' in declaration:
            return self.preprocessor_guard(code, declaration['defined_if'])
        return code

    def process_all_unpacks(self, code, option):
    def process_all_call_arg(self, code, option):
        return 'LIBRARY_STATE ' + code

    def process_call(self, code, option):
    def process_all_checks(self, code, option):
        if option.get('has_output'):
            indent = " " * 10
            if option['output_provided']:
                checks = "__out != NULL &&\n" + indent
                if option['output_count'] > 1:
                    checks += "PyTuple_Check(__out) &&\n" + indent
                    length_check = "PyTuple_GET_SIZE(__out) == {} &&\n".format(
                        option['output_count'])
                    checks += length_check + indent
                code = checks + code
            else:
                code = "__out == NULL &&\n" + indent + code

        if any(arg.get('long_args', False) for arg in option['arguments']):
            code = code.replace('__argcount ==', '__argcount >=')
        expected = str(int(option.get('output_provided', False)))
        code = '__dictcount == ' + expected + ' &&\n ' + code

        return code

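Composed together, for a one-output option with out provided, the generated guard grows from the inside out, roughly like this (a sketch; the exact column comes from the 10-space indent variable above):

    __dictcount == 1 &&
              __out != NULL &&
              __argcount == 3 &&
              ...per-argument type checks...
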
    def process_pre_arg_assign(self, template, option):
        new_args = []
        for arg in option['arguments']:
            if 'allocate' in arg and arg['allocate']:
            if not option.get('output_provided', True) and arg.get('output'):
                new_args.append(self.ALLOCATE_TYPE[arg['type']].substitute(name=arg['name']))
        return '\n '.join(new_args) + '\n' + code
        template = new_args + template
        return template

    def generate_docstrings_cpp(self):
        template = Template('char* $name = "$content";')
        return '\n\n'.join(
            template.substitute(name=decl['docstring_var'], content=decl['docstring_content'])
            for decl in chain(self.declarations, self.stateless_declarations)
            if 'docstring_var' in decl)

    def generate_docstrings_h(self):
        template = Template('extern char* $name;')
        return '\n\n'.join(
            template.substitute(name=decl['docstring_var'])
            for decl in chain(self.declarations, self.stateless_declarations)
            if 'docstring_var' in decl)

@@ -16,6 +16,9 @@ class CWrapPlugin(object):
    def get_wrapper_template(self, declaration):
        pass

    def get_assign_args(self, arguments):
        pass

    def get_arg_accessor(self, arg, option):
        pass

@@ -31,7 +34,7 @@ class CWrapPlugin(object):
    def process_single_unpack(self, code, arg, arg_accessor):
        return code

    def process_all_unpacks(self, code, option):
    def process_all_call_arg(self, code, option):
        return code

    def process_option_code(self, code, option):
@@ -43,8 +46,11 @@ class CWrapPlugin(object):
    def process_declarations(self, declarations):
        return declarations

    def process_call(self, code, option):
        return code
    def process_option_code_template(self, template, option):
        return template

    def process_pre_arg_assign(self, template, option):
        return template


from .StandaloneExtension import StandaloneExtension
@@ -52,6 +58,10 @@ from .NullableArguments import NullableArguments
from .OptionalArguments import OptionalArguments
from .ArgcountChecker import ArgcountChecker
from .ArgumentReferences import ArgumentReferences
from .BeforeCall import BeforeCall
from .BeforeAfterCall import BeforeAfterCall
from .ConstantArguments import ConstantArguments
from .ReturnArguments import ReturnArguments
from .GILRelease import GILRelease
from .AutoGPU import AutoGPU
from .CuDNNPlugin import CuDNNPlugin
from .GenericNN import GenericNN

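Given these hooks, a minimal plugin only overrides what it needs; everything else falls through to the base-class defaults above. A hypothetical sketch (the plugin and its behavior are invented for illustration):

    from . import CWrapPlugin

    class RenameMethods(CWrapPlugin):
        """Hypothetical plugin: prefixes every Python-visible name."""

        def process_declarations(self, declarations):
            for declaration in declarations:
                name = declaration.get('python_name', declaration['name'])
                declaration['python_name'] = 'my_' + name
            return declarations
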
@@ -1,171 +0,0 @@
#include <Python.h>
#include <exception>

#define REQUIRES_CUDA $requres_cuda

// TODO: use THP instead of this hack
struct Tensor {
    PyObject_HEAD
    void *cdata;
};

PyObject *THPDoubleStorageClass = NULL;
PyObject *THPFloatStorageClass = NULL;
PyObject *THPLongStorageClass = NULL;
PyObject *THPIntStorageClass = NULL;
PyObject *THPShortStorageClass = NULL;
PyObject *THPCharStorageClass = NULL;
PyObject *THPByteStorageClass = NULL;

PyObject *THPDoubleTensorClass = NULL;
PyObject *THPFloatTensorClass = NULL;
PyObject *THPLongTensorClass = NULL;
PyObject *THPIntTensorClass = NULL;
PyObject *THPShortTensorClass = NULL;
PyObject *THPCharTensorClass = NULL;
PyObject *THPByteTensorClass = NULL;

#if REQUIRES_CUDA
PyObject *THCPDoubleStorageClass = NULL;
PyObject *THCPFloatStorageClass = NULL;
PyObject *THCPLongStorageClass = NULL;
PyObject *THCPIntStorageClass = NULL;
PyObject *THCPHalfStorageClass = NULL;
PyObject *THCPShortStorageClass = NULL;
PyObject *THCPCharStorageClass = NULL;
PyObject *THCPByteStorageClass = NULL;

PyObject *THCPDoubleTensorClass = NULL;
PyObject *THCPFloatTensorClass = NULL;
PyObject *THCPLongTensorClass = NULL;
PyObject *THCPIntTensorClass = NULL;
PyObject *THCPHalfTensorClass = NULL;
PyObject *THCPShortTensorClass = NULL;
PyObject *THCPCharTensorClass = NULL;
PyObject *THCPByteTensorClass = NULL;
#endif

static bool __loadClasses()
{
#define ASSERT_NOT_NULL(ptr) if (!(ptr)) { PyErr_SetString(PyExc_RuntimeError, "couldn't load classes"); return false; }
  PyObject *torch_module = PyImport_ImportModule("torch");
  if (!torch_module) {
    PyErr_SetString(PyExc_RuntimeError, "class loader couldn't access torch module");
    return false;
  }
  PyObject* module_dict = PyModule_GetDict(torch_module);

  ASSERT_NOT_NULL(THPDoubleStorageClass = PyMapping_GetItemString(module_dict,(char*)"DoubleStorage"));
  ASSERT_NOT_NULL(THPFloatStorageClass  = PyMapping_GetItemString(module_dict,(char*)"FloatStorage"));
  ASSERT_NOT_NULL(THPLongStorageClass   = PyMapping_GetItemString(module_dict,(char*)"LongStorage"));
  ASSERT_NOT_NULL(THPIntStorageClass    = PyMapping_GetItemString(module_dict,(char*)"IntStorage"));
  ASSERT_NOT_NULL(THPShortStorageClass  = PyMapping_GetItemString(module_dict,(char*)"ShortStorage"));
  ASSERT_NOT_NULL(THPCharStorageClass   = PyMapping_GetItemString(module_dict,(char*)"CharStorage"));
  ASSERT_NOT_NULL(THPByteStorageClass   = PyMapping_GetItemString(module_dict,(char*)"ByteStorage"));

  ASSERT_NOT_NULL(THPDoubleTensorClass = PyMapping_GetItemString(module_dict,(char*)"DoubleTensor"));
  ASSERT_NOT_NULL(THPFloatTensorClass  = PyMapping_GetItemString(module_dict,(char*)"FloatTensor"));
  ASSERT_NOT_NULL(THPLongTensorClass   = PyMapping_GetItemString(module_dict,(char*)"LongTensor"));
  ASSERT_NOT_NULL(THPIntTensorClass    = PyMapping_GetItemString(module_dict,(char*)"IntTensor"));
  ASSERT_NOT_NULL(THPShortTensorClass  = PyMapping_GetItemString(module_dict,(char*)"ShortTensor"));
  ASSERT_NOT_NULL(THPCharTensorClass   = PyMapping_GetItemString(module_dict,(char*)"CharTensor"));
  ASSERT_NOT_NULL(THPByteTensorClass   = PyMapping_GetItemString(module_dict,(char*)"ByteTensor"));

#if REQUIRES_CUDA
  PyObject *cuda_module = PyImport_ImportModule("torch.cuda");
  if (!cuda_module) {
    PyErr_SetString(PyExc_RuntimeError, "class loader couldn't access torch.cuda module");
    return false;
  }
  PyObject* cuda_module_dict = PyModule_GetDict(cuda_module);

  ASSERT_NOT_NULL(THCPDoubleStorageClass = PyMapping_GetItemString(cuda_module_dict, (char*)"DoubleStorage"));
  ASSERT_NOT_NULL(THCPFloatStorageClass  = PyMapping_GetItemString(cuda_module_dict, (char*)"FloatStorage"));
  ASSERT_NOT_NULL(THCPHalfStorageClass   = PyMapping_GetItemString(cuda_module_dict, (char*)"HalfStorage"));
  ASSERT_NOT_NULL(THCPLongStorageClass   = PyMapping_GetItemString(cuda_module_dict, (char*)"LongStorage"));
  ASSERT_NOT_NULL(THCPIntStorageClass    = PyMapping_GetItemString(cuda_module_dict, (char*)"IntStorage"));
  ASSERT_NOT_NULL(THCPShortStorageClass  = PyMapping_GetItemString(cuda_module_dict, (char*)"ShortStorage"));
  ASSERT_NOT_NULL(THCPCharStorageClass   = PyMapping_GetItemString(cuda_module_dict, (char*)"CharStorage"));
  ASSERT_NOT_NULL(THCPByteStorageClass   = PyMapping_GetItemString(cuda_module_dict, (char*)"ByteStorage"));

  ASSERT_NOT_NULL(THCPDoubleTensorClass = PyMapping_GetItemString(cuda_module_dict, (char*)"DoubleTensor"));
  ASSERT_NOT_NULL(THCPHalfTensorClass   = PyMapping_GetItemString(cuda_module_dict, (char*)"HalfTensor"));
  ASSERT_NOT_NULL(THCPFloatTensorClass  = PyMapping_GetItemString(cuda_module_dict, (char*)"FloatTensor"));
  ASSERT_NOT_NULL(THCPLongTensorClass   = PyMapping_GetItemString(cuda_module_dict, (char*)"LongTensor"));
  ASSERT_NOT_NULL(THCPIntTensorClass    = PyMapping_GetItemString(cuda_module_dict, (char*)"IntTensor"));
  ASSERT_NOT_NULL(THCPShortTensorClass  = PyMapping_GetItemString(cuda_module_dict, (char*)"ShortTensor"));
  ASSERT_NOT_NULL(THCPCharTensorClass   = PyMapping_GetItemString(cuda_module_dict, (char*)"CharTensor"));
  ASSERT_NOT_NULL(THCPByteTensorClass   = PyMapping_GetItemString(cuda_module_dict, (char*)"ByteTensor"));
#endif

  return true;
#undef ASSERT_NOT_NULL
}

// TODO: duplicate code
#include <string>
void __invalidArgs(PyObject *given_args, const char *expected_args_desc) {
  static const std::string PREFIX = "Invalid arguments! Got ";
  std::string error_msg;
  error_msg.reserve(2000);
  error_msg += PREFIX;

  // TODO: assert that args is a tuple?
  Py_ssize_t num_args = PyTuple_Size(given_args);
  if (num_args == 0) {
    error_msg += "no arguments";
  } else {
    error_msg += "(";
    for (int i = 0; i < num_args; i++) {
      PyObject *arg = PyTuple_GET_ITEM(given_args, i);
      if (i > 0)
        error_msg += ", ";
      error_msg += Py_TYPE(arg)->tp_name;
    }
    error_msg += ")";
  }
  error_msg += ", but expected ";
  error_msg += expected_args_desc;
  PyErr_SetString(PyExc_ValueError, error_msg.c_str());
}

bool __checkFloat(PyObject *arg) {
#if PY_MAJOR_VERSION != 2
  return PyFloat_Check(arg) || PyLong_Check(arg);
#else
  return PyFloat_Check(arg) || PyLong_Check(arg) || PyInt_Check(arg);
#endif
}

double __getFloat(PyObject *arg) {
  if (PyFloat_Check(arg)) {
    return PyFloat_AsDouble(arg);
#if PY_MAJOR_VERSION == 2
  } else if (PyInt_Check(arg)) {
    return (double)PyInt_AsLong(arg);
#endif
  } else {
    return PyLong_AsDouble(arg);
  }
}

bool __checkLong(PyObject *arg) {
#if PY_MAJOR_VERSION != 2
  return PyLong_Check(arg);
#else
  return PyInt_Check(arg) || PyLong_Check(arg);
#endif
}

long __getLong(PyObject *arg) {
#if PY_MAJOR_VERSION != 2
  return PyLong_AsLong(arg);
#else
  if (PyInt_Check(arg)) {
    return PyInt_AsLong(arg);
  } else {
    return PyLong_AsLong(arg);
  }
#endif
}

@@ -28,8 +28,6 @@ PyMODINIT_FUNC PyInit_$short_name()
  ASSERT_TRUE(module = PyModule_Create(&module_def));
#endif

  ASSERT_TRUE(__loadClasses());

#if PY_MAJOR_VERSION != 2
  return module;
#endif

@@ -1 +1,2 @@
from .generate_wrappers import generate_wrappers
from .generate_wrappers import generate_wrappers, wrap_function, \
    import_module, wrap_generic_function

@@ -2,12 +2,13 @@ import os
import sys
from string import Template, ascii_lowercase
from ..cwrap import cwrap
from ..cwrap.plugins import StandaloneExtension, NullableArguments
from ..cwrap.plugins import StandaloneExtension, GenericNN, NullableArguments, AutoGPU

BASE_PATH = os.path.realpath(os.path.join(__file__, '..', '..', '..'))
WRAPPER_PATH = os.path.join(BASE_PATH, 'torch', 'csrc', 'nn')
THNN_UTILS_PATH = os.path.join(BASE_PATH, 'torch', '_thnn', 'utils.py')


def import_module(name, path):
    if sys.version_info >= (3, 5):
        import importlib.util
@@ -34,6 +35,7 @@ FUNCTION_TEMPLATE = Template("""\

COMMON_TRANSFORMS = {
    'THIndex_t': 'long',
    'THCIndex_t': 'long',
    'THInteger_t': 'int',
}
COMMON_CPU_TRANSFORMS = {
@@ -41,31 +43,52 @@ COMMON_CPU_TRANSFORMS = {
    'THIndexTensor*': 'THLongTensor*',
    'THIntegerTensor*': 'THIntTensor*',
}
COMMON_GPU_TRANSFORMS = {
    'THCState*': 'void*',
    'THCIndexTensor*': 'THCudaLongTensor*',
}

TYPE_TRANSFORMS = {
    'Float': {
        'THTensor*': 'THFloatTensor*',
        'real': 'float',
        'accreal': 'double',
    },
    'Double': {
        'THTensor*': 'THDoubleTensor*',
        'real': 'double',
        'accreal': 'double',
    },
    'CudaHalf': {
        'THCTensor*': 'THCudaHalfTensor*',
        'real': 'half',
        'accreal': 'float',
    },
    'Cuda': {
        'THCState*': 'void*',
        'THIndexTensor*': 'THCudaLongTensor*',
    }
        'THCTensor*': 'THCudaTensor*',
        'real': 'float',
        'accreal': 'float',
    },
    'CudaDouble': {
        'THCTensor*': 'THCudaDoubleTensor*',
        'real': 'double',
        'accreal': 'double',
    },
}
for t, transforms in TYPE_TRANSFORMS.items():
    transforms.update(COMMON_TRANSFORMS)
TYPE_TRANSFORMS['Float'].update(COMMON_CPU_TRANSFORMS)
TYPE_TRANSFORMS['Double'].update(COMMON_CPU_TRANSFORMS)

for t in ['Float', 'Double']:
    TYPE_TRANSFORMS[t].update(COMMON_CPU_TRANSFORMS)
for t in ['CudaHalf', 'Cuda', 'CudaDouble']:
    TYPE_TRANSFORMS[t].update(COMMON_GPU_TRANSFORMS)


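After these update loops each backend table is complete, and lookups fall back to the raw type when no transform applies. A reduced sketch of the lookup pattern used below:

    # Assuming the merged tables above have been built:
    TYPE_TRANSFORMS = {'Float': {'THTensor*': 'THFloatTensor*',
                                 'THIndexTensor*': 'THLongTensor*'}}
    arg_type = 'THIndexTensor*'
    print(TYPE_TRANSFORMS['Float'].get(arg_type, arg_type))  # -> THLongTensor*
    print(TYPE_TRANSFORMS['Float'].get('int', 'int'))        # -> int (unchanged)
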
def wrap_function(name, type, arguments):
    cname = 'THNN_' + type + name
    declaration = ''
    declaration += 'extern "C" void ' + cname + '(' + ', '.join(TYPE_TRANSFORMS[type].get(arg.type, arg.type) for arg in arguments) + ');\n'
    declaration += 'extern "C" void ' + cname + \
        '(' + ', '.join(TYPE_TRANSFORMS[type].get(arg.type, arg.type) for arg in arguments) + ');\n'
    declaration += FUNCTION_TEMPLATE.substitute(name=type + name, cname=cname)
    indent = ' ' * 4
    dict_indent = ' ' * 6
@@ -75,15 +98,18 @@ def wrap_function(name, type, arguments):
            declaration += prefix + TYPE_TRANSFORMS[type].get(arg.type, arg.type) + ' ' + arg.name + '\n'
        else:
            t = TYPE_TRANSFORMS[type].get(arg.type, arg.type)
            declaration += prefix + 'type: ' + t + '\n' + \
                dict_indent + 'name: ' + arg.name + '\n' + \
                dict_indent + 'nullable: True' + '\n'
    declaration += ']]\n\n\n'
    return declaration


def generate_wrappers():
    wrap_nn()
    wrap_cunn()
    wrap_generic()


def wrap_nn():
    wrapper = '#include <TH/TH.h>\n\n\n'
@@ -98,18 +124,81 @@ def wrap_nn():
        NullableArguments(),
    ])


def wrap_cunn():
    wrapper = '#include <TH/TH.h>\n'
    wrapper += '#include <THC/THC.h>\n\n\n'
    cunn_functions = thnn_utils.parse_header(thnn_utils.THCUNN_H_PATH)
    # Get rid of Cuda prefix
    for function in cunn_functions:
        function.name = function.name[4:]
    for fn in cunn_functions:
        wrapper += wrap_function(fn.name, 'Cuda', fn.arguments)
        for t in ['CudaHalf', 'Cuda', 'CudaDouble']:
            wrapper += wrap_function(fn.name, t, fn.arguments)
    with open('torch/csrc/nn/THCUNN.cwrap', 'w') as f:
        f.write(wrapper)
    cwrap('torch/csrc/nn/THCUNN.cwrap', plugins=[
        StandaloneExtension('torch._thnn._THCUNN', with_cuda=True),
        StandaloneExtension('torch._thnn._THCUNN'),
        NullableArguments(),
        AutoGPU(has_self=False),
    ])

GENERIC_FUNCTION_TEMPLATE = Template("""\
[[
  name: $name
  return: void
  options:
""")


def wrap_generic_function(name, backends):
    declaration = ''
    declaration += GENERIC_FUNCTION_TEMPLATE.substitute(name=name)
    for backend in backends:
        declaration += ' - cname: ' + name + '\n'
        declaration += ' backend: ' + backend['name'] + '\n'
        declaration += ' arguments:\n'
        for arg in backend['arguments']:
            declaration += ' - arg: ' + arg.type + ' ' + arg.name + '\n'
            if arg.is_optional:
                declaration += ' optional: True\n'
    declaration += ']]\n\n\n'
    return declaration

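For a hypothetical Abs_updateOutput with nn and cunn backends, wrap_generic_function emits a cwrap entry shaped roughly like this (the argument lists and exact indentation are illustrative):

    [[
      name: Abs_updateOutput
      return: void
      options:
        - cname: Abs_updateOutput
          backend: nn
          arguments:
            - arg: THTensor* input
            - arg: THTensor* output
        - cname: Abs_updateOutput
          backend: cunn
          arguments:
            - arg: THCTensor* input
            - arg: THCTensor* output
    ]]
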
def wrap_generic():
    from collections import OrderedDict
    defs = OrderedDict()

    def should_wrap_function(name):
        if name.startswith('LookupTable'):
            return False
        return (name.endswith('updateOutput') or
                name.endswith('updateGradInput') or
                name.endswith('accGradParameters') or
                name.endswith('backward'))

    def add_functions(name, functions):
        for fn in functions:
            if not should_wrap_function(fn.name):
                continue
            if fn.name not in defs:
                defs[fn.name] = []
            defs[fn.name] += [{
                'name': name,
                'arguments': fn.arguments[1:],
            }]

    add_functions('nn', thnn_utils.parse_header(thnn_utils.THNN_H_PATH))
    add_functions('cunn', thnn_utils.parse_header(thnn_utils.THCUNN_H_PATH))

    wrapper = ''
    for name, backends in defs.items():
        wrapper += wrap_generic_function(name, backends)
    with open('torch/csrc/nn/THNN_generic.cwrap', 'w') as f:
        f.write(wrapper)

    cwrap('torch/csrc/nn/THNN_generic.cwrap', plugins=[
        GenericNN(header=True),
    ], default_plugins=False, destination='torch/csrc/nn/THNN_generic.h')

    cwrap('torch/csrc/nn/THNN_generic.cwrap', plugins=[
        GenericNN(),
    ], default_plugins=False)

17  tools/setup_helpers/cuda.py  Normal file
@@ -0,0 +1,17 @@
import ctypes.util
import os

from .env import check_env_flag

if check_env_flag('NO_CUDA'):
    WITH_CUDA = False
    CUDA_HOME = None
else:
    CUDA_HOME = os.getenv('CUDA_HOME', '/usr/local/cuda')
    if not os.path.exists(CUDA_HOME):
        cudart_path = ctypes.util.find_library('cudart')
        if cudart_path is not None:
            CUDA_HOME = os.path.dirname(cudart_path)
        else:
            CUDA_HOME = None
    WITH_CUDA = CUDA_HOME is not None

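check_env_flag comes from the sibling env helper, which is not part of this diff; a plausible sketch of its contract (an assumption about the helper, not its actual implementation):

    import os

    def check_env_flag(name):
        # Hypothetical: treat common truthy spellings of the variable as "on".
        return os.getenv(name, '').upper() in ['ON', '1', 'YES', 'TRUE', 'Y']
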
49  tools/setup_helpers/cudnn.py  Normal file
@@ -0,0 +1,49 @@
import os
import glob
from itertools import chain

from .env import check_env_flag
from .cuda import WITH_CUDA, CUDA_HOME


def gather_paths(env_vars):
    return list(chain(*(os.getenv(v, '').split(':') for v in env_vars)))


WITH_CUDNN = False
CUDNN_LIB_DIR = None
CUDNN_INCLUDE_DIR = None
if WITH_CUDA and not check_env_flag('NO_CUDNN'):
    lib_paths = list(filter(bool, [
        os.getenv('CUDNN_LIB_DIR'),
        os.path.join(CUDA_HOME, 'lib'),
        os.path.join(CUDA_HOME, 'lib64'),
        '/usr/lib/x86_64-linux-gnu/',
    ] + gather_paths([
        'LIBRARY_PATH',
    ])))
    include_paths = list(filter(bool, [
        os.getenv('CUDNN_INCLUDE_DIR'),
        os.path.join(CUDA_HOME, 'include'),
        '/usr/include/',
    ] + gather_paths([
        'CPATH',
        'C_INCLUDE_PATH',
        'CPLUS_INCLUDE_PATH',
    ])))
    for path in lib_paths:
        if path is None or not os.path.exists(path):
            continue
        if glob.glob(os.path.join(path, 'libcudnn*')):
            CUDNN_LIB_DIR = path
            break
    for path in include_paths:
        if path is None or not os.path.exists(path):
            continue
        if os.path.exists(os.path.join(path, 'cudnn.h')):
            CUDNN_INCLUDE_DIR = path
            break
    if not CUDNN_LIB_DIR or not CUDNN_INCLUDE_DIR:
        CUDNN_LIB_DIR = CUDNN_INCLUDE_DIR = None
    else:
        WITH_CUDNN = True