diff --git a/test/cpp/jit/test.cpp b/test/cpp/jit/test.cpp
index 20cab91f2827..cc5b26fecb07 100644
--- a/test/cpp/jit/test.cpp
+++ b/test/cpp/jit/test.cpp
@@ -90,9 +90,11 @@ TH_FORALL_TESTS_CUDA(JIT_GTEST_CUDA)
 #endif
 
 #define JIT_TEST(name) test##name();
-void runJITCPPTests() {
+void runJITCPPTests(bool runCuda) {
   TH_FORALL_TESTS(JIT_TEST)
-  TH_FORALL_TESTS_CUDA(JIT_TEST)
+  if (runCuda) {
+    TH_FORALL_TESTS_CUDA(JIT_TEST)
+  }
 
   // This test is special since it requires prior setup in python.
   // So it's included here but not in the pure cpp gtest suite
diff --git a/test/test_jit.py b/test/test_jit.py
index 5763645ae41f..98d8bd4a9036 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -1484,13 +1484,23 @@ class TestJit(JitTestCase):
         for node in g.nodes():
             self.assertTrue(g2.findNode(node.kind()) is not None)
 
+    @unittest.skipIf(IS_WINDOWS, "NYI: JIT tests not yet supported on windows")
+    @unittest.skipIf(IS_SANDCASTLE, "gtest runs these in sandcastle")
+    @unittest.skipIf(RUN_CUDA, "covered by test_cpp_cuda")
+    @skipIfRocm
+    def test_cpp(self):
+        from cpp.jit import tests_setup
+        tests_setup.setup()
+        torch._C._jit_run_cpp_tests(run_cuda=False)
+        tests_setup.shutdown()
+
     @unittest.skipIf(IS_WINDOWS, "NYI: fuser support for Windows")
     @unittest.skipIf(not RUN_CUDA, "cpp tests require CUDA")
     @skipIfRocm
     def test_cpp_cuda(self):
         from cpp.jit import tests_setup
         tests_setup.setup()
-        torch._C._jit_run_cpp_tests()
+        torch._C._jit_run_cpp_tests(run_cuda=True)
         tests_setup.shutdown()
 
     def test_batchnorm(self):
diff --git a/torch/csrc/jit/init.cpp b/torch/csrc/jit/init.cpp
index e693a1584fe3..922873fee58d 100644
--- a/torch/csrc/jit/init.cpp
+++ b/torch/csrc/jit/init.cpp
@@ -79,11 +79,11 @@ bool loadPythonClasses() {
 } // anonymous namespace
 
 #if defined(_WIN32)
-void runJITCPPTests() {
+void runJITCPPTests(bool runCuda) {
   AT_ERROR("JIT tests not yet supported on Windows");
 }
 #else
-void runJITCPPTests();
+void runJITCPPTests(bool runCuda);
 #endif
 
 void initJITBindings(PyObject* module) {
@@ -182,14 +182,15 @@ void initJITBindings(PyObject* module) {
       [](std::shared_ptr<Graph> graph) { CreateAutodiffSubgraphs(graph); })
       .def(
           "_jit_run_cpp_tests",
-          [] {
+          [](bool runCuda) {
             // We have to release the GIL inside this method, because if we
             // happen to initialize the autograd engine in these tests, the
             // newly spawned worker threads will try to initialize their
             // PyThreadState*, and they need the GIL for this.
             AutoNoGIL _no_gil;
-            return runJITCPPTests();
-          })
+            return runJITCPPTests(runCuda);
+          },
+          py::arg("run_cuda"))
       .def(
           "_jit_flatten",
           [](py::handle& obj) {