[test] split tracer related tests out of test_jit (#40142)

Summary:
Pull Request resolved: https://github.com/pytorch/pytorch/pull/40142

test_jit is becoming huge again, which makes the file hard to load in an editor and
hard to add new tests to, so this splits out the tracer-related tests.

Test Plan: Imported from OSS

Reviewed By: ailzhang

Differential Revision: D22085035

Pulled By: wanchaol

fbshipit-source-id: 696bee84985ecfbfeac8e2ee5c27f1bdda8de394
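
For context, a minimal sketch of what a split-out test file looks like after this change, based on the imports visible in the diff hunks below; the class and test names here are hypothetical, not taken from this commit:

    # Hypothetical skeleton of a split-out JIT test file; the class and test
    # names are illustrative only. Shared helpers now come from jit_utils
    # rather than from test_jit.
    import torch
    from torch.testing._internal.common_utils import run_tests
    from torch.testing._internal.jit_utils import JitTestCase

    class TestTracer(JitTestCase):
        def test_trace_add(self):
            # Trace a trivial function and check the traced version computes
            # the same result as the eager function.
            def fn(x, y):
                return x + y
            inputs = (torch.ones(2), torch.ones(2))
            traced = torch.jit.trace(fn, inputs)
            self.assertEqual(traced(*inputs), fn(*inputs))

    if __name__ == '__main__':
        run_tests()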
Author: Wanchao Liang
Date: 2020-06-17 17:24:52 -07:00
Committed by: Facebook GitHub Bot
Parent: e34e32850e
Commit: 27d789500b

5 changed files with 2070 additions and 2033 deletions

test/jit/test_tracer.py: new file, 2045 additions (file diff suppressed because it is too large)

File diff suppressed because it is too large


@@ -11,12 +11,13 @@ from torch.testing import FileCheck
 from torch.testing._internal.common_utils import run_tests, IS_SANDCASTLE, ProfilingMode, GRAPH_EXECUTOR, \
     enable_profiling_mode_for_profiling_tests
+from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, _inline_everything, \
+    RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU
 from textwrap import dedent
 from itertools import product, permutations
-from test_jit import JitTestCase, enable_cpu_fuser, RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, \
-    backward_graph, all_backward_graphs, get_lstm_inputs, get_milstm_inputs, \
-    LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell, _inline_everything
+from test_jit import backward_graph, all_backward_graphs, get_lstm_inputs, get_milstm_inputs, \
+    LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell
 if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
     torch._C._jit_set_profiling_executor(True)


@@ -11,13 +11,14 @@ from torch.testing import FileCheck
 from torch.testing._internal.common_utils import run_tests, IS_SANDCASTLE, ProfilingMode, GRAPH_EXECUTOR, \
     enable_profiling_mode_for_profiling_tests, skipIfRocm
+from torch.testing._internal.jit_utils import JitTestCase, _inline_everything, \
+    RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU
 from textwrap import dedent
 from itertools import product, permutations
-from test_jit import JitTestCase, RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, \
-    backward_graph, all_backward_graphs, get_lstm_inputs, get_milstm_inputs, \
-    LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell, _inline_everything
+from test_jit import backward_graph, all_backward_graphs, get_lstm_inputs, get_milstm_inputs, \
+    LSTMCellC, LSTMCellF, LSTMCellS, MiLSTMCell
 from te_utils import CudaCodeGenExecuted


@@ -36,6 +36,13 @@ import textwrap
 RUN_CUDA = torch.cuda.is_available()
 RUN_CUDA_MULTI_GPU = RUN_CUDA and torch.cuda.device_count() > 1
+RUN_CUDA_HALF = RUN_CUDA
+if torch.cuda.is_available():
+    CUDA_VERSION = torch._C._cuda_getCompiledVersion()
+    for d in range(torch.cuda.device_count()):
+        major = torch.cuda.get_device_capability(d)[0]
+        if (major < 6):
+            RUN_CUDA_HALF = False
 def execWrapper(code, glob, loc):
     exec(code, glob, loc)
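
The RUN_CUDA_HALF flag added in this hunk disables half-precision CUDA tests on GPUs with compute capability below 6. Below is a minimal sketch of how a test can consume it, assuming the hunk above lands in torch/testing/_internal/jit_utils.py (as the new imports in the earlier hunks suggest); the test class and body are illustrative only:

    # Hypothetical consumer of the RUN_CUDA_HALF flag defined above; the class
    # and test body are illustrative, not part of this commit.
    import unittest
    import torch
    from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA_HALF

    class TestHalfExample(JitTestCase):
        @unittest.skipIf(not RUN_CUDA_HALF, "requires CUDA and compute capability >= 6")
        def test_half_add(self):
            # Only exercise fp16 arithmetic on devices that support it.
            x = torch.ones(4, device='cuda', dtype=torch.half)
            self.assertEqual((x + x).dtype, torch.half)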