Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
[torchdynamo hash update] update the pinned torchdynamo hash (#85774)
This PR is auto-generated nightly by [this action](https://github.com/pytorch/pytorch/blob/master/.github/workflows/_update-commit-hash.yml). Update the pinned torchdynamo hash.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/85774
Approved by: https://github.com/pytorchbot, https://github.com/malfet
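For context, a nightly pin-update job of this kind typically resolves the tip commit of the tracked repository and rewrites a plain-text pin file that CI later checks out. The Python sketch below illustrates that general pattern only; it is not the actual _update-commit-hash.yml workflow, and the repository name, pin-file path, and branch are assumptions.

# Hypothetical sketch of the nightly pin-update pattern (not the real workflow).
# Assumptions: the tracked repo is pytorch/torchdynamo, the pin is a plain text
# file, and the default branch is "main".
import json
import urllib.request

PIN_FILE = ".github/ci_commit_pins/torchdynamo.txt"  # assumed path
API_URL = "https://api.github.com/repos/pytorch/torchdynamo/commits/main"

def latest_commit_sha() -> str:
    # Query the GitHub API for the tip commit of the tracked branch.
    with urllib.request.urlopen(API_URL) as resp:
        return json.load(resp)["sha"]

def update_pin() -> bool:
    # Rewrite the pin file only when the hash actually changed, so the job
    # can skip opening a PR on no-op nights.
    new_sha = latest_commit_sha()
    with open(PIN_FILE) as f:
        old_sha = f.read().strip()
    if new_sha == old_sha:
        return False
    with open(PIN_FILE, "w") as f:
        f.write(new_sha + "\n")
    return True

if __name__ == "__main__":
    print("pin updated" if update_pin() else "pin already current")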
@@ -21,7 +21,7 @@ torch._C._jit_set_profiling_executor(True)
 torch._C._get_graph_executor_optimize(True)
 
 from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, \
-    enable_profiling_mode_for_profiling_tests, slowTest
+    enable_profiling_mode_for_profiling_tests, slowTest, skipIfTorchDynamo
 from torch.testing._internal.jit_utils import JitTestCase, \
     RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward, set_fusion_group_inlining, \
     clone_inputs, get_traced_sample_variant_pairs, TensorExprTestOptions, NoTracerWarnContextManager
@@ -2657,6 +2657,7 @@ def f({', '.join(param_names)}):
         self.assertEqual(kernel.fallback(tuple(param_values)), correct_val)
 
     @onlyCPU
+    @skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
     @unittest.skipIf(not LLVM_ENABLED, "Compiles with TensorExprKernel")
     @ops([op for op in op_db if get_name(op) in works_list], allowed_dtypes=(torch.float,))
     def test_working(self, device, dtype, op):
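The decorator added in the second hunk is a conditional skip. As a rough illustration of the mechanism, the sketch below shows how such a guard can be built on top of unittest.skipIf; the environment-variable name used here is an assumption for illustration, not necessarily what torch.testing._internal.common_utils actually checks.

# Minimal sketch of a conditional-skip decorator in the style of
# skipIfTorchDynamo. The env var name is a hypothetical stand-in; the real
# helper lives in torch.testing._internal.common_utils.
import os
import unittest

def skip_if_torchdynamo(msg="test does not work with TorchDynamo"):
    # Skip the decorated test only when the suite is being run under
    # TorchDynamo (signalled here by an assumed environment variable).
    running_with_dynamo = os.environ.get("PYTORCH_TEST_WITH_DYNAMO") == "1"
    return unittest.skipIf(running_with_dynamo, msg)

class ExampleTest(unittest.TestCase):
    @skip_if_torchdynamo("TorchDynamo fails here for unknown reasons")
    def test_working(self):
        self.assertEqual(1 + 1, 2)

if __name__ == "__main__":
    unittest.main()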