[torchdynamo hash update] update the pinned torchdynamo hash (#85774)

This PR is auto-generated nightly by [this action](https://github.com/pytorch/pytorch/blob/master/.github/workflows/_update-commit-hash.yml).
Update the pinned torchdynamo hash.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/85774
Approved by: https://github.com/pytorchbot, https://github.com/malfet
This commit is contained in:
PyTorch MergeBot
2022-10-05 17:02:33 +00:00
parent 97d2e1df55
commit 7f607e8cb5
4 changed files with 7 additions and 4 deletions

View File

@ -21,7 +21,7 @@ torch._C._jit_set_profiling_executor(True)
torch._C._get_graph_executor_optimize(True)
from torch.testing._internal.common_utils import run_tests, ProfilingMode, GRAPH_EXECUTOR, \
enable_profiling_mode_for_profiling_tests, slowTest
enable_profiling_mode_for_profiling_tests, slowTest, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, \
RUN_CUDA, RUN_CUDA_HALF, RUN_CUDA_MULTI_GPU, warmup_backward, set_fusion_group_inlining, \
clone_inputs, get_traced_sample_variant_pairs, TensorExprTestOptions, NoTracerWarnContextManager
@ -2657,6 +2657,7 @@ def f({', '.join(param_names)}):
self.assertEqual(kernel.fallback(tuple(param_values)), correct_val)
@onlyCPU
@skipIfTorchDynamo("TorchDynamo fails here for unknown reasons")
@unittest.skipIf(not LLVM_ENABLED, "Compiles with TensorExprKernel")
@ops([op for op in op_db if get_name(op) in works_list], allowed_dtypes=(torch.float,))
def test_working(self, device, dtype, op):