Update Dynamo pin (#83829)

As title
Pull Request resolved: https://github.com/pytorch/pytorch/pull/83829
Approved by: https://github.com/ezyang
Authored by Animesh Jain on 2022-08-26 20:49:43 +00:00; committed by PyTorch MergeBot
parent 61b9d8fccd
commit 6a58603956
8 changed files with 13 additions and 7 deletions

@ -1 +1 @@
f19410cd8204fa1c30ca72f81142508e128be66f
058b3581bde241ed72b4092d92e561dd9d82fff0

@@ -172,7 +172,7 @@ test_jit_hooks() {
 test_dynamo() {
   pushd ../torchdynamo
-  pytest tests
+  pytest test
   popd
 }

@@ -199,6 +199,8 @@ test_dynamo_shard() {
     test_overrides \
     test_python_dispatch \
     test_fx \
+    test_package \
+    test_vmap \
     --shard "$1" "$NUM_TEST_SHARDS" \
     --verbose
   assert_git_not_dirty
@@ -597,7 +599,7 @@ test_vec256() {
 test_dynamo() {
   pushd ../torchdynamo
-  pytest tests
+  pytest test
   popd
 }

@@ -6,6 +6,7 @@ import sys
 from typing import Any, List
 import torch
+from torch.testing._internal.common_utils import skipIfTorchDynamo
 from torch.testing._internal.jit_utils import JitTestCase, make_global
@@ -599,6 +600,7 @@ class TestWith(JitTestCase):
         self.assertFalse(w.requires_grad)
+    @skipIfTorchDynamo("Torchdynamo cannot correctly handle profiler.profile calls")
     def test_with_record_function(self):
         """
         Check that torch.autograd.profiler.record_function context manager is

@@ -1858,6 +1858,7 @@ graph(%Ra, %Rb):
         self.assertEqual(training, 'aten::bernoulli_' in profile(scripted, X))
     @unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.SIMPLE, 'Testing differentiable graph')
+    @skipIfTorchDynamo("Torchdynamo cannot correctly handle profiler.profile calls")
     def test_dropout_func_requires_grad(self):
         def dropout_training(input):
             return F.dropout(input, 0.5, training=True)

@@ -19269,6 +19269,7 @@ class TestModuleGlobalHooks(TestCase):
         with self.assertRaisesRegex(RuntimeError, 'got 2, but expected 1'):
             module(input).sum().backward()
+    @skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/847")
     def test_module_backward_global_hook_writeable(self):
         module = nn.Sigmoid()
         input = torch.randn(5, 5, requires_grad=True)

@@ -10,7 +10,7 @@ from itertools import permutations, product
 from torch.testing import make_tensor
 from torch.testing._internal.common_dtype import all_types, all_types_and, floating_types_and
 from torch.testing._internal.common_utils import \
-    (TestCase, run_tests, slowTest)
+    (TestCase, run_tests, skipIfTorchDynamo, slowTest)
 from torch.testing._internal.common_device_type import \
     (instantiate_device_type_tests, dtypes, onlyNativeDeviceTypes,
     onlyCUDA, dtypesIfCUDA, dtypesIfCPU, onlyCPU, largeTensorTest)
@@ -357,6 +357,7 @@ class TestSortAndSelect(TestCase):
         for shape in shapes:
             test(shape)
+    @skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/982")
     def test_topk(self, device):
         def topKViaSort(t, k, dim, dir):
             sorted, indices = t.sort(dim, dir)
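The skips added in these test files all follow the same pattern: skipIfTorchDynamo from torch.testing._internal.common_utils takes a reason string (often a tracking-issue URL) and skips the test only when the suite is run under TorchDynamo. A minimal sketch of that usage follows; the test class and body are illustrative only and are not part of this change.

import torch
from torch.testing._internal.common_utils import TestCase, run_tests, skipIfTorchDynamo

class TestExample(TestCase):
    # Skipped only when the suite runs in Dynamo mode (see the final hunk below);
    # runs normally in a regular test job.
    @skipIfTorchDynamo("https://github.com/pytorch/torchdynamo/issues/982")
    def test_sort_is_monotonic(self):
        values, _ = torch.randn(16).sort()
        self.assertTrue((values[1:] >= values[:-1]).all().item())

if __name__ == "__main__":
    run_tests()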

@@ -923,9 +923,8 @@ class CrossRefMode(torch.overrides.TorchFunctionMode):
 TEST_WITH_TORCHDYNAMO = os.getenv('PYTORCH_TEST_WITH_DYNAMO') == '1'
 if TEST_WITH_TORCHDYNAMO:
     import torchdynamo
-    # torchdynamo.config.trace = True
-    # torchdynamo.config.debug = True
-    torchdynamo.config.print_internal_exceptions = False
+    import logging
+    torchdynamo.config.log_level = logging.ERROR
     # TODO - Collect errors with fake tensors
     torchdynamo.config.fake_tensor_propagation = False
     # Do not spend time on helper functions that are called with different inputs
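This final hunk is what gives those decorators their effect: TEST_WITH_TORCHDYNAMO is derived from the PYTORCH_TEST_WITH_DYNAMO environment variable, and when it is set the standalone torchdynamo package is imported and configured with error-level logging (replacing the removed debug flags) and with fake-tensor propagation disabled. As a rough sketch of the mechanism, a helper like skipIfTorchDynamo can be built on that flag as shown below; this is an assumed reimplementation for illustration, not the actual definition in torch.testing._internal.common_utils.

import os
import unittest

# Same opt-in gate as in the hunk above.
TEST_WITH_TORCHDYNAMO = os.getenv("PYTORCH_TEST_WITH_DYNAMO") == "1"

def skipIfTorchDynamo(msg="test doesn't currently work with TorchDynamo"):
    # Assumed sketch: wrap the test with unittest.skip only when the
    # TorchDynamo test mode is enabled; otherwise return it unchanged.
    def decorator(fn):
        if TEST_WITH_TORCHDYNAMO:
            return unittest.skip(msg)(fn)
        return fn
    return decorator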