mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
[dynamo] control one_graph behavior additionally through config (#154283)
`torch.compile` now always goes through `torch._dynamo._optimize`. fullgraph is now implemented in `torch.compile` by looking at `config.error_on_graph_break`. Export still goes through `torch._dynamo._optimize_assert`, which uses `tx.one_graph` instead of `config.error_on_graph_break`. Pull Request resolved: https://github.com/pytorch/pytorch/pull/154283 Approved by: https://github.com/jansel, https://github.com/anijain2305
This commit is contained in:
committed by
PyTorch MergeBot
parent
2c68c3e8d5
commit
b46eb1ccaf
@ -64,6 +64,7 @@ from torch.testing._internal.common_utils import (
|
||||
skipIfCrossRef,
|
||||
skipIfRocm,
|
||||
skipIfTorchDynamo,
|
||||
skipIfWindows,
|
||||
TemporaryFileName,
|
||||
TEST_WITH_TORCHDYNAMO,
|
||||
TestCase,
|
||||
@ -2226,6 +2227,9 @@ class FakeTensorDispatchCache(TestCase):
|
||||
lambda: torch.ops.aten.index(x, [None, idx_tensor1]),
|
||||
)
|
||||
|
||||
@skipIfWindows(
|
||||
msg="weird bug - cache may not be cleared after https://github.com/pytorch/pytorch/pull/154283"
|
||||
)
|
||||
@skipIfTorchDynamo("cache hit/miss changes with invoke_subgraph caching")
|
||||
def test_invoke_subgraph(self):
|
||||
"""
|
||||
|
||||
Reference in New Issue
Block a user