From c73f5080de545039eb4383d77e4d10a301a0bc77 Mon Sep 17 00:00:00 2001
From: Tugsbayasgalan Manlaibaatar
Date: Fri, 17 Oct 2025 10:07:14 -0700
Subject: [PATCH] Migrating some more callsites (#163580)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/163580
Approved by: https://github.com/avikchaudhuri
ghstack dependencies: #165582
---
 .../benchmarks/sum_floordiv.py              |  7 +++++-
 .../pr_time_benchmarks/expected_results.csv |  2 +-
 test/dynamo/test_export.py                  | 25 ++++++++++---------
 test/inductor/test_aot_inductor_utils.py    |  5 +++-
 test/inductor/test_memory_planning.py       |  4 +--
 5 files changed, 26 insertions(+), 17 deletions(-)

diff --git a/benchmarks/dynamo/pr_time_benchmarks/benchmarks/sum_floordiv.py b/benchmarks/dynamo/pr_time_benchmarks/benchmarks/sum_floordiv.py
index 8a292f602c03..a1f3a28a466a 100644
--- a/benchmarks/dynamo/pr_time_benchmarks/benchmarks/sum_floordiv.py
+++ b/benchmarks/dynamo/pr_time_benchmarks/benchmarks/sum_floordiv.py
@@ -3,6 +3,7 @@ import sys
 from benchmark_base import BenchmarkBase

 import torch
+from torch._dynamo.utils import CompileTimeInstructionCounter


 class Benchmark(BenchmarkBase):
@@ -32,7 +33,11 @@ class Benchmark(BenchmarkBase):
     def _work(self):
         # enable_cpp_symbolic_shape_guards has impact on this benchmark
         # Keep using False value for consistency.
-        with torch._dynamo.config.patch("enable_cpp_symbolic_shape_guards", False):
+        with (
+            torch._dynamo.config.patch("enable_cpp_symbolic_shape_guards", False),
+            torch._export.config.patch(use_new_tracer_experimental=True),
+            CompileTimeInstructionCounter.record(),
+        ):
             torch.export.export(self.m, (self.input,), strict=True)


diff --git a/benchmarks/dynamo/pr_time_benchmarks/expected_results.csv b/benchmarks/dynamo/pr_time_benchmarks/expected_results.csv
index 2ebde03ffea4..5ca33b7600b2 100644
--- a/benchmarks/dynamo/pr_time_benchmarks/expected_results.csv
+++ b/benchmarks/dynamo/pr_time_benchmarks/expected_results.csv
@@ -38,7 +38,7 @@ update_hint_regression,compile_time_instruction_count,1719000000,0.1



-sum_floordiv_regression,compile_time_instruction_count,966100000,0.1
+sum_floordiv_regression,compile_time_instruction_count,3686995725,0.1



diff --git a/test/dynamo/test_export.py b/test/dynamo/test_export.py
index f3f438d241af..db69ce0d1d20 100644
--- a/test/dynamo/test_export.py
+++ b/test/dynamo/test_export.py
@@ -2712,19 +2712,20 @@ def forward(self, x):
             torch._dynamo.exc.UserError,
             ".*y.*size.*2.* = 4 is not equal to .*x.*size.*1.* = 3",
         ):
-            torch.export.export(bar, (x, y), dynamic_shapes=dynamic_shapes, strict=True)
+            with torch._export.config.patch(use_new_tracer_experimental=True):
+                torch.export.export(
+                    bar, (x, y), dynamic_shapes=dynamic_shapes, strict=True
+                )
         y = torch.randn(10, 3, 3)
-        ebar = torch.export.export(
-            bar, (x, y), dynamic_shapes=dynamic_shapes, strict=True
-        )
-        self.assertEqual(
-            [
-                str(node.meta["val"].shape)
-                for node in ebar.graph_module.graph.nodes
-                if node.op == "placeholder"
-            ],
-            ["torch.Size([s17, s27, s27])", "torch.Size([s17, s27, s27])"],
-        )
+        with torch._export.config.patch(use_new_tracer_experimental=True):
+            ebar = torch.export.export(
+                bar, (x, y), dynamic_shapes=dynamic_shapes, strict=True
+            )
+
+        for node in ebar.graph_module.graph.nodes:
+            if node.op == "placeholder":
+                shape = node.meta["val"].shape
+                self.assertEqual(shape[1], shape[2])

     @torch._dynamo.config.patch(
         capture_dynamic_output_shape_ops=True,
diff --git a/test/inductor/test_aot_inductor_utils.py b/test/inductor/test_aot_inductor_utils.py
index a60952dabe70..2a9f593c5a6c 100644
--- a/test/inductor/test_aot_inductor_utils.py
+++ b/test/inductor/test_aot_inductor_utils.py
@@ -157,7 +157,10 @@ class AOTIRunnerUtil:
         # This should really be the default behavior of torch.export.export
         model = WrapperModule(model)

-        with torch.no_grad():
+        with (
+            torch.no_grad(),
+            torch._export.config.patch(use_new_tracer_experimental=True),
+        ):
             # strict=False needs extra migration work
             ep = torch.export.export(
                 model,
diff --git a/test/inductor/test_memory_planning.py b/test/inductor/test_memory_planning.py
index 1bcdeaa08e95..867121cd68f9 100644
--- a/test/inductor/test_memory_planning.py
+++ b/test/inductor/test_memory_planning.py
@@ -92,13 +92,13 @@ class TestMemoryPlanning(TestCase):
         )

         FileCheck().check(
-            "int64_t int_array_0[] = {24L + align(12L*s77), };"
+            "int64_t int_array_0[] = {24L + align(12L*s6), };"
         ).check_next("int64_t int_array_1[] = {1L, };").check_next(
             "AtenTensorHandle pool1_handle;"
         ).check_next(
             "aoti_torch_empty_strided(1, int_array_0, int_array_1,"
         ).check_next("RAIIAtenTensorHandle pool1(pool1_handle);").check_next(
-            "int64_t int_array_2[] = {s77, 3L};"
+            "int64_t int_array_2[] = {s6, 3L};"
         ).check_next("int64_t int_array_3[] = {3L, 1L};").check_next(
             "AtenTensorHandle tmp_tensor_handle_0;"
         ).check_next("aoti_torch__alloc_from_pool(pool1, 0").run(code)
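
Reviewer note (not part of the patch): every callsite above is migrated the same way, by wrapping an existing strict torch.export.export call in torch._export.config.patch(use_new_tracer_experimental=True). Below is a minimal, self-contained sketch of that pattern; the toy module, tensor shapes, and variable names are illustrative assumptions, and only the config patch and the export call itself mirror the diff.

import torch
import torch._export.config  # make the export config module available for the patch below


class ToyModel(torch.nn.Module):
    def forward(self, x):
        # Arbitrary integer math so strict export has something to trace.
        return x.sum(dim=-1) // 2


model = ToyModel()
example_input = torch.randint(0, 10, (4, 8))

# Before: a plain strict-mode export call.
# ep = torch.export.export(model, (example_input,), strict=True)

# After: the same call, opted into the experimental tracer via the config
# patch applied at each callsite in this PR.
with torch._export.config.patch(use_new_tracer_experimental=True):
    ep = torch.export.export(model, (example_input,), strict=True)

print(ep.graph_module.graph)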