Revert "[Graph Partition] Pass all OSS unit tests (#154667)"

This reverts commit ca7315c17162ea21b1ca5ba23f4bf6168766c7b9.

Reverted https://github.com/pytorch/pytorch/pull/154667 on behalf of https://github.com/clee2000 due to broke inductor/test_memory.py::TestOperatorReorderForPeakMemory::test_reorder_peak_memory_lpmf [GH job link](https://github.com/pytorch/pytorch/actions/runs/16885961204/job/47836769279) [HUD commit link](ca7315c171) note to self: bad TD ([comment](https://github.com/pytorch/pytorch/pull/154667#issuecomment-3176805477))
This commit is contained in:
PyTorch MergeBot
2025-08-11 20:34:27 +00:00
parent 9eedd2a20b
commit 09381f5dac
11 changed files with 325 additions and 378 deletions

View File

@@ -2179,10 +2179,7 @@ class Scheduler:
self.nodes = comms.reorder_compute_and_comm_for_overlap(self.nodes)
self.process_grouped_nodes()
if (
torch._inductor.config.graph_partition
and torch._inductor.config.triton.cudagraphs
):
if torch._inductor.config.graph_partition:
self.nodes = self.maybe_reorder_for_minimizing_partition(self.nodes)
self.nodes = self.reorder_for_partition_with_simple_dependency(self.nodes)
@@ -4315,12 +4312,6 @@ class Scheduler:
) -> bool:
"""Return True if we should partition the inductor graph on this node"""
# When not using cudagraphs, keep all kernels in the `call` function
# instead of graph partition functions, since graph partition only brings
# benefit to cudagraph
if not torch._inductor.config.triton.cudagraphs:
return True
# avoid duplicating logs when should_partition is called multiple times
# on the same node
def noop_log(msg: str, node: Optional[BaseSchedulerNode]) -> None: