mirror of
https://github.com/pytorch/pytorch.git
synced 2025-10-20 21:14:14 +08:00
Revert "[Graph Partition] Pass all OSS unit tests (#154667)"
This reverts commit ca7315c17162ea21b1ca5ba23f4bf6168766c7b9.
Reverted https://github.com/pytorch/pytorch/pull/154667 on behalf of https://github.com/clee2000 due to breaking inductor/test_memory.py::TestOperatorReorderForPeakMemory::test_reorder_peak_memory_lpmf [GH job link](https://github.com/pytorch/pytorch/actions/runs/16885961204/job/47836769279) [HUD commit link](ca7315c171). Note to self: bad TD ([comment](https://github.com/pytorch/pytorch/pull/154667#issuecomment-3176805477))
This commit is contained in:
@@ -2179,10 +2179,7 @@ class Scheduler:
         self.nodes = comms.reorder_compute_and_comm_for_overlap(self.nodes)
         self.process_grouped_nodes()

-        if (
-            torch._inductor.config.graph_partition
-            and torch._inductor.config.triton.cudagraphs
-        ):
+        if torch._inductor.config.graph_partition:
             self.nodes = self.maybe_reorder_for_minimizing_partition(self.nodes)
             self.nodes = self.reorder_for_partition_with_simple_dependency(self.nodes)
||||
@@ -4315,12 +4312,6 @@ class Scheduler:
     ) -> bool:
         """Return True if we should partition the inductor graph on this node"""

-        # When not using cudagraphs, keep all kernels in the `call` function
-        # instead of graph partition functions, since graph partition only brings
-        # benefit to cudagraph
-        if not torch._inductor.config.triton.cudagraphs:
-            return True
-
         # avoid duplicating logs when should_partition is called multiple times
         # on the same node
         def noop_log(msg: str, node: Optional[BaseSchedulerNode]) -> None:
Reference in New Issue
Block a user