Revert "[inductor] Estimate peak memory allocfree and applying to reordering collectives (#160113)"
This reverts commit 9d18bf01b1661d227f6af41ac07a1e9ef20a9e1a. Reverted https://github.com/pytorch/pytorch/pull/160113 on behalf of https://github.com/huydhn due to Sorry for reverting your change, but lots of failures showing up after this lands ([comment](https://github.com/pytorch/pytorch/pull/160113#issuecomment-3209487237))
@@ -2160,12 +2160,6 @@ class Scheduler:
             OrderedSet(V.graph.get_output_names()),
         )
         if config.reorder_for_compute_comm_overlap:
-            if not config.reorder_for_peak_memory:
-                from .memory import assign_memory_planning_info_for_scheduler_buffers
-
-                assign_memory_planning_info_for_scheduler_buffers(
-                    self.nodes, self.name_to_buf
-                )
             from torch._logging import trace_structured

             trace_structured(
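For readers skimming the revert, a minimal, self-contained sketch of the guard this hunk removes, under the assumption drawn from the diff: when collective reordering was enabled but peak-memory reordering was not, the reverted change eagerly computed memory-planning info for every scheduler buffer before the reordering pass ran. The names below (`Buf`, `assign_planning_info`, `plan`) are hypothetical stand-ins, not the actual PyTorch scheduler API.

```python
# Hypothetical sketch only; names are stand-ins, not PyTorch internals.
from dataclasses import dataclass, field


@dataclass
class Buf:
    name: str
    planning_info: dict = field(default_factory=dict)  # placeholder metadata


def assign_planning_info(bufs):
    # Stand-in for assign_memory_planning_info_for_scheduler_buffers:
    # tag each buffer with the info a later reordering pass would consult.
    for buf in bufs:
        buf.planning_info["estimated_size"] = 0


def plan(bufs, reorder_for_compute_comm_overlap, reorder_for_peak_memory):
    if reorder_for_compute_comm_overlap:
        if not reorder_for_peak_memory:
            # The guard removed by this revert: ensure planning info exists
            # even when the peak-memory reordering pass is disabled.
            assign_planning_info(bufs)
        # ... collective reordering would run here ...
    return bufs


print(plan([Buf("buf0"), Buf("buf1")], True, False))
```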
@@ -2562,7 +2556,7 @@ class Scheduler:
         )

         graph_outputs: OrderedSet[str] = OrderedSet(V.graph.get_output_names())
-        buf_info_list, _, _ = compute_memory_timeline(
+        buf_info_list, _ = compute_memory_timeline(
             self.nodes,
             name_to_freeable_input_buf,
             graph_outputs,
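The second hunk is the call site tracking the return arity of `compute_memory_timeline`: the reverted PR evidently had it return three values, so the caller unpacked three; after the revert it returns two again. A minimal, hypothetical illustration of why the unpacking must change in lockstep (the two functions below are stand-ins, not the real `torch._inductor.memory` API):

```python
# Hypothetical stand-ins; the real compute_memory_timeline returns
# scheduler-specific objects, not these placeholders.
def timeline_two_values():
    return ["buf_info"], {"snapshot": 0}        # return shape after the revert


def timeline_three_values():
    return ["buf_info"], {"snapshot": 0}, 123   # return shape before the revert


buf_info_list, _ = timeline_two_values()        # matches the post-revert call site
buf_info_list, _, _ = timeline_three_values()   # matches the pre-revert call site
try:
    buf_info_list, _ = timeline_three_values()  # arity mismatch fails at runtime
except ValueError as err:
    print(f"unpacking error: {err}")
```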