Add suppressions to torch/_inductor (#165062)

Adds suppressions so that pyrefly will typecheck clean: https://github.com/pytorch/pytorch/issues/163283

This directory's changes are split across two PRs to keep each one from being too large.

Test plan:
dmypy restart && python3 scripts/lintrunner.py -a
pyrefly check

step 1: delete the corresponding lines from the project-excludes field in the pyrefly.toml file
step 2: run pyrefly check
step 3: add suppressions, clean up unused suppressions
before: https://gist.github.com/maggiemoss/4b3bf2037014e116bc00706a16aef199

after:
INFO 0 errors (6,884 ignored)
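
Each suppression is an inline "# pyrefly: ignore # <error-code>" comment placed on the line above the flagged statement, as the hunks below show. For illustration only (the helper here is made up and not part of this PR), the most common pattern suppressed in this file, a possibly-unbound name, looks like this:

    def first_positive(values: list[float]) -> float:
        for v in values:
            if v > 0:
                result = v
                break
        # pyrefly cannot prove `result` is bound on every path, so it reports
        # unbound-name for the return; the ignore line silences just that diagnostic.
        # pyrefly: ignore # unbound-name
        return result

Keeping the error code after the second "#" makes unused suppressions easy to spot during the cleanup in step 3.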

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165062
Approved by: https://github.com/oulgen, https://github.com/mlazos
Author: Maggie Moss
Date: 2025-10-09 20:34:15 +00:00
Committed by: PyTorch MergeBot
Parent: e7fd296930
Commit: 9944cac6e6
76 changed files with 445 additions and 33 deletions

@@ -1019,6 +1019,7 @@ def maybe_estimate_runtime_benchmark(snode: BaseSchedulerNode) -> Optional[float
         if mm_fn is None:
             return None
         bench_fn = mm_fn
+        # pyrefly: ignore # unbound-name
         args_kwargs_fn = lambda: snode_args_kwargs(snode)  # noqa: E731
     else:
         return None
@@ -1295,6 +1296,7 @@ class SchedulerNode(BaseSchedulerNode):
         new_order = self_dep.decide_loop_order_to_match(other_dep)
         if new_order:
+            # pyrefly: ignore # bad-assignment
             metrics.num_loop_reordering += 1
             loop_ordering_log.debug(
                 "Reorder loops for %s with order %s", self.get_name(), new_order
@@ -1591,6 +1593,7 @@ class FusedSchedulerNode(BaseSchedulerNode):
                 self.get_name(),
             )
             return False
+        # pyrefly: ignore # bad-assignment
         metrics.num_loop_reordering += 1
         loop_ordering_log.debug(
             "Reorder loops for fused node %s with order %s", self.get_name(), new_order
@@ -2322,6 +2325,7 @@ class Scheduler:
         self.name_to_fused_node = {n.get_name(): n for n in self.nodes}
         self.compute_ancestors()
+        # pyrefly: ignore # bad-assignment
         metrics.ir_nodes_pre_fusion += len(self.nodes)
         from torch._inductor.debug import log_ir_post_fusion, log_ir_pre_fusion
@@ -2549,6 +2553,7 @@ class Scheduler:
                 ]
                 return DedupList(new_items, new_membership)
+        # pyrefly: ignore # not-a-type
         name_to_users: defaultdict[str, DedupList[NodeUser]] = collections.defaultdict(
             DedupList
         )
@@ -2585,12 +2590,14 @@ class Scheduler:
                     else:
                         name_to_users[buf1_name] = name_to_users[buf2_name]
+        # pyrefly: ignore # not-a-type
         def rename(n: str) -> str:
             if n in self.mutation_renames:
                 return rename(self.mutation_renames[n])
             return n
         def add_user(
+            # pyrefly: ignore # not-a-type
             used_by_name: str,
             user_node: Union[BaseSchedulerNode, OutputNode],
             can_inplace: bool = False,
@@ -2600,6 +2607,7 @@ class Scheduler:
                 NodeUser(user_node, can_inplace, is_weak)
             )
+        # pyrefly: ignore # not-a-type
         unbacked_symbol_to_origin_node: dict[sympy.Symbol, Optional[str]] = {}
         # NB: None means that the dependency is on an input. Don't actually
@@ -2658,6 +2666,7 @@ class Scheduler:
                 and (dep := next(iter(node.read_writes.writes)))
                 and isinstance(dep, MemoryDep)
             ):
+                # pyrefly: ignore # unbound-name
                 node_mode = dep.mode
             else:
                 node_mode = None
@@ -3429,9 +3438,12 @@ class Scheduler:
                         str(e),
                     )
                     continue
+                # pyrefly: ignore # missing-attribute
                 with multi_node.swap_as_triton_caller(choice):
                     ms_fused, path = self.benchmark_codegened_module(
-                        mod_fused, device
+                        mod_fused,
+                        # pyrefly: ignore # bad-argument-type
+                        device,
                     )
                     new_timings[choice] = ms_fused
                     if ms_fused < min_ms_fused:
@@ -3443,12 +3455,15 @@ class Scheduler:
             if min_ms_fused < (ms1 + ms2) and ms_fused_choice is not None:
                 if config.multi_kernel_hints:
                     hint_override_best_fusion_choice[None] = ms_fused_choice
+                    # pyrefly: ignore # missing-attribute
                     multi_node.finalize_as_triton_callers(
                         hint_override_best_fusion_choice
                     )
                 else:
+                    # pyrefly: ignore # missing-attribute
                     multi_node.finalize_as_triton_caller(ms_fused_choice)
+                # pyrefly: ignore # missing-attribute
                 multi_node._choice_timings[None] = new_timings
                 return True
             else:
@@ -3478,21 +3493,27 @@ class Scheduler:
             fut.result()
         ms1, path1 = self.benchmark_codegened_module(
-            future_and_mod_l1[1], device
+            future_and_mod_l1[1],
+            # pyrefly: ignore # bad-argument-type
+            device,
         )
         if math.isinf(ms1):
             why("register spilling of the first kernel")
             return False
         ms2, path2 = self.benchmark_codegened_module(
-            future_and_mod_l2[1], device
+            future_and_mod_l2[1],
+            # pyrefly: ignore # bad-argument-type
+            device,
         )
         if math.isinf(ms2):
             why("register spilling of the second kernel")
             return False
         ms_fused, path_fused = self.benchmark_codegened_module(
-            future_and_mod_l1_fused[1], device
+            future_and_mod_l1_fused[1],
+            # pyrefly: ignore # bad-argument-type
+            device,
         )
         if math.isinf(ms_fused):
             why("register spilling of the fused kernel")
@@ -4323,6 +4344,7 @@ class Scheduler:
         if config.expand_dimension_for_pointwise_nodes and (
             expand_analysis := self.get_expand_dim_for_pointwise_nodes(node1, node2)
         ):
+            # pyrefly: ignore # unbound-name
             (expand_dim, smaller_node, expand_size) = expand_analysis
             smaller_node.expand_dimension_for_pointwise_node(expand_dim, expand_size)
         shared_data_score = self.score_fusion_memory(node1, node2)
@@ -4633,6 +4655,7 @@ class Scheduler:
                 device.type == "cuda"
                 and (device_props := torch.cuda.get_device_properties(device)).major < 7
             ):
+                # pyrefly: ignore # unbound-name
                 raise GPUTooOldForTriton(device_props, inspect.currentframe())
             elif is_gpu(device.type) and not device.type == "mps":
                 raise TritonMissing(inspect.currentframe())
@@ -4930,6 +4953,7 @@ class Scheduler:
             if isinstance(buf.node, ir.MutationOutput) and (
                 real_name := self.mutation_real_name.get(buf_name, None)
             ):
+                # pyrefly: ignore # unbound-name
                 return is_none_layout(real_name)
             return True
@@ -5028,6 +5052,7 @@ class Scheduler:
             signatures.append(partition_signature)
             unmet_output_names = partition_input_names.union(
+                # pyrefly: ignore # unsupported-operation
                 unmet_output_names - returned_output_names
             )
@@ -5410,6 +5435,7 @@ class Scheduler:
             self.current_device = self.default_device_context
+        # pyrefly: ignore # unbound-name
         if self.default_device_context and config.triton.autotune_at_compile_time:
             V.graph.wrapper_code.write_get_raw_stream_header()
@@ -5453,6 +5479,7 @@ class Scheduler:
                 prologue, template_node, epilogue = node.get_prologue_template_epilogue(
                     list(node.get_nodes())
                 )
+                # pyrefly: ignore # unbound-name
                 self.get_backend(device).codegen_template(
                     template_node, epilogue, prologue
                 )
@@ -5461,6 +5488,7 @@ class Scheduler:
                 self.codegen_extern_call(node)
             elif node.is_foreach():
                 node = typing.cast(ForeachKernelSchedulerNode, node)
+                # pyrefly: ignore # unbound-name
                 backend_ = self.get_backend(device)
                 from .codegen.cuda_combined_scheduling import CUDACombinedScheduling
                 from .codegen.simd import SIMDScheduling
@@ -5471,12 +5499,15 @@ class Scheduler:
                     raise AssertionError(f"{type(self)=}")
                 backend.codegen_combo_kernel(node)
             elif isinstance(node, (FusedSchedulerNode, SchedulerNode)):
+                # pyrefly: ignore # unbound-name
                 self.get_backend(device).codegen_node(node)
             else:
                 assert isinstance(node, NopKernelSchedulerNode)
                 node.mark_run()
+            # pyrefly: ignore # unbound-name
             if config.triton.debug_sync_kernel:
+                # pyrefly: ignore # unbound-name
                 self.get_backend(device).codegen_sync()
             self.available_buffer_names.update(node.get_buffer_names())