From 8cb2fb44f29f6b19400a04ea970807f651657b0c Mon Sep 17 00:00:00 2001
From: Nan Zhang
Date: Fri, 17 Oct 2025 21:08:29 +0000
Subject: [PATCH] [Inductor] Support fallback for all gemm like ops (#165755)

Summary: Fill the op_overload field for the bmm aten ops so they can be
converted properly in the wrapper_fxir backend

Reviewed By: StellarrZ

Differential Revision: D84840948

Pull Request resolved: https://github.com/pytorch/pytorch/pull/165755
Approved by: https://github.com/blaine-rister
---
 torch/_inductor/kernel/bmm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/torch/_inductor/kernel/bmm.py b/torch/_inductor/kernel/bmm.py
index b22e7a1f6149..06c4a63497d7 100644
--- a/torch/_inductor/kernel/bmm.py
+++ b/torch/_inductor/kernel/bmm.py
@@ -119,7 +119,7 @@ bmm_template = TritonTemplate(
     cache_codegen_enabled_for_template=True,
 )
 
-aten_bmm = ExternKernelChoice(torch.bmm, "at::bmm_out")
+aten_bmm = ExternKernelChoice(torch.bmm, "at::bmm_out", op_overload=aten.bmm.out)
 aten_bmm_dtype = ExternKernelChoice(
     torch.bmm,
     "at::_bmm_out_dtype_cuda",
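
For reference, a minimal sketch (not part of the patch) of the overload being wired up: torch.ops.aten.bmm.out is the Python-side handle for the ATen out-variant at::bmm_out that this ExternKernelChoice falls back to; the tensor shapes below are purely illustrative.

    import torch

    # aten.bmm.out is the out-variant overload corresponding to at::bmm_out,
    # i.e. the op_overload now attached to the aten_bmm ExternKernelChoice.
    bmm_out = torch.ops.aten.bmm.out

    a = torch.randn(2, 3, 4)
    b = torch.randn(2, 4, 5)
    out = torch.empty(2, 3, 5)

    bmm_out(a, b, out=out)  # writes the batched matmul result into `out`
    assert torch.allclose(out, torch.bmm(a, b))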