[aoti][mps] Add fused_rms and sdpa_mps fallback ops (#156844)
Needed for llama3.1

Pull Request resolved: https://github.com/pytorch/pytorch/pull/156844
Approved by: https://github.com/desertfire
ghstack dependencies: #156843
Committed by: PyTorch MergeBot
Parent: 17dab018e3
Commit: aff9c1eec5
@@ -39,10 +39,12 @@ inductor_fallback_ops: dict[str, dict[str, list[str]]] = {
     "aten._flash_attention_forward.default": {},
     "aten._fused_moving_avg_obs_fq_helper_functional.default": {},
     "aten._fused_moving_avg_obs_fq_helper.default": {},
+    "aten._fused_rms_norm.default": {},
     "aten._histogramdd_from_bin_cts.default": {},
     "aten._int_mm.out": {},
     "aten._pdist_backward.default": {},
     "aten._pdist_forward.default": {},
+    "aten._scaled_dot_product_attention_math_for_mps.default": {},
     "aten._scaled_dot_product_cudnn_attention_backward.default": {},
     "aten._scaled_dot_product_cudnn_attention.default": {},
     "aten._scaled_dot_product_efficient_attention_backward.default": {},
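For context, here is a minimal eager-mode sketch of the kind of calls these two fallback entries are meant to cover when compiling llama3.1-style models for MPS with AOT Inductor. The mapping from F.rms_norm and F.scaled_dot_product_attention to the two ATen overloads added above is an assumption inferred from the op names, not something stated in this commit.

# Hedged sketch: exercising the ops the new fallback entries name.
# Assumption: on an MPS device, F.rms_norm dispatches to
# aten._fused_rms_norm.default, and F.scaled_dot_product_attention can take the
# aten._scaled_dot_product_attention_math_for_mps.default path; with the entries
# above, AOT Inductor would route both through the C-shim fallback rather than
# trying to generate a kernel for them.
import torch
import torch.nn.functional as F

if torch.backends.mps.is_available():
    x = torch.randn(2, 8, 16, device="mps")
    # RMSNorm over the last dimension.
    y = F.rms_norm(x, normalized_shape=(16,))

    # Scaled dot-product attention with (batch, heads, seq, head_dim) inputs.
    q = k = v = torch.randn(2, 4, 8, 16, device="mps")
    out = F.scaled_dot_product_attention(q, k, v)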