Extend CPU implementation of MSELoss to BF16 (#139959)

It's strange that it has not been implemented for this type yet.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/139959
Approved by: https://github.com/jgong5, https://github.com/janeyx99
ghstack dependencies: #139961
This commit is contained in:
Nikita Shulga
2024-11-07 08:50:02 -08:00
committed by PyTorch MergeBot
parent 22dd17c7bb
commit ae01f2b61b
3 changed files with 3 additions and 6 deletions

View File

@ -20800,10 +20800,7 @@ op_db: List[OpInfo] = [
supports_out=False,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
dtypes=floating_types_and(torch.float16),
backward_dtypes=floating_types(),
dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
backward_dtypesIfCUDA=floating_types_and(torch.bfloat16, torch.float16),
dtypes=floating_types_and(torch.float16, torch.bfloat16),
skips=(
# RuntimeError: input->type()->kind() == TypeKind::OptionalType
# INTERNAL ASSERT FAILED at "../torch/csrc/jit/passes/utils/check_alias_annotation.cpp":252,