fix sigmoid for torch.complex datatypes on CPU (#140391)

Fix https://github.com/pytorch/pytorch/issues/135777.
The issue is caused by missing special-case handling in the vectorized implementation of `reciprocal` when the real or imaginary part is 0/Inf/NaN. For correctness, I temporarily fall back the `reciprocal` implementation to the scalar implementation.
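For reference, a minimal repro sketch in the spirit of the linked issue (the exact inputs below are illustrative, not copied from the report):

```python
# Repro sketch: complex inputs whose real/imag parts are 0, Inf, or NaN
# exercise the special cases that the vectorized CPU `reciprocal` mishandled.
import cmath
import torch

inf, nan = float("inf"), float("nan")
x = torch.tensor([complex(0.0, 0.0), complex(inf, 0.0), complex(nan, 1.0)],
                 dtype=torch.complex64)

# sigmoid(z) = 1 / (1 + exp(-z)) goes through `reciprocal`, which is why
# the reciprocal bug surfaced in sigmoid on CPU.
print(torch.sigmoid(x))

# Scalar reference via Python's cmath; after the fix, the CPU results above
# should agree with this at the extremal points.
print([1 / (1 + cmath.exp(-v)) for v in x.tolist()])
```

Falling back to the scalar path trades some vectorization speed for correct special-case semantics; the vectorized path can be restored later once it handles 0/Inf/NaN explicitly.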

Pull Request resolved: https://github.com/pytorch/pytorch/pull/140391
Approved by: https://github.com/mingfeima, https://github.com/Skylion007
ghstack dependencies: #140358
Authored by Sun, Jiayi on 2025-01-19 18:53:42 -08:00
Committed by PyTorch MergeBot
parent 507bf65c6a
commit c922ccb7c4
6 changed files with 58 additions and 33 deletions

@@ -20081,9 +20081,9 @@ op_db: List[OpInfo] = [
         skips=(
             # Reference: https://github.com/pytorch/pytorch/issues/56012
             DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_extremal',
-                         dtypes=[torch.complex64, torch.cdouble]),
+                         dtypes=[torch.complex64, torch.cdouble], device_type='cuda'),
             DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs', 'test_reference_numerics_large',
-                         dtypes=[torch.chalf, torch.complex64, torch.cdouble])),
+                         dtypes=[torch.chalf, torch.complex64, torch.cdouble], device_type='cuda')),
         dtypes=all_types_and_complex_and(torch.bool, torch.float16, torch.bfloat16),
         dtypesIfCUDA=all_types_and_complex_and(torch.complex32, torch.bool, torch.half, torch.bfloat16),
         supports_forward_ad=True,
@@ -22579,10 +22579,10 @@ python_ref_db = [
             # Reference: https://github.com/pytorch/pytorch/issues/56012
             DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                          'test_reference_numerics_extremal',
-                         dtypes=[torch.complex64, torch.cdouble]),
+                         dtypes=[torch.complex64, torch.cdouble], device_type='cuda'),
             DecorateInfo(unittest.skip("Skipped!"), 'TestUnaryUfuncs',
                          'test_reference_numerics_large',
-                         dtypes=[torch.chalf, torch.complex64, torch.cdouble])
+                         dtypes=[torch.chalf, torch.complex64, torch.cdouble], device_type='cuda')
         ),
     ),
     ElementwiseUnaryPythonRefInfo(