Mirror of https://github.com/pytorch/pytorch.git
[BE] Unskip some tensor creation tests on Mac (#146952)
Follow-up to https://github.com/pytorch/pytorch/pull/145367. One should never use skip but rather xfail; otherwise one never knows when a test is finally fixed. `test_float_to_int_conversion_finite` was fixed on MacOS a while back (presumably around the time the Intel builds were disabled), while `test_float_to_int_conversion_nonfinite` is fixed by https://github.com/pytorch/pytorch/pull/145367, which selects architecture-appropriate reference values for the Arm ISA.

Note that the result of casting a floating-point value to an integral type is undefined if the value lies outside the integral type's dynamic range.

"Fixes" https://github.com/pytorch/pytorch/issues/38752

Pull Request resolved: https://github.com/pytorch/pytorch/pull/146952
Approved by: https://github.com/atalman, https://github.com/seemethere
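To illustrate the skip-vs-xfail point, here is a minimal sketch using plain `unittest` (the class and test names are hypothetical, and PyTorch's own test suite uses its own decorators): a test marked as an expected failure starts reporting an unexpected success once the underlying bug is fixed, which is the cue to delete the marker, whereas a skipped test never reports anything.

```python
import unittest

class SkipVsXfail(unittest.TestCase):
    # A skipped test stays silent forever, even after the bug it guards against is fixed.
    @unittest.skip("broken on MacOS, see https://github.com/pytorch/pytorch/issues/38752")
    def test_skipped_stays_silent(self):
        self.assertEqual(int(1.0), 1)

    # An expected-failure test flips to "unexpected success" once the bug is
    # fixed, signalling that the marker (and any workaround) can be removed.
    @unittest.expectedFailure
    def test_xfail_surfaces_the_fix(self):
        self.assertEqual(int(1.0), 1)  # passes, so unittest reports an unexpected success

if __name__ == "__main__":
    unittest.main()
```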
Committed by: PyTorch MergeBot
Parent: 78ebd3c502
Commit: 0acbf8039a
@@ -26,9 +26,7 @@ from torch.testing._internal.common_utils import (
    set_default_dtype,
    set_default_tensor_type,
    TEST_SCIPY,
    IS_MACOS,
    IS_PPC,
    IS_JETSON,
    IS_WINDOWS,
    IS_FBCODE,
    IS_SANDCASTLE,

@@ -1051,8 +1049,6 @@ class TestTensorCreation(TestCase):
    # errors with UBSAN. These casts are deliberate in PyTorch, however, and
    # NumPy may have the same behavior.
    @onlyNativeDeviceTypes
    @unittest.skipIf(IS_MACOS or IS_JETSON, "Test is broken on MacOS and Jetson, \
                     see https://github.com/pytorch/pytorch/issues/38752")
    @unittest.skipIf(IS_PPC, "Test is broken on PowerPC, see https://github.com/pytorch/pytorch/issues/39671")
    @dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
    def test_float_to_int_conversion_finite(self, device, dtype):

@@ -1081,10 +1077,10 @@ class TestTensorCreation(TestCase):
        self._float_to_int_conversion_helper(vals, device, dtype, refs)

    # Note: CUDA will fail this test on most dtypes, often dramatically.
    # Note: This test validates undefined behavior consistency in float-to-ints casts
    # NB: torch.uint16, torch.uint32, torch.uint64 excluded as this
    # nondeterministically fails, warning "invalid value encountered in cast"
    @onlyCPU
    @unittest.skipIf(IS_MACOS, "Nonfinite conversion results on MacOS are different from others.")
    @unittest.skipIf(IS_S390X, "Test fails for int16 on s390x. Needs investigation.")
    @dtypes(torch.bool, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64)
    def test_float_to_int_conversion_nonfinite(self, device, dtype):
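For context on why the nonfinite reference values had to become architecture-dependent, here is a small sketch (not part of the diff; the commented behavior reflects commonly observed x86 vs. Arm hardware results and is an assumption, since the cast itself is undefined behavior in C++):

```python
import torch

# Casting nonfinite floats to an integral dtype is undefined behavior, so the
# observed values differ by CPU architecture. Commonly observed (assumed, not
# guaranteed): x86's cvttss2si maps NaN/inf/-inf to INT32_MIN, while Arm's
# fcvtzs saturates (inf -> INT32_MAX, -inf -> INT32_MIN, NaN -> 0).
vals = torch.tensor([float("nan"), float("inf"), float("-inf")])
print(vals.to(torch.int32))
```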