diff --git a/test/test_autograd.py b/test/test_autograd.py
index c79e2afec018..6f9d10c0598a 100644
--- a/test/test_autograd.py
+++ b/test/test_autograd.py
@@ -40,7 +40,7 @@ from torch.testing._internal.common_methods_invocations import mask_not_all_zero
 from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm, onlyCPU,
                                                         onlyCUDA, dtypes, dtypesIfCUDA,
                                                         deviceCountAtLeast, skipMeta)
-from torch.testing._internal.common_dtype import floating_types_and
+from torch.testing._internal.common_dtype import get_all_dtypes
 from torch.testing._internal.logging_tensor import no_dispatch

 import pickle
@@ -7497,7 +7497,7 @@ class TestAutogradDeviceType(TestCase):
         # At the time of writing this test, copy_ is not generated from native_functions.yaml
         # there was a bug that bfloat16 was not recognized as floating.
         x = torch.randn(10, device=device, requires_grad=True)
-        floating_dt = floating_types_and(torch.half, torch.bfloat16)
+        floating_dt = [dt for dt in get_all_dtypes() if dt.is_floating_point]
         for dt in floating_dt:
             y = torch.empty(10, device=device, dtype=dt)
             y.copy_(x)
diff --git a/test/test_binary_ufuncs.py b/test/test_binary_ufuncs.py
index f0ab0c35d156..8bf2ed9e7428 100644
--- a/test/test_binary_ufuncs.py
+++ b/test/test_binary_ufuncs.py
@@ -23,8 +23,8 @@ from torch.testing._internal.common_device_type import (
     skipCUDAIfRocm, skipIf, ops, OpDTypes, skipMeta)
 from torch.testing import make_tensor
 from torch.testing._internal.common_dtype import (
-    all_types_and_complex_and, all_types_and, integral_types, complex_types, integral_types_and,
-    floating_types_and, floating_and_complex_types, get_all_math_dtypes,
+    all_types_and_complex_and, integral_types_and, get_all_dtypes, get_all_int_dtypes, get_all_math_dtypes,
+    get_all_complex_dtypes, get_all_fp_dtypes,
 )
 from torch.testing._internal.common_methods_invocations import (
     binary_ufuncs, _NOTHING,
@@ -709,7 +709,7 @@ class TestBinaryUfuncs(TestCase):
             id_after = id(t)
         self.assertEqual(id_before, id_after)

-    @dtypes(*all_types_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False, include_complex=False))
     def test_div_rounding_modes(self, device, dtype):
         if dtype.is_floating_point:
             low, high = -10.0, 10.0
@@ -809,7 +809,8 @@ class TestBinaryUfuncs(TestCase):
             actual = torch.divide(a, zero, rounding_mode=rounding_mode)
             self.assertEqual(actual, expect, exact_dtype=exact_dtype)

-    @dtypes(*all_types_and(torch.half))
+    @dtypes(*get_all_dtypes(
+        include_bool=False, include_complex=False, include_bfloat16=False))
     def test_div_rounding_numpy(self, device, dtype):
         info = (torch.finfo(dtype) if dtype.is_floating_point
                 else torch.iinfo(dtype))
@@ -1261,7 +1262,7 @@ class TestBinaryUfuncs(TestCase):
             self.assertEqual(cpu_out, cuda_out)

     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half))
+    @dtypes(*(get_all_dtypes(include_bool=False, include_bfloat16=False)))
     def test_complex_scalar_pow_tensor(self, device, dtype):
         complexes = [0.5j, 1. + 1.j, -1.5j, 2.2 - 1.6j, 1 + 0j]
         first_exp = make_tensor((100,), dtype=dtype, device=device, low=-2, high=2)
@@ -1653,8 +1654,7 @@ class TestBinaryUfuncs(TestCase):
             self.assertEqual(expected, python_op(first, second))
             self.assertEqual(expected, torch_op(first, second))

-    @dtypes(*product(all_types_and(torch.half, torch.bfloat16, torch.bool),
-                     all_types_and(torch.half, torch.bfloat16, torch.bool)))
+    @dtypes(*product(get_all_dtypes(include_complex=False), get_all_dtypes(include_complex=False)))
     def test_maximum_minimum_type_promotion(self, device, dtypes):
         a = torch.tensor((0, 1), device=device, dtype=dtypes[0])
         b = torch.tensor((1, 0), device=device, dtype=dtypes[1])
@@ -1662,7 +1662,7 @@ class TestBinaryUfuncs(TestCase):
             result = op(a, b)
             self.assertEqual(result.dtype, torch.result_type(a, b))

-    @dtypes(*integral_types_and(torch.bool))
+    @dtypes(*(get_all_int_dtypes() + [torch.bool]))
     def test_maximum_minimum_int_and_bool(self, device, dtype):
         ops = ((torch.maximum, torch.max, np.maximum), (torch.minimum, torch.min, np.minimum),
                (torch.fmax, None, np.fmax), (torch.fmin, None, np.fmin))
@@ -1688,7 +1688,7 @@ class TestBinaryUfuncs(TestCase):
             self.assertEqual(out, numpy_result)

     @precisionOverride({torch.bfloat16: 1e-2})
-    @dtypes(*(floating_types_and(torch.half, torch.bfloat16)))
+    @dtypes(*(get_all_fp_dtypes()))
     def test_maximum_minimum_float(self, device, dtype):
         ops = ((torch.maximum, torch.max, np.maximum), (torch.minimum, torch.min, np.minimum),
                (torch.fmax, None, np.fmax), (torch.fmin, None, np.fmin))
@@ -1716,7 +1716,7 @@ class TestBinaryUfuncs(TestCase):
             self.assertEqual(tensor_result, numpy_result, exact_dtype=False)
             self.assertEqual(out, numpy_result, exact_dtype=False)

-    @dtypes(*(floating_types_and(torch.half, torch.bfloat16)))
+    @dtypes(*(get_all_fp_dtypes()))
     def test_maximum_minimum_float_nan_and_inf(self, device, dtype):
         # np.maximum and np.minimum functions compare input arrays element-wisely.
         # if one of the elements being compared is a NaN, then that element is returned.
@@ -1752,7 +1752,7 @@ class TestBinaryUfuncs(TestCase):
             self.assertEqual(tensor_result, numpy_result)
             self.assertEqual(out, numpy_result)

-    @dtypes(*product(complex_types(), all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
+    @dtypes(*product(get_all_complex_dtypes(), get_all_dtypes()))
     def test_maximum_minimum_complex(self, device, dtypes):
         for torch_op in (torch.maximum, torch.minimum, torch.max, torch.min, torch.fmax, torch.fmin):
             with self.assertRaisesRegex(RuntimeError, '.+not implemented for.+'):
@@ -1794,8 +1794,7 @@ class TestBinaryUfuncs(TestCase):
         self.assertEqual(tensor_result_1, numpy_result_1)
         self.assertEqual(tensor_result_2, numpy_result_2)

-    @dtypes(*product(floating_types_and(torch.half, torch.bfloat16),
-                     floating_types_and(torch.half, torch.bfloat16)))
+    @dtypes(*product(get_all_fp_dtypes(), get_all_fp_dtypes()))
     def test_maximum_and_minimum_subgradient(self, device, dtypes):
         def run_test(f, a, b, expected_a_grad, expected_b_grad):
             a = torch.tensor(a, requires_grad=True, device=device, dtype=dtypes[0])
@@ -1824,9 +1823,9 @@ class TestBinaryUfuncs(TestCase):
         self.assertEqual(x, 4.5)

     @onlyCPU
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_sub(self, device, dtype):
-        if dtype in integral_types():
+        if dtype in get_all_int_dtypes():
             # Before Python 3.10, floats were implicitly converted to ints, but with
             # DeprecationWarning: an integer is required (got type float).
             # Implicit conversion to integers using __int__ is deprecated,
@@ -1897,8 +1896,8 @@ class TestBinaryUfuncs(TestCase):
                 self.assertFalse(torch.isnan(ma[i]), "max(a, b): {}, a: {}, b: {}".format(ma[i], a[i], b[i]))
                 self.assertFalse(torch.isnan(mi[i]), "min(a, b): {}, a: {}, b: {}".format(mi[i], a[i], b[i]))

-    @dtypes(*product(all_types_and(torch.half, torch.bfloat16, torch.bool),
-                     all_types_and(torch.half, torch.bfloat16, torch.bool)))
+    @dtypes(*product(get_all_dtypes(include_complex=False),
+                     get_all_dtypes(include_complex=False)))
     def test_copysign(self, device, dtypes):
         def _test_copysign_numpy(a, b):
             torch_result = torch.copysign(a, b)
@@ -1915,7 +1914,7 @@ class TestBinaryUfuncs(TestCase):
             expected = torch.from_numpy(np.copysign(np_a, np_b))
             # To handle inconsistencies of type promotion between PyTorch and Numpy
             # Applied for both arguments having integral precision and bfloat16
-            types = integral_types_and(torch.bool, torch.bfloat16)
+            types = [torch.bool, torch.bfloat16] + get_all_int_dtypes()
             if a.dtype in types or b.dtype in types:
                 promoted_type = torch.promote_types(torch_result.dtype, expected.dtype)
                 torch_result = torch_result.to(promoted_type)
@@ -1960,13 +1959,13 @@ class TestBinaryUfuncs(TestCase):
             for case in cases:
                 _test_copysign_numpy(torch.tensor([case], device=device, dtype=dtypes[0]), b)

-        if dtypes[1] in floating_types_and(torch.half, torch.bfloat16):
+        if dtypes[1] in get_all_fp_dtypes():
             a = make_tensor((10, 10), device=device, dtype=dtypes[0], low=-9, high=9)
             for case in cases:
                 _test_copysign_numpy(a, torch.tensor([case], device=device, dtype=dtypes[1]))

-    @dtypes(*product(floating_types_and(torch.half, torch.bfloat16),
-                     floating_types_and(torch.half, torch.bfloat16)))
+    @dtypes(*product(get_all_fp_dtypes(),
+                     get_all_fp_dtypes()))
     def test_copysign_subgradient(self, device, dtypes):
         # Input is 0.0
         x = torch.tensor([0.0, 0.0, 0.0], dtype=dtypes[0], device=device, requires_grad=True)
@@ -2106,7 +2105,7 @@ class TestBinaryUfuncs(TestCase):
                 z = torch.tensor([30 / v.item() for v in x], device=device)
                 self.assertEqual(y, z, exact_dtype=False)

-    @dtypes(*floating_types_and(torch.half))
+    @dtypes(*get_all_fp_dtypes(include_bfloat16=False))
     def test_fmod_remainder_by_zero_float(self, device, dtype):
         fn_list = (torch.fmod, torch.remainder)
         for fn in fn_list:
@@ -2118,7 +2117,7 @@ class TestBinaryUfuncs(TestCase):

     @onlyNativeDeviceTypes  # Check Issue https://github.com/pytorch/pytorch/issues/48130
     @skipCUDAIfRocm  # Error happens on both ROCM and XLA
-    @dtypes(*integral_types())
+    @dtypes(*get_all_int_dtypes())
     def test_fmod_remainder_by_zero_integral(self, device, dtype):
         fn_list = (torch.fmod, torch.remainder)
         for fn in fn_list:
@@ -2143,7 +2142,7 @@ class TestBinaryUfuncs(TestCase):
                 value = 255 if dtype == torch.uint8 else -1
                 self.assertTrue(torch.all(fn(x, zero) == value))

-    @dtypes(*all_types_and(torch.half))
+    @dtypes(*get_all_dtypes(include_bfloat16=False, include_bool=False, include_complex=False))
     def test_fmod_remainder(self, device, dtype):
         # Use numpy as reference
         def _helper(x, mod, fns_list):
@@ -2180,7 +2179,7 @@ class TestBinaryUfuncs(TestCase):
         # Mods: Integer, Float, Tensor, Non-contiguous Tensor
         mods = [3, 2.3, mod, mod.t()]
         # mod with floating-point dtype
-        if dtype in integral_types():
+        if dtype in get_all_int_dtypes():
             mod_float = make_tensor((10, 10), device=device, dtype=torch.float, low=-9, high=9)
             mod[mod == 0] = 1
             mods.append(mod_float)
@@ -2401,7 +2400,7 @@ class TestBinaryUfuncs(TestCase):
                     a // b

     @unittest.skipIf(TEST_WITH_ASAN, "Integer overflows are not allowed under ASAN")
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_muldiv_scalar(self, device, dtype):
         x = make_tensor((10, 3), dtype=dtype, device=device, low=None, high=None)
         s = make_tensor((1,), dtype=dtype, device="cpu", low=None, high=None).item()
@@ -2441,8 +2440,7 @@ class TestBinaryUfuncs(TestCase):

         return x

-    @dtypes(*tuple(itertools.combinations_with_replacement(all_types_and_complex_and(torch.half,
-                                                                                     torch.bfloat16, torch.bool), 2)))
+    @dtypes(*tuple(itertools.combinations_with_replacement(get_all_dtypes(), 2)))
     def test_comparison_ops_type_promotion_and_broadcasting(self, device, dtypes):
         # issue #42660
         # testing all combinations of broadcasting and type promotion
@@ -2624,8 +2622,8 @@ class TestBinaryUfuncs(TestCase):
             self.assertEqual(torch_op(a, 2.2), expected_op(a, 2.2))

     @onlyNativeDeviceTypes
-    @dtypes(*list(product(all_types_and(torch.half, torch.bfloat16, torch.bool),
-                          all_types_and(torch.half, torch.bfloat16, torch.bool))))
+    @dtypes(*list(product(get_all_dtypes(include_complex=False),
+                          get_all_dtypes(include_complex=False))))
     def test_heaviside(self, device, dtypes):
         input_dtype = dtypes[0]
         values_dtype = dtypes[1]
@@ -2684,7 +2682,8 @@ class TestBinaryUfuncs(TestCase):
         with self.assertRaisesRegex(RuntimeError, 'Expected all tensors to be on the same device'):
             torch.heaviside(y, x)

-    @dtypes(*list(product(complex_types(), complex_types())))
+    @dtypes(*list(product(get_all_complex_dtypes(),
+                          get_all_complex_dtypes())))
     def test_heaviside_complex(self, device, dtypes):
         input_dtype = dtypes[0]
         values_dtype = dtypes[1]
@@ -2719,18 +2718,15 @@ class TestBinaryUfuncs(TestCase):
                 getattr(a, op + '_')(b)
             self.assertEqual(expected_res, a)

-    @dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
-                     all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
+    @dtypes(*product(get_all_dtypes(), get_all_dtypes()))
     def test_logical_xor(self, device, dtypes):
         self._test_logical(device, dtypes, 'logical_xor', [10, 0, 1, 0], [1, 0, 0, 10], [0, 0, 1, 1])

-    @dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
-                     all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
+    @dtypes(*product(get_all_dtypes(), get_all_dtypes()))
     def test_logical_and(self, device, dtypes):
         self._test_logical(device, dtypes, 'logical_and', [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 0, 0])

-    @dtypes(*product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
-                     all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
+    @dtypes(*product(get_all_dtypes(), get_all_dtypes()))
     def test_logical_or(self, device, dtypes):
         self._test_logical(device, dtypes, 'logical_or', [10, 0, 1, 0], [1, 0, 0, 10], [1, 0, 1, 1])
@@ -2839,7 +2835,7 @@ class TestBinaryUfuncs(TestCase):
         self._test_logaddexp(device, dtype, base2=True)

     def test_add(self, device):
-        dtypes = floating_and_complex_types()
+        dtypes = [torch.float, torch.double] + get_all_complex_dtypes()
         for dtype in dtypes:
             # [res] torch.add([res,] tensor1, tensor2)
             m1 = torch.randn(100, 100, dtype=dtype, device=device)
@@ -3041,7 +3037,7 @@ class TestBinaryUfuncs(TestCase):
                          torch.tensor([0, 1, 0, 1, 0, 1], dtype=torch.bool, device=device))
         self.assertFalse(a.equal(b))

-    @dtypes(*all_types_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes(include_complex=False))
     def test_logical(self, device, dtype):
         if dtype != torch.bool:
             x = torch.tensor([1, 2, 3, 4], device=device, dtype=dtype)
@@ -3228,8 +3224,8 @@ class TestBinaryUfuncs(TestCase):
             self.unary_check_input_output_mem_overlap(
                 doubles, sz, lambda input, out: torch.pow(42, input, out=out))

-    @dtypes(*list(product(all_types_and_complex_and(torch.half, torch.bfloat16),
-                          all_types_and_complex_and(torch.half, torch.bfloat16))))
+    @dtypes(*list(product(get_all_dtypes(include_bool=False),
+                          get_all_dtypes(include_bool=False))))
     def test_float_power(self, device, dtypes):
         def to_np(value):
             if isinstance(value, torch.Tensor) and value.dtype == torch.bfloat16:
@@ -3325,8 +3321,8 @@ class TestBinaryUfuncs(TestCase):
                 torch.Tensor.float_power_(base.clone(), exp)

     @skipIf(not TEST_SCIPY, "Scipy required for the test.")
-    @dtypes(*product(all_types_and(torch.half, torch.bool),
-                     all_types_and(torch.half, torch.bool)))
+    @dtypes(*product(get_all_dtypes(include_complex=False, include_bfloat16=False),
+                     get_all_dtypes(include_complex=False, include_bfloat16=False)))
     def test_xlogy_xlog1py(self, device, dtypes):
         x_dtype, y_dtype = dtypes
@@ -3337,7 +3333,7 @@ class TestBinaryUfuncs(TestCase):
             self.assertEqual(expected, out)

         def xlogy_inplace_variant_helper(x, y):
-            if x.dtype in integral_types_and(torch.bool):
+            if x.dtype in get_all_int_dtypes() + [torch.bool]:
                 with self.assertRaisesRegex(RuntimeError,
                                             "can't be cast to the desired output type"):
                     x.clone().xlogy_(y)
@@ -3464,7 +3460,10 @@ class TestBinaryUfuncs(TestCase):
         _compare_helper(t, zeros, *xlog1py_fns)
         _compare_helper(t, 0., *xlog1py_fns)

-    @dtypes(*product(all_types_and(torch.bool), all_types_and(torch.bool)))
+    @dtypes(*product(get_all_dtypes(include_complex=False,
+                                    include_half=False, include_bfloat16=False),
+                     get_all_dtypes(include_complex=False,
+                                    include_half=False, include_bfloat16=False)))
     @skipIf(not TEST_SCIPY, "Scipy required for the test.")
     @slowTest
     def test_zeta(self, device, dtypes):
diff --git a/test/test_complex.py b/test/test_complex.py
index 88404902631f..9f2e0ad32401 100644
--- a/test/test_complex.py
+++ b/test/test_complex.py
@@ -3,12 +3,12 @@ import torch
 from torch.testing._internal.common_device_type import instantiate_device_type_tests, dtypes
 from torch.testing._internal.common_utils import TestCase, run_tests
-from torch.testing._internal.common_dtype import complex_types
+from torch.testing._internal.common_dtype import get_all_complex_dtypes

 devices = (torch.device('cpu'), torch.device('cuda:0'))

 class TestComplexTensor(TestCase):
-    @dtypes(*complex_types())
+    @dtypes(*get_all_complex_dtypes())
     def test_to_list(self, device, dtype):
         # test that the complex float tensor has expected values and
         # there's no garbage value in the resultant list
diff --git a/test/test_foreach.py b/test/test_foreach.py
index 4da23dc66fc3..89bed6cc746f 100644
--- a/test/test_foreach.py
+++ b/test/test_foreach.py
@@ -16,8 +16,7 @@ from torch.testing._internal.common_methods_invocations import (
     foreach_unary_op_db, foreach_binary_op_db,
     foreach_pointwise_op_db, foreach_minmax_op_db,
     foreach_reduce_op_db)
 from torch.testing._internal.common_dtype import (
-    all_types_and_complex_and, all_types_and, integral_types, complex_types,
-    floating_types_and, floating_types, integral_types_and,
+    get_all_dtypes, get_all_int_dtypes, get_all_complex_dtypes, get_all_fp_dtypes,
 )

 # Includes some values such that N * N won't be a multiple of 4,
@@ -141,7 +140,7 @@ class TestForeach(TestCase):
             self._binary_test(dtype, inplace_op, inplace_ref, inputs, is_fastpath, is_inplace=True)
             if opinfo.supports_alpha_param:
                 alpha = None
-                if dtype in integral_types():
+                if dtype in get_all_int_dtypes():
                     alpha = 3
                 elif dtype.is_complex:
                     alpha = complex(3, 3)
@@ -170,7 +169,7 @@ class TestForeach(TestCase):
     @ops(foreach_binary_op_db)
     def test_binary_op_tensorlists_fastpath(self, device, dtype, op):
         for N in N_values:
-            disable_fastpath = op.ref == torch.div and dtype in integral_types_and(torch.bool)
+            disable_fastpath = op.ref == torch.div and dtype in get_all_int_dtypes() + [torch.bool]
             if op.ref == torch.add and dtype == torch.bool:
                 disable_fastpath = True
             self._test_binary_op_tensorlists(device, dtype, op, N, True, disable_fastpath)
@@ -191,17 +190,17 @@ class TestForeach(TestCase):
     @ops(foreach_binary_op_db)
     def test_binary_op_scalar_fastpath(self, device, dtype, op):
         for N, scalar in itertools.product(N_values, Scalars):
-            disable_fastpath = op.ref == torch.div and dtype in integral_types_and(torch.bool)
+            disable_fastpath = op.ref == torch.div and dtype in get_all_int_dtypes() + [torch.bool]
             if isinstance(scalar, int):
                 disable_fastpath |= dtype == torch.bool
             if isinstance(scalar, float):
-                disable_fastpath |= dtype in integral_types_and(torch.bool)
+                disable_fastpath |= dtype in get_all_int_dtypes() + [torch.bool]
             if isinstance(scalar, bool):
                 disable_fastpath |= dtype == torch.bool
                 if op.ref in (torch.add, torch.mul):
                     disable_fastpath = False
             if isinstance(scalar, complex):
-                disable_fastpath |= dtype not in complex_types()
+                disable_fastpath |= dtype not in get_all_complex_dtypes()
             self._test_binary_op_scalar(device, dtype, op, N, scalar, True, disable_fastpath)
@@ -230,16 +229,16 @@ class TestForeach(TestCase):
     def test_binary_op_scalarlist_fastpath(self, device, dtype, op):
         for N in N_values:
             for type_str, scalarlist in getScalarLists(N):
-                bool_int_div = op.ref == torch.div and dtype in integral_types_and(torch.bool)
+                bool_int_div = op.ref == torch.div and dtype in get_all_int_dtypes() + [torch.bool]
                 disable_fastpath = bool_int_div
                 if type_str == "int":
                     disable_fastpath |= dtype == torch.bool
                 if type_str == "float":
-                    disable_fastpath |= dtype in integral_types_and(torch.bool)
+                    disable_fastpath |= dtype in get_all_int_dtypes() + [torch.bool]
                 if type_str == "complex":
-                    disable_fastpath |= dtype not in complex_types()
+                    disable_fastpath |= dtype not in get_all_complex_dtypes()
                 if type_str == "mixed":
-                    disable_fastpath |= True and dtype not in complex_types()
+                    disable_fastpath |= True and dtype not in get_all_complex_dtypes()
                 self._test_binary_op_scalarlist(device, dtype, op, N, scalarlist, True, disable_fastpath)
@@ -296,7 +295,7 @@ class TestForeach(TestCase):
     @skipMeta
     @ops(foreach_pointwise_op_db)
     def test_pointwise_op_fastpath(self, device, dtype, op):
-        disable_fastpath = dtype in integral_types_and(torch.bool)
+        disable_fastpath = dtype in get_all_int_dtypes() + [torch.bool]
         # for N, scalar in itertools.product(N_values, Scalars):
         for N in N_values:
             self._test_pointwise_op(device, dtype, op, N, True, disable_fastpath)
@@ -354,7 +353,7 @@ class TestForeach(TestCase):
         op, ref, inplace_op, inplace_ref = self._get_funcs(opinfo, 1)
         inputs = opinfo.sample_inputs(device, dtype, N, noncontiguous=not is_fastpath),
         # note(mkozuki): Complex inputs for `_foreach_abs` go through slowpath.
-        if opinfo.name == "_foreach_abs" and dtype in complex_types():
+        if opinfo.name == "_foreach_abs" and dtype in get_all_complex_dtypes():
             is_fastpath = False
         self._regular_unary_test(dtype, op, ref, inputs, is_fastpath)
         self._inplace_unary_test(dtype, inplace_op, inplace_ref, inputs, is_fastpath)
@@ -365,7 +364,7 @@ class TestForeach(TestCase):
         for N in N_values:
             self._test_unary(device, dtype, op, N, is_fastpath=True)

-    @ops(foreach_unary_op_db, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @ops(foreach_unary_op_db, dtypes=get_all_dtypes())
     def test_unary_slowpath(self, device, dtype, op):
         for N in N_values:
             self._test_unary(device, dtype, op, N, is_fastpath=False)
@@ -382,7 +381,7 @@ class TestForeach(TestCase):
             self._minmax_test(op, inputs, True, N if dtype == torch.bool else 1)

     @ops(foreach_minmax_op_db,
-         dtypes=all_types_and(torch.half, torch.bfloat16, torch.bool))
+         dtypes=get_all_dtypes(include_half=True, include_bfloat16=True, include_complex=False))
     def test_minmax_slowpath(self, device, dtype, op):
         for N in N_values:
             inputs = tuple(op.sample_inputs(device, dtype, N, noncontiguous=True) for _ in range(2))
@@ -390,7 +389,7 @@ class TestForeach(TestCase):

     # note(mkozuki): ForeachFuncInfo's of both `_foreach_maximum` and `_foreach_minimum` include integer types.
     # so, manually limit dtypes to fp types for inf&nan tests.
-    @ops(foreach_minmax_op_db, dtypes=floating_types_and(torch.half, torch.bfloat16))
+    @ops(foreach_minmax_op_db, dtypes=get_all_fp_dtypes(include_bfloat16=True, include_half=True))
     def test_minmax_float_inf_nan(self, device, dtype, op):
         inputs = (
             [
@@ -415,7 +414,7 @@ class TestForeach(TestCase):
     @ops(foreach_reduce_op_db)
     def test_reduce_fastpath(self, device, dtype, op):
         for N, ord in itertools.product(N_values, (0, 1, 2, -1, -2)):
-            if ord in (1, 2) and dtype in floating_types_and(torch.half, torch.bfloat16):
+            if ord in (1, 2) and dtype in get_all_fp_dtypes():
                 n_expected_cudaLaunchKernels = 3
             else:
                 n_expected_cudaLaunchKernels = N
@@ -428,7 +427,7 @@ class TestForeach(TestCase):
             inputs = op.sample_inputs(device, dtype, N, noncontiguous=True),
             self._reduce_test(op, inputs, ord, False, 1)

-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_add_scalar_with_empty_list_and_empty_tensor(self, device, dtype):
         # TODO: enable empty list case
         for tensors in [[torch.randn([0])]]:
@@ -438,7 +437,7 @@ class TestForeach(TestCase):
             torch._foreach_add_(tensors, 1)
             self.assertEqual(res, tensors)

-    @ops(foreach_binary_op_db, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @ops(foreach_binary_op_db, dtypes=get_all_dtypes())
     def test_binary_op_scalar_with_overlapping_tensors(self, device, dtype, op):
         foreach_op, ref = op.method_variant, op.ref
         tensors = [torch.ones(1, 1, device=device, dtype=dtype).expand(2, 1, 3)]
@@ -470,7 +469,7 @@ class TestForeach(TestCase):
             runtime_error = e
         self.assertIsNone(runtime_error)

-    @ops(foreach_binary_op_db, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @ops(foreach_binary_op_db, dtypes=get_all_dtypes())
     def test_binary_op_list_error_cases(self, device, dtype, op):
         foreach_op, foreach_op_, ref, ref_ = op.method_variant, op.inplace_variant, op.ref, op.ref_inplace
         tensors1 = []
@@ -525,7 +524,7 @@ class TestForeach(TestCase):
             return
         with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
             foreach_op([tensor1], [tensor2])
-        if dtype in integral_types_and(torch.bool) and foreach_op == torch._foreach_div:
+        if dtype in get_all_int_dtypes() + [torch.bool] and foreach_op == torch._foreach_div:
             with self.assertRaisesRegex(RuntimeError, "result type"):
                 foreach_op_([tensor1], [tensor2])
         else:
@@ -534,7 +533,7 @@ class TestForeach(TestCase):

     @skipMeta
     @unittest.skipIf(not torch.cuda.is_available(), "CUDA not found")
-    @ops(foreach_binary_op_db, dtypes=all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @ops(foreach_binary_op_db, dtypes=get_all_dtypes())
     def test_binary_op_list_slow_path(self, device, dtype, op):
         # note(mkozuki): why `n_expected_cudaLaunchKernels=0`?
         # In this test, foreach functions don't go through fast path,
@@ -626,7 +625,7 @@ class TestForeach(TestCase):
         self.assertEqual(actual, tensors1)

     @onlyCUDA
-    @ops(foreach_pointwise_op_db, allowed_dtypes=floating_types())
+    @ops(foreach_pointwise_op_db, allowed_dtypes=get_all_fp_dtypes(include_half=False, include_bfloat16=False))
     def test_pointwise_op_tensors_on_different_devices(self, device, dtype, op):
         # tensors1: ['cuda', 'cpu]
         # tensors2: ['cuda', 'cpu]
diff --git a/test/test_linalg.py b/test/test_linalg.py
index 6d7a70d6477e..010555ec4b71 100644
--- a/test/test_linalg.py
+++ b/test/test_linalg.py
@@ -25,8 +25,8 @@ from torch.testing._internal.common_device_type import \
      onlyCUDA, skipCUDAVersionIn, skipMeta, skipCUDAIfNoCusolver)
 from torch.testing import make_tensor
 from torch.testing._internal.common_dtype import (
-    all_types, all_types_and_complex_and, floating_and_complex_types, integral_types,
-    floating_and_complex_types_and, floating_types_and, complex_types,
+    all_types, floating_and_complex_types, get_all_dtypes, get_all_int_dtypes, get_all_complex_dtypes,
+    get_all_fp_dtypes,
 )
 from torch.testing._internal.common_cuda import SM53OrLater, tf32_on_and_off, CUDA11OrLater, CUDA9
 from torch.distributions.binomial import Binomial
@@ -101,7 +101,7 @@ class TestLinalg(TestCase):

     # Tests torch.outer, and its alias, torch.ger, vs. NumPy
     @precisionOverride({torch.bfloat16: 1e-1})
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*(get_all_dtypes()))
     def test_outer(self, device, dtype):
         def run_test_case(a, b):
             if dtype == torch.bfloat16:
@@ -745,7 +745,7 @@ class TestLinalg(TestCase):
             check(m_scalar, a, b, beta, alpha)

             # test nans and infs are not propagated to the output when beta == 0
-            float_and_complex_dtypes = floating_and_complex_types_and(torch.half, torch.bfloat16)
+            float_and_complex_dtypes = get_all_fp_dtypes() + get_all_complex_dtypes()
             if beta == 0 and dtype in float_and_complex_dtypes:
                 m[0][10] = m[10][10] = m[20][20] = float('inf')
                 m[1][10] = m[11][10] = m[21][20] = float('nan')
@@ -758,7 +758,7 @@ class TestLinalg(TestCase):
         self._test_addr_vs_numpy(device, dtype, beta=False, alpha=False)
         self._test_addr_vs_numpy(device, dtype, beta=True, alpha=True)

-    @dtypes(*integral_types())
+    @dtypes(*(get_all_int_dtypes()))
     def test_addr_integral(self, device, dtype):
         with self.assertRaisesRegex(RuntimeError,
                                     'argument beta must not be a floating point number.'):
@@ -779,7 +779,7 @@ class TestLinalg(TestCase):
         self._test_addr_vs_numpy(device, dtype, beta=2, alpha=2)

     @precisionOverride({torch.bfloat16: 1e-1})
-    @dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
+    @dtypes(*(get_all_fp_dtypes() + get_all_complex_dtypes()))
     def test_addr_float_and_complex(self, device, dtype):
         with self.assertRaisesRegex(RuntimeError,
                                     'Boolean beta only supported for Boolean results.'):
@@ -792,11 +792,11 @@ class TestLinalg(TestCase):
         self._test_addr_vs_numpy(device, dtype, beta=0., alpha=2)
         # when beta is not zero
         self._test_addr_vs_numpy(device, dtype, beta=0.5, alpha=2)
-        if dtype in complex_types():
+        if dtype in get_all_complex_dtypes():
             self._test_addr_vs_numpy(device, dtype, beta=(0 + 0.1j), alpha=(0.2 - 0.2j))

-    @dtypes(*itertools.product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
-                               all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
+    @dtypes(*itertools.product(get_all_dtypes(),
+                               get_all_dtypes()))
     def test_outer_type_promotion(self, device, dtypes):
         a = torch.randn(5).to(device=device, dtype=dtypes[0])
         b = torch.randn(5).to(device=device, dtype=dtypes[1])
@@ -806,7 +806,7 @@ class TestLinalg(TestCase):

     # don't use @dtypes decorator to avoid generating ~1700 tests per device
     def test_addr_type_promotion(self, device):
-        for dtypes0, dtypes1, dtypes2 in product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool), repeat=3):
+        for dtypes0, dtypes1, dtypes2 in product(get_all_dtypes(), repeat=3):
             a = make_tensor((5,), device=device, dtype=dtypes0, low=-2, high=2)
             b = make_tensor((5,), device=device, dtype=dtypes1, low=-2, high=2)
             m = make_tensor((5, 5), device=device, dtype=dtypes2, low=-2, high=2)
@@ -5072,11 +5072,9 @@ class TestLinalg(TestCase):
         A_LU, pivots = fn(torch.lu, (2, 0, 0))
         self.assertEqual([(2, 0, 0), (2, 0)], [A_LU.shape, pivots.shape])

-    @dtypesIfCUDA(*floating_and_complex_types_and(
-        *[torch.half] if not CUDA9 else [],
-        *[torch.bfloat16] if CUDA11OrLater and SM53OrLater else []
-    ))
-    @dtypes(*all_types_and_complex_and(torch.bfloat16))
+    @dtypesIfCUDA(torch.cfloat, torch.cdouble,
+                  *get_all_fp_dtypes(include_half=not CUDA9, include_bfloat16=(CUDA11OrLater and SM53OrLater)))
+    @dtypes(*(set(get_all_dtypes()) - {torch.half, torch.bool}))
     def test_blas_alpha_beta_empty(self, device, dtype):
         # This test is disabled on CUDA 9 due to:
         # See: https://github.com/pytorch/pytorch/issues/31006
@@ -5112,7 +5110,7 @@ class TestLinalg(TestCase):
         self.assertEqual(torch.full((2, 3), beta * value, dtype=dtype, device=device),
                          torch.addmm(input=input, mat1=mat, mat2=mat2, alpha=alpha, beta=beta, out=out))

-    @dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
+    @dtypes(*(get_all_complex_dtypes() + get_all_fp_dtypes()))
     def test_blas_nan_out(self, device, dtype):
         # These functions should work correctly with NaN filled outputs,
         # but need special handling, see [NOTE: cpu_zero]
@@ -5723,9 +5721,9 @@ scipy_lobpcg  | {:10.2e}  | {:10.2e}  | {:6} | N/A

     @precisionOverride({torch.bfloat16: 1e-0, torch.half: 5e-4, torch.float: 1e-4, torch.double: 1e-8,
                         torch.cfloat: 1e-4, torch.cdouble: 1e-8})
-    @dtypesIfCUDA(*floating_and_complex_types_and(
-        *[torch.bfloat16] if TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater) else [],
-        *[torch.half] if not TEST_WITH_ROCM else []))
+    @dtypesIfCUDA(*get_all_complex_dtypes(),
+                  *get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater)),
+                                     include_half=(not TEST_WITH_ROCM)))
     @dtypes(torch.bfloat16, torch.float, torch.double, torch.cfloat, torch.cdouble)
     def test_addmv(self, device, dtype):
         # have to use torch.randn(...).to(bfloat16) instead of
@@ -5760,8 +5758,7 @@ scipy_lobpcg  | {:10.2e}  | {:10.2e}  | {:6} | N/A
         for m, v in itertools.product(ms, vs):
             self._test_addmm_addmv(torch.addmv, t, m, v, beta=0)

-    @dtypesIfCUDA(*floating_types_and(*[torch.bfloat16] if TEST_WITH_ROCM or (CUDA11OrLater and
-                                                                              SM53OrLater) else []))
+    @dtypesIfCUDA(*get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater))))
     @dtypes(torch.float, torch.double)
     def test_addmv_rowmajor_colmajor_incx_incy_lda(self, device, dtype):
         # tests (o, s)*(s).  o is output size, s is summed size.
@@ -5792,9 +5789,9 @@ scipy_lobpcg  | {:10.2e}  | {:10.2e}  | {:6} | N/A

     @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
                         torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
-    @dtypesIfCUDA(*floating_and_complex_types_and(
-        *[torch.bfloat16] if TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater) else []))
-    @dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_complex_dtypes(),
+                  *get_all_fp_dtypes(include_bfloat16=(TEST_WITH_ROCM or (CUDA11OrLater and SM53OrLater))))
+    @dtypes(*get_all_complex_dtypes(), *get_all_fp_dtypes())
     @tf32_on_and_off(0.05)
     def test_addmm(self, device, dtype):
         M = torch.randn(10, 25, device=device).to(dtype)
@@ -5827,7 +5824,7 @@ scipy_lobpcg  | {:10.2e}  | {:10.2e}  | {:6} | N/A
         self._test_addmm_addmv(torch.addmm, M, m1, m2, transpose_out=t4)

     @dtypes(torch.float, torch.double)
-    @dtypesIfCUDA(*floating_and_complex_types())
+    @dtypesIfCUDA(*([torch.float, torch.double] + get_all_complex_dtypes()))
     @tf32_on_and_off(0.005)
     def test_addmm_sizes(self, device, dtype):
         for m in [0, 1, 25]:
@@ -6026,7 +6023,7 @@ scipy_lobpcg  | {:10.2e}  | {:10.2e}  | {:6} | N/A
     @precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
     @skipCUDAIf(torch.version.cuda == "10.1", "flaky on CUDA 10.1")
     @onlyNativeDeviceTypes
-    @dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())
     @tf32_on_and_off(0.05)
     def test_bmm(self, device, dtype):
         if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
@@ -6138,7 +6135,7 @@ scipy_lobpcg  | {:10.2e}  | {:10.2e}  | {:6} | N/A

     @precisionOverride({torch.half: 0.05, torch.bfloat16: 0.05})
     @onlyNativeDeviceTypes
-    @dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())
     @tf32_on_and_off(0.05)
     def test_addbmm(self, device, dtype):
         if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
@@ -6211,7 +6208,7 @@ scipy_lobpcg  | {:10.2e}  | {:10.2e}  | {:6} | N/A

     @precisionOverride({torch.half: 0.1, torch.bfloat16: 0.5})
     @onlyNativeDeviceTypes
-    @dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_fp_dtypes(), *get_all_complex_dtypes())
     @tf32_on_and_off(0.05)
     def test_baddbmm(self, device, dtype):
         if self.device_type == 'cuda' and dtype is torch.bfloat16 and CUDA11OrLater and not SM53OrLater:
diff --git a/test/test_nn.py b/test/test_nn.py
index c008ab263f0f..ad7c5df2b0a2 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -35,7 +35,7 @@ from torch.nn.utils import parameters_to_vector, vector_to_parameters
 from torch.nn import Parameter
 from torch.nn.parameter import UninitializedParameter, UninitializedBuffer
 from torch.nn.parallel._functions import Broadcast
-from torch.testing._internal.common_dtype import integral_types, floating_types_and, get_all_math_dtypes
+from torch.testing._internal.common_dtype import integral_types, get_all_fp_dtypes, get_all_math_dtypes
 from torch.testing._internal.common_utils import freeze_rng_state, run_tests, TestCase, skipIfNoLapack, skipIfRocm, \
     skipIfRocmVersionLessThan, skipIfNotMiopenSuggestNHWC, TEST_NUMPY, TEST_SCIPY, TEST_WITH_ROCM, download_file, \
     get_function_arglist, load_tests, \
@@ -13104,7 +13104,7 @@ class TestNNDeviceType(NNTestCase):

     @onlyCUDA
     @skipCUDAIfNoCudnn
-    @dtypes(*floating_types_and(torch.half, *[torch.bfloat16] if AMPERE_OR_ROCM else []))
+    @dtypes(*get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
     def test_Conv2d_deterministic_cudnn(self, device, dtype):
         inputs = torch.randn(2, 3, 5, 5, device=device, dtype=dtype, requires_grad=True)
         with cudnn.flags(enabled=True, benchmark=True, deterministic=True):
@@ -13123,7 +13123,7 @@ class TestNNDeviceType(NNTestCase):

     @onlyCUDA
-    @dtypes(*floating_types_and(torch.half, *[torch.bfloat16] if AMPERE_OR_ROCM else []))
+    @dtypes(*get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
     def test_Conv2d_large_workspace(self, device, dtype):
         # These sizes require huge cuDNN workspaces. Make sure we choose a
         # reasonable algorithm that does not run out of memory
@@ -13248,7 +13248,7 @@ class TestNNDeviceType(NNTestCase):

     @onlyCUDA
-    @dtypes(*floating_types_and(torch.half, *[torch.bfloat16] if AMPERE_OR_ROCM else []))
+    @dtypes(*get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
     def test_noncontig_conv_grad(self, device, dtype):
         # FIXME: remove after adding non-contiguous grad tests for all modules
         module = nn.Conv2d(3, 5, kernel_size=3, padding=1).to(device, dtype)
@@ -17602,7 +17602,7 @@ class TestNNDeviceType(NNTestCase):
             self.assertEqual(q.size(), out[0].size())
             self.assertEqual(dtype, out[0].dtype)

-    @dtypesIfCUDA(*floating_types_and(torch.half, *[torch.bfloat16] if AMPERE_OR_ROCM else []))
+    @dtypesIfCUDA(*get_all_fp_dtypes(include_bfloat16=AMPERE_OR_ROCM))
     @dtypes(torch.float)
     def test_Conv2d_naive_groups(self, device, dtype):
         # Check that grouped convolutions matches two half convolutions
@@ -17970,37 +17970,37 @@ class TestNNDeviceType(NNTestCase):
         self.assertEqual(output[0, 0, 0, 0], float("-inf"))
         self.assertEqual(indices[0, 0, 0, 0], 0)

-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     @dtypes(torch.float)
     def test_MaxPool1d_indices(self, device, dtype):
         self._test_maxpool_indices(1, device=device, dtype=dtype)

-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     @dtypes(torch.float)
     def test_MaxPool2d_indices(self, device, dtype):
         self._test_maxpool_indices(2, device=device, dtype=dtype)

-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     @dtypes(torch.float)
     def test_MaxPool3d_indices(self, device, dtype):
         self._test_maxpool_indices(3, device=device, dtype=dtype)

-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     @dtypes(torch.float)
     def test_AdaptiveMaxPool1d_indices(self, device, dtype):
         self._test_maxpool_indices(1, adaptive=True, device=device, dtype=dtype)

-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     @dtypes(torch.float)
     def test_AdaptiveMaxPool2d_indices(self, device, dtype):
         self._test_maxpool_indices(2, adaptive=True, device=device, dtype=dtype)

-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     @dtypes(torch.float)
     def test_AdaptiveMaxPool3d_indices(self, device, dtype):
         self._test_maxpool_indices(3, adaptive=True, device=device, dtype=dtype)

-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     @dtypes(torch.float)
     def test_maxpool_indices_no_batch_dim(self, device, dtype):
         """Check that indices with no batch dim is consistent with a single batch."""
@@ -18165,7 +18165,7 @@ class TestNNDeviceType(NNTestCase):
                 self.assertRaisesRegex(RuntimeError,
                                        r"stride should not be zero|stride must be greater than zero",
                                        lambda: fn_module(x))

-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     @dtypes(torch.float)
     def test_pool_large_size(self, device, dtype):
         for op in ('max', 'avg'):
@@ -18179,7 +18179,7 @@ class TestNNDeviceType(NNTestCase):
             # check if the output shape was still computed correctly
             self.assertEqual(x.shape[2], res.shape[2])

-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     @dtypes(torch.float)
     def test_pool_invalid_size(self, device, dtype):
         for op in ('max', 'avg'):
diff --git a/test/test_numpy_interop.py b/test/test_numpy_interop.py
index 1258eac2e709..2c1395a19ac8 100644
--- a/test/test_numpy_interop.py
+++ b/test/test_numpy_interop.py
@@ -9,7 +9,7 @@ from torch.testing._internal.common_utils import \
     (TestCase, run_tests)
 from torch.testing._internal.common_device_type import \
     (instantiate_device_type_tests, onlyCPU, dtypes, skipMeta)
-from torch.testing._internal.common_dtype import all_types_and_complex_and
+from torch.testing._internal.common_dtype import get_all_dtypes

 # For testing handling NumPy objects and sending tensors to / accepting
 #   arrays from NumPy.
@@ -397,7 +397,7 @@ class TestNumPyInterop(TestCase):
         self.assertIsNotNone(torch.tensor(arr, device=device, dtype=torch.long).storage())
         self.assertIsNotNone(torch.tensor(arr, device=device, dtype=torch.uint8).storage())

-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_numpy_scalar_cmp(self, device, dtype):
         if dtype.is_complex:
             tensors = (torch.tensor(complex(1, 3), dtype=dtype, device=device),
diff --git a/test/test_ops.py b/test/test_ops.py
index 8625c9651627..45a3116ab89a 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -8,7 +8,7 @@ import itertools

 import torch
 from torch.testing import make_tensor
-from torch.testing._internal.common_dtype import floating_and_complex_types_and, all_types_and_complex_and
+from torch.testing._internal.common_dtype import floating_and_complex_types_and, get_all_dtypes
 from torch.testing._internal.common_utils import \
     (TestCase, is_iterable_of_tensors, run_tests, IS_SANDCASTLE, clone_input_helper, IS_IN_CI,
      suppress_warnings, noncontiguous_like,
@@ -81,7 +81,7 @@ class TestCommon(TestCase):
                 if dtype in allowed_backward_dtypes:
                     unsupported_backward_dtypes.append(dtype)

-        for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
+        for dtype in get_all_dtypes():
             # tries to acquire samples - failure indicates lack of support
             requires_grad = (dtype in allowed_backward_dtypes and op.supports_autograd)
             try:
diff --git a/test/test_reductions.py b/test/test_reductions.py
index 52c0a8a1d257..87768875b157 100644
--- a/test/test_reductions.py
+++ b/test/test_reductions.py
@@ -13,8 +13,8 @@ import warnings
 from torch._six import inf, nan
 from torch.testing import make_tensor
 from torch.testing._internal.common_dtype import (
-    all_types_and_complex_and, get_all_math_dtypes, integral_types, complex_types, floating_types_and,
-    integral_types_and, floating_and_complex_types_and, all_types_and,
+    get_all_dtypes, get_all_math_dtypes, get_all_int_dtypes, get_all_complex_dtypes, get_all_fp_dtypes,
+    integral_types_and, floating_and_complex_types_and
 )
 from torch.testing._internal.common_utils import (
     TestCase, run_tests, skipIfNoSciPy, slowTest, torch_to_numpy_dtype_dict,
@@ -357,13 +357,13 @@ class TestReductions(TestCase):
         self.assertEqual(result, expected, exact_dtype=False)

     @ops(filter(lambda op: op.ref is not None, reduction_ops),
-         allowed_dtypes=all_types_and_complex_and(torch.half, torch.bool))
+         allowed_dtypes=get_all_dtypes(include_bfloat16=False))
     def test_ref_scalar_input(self, device, dtype, op: ReductionOpInfo):
         """Compares op against reference for scalar input tensors"""
         self._test_ref(op, make_tensor([], dtype=dtype, device=device))

     @ops(filter(lambda op: op.ref is not None, reduction_ops),
-         allowed_dtypes=all_types_and_complex_and(torch.half, torch.bool))
+         allowed_dtypes=get_all_dtypes(include_bfloat16=False))
     def test_ref_small_input(self, device, dtype, op: ReductionOpInfo):
         """Compares op against reference for small input tensors"""
         t = make_tensor((5, 3, 4, 2), dtype=dtype, device=device, low=-2, high=2, exclude_zero=True)
@@ -391,7 +391,7 @@ class TestReductions(TestCase):
         self._test_ref(op, make_tensor((275000000,), dtype=dtype, device=device, low=-1, high=1, exclude_zero=True))

     @ops(filter(lambda op: op.ref is not None, reduction_ops),
-         allowed_dtypes=all_types_and_complex_and(torch.half, torch.bool))
+         allowed_dtypes=get_all_dtypes(include_bfloat16=False))
     def test_ref_duplicate_values(self, device, dtype, op: ReductionOpInfo):
         """Compares op against reference for input tensors with duplicate values"""
         t = make_tensor((4, 4), dtype=dtype, device=device, low=-2, high=2, exclude_zero=True)
@@ -1415,7 +1415,7 @@ class TestReductions(TestCase):
         test_dtype_bfloat16(False, True)
         test_dtype_bfloat16(True, True)

-    @dtypes(*all_types_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False, include_complex=False))
     def test_nansum(self, device, dtype):
         args = product(
             (True, False),  # noncontiguous
@@ -1468,14 +1468,15 @@ class TestReductions(TestCase):
         self.compare_with_numpy(torch_func_partial, np_func_partial, x, device=None, dtype=None,
                                 atol=atol, rtol=rtol, exact_dtype=exact_dtype)

-    @dtypes(*all_types_and_complex_and(torch.half))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False) +
+              get_all_complex_dtypes()))
     def test_count_nonzero(self, device, dtype):
         self._test_reduction_function_with_numpy(torch.count_nonzero, np.count_nonzero, device, dtype)
         self._test_reduction_function_with_numpy(torch.count_nonzero, np.count_nonzero, device, dtype, True)

     def _test_sum_reduction_vs_numpy(self, torch_fn, np_fn, device, dtype, with_keepdim=False, with_extremal=False):
         def is_integral(dtype):
-            return dtype in integral_types()
+            return dtype in get_all_int_dtypes()

         # On Windows CI, the current version of `numpy` promotes all lower integers
         # dtypes to int32 while `torch` promotes them to int64. Hence we skip on checking
Hence we skip on checking @@ -1504,30 +1505,28 @@ class TestReductions(TestCase): with_keepdim=with_keepdim, with_extremal=with_extremal) @onlyNativeDeviceTypes - @dtypes(*all_types_and(torch.half)) + @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False))) def test_sum_vs_numpy(self, device, dtype): self._test_sum_reduction_vs_numpy(torch.sum, np.sum, device, dtype) self._test_sum_reduction_vs_numpy(torch.sum, np.sum, device, dtype, with_extremal=True) self._test_sum_reduction_vs_numpy(torch.sum, np.sum, device, dtype, with_keepdim=True) @onlyNativeDeviceTypes - @dtypes(*all_types_and(torch.half)) + @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False))) def test_nansum_vs_numpy(self, device, dtype): self._test_sum_reduction_vs_numpy(torch.nansum, np.nansum, device, dtype) self._test_sum_reduction_vs_numpy(torch.nansum, np.nansum, device, dtype, with_extremal=True) self._test_sum_reduction_vs_numpy(torch.nansum, np.nansum, device, dtype, with_keepdim=True) - @dtypes(*complex_types()) + @dtypes(*(get_all_complex_dtypes())) def test_nansum_complex(self, device, dtype): x = torch.randn((3, 3, 3), device=device, dtype=dtype) with self.assertRaisesRegex(RuntimeError, "nansum does not support complex inputs"): torch.nansum(x) - @dtypes(*all_types_and(torch.half)) - def test_nansum_out_dtype(self, device, dtype): - out_dtype = dtype - inp_dtypes = all_types_and(torch.half) if out_dtype.is_floating_point else integral_types() - for inp_dtype in inp_dtypes: + def test_nansum_out_dtype(self, device): + dtypes = list(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False)) + for inp_dtype, out_dtype in combinations(dtypes, 2): shape = _rand_shape(random.randint(2, 5), min_size=5, max_size=10) x = _generate_input(shape, inp_dtype, device, with_extremal=False) torch_fn = partial(torch.nansum, dtype=out_dtype) @@ -1535,7 +1534,7 @@ class TestReductions(TestCase): np_fn = partial(np.nansum, dtype=np_out_dtype) self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None) - @dtypes(*all_types_and(torch.half)) + @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False))) def test_argminmax_multiple(self, device, dtype): # Case: All Ones t = torch.ones(3, 3, device=device, dtype=dtype) @@ -1543,7 +1542,7 @@ class TestReductions(TestCase): self.compare_with_numpy(torch.argmin, np.argmin, t) # Case: With single `nan` present. - if dtype in floating_types_and(torch.half, torch.bfloat16): + if dtype in get_all_fp_dtypes(): t[2, 2] = float('nan') self.compare_with_numpy(torch.argmax, np.argmax, t) self.compare_with_numpy(torch.argmin, np.argmin, t) @@ -1620,7 +1619,8 @@ class TestReductions(TestCase): [0, 0]], device=device, dtype=dtype) verify_against_numpy(t) - @dtypes(*all_types_and_complex_and(torch.half, torch.bool)) + @dtypes(*(get_all_dtypes(include_half=True, include_bfloat16=False, + include_bool=True, include_complex=True))) def test_all_any_vs_numpy(self, device, dtype): # Note [all, any uint8 compatibility]: However for compatibility reason, # for `uint8`, they return Tensor of same dtype `uint8`. 
@@ -1844,7 +1844,7 @@ class TestReductions(TestCase): with self.assertRaisesRegex(RuntimeError, rmsg): torch.min(x, dim=0, out=(illegal_values, illegal_indices)) - @dtypes(*all_types_and(torch.half, torch.bfloat16)) + @dtypes(*get_all_dtypes(include_bool=False, include_complex=False)) def test_dim_arg_reduction_scalar(self, device, dtype): example = 4.0 @@ -1862,7 +1862,7 @@ class TestReductions(TestCase): @precisionOverride({torch.float16: 1e-2, torch.bfloat16: 1e-2}) - @dtypes(*set(all_types_and(torch.half, torch.bfloat16)) - {torch.uint8}) + @dtypes(*(set(get_all_dtypes(include_bool=False, include_complex=False)) - {torch.uint8})) def test_dim_reduction(self, device, dtype): example = [[-1, 2, 1], [5, 3, 6]] @@ -3237,7 +3237,8 @@ as the input tensor excluding its innermost dimension'): shape = (2, 0, 4) x = torch.randn(shape, device=device) - for dtype in all_types_and_complex_and(torch.half, torch.bool): + for dtype in get_all_dtypes(include_half=True, include_bfloat16=False, + include_bool=True, include_complex=True): # Refer: [all, any uint8 compatibility] if dtype == torch.uint8: out_dtype = torch.uint8 diff --git a/test/test_serialization.py b/test/test_serialization.py index 9204392683b6..abb238b0df08 100644 --- a/test/test_serialization.py +++ b/test/test_serialization.py @@ -23,7 +23,7 @@ from torch.serialization import check_module_version_greater_or_equal from torch.testing._internal.common_utils import TestCase, IS_WINDOWS, \ TEST_DILL, run_tests, download_file, BytesIOContext, TemporaryFileName from torch.testing._internal.common_device_type import instantiate_device_type_tests -from torch.testing._internal.common_dtype import all_types_and_complex_and +from torch.testing._internal.common_dtype import get_all_dtypes # These tests were all copied from `test/test_torch.py` at some point, so see # the actual blame, see this revision @@ -616,11 +616,10 @@ class SerializationMixin(object): self.assertEqual(a, a_loaded) self.assertEqual(b, b_loaded) - for device, dtype in product(devices, all_types_and_complex_and(torch.half, - torch.bfloat16, torch.bool)): + for device, dtype in product(devices, get_all_dtypes()): a = torch.tensor([], dtype=dtype, device=device) - for other_dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool): + for other_dtype in get_all_dtypes(): s = torch._TypedStorage( wrap_storage=a.storage()._untyped(), dtype=other_dtype) diff --git a/test/test_shape_ops.py b/test/test_shape_ops.py index b6557eed0d25..13c636d6563a 100644 --- a/test/test_shape_ops.py +++ b/test/test_shape_ops.py @@ -15,7 +15,7 @@ from torch.testing._internal.common_utils import ( from torch.testing._internal.common_device_type import ( instantiate_device_type_tests, onlyCPU, onlyCUDA, dtypes, onlyNativeDeviceTypes, dtypesIfCUDA, largeTensorTest) -from torch.testing._internal.common_dtype import all_types_and_complex_and, all_types, all_types_and +from torch.testing._internal.common_dtype import get_all_dtypes # TODO: replace with make_tensor def _generate_input(shape, dtype, device, with_extremal): @@ -227,8 +227,9 @@ class TestShapeOps(TestCase): self.assertEqual(expected, result) @onlyNativeDeviceTypes - @dtypes(*all_types()) - @dtypesIfCUDA(*all_types_and(torch.half)) + @dtypes(*get_all_dtypes(include_complex=False, include_bool=False, include_half=False, + include_bfloat16=False)) + @dtypesIfCUDA(*get_all_dtypes(include_complex=False, include_bool=False, include_bfloat16=False)) def test_trace(self, device, dtype): def test(shape): tensor = make_tensor(shape, 
dtype=dtype, device=device, low=-9, high=9) @@ -340,7 +341,7 @@ class TestShapeOps(TestCase): with self.assertRaisesRegex(RuntimeError, error_msg): torch.clamp(X) - @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) + @dtypes(*get_all_dtypes()) def test_flip(self, device, dtype): make_from_data = partial(torch.tensor, device=device, dtype=dtype) make_from_size = partial(make_tensor, device=device, dtype=dtype) @@ -439,7 +440,7 @@ class TestShapeOps(TestCase): for dims in test_dims: self.assertEqual(size, list(data.flip(dims).size())) - @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) + @dtypes(*get_all_dtypes()) def test_flip_errors(self, device, dtype): make_arg = partial(make_tensor, dtype=dtype, device=device) data = make_arg((2, 2, 2)) @@ -457,7 +458,7 @@ class TestShapeOps(TestCase): def _rand_shape(self, dim, min_size, max_size): return tuple(torch.randint(min_size, max_size + 1, (dim,))) - @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) + @dtypes(*get_all_dtypes()) def test_flip_numpy(self, device, dtype): make_arg = partial(make_tensor, dtype=dtype, device=device) @@ -566,7 +567,7 @@ class TestShapeOps(TestCase): t.nonzero() self.assertEqual(len(w), 0) - @dtypes(*all_types_and(torch.half, torch.bool, torch.bfloat16)) + @dtypes(*get_all_dtypes(include_complex=False)) def test_nonzero(self, device, dtype): shapes = [ diff --git a/test/test_sort_and_select.py b/test/test_sort_and_select.py index ba99d3ed7a0f..0d21313f882f 100644 --- a/test/test_sort_and_select.py +++ b/test/test_sort_and_select.py @@ -8,7 +8,9 @@ from torch._six import nan from itertools import permutations, product from torch.testing import make_tensor -from torch.testing._internal.common_dtype import all_types, all_types_and, floating_types_and +from torch.testing._internal.common_dtype import ( + all_types, all_types_and, floating_types_and, get_all_dtypes, get_all_int_dtypes, get_all_fp_dtypes, +) from torch.testing._internal.common_utils import \ (TestCase, run_tests, slowTest) from torch.testing._internal.common_device_type import \ @@ -131,7 +133,7 @@ class TestSortAndSelect(TestCase): 'random with NaNs') # FIXME: remove torch.bool from unsupported types once support is added for cub sort - @dtypes(*all_types_and(torch.half, torch.bfloat16)) + @dtypes(*set(get_all_dtypes()) - {torch.bool, torch.complex64, torch.complex128}) def test_stable_sort(self, device, dtype): sizes = (100, 1000, 10000) for ncopies in sizes: @@ -224,7 +226,7 @@ class TestSortAndSelect(TestCase): self.assertEqual(values, values_cont) # FIXME: remove torch.bool from unsupported types once support is added for cub sort - @dtypes(*all_types_and(torch.half, torch.bfloat16)) + @dtypes(*set(get_all_dtypes()) - {torch.bool, torch.complex64, torch.complex128}) def test_stable_sort_against_numpy(self, device, dtype): if dtype in floating_types_and(torch.float16, torch.bfloat16): inf = float('inf') @@ -287,7 +289,7 @@ class TestSortAndSelect(TestCase): idx_numpy = np.argsort(sample_numpy, axis=dim, kind='stable') self.assertEqual(idx_torch, idx_numpy) - @dtypes(*all_types_and(torch.half, torch.bfloat16)) + @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes())) def test_msort(self, device, dtype): def test(shape): tensor = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9) @@ -684,7 +686,7 @@ class TestSortAndSelect(TestCase): for curr_size in (small, large, verylarge): self._test_topk_dtype(device, dtype, False, curr_size) - 
@dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16)) + @dtypesIfCUDA(*get_all_fp_dtypes()) @dtypes(torch.float, torch.double, torch.bfloat16) def test_topk_nonfinite(self, device, dtype): x = torch.tensor([float('nan'), float('inf'), 1e4, 0, -1e4, -float('inf')], device=device, dtype=dtype) @@ -715,8 +717,11 @@ class TestSortAndSelect(TestCase): self.assertEqual(ind, expected_ind, atol=0, rtol=0) @onlyNativeDeviceTypes - @dtypesIfCUDA(*all_types_and(torch.bfloat16)) - @dtypes(*all_types()) + @dtypesIfCUDA(*(get_all_dtypes(include_complex=False, + include_bool=False, + include_half=False, + include_bfloat16=True))) + @dtypes(*(get_all_dtypes(include_complex=False, include_bool=False, include_half=False, include_bfloat16=False))) def test_topk_zero(self, device, dtype): # https://github.com/pytorch/pytorch/issues/49205 t = torch.rand(2, 2, device=device).to(dtype=dtype) @@ -770,9 +775,12 @@ class TestSortAndSelect(TestCase): self.assertEqual(expected_inverse.view(additional_shape), y_inverse) self.assertEqual(expected_counts, y_counts) - @dtypesIfCPU(*all_types_and(torch.bool, torch.bfloat16)) - @dtypes(*all_types_and(torch.half, torch.bool)) + @dtypesIfCPU(*set(get_all_dtypes()) - {torch.complex64, torch.complex128}) + @dtypes(*set(get_all_dtypes()) - {torch.bfloat16, torch.complex64, torch.complex128}) def test_unique(self, device, dtype): + if dtype is torch.half and self.device_type == 'cpu': + return # CPU does not have half support + def ensure_tuple(x): if isinstance(x, torch.Tensor): return (x,) @@ -827,9 +835,12 @@ class TestSortAndSelect(TestCase): count += 1 self.assertEqual(j, count) - @dtypesIfCPU(*all_types_and(torch.bool, torch.bfloat16)) - @dtypes(*all_types_and(torch.half, torch.bool)) + @dtypesIfCPU(*set(get_all_dtypes()) - {torch.complex64, torch.complex128}) + @dtypes(*set(get_all_dtypes()) - {torch.bfloat16, torch.complex64, torch.complex128}) def test_unique_consecutive(self, device, dtype): + if dtype is torch.half and self.device_type == 'cpu': + return # CPU does not have half support + if dtype is torch.bool: x = torch.tensor([True, False, False, False, True, True, False, False, False], dtype=torch.bool, device=device) expected_unique = torch.tensor([True, False, True, False], dtype=torch.bool, device=device) diff --git a/test/test_sparse.py b/test/test_sparse.py index bdf976d88910..90be1a9c1afe 100644 --- a/test/test_sparse.py +++ b/test/test_sparse.py @@ -7,6 +7,9 @@ import operator import random import unittest from torch.testing import make_tensor +from torch.testing._internal.common_dtype import ( + all_types_and_complex, +) from torch.testing._internal.common_utils import TestCase, run_tests, skipIfRocm, do_test_dtypes, \ do_test_empty_full, load_tests, TEST_NUMPY, IS_WINDOWS, gradcheck, coalescedonoff, \ DeterministicGuard, first_sample, IS_LINUX @@ -14,6 +17,7 @@ from torch.testing._internal.common_cuda import TEST_CUDA, _get_torch_cuda_versi from numbers import Number from typing import Dict, Any from distutils.version import LooseVersion +from torch.testing import get_all_complex_dtypes, get_all_fp_dtypes from torch.testing._internal.common_cuda import \ (SM53OrLater, SM80OrLater, CUDA11OrLater) from torch.testing._internal.common_device_type import \ @@ -22,8 +26,7 @@ from torch.testing._internal.common_device_type import \ from torch.testing._internal.common_methods_invocations import \ (sparse_unary_ufuncs, sparse_masked_reduction_ops) from torch.testing._internal.common_dtype import ( - all_types, all_types_and_complex, 
-    all_types, all_types_and_complex, all_types_and_complex_and, floating_and_complex_types,
-    floating_and_complex_types_and, integral_types, floating_types_and,
+    floating_and_complex_types, floating_and_complex_types_and, get_all_dtypes, get_all_int_dtypes,
 )
 
 # load_tests from torch.testing._internal.common_utils is used to automatically filter tests for
@@ -1956,7 +1959,7 @@ class TestSparse(TestCase):
 
     def _test_log1p_tensor(self, sparse_tensor, coalesced):
         def is_integral(dtype):
-            return dtype in integral_types()
+            return dtype in get_all_int_dtypes()
 
         dense_tensor = sparse_tensor.to_dense()
         expected_output = dense_tensor.log1p()
@@ -1987,7 +1990,7 @@ class TestSparse(TestCase):
             sparse_tensor.requires_grad_()
 
     @coalescedonoff
-    @dtypes(*all_types())
+    @dtypes(*get_all_dtypes(include_bool=False, include_half=False, include_complex=False))
     def test_log1p(self, device, dtype, coalesced):
         if coalesced:
             input_coalesced = torch.sparse_coo_tensor(
@@ -2095,7 +2098,7 @@ class TestSparse(TestCase):
 
     def _test_asin_arcsin(self, sparse_tensor, coalesced):
         def is_integral(dtype):
-            return dtype in integral_types()
+            return dtype in get_all_int_dtypes()
 
         is_integral_dtype = is_integral(sparse_tensor.dtype)
         dense_tensor = sparse_tensor.to_dense()
@@ -2130,7 +2133,7 @@ class TestSparse(TestCase):
                 op(sparse_tensor)
 
     @coalescedonoff
-    @dtypes(*all_types())
+    @dtypes(*get_all_dtypes(include_bool=False, include_half=False, include_complex=False))
     def test_asin_arcsin(self, device, dtype, coalesced):
         if coalesced:
             input_coalesced = torch.sparse_coo_tensor(
@@ -2617,14 +2620,14 @@ class TestSparse(TestCase):
 
     @onlyCPU  # not really, but we only really want to run this once
     def test_dtypes(self, device):
-        all_sparse_dtypes = all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)
+        all_sparse_dtypes = get_all_dtypes(include_complex=True)
         do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
         if torch.cuda.is_available():
             do_test_dtypes(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cuda:0'))
 
     @onlyCPU  # not really, but we only really want to run this once
     def test_empty_full(self, device):
-        all_sparse_dtypes = all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)
+        all_sparse_dtypes = get_all_dtypes(include_complex=True)
         do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, torch.device('cpu'))
         if torch.cuda.device_count() > 0:
             do_test_empty_full(self, all_sparse_dtypes, torch.sparse_coo, None)
@@ -3221,12 +3224,14 @@ class TestSparse(TestCase):
     # TODO: Check after why ROCm's cusparseXcsrgemm2Nnz function doesn't return the same nnz value as CUDA
     @skipIfRocm
     @coalescedonoff
-    @dtypes(*floating_and_complex_types())
-    @dtypesIfCUDA(*floating_types_and(*[torch.half] if CUDA11OrLater and SM53OrLater else [],
-                                      *[torch.bfloat16] if CUDA11OrLater and SM80OrLater else [],
-                                      *[torch.complex64] if CUDA11OrLater else [],
-                                      *[torch.complex128] if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else []))
-    @precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2, torch.complex64: 1e-2, torch.float32: 1e-2})
+    @dtypes(*get_all_complex_dtypes(),
+            *get_all_fp_dtypes(include_half=False, include_bfloat16=False))
+    @dtypesIfCUDA(*((torch.complex64,) if CUDA11OrLater else ()),
+                  *((torch.complex128,) if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else ()),
+                  *get_all_fp_dtypes(
+                      include_half=(CUDA11OrLater and SM53OrLater),
+                      include_bfloat16=(CUDA11OrLater and SM80OrLater)))
+    @precisionOverride({torch.bfloat16: 2.5e-2, torch.float16: 2.5e-2, torch.complex64: 1e-2, torch.float32: 1e-2})
     def test_sparse_matmul(self, device, dtype, coalesced):
         """
         This function tests `torch.sparse.mm` when both the mat1 and mat2 are sparse tensors.
diff --git a/test/test_sparse_csr.py b/test/test_sparse_csr.py
index 3790f85b520f..004bcf9c9b1f 100644
--- a/test/test_sparse_csr.py
+++ b/test/test_sparse_csr.py
@@ -4,7 +4,7 @@ import torch
 import random
 import itertools
 import unittest
-from torch.testing import make_tensor
+from torch.testing import get_all_complex_dtypes, get_all_fp_dtypes, floating_and_complex_types, make_tensor
 from torch.testing._internal.common_cuda import SM53OrLater, SM80OrLater, TEST_CUSPARSE_GENERIC
 from torch.testing._internal.common_utils import \
     (TEST_WITH_ROCM, TEST_SCIPY, TEST_MKL, IS_WINDOWS, TestCase, run_tests, load_tests, coalescedonoff, parametrize)
@@ -14,10 +14,7 @@ from torch.testing._internal.common_device_type import \
 from torch.testing._internal.common_methods_invocations import \
     (op_db, sparse_csr_unary_ufuncs, )
 from torch.testing._internal.common_cuda import _get_torch_cuda_version, CUDA11OrLater
-from torch.testing._internal.common_dtype import (
-    floating_types, all_types_and_complex_and, floating_and_complex_types, floating_types_and,
-    all_types_and_complex, floating_and_complex_types_and
-)
+from torch.testing._internal.common_dtype import floating_types, get_all_dtypes
 from test_sparse import CUSPARSE_SPMM_COMPLEX128_SUPPORTED
 
 if TEST_SCIPY:
@@ -138,7 +135,7 @@ class TestSparseCSR(TestCase):
         self.assertEqual(str(torch.sparse_csr), 'torch.sparse_csr')
         self.assertEqual(type(torch.sparse_csr), torch.layout)
 
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_sparse_csr_constructor_shape_inference(self, device, dtype):
         crow_indices = [0, 2, 4]
         col_indices = [0, 1, 0, 1]
@@ -151,7 +148,7 @@ class TestSparseCSR(TestCase):
         self.assertEqual(dtype, sparse.dtype)
         self.assertEqual(torch.device(device), sparse.device)
 
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_sparse_csr_constructor(self, device, dtype):
         crow_indices = [0, 2, 4]
         col_indices = [0, 1, 0, 1]
@@ -168,7 +165,7 @@ class TestSparseCSR(TestCase):
         self.assertEqual(torch.tensor(col_indices, dtype=index_dtype), sparse.col_indices())
         self.assertEqual(torch.tensor(values, dtype=dtype), sparse.values())
 
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_sparse_csr_batch_constructor(self, device, dtype):
         batch_shape = (2, 3)
         crow_indices = torch.tensor([0, 2, 4], device=device).repeat(6, 1).reshape(*batch_shape, -1)
@@ -186,7 +183,7 @@ class TestSparseCSR(TestCase):
         self.assertEqual(col_indices.to(index_dtype), sparse.col_indices())
         self.assertEqual(values, sparse.values())
 
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_sparse_csr_batch_constructor_shape_inference(self, device, dtype):
         batch_shape = (2, 3)
         crow_indices = torch.tensor([0, 2, 4], device=device).repeat(6, 1).reshape(*batch_shape, -1)
@@ -195,7 +192,7 @@ class TestSparseCSR(TestCase):
         sparse = torch.sparse_csr_tensor(crow_indices, col_indices, values, dtype=dtype, device=device)
         self.assertEqual((*batch_shape, crow_indices.shape[-1] - 1, col_indices.max() + 1), sparse.shape)
 
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_sparse_csr_constructor_from_lists(self, device, dtype):
         # without size
         sparse = torch.sparse_csr_tensor([0, 2, 4],
@@ -225,7 +222,7 @@ class TestSparseCSR(TestCase):
         self.assertEqual(torch.tensor([1, 2, 3, 4], dtype=dtype, device=device), sparse.values())
 
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.bool, torch.bfloat16, torch.half))
+    @dtypes(*get_all_dtypes())
     def test_empty(self, device, dtype):
         ns = [5, 2, 0]
         batch_shapes = [(), (2,), (2, 3)]
@@ -248,13 +245,13 @@ class TestSparseCSR(TestCase):
         self.assertEqual(result.values().dtype, dtype)
 
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_empty_errors(self, device, dtype):
         with self.assertRaisesRegex(RuntimeError, "torch.empty: Only batched sparse CSR matrices are supported, but got size"):
             torch.empty((5,), dtype=dtype, device=device, layout=torch.sparse_csr)
 
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_clone(self, device, dtype):
         from operator import mul
         from functools import reduce
@@ -268,7 +265,7 @@ class TestSparseCSR(TestCase):
         self.assertEqual(sparse, cloned_sparse)
 
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_copy(self, device, dtype):
 
         def run_test(shape, nnz, index_type):
@@ -286,7 +283,7 @@ class TestSparseCSR(TestCase):
             run_test((*b, m, n), m * n, index_dtype)
 
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_copy_errors(self, device, dtype):
         for index_dtype in [torch.int32, torch.int64]:
             shape1 = (2, 3)
@@ -305,7 +302,7 @@ class TestSparseCSR(TestCase):
                 a.copy_(b)
 
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_resize(self, device, dtype):
         batch_shapes = [(), (2,), (2, 3)]
         for index_dtype, b in zip([torch.int32, torch.int64], batch_shapes):
@@ -333,7 +330,7 @@ class TestSparseCSR(TestCase):
         self.assertEqual(a._nnz(), 5)
 
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_resize_errors(self, device, dtype):
         for index_dtype in [torch.int32, torch.int64]:
             shape = (2, 3)
@@ -472,7 +469,7 @@ class TestSparseCSR(TestCase):
                       device=device)
 
     @onlyCUDA
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_factory_device_type_inference(self, device, dtype):
         cpu_cuda = ('cpu', 'cuda')
         cpu_cuda_none = cpu_cuda + (None,)
@@ -543,7 +540,7 @@ class TestSparseCSR(TestCase):
             self.assertExpected('\n'.join(printed))
             self.maxDiff = orig_maxDiff
 
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_sparse_csr_from_dense(self, device, dtype):
         dense = torch.tensor([[4, 5, 0], [0, 0, 0], [1, 0, 0]], dtype=dtype, device=device)
         sparse = dense.to_sparse_csr()
@@ -563,7 +560,7 @@ class TestSparseCSR(TestCase):
         self.assertEqual(torch.tensor([0, 1, 2] * 3, dtype=torch.int64), sparse.col_indices())
         self.assertEqual(torch.tensor([2] * 9, dtype=dtype), sparse.values())
 
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_sparse_csr_to_dense(self, device, dtype):
         mn = [5, 2, 0]
         for (m, n) in itertools.product(mn, mn):
@@ -655,7 +652,7 @@ class TestSparseCSR(TestCase):
         with self.assertRaisesRegex(RuntimeError, r"size \(16, 16\) with block size \(5, 5\)"):
             block_t = torch.sparse._csr_to_block_csr(t, (5, 5))
 
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_sparse_csr_from_dense_convert_error(self, device, dtype):
         size = (4, 2, 4)
         dense = make_tensor(size, dtype=dtype, device=device)
@@ -681,9 +678,8 @@ class TestSparseCSR(TestCase):
     @skipCPUIfNoMklSparse
     @skipCUDAIfNoCusparseGeneric
     @dtypes(*floating_and_complex_types())
-    @dtypesIfCUDA(*floating_and_complex_types_and(
-        *[torch.half] if SM53OrLater else [],
-        *[torch.bfloat16] if SM80OrLater else []))
+    @dtypesIfCUDA(*get_all_complex_dtypes(),
+                  *get_all_fp_dtypes(include_half=SM53OrLater, include_bfloat16=SM80OrLater))
     def test_csr_matvec(self, device, dtype):
         side = 100
         for index_dtype in [torch.int32, torch.int64]:
@@ -926,9 +922,9 @@ class TestSparseCSR(TestCase):
 
     @skipCPUIfNoMklSparse
     @dtypes(*floating_and_complex_types())
-    @dtypesIfCUDA(*floating_and_complex_types_and(
-        *[torch.half] if SM53OrLater and TEST_CUSPARSE_GENERIC else [],
-        *[torch.bfloat16] if SM80OrLater and TEST_CUSPARSE_GENERIC else []))
+    @dtypesIfCUDA(*get_all_complex_dtypes(),
+                  *get_all_fp_dtypes(include_half=SM53OrLater and TEST_CUSPARSE_GENERIC,
+                                     include_bfloat16=SM80OrLater and TEST_CUSPARSE_GENERIC))
     @precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
     def test_sparse_mm(self, device, dtype):
         def test_shape(d1, d2, d3, nnz, transposed, index_dtype):
@@ -945,9 +941,9 @@ class TestSparseCSR(TestCase):
             test_shape(7, 8, 9, 20, True, index_dtype)
 
     @dtypes(*floating_and_complex_types())
-    @dtypesIfCUDA(*floating_and_complex_types_and(
-        *[torch.half] if SM53OrLater and TEST_CUSPARSE_GENERIC else [],
-        *[torch.bfloat16] if SM80OrLater and TEST_CUSPARSE_GENERIC else []))
+    @dtypesIfCUDA(*get_all_complex_dtypes(),
+                  *get_all_fp_dtypes(include_half=SM53OrLater and TEST_CUSPARSE_GENERIC,
+                                     include_bfloat16=SM80OrLater and TEST_CUSPARSE_GENERIC))
     @precisionOverride({torch.bfloat16: 1e-2, torch.float16: 1e-2})
     def test_sparse_addmm(self, device, dtype):
         def test_shape(m, n, p, nnz, broadcast, index_dtype, alpha_beta=None):
@@ -979,10 +975,10 @@ class TestSparseCSR(TestCase):
     @dtypes(*floating_and_complex_types())
     @precisionOverride({torch.double: 1e-8, torch.float: 1e-4, torch.bfloat16: 0.6,
                         torch.half: 1e-1, torch.cfloat: 1e-4, torch.cdouble: 1e-8})
-    @dtypesIfCUDA(*floating_types_and(torch.complex64,
-                                      *[torch.bfloat16] if SM80OrLater else [],
-                                      *[torch.half] if SM53OrLater else [],
-                                      *[torch.complex128] if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else []))
+    @dtypesIfCUDA(torch.complex64,
+                  *((torch.complex128,) if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else ()),
+                  *torch.testing.get_all_fp_dtypes(include_bfloat16=SM80OrLater,
+                                                   include_half=SM53OrLater))
     @skipCUDAIf(
         not _check_cusparse_spgemm_available(),
         "cuSparse Generic API SpGEMM is not available"
@@ -1052,10 +1048,10 @@ class TestSparseCSR(TestCase):
 
     @skipCPUIfNoMklSparse
     @dtypes(*floating_and_complex_types())
-    @dtypesIfCUDA(*floating_types_and(torch.complex64,
-                                      *[torch.bfloat16] if SM80OrLater else [],
-                                      *[torch.half] if SM53OrLater else [],
-                                      *[torch.complex128] if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else []))
+    @dtypesIfCUDA(torch.complex64,
+                  *((torch.complex128,) if CUSPARSE_SPMM_COMPLEX128_SUPPORTED else ()),
+                  *torch.testing.get_all_fp_dtypes(include_bfloat16=SM80OrLater,
+                                                   include_half=SM53OrLater))
     @skipCUDAIf(
         not _check_cusparse_spgemm_available(),
         "cuSparse Generic API SpGEMM is not available"
     )
@@ -1432,7 +1428,7 @@ class TestSparseCSR(TestCase):
             torch.sparse.sampled_addmm(a_sparse, a, a_sparse)
 
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_coo_csr_conversion(self, device, dtype):
         for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
             size = (m, n)
@@ -1443,7 +1439,7 @@ class TestSparseCSR(TestCase):
             self.assertEqual(csr_sparse.to_dense(), dense)
 
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_csr_coo_conversion(self, device, dtype):
         for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
             size = (m, n)
@@ -1678,7 +1674,7 @@ class TestSparseCSR(TestCase):
             args = [make_tensor(a.shape, device=device, dtype=dtype, noncontiguous=True, requires_grad=True) for a in sample.args]
             self.assertTrue(torch.autograd.gradcheck(fn, args, fast_mode=True))
 
-    @dtypes(*all_types_and_complex())
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_direct_coo_csr_conversion(self, device, dtype):
         for m, n in itertools.product([5, 2, 0], [5, 2, 0]):
             size = (m, n)
@@ -1688,12 +1684,12 @@ class TestSparseCSR(TestCase):
             self.assertEqual(coo_sparse.to_sparse_csr().to_sparse_coo(), coo_sparse)
 
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_sum(self, device, dtype):
         def run_test(shape, nnz, index_type):
             a = self.genSparseCSRTensor(shape, nnz, dtype=dtype, device=device, index_dtype=index_dtype)
             self.assertEqual(a.sum(), a.values().sum())
-            if dtype in floating_types():
+            if dtype in get_all_fp_dtypes():
                 a.requires_grad_(True)
                 with self.assertRaisesRegex(RuntimeError, ("Function SumBackward0 returned an invalid gradient at " +
@@ -1708,7 +1704,7 @@ class TestSparseCSR(TestCase):
 
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_transpose(self, device, dtype):
 
         def run_test(shape, nnz, index_type, dim0, dim1):
@@ -1729,7 +1725,7 @@ class TestSparseCSR(TestCase):
 
     # TODO: This is a stopgap for a rigorous extension of our autograd tests
     # to test the functionality of detach
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_exercise_detach(self, device, dtype):
         shape = (3, 3)
         nnz = 4
diff --git a/test/test_tensor_creation_ops.py b/test/test_tensor_creation_ops.py
index 29dfd12b7fdf..5a5f27f4c0b2 100644
--- a/test/test_tensor_creation_ops.py
+++ b/test/test_tensor_creation_ops.py
@@ -20,8 +20,7 @@ from torch.testing._internal.common_device_type import (
     onlyCPU, largeTensorTest, precisionOverride, dtypes, onlyCUDA, skipCPUIf, dtypesIfCUDA,
     skipMeta, get_all_device_types)
 from torch.testing._internal.common_dtype import (
-    all_types_and_complex_and, get_all_math_dtypes, all_types_and, floating_and_complex_types,
-    floating_types, floating_and_complex_types_and, integral_types_and
+    get_all_dtypes, get_all_math_dtypes, get_all_int_dtypes, get_all_fp_dtypes, get_all_complex_dtypes
 )
 from torch.testing._creation import float_to_corresponding_complex_type_map
 
@@ -149,7 +148,7 @@ class TestTensorCreation(TestCase):
                          exact_dtype=False)
 
     def test_cat_all_dtypes_and_devices(self, device):
-        for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
+        for dt in get_all_dtypes():
             x = torch.tensor([[1, 2], [3, 4]], dtype=dt, device=device)
 
             expected1 = torch.tensor([[1, 2], [3, 4], [1, 2], [3, 4]], dtype=dt, device=device)
@@ -159,7 +158,7 @@ class TestTensorCreation(TestCase):
             self.assertEqual(torch.cat((x, x), 1), expected2)
 
     def test_fill_all_dtypes_and_devices(self, device):
-        for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
+        for dt in get_all_dtypes():
             for x in [torch.tensor((10, 10), dtype=dt, device=device),
                       torch.empty(10000, dtype=dt, device=device)]:  # large tensor
                 numel = x.numel()
@@ -313,7 +312,7 @@ class TestTensorCreation(TestCase):
                   (3, 1), (5, 3, 1), (7, 5, 3, 1),  # very fat matrices
                   (1, 3), (5, 1, 3), (7, 5, 1, 3),  # very thin matrices
                   (1, 3, 3, 3), (3, 1, 3, 3, 3)]  # unsqueezed batch dimensions
-        dtypes = all_types_and_complex_and(torch.half, torch.bool)
+        dtypes = [dtype for dtype in get_all_dtypes() if dtype != torch.bfloat16]
         for s, d, dtype in product(shapes, diagonals, dtypes):
             run_test(s, device, d, dtype)
@@ -1011,7 +1010,8 @@ class TestTensorCreation(TestCase):
                 np_fn(np_input)
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False) +
+              get_all_complex_dtypes()))
     def test_hstack_column_stack(self, device, dtype):
         ops = ((torch.hstack, np.hstack), (torch.column_stack, np.column_stack))
         for torch_op, np_op in ops:
@@ -1030,7 +1030,8 @@ class TestTensorCreation(TestCase):
                              torch_result)
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False) +
+              get_all_complex_dtypes()))
     def test_vstack_row_stack(self, device, dtype):
         ops = ((torch.vstack, np.vstack), (torch.row_stack, np.row_stack))
         for torch_op, np_op in ops:
@@ -1047,7 +1048,8 @@ class TestTensorCreation(TestCase):
                 self.assertEqual(actual, expected)
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False) +
+              get_all_complex_dtypes()))
     def test_dstack(self, device, dtype):
         self._test_special_stacks(2, 3, torch.dstack, np.dstack, device, dtype)
         for i in range(5):
@@ -1755,7 +1757,7 @@ class TestTensorCreation(TestCase):
                 lambda: t.random_(from_, to_)
             )
 
-    @dtypes(*all_types_and(torch.bfloat16, torch.half))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
    def test_random_full_range(self, device, dtype):
        size = 2000
        alpha = 0.1
@@ -1789,7 +1791,7 @@ class TestTensorCreation(TestCase):
         self.assertTrue(from_ <= t.to(torch.double).min() < (from_ + delta))
         self.assertTrue((to_inc_ - delta) < t.to(torch.double).max() <= to_inc_)
 
-    @dtypes(*all_types_and(torch.bfloat16, torch.half))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
     def test_random_from_to(self, device, dtype):
         size = 2000
         alpha = 0.1
@@ -1878,7 +1880,7 @@ class TestTensorCreation(TestCase):
                 lambda: t.random_(from_, to_)
             )
 
-    @dtypes(*all_types_and(torch.bfloat16, torch.half))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
     def test_random_to(self, device, dtype):
         size = 2000
         alpha = 0.1
@@ -1936,7 +1938,7 @@ class TestTensorCreation(TestCase):
                 lambda: t.random_(from_, to_)
             )
 
-    @dtypes(*all_types_and(torch.bfloat16, torch.half))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
     def test_random_default(self, device, dtype):
         size = 2000
         alpha = 0.1
@@ -2127,7 +2129,13 @@ class TestTensorCreation(TestCase):
         self.assertRaises(TypeError, lambda: torch.set_default_tensor_type(torch.float32))
 
         # don't allow passing dtype to set_default_dtype
-        for t in all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.qint8):
+        for t in get_all_dtypes(
+                include_half=True,
+                include_bfloat16=True,
+                include_bool=True,
+                include_complex=True,
+                include_complex32=True,
+                include_qint=True):
             # only floating-point types are supported as the default type
             if t in (
                     torch.half,
@@ -2666,7 +2674,7 @@ class TestTensorCreation(TestCase):
             self.assertEqual(x.stride(), y.stride())
 
     def test_eye(self, device):
-        for dtype in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
+        for dtype in get_all_dtypes():
             if dtype == torch.bfloat16:
                 continue
             # Test the RuntimeError is raised when either m or n is a negative number
@@ -2699,7 +2707,8 @@ class TestTensorCreation(TestCase):
             self.assertEqual(res1, res2)
 
     @precisionOverride({torch.float: 1e-8, torch.double: 1e-10})
-    @dtypes(*floating_and_complex_types())
+    @dtypes(*(get_all_fp_dtypes(include_half=False, include_bfloat16=False) +
+              get_all_complex_dtypes()))
     def test_linspace_vs_numpy(self, device, dtype):
         start = -0.0316082797944545745849609375 + (0.8888888888j if dtype.is_complex else 0)
         end = .0315315723419189453125 + (0.444444444444j if dtype.is_complex else 0)
@@ -2736,7 +2745,7 @@ class TestTensorCreation(TestCase):
                                  device, dtype)
 
     @precisionOverride({torch.float: 1e-6, torch.double: 1e-10})
-    @dtypes(*floating_types())
+    @dtypes(*get_all_fp_dtypes(include_half=False, include_bfloat16=False))
     def test_logspace_vs_numpy(self, device, dtype):
         start = -0.0316082797944545745849609375
         end = .0315315723419189453125
@@ -2841,7 +2850,7 @@ class TestTensorCreation(TestCase):
 
         shapes = [(5, 0, 1), (0,), (0, 0, 1, 0, 2, 0, 0)]
         for shape in shapes:
-            for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
+            for dt in get_all_dtypes():
                 self.assertEqual(shape, torch.zeros(shape, device=device, dtype=dt).shape)
                 self.assertEqual(shape, torch.zeros_like(torch.zeros(shape, device=device, dtype=dt)).shape)
@@ -2927,8 +2936,8 @@ class TestTensorCreation(TestCase):
         bfloat16_tensor = torch.arange(0, 6, step=2, dtype=torch.bfloat16, device=device)
         self.assertEqual(ref_tensor, bfloat16_tensor)
 
-    @dtypes(*all_types_and_complex_and(torch.bfloat16))
-    @dtypesIfCUDA(*all_types_and_complex_and(torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False, include_half=False))
+    @dtypesIfCUDA(*get_all_dtypes(include_bool=False, include_half=True))
     def test_linspace(self, device, dtype):
         _from = random.random()
         to = _from + random.random()
@@ -3045,12 +3054,12 @@ class TestTensorCreation(TestCase):
     # See NOTE [Linspace+Logspace precision override]
     @skipCPUIf(True, "compares with CPU")
     @precisionOverride({torch.half: 0.0039 + LINSPACE_LOGSPACE_EXTRA_EPS})
-    @dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
+    @dtypes(*(get_all_fp_dtypes() + get_all_complex_dtypes()))
     def test_linspace_device_vs_cpu(self, device, dtype):
         self._test_linspace(device, dtype, steps=10)
 
     @skipCPUIf(True, "compares with CPU")
-    @dtypes(*floating_and_complex_types_and(torch.half, torch.bfloat16))
+    @dtypes(*(get_all_fp_dtypes() + get_all_complex_dtypes()))
     def test_linspace_special_steps(self, device, dtype):
         for steps in self.LINSPACE_LOGSPACE_SPECIAL_STEPS:
             self._test_linspace(device, dtype, steps=steps)
@@ -3091,9 +3100,10 @@ class TestTensorCreation(TestCase):
             self._test_logspace(device, dtype, steps=steps)
             self._test_logspace_base2(device, dtype, steps=steps)
 
-    @dtypes(*all_types_and(torch.bfloat16))
-    @dtypesIfCUDA(*integral_types_and(torch.half, torch.bfloat16, torch.float32) if TEST_WITH_ROCM else
-                  all_types_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False, include_half=False, include_complex=False))
+    @dtypesIfCUDA(*((get_all_int_dtypes() + [torch.float32, torch.float16, torch.bfloat16])
+                    if TEST_WITH_ROCM
+                    else get_all_dtypes(include_bool=False, include_half=True, include_complex=False)))
     def test_logspace(self, device, dtype):
         _from = random.random()
         to = _from + random.random()
@@ -3891,7 +3901,7 @@ class TestAsArray(TestCase):
     # data pointer (which is basically the point here), since they all
     # return 0.
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_alias_from_tensor(self, device, dtype):
         self._test_alias_with_cvt(identity, device, dtype)
 
@@ -3902,7 +3912,7 @@ class TestAsArray(TestCase):
 
     # Skipping 'meta', since 'to_dlpack' does not work for them.
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_alias_from_dlpack(self, device, dtype):
         self._test_alias_with_cvt(to_dlpack, device, dtype)
 
@@ -3934,13 +3944,13 @@ class TestAsArray(TestCase):
 
         # Copy is forced because of different dtype
         if not only_with_dtype:
-            for other in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
+            for other in get_all_dtypes():
                 if dtype != other:
                     check(same_dtype=False, dtype=other)
                     check(same_dtype=False, dtype=other, copy=True)
 
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_copy_tensor(self, device, dtype):
         self._test_copy_with_cvt(identity, device, dtype)
 
@@ -3950,7 +3960,7 @@ class TestAsArray(TestCase):
         self._test_copy_with_cvt(to_numpy, device, dtype)
 
     @skipMeta
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_copy_from_dlpack(self, device, dtype):
         self._test_copy_with_cvt(to_dlpack, device, dtype)
 
@@ -3973,17 +3983,17 @@ class TestAsArray(TestCase):
 
     @onlyCUDA
     @deviceCountAtLeast(2)
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_copy_from_tensor_mult_devices(self, devices, dtype):
         self._test_copy_mult_devices(devices, dtype, identity)
 
     @onlyCUDA
     @deviceCountAtLeast(2)
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_copy_from_dlpack_mult_devices(self, devices, dtype):
         self._test_copy_mult_devices(devices, dtype, to_dlpack)
 
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_copy_list(self, device, dtype):
         original = make_tensor((5, 5), dtype=dtype, device=torch.device("cpu"))
 
diff --git a/test/test_testing.py b/test/test_testing.py
index 2ccb6ff36282..7e5441368591 100644
--- a/test/test_testing.py
+++ b/test/test_testing.py
@@ -22,13 +22,14 @@ from torch.testing._internal.common_device_type import \
     deviceCountAtLeast, ops, expectedFailureMeta)
 from torch.testing._internal.common_methods_invocations import op_db
 import torch.testing._internal.opinfo_helper as opinfo_helper
-from torch.testing._internal.common_dtype import all_types_and_complex_and
+from torch.testing._internal.common_dtype import get_all_dtypes
 from torch.testing._internal.common_modules import modules, module_db
 
 
 # For testing TestCase methods and torch.testing functions
 class TestTesting(TestCase):
     # Ensure that assertEqual handles numpy arrays properly
-    @dtypes(*all_types_and_complex_and(torch.bool, torch.half))
+    @dtypes(*(get_all_dtypes(include_half=True, include_bfloat16=False,
+                             include_bool=True, include_complex=True)))
     def test_assertEqual_numpy(self, device, dtype):
         S = 10
         test_sizes = [
diff --git a/test/test_torch.py b/test/test_torch.py
index 86da23646715..e873a6e35794 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -54,8 +54,8 @@ import torch.backends.quantized
 import torch.testing._internal.data
 from torch.testing._internal.common_cuda import tf32_on_and_off, tf32_is_not_fp32
 from torch.testing._internal.common_dtype import (
-    floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types,
-    all_types_and, floating_types, floating_and_complex_types, integral_types,
+    get_all_fp_dtypes, get_all_int_dtypes, get_all_math_dtypes, get_all_dtypes, get_all_complex_dtypes,
+    all_types_and_complex_and
 )
 
 # Protects against includes accidentally setting the default dtype
@@ -221,7 +221,7 @@ class TestTorchDeviceType(TestCase):
             self.assertEqual(s, storage_type(l))
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_tensor_storage_type(self, device, dtype):
         a = make_tensor((10,), dtype=dtype, device=device, low=-9, high=9)
 
@@ -231,7 +231,7 @@ class TestTorchDeviceType(TestCase):
         self.assertEqual(a.storage_type(), expected_storage_type)
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_tensor_from_storage(self, device, dtype):
         a = make_tensor((4, 5, 3), dtype=dtype, device=device, low=-9, high=9)
         a_s = a.storage()
@@ -240,7 +240,7 @@ class TestTorchDeviceType(TestCase):
         c = torch.tensor(a_s._untyped(), device=device, dtype=dtype).reshape(a.size())
         self.assertEqual(a, c)
 
-        for error_dtype in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
+        for error_dtype in get_all_dtypes():
             if error_dtype == dtype:
                 continue
             with self.assertRaisesRegex(RuntimeError, r'Expected a Storage of type'):
@@ -248,7 +248,7 @@ class TestTorchDeviceType(TestCase):
                 torch.tensor(error_storage, device=device, dtype=dtype)
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_set_storage(self, device, dtype):
         a = make_tensor((4, 5, 3), dtype=dtype, device=device, low=-9, high=9)
         a_s = a.storage()
@@ -257,7 +257,7 @@ class TestTorchDeviceType(TestCase):
         c = torch.tensor([], device=device, dtype=dtype).set_(a_s._untyped()).reshape(a.size())
         self.assertEqual(a, c)
 
-        for error_dtype in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
+        for error_dtype in get_all_dtypes():
             if error_dtype == dtype:
                 continue
             with self.assertRaisesRegex(RuntimeError, r'Expected a Storage of type'):
@@ -1563,13 +1563,13 @@ else:
             _sync_raises_helper(f, level)
 
-    @dtypes(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_fp_dtypes())
     def test_log_normal(self, device, dtype):
         a = torch.tensor([10], dtype=dtype, device=device).log_normal_()
         self.assertEqual(a.dtype, dtype)
         self.assertEqual(a.size(), torch.Size([1]))
 
-    @dtypes(*all_types_and(torch.half, torch.bfloat16))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
     def test_geometric(self, device, dtype):
         a = torch.tensor([10], dtype=dtype, device=device).geometric_(0.5)
         self.assertEqual(a.dtype, dtype)
@@ -1601,9 +1601,9 @@ else:
         self.assertEqual(a_with_output.dtype, y.dtype)
         self.assertEqual(a_with_output.size(), torch.Size([3, 2]))
 
-    @dtypes(*floating_types())
-    @dtypesIfCPU(*floating_types_and(torch.bfloat16))
-    @dtypesIfCUDA(*floating_types_and(torch.half))
+    @dtypes(*get_all_fp_dtypes(include_half=False, include_bfloat16=False))
+    @dtypesIfCPU(*(get_all_fp_dtypes(include_half=False, include_bfloat16=True)))
+    @dtypesIfCUDA(*(get_all_fp_dtypes(include_bfloat16=False)))
     def test_bernoulli_p(self, device, dtype):
         for trivial_p in ([0, 1], [1, 0, 1, 1, 0, 1]):
             x = torch.tensor(trivial_p, dtype=dtype, device=device)
@@ -1623,9 +1623,9 @@ else:
         self.assertTrue(isBinary(p))
 
     # RngUniform not implemented for Integral type in XLA test
-    @dtypes(*floating_types())
-    @dtypesIfCPU(*all_types_and(torch.bool))
-    @dtypesIfCUDA(*all_types_and(torch.bool, torch.half))
+    @dtypes(*(get_all_fp_dtypes(include_half=False, include_bfloat16=False)))
+    @dtypesIfCPU(*(get_all_dtypes(include_half=False, include_bfloat16=False, include_complex=False)))
+    @dtypesIfCUDA(*(get_all_dtypes(include_bfloat16=False, include_complex=False)))
     def test_bernoulli_self(self, device, dtype):
 
         def isBinary(t):
@@ -1637,7 +1637,7 @@ else:
             t.bernoulli_(0.5)
             self.assertTrue(isBinary(t))
 
-        for p_dtype in floating_types_and(*[torch.half] if device.startswith('cuda') else []):
+        for p_dtype in get_all_fp_dtypes(include_half=device.startswith('cuda'), include_bfloat16=False):
             p = torch.rand(10, dtype=p_dtype, device=device).expand(10, 10)
             t.fill_(2)
             t.bernoulli_(p)
@@ -1652,8 +1652,8 @@ else:
             self.assertTrue(isBinary(t))
 
     @slowTest
-    @dtypes(*floating_types())
-    @dtypesIfCUDA(*floating_types_and(torch.half))
+    @dtypes(*(get_all_fp_dtypes(include_half=False, include_bfloat16=False)))
+    @dtypesIfCUDA(*(get_all_fp_dtypes(include_bfloat16=False)))
     def test_bernoulli_edge_cases(self, device, dtype):
         # Need to draw a lot of samples to cover every random floating point number.
         a = torch.zeros(10000, 10000, dtype=dtype, device=device)  # probability of drawing "1" is 0
@@ -1664,7 +1664,7 @@ else:
         num_zeros = (torch.bernoulli(b) == 0).sum()
         self.assertEqual(num_zeros, 0)
 
-    @dtypes(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_fp_dtypes())
     def test_exponential(self, device, dtype):
         a = torch.tensor([10], dtype=dtype, device=device).exponential_(0.5)
         self.assertEqual(a.dtype, dtype)
@@ -1731,7 +1731,7 @@ else:
                 check(x, correction, fweights, aweights)
 
     @skipIfNoSciPy
-    @dtypes(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_fp_dtypes())
     def test_uniform_kstest(self, device, dtype):
         from scipy import stats
         size = 1000
@@ -1743,8 +1743,8 @@ else:
                 self.assertTrue(res.statistic < 0.1)
 
     @skipIfNoSciPy
-    @dtypes(*floating_types_and(torch.half))
-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_fp_dtypes(include_bfloat16=False))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     def test_normal_kstest(self, device, dtype):
         from scipy import stats
         size = 1000
@@ -1755,7 +1755,7 @@ else:
                 self.assertTrue(res.statistic < 0.1)
 
     @skipIfNoSciPy
-    @dtypes(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_fp_dtypes())
     def test_lognormal_kstest(self, device, dtype):
         from scipy import stats
         size = 1000
@@ -1769,7 +1769,7 @@ else:
             self.assertTrue(res.statistic < 0.1)
 
     @skipIfNoSciPy
-    @dtypes(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_fp_dtypes())
     def test_exponential_kstest(self, device, dtype):
         from scipy import stats
         size = 1000
@@ -1779,7 +1779,7 @@ else:
            self.assertTrue(res.statistic < 0.1)
 
     @skipIfNoSciPy
-    @dtypes(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_fp_dtypes())
     def test_cauchy_kstest(self, device, dtype):
         from scipy import stats
         size = 1000
@@ -1800,7 +1800,7 @@ else:
             self.assertFalse(x.isinf().sum())
 
     @skipIfNoSciPy
-    @dtypes(*all_types_and(torch.half, torch.bfloat16))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
     def test_geometric_kstest(self, device, dtype):
         from scipy import stats
         size = 1000
@@ -2280,7 +2280,7 @@ else:
 
     # All tensors appear contiguous on XLA
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool))
+    @dtypes(*get_all_dtypes(include_bfloat16=False))
     def test_diff_noncontig(self, device, dtype):
         shapes = (
             (1,),
@@ -2300,9 +2300,9 @@ else:
             self._test_diff_numpy(non_contig)
 
     # RngNormal not implemented for type f16 for XLA
-    @dtypes(*all_types_and_complex_and(torch.bool))
-    @dtypesIfCPU(*all_types_and_complex_and(torch.half, torch.bool))
-    @dtypesIfCUDA(*all_types_and_complex_and(torch.half, torch.bool))
+    @dtypes(*get_all_dtypes(include_half=False, include_bfloat16=False))
+    @dtypesIfCPU(*get_all_dtypes(include_bfloat16=False))
+    @dtypesIfCUDA(*get_all_dtypes(include_bfloat16=False))
     def test_diff(self, device, dtype):
         shapes = (
             (1,),
@@ -2541,7 +2541,7 @@ else:
 
     # FIXME: move to shape ops test suite
     def test_unfold_all_devices_and_dtypes(self, device):
-        for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
+        for dt in get_all_dtypes():
 
             if dt == torch.bool:
                 x = torch.empty((0, 1, 3, 0), dtype=dt, device=device)
@@ -2563,7 +2563,7 @@ else:
     # FIXME: move to data movement test suite
     def test_copy_all_dtypes_and_devices(self, device):
         from copy import copy
-        for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
+        for dt in get_all_dtypes():
            x = torch.tensor([1, 2, 3, 4], dtype=dt, device=device)
            x_clone = x.clone()
            y = copy(x)
@@ -2632,7 +2632,7 @@ else:
             self.assertEqual(dst, src.conj_physical())
 
     def test_clone_all_dtypes_and_devices(self, device):
-        for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
+        for dt in get_all_dtypes(include_complex32=True):
             x = torch.tensor((1, 1), dtype=dt, device=device)
             y = x.clone()
             self.assertEqual(x, y)
@@ -2703,7 +2703,7 @@ else:
         self.assertEqual(sz, y.size())
 
     # FIXME: move to test indexing
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_index_copy(self, device, dtype):
         # We just test for num_copy <= num_dest, as otherwise there are repeated indices
         # and the behavior is undefined
@@ -2738,7 +2738,7 @@ else:
     # onlyNativeDeviceTypes due to an XLA error:
     # https://github.com/pytorch/pytorch/issues/53256
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_index_copy_scalars(self, device, dtype):
         # Create the 8 possible combinations of scalar sizes for target / index / source
         scalars = ((make_tensor(size_t, dtype=dtype, device=device, low=None, high=None),
@@ -2848,7 +2848,7 @@ else:
         self.assertEqual(output, input_list)
 
     # FIXME: move to test indexing
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_index_fill(self, device, dtype):
         x = torch.tensor([[1, 2], [4, 5]], dtype=dtype, device=device)
         index = torch.tensor([0], device=device)
@@ -2866,7 +2866,7 @@ else:
     # FIXME: move to test indexing
     # The test fails for zero-dimensional tensors on XLA
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_index_select(self, device, dtype):
         num_src, num_out = 3, 5
@@ -2912,7 +2912,7 @@ else:
         self.assertEqual(out.item(), source.item())
 
     # FIXME: find a test suite for the take operator
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_take(self, device, dtype):
         idx_size = (4,)
@@ -2947,7 +2947,7 @@ else:
     # FIXME: find a test suite for the put operator
     # The bool instance does not work on GPU. See
     # https://github.com/pytorch/pytorch/issues/54317
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_put(self, device, dtype):
         src_size = (4,)
@@ -3018,7 +3018,7 @@ else:
     # FIXME: find a test suite for the put operator
     # The bool instance does not work on GPU. See
     # https://github.com/pytorch/pytorch/issues/54317
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_put_accumulate(self, device, dtype):
         # Test for parallel adds with accumulate == True
         low_precision = dtype == torch.half or dtype == torch.bfloat16
@@ -3062,9 +3062,13 @@ else:
             device_type = torch.device(device).type
             return device_type != 'cuda' or (reduceop == 'multiply' and dtype.is_floating_point)
 
-    @dtypes(*floating_and_complex_types())
-    @dtypesIfCPU(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
-    @dtypesIfCUDA(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    # FIXME: port to test_scatter_gather_ops.py
+    # torch.{zeros, ones} do not support ComplexHalf (torch.complex32)
+    # So, we are skipping it here.
+    @dtypes(*(get_all_fp_dtypes(include_bfloat16=False, include_half=False) +
+              get_all_complex_dtypes()))
+    @dtypesIfCPU(*get_all_dtypes())
+    @dtypesIfCUDA(*get_all_dtypes())
     def test_scatter_reduce_operations_to_large_input(self, device, dtype):
         index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
         test_data = [
@@ -3089,9 +3093,13 @@ else:
                 input.scatter_(0, index, src, reduce=operation)
                 self.assertEqual(input, result)
 
-    @dtypes(*floating_and_complex_types())
-    @dtypesIfCPU(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
-    @dtypesIfCUDA(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    # FIXME: port to test_scatter_gather_ops.py
+    # torch.{zeros, ones} do not support ComplexHalf (torch.complex32)
+    # So, we are skipping it here.
+    @dtypes(*(get_all_fp_dtypes(include_bfloat16=False, include_half=False) +
+              get_all_complex_dtypes()))
+    @dtypesIfCPU(*get_all_dtypes())
+    @dtypesIfCUDA(*get_all_dtypes())
     def test_scatter_reduce_scalar(self, device, dtype):
         index = torch.tensor([[1], [2]], device=device, dtype=torch.long)
         test_data = [
@@ -3128,9 +3136,13 @@ else:
                          torch.tensor([[3], [1]], device=device,
                                       dtype=torch.float32).repeat(1, width))
 
-    @dtypes(*floating_and_complex_types())
-    @dtypesIfCPU(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
-    @dtypesIfCUDA(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    # FIXME: port to test_scatter_gather_ops.py
+    # torch.{zeros, ones} do not support ComplexHalf (torch.complex32)
+    # So, we are skipping it here.
+    @dtypes(*(get_all_fp_dtypes(include_bfloat16=False, include_half=False) +
+              get_all_complex_dtypes()))
+    @dtypesIfCPU(*get_all_dtypes())
+    @dtypesIfCUDA(*get_all_dtypes())
     def test_scatter_reduce_non_unique_index(self, device, dtype):
         height = 2
         width = 2
@@ -3151,8 +3163,12 @@ else:
                 input.scatter_(0, index, src, reduce=operation)
                 self.assertEqual(input, result, msg=f"result: {result} input: {input} method: {str(operation)}")
 
+    # FIXME: port to test_scatter_gather_ops.py
+    # torch.{zeros, ones} do not support ComplexHalf (torch.complex32)
+    # So, we are skipping it here.
     @onlyCUDA
-    @dtypes(*integral_types(), *complex_types())
+    @dtypes(*(get_all_complex_dtypes() +
+              get_all_int_dtypes()))
     def test_scatter_reduce_multiply_unsupported_dtypes(self, device, dtype):
         height = 2
         width = 2
@@ -3204,7 +3220,7 @@ else:
 
     # FIXME: find a test suite for the masked scatter operator
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_masked_scatter(self, device, dtype):
         dt = dtype
         with warnings.catch_warnings(record=True) as w:
@@ -3293,7 +3309,7 @@ else:
             self.assertEqual(result, result_cpu)
 
     # FIXME: find a test suite for the masked select operator
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
+    @dtypes(*get_all_dtypes())
     def test_masked_select(self, device, dtype):
         if device == 'cpu':
             warn = 'masked_select received a mask with dtype torch.uint8,'
@@ -3361,7 +3377,7 @@ else:
             self.assertEqual(out_dc, expected, atol=0, rtol=0)
 
     # FIXME: find a test suite for the masked fill operator
-    @dtypes(*product(all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16), (torch.uint8, torch.bool)))
+    @dtypes(*product(get_all_dtypes(), (torch.uint8, torch.bool)))
     def test_masked_fill(self, device, dtypes):
         dtype = dtypes[0]
         mask_dtype = dtypes[1]
@@ -4394,7 +4410,7 @@ else:
     # FIXME: move dlpack tests to their own test class/suite
     @skipMeta
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_dlpack_capsule_conversion(self, device, dtype):
         # DLpack does not explicitly support bool (xref dmlc/dlpack#75)
         x = make_tensor((5,), dtype=dtype, device=device)
@@ -4403,7 +4419,7 @@ else:
 
     @skipMeta
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_dlpack_protocol_conversion(self, device, dtype):
         x = make_tensor((5,), dtype=dtype, device=device)
         z = from_dlpack(x)
@@ -4419,7 +4435,7 @@ else:
 
     @skipMeta
     @onlyCUDA
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_dlpack_conversion_with_streams(self, device, dtype):
         # Create a stream where the tensor will reside
         stream = torch.cuda.Stream()
@@ -4438,7 +4454,7 @@ else:
 
     @skipMeta
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_from_dlpack(self, device, dtype):
         x = make_tensor((5,), dtype=dtype, device=device)
         y = torch.from_dlpack(x)
@@ -4446,7 +4462,7 @@ else:
 
     @skipMeta
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_from_dlpack_noncontinguous(self, device, dtype):
         x = make_tensor((25,), dtype=dtype, device=device).reshape(5, 5)
 
@@ -4472,7 +4488,7 @@ else:
 
     @skipMeta
     @onlyCUDA
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_dlpack_conversion_with_diff_streams(self, device, dtype):
         stream_a = torch.cuda.Stream()
         stream_b = torch.cuda.Stream()
@@ -4489,7 +4505,7 @@ else:
 
     @skipMeta
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_from_dlpack_dtype(self, device, dtype):
         x = make_tensor((5,), dtype=dtype, device=device)
         y = torch.from_dlpack(x)
@@ -4521,7 +4537,7 @@ else:
 
     @skipMeta
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*get_all_dtypes(include_bool=False))
     def test_dlpack_tensor_invalid_stream(self, device, dtype):
         with self.assertRaises(TypeError):
             x = make_tensor((5,), dtype=dtype, device=device)
@@ -5031,7 +5047,8 @@ else:
 
     # FIXME: move to elementwise ternary test suite
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes() +
+              get_all_complex_dtypes()))
     def test_where_scalar_invalid_combination_raises(self, device, dtype):
 
         def checkRaises(scalar_type, dtype, condition, x, scalar_1):
@@ -5044,7 +5061,8 @@ else:
 
     # FIXME: move to elementwise ternary test suite
     @skipCUDAVersionIn([(11, 2)])  # test fails for 11.2, see https://github.com/pytorch/pytorch/issues/51980
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes() +
+              get_all_complex_dtypes()))
     def test_where_scalar_valid_combination(self, device, dtype):
 
         def checkResult(scalar_type, dtype, condition, x, scalar_1):
@@ -7403,12 +7421,12 @@ tensor([[[1.+1.j, 1.+1.j, 1.+1.j,  ..., 1.+1.j, 1.+1.j, 1.+1.j],
 
     # Verifies that (deep)copies of dtypes are the same objects
     def test_copy_dtypes(self):
-        for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
+        for dtype in get_all_dtypes(include_complex32=True):
             copied_dtype = copy.deepcopy(dtype)
             self.assertIs(dtype, copied_dtype)
 
     def test_dtype_is_signed(self):
-        for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.half):
+        for dtype in get_all_dtypes():
             self.assertEqual(dtype.is_signed, torch.is_signed(torch.tensor(0, dtype=dtype)))
 
         self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.quint8.is_signed)
diff --git a/test/test_type_promotion.py b/test/test_type_promotion.py
index a157f49962d5..f32a89933f08 100644
--- a/test/test_type_promotion.py
+++ b/test/test_type_promotion.py
@@ -11,7 +11,7 @@ from torch.testing._internal.common_utils import (TestCase, run_tests, load_test
 from torch.testing._internal.common_device_type import (instantiate_device_type_tests, onlyNativeDeviceTypes,
                                                         dtypes, dtypesIfCUDA, onlyCPU, expectedFailureMeta, skipMeta)
 from torch.testing._internal.common_dtype import (
-    all_types_and_complex_and, all_types_and, get_all_math_dtypes, integral_types_and, floating_types_and
+    get_all_dtypes, get_all_math_dtypes, get_all_int_dtypes, get_all_fp_dtypes
 )
 
 if TEST_NUMPY:
@@ -184,7 +184,7 @@ class TestTypePromotion(TestCase):
             self.assertEqual(bf + scalar, scalar + bf)
 
         # with tensor
-        for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
+        for dtype in get_all_dtypes():
             t = torch.tensor(1, dtype=dtype, device=device)
             self.assertEqual(bf + t, t + bf)
             if dtype in (torch.float16, torch.float32, torch.float64, torch.cfloat, torch.cdouble):
@@ -340,8 +340,7 @@ class TestTypePromotion(TestCase):
         # this seems like odd behavior but ints also create float tensors, numpy doesn't have this function.
         self.assertEqual(torch.scalar_tensor(False, device=device), torch.tensor(0., device=device))
 
-    @dtypes(*itertools.product(all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool),
-                               all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
+    @dtypes(*itertools.product(get_all_dtypes(), get_all_dtypes()))
     def test_result_type(self, device, dtypes):
         "Test result_type for tensor vs tensor and scalar vs scalar."
@@ -563,7 +562,7 @@ class TestTypePromotion(TestCase):
 
     @float_double_default_dtype
     def test_promote_self(self, device):
-        for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
+        for dtype in get_all_dtypes():
             self.assertEqual(torch.promote_types(dtype, dtype), dtype)
 
     @expectedFailureMeta
@@ -881,7 +880,7 @@ class TestTypePromotion(TestCase):
 
     @onlyNativeDeviceTypes
     def test_cat_different_dtypes(self, device):
-        dtypes = all_types_and_complex_and(torch.half, torch.bool)
+        dtypes = get_all_dtypes(include_bfloat16=False)
         for x_dtype, y_dtype in itertools.product(dtypes, dtypes):
             x_vals, y_vals = [1, 2, 3], [4, 5, 6]
 
@@ -900,7 +899,7 @@ class TestTypePromotion(TestCase):
 
     @onlyNativeDeviceTypes
     def test_cat_out_different_dtypes(self, device):
-        dtypes = all_types_and_complex_and(torch.half)
+        dtypes = get_all_dtypes(include_bfloat16=False, include_bool=False)
         for x_dtype, y_dtype, out_dtype in itertools.product(dtypes, dtypes, dtypes):
             out = torch.zeros(6, device=device, dtype=out_dtype)
             x = torch.tensor([1, 2, 3], device=device, dtype=x_dtype)
@@ -972,19 +971,21 @@ class TestTypePromotion(TestCase):
             self.assertEqual(result, a - b, exact_dtype=False)
             self.assertNotEqual(result, a.double() - b, exact_dtype=False)
 
-    @dtypesIfCUDA(*itertools.product(all_types_and(torch.half, torch.bool),
-                                     all_types_and(torch.half, torch.bool)))
-    @dtypes(*itertools.product(all_types_and(torch.bool),
-                               all_types_and(torch.bool)))
+    @dtypesIfCUDA(*itertools.product(get_all_dtypes(include_bfloat16=False, include_complex=False),
+                                     get_all_dtypes(include_bfloat16=False, include_complex=False)))
+    @dtypes(*itertools.product(get_all_dtypes(include_half=False, include_bfloat16=False,
+                                              include_complex=False),
+                               get_all_dtypes(include_half=False, include_bfloat16=False,
+                                              include_complex=False)))
     def test_atan2_type_promotion(self, device, dtypes):
         dtype1, dtype2 = dtypes
         default_float = torch.get_default_dtype()
 
         def is_int(dtype):
-            return dtype in integral_types_and(torch.bool)
+            return dtype in get_all_int_dtypes() + [torch.bool]
 
         def is_float(dtype):
-            return dtype in floating_types_and(torch.half)
+            return dtype in get_all_fp_dtypes(include_half=True, include_bfloat16=False)
 
         def get_binary_float_result_type(x, y):
             dtype1 = x.dtype
diff --git a/test/test_unary_ufuncs.py b/test/test_unary_ufuncs.py
index 07f9271b84a6..e3f528279349 100644
--- a/test/test_unary_ufuncs.py
+++ b/test/test_unary_ufuncs.py
@@ -21,8 +21,8 @@ from torch.testing._internal.common_device_type import (
     OpDTypes)
 from torch.testing import make_tensor
 from torch.testing._internal.common_dtype import (
-    floating_types_and, all_types_and_complex_and, integral_types_and, get_all_math_dtypes,
-    complex_types, all_types_and, floating_and_complex_types_and
+    floating_types_and, all_types_and_complex_and, floating_and_complex_types_and, get_all_dtypes, get_all_math_dtypes,
+    get_all_int_dtypes, get_all_fp_dtypes, get_all_complex_dtypes
 )
 
 if TEST_SCIPY:
@@ -514,7 +514,8 @@ class TestUnaryUfuncs(TestCase):
             out = torch.empty_like(input, dtype=out_dtype)
             self._test_out_arg(op, input, out, expected, **torch_kwargs)
 
-    @dtypes(*all_types_and(torch.bool, torch.half))
+    @dtypes(*(get_all_int_dtypes() + [torch.bool] +
+              get_all_fp_dtypes(include_bfloat16=False)))
     def test_nan_to_num(self, device, dtype):
         for contiguous in [False, True]:
             x = make_tensor((64, 64), low=0., high=100., dtype=dtype, device=device)
@@ -592,7 +593,7 @@ class TestUnaryUfuncs(TestCase):
             self.compare_with_numpy(torch.digamma, scipy.special.digamma, tensor)
 
     @skipCUDAIfRocm
-    @dtypes(*floating_types_and(torch.half))
+    @dtypes(*get_all_fp_dtypes(include_half=True, include_bfloat16=False))
     def test_frexp(self, device, dtype):
         input = make_tensor((50, 50), dtype=dtype, device=device)
         mantissa, exponent = torch.frexp(input)
@@ -607,13 +608,15 @@ class TestUnaryUfuncs(TestCase):
 
     @skipCUDAIfRocm
     def test_frexp_assert_raises(self, device):
-        invalid_input_dtypes = integral_types_and(torch.bool) + complex_types()
+        invalid_input_dtypes = get_all_int_dtypes() + \
+            get_all_complex_dtypes() + \
+            [torch.bool]
         for dtype in invalid_input_dtypes:
             input = make_tensor((50, 50), dtype=dtype, device=device)
             with self.assertRaisesRegex(RuntimeError,
                                         r"torch\.frexp\(\) only supports floating-point dtypes"):
                 torch.frexp(input)
 
-        for dtype in floating_types_and(torch.half):
+        for dtype in get_all_fp_dtypes(include_half=True, include_bfloat16=False):
             input = make_tensor((50, 50), dtype=dtype, device=device)
 
             dtypes = list(all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16))
@@ -1177,7 +1180,7 @@ class TestUnaryUfuncs(TestCase):
             t = torch.rand(1000, device=device).to(dtype) * r
             self._i0_helper(t)
 
-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     @dtypes(torch.bfloat16, torch.float32, torch.float64)
     @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
     def test_i0_range1(self, device, dtype):
@@ -1185,7 +1188,7 @@ class TestUnaryUfuncs(TestCase):
         # The domain is (-13.25, 13.25)
         self._i0_range_helper(13.25, device, dtype)
 
-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     @dtypes(torch.bfloat16, torch.float32, torch.float64)
     @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
     def test_i0_range2(self, device, dtype):
@@ -1200,7 +1203,7 @@ class TestUnaryUfuncs(TestCase):
         # The domain is (-709.75, 709.75)
         self._i0_range_helper(709.75, device, dtype)
 
-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     @dtypes(torch.bfloat16, torch.float32, torch.float64)
     @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
     def test_i0_special(self, device, dtype):
@@ -1210,7 +1213,7 @@ class TestUnaryUfuncs(TestCase):
         t = torch.tensor([inf, -inf, nan], device=device, dtype=dtype)
         self.assertTrue(torch.i0(t).isnan().all())
 
-    @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16))
+    @dtypesIfCUDA(*get_all_fp_dtypes())
     @dtypes(torch.bfloat16, torch.float32, torch.float64)
     @unittest.skipIf(not TEST_SCIPY, "SciPy not found")
     def test_special_i0_i1_vs_scipy(self, device, dtype):
@@ -1316,7 +1319,7 @@ class TestUnaryUfuncs(TestCase):
         for num in abs_zeros:
             self.assertGreater(math.copysign(1.0, num), 0.0)
 
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
+    @dtypes(*(get_all_dtypes(include_bool=False)))
     def test_isposinf_isneginf_non_boolean_output(self, device, dtype):
         # test non-boolean tensors as the `out=` parameters
        # boolean outputs are tested in the above testcases
@@ -1358,8 +1361,10 @@ class TestUnaryUfuncs(TestCase):
         self.assertEqual(torch.empty(0, dtype=torch.long), z[0])
 
     # TODO: rationalize with exp OpInfo
-    @dtypes(*floating_and_complex_types_and(torch.bfloat16))
-    @dtypesIfCUDA(*floating_and_complex_types_and(torch.half, torch.bfloat16))
+    @dtypes(*(get_all_fp_dtypes(include_half=False) +
+              get_all_complex_dtypes()))
+    @dtypesIfCUDA(*(get_all_fp_dtypes(include_half=True) +
+                    get_all_complex_dtypes()))
     def test_exp(self, device, dtype):
         for v in (2, -2) + ((1j, 1 + 1j) if dtype.is_complex else ()):
             a = torch.tensor(v, dtype=dtype, device=device) * torch.arange(18, device=device) / 3 * math.pi
diff --git a/test/test_view_ops.py b/test/test_view_ops.py
index 71d273d6505b..1b6fda76192f 100644
--- a/test/test_view_ops.py
+++ b/test/test_view_ops.py
@@ -16,7 +16,7 @@ from torch.testing._internal.common_utils import (
 from torch.testing._internal.common_device_type import \
     (instantiate_device_type_tests, onlyCPU, dtypes, onlyNativeDeviceTypes, skipMeta)
 from torch.testing._internal.common_dtype import (
-    all_types_and_complex_and, complex_types, all_types_and, floating_and_complex_types_and,
+    get_all_dtypes, get_all_int_dtypes, get_all_fp_dtypes, get_all_complex_dtypes
 )
 
 # TODO: replace this with make_tensor() in common_utils.py
@@ -121,14 +121,14 @@ class TestViewOps(TestCase):
         else:
             return x.transpose(dim0, dim1)
 
-    @dtypes(*all_types_and(torch.half, torch.bfloat16))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
     def test_conj_self(self, device, dtype):
         t = torch.ones(5, 5, device=device)
         s = t.conj()
         self.assertTrue(s is t)
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool))
+    @dtypes(*get_all_dtypes(include_bfloat16=False))
     def test_view_dtype_new(self, device, dtype):
         dtypes = torch_to_numpy_dtype_dict.copy()
         del dtypes[torch.bool]
@@ -210,18 +210,18 @@ class TestViewOps(TestCase):
         # because view(dtype) does not support backward yet
         # TODO: Remove this when autograd support is added
         if dtype.is_floating_point or dtype.is_complex:
-            for view_dtype in floating_and_complex_types_and(torch.half, torch.bfloat16):
+            for view_dtype in [*get_all_fp_dtypes(), *get_all_complex_dtypes()]:
                 t = make_tensor((5, 5, 64), dtype=dtype, device=device, low=-5, high=5, requires_grad=True)
                 self.assertFalse(t.view(view_dtype).requires_grad)
 
     # Test the extra error checks that happen when the view dtype
     # has a greater element size than the original dtype
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_view_dtype_upsize_errors(self, device, dtype):
         dtype_size = torch._utils._element_size(dtype)
 
-        for view_dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
+        for view_dtype in get_all_dtypes():
             view_dtype_size = torch._utils._element_size(view_dtype)
             if view_dtype_size <= dtype_size:
                 continue
@@ -302,7 +302,7 @@ class TestViewOps(TestCase):
         self.assertEqual(res.shape, torch.Size([0]))
 
     @onlyNativeDeviceTypes
-    @dtypes(*complex_types(), torch.complex32)
+    @dtypes(*get_all_complex_dtypes(include_complex32=True))
     def test_view_as_real(self, device, dtype):
         def fn(contiguous_input=True):
             t = torch.randn(3, 4, dtype=dtype, device=device)
@@ -340,7 +340,7 @@ class TestViewOps(TestCase):
         self.assertEqual(res.shape, torch.Size([2]))
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_view_tensor_split(self, device, dtype):
diff --git a/test/test_view_ops.py b/test/test_view_ops.py
index 71d273d6505b..1b6fda76192f 100644
--- a/test/test_view_ops.py
+++ b/test/test_view_ops.py
@@ -16,7 +16,7 @@ from torch.testing._internal.common_utils import (
 from torch.testing._internal.common_device_type import \
     (instantiate_device_type_tests, onlyCPU, dtypes, onlyNativeDeviceTypes, skipMeta)
 from torch.testing._internal.common_dtype import (
-    all_types_and_complex_and, complex_types, all_types_and, floating_and_complex_types_and,
+    get_all_dtypes, get_all_int_dtypes, get_all_fp_dtypes, get_all_complex_dtypes
 )
 
 # TODO: replace this with make_tensor() in common_utils.py
@@ -121,14 +121,14 @@ class TestViewOps(TestCase):
         else:
             return x.transpose(dim0, dim1)
 
-    @dtypes(*all_types_and(torch.half, torch.bfloat16))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
     def test_conj_self(self, device, dtype):
         t = torch.ones(5, 5, device=device)
         s = t.conj()
         self.assertTrue(s is t)
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool))
+    @dtypes(*get_all_dtypes(include_bfloat16=False))
     def test_view_dtype_new(self, device, dtype):
         dtypes = torch_to_numpy_dtype_dict.copy()
         del dtypes[torch.bool]
@@ -210,18 +210,18 @@ class TestViewOps(TestCase):
         # because view(dtype) does not support backward yet
         # TODO: Remove this when autograd support is added
         if dtype.is_floating_point or dtype.is_complex:
-            for view_dtype in floating_and_complex_types_and(torch.half, torch.bfloat16):
+            for view_dtype in [*get_all_fp_dtypes(), *get_all_complex_dtypes()]:
                 t = make_tensor((5, 5, 64), dtype=dtype, device=device, low=-5, high=5, requires_grad=True)
                 self.assertFalse(t.view(view_dtype).requires_grad)
 
     # Test the extra error checks that happen when the view dtype
     # has a greater element size than the original dtype
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_view_dtype_upsize_errors(self, device, dtype):
         dtype_size = torch._utils._element_size(dtype)
 
-        for view_dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
+        for view_dtype in get_all_dtypes():
             view_dtype_size = torch._utils._element_size(view_dtype)
             if view_dtype_size <= dtype_size:
                 continue
@@ -302,7 +302,7 @@ class TestViewOps(TestCase):
         self.assertEqual(res.shape, torch.Size([0]))
 
     @onlyNativeDeviceTypes
-    @dtypes(*complex_types(), torch.complex32)
+    @dtypes(*get_all_complex_dtypes(include_complex32=True))
     def test_view_as_real(self, device, dtype):
         def fn(contiguous_input=True):
             t = torch.randn(3, 4, dtype=dtype, device=device)
@@ -340,7 +340,7 @@ class TestViewOps(TestCase):
         self.assertEqual(res.shape, torch.Size([2]))
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_view_tensor_split(self, device, dtype):
         a = make_tensor((40, 30), dtype=dtype, device=device, low=-9, high=9)
         a_split_dim0 = a.tensor_split(7, 0)
@@ -351,7 +351,7 @@ class TestViewOps(TestCase):
         self.assertTrue(self.is_view_of(a, a_split_dim1_tensor))
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_view_tensor_hsplit(self, device, dtype):
         t = make_tensor((4, 4, 4), dtype=dtype, device=device, low=-9, high=9)
         t_hsplit = torch.hsplit(t, 2)
@@ -361,7 +361,7 @@ class TestViewOps(TestCase):
         self.assertEqual(t_hsplit[1][2, 0, 2], t[2, 2, 2])
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_view_tensor_vsplit(self, device, dtype):
         t = make_tensor((4, 4, 4), dtype=dtype, device=device, low=-9, high=9)
         t_vsplit = torch.vsplit(t, 2)
@@ -371,7 +371,7 @@ class TestViewOps(TestCase):
         self.assertEqual(t_vsplit[1][0, 2, 2], t[2, 2, 2])
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_view_tensor_dsplit(self, device, dtype):
         t = make_tensor((4, 4, 4), dtype=dtype, device=device, low=-9, high=9)
         t_dsplit = torch.dsplit(t, 2)
@@ -381,7 +381,7 @@ class TestViewOps(TestCase):
         self.assertEqual(t_dsplit[1][2, 2, 0], t[2, 2, 2])
 
     @onlyNativeDeviceTypes
-    @dtypes(*all_types_and(torch.half, torch.bfloat16))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes()))
     def test_imag_noncomplex(self, device, dtype):
         t = torch.ones((5, 5), dtype=dtype, device=device)
 
@@ -389,7 +389,7 @@ class TestViewOps(TestCase):
             torch.imag(t)
 
     @onlyNativeDeviceTypes
-    @dtypes(*complex_types())
+    @dtypes(*get_all_complex_dtypes())
     def test_real_imag_view(self, device, dtype):
         def compare_with_numpy(contiguous_input=True):
             t = torch.randn(3, 3, dtype=dtype, device=device)
@@ -420,7 +420,7 @@ class TestViewOps(TestCase):
         self.assertEqual(a[5:].imag, a.imag[5:])
 
     @onlyNativeDeviceTypes
-    @dtypes(*complex_types())
+    @dtypes(*get_all_complex_dtypes())
     def test_conj_imag_view(self, device, dtype) -> None:
         t = _make_tensor((4, 5,), dtype, device)
         t_numpy_conj = torch.from_numpy(t.cpu().numpy().conj()).to(device=device)
@@ -445,7 +445,7 @@ class TestViewOps(TestCase):
         self.assertEqual(torch.add(b, c), b.add_(c))
 
     @onlyNativeDeviceTypes
-    @dtypes(*product(complex_types(), all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool)))
+    @dtypes(*product(get_all_complex_dtypes(), get_all_dtypes()))
     @suppress_warnings
     def test_set_real_imag(self, device, dtypes):
         x = torch.randn(10, dtype=dtypes[0], device=device)
@@ -1264,7 +1264,7 @@ class TestOldViewOps(TestCase):
         scalar = torch.tensor(5, device=device)
         self.assertEqual(scalar, scalar.T)
 
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*(torch.testing.get_all_dtypes()))
     def test_transposes(self, device, dtype):
         for op in ("T", "H", "mT", "mH", "adjoint"):
             shapes = ((), (2, 3), (2, 3, 4)) if op[0] == "m" or op == "adjoint" else ((), (2, 3),)
@@ -1280,7 +1280,7 @@ class TestOldViewOps(TestCase):
             t2 = t2.conj()
         self.assertEqual(t2, t1)
 
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*(torch.testing.get_all_dtypes()))
     def test_transposes_errors(self, device, dtype):
         for op in ("H", "mT", "mH", "adjoint"):
             shapes = ((2,), (2, 3, 4)) if op == "H" else ((2,),)
@@ -1406,7 +1406,8 @@ class TestOldViewOps(TestCase):
             self.assertEqual(np_res, torch_res)
 
     # TODO: are these view ops?
-    @dtypes(*all_types_and_complex_and(torch.half))
+    @dtypes(*(get_all_int_dtypes() + get_all_fp_dtypes(include_bfloat16=False) +
+              get_all_complex_dtypes()))
     def test_atleast(self, device, dtype):
         self._test_atleast_dim(torch.atleast_1d, np.atleast_1d, device, dtype)
         self._test_atleast_dim(torch.atleast_2d, np.atleast_2d, device, dtype)
@@ -1543,7 +1544,7 @@ class TestOldViewOps(TestCase):
         self.assertEqual(res1, res2_numpy)
 
     # Skip BFloat16 since numpy does not support it
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool))
+    @dtypes(*get_all_dtypes(include_bfloat16=False))
     def test_broadcast_to(self, device, dtype):
         def can_broadcast(s0, s1):
             # s0.dim() <= s1.dim(), reverse s0 and s1 to compare trailing dimension
@@ -1646,7 +1647,7 @@ class TestOldViewOps(TestCase):
         self.assertEqual(tensor.view(6, 2, 1), contig_tensor.view(6, 2, 1))
         self.assertEqual(tensor.view(1, 6, 2, 1), contig_tensor.view(1, 6, 2, 1))
 
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool))
+    @dtypes(*get_all_dtypes())
     def test_reshape_view_semantics(self, device, dtype):
         tensor = make_tensor((15, 4), dtype=dtype, device=device)
         target = (20, 3)
@@ -1673,7 +1674,7 @@ class TestOldViewOps(TestCase):
 
     @onlyNativeDeviceTypes
     # Skip BFloat16 since numpy does not support it
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool))
+    @dtypes(*get_all_dtypes(include_bfloat16=False))
     def test_tensor_split_sections(self, device, dtype):
         input_sizes = [
             (0,),
@@ -1704,7 +1705,7 @@ class TestOldViewOps(TestCase):
 
     @onlyNativeDeviceTypes
     # Skip BFloat16 since numpy does not support it
-    @dtypes(*all_types_and_complex_and(torch.half, torch.bool))
+    @dtypes(*get_all_dtypes(include_bfloat16=False))
     def test_tensor_split_indices(self, device, dtype):
         input_sizes = [
             (0,),
@@ -1783,20 +1784,20 @@ class TestOldViewOps(TestCase):
 
     def test_resize_all_dtypes_and_devices(self, device):
         shape = (2, 2)
-        for dt in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
+        for dt in get_all_dtypes():
            x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device)
            x.resize_(shape)
            self.assertEqual(shape, x.shape)
 
     def test_resize_as_all_dtypes_and_devices(self, device):
-        for dt in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
+        for dt in get_all_dtypes():
             x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device)
             y = torch.tensor([[1, 2, 3], [4, 5, 6]], dtype=dt, device=device)
             x.resize_as_(y)
             self.assertEqual(y.shape, x.shape)
 
     def test_view_all_dtypes_and_devices(self, device):
-        for dt in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool):
+        for dt in get_all_dtypes():
             x = torch.tensor([[1, 2], [3, 4], [5, 6]], dtype=dt, device=device)
             self.assertEqual(x.view(6).shape, [6])
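The test_view_ops.py changes above lean on the include_* keyword arguments to carve out dtype subsets — most often dropping bfloat16 where NumPy serves as the reference implementation, since NumPy has no bfloat16 type. A sketch of the behaviour these call sites assume (defaults inferred from this diff, not from library docs):

```python
# Assumed behaviour of the include_* flags, inferred from the call sites above.
import torch
from torch.testing._internal.common_dtype import (
    get_all_complex_dtypes,
    get_all_dtypes,
)

everything = get_all_dtypes()                        # bool, ints, fp, complex
numpy_safe = get_all_dtypes(include_bfloat16=False)  # NumPy has no bfloat16

assert torch.bfloat16 in everything
assert torch.bfloat16 not in numpy_safe

# complex32 is excluded unless explicitly requested, as in test_view_as_real:
assert torch.complex32 in get_all_complex_dtypes(include_complex32=True)
assert torch.complex32 not in get_all_complex_dtypes()
```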
diff --git a/torch/testing/_internal/common_methods_invocations.py b/torch/testing/_internal/common_methods_invocations.py
index c0c4ec8f0b74..ebd5db7c65de 100644
--- a/torch/testing/_internal/common_methods_invocations.py
+++ b/torch/testing/_internal/common_methods_invocations.py
@@ -2552,22 +2552,14 @@ def sample_inputs_addcmul_addcdiv(op_info, device, dtype, requires_grad, **kwarg
     sample_inputs = []
     for input_args, broadcasts_input in test_cases:
-        # addcdiv should accept inputs with zero value
-        # Currently, it throws ZeroDivisionError when the denominator is zero
-        # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed
-        args = tuple(make_tensor(arg, dtype=dtype, device=device, requires_grad=requires_grad,
-                                 exclude_zero=True) if isinstance(arg, tuple) else arg
+        args = tuple(make_tensor(arg, dtype=dtype, device=device, requires_grad=requires_grad) if isinstance(arg, tuple) else arg
                      for arg in input_args)
         sample_inputs.append(SampleInput(
             args[0],
             args=args[1:],
             broadcasts_input=broadcasts_input))
 
-    # addcdiv should accept inputs with zero value
-    # Currently, it throws ZeroDivisionError when the denominator is zero
-    # TODO: exclude_zeros can be removed after https://github.com/pytorch/pytorch/issues/73638 is fixed
-    args = tuple(make_tensor(arg, dtype=dtype, device=device, requires_grad=requires_grad,
-                 exclude_zero=True) if isinstance(arg, tuple) else arg
+    args = tuple(make_tensor(arg, dtype=dtype, device=device, requires_grad=requires_grad) if isinstance(arg, tuple) else arg
                  for arg in input_args)
     sample_inputs.append(SampleInput(
         args[0],
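The last hunk removes the exclude_zero workaround from sample_inputs_addcmul_addcdiv, together with its TODO referencing issue #73638. For context, a sketch of what the removed flag did, assuming make_tensor's exclude_zero semantics as described in the deleted comments:

```python
# exclude_zero=True asks make_tensor to replace zeros with a small nonzero
# value, so addcdiv's denominator samples never divide by zero. With the hunk
# above the flag is gone, so sampled tensors may again contain zeros.
import torch
from torch.testing import make_tensor

t = make_tensor((100,), dtype=torch.float32, device="cpu",
                low=-1, high=1, exclude_zero=True)
assert not (t == 0).any()
```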