Revert D30279364: [codemod][lint][fbcode/c*] Enable BLACK by default
Test Plan: revert-hammer
Differential Revision: D30279364 (b004307252)
Original commit changeset: c1ed77dfe43a
fbshipit-source-id: eab50857675c51e0088391af06ec0ecb14e2347e
Committed by: Facebook GitHub Bot
Parent: ed0b8a3e83
Commit: 1022443168
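
The revert mechanically restores the file's original hand-formatted style in place of Black's output. A minimal sketch of the two patterns, distilled from the hunks below (runnable on its own; the variable names are illustrative, not from the commit):

import numpy as np

arr = np.zeros(5)

# Black output (what D30279364 introduced and this revert removes):
# double-quoted strings and exploded, trailing-comma collections.
assert arr.flags["C_CONTIGUOUS"]
dtypes_black = [
    np.float32,
    np.float64,
]

# Pre-Black style (what this revert restores): single quotes and
# inline collections, wrapped by hand where needed.
assert arr.flags['C_CONTIGUOUS']
dtypes_by_hand = [np.float32, np.float64]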
@@ -1,13 +1,12 @@
+import torch
+import numpy as np
+
 from itertools import product
 
-import numpy as np
-import torch
-from torch.testing._internal.common_device_type import (
-    instantiate_device_type_tests,
-    onlyCPU,
-    dtypes,
-)
-from torch.testing._internal.common_utils import TestCase, run_tests
+from torch.testing._internal.common_utils import \
+    (TestCase, run_tests)
+from torch.testing._internal.common_device_type import \
+    (instantiate_device_type_tests, onlyCPU, dtypes)
 
 # For testing handling NumPy objects and sending tensors to / accepting
 # arrays from NumPy.
@@ -18,7 +17,7 @@ class TestNumPyInterop(TestCase):
     @onlyCPU
     def test_numpy_non_writeable(self, device):
         arr = np.zeros(5)
-        arr.flags["WRITEABLE"] = False
+        arr.flags['WRITEABLE'] = False
         self.assertWarns(UserWarning, lambda: torch.from_numpy(arr))
 
     @onlyCPU
@@ -96,7 +95,7 @@ class TestNumPyInterop(TestCase):
             x = get_castable_tensor((sz1, sz2), dtp)
             y = x.numpy()
             check2d(x, y)
-            self.assertTrue(y.flags["C_CONTIGUOUS"])
+            self.assertTrue(y.flags['C_CONTIGUOUS'])
 
             # with storage offset
             xm = get_castable_tensor((sz1 * 2, sz2), dtp)
@@ -104,13 +103,13 @@ class TestNumPyInterop(TestCase):
             y = x.numpy()
             self.assertTrue(x.storage_offset() > 0)
             check2d(x, y)
-            self.assertTrue(y.flags["C_CONTIGUOUS"])
+            self.assertTrue(y.flags['C_CONTIGUOUS'])
 
             # non-contiguous 2D
             x = get_castable_tensor((sz2, sz1), dtp).t()
             y = x.numpy()
             check2d(x, y)
-            self.assertFalse(y.flags["C_CONTIGUOUS"])
+            self.assertFalse(y.flags['C_CONTIGUOUS'])
 
             # with storage offset
             xm = get_castable_tensor((sz2 * 2, sz1), dtp)
@@ -222,14 +221,12 @@ class TestNumPyInterop(TestCase):
         self.assertEqual(torch.from_numpy(x).shape, (2, 0))
 
         # check ill-sized strides raise exception
-        x = np.array([3.0, 5.0, 8.0])
+        x = np.array([3., 5., 8.])
         x.strides = (3,)
         self.assertRaises(ValueError, lambda: torch.from_numpy(x))
 
     def test_from_list_of_ndarray_warning(self, device):
-        warning_msg = (
-            r"Creating a tensor from a list of numpy.ndarrays is extremely slow"
-        )
+        warning_msg = r"Creating a tensor from a list of numpy.ndarrays is extremely slow"
         with self.assertWarnsOnceRegex(UserWarning, warning_msg):
             torch.tensor([np.array([0]), np.array([1])], device=device)
 
@@ -278,7 +275,7 @@ class TestNumPyInterop(TestCase):
         ]
         for tp, dtype in zip(types, dtypes):
             # Only concrete class can be given where "Type[number[_64Bit]]" is expected
-            if np.dtype(dtype).kind == "u":  # type: ignore[misc]
+            if np.dtype(dtype).kind == 'u':  # type: ignore[misc]
                 # .type expects a XxxTensor, which have no type hints on
                 # purpose, so ignore during mypy type checking
                 x = torch.tensor([1, 2, 3, 4]).type(tp)  # type: ignore[call-overload]
@@ -307,7 +304,7 @@ class TestNumPyInterop(TestCase):
             asarray = np.asarray(x, dtype=dtype)
             self.assertEqual(asarray.dtype, dtype)
             # Only concrete class can be given where "Type[number[_64Bit]]" is expected
-            if np.dtype(dtype).kind == "u":  # type: ignore[misc]
+            if np.dtype(dtype).kind == 'u':  # type: ignore[misc]
                 wrapped_x = np.array([1, -2, 3, -4], dtype=dtype)
                 for i in range(len(x)):
                     self.assertEqual(asarray[i], wrapped_x[i])
@@ -321,7 +318,7 @@ class TestNumPyInterop(TestCase):
         for tp, dtype in zip(float_types, float_dtypes):
             x = torch.tensor([1, 2, 3, 4]).type(tp)  # type: ignore[call-overload]
             array = np.array([1, 2, 3, 4], dtype=dtype)
-            for func in ["sin", "sqrt", "ceil"]:
+            for func in ['sin', 'sqrt', 'ceil']:
                 ufunc = getattr(np, func)
                 res_x = ufunc(x)
                 res_array = ufunc(array)
@@ -334,21 +331,14 @@ class TestNumPyInterop(TestCase):
             x = torch.tensor([1, 2, 3, 4]).type(tp)  # type: ignore[call-overload]
             array = np.array([1, 2, 3, 4], dtype=dtype)
             geq2_x = np.greater_equal(x, 2)
-            geq2_array = np.greater_equal(array, 2).astype("uint8")
+            geq2_array = np.greater_equal(array, 2).astype('uint8')
             self.assertIsInstance(geq2_x, torch.ByteTensor)
             for i in range(len(x)):
                 self.assertEqual(geq2_x[i], geq2_array[i])
 
     @onlyCPU
     def test_multiplication_numpy_scalar(self, device) -> None:
-        for np_dtype in [
-            np.float32,
-            np.float64,
-            np.int32,
-            np.int64,
-            np.int16,
-            np.uint8,
-        ]:
+        for np_dtype in [np.float32, np.float64, np.int32, np.int64, np.int16, np.uint8]:
             for t_dtype in [torch.float, torch.double]:
                 # mypy raises an error when np.floatXY(2.0) is called
                 # even though this is valid code
@@ -366,11 +356,8 @@ class TestNumPyInterop(TestCase):
     @onlyCPU
     def test_parse_numpy_int(self, device):
         # Only concrete class can be given where "Type[number[_64Bit]]" is expected
-        self.assertRaisesRegex(
-            RuntimeError,
-            "Overflow",
-            lambda: torch.mean(torch.randn(1, 1), np.uint64(-1)),
-        )  # type: ignore[call-overload]
+        self.assertRaisesRegex(RuntimeError, "Overflow",
+                               lambda: torch.mean(torch.randn(1, 1), np.uint64(-1)))  # type: ignore[call-overload]
         # https://github.com/pytorch/pytorch/issues/29252
         for nptype in [np.int16, np.int8, np.uint8, np.int32, np.int64]:
             scalar = 3
@@ -380,10 +367,7 @@ class TestNumPyInterop(TestCase):
             # np integral type can be treated as a python int in native functions with
             # int parameters:
             self.assertEqual(torch.ones(5).diag(scalar), torch.ones(5).diag(np_val))
-            self.assertEqual(
-                torch.ones([2, 2, 2, 2]).mean(scalar),
-                torch.ones([2, 2, 2, 2]).mean(np_val),
-            )
+            self.assertEqual(torch.ones([2, 2, 2, 2]).mean(scalar), torch.ones([2, 2, 2, 2]).mean(np_val))
 
             # numpy integral type parses like a python int in custom python bindings:
             self.assertEqual(torch.Storage(np_val).size(), scalar)  # type: ignore[attr-defined]
@@ -400,40 +384,25 @@ class TestNumPyInterop(TestCase):
         self.assertEqual((np_val + t).dtype, t.dtype)
 
     def test_has_storage_numpy(self, device):
-        for dtype in [np.float32, np.float64, np.int64, np.int32, np.int16, np.uint8]:
+        for dtype in [np.float32, np.float64, np.int64,
+                      np.int32, np.int16, np.uint8]:
             arr = np.array([1], dtype=dtype)
-            self.assertIsNotNone(
-                torch.tensor(arr, device=device, dtype=torch.float32).storage()
-            )
-            self.assertIsNotNone(
-                torch.tensor(arr, device=device, dtype=torch.double).storage()
-            )
-            self.assertIsNotNone(
-                torch.tensor(arr, device=device, dtype=torch.int).storage()
-            )
-            self.assertIsNotNone(
-                torch.tensor(arr, device=device, dtype=torch.long).storage()
-            )
-            self.assertIsNotNone(
-                torch.tensor(arr, device=device, dtype=torch.uint8).storage()
-            )
+            self.assertIsNotNone(torch.tensor(arr, device=device, dtype=torch.float32).storage())
+            self.assertIsNotNone(torch.tensor(arr, device=device, dtype=torch.double).storage())
+            self.assertIsNotNone(torch.tensor(arr, device=device, dtype=torch.int).storage())
+            self.assertIsNotNone(torch.tensor(arr, device=device, dtype=torch.long).storage())
+            self.assertIsNotNone(torch.tensor(arr, device=device, dtype=torch.uint8).storage())
 
     @dtypes(*torch.testing.get_all_dtypes())
     def test_numpy_scalar_cmp(self, device, dtype):
         if dtype.is_complex:
-            tensors = (
-                torch.tensor(complex(1, 3), dtype=dtype, device=device),
-                torch.tensor([complex(1, 3), 0, 2j], dtype=dtype, device=device),
-                torch.tensor(
-                    [[complex(3, 1), 0], [-1j, 5]], dtype=dtype, device=device
-                ),
-            )
+            tensors = (torch.tensor(complex(1, 3), dtype=dtype, device=device),
+                       torch.tensor([complex(1, 3), 0, 2j], dtype=dtype, device=device),
+                       torch.tensor([[complex(3, 1), 0], [-1j, 5]], dtype=dtype, device=device))
         else:
-            tensors = (
-                torch.tensor(3, dtype=dtype, device=device),
-                torch.tensor([1, 0, -3], dtype=dtype, device=device),
-                torch.tensor([[3, 0, -1], [3, 5, 4]], dtype=dtype, device=device),
-            )
+            tensors = (torch.tensor(3, dtype=dtype, device=device),
+                       torch.tensor([1, 0, -3], dtype=dtype, device=device),
+                       torch.tensor([[3, 0, -1], [3, 5, 4]], dtype=dtype, device=device))
 
         for tensor in tensors:
             if dtype == torch.bfloat16:
@@ -442,24 +411,17 @@ class TestNumPyInterop(TestCase):
                 continue
 
             np_array = tensor.cpu().numpy()
-            for t, a in product(
-                (tensor.flatten()[0], tensor.flatten()[0].item()),
-                (np_array.flatten()[0], np_array.flatten()[0].item()),
-            ):
+            for t, a in product((tensor.flatten()[0], tensor.flatten()[0].item()),
+                                (np_array.flatten()[0], np_array.flatten()[0].item())):
                 self.assertEqual(t, a)
-                if (
-                    dtype == torch.complex64
-                    and torch.is_tensor(t)
-                    and type(a) == np.complex64
-                ):
+                if dtype == torch.complex64 and torch.is_tensor(t) and type(a) == np.complex64:
                     # TODO: Imaginary part is dropped in this case. Need fix.
                     # https://github.com/pytorch/pytorch/issues/43579
                     self.assertFalse(t == a)
                 else:
                     self.assertTrue(t == a)
 
 
 instantiate_device_type_tests(TestNumPyInterop, globals())
 
-if __name__ == "__main__":
+if __name__ == '__main__':
     run_tests()
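
For reference, a minimal standalone sketch of the torch/NumPy interop behaviors this test file exercises; it assumes only that torch and numpy are importable, and it restates what the tests above already check:

import warnings

import numpy as np
import torch

# torch.from_numpy shares memory with the source array (zero-copy),
# so writes through the array are visible in the tensor.
a = np.arange(4, dtype=np.float32)
t = torch.from_numpy(a)
a[0] = 7.0
assert t[0].item() == 7.0

# .numpy() on a CPU tensor is zero-copy too; a transposed view comes
# back non-contiguous, matching the flags checks in the diff above.
x = torch.ones(2, 3).t()
assert not x.numpy().flags['C_CONTIGUOUS']

# A non-writeable array triggers a UserWarning, because the resulting
# tensor cannot honor the read-only flag (cf. test_numpy_non_writeable).
ro = np.zeros(5)
ro.flags['WRITEABLE'] = False
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    torch.from_numpy(ro)
assert any(issubclass(w.category, UserWarning) for w in caught)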