fix numpy 1.24 deprecations in unittests (#93997)

Fixes https://github.com/pytorch/pytorch/issues/91329

Pull Request resolved: https://github.com/pytorch/pytorch/pull/93997
Approved by: https://github.com/ngimel, https://github.com/jerryzh168
Author: Yuxin Wu
Date: 2023-02-18 00:59:06 +00:00
Committer: PyTorch MergeBot
Parent: 9dbfca7840
Commit: 9bb2fe3eae
6 changed files with 10 additions and 9 deletions
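
NumPy 1.24 removed the aliases np.int, np.float, and np.bool (deprecated since NumPy 1.20), so code that still referenced them started raising AttributeError; the diffs below swap in the equivalent Python builtins. A minimal illustrative sketch of the failure mode and the fix, not taken from any of the changed files:

import numpy as np

# On numpy >= 1.24 the removed aliases fail at attribute lookup:
#   AttributeError: module 'numpy' has no attribute 'int'
# W_zps = np.zeros(4).astype(np.int)

# The builtins work on all numpy versions (np.int/np.float/np.bool were plain aliases for them):
W_zps = np.zeros(4).astype(int)                        # platform default integer dtype
expect = np.prod(np.array([True, False]), dtype=bool)  # bool instead of np.bool
x = float(3.0)                                         # float instead of np.float(3.)

Where a specific width is needed, the sized dtypes (np.int64, np.float64, np.bool_) remain available; the builtins suffice here because the original code only used the bare aliases.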


@@ -3007,7 +3007,7 @@ class TestDynamicQuantizedOps(TestCase):
# W_scale = 1.0
# W_zp = 0
W_scales = np.ones(output_channels)
- W_zps = np.zeros(output_channels).astype(np.int)
+ W_zps = np.zeros(output_channels).astype(int)
W_value_min = -128
W_value_max = 127
W_q0 = np.round(
@@ -3571,9 +3571,9 @@ class TestQuantizedLinear(TestCase):
# xnnpack forces W_zp to 0 when using symmetric quantization
# ONEDNN only supports symmetric quantization of weight
if dtype == torch.qint8 or qengine_is_onednn():
- W_zps = np.zeros(output_channels).astype(np.int)
+ W_zps = np.zeros(output_channels).astype(int)
else:
- W_zps = np.round(np.random.rand(output_channels) * 100 - 50).astype(np.int)
+ W_zps = np.round(np.random.rand(output_channels) * 100 - 50).astype(int)
# when using symmetric quantization
# special restriction for xnnpack fully connected op weight
# [-127, 127] instead of [-128, 127]


@@ -1434,7 +1434,7 @@ class TestReductions(TestCase):
vals = [[True, True], [True, False], [False, False], []]
for val in vals:
result = torch.prod(torch.tensor(val, device=device), dtype=torch.bool).item()
- expect = np.prod(np.array(val), dtype=np.bool)
+ expect = np.prod(np.array(val), dtype=bool)
self.assertEqual(result, expect)
result = torch.prod(torch.tensor(val, device=device)).item()


@@ -1444,14 +1444,14 @@ class TestTensorCreation(TestCase):
def test_ctor_with_numpy_array(self, device):
correct_dtypes = [
np.double,
- np.float,
+ float,
np.float16,
np.int64,
np.int32,
np.int16,
np.int8,
np.uint8,
- np.bool,
+ bool,
]
incorrect_byteorder = '>' if sys.byteorder == 'little' else '<'


@@ -807,7 +807,7 @@ class TestTensorBoardNumpy(BaseTestCase):
model = ModelHelper(name="mnist")
# how come those inputs don't break the forward pass =.=a
workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
- workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int))
+ workspace.FeedBlob("label", np.random.randn(1, 1000).astype(int))
with core.NameScope("conv1"):
conv1 = brew.conv(model, "data", 'conv1', dim_in=1, dim_out=20, kernel=5)
@@ -842,7 +842,7 @@ class TestTensorBoardNumpy(BaseTestCase):
def test_caffe2_simple_cnnmodel(self):
model = cnn.CNNModelHelper("NCHW", name="overfeat")
workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
- workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int))
+ workspace.FeedBlob("label", np.random.randn(1, 1000).astype(int))
with core.NameScope("conv1"):
conv1 = model.Conv("data", "conv1", 3, 96, 11, stride=4)
relu1 = model.Relu(conv1, conv1)


@@ -6367,7 +6367,7 @@ class TestTorch(TestCase):
# fail parse with float variables
self.assertRaises(TypeError, lambda: torch.ones((torch.tensor(3.), torch.tensor(4))))
# fail parse with numpy floats
- self.assertRaises(TypeError, lambda: torch.ones((np.float(3.), torch.tensor(4))))
+ self.assertRaises(TypeError, lambda: torch.ones((3., torch.tensor(4))))
self.assertRaises(TypeError, lambda: torch.ones((np.array(3.), torch.tensor(4))))
# fail parse with > 1 element variables


@@ -380,6 +380,7 @@ def make_histogram(values, bins, max_bins=None):
limits = new_limits
# Find the first and the last bin defining the support of the histogram:
cum_counts = np.cumsum(np.greater(counts, 0))
start, end = np.searchsorted(cum_counts, [0, cum_counts[-1] - 1], side="right")
start = int(start)