Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
[codemod][numpy] replace np.str with str (#103931)
Summary: `np.str` was deprecated in NumPy 1.20.0 and removed in NumPy 1.24. It was an alias for the builtin `str`, so the replacement is safe. The whole change is mechanical, generated using the following one-liner:

```
fbgr -sl 'np\.str\b' | xargs perl -pi -e 's,\bnp\.str\b,str,g'
```

Test Plan: sandcastle

Differential Revision: D46586144

Pull Request resolved: https://github.com/pytorch/pytorch/pull/103931
Approved by: https://github.com/huydhn
Committed by: PyTorch MergeBot
Parent: 72f09faf10
Commit: ae1ed27756
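The one-liner above relies on Meta-internal tooling (`fbgr`). As a rough sketch of the same mechanical rewrite using only the Python standard library — the script and its directory walk are illustrative assumptions, not part of the actual change:

```
# Hypothetical re-creation of the codemod without fbgr/perl: rewrite
# `np.str` -> `str` using the same word-boundary regex as the one-liner.
import pathlib
import re

PATTERN = re.compile(r"\bnp\.str\b")

def codemod(root: str) -> None:
    for path in pathlib.Path(root).rglob("*.py"):
        text = path.read_text()
        new_text = PATTERN.sub("str", text)
        if new_text != text:
            path.write_text(new_text)

if __name__ == "__main__":
    codemod(".")
```

The trailing `\b` in the regex is what keeps `np.str_` (a real NumPy scalar type) untouched by the rewrite.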
@@ -1636,7 +1636,7 @@ class Net:
             return do_set(self.GivenTensorIntFill)
         elif array.dtype == np.int64:
             return do_set(self.GivenTensorInt64Fill)
-        elif array.dtype == np.str:
+        elif array.dtype == str:
             return do_set(self.GivenTensorStringFill)
         elif array.dtype == np.bool:
             return do_set(self.GivenTensorBoolFill)
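The hunk above only changes the spelling of the comparison; since `np.str` was literally the builtin, the rewritten `array.dtype == str` performs the exact same check at runtime. A minimal sketch verifying the alias claim, guarded so it runs on any NumPy version:

```
import numpy as np

# On NumPy < 1.24 the alias still exists and is the builtin itself;
# on newer NumPy, accessing np.str raises AttributeError instead.
if hasattr(np, "str"):
    assert np.str is str
```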
@@ -1629,8 +1629,8 @@ class TestOperators(hu.HypothesisTestCase):
         c0 = np.ones([10, 1, 2, 16]).astype(np.float32)
         c1 = np.ones([10, 16, 2, 16]).astype(np.float32)
         c2 = np.ones([10, 16, 2, 1]).astype(np.float32)
-        # index = np.array([0, 1, 2, 1, 4], dtype=np.int)
-        # lengths = np.array([3, 2], dtype=np.int)
+        # index = np.array([0, 1, 2, 1, 4], dtype=int)
+        # lengths = np.array([3, 2], dtype=int)
         index = np.array([0, 1, 2, 1, 4], np.int64)
         lengths = np.array([3, 2], np.int32)
@@ -148,7 +148,7 @@ class LayerModelHelper(model_helper.ModelHelper):
             op_name = 'GivenTensorIntFill'
         elif array.dtype == np.int64:
             op_name = 'GivenTensorInt64Fill'
-        elif array.dtype == np.str:
+        elif array.dtype == str:
             op_name = 'GivenTensorStringFill'
         elif array.dtype == np.bool:
             op_name = 'GivenTensorBoolFill'
@@ -98,7 +98,7 @@ def adagrad_sparse_test_helper(
     # Create an indexing array containing values that are lists of indices,
     # which index into grad
     if grad.size == 0:
-        indices = np.empty(shape=(0,), dtype=np.int)
+        indices = np.empty(shape=(0,), dtype=int)
     else:
         indices = np.random.choice(
             np.arange(grad.shape[0]),
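For context, the fixed branch in isolation: the empty-gradient case must still yield an integer-typed index array so both branches hand downstream code the same kind of dtype. A runnable sketch (the shapes and sample size are made up for illustration):

```
import numpy as np

grad = np.zeros((0, 4))  # assume an empty gradient for the sketch
if grad.size == 0:
    indices = np.empty(shape=(0,), dtype=int)
else:
    indices = np.random.choice(np.arange(grad.shape[0]), size=3)
print(indices.dtype)  # NumPy's default integer dtype, e.g. int64 on Linux
```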
@@ -37,7 +37,7 @@ class TestCastOp(hu.HypothesisTestCase):
             'Cast', 'data', 'data_cast', to=core.DataType.STRING)

         def ref(data):
-            ret = data.astype(dtype=np.str)
+            ret = data.astype(dtype=str)
             # the string blob will be fetched as object, we feed and re-fetch
             # to mimic this.
             with hu.temp_workspace('tmp_ref_int_to_string'):
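Outside the caffe2 test harness, the rewritten conversion behaves identically to the old spelling, since the `dtype` argument is the same object either way. A minimal standalone sketch:

```
import numpy as np

data = np.array([1, 2, 3], dtype=np.int32)
ret = data.astype(dtype=str)   # same result as the old astype(dtype=np.str)
print(ret)                     # ['1' '2' '3']
print(ret.dtype)               # a '<U...' unicode string dtype
```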
@@ -32,8 +32,8 @@ def heatmaps_to_keypoints(maps, rois):
     heights = rois[:, 3] - rois[:, 1]
     widths = np.maximum(widths, 1)
     heights = np.maximum(heights, 1)
-    widths_ceil = np.ceil(widths).astype(np.int)
-    heights_ceil = np.ceil(heights).astype(np.int)
+    widths_ceil = np.ceil(widths).astype(int)
+    heights_ceil = np.ceil(heights).astype(int)

     num_keypoints = np.maximum(maps.shape[1], _NUM_KEYPOINTS)
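`astype(int)` resolves to NumPy's default integer type, exactly as `astype(np.int)` did while the alias existed. A small sketch of the rounded-box computation with illustrative values:

```
import numpy as np

widths = np.maximum(np.array([3.2, 0.5]), 1)  # clamp widths to >= 1
widths_ceil = np.ceil(widths).astype(int)     # round up, then cast
print(widths_ceil, widths_ceil.dtype)         # [4 1], int64 on 64-bit Linux
```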
@@ -32,7 +32,7 @@ class TestTile(serial.SerializedTestCase):
         )

         def tile_ref(X, tiles, axis):
-            dims = np.asarray([1, 1, 1], dtype=np.int)
+            dims = np.asarray([1, 1, 1], dtype=int)
             dims[axis] = tiles
             tiled_data = np.tile(X, dims)
             return (tiled_data,)
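The same `tile_ref` helper, lifted out of the hypothesis test so it can be run standalone (the input shape is chosen for illustration):

```
import numpy as np

def tile_ref(X, tiles, axis):
    dims = np.asarray([1, 1, 1], dtype=int)  # one repeat count per axis
    dims[axis] = tiles                       # repeat only the chosen axis
    return (np.tile(X, dims),)

(out,) = tile_ref(np.ones((2, 3, 4)), tiles=2, axis=1)
print(out.shape)  # (2, 6, 4)
```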
@@ -61,7 +61,7 @@ class TestTile(serial.SerializedTestCase):
         )

         def tile_ref(X, tiles, axis):
-            dims = np.asarray([1, 1], dtype=np.int)
+            dims = np.asarray([1, 1], dtype=int)
             dims[axis] = tiles
             tiled_data = np.tile(X, dims)
             return (tiled_data,)
@@ -99,7 +99,7 @@ class TestTile(serial.SerializedTestCase):
         )

         def tile_ref(X, tiles, axis):
-            dims = np.asarray([1, 1, 1], dtype=np.int)
+            dims = np.asarray([1, 1, 1], dtype=int)
             dims[axis] = tiles
             tiled_data = np.tile(X, dims)
             return (tiled_data,)
@@ -1252,7 +1252,7 @@ def InitEmptyRecord(net, schema_or_record, enforce_types=False):


 _DATA_TYPE_FOR_DTYPE = [
-    (np.str, core.DataType.STRING),
+    (str, core.DataType.STRING),
     (np.float16, core.DataType.FLOAT16),
     (np.float32, core.DataType.FLOAT),
     (np.float64, core.DataType.DOUBLE),
@@ -94,12 +94,12 @@ class TestDB(unittest.TestCase):
         s = schema.Tuple(np.int32, str, np.float32)
         s2 = schema.Struct(
             ('field_0', schema.Scalar(dtype=np.int32)),
-            ('field_1', schema.Scalar(dtype=np.str)),
+            ('field_1', schema.Scalar(dtype=str)),
             ('field_2', schema.Scalar(dtype=np.float32))
         )
         self.assertEqual(s, s2)
         self.assertEqual(s[0], schema.Scalar(dtype=np.int32))
-        self.assertEqual(s[1], schema.Scalar(dtype=np.str))
+        self.assertEqual(s[1], schema.Scalar(dtype=str))
         self.assertEqual(s[2], schema.Scalar(dtype=np.float32))
         self.assertEqual(
             s[2, 0],
@@ -67,7 +67,7 @@ def Caffe2TensorToNumpyArray(tensor):
             tensor.int64_data, dtype=np.int64).reshape(tensor.dims)
     elif tensor.data_type == caffe2_pb2.TensorProto.INT32:
         return np.asarray(
-            tensor.int32_data, dtype=np.int).reshape(tensor.dims)  # pb.INT32=>np.int use int32_data
+            tensor.int32_data, dtype=int).reshape(tensor.dims)  # pb.INT32=>int use int32_data
     elif tensor.data_type == caffe2_pb2.TensorProto.INT16:
         return np.asarray(
             tensor.int32_data, dtype=np.int16).reshape(tensor.dims)  # pb.INT16=>np.int16 use int32_data
@@ -100,9 +100,9 @@ def NumpyArrayToCaffe2Tensor(arr, name=None):
     elif arr.dtype == np.int64:
         tensor.data_type = caffe2_pb2.TensorProto.INT64
         tensor.int64_data.extend(list(arr.flatten().astype(np.int64)))
-    elif arr.dtype == np.int or arr.dtype == np.int32:
+    elif arr.dtype == int or arr.dtype == np.int32:
         tensor.data_type = caffe2_pb2.TensorProto.INT32
-        tensor.int32_data.extend(arr.flatten().astype(np.int).tolist())
+        tensor.int32_data.extend(arr.flatten().astype(int).tolist())
     elif arr.dtype == np.int16:
         tensor.data_type = caffe2_pb2.TensorProto.INT16
         tensor.int32_data.extend(list(arr.flatten().astype(np.int16)))  # np.int16=>pb.INT16 use int32_data
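The rewritten dispatch still distinguishes the platform default integer from explicit 32-bit arrays; the `or np.int32` arm is what keeps int32 covered on platforms where the default integer is int64. A quick sketch of the comparison in isolation:

```
import numpy as np

for a in (np.zeros(2, dtype=int), np.zeros(2, dtype=np.int32)):
    # dtype == int matches only the platform default integer, so the
    # explicit np.int32 check catches 32-bit arrays on 64-bit Linux hosts
    print(a.dtype, a.dtype == int or a.dtype == np.int32)
```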
@@ -271,10 +271,10 @@ class OmniglotNShot:

         # [b, setsz, 1, 84, 84]
         x_spts = np.array(x_spts).astype(np.float32).reshape(self.batchsz, setsz, 1, self.resize, self.resize)
-        y_spts = np.array(y_spts).astype(np.int).reshape(self.batchsz, setsz)
+        y_spts = np.array(y_spts).astype(int).reshape(self.batchsz, setsz)
         # [b, qrysz, 1, 84, 84]
         x_qrys = np.array(x_qrys).astype(np.float32).reshape(self.batchsz, querysz, 1, self.resize, self.resize)
-        y_qrys = np.array(y_qrys).astype(np.int).reshape(self.batchsz, querysz)
+        y_qrys = np.array(y_qrys).astype(int).reshape(self.batchsz, querysz)

         x_spts, y_spts, x_qrys, y_qrys = [
             torch.from_numpy(z).to(self.device) for z in
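One practical note on the label arrays here: on typical 64-bit builds, `astype(int)` yields int64, which `torch.from_numpy` maps to `torch.int64`, the dtype PyTorch loss functions expect for class targets. A minimal sketch:

```
import numpy as np
import torch

y = np.array([0, 1, 2]).astype(int)  # default integer dtype (int64 on Linux)
t = torch.from_numpy(y)
print(t.dtype)                       # torch.int64 on typical 64-bit builds
```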
@@ -3840,9 +3840,9 @@ class TestQuantizedLinear(TestCase):
         # xnnpack forces W_zp to 0 when using symmetric quantization
         # ONEDNN only supports symmetric quantization of weight
         if dtype == torch.qint8 or qengine_is_onednn():
-            W_zps = np.zeros(output_channels).astype(np.int)
+            W_zps = np.zeros(output_channels).astype(int)
         else:
-            W_zps = np.round(np.random.rand(output_channels) * 100 - 50).astype(np.int)
+            W_zps = np.round(np.random.rand(output_channels) * 100 - 50).astype(int)
         # when using symmetric quantization
         # special restriction for xnnpack fully connected op weight
         # [-127, 127] instead of [-128, 127]
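The asymmetric branch in isolation, runnable without the quantized-linear test fixture (the channel count is illustrative):

```
import numpy as np

output_channels = 4
# random per-channel zero points, roughly in [-50, 50), cast to the
# default integer dtype just as the rewritten test does
W_zps = np.round(np.random.rand(output_channels) * 100 - 50).astype(int)
print(W_zps, W_zps.dtype)
```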