Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-21 05:34:18 +08:00
Adds suppressions so pyrefly will typecheck clean: https://github.com/pytorch/pytorch/issues/163283

Almost there!

Test plan:
dmypy restart && python3 scripts/lintrunner.py -a
pyrefly check

step 1: delete lines in the pyrefly.toml file from the project-excludes field
step 2: run pyrefly check
step 3: add suppressions, clean up unused suppressions

before: https://gist.github.com/maggiemoss/4b3bf2037014e116bc00706a16aef199
after: INFO 0 errors (5,064 ignored)

Only four directories left to enable.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/164877
Approved by: https://github.com/oulgen
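For context, the suppressions added in step 3 are inline comments placed above the expression pyrefly flags, in the form seen later in this file:

# pyrefly: ignore # index-error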
131 lines
4.2 KiB
Python
# mypy: allow-untyped-defs
import numpy as np
import numpy.typing as npt


# Functions for converting
def figure_to_image(figures, close=True):
    """Render matplotlib figure to numpy format.

    Note that this requires the ``matplotlib`` package.

    Args:
        figures (matplotlib.pyplot.figure or list of figures): figure or a list of figures
        close (bool): Flag to automatically close the figure

    Returns:
        numpy.array: image in [CHW] order
    """
    import matplotlib.pyplot as plt
    import matplotlib.backends.backend_agg as plt_backend_agg

    def render_to_rgb(figure):
        canvas = plt_backend_agg.FigureCanvasAgg(figure)
        canvas.draw()
        data: npt.NDArray = np.frombuffer(canvas.buffer_rgba(), dtype=np.uint8)
        w, h = figure.canvas.get_width_height()
        image_hwc = data.reshape([h, w, 4])[:, :, 0:3]
        image_chw = np.moveaxis(image_hwc, source=2, destination=0)
        if close:
            plt.close(figure)
        return image_chw

    if isinstance(figures, list):
        images = [render_to_rgb(figure) for figure in figures]
        return np.stack(images)
    else:
        image = render_to_rgb(figures)
        return image


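# Example (illustrative, assumes matplotlib is installed): render a single
# figure to a CHW uint8 array.
#
#     import matplotlib.pyplot as plt
#     fig = plt.figure()
#     plt.plot([0, 1], [0, 1])
#     img = figure_to_image(fig)  # numpy array of shape (3, H, W), dtype uint8

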
def _prepare_video(V):
    """
    Convert a 5D tensor into a 4D tensor.

    Conversion is done from [batchsize, time(frame), channel(color), height, width] (5D tensor)
    to [time(frame), new_height, new_width, channel] (4D tensor).

    A batch of images is spread over a grid, which forms a frame.
    e.g. a video with batchsize 16 will have a 4x4 grid.
    """
    b, t, c, h, w = V.shape

    if V.dtype == np.uint8:
        V = np.float32(V) / 255.0

    def is_power2(num):
        return num != 0 and ((num & (num - 1)) == 0)

    # pad to nearest power of 2, all at once
    # pyrefly: ignore # index-error
    if not is_power2(V.shape[0]):
        # pyrefly: ignore # index-error
        len_addition = int(2 ** V.shape[0].bit_length() - V.shape[0])
        V = np.concatenate((V, np.zeros(shape=(len_addition, t, c, h, w))), axis=0)

    n_rows = 2 ** ((b.bit_length() - 1) // 2)
    # pyrefly: ignore # index-error
    n_cols = V.shape[0] // n_rows

    # tile the (padded) batch onto an n_rows x n_cols grid per frame:
    # (n_rows, n_cols, t, c, h, w) -> (t, n_rows, h, n_cols, w, c) -> (t, n_rows*h, n_cols*w, c)
    V = np.reshape(V, newshape=(n_rows, n_cols, t, c, h, w))
    V = np.transpose(V, axes=(2, 0, 4, 1, 5, 3))
    V = np.reshape(V, newshape=(t, n_rows * h, n_cols * w, c))

    return V


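# Example (illustrative): a uint8 video batch of shape (16, 30, 3, 64, 64)
# (batch, time, channel, height, width) is tiled onto a 4x4 grid, yielding a
# float32 array of shape (30, 256, 256, 3).
#
#     vid = np.random.randint(0, 255, size=(16, 30, 3, 64, 64), dtype=np.uint8)
#     frames = _prepare_video(vid)  # shape (30, 4 * 64, 4 * 64, 3)

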
def make_grid(I, ncols=8):
    # I: N1HW or N3HW
    assert isinstance(I, np.ndarray), "plugin error, should pass numpy array here"
    if I.shape[1] == 1:
        I = np.concatenate([I, I, I], 1)
    assert I.ndim == 4 and I.shape[1] == 3
    nimg = I.shape[0]
    H = I.shape[2]
    W = I.shape[3]
    ncols = min(nimg, ncols)
    nrows = int(np.ceil(float(nimg) / ncols))
    canvas = np.zeros((3, H * nrows, W * ncols), dtype=I.dtype)
    i = 0
    for y in range(nrows):
        for x in range(ncols):
            if i >= nimg:
                break
            canvas[:, y * H : (y + 1) * H, x * W : (x + 1) * W] = I[i]
            i = i + 1
    return canvas


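# Example (illustrative): 10 single-channel 32x32 images are replicated to 3
# channels and laid out on a 2-row x 8-column canvas (the last 6 cells stay zero).
#
#     imgs = np.zeros((10, 1, 32, 32), dtype=np.float32)
#     grid = make_grid(imgs, ncols=8)  # shape (3, 64, 256)

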
# if modality == 'IMG':
#     if x.dtype == np.uint8:
#         x = x.astype(np.float32) / 255.0


def convert_to_HWC(tensor, input_format):  # tensor: numpy array
    assert len(set(input_format)) == len(
        input_format
    ), f"You cannot use the same dimension shorthand twice. input_format: {input_format}"
    assert len(tensor.shape) == len(
        input_format
    ), f"size of input tensor and input format are different. \
        tensor shape: {tensor.shape}, input_format: {input_format}"
    input_format = input_format.upper()

    if len(input_format) == 4:
        index = [input_format.find(c) for c in "NCHW"]
        tensor_NCHW = tensor.transpose(index)
        tensor_CHW = make_grid(tensor_NCHW)
        return tensor_CHW.transpose(1, 2, 0)

    if len(input_format) == 3:
        index = [input_format.find(c) for c in "HWC"]
        tensor_HWC = tensor.transpose(index)
        if tensor_HWC.shape[2] == 1:
            tensor_HWC = np.concatenate([tensor_HWC, tensor_HWC, tensor_HWC], 2)
        return tensor_HWC

    if len(input_format) == 2:
        index = [input_format.find(c) for c in "HW"]
        tensor = tensor.transpose(index)
        tensor = np.stack([tensor, tensor, tensor], 2)
        return tensor
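

# Example (illustrative): a CHW tensor is rearranged to HWC by describing its
# current layout with the input_format string.
#
#     chw = np.zeros((3, 32, 64), dtype=np.float32)
#     hwc = convert_to_HWC(chw, "CHW")  # shape (32, 64, 3)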