Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-21 05:34:18 +08:00
Apply parts of pyupgrade to torch (starting with the safest changes). This PR only does two things: removes the need to inherit from object and removes unused future imports. Pull Request resolved: https://github.com/pytorch/pytorch/pull/94308 Approved by: https://github.com/ezyang, https://github.com/albanD
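As a rough sketch (not taken from any specific file in the PR), the two classes of change described above look like this:

    # before pyupgrade
    from __future__ import print_function  # unused on Python 3

    class Event(object):
        pass

    # after pyupgrade: the unused future import is dropped and the explicit
    # `object` base class is removed, since it is implicit on Python 3
    class Event:
        pass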
55 lines
1.5 KiB
Python
import torch
import functools
import random
import operator
import numpy as np
import time

# shim for torch.cuda.Event when running on cpu
class Event:
    def __init__(self, enable_timing):
        # enable_timing is accepted only for API compatibility with
        # torch.cuda.Event; it has no effect on CPU.
        pass

    def record(self):
        # Record the current wall-clock time via time.perf_counter().
        self.time = time.perf_counter()

    def elapsed_time(self, end_event):
        assert isinstance(end_event, Event)
        # Returns the difference in perf_counter units (seconds), whereas
        # torch.cuda.Event.elapsed_time reports milliseconds.
        return end_event.time - self.time

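# Example (not part of the original file): a minimal sketch of how the shim
# mirrors the torch.cuda.Event timing pattern on CPU; the helper name and the
# timed operation are illustrative assumptions.
def _example_time_matmul(a, b):
    start, end = Event(enable_timing=True), Event(enable_timing=True)
    start.record()
    result = a @ b
    end.record()
    return result, start.elapsed_time(end)  # elapsed time in seconds
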
def gen_sparse_csr(shape, nnz):
    # Create a random dense tensor, zero out all but nnz entries, and return
    # it converted to CSR format.
    fill_value = 0
    total_values = functools.reduce(operator.mul, shape, 1)
    dense = np.random.randn(total_values)
    fills = random.sample(list(range(total_values)), total_values - nnz)

    for f in fills:
        dense[f] = fill_value
    dense = torch.from_numpy(dense.reshape(shape))

    return dense.to_sparse_csr()

def gen_sparse_coo(shape, nnz):
    # Build a 2-D COO tensor by sampling nnz (row, col) positions from a random
    # dense matrix. Positions are drawn independently, so duplicate indices are
    # possible and the resulting tensor is left uncoalesced.
    dense = np.random.randn(*shape)
    values = []
    indices = [[], []]
    for n in range(nnz):
        row = random.randint(0, shape[0] - 1)
        col = random.randint(0, shape[1] - 1)
        indices[0].append(row)
        indices[1].append(col)
        values.append(dense[row, col])

    return torch.sparse_coo_tensor(indices, values, size=shape)

def gen_sparse_coo_and_csr(shape, nnz):
    # Generate one dense tensor with exactly nnz nonzero entries and return
    # both its COO and CSR representations, so the two formats hold the same
    # data.
    total_values = functools.reduce(operator.mul, shape, 1)
    dense = np.random.randn(total_values)
    fills = random.sample(list(range(total_values)), total_values - nnz)

    for f in fills:
        dense[f] = 0

    dense = torch.from_numpy(dense.reshape(shape))
    return dense.to_sparse(), dense.to_sparse_csr()
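A minimal usage sketch, not part of the file above: it generates a sparse COO input with the helpers and times a sparse-dense matmul with the CPU Event shim. The shape, nnz value, right-hand-side width, and the choice of torch.sparse.mm are illustrative assumptions.

    if __name__ == "__main__":
        # Hypothetical sizes for the benchmark inputs.
        shape, nnz = (1000, 1000), 5000
        coo = gen_sparse_coo(shape, nnz)
        rhs = torch.randn(shape[1], 64, dtype=coo.dtype)

        start, end = Event(enable_timing=True), Event(enable_timing=True)
        start.record()
        out = torch.sparse.mm(coo, rhs)  # sparse (COO) x dense matmul on CPU
        end.record()
        print(f"sparse mm: {start.elapsed_time(end):.6f} s, output shape {tuple(out.shape)}")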