Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
[codemod][lint][fbcode/c*] Enable BLACK by default
Test Plan: manual inspection & sandcastle
Reviewed By: zertosh
Differential Revision: D30279364
fbshipit-source-id: c1ed77dfe43a3bde358f92737cd5535ae5d13c9a
Committed by: Facebook GitHub Bot
Parent: aac3c7bd06
Commit: b004307252
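The changes below are mechanical: Black normalizes single quotes to double quotes and re-wraps long calls, and the codemod also regroups the imports. For reference, a minimal sketch of reproducing the quote normalization with the open-source `black` package (the exact fbcode lint configuration is not part of this commit):

```python
# Sketch only: assumes the open-source `black` package is installed;
# fbcode's lint config may differ from black.Mode() defaults.
import black

src = "A = te.Placeholder('A', dtype, [dN])\n"
formatted = black.format_str(src, mode=black.Mode())
print(formatted, end="")  # -> A = te.Placeholder("A", dtype, [dN])
```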
@@ -1,10 +1,10 @@
-import torch
-import numpy as np
-import torch._C._te as te
-import unittest
-
+import numpy as np
+import torch
+import torch._C._te as te
 from torch.testing._internal.common_utils import run_tests
 from torch.testing._internal.jit_utils import JitTestCase
+import unittest
+
 
 LLVM_ENABLED = torch._C._llvm_enabled()
 
@@ -19,19 +19,19 @@ class kernel_arena_scope(object):
 
 def construct_adder(n: int, dtype=te.Dtype.Float):
     dN = te.ExprHandle.int(n)
-    A = te.Placeholder('A', dtype, [dN])
-    B = te.Placeholder('B', dtype, [dN])
+    A = te.Placeholder("A", dtype, [dN])
+    B = te.Placeholder("B", dtype, [dN])
 
     def compute(i):
         return A.load([i]) + B.load([i])
 
-    C = te.Compute('C', [te.DimArg(dN, 'i')], compute)
+    C = te.Compute("C", [te.DimArg(dN, "i")], compute)
 
     loopnest = te.LoopNest([C])
     loopnest.prepare_for_codegen()
     stmt = te.simplify(loopnest.root_stmt())
 
-    return te.construct_codegen('ir_eval', stmt, [A, B, C])
+    return te.construct_codegen("ir_eval", stmt, [A, B, C])
 
 
 class TestTensorExprPyBind(JitTestCase):
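For context, `construct_adder` is exercised by the tests in this file roughly as follows; a sketch that assumes the `kernel_arena_scope()` context manager named above (its body is elided here):

```python
# Sketch: run the generated ir_eval kernel (inside kernel_arena_scope()).
import torch

n = 32
cg = construct_adder(n)    # codegen computing C[i] = A[i] + B[i]

tA = torch.randn(n)
tB = torch.randn(n)
tC = torch.empty(n)
cg.call([tA, tB, tC])      # fills tC in place
assert torch.allclose(tA + tB, tC)
```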
@@ -63,15 +63,17 @@ class TestTensorExprPyBind(JitTestCase):
 
         ONE = te.ExprHandle.int(1)
         FOUR = te.ExprHandle.int(4)
-        A = te.BufHandle('A', [ONE, FOUR], dtype)
-        B = te.BufHandle('B', [FOUR, ONE], dtype)
-        C = te.BufHandle('C', [ONE, ONE], dtype)
+        A = te.BufHandle("A", [ONE, FOUR], dtype)
+        B = te.BufHandle("B", [FOUR, ONE], dtype)
+        C = te.BufHandle("C", [ONE, ONE], dtype)
 
         s = te.ExternalCall(C, "nnc_aten_matmul", [A, B], [])
 
         loopnest = te.LoopNest(s, [C])
         loopnest.prepare_for_codegen()
-        codegen = te.construct_codegen('ir_eval', s, [te.BufferArg(x) for x in [A, B, C]])
+        codegen = te.construct_codegen(
+            "ir_eval", s, [te.BufferArg(x) for x in [A, B, C]]
+        )
 
         tA = torch.ones(1, 4)
         tB = torch.ones(4, 1)
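The hunk cuts off just before the kernel is invoked; a hedged sketch of how it presumably completes, following the call pattern used elsewhere in this file:

```python
# Sketch: allocate the 1x1 output and dispatch to nnc_aten_matmul.
tC = torch.empty(1, 1)
codegen.call([tA, tB, tC])
assert torch.allclose(torch.matmul(tA, tB), tC)
```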
@@ -88,15 +90,12 @@ class TestTensorExprPyBind(JitTestCase):
         def compute(i):
             return A.load(i) - B.load(i)
 
-        C = te.Compute('C', [dN], compute)
+        C = te.Compute("C", [dN], compute)
 
         loopnest = te.LoopNest([C])
         loopnest.prepare_for_codegen()
 
-        cg = te.construct_codegen(
-            'ir_eval',
-            loopnest.simplify(),
-            [A, B, C, dN])
+        cg = te.construct_codegen("ir_eval", loopnest.simplify(), [A, B, C, dN])
 
         def test_with_shape(n):
             tA = torch.randn(n, dtype=torch.double)
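Note that `dN` is passed as a kernel argument here, so the compiled kernel is shape-polymorphic. A sketch of how `test_with_shape` presumably continues past the hunk boundary (the trailing integer binds `dN` at call time):

```python
# Sketch of the dynamic-shape call inside test_with_shape(n):
tB = torch.randn(n, dtype=torch.double)
tC = torch.empty(n, dtype=torch.double)
cg.call([tA, tB, tC, n])   # n is bound to the symbolic dimension dN
assert torch.allclose(tA - tB, tC)
```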
@@ -113,15 +112,14 @@ class TestTensorExprPyBind(JitTestCase):
         one = te.ExprHandle.int(1)
         te.Placeholder([one], torch.float32)  # ok
         te.Placeholder([one])  # ok
-        self.assertRaises(TypeError,
-                          lambda: te.Placeholder([one], "float55"))
+        self.assertRaises(TypeError, lambda: te.Placeholder([one], "float55"))
 
     @unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
     def test_kernel_with_tensor_inputs(self):
         def f(a, b, c):
             return a + b + c
 
-        device, size = 'cpu', (4, 4)
+        device, size = "cpu", (4, 4)
         x = torch.rand(size, device=device)
         y = torch.rand(size, device=device)
         z = torch.rand(size, device=device)
@@ -149,9 +147,9 @@ graph(%a.1 : Float(4, 4, strides=[4, 1], requires_grad=0, device=cpu),
         def f(a, b, c):
             return a + b + c
 
-        x = torch.tensor(0.1, dtype=torch.float, device='cpu')
-        y = torch.tensor(0.6, dtype=torch.float, device='cpu')
-        z = torch.tensor(0.7, dtype=torch.float, device='cpu')
+        x = torch.tensor(0.1, dtype=torch.float, device="cpu")
+        y = torch.tensor(0.6, dtype=torch.float, device="cpu")
+        z = torch.tensor(0.7, dtype=torch.float, device="cpu")
 
         graph_str = """
 graph(%a.1 : Float(requires_grad=0, device=cpu),
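These graph-string tests all follow one pattern: parse TorchScript IR, compile it with `TensorExprKernel`, run the compiled kernel, and compare against the eager `f`. A sketch of the steps that follow each `graph_str`, based on that visible pattern:

```python
# Sketch: compile the IR and check it against eager execution.
graph = torch._C.parse_ir(graph_str)
kernel = torch._C._te.TensorExprKernel(graph)
res = kernel.run((x, y, z))
assert torch.allclose(res, f(x, y, z))
```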
@@ -173,7 +171,7 @@ graph(%a.1 : Float(requires_grad=0, device=cpu),
 
     @unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
     def test_kernel_shape_prop(self):
-        device, size = 'cpu', (4, 4)
+        device, size = "cpu", (4, 4)
         x = torch.rand(size, device=device)
         y = torch.rand(size, device=device)
 
@@ -253,7 +251,7 @@ graph(%a : Tensor, %b : Tensor):
         # Now compilation should pass
         kernel = torch._C._te.TensorExprKernel(graph)
 
-        device, size = 'cpu', (4, 4)
+        device, size = "cpu", (4, 4)
         x = torch.rand(size, device=device)
         y = torch.rand(size, device=device)
 
@@ -266,7 +264,7 @@ graph(%a : Tensor, %b : Tensor):
         def f(a):
             return a.t()
 
-        device, size = 'cpu', (3, 4)
+        device, size = "cpu", (3, 4)
         x = torch.rand(size, device=device)
 
         graph_str = """
@@ -288,7 +286,7 @@ graph(%a.1 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)):
         def f(a):
             return a.transpose(-1, -2)
 
-        device, size = 'cpu', (3, 4)
+        device, size = "cpu", (3, 4)
         x = torch.rand(size, device=device)
 
         graph_str = """
@@ -312,7 +310,7 @@ graph(%a.1 : Float(3, 4, strides=[4, 1], requires_grad=0, device=cpu)):
         def f(a):
             return a.permute([2, 1, 0])
 
-        device, size = 'cpu', (3, 4, 5)
+        device, size = "cpu", (3, 4, 5)
         x = torch.rand(size, device=device)
 
         graph_str = """
@@ -338,7 +336,7 @@ graph(%a.1 : Float(3, 4, 5, strides=[20, 5, 1], requires_grad=0, device=cpu)):
         def f(a):
             return a.nan_to_num()
 
-        device = 'cpu'
+        device = "cpu"
         x = torch.ones((2, 2), device=device)
         x[0, 0] = x[1, 1] = torch.nan
         graph_str = """
@@ -353,15 +351,20 @@ graph(%x : Float(2, 2, strides=[2, 1], requires_grad=0, device=cpu)):
             def get_dim_args(dims):
                 dim_args = []
                 for dim in dims:
-                    dim_args.append(te.DimArg(dim, 'i' + str(len(dim_args))))
+                    dim_args.append(te.DimArg(dim, "i" + str(len(dim_args))))
                 return dim_args
+
             def compute(idxs):
                 load = inputs[0].as_buf().load(idxs)
-                return te.ifThenElse(te.ExprHandle.isnan(load), te.ExprHandle.float(0.), load)
+                return te.ifThenElse(
+                    te.ExprHandle.isnan(load), te.ExprHandle.float(0.0), load
+                )
 
             return te.Compute2("custom_nan_to_num", get_dim_args(out_shape), compute)
 
-        kernel = torch._C._te.TensorExprKernel(graph, {'aten::nan_to_num' : my_custom_lowering})
+        kernel = torch._C._te.TensorExprKernel(
+            graph, {"aten::nan_to_num": my_custom_lowering}
+        )
         res1 = kernel.run((x,))
         res2 = kernel.fallback((x,))
         correct = f(x)
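Here `kernel.run` executes the compiled kernel with the custom lowering, while `kernel.fallback` takes the interpreter path; the context line visible in a later hunk suggests both results are then compared against eager output, roughly:

```python
import numpy as np

# Both the custom lowering (res1) and the fallback path (res2)
# should agree with eager nan_to_num (correct).
np.testing.assert_allclose(res1.numpy(), correct.numpy(), atol=2e-3)
np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
```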
@@ -373,7 +376,7 @@ graph(%x : Float(2, 2, strides=[2, 1], requires_grad=0, device=cpu)):
         def f(a):
             return a.expand((2, 3, 4))
 
-        device = 'cpu'
+        device = "cpu"
         x = torch.rand((1, 3, 1), device=device)
         graph_str = """
 graph(%a : Float(1, 3, 1, strides=[3, 1, 1], requires_grad=0, device=cpu)):
@@ -395,19 +398,21 @@ graph(%a : Float(1, 3, 1, strides=[3, 1, 1], requires_grad=0, device=cpu)):
         np.testing.assert_allclose(res2.numpy(), correct.numpy(), atol=2e-3)
 
     def test_forgot_kernel_arena(self):
-        self.assertRaises(RuntimeError, lambda: torch._C._te.VarHandle("n", torch._C._te.Dtype.Int))
+        self.assertRaises(
+            RuntimeError, lambda: torch._C._te.VarHandle("n", torch._C._te.Dtype.Int)
+        )
 
     @unittest.skipIf(not LLVM_ENABLED, "LLVM backend not enabled")
     def test_alloc_in_loop(self):
         with kernel_arena_scope():
             a, tmp, b = [
                 te.Placeholder(name, te.Dtype.Float, [te.ExprHandle.int(1)])
-                for name in ["a", "tmp", "b"]]
+                for name in ["a", "tmp", "b"]
+            ]
             t0, t100 = [te.ExprHandle.int(n) for n in [0, 100]]
-            body = te.Block([
-                tmp.store([t0], a.load([t0])),
-                b.store([t0], tmp.load([t0]))
-            ])
+            body = te.Block(
+                [tmp.store([t0], a.load([t0])), b.store([t0], tmp.load([t0]))]
+            )
             for _ in range(4):
                 i = te.VarHandle("i", te.Dtype.Int)
                 body = te.For.make(i, t0, t100, body)
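The loop above wraps `body` in four nested `te.For` statements. Between this hunk and the next, the test presumably lowers and compiles that loop nest with the LLVM backend (matching the `@unittest.skipIf` guard); a sketch under that assumption, where the `b.data()` accessor for the output buffer is an assumed API:

```python
# Assumed completion: lower the hand-built loop nest and compile it.
loopnest = te.LoopNest(body, [b.data()])   # b.data(): output buffer (assumed)
loopnest.prepare_for_codegen()
f = te.construct_codegen("llvm", loopnest.simplify(), [a, b])
# The next hunk then runs it: f.call([ta.data_ptr(), tb.data_ptr()])
```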
@@ -417,5 +422,6 @@ graph(%a : Float(1, 3, 1, strides=[3, 1, 1], requires_grad=0, device=cpu)):
             ta, tb = [torch.ones(1) for _ in range(2)]
             f.call([ta.data_ptr(), tb.data_ptr()])
 
-if __name__ == '__main__':
+
+if __name__ == "__main__":
     run_tests()