[torchfuzz] remove fixed xfail (#165116)
Pull Request resolved: https://github.com/pytorch/pytorch/pull/165116
Approved by: https://github.com/PaulZhang12
Committed by: PyTorch MergeBot
Parent: 253fd765bd
Commit: 3f27100d3e
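For context: `@pytest.mark.xfail` marks a test that is expected to fail while the referenced bug is open; once the bug is fixed the test starts to XPASS, and the stale marker (here, the entire fuzzer-generated repro) is deleted. A minimal sketch of that pattern, reusing the issue number from the removed test:

import pytest

class TestFuzzerCompileIssues:
    # Expected to fail while issue #163876 is open: pytest reports XFAIL
    # rather than a hard failure. Once the bug is fixed, the test XPASSes,
    # which is the signal to remove the marker (or the repro entirely).
    @pytest.mark.xfail(reason="Issue #163876")
    def test_fuzzer_issue_163876(self):
        raise RuntimeError("stand-in for the compile failure")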
@@ -428,81 +428,6 @@ class TestFuzzerCompileIssues(TestCase):
         out_compiled.sum().backward()
         print("Compile Success! ✅")
 
-    @pytest.mark.xfail(reason="Issue #163876")
-    def test_fuzzer_issue_163876(self):
-        torch.manual_seed(0)
-
-        def foo(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7):
-            t0 = arg0  # size=(29, 50, 32, 5), stride=(46400, 1600, 32, 1), dtype=float16, device=cuda
-            t1 = arg1  # size=(29, 50, 32, 5), stride=(46400, 1600, 32, 1), dtype=float16, device=cuda
-            t2 = arg2  # size=(29, 50, 32, 5), stride=(46400, 1600, 32, 1), dtype=float16, device=cuda
-            t3 = torch.nn.functional.scaled_dot_product_attention(
-                t0, t1, t2
-            )  # size=(29, 50, 32, 5), stride=(8000, 160, 5, 1), dtype=float16, device=cuda
-            t4 = (
-                t3.min(dim=3).values
-            )  # size=(29, 50, 32), stride=(1600, 32, 1), dtype=float16, device=cuda
-            t5 = arg3  # size=(3, 10, 4640), stride=(46400, 4640, 1), dtype=float16, device=cuda
-            t6 = t5.var(
-                dim=0
-            )  # size=(10, 4640), stride=(4640, 1), dtype=float16, device=cuda
-            t7 = t6.reshape(
-                (29, 50, 32)
-            )  # size=(29, 50, 32), stride=(1600, 32, 1), dtype=float16, device=cuda
-            t8 = arg4  # size=(29, 50, 32), stride=(1600, 32, 1), dtype=float16, device=cuda
-            t9 = arg5  # size=(32, 50, 29), stride=(1, 32, 1600), dtype=float16, device=cuda
-            t10 = t9.clone()
-            t10.zero_()  # size=(32, 50, 29), stride=(1, 32, 1600), dtype=float16, device=cuda
-            t11 = t10.transpose(
-                0, 2
-            )  # size=(29, 50, 32), stride=(1600, 32, 1), dtype=float16, device=cuda
-            t12 = torch.pow(
-                torch.pow(t4, t8), t11
-            )  # size=(29, 50, 32), stride=(1600, 32, 1), dtype=float16, device=cuda
-            t13 = arg6  # size=(29, 50, 32), stride=(1450, 50, 1), dtype=float16, device=cuda
-            t15 = torch.nn.functional.layer_norm(
-                t13, (32,)
-            )  # size=(29, 50, 32), stride=(1600, 32, 1), dtype=float16, device=cuda
-            t16 = (
-                (t12) / t15
-            )  # size=(29, 50, 32), stride=(1600, 32, 1), dtype=float16, device=cuda
-            t17 = (
-                ((((t4) - t7) - t16) - t11) - t16
-            )  # size=(29, 50, 32), stride=(1600, 32, 1), dtype=float16, device=cuda
-            output = t17
-            return output
-
-        arg0 = torch.rand(
-            [29, 50, 32, 5], dtype=torch.float16, device="cuda", requires_grad=True
-        )
-        arg1 = torch.rand(
-            [29, 50, 32, 5], dtype=torch.float16, device="cuda", requires_grad=True
-        )
-        arg2 = torch.rand(
-            [29, 50, 32, 5], dtype=torch.float16, device="cuda", requires_grad=True
-        )
-        arg3 = torch.rand(
-            [3, 10, 4640], dtype=torch.float16, device="cuda", requires_grad=True
-        )
-        arg4 = torch.rand(
-            [29, 50, 32], dtype=torch.float16, device="cuda", requires_grad=True
-        )
-        arg5 = torch.rand(
-            [32, 50, 29], dtype=torch.float16, device="cuda", requires_grad=True
-        )
-        arg6 = torch.rand(
-            [29, 50, 32], dtype=torch.float16, device="cuda", requires_grad=True
-        )
-        arg7 = torch.randint(0, 1000, [1], dtype=torch.int64, device="cuda")
-
-        out_eager = foo(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
-        out_eager.sum().backward()
-        print("Eager Success! ✅")
-        compiled_foo = torch.compile(foo, fullgraph=True, dynamic=True)
-        out_compiled = compiled_foo(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7)
-        out_compiled.sum().backward()
-        print("Compile Success! ✅")
-
     @pytest.mark.xfail(reason="Issue #163877")
     def test_fuzzer_issue_163877(self):
         torch.manual_seed(0)
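The removed test follows the same harness as the others in TestFuzzerCompileIssues: run the fuzzer-generated foo eagerly (forward and backward), then run it again under torch.compile, and declare success if both complete. A condensed sketch of that harness, with a toy foo standing in for the generated graph (assumes a CUDA device, as the original repro does):

import torch

def foo(a, b):
    # Stand-in for the fuzzer-generated graph.
    return (a @ b).relu()

a = torch.rand(8, 8, dtype=torch.float16, device="cuda", requires_grad=True)
b = torch.rand(8, 8, dtype=torch.float16, device="cuda", requires_grad=True)

# Eager reference run: forward and backward must complete.
out_eager = foo(a, b)
out_eager.sum().backward()
print("Eager Success! ✅")

# Same program through the compiler. fullgraph=True makes any graph break
# a hard error; dynamic=True compiles with symbolic (dynamic) shapes.
compiled_foo = torch.compile(foo, fullgraph=True, dynamic=True)
out_compiled = compiled_foo(a, b)
out_compiled.sum().backward()
print("Compile Success! ✅")

Note that the harness only checks that compilation and backward complete; it does not compare eager and compiled numerics.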