[static-runtime] update generator for the modified tests; re-run autogen script (#84437)

Test Plan: CI

Reviewed By: mikeiovine

Differential Revision: D39183148

Pull Request resolved: https://github.com/pytorch/pytorch/pull/84437
Approved by: https://github.com/mikeiovine
This commit is contained in:
Max Podkorytov
2022-09-06 20:07:56 +00:00
committed by PyTorch MergeBot
parent 6363b1b358
commit 7f90606309
5 changed files with 365 additions and 167 deletions

View File

@ -1,4 +1,5 @@
// @lint-ignore-every CLANGTIDY HOWTOEVEN
// AUTO-GENERATED FROM: torchgen/static_runtime/gen_static_runtime_ops.py
#include <gtest/gtest.h>
#include <torch/csrc/jit/runtime/static/impl.h>
#include <torch/torch.h>
@ -863,6 +864,38 @@ TEST(StaticRuntime, autogen_clamp_max) {
/*check_resize=*/true);
}
// AUTO-GENERATED (torchgen/static_runtime/gen_static_runtime_ops.py).
// Exercises aten::clamp_max.Tensor through the static runtime: the IR graph
// clones the op's output so the comparison sees a fresh tensor.
TEST(StaticRuntime, autogen_clamp_max_Tensor) {
const std::string script = R"IR(
graph(%self: Tensor, %max: Tensor):
%bias: None = prim::Constant()
%ret = aten::clamp_max(%self, %max)
%cloned = aten::clone(%ret, %bias)
return (%cloned)
)IR";
// First run: small 6x6x6 inputs, no second argument set.
auto self0 = at::rand({6, 6, 6});
auto max0 = at::rand({6, 6, 6});
std::vector<IValue> args{self0, max0};
testStaticRuntime(
script,
args,
{},
/*use_allclose=*/false,
/*use_equalnan=*/false,
/*check_resize=*/true);
// Second run: same graph re-executed with larger 22x22x22 inputs as the
// second argument set — presumably to verify managed-output resizing
// between runs (check_resize=true); confirm against testStaticRuntime.
auto self1 = at::rand({22, 22, 22});
auto max1 = at::rand({22, 22, 22});
std::vector<IValue> args2{self1, max1};
testStaticRuntime(
script,
args,
args2,
/*use_allclose=*/false,
/*use_equalnan=*/false,
/*check_resize=*/true);
}
TEST(StaticRuntime, autogen_clip) {
const std::string script = R"IR(
graph(%self: Tensor, %min: int?, %max: int?):
@ -1531,36 +1564,6 @@ TEST(StaticRuntime, autogen_index_copy) {
/*check_resize=*/true);
}
// AUTO-GENERATED (torchgen/static_runtime/gen_static_runtime_ops.py).
// Exercises aten::inverse through the static runtime; output is cloned in
// the IR so the checked tensor does not alias managed memory.
TEST(StaticRuntime, autogen_inverse) {
const std::string script = R"IR(
graph(%self: Tensor):
%bias: None = prim::Constant()
%ret = aten::inverse(%self)
%cloned = aten::clone(%ret, %bias)
return (%cloned)
)IR";
// First run: small 6x6x6 input only.
auto self0 = at::rand({6, 6, 6});
std::vector<IValue> args{self0};
testStaticRuntime(
script,
args,
{},
/*use_allclose=*/false,
/*use_equalnan=*/false,
/*check_resize=*/true);
// Second run: re-executes with a larger 22x22x22 input as the second
// argument set — presumably to check output resizing across runs
// (check_resize=true); confirm against testStaticRuntime's contract.
auto self1 = at::rand({22, 22, 22});
std::vector<IValue> args2{self1};
testStaticRuntime(
script,
args,
args2,
/*use_allclose=*/false,
/*use_equalnan=*/false,
/*check_resize=*/true);
}
TEST(StaticRuntime, autogen_isin_Tensor_Tensor) {
const std::string script = R"IR(
graph(%elements: Tensor, %test_elements: Tensor, %assume_unique: bool, %invert: bool):
@ -2899,6 +2902,38 @@ TEST(StaticRuntime, autogen_square) {
/*check_resize=*/true);
}
// AUTO-GENERATED (torchgen/static_runtime/gen_static_runtime_ops.py).
// Exercises aten::prod with an explicit dtype argument through the static
// runtime; aten::clone keeps the compared output independent of the runtime's
// memory planner.
TEST(StaticRuntime, autogen_prod) {
const std::string script = R"IR(
graph(%self: Tensor, %dtype: int?):
%bias: None = prim::Constant()
%ret = aten::prod(%self, %dtype)
%cloned = aten::clone(%ret, %bias)
return (%cloned)
)IR";
// First run: small 6x6x6 input, dtype pinned to Float.
auto self0 = at::rand({6, 6, 6});
auto dtype0 = at::ScalarType::Float;
std::vector<IValue> args{self0, dtype0};
testStaticRuntime(
script,
args,
{},
/*use_allclose=*/false,
/*use_equalnan=*/false,
/*check_resize=*/false);
// Second run with larger 22x22x22 input; note check_resize=false here —
// prod reduces to a scalar, so there is no output resize to verify.
auto self1 = at::rand({22, 22, 22});
auto dtype1 = at::ScalarType::Float;
std::vector<IValue> args2{self1, dtype1};
testStaticRuntime(
script,
args,
args2,
/*use_allclose=*/false,
/*use_equalnan=*/false,
/*check_resize=*/false);
}
TEST(StaticRuntime, autogen_prod_dim_int) {
const std::string script = R"IR(
graph(%self: Tensor, %dim: int, %keepdim: bool, %dtype: int?):
@ -5163,38 +5198,6 @@ TEST(StaticRuntime, autogen_maximum) {
/*check_resize=*/true);
}
// AUTO-GENERATED (torchgen/static_runtime/gen_static_runtime_ops.py).
// Exercises the binary aten::max(self, other) overload through the static
// runtime; the clone in the IR decouples the checked result from managed
// output buffers.
TEST(StaticRuntime, autogen_max_other) {
const std::string script = R"IR(
graph(%self: Tensor, %other: Tensor):
%bias: None = prim::Constant()
%ret = aten::max(%self, %other)
%cloned = aten::clone(%ret, %bias)
return (%cloned)
)IR";
// First run: small 6x6x6 operands.
auto self0 = at::rand({6, 6, 6});
auto other0 = at::rand({6, 6, 6});
std::vector<IValue> args{self0, other0};
testStaticRuntime(
script,
args,
{},
/*use_allclose=*/false,
/*use_equalnan=*/false,
/*check_resize=*/true);
// Second run with larger 22x22x22 operands as the second argument set —
// presumably validates output resizing between runs (check_resize=true);
// confirm against testStaticRuntime's contract.
auto self1 = at::rand({22, 22, 22});
auto other1 = at::rand({22, 22, 22});
std::vector<IValue> args2{self1, other1};
testStaticRuntime(
script,
args,
args2,
/*use_allclose=*/false,
/*use_equalnan=*/false,
/*check_resize=*/true);
}
TEST(StaticRuntime, autogen_minimum) {
const std::string script = R"IR(
graph(%self: Tensor, %other: Tensor):
@ -5507,40 +5510,6 @@ TEST(StaticRuntime, autogen_mse_loss) {
/*check_resize=*/true);
}
// AUTO-GENERATED (torchgen/static_runtime/gen_static_runtime_ops.py).
// Exercises aten::l1_loss through the static runtime with reduction=1
// (an integer reduction code supplied by the generator).
TEST(StaticRuntime, autogen_l1_loss) {
const std::string script = R"IR(
graph(%self: Tensor, %target: Tensor, %reduction: int):
%bias: None = prim::Constant()
%ret = aten::l1_loss(%self, %target, %reduction)
%cloned = aten::clone(%ret, %bias)
return (%cloned)
)IR";
// First run: small 6x6x6 input/target pair.
auto self0 = at::rand({6, 6, 6});
auto target0 = at::rand({6, 6, 6});
auto reduction0 = 1;
std::vector<IValue> args{self0, target0, reduction0};
testStaticRuntime(
script,
args,
{},
/*use_allclose=*/false,
/*use_equalnan=*/false,
/*check_resize=*/false);
// Second run with larger 22x22x22 tensors; check_resize=false — with a
// reducing loss the output shape does not track the input shape.
auto self1 = at::rand({22, 22, 22});
auto target1 = at::rand({22, 22, 22});
auto reduction1 = 1;
std::vector<IValue> args2{self1, target1, reduction1};
testStaticRuntime(
script,
args,
args2,
/*use_allclose=*/false,
/*use_equalnan=*/false,
/*check_resize=*/false);
}
TEST(StaticRuntime, autogen_multi_margin_loss) {
const std::string script = R"IR(
graph(%self: Tensor, %target: Tensor, %p: int, %margin: int, %weight: Tensor?, %reduction: int):
@ -6230,8 +6199,8 @@ TEST(StaticRuntime, autogen_adaptive_max_pool2d_backward) {
return (%cloned)
)IR";
auto grad_output0 = at::randint(-3, 2, {2, 2, 2}, at::kFloat);
auto self0 = at::randint(-3, 2, {2, 2, 2}, at::kFloat);
auto grad_output0 = at::rand({2, 2, 2}, at::kFloat);
auto self0 = at::rand({2, 2, 2}, at::kFloat);
auto indices0 = at::randint(0, 1, {2, 2, 2}, at::kLong);
std::vector<IValue> args{grad_output0, self0, indices0};
testStaticRuntime(
@ -6242,8 +6211,8 @@ TEST(StaticRuntime, autogen_adaptive_max_pool2d_backward) {
/*use_equalnan=*/false,
/*check_resize=*/true);
auto grad_output1 = at::randint(-3, 3, {3, 3, 3}, at::kFloat);
auto self1 = at::randint(-3, 2, {3, 3, 3}, at::kFloat);
auto grad_output1 = at::rand({3, 3, 3}, at::kFloat);
auto self1 = at::rand({3, 3, 3}, at::kFloat);
auto indices1 = at::randint(0, 1, {3, 3, 3}, at::kLong);
std::vector<IValue> args2{grad_output1, self1, indices1};
testStaticRuntime(
@ -6264,8 +6233,8 @@ TEST(StaticRuntime, autogen_adaptive_max_pool3d_backward) {
return (%cloned)
)IR";
auto grad_output0 = at::randint(-3, 2, {2, 2, 2, 2}, at::kFloat);
auto self0 = at::randint(-3, 2, {2, 2, 2, 2}, at::kFloat);
auto grad_output0 = at::rand({2, 2, 2, 2}, at::kFloat);
auto self0 = at::rand({2, 2, 2, 2}, at::kFloat);
auto indices0 = at::randint(0, 1, {2, 2, 2, 2}, at::kLong);
std::vector<IValue> args{grad_output0, self0, indices0};
testStaticRuntime(
@ -6276,8 +6245,8 @@ TEST(StaticRuntime, autogen_adaptive_max_pool3d_backward) {
/*use_equalnan=*/false,
/*check_resize=*/true);
auto grad_output1 = at::randint(-3, 3, {3, 3, 3, 3}, at::kFloat);
auto self1 = at::randint(-3, 2, {3, 3, 3, 3}, at::kFloat);
auto grad_output1 = at::rand({3, 3, 3, 3}, at::kFloat);
auto self1 = at::rand({3, 3, 3, 3}, at::kFloat);
auto indices1 = at::randint(0, 1, {3, 3, 3, 3}, at::kLong);
std::vector<IValue> args2{grad_output1, self1, indices1};
testStaticRuntime(
@ -7521,15 +7490,15 @@ TEST(StaticRuntime, autogen_linalg_cross) {
TEST(StaticRuntime, autogen_linalg_det) {
const std::string script = R"IR(
graph(%self: Tensor):
graph(%A: Tensor):
%bias: None = prim::Constant()
%ret = aten::linalg_det(%self)
%ret = aten::linalg_det(%A)
%cloned = aten::clone(%ret, %bias)
return (%cloned)
)IR";
auto self0 = at::rand({6, 6, 6});
std::vector<IValue> args{self0};
auto A0 = at::rand({6, 6, 6});
std::vector<IValue> args{A0};
testStaticRuntime(
script,
args,
@ -7538,8 +7507,8 @@ TEST(StaticRuntime, autogen_linalg_det) {
/*use_equalnan=*/false,
/*check_resize=*/true);
auto self1 = at::rand({22, 22, 22});
std::vector<IValue> args2{self1};
auto A1 = at::rand({22, 22, 22});
std::vector<IValue> args2{A1};
testStaticRuntime(
script,
args,
@ -7612,10 +7581,40 @@ TEST(StaticRuntime, autogen_linalg_eigvals) {
}
// AUTO-GENERATED (torchgen/static_runtime/gen_static_runtime_ops.py).
// Exercises aten::linalg_inv through the static runtime; the argument is
// named %A matching the ATen schema for linalg_inv.
TEST(StaticRuntime, autogen_linalg_inv) {
const std::string script = R"IR(
graph(%A: Tensor):
%bias: None = prim::Constant()
%ret = aten::linalg_inv(%A)
%cloned = aten::clone(%ret, %bias)
return (%cloned)
)IR";
// First run: small 6x6x6 batch of matrices.
auto A0 = at::rand({6, 6, 6});
std::vector<IValue> args{A0};
testStaticRuntime(
script,
args,
{},
/*use_allclose=*/false,
/*use_equalnan=*/false,
/*check_resize=*/true);
// Second run with larger 22x22x22 input as the second argument set —
// presumably checks managed-output resizing between runs
// (check_resize=true); confirm against testStaticRuntime.
auto A1 = at::rand({22, 22, 22});
std::vector<IValue> args2{A1};
testStaticRuntime(
script,
args,
args2,
/*use_allclose=*/false,
/*use_equalnan=*/false,
/*check_resize=*/true);
}
TEST(StaticRuntime, autogen_inverse) {
const std::string script = R"IR(
graph(%self: Tensor):
%bias: None = prim::Constant()
%ret = aten::linalg_inv(%self)
%ret = aten::inverse(%self)
%cloned = aten::clone(%ret, %bias)
return (%cloned)
)IR";
@ -7705,18 +7704,18 @@ TEST(StaticRuntime, autogen_outer) {
/*check_resize=*/true);
}
// Disabling the test because JIT alias analysis does not support linalg_svdvals at this point.
TEST(StaticRuntime, DISABLED_autogen_linalg_svdvals) {
TEST(StaticRuntime, autogen_linalg_svdvals) {
const std::string script = R"IR(
graph(%A: Tensor):
graph(%A: Tensor, %driver: str?):
%bias: None = prim::Constant()
%ret = aten::linalg_svdvals(%A)
%ret = aten::linalg_svdvals(%A, %driver)
%cloned = aten::clone(%ret, %bias)
return (%cloned)
)IR";
auto A0 = at::rand({6, 6, 6});
std::vector<IValue> args{A0};
auto driver0 = "floor";
std::vector<IValue> args{A0, driver0};
testStaticRuntime(
script,
args,
@ -7726,7 +7725,8 @@ TEST(StaticRuntime, DISABLED_autogen_linalg_svdvals) {
/*check_resize=*/true);
auto A1 = at::rand({22, 22, 22});
std::vector<IValue> args2{A1};
auto driver1 = "floor";
std::vector<IValue> args2{A1, driver1};
testStaticRuntime(
script,
args,