diff --git a/benchmarks/static_runtime/test_generated_ops.cc b/benchmarks/static_runtime/test_generated_ops.cc
index 415bf464fbd1..bdf0585404ed 100644
--- a/benchmarks/static_runtime/test_generated_ops.cc
+++ b/benchmarks/static_runtime/test_generated_ops.cc
@@ -272,6 +272,38 @@ TEST(StaticRuntime, autogen_addr) {
       /*check_resize=*/true);
 }
 
+TEST(StaticRuntime, autogen__test_functorch_fallback) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::_test_functorch_fallback(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
 TEST(StaticRuntime, autogen_argmax) {
   const std::string script = R"IR(
     graph(%self: Tensor, %dim: int?, %keepdim: bool):
@@ -4440,6 +4472,40 @@ TEST(StaticRuntime, autogen_masked_select) {
       /*check_resize=*/true);
 }
 
+TEST(StaticRuntime, autogen_nonzero_static) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %size: int, %fill_value: int):
+        %bias: None = prim::Constant()
+        %ret = aten::nonzero_static(%self, %size, %fill_value)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto size0 = 1;
+  auto fill_value0 = 1;
+  std::vector<IValue> args{self0, size0, fill_value0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto size1 = 1;
+  auto fill_value1 = 1;
+  std::vector<IValue> args2{self1, size1, fill_value1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
 TEST(StaticRuntime, autogen_gather) {
   const std::string script = R"IR(
     graph(%self: Tensor, %dim: int, %index: Tensor, %sparse_grad: bool):
@@ -7106,222 +7172,6 @@ TEST(StaticRuntime, autogen_special_multigammaln) {
       /*check_resize=*/true);
 }
 
-TEST(StaticRuntime, autogen_fft_fft) {
-  const std::string script = R"IR(
-    graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
-        %bias: None = prim::Constant()
-        %ret = aten::fft_fft(%self, %n, %dim, %norm)
-        %cloned = aten::clone(%ret, %bias)
-        return (%cloned)
-  )IR";
-
-  auto self0 = at::rand({6, 6, 6});
-  auto n0 = 1;
-  auto dim0 = 1;
-  auto norm0 = "forward";
-  std::vector<IValue> args{self0, n0, dim0, norm0};
-  testStaticRuntime(
-      script,
-      args,
-      {},
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-
-  auto self1 = at::rand({22, 22, 22});
-  auto n1 = 1;
-  auto dim1 = 1;
-  auto norm1 = "forward";
-  std::vector<IValue> args2{self1, n1, dim1, norm1};
-  testStaticRuntime(
-      script,
-      args,
-      args2,
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-}
-
-TEST(StaticRuntime, autogen_fft_ifft) {
-  const std::string script = R"IR(
-    graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
-        %bias: None = prim::Constant()
-        %ret = aten::fft_ifft(%self, %n, %dim, %norm)
-        %cloned = aten::clone(%ret, %bias)
-        return (%cloned)
-  )IR";
-
-  auto self0 = at::rand({6, 6, 6});
-  auto n0 = 1;
-  auto dim0 = 1;
-  auto norm0 = "forward";
"forward"; - std::vector args{self0, n0, dim0, norm0}; - testStaticRuntime( - script, - args, - {}, - /*use_allclose=*/false, - /*use_equalnan=*/false, - /*check_resize=*/true); - - auto self1 = at::rand({22, 22, 22}); - auto n1 = 1; - auto dim1 = 1; - auto norm1 = "forward"; - std::vector args2{self1, n1, dim1, norm1}; - testStaticRuntime( - script, - args, - args2, - /*use_allclose=*/false, - /*use_equalnan=*/false, - /*check_resize=*/true); -} - -TEST(StaticRuntime, autogen_fft_rfft) { - const std::string script = R"IR( - graph(%self: Tensor, %n: int?, %dim: int, %norm: str?): - %bias: None = prim::Constant() - %ret = aten::fft_rfft(%self, %n, %dim, %norm) - %cloned = aten::clone(%ret, %bias) - return (%cloned) - )IR"; - - auto self0 = at::rand({6, 6, 6}); - auto n0 = 1; - auto dim0 = 1; - auto norm0 = "forward"; - std::vector args{self0, n0, dim0, norm0}; - testStaticRuntime( - script, - args, - {}, - /*use_allclose=*/false, - /*use_equalnan=*/false, - /*check_resize=*/true); - - auto self1 = at::rand({22, 22, 22}); - auto n1 = 1; - auto dim1 = 1; - auto norm1 = "forward"; - std::vector args2{self1, n1, dim1, norm1}; - testStaticRuntime( - script, - args, - args2, - /*use_allclose=*/false, - /*use_equalnan=*/false, - /*check_resize=*/true); -} - -TEST(StaticRuntime, autogen_fft_irfft) { - const std::string script = R"IR( - graph(%self: Tensor, %n: int?, %dim: int, %norm: str?): - %bias: None = prim::Constant() - %ret = aten::fft_irfft(%self, %n, %dim, %norm) - %cloned = aten::clone(%ret, %bias) - return (%cloned) - )IR"; - - auto self0 = at::rand({6, 6, 6}); - auto n0 = 1; - auto dim0 = 1; - auto norm0 = "forward"; - std::vector args{self0, n0, dim0, norm0}; - testStaticRuntime( - script, - args, - {}, - /*use_allclose=*/false, - /*use_equalnan=*/false, - /*check_resize=*/true); - - auto self1 = at::rand({22, 22, 22}); - auto n1 = 1; - auto dim1 = 1; - auto norm1 = "forward"; - std::vector args2{self1, n1, dim1, norm1}; - testStaticRuntime( - script, - args, - args2, - /*use_allclose=*/false, - /*use_equalnan=*/false, - /*check_resize=*/true); -} - -TEST(StaticRuntime, autogen_fft_hfft) { - const std::string script = R"IR( - graph(%self: Tensor, %n: int?, %dim: int, %norm: str?): - %bias: None = prim::Constant() - %ret = aten::fft_hfft(%self, %n, %dim, %norm) - %cloned = aten::clone(%ret, %bias) - return (%cloned) - )IR"; - - auto self0 = at::rand({6, 6, 6}); - auto n0 = 1; - auto dim0 = 1; - auto norm0 = "forward"; - std::vector args{self0, n0, dim0, norm0}; - testStaticRuntime( - script, - args, - {}, - /*use_allclose=*/false, - /*use_equalnan=*/false, - /*check_resize=*/true); - - auto self1 = at::rand({22, 22, 22}); - auto n1 = 1; - auto dim1 = 1; - auto norm1 = "forward"; - std::vector args2{self1, n1, dim1, norm1}; - testStaticRuntime( - script, - args, - args2, - /*use_allclose=*/false, - /*use_equalnan=*/false, - /*check_resize=*/true); -} - -TEST(StaticRuntime, autogen_fft_ihfft) { - const std::string script = R"IR( - graph(%self: Tensor, %n: int?, %dim: int, %norm: str?): - %bias: None = prim::Constant() - %ret = aten::fft_ihfft(%self, %n, %dim, %norm) - %cloned = aten::clone(%ret, %bias) - return (%cloned) - )IR"; - - auto self0 = at::rand({6, 6, 6}); - auto n0 = 1; - auto dim0 = 1; - auto norm0 = "forward"; - std::vector args{self0, n0, dim0, norm0}; - testStaticRuntime( - script, - args, - {}, - /*use_allclose=*/false, - /*use_equalnan=*/false, - /*check_resize=*/true); - - auto self1 = at::rand({22, 22, 22}); - auto n1 = 1; - auto dim1 = 1; - auto norm1 = "forward"; - 
-  std::vector<IValue> args2{self1, n1, dim1, norm1};
-  testStaticRuntime(
-      script,
-      args,
-      args2,
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-}
-
 TEST(StaticRuntime, autogen_linalg_cross) {
   const std::string script = R"IR(
     graph(%self: Tensor, %other: Tensor, %dim: int):
diff --git a/torch/csrc/jit/runtime/static/generated_ops.cpp b/torch/csrc/jit/runtime/static/generated_ops.cpp
index af61ee72a00e..4597e1298cd6 100644
--- a/torch/csrc/jit/runtime/static/generated_ops.cpp
+++ b/torch/csrc/jit/runtime/static/generated_ops.cpp
@@ -36,7 +36,8 @@
 #include
 #include
 
-namespace torch::jit {
+namespace torch {
+namespace jit {
 
 REGISTER_OPERATOR_FUNCTOR(
     aten::absolute,
@@ -190,6 +191,29 @@ REGISTER_OPERATOR_FUNCTOR(aten::addr, aten_addr, [](Node* n) -> SROperator {
   return nullptr;
 });
 
+REGISTER_OPERATOR_FUNCTOR(
+    aten::_test_functorch_fallback,
+    aten__test_functorch_fallback,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::_test_functorch_fallback(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) =
+                at::native::_test_functorch_fallback(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::native::_test_functorch_fallback_out(self, other, out);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
 REGISTER_OPERATOR_FUNCTOR(aten::argmax, aten_argmax, [](Node* n) -> SROperator {
   if (n->matches(torch::schema(
           "aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor"))) {
@@ -2430,6 +2454,25 @@ REGISTER_OPERATOR_FUNCTOR(aten::addbmm, aten_addbmm, [](Node* n) -> SROperator {
   return nullptr;
 });
 
+REGISTER_OPERATOR_FUNCTOR(aten::diag, aten_diag, [](Node* n) -> SROperator {
+  if (n->matches(
+          torch::schema("aten::diag(Tensor self, int diagonal=0) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto diagonal = p_node->Input(1).toInt();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::native::diag(self, diagonal);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::native::diag_out(self, diagonal, out);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
 REGISTER_OPERATOR_FUNCTOR(aten::cross, aten_cross, [](Node* n) -> SROperator {
   if (n->matches(torch::schema(
           "aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor"))) {
@@ -2684,6 +2727,30 @@ REGISTER_OPERATOR_FUNCTOR(
   return nullptr;
 });
 
+REGISTER_OPERATOR_FUNCTOR(
+    aten::nonzero_static,
+    aten_nonzero_static,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto size = p_node->Input(1).toInt();
+          const auto fill_value = p_node->Input(2).toInt();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) =
+                at::native::nonzero_static_cpu(self, size, fill_value);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::native::nonzero_static_out_cpu(self, size, fill_value, out);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
 REGISTER_OPERATOR_FUNCTOR(aten::gather, aten_gather, [](Node* n) -> SROperator {
   if (n->matches(torch::schema(
           "aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor"))) {
@@ -4463,132 +4530,6 @@ REGISTER_OPERATOR_FUNCTOR(
   return nullptr;
 });
 
-REGISTER_OPERATOR_FUNCTOR(aten::fft_fft, aten_fft_fft, [](Node* n) -> SROperator {
-  if (n->matches(torch::schema(
-          "aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"))) {
-    return [](ProcessedNode* p_node) {
-      const auto& self = p_node->Input(0).toTensor();
-      const auto n = p_node->Input(1).toOptional<c10::SymInt>();
-      const auto dim = p_node->Input(2).toInt();
-      const auto norm = p_node->Input(3).toOptional<c10::string_view>();
-      if (p_node->Output(0).isNone()) {
-        p_node->Output(0) = at::native::fft_fft_symint(self, n, dim, norm);
-        return;
-      }
-      auto& out = p_node->Output(0).toTensor();
-      fastResizeToZero(out);
-      at::native::fft_fft_symint_out(self, n, dim, norm, out);
-    };
-  }
-  LogAndDumpSchema(n);
-  return nullptr;
-});
-
-REGISTER_OPERATOR_FUNCTOR(aten::fft_ifft, aten_fft_ifft, [](Node* n) -> SROperator {
-  if (n->matches(torch::schema(
-          "aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"))) {
-    return [](ProcessedNode* p_node) {
-      const auto& self = p_node->Input(0).toTensor();
-      const auto n = p_node->Input(1).toOptional<c10::SymInt>();
-      const auto dim = p_node->Input(2).toInt();
-      const auto norm = p_node->Input(3).toOptional<c10::string_view>();
-      if (p_node->Output(0).isNone()) {
-        p_node->Output(0) = at::native::fft_ifft_symint(self, n, dim, norm);
-        return;
-      }
-      auto& out = p_node->Output(0).toTensor();
-      fastResizeToZero(out);
-      at::native::fft_ifft_symint_out(self, n, dim, norm, out);
-    };
-  }
-  LogAndDumpSchema(n);
-  return nullptr;
-});
-
-REGISTER_OPERATOR_FUNCTOR(aten::fft_rfft, aten_fft_rfft, [](Node* n) -> SROperator {
-  if (n->matches(torch::schema(
-          "aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"))) {
-    return [](ProcessedNode* p_node) {
-      const auto& self = p_node->Input(0).toTensor();
-      const auto n = p_node->Input(1).toOptional<c10::SymInt>();
-      const auto dim = p_node->Input(2).toInt();
-      const auto norm = p_node->Input(3).toOptional<c10::string_view>();
-      if (p_node->Output(0).isNone()) {
-        p_node->Output(0) = at::native::fft_rfft_symint(self, n, dim, norm);
-        return;
-      }
-      auto& out = p_node->Output(0).toTensor();
-      fastResizeToZero(out);
-      at::native::fft_rfft_symint_out(self, n, dim, norm, out);
-    };
-  }
-  LogAndDumpSchema(n);
-  return nullptr;
-});
-
-REGISTER_OPERATOR_FUNCTOR(aten::fft_irfft, aten_fft_irfft, [](Node* n) -> SROperator {
-  if (n->matches(torch::schema(
-          "aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"))) {
-    return [](ProcessedNode* p_node) {
-      const auto& self = p_node->Input(0).toTensor();
-      const auto n = p_node->Input(1).toOptional<c10::SymInt>();
-      const auto dim = p_node->Input(2).toInt();
-      const auto norm = p_node->Input(3).toOptional<c10::string_view>();
-      if (p_node->Output(0).isNone()) {
-        p_node->Output(0) = at::native::fft_irfft_symint(self, n, dim, norm);
-        return;
-      }
-      auto& out = p_node->Output(0).toTensor();
-      fastResizeToZero(out);
-      at::native::fft_irfft_symint_out(self, n, dim, norm, out);
-    };
-  }
-  LogAndDumpSchema(n);
-  return nullptr;
-});
-
-REGISTER_OPERATOR_FUNCTOR(aten::fft_hfft, aten_fft_hfft, [](Node* n) -> SROperator {
-  if (n->matches(torch::schema(
-          "aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"))) {
-    return [](ProcessedNode* p_node) {
-      const auto& self = p_node->Input(0).toTensor();
-      const auto n = p_node->Input(1).toOptional<c10::SymInt>();
-      const auto dim = p_node->Input(2).toInt();
-      const auto norm = p_node->Input(3).toOptional<c10::string_view>();
-      if (p_node->Output(0).isNone()) {
-        p_node->Output(0) = at::native::fft_hfft_symint(self, n, dim, norm);
-        return;
-      }
-      auto& out = p_node->Output(0).toTensor();
-      fastResizeToZero(out);
-      at::native::fft_hfft_symint_out(self, n, dim, norm, out);
-    };
-  }
-  LogAndDumpSchema(n);
-  return nullptr;
-});
-
-REGISTER_OPERATOR_FUNCTOR(aten::fft_ihfft, aten_fft_ihfft, [](Node* n) -> SROperator {
-  if (n->matches(torch::schema(
-          "aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"))) {
-    return [](ProcessedNode* p_node) {
-      const auto& self = p_node->Input(0).toTensor();
-      const auto n = p_node->Input(1).toOptional<c10::SymInt>();
-      const auto dim = p_node->Input(2).toInt();
-      const auto norm = p_node->Input(3).toOptional<c10::string_view>();
-      if (p_node->Output(0).isNone()) {
-        p_node->Output(0) = at::native::fft_ihfft_symint(self, n, dim, norm);
-        return;
-      }
-      auto& out = p_node->Output(0).toTensor();
-      fastResizeToZero(out);
-      at::native::fft_ihfft_symint_out(self, n, dim, norm, out);
-    };
-  }
-  LogAndDumpSchema(n);
-  return nullptr;
-});
-
 REGISTER_OPERATOR_FUNCTOR(
     aten::linalg_cross,
     aten_linalg_cross,
@@ -5281,4 +5222,5 @@ REGISTER_NATIVE_OPERATOR_FUNCTOR(
   return nullptr;
 });
 
-} // namespace torch::jit
+} // namespace jit
+} // namespace torch
diff --git a/torchgen/static_runtime/config.py b/torchgen/static_runtime/config.py
index 407165147e35..da6e2a21c2a3 100644
--- a/torchgen/static_runtime/config.py
+++ b/torchgen/static_runtime/config.py
@@ -383,6 +383,6 @@ def override_test_values(arg_map: Dict[str, str], op_name: str, index: int) -> N
         return
     if op_name in ("diagonal", "linalg_diagonal"):
         arg_map["offset"] = "0"
-        arg_map["dim0"] = "1"
         arg_map["dim1"] = "2"
+        arg_map["dim2"] = "1"
         return
diff --git a/torchgen/static_runtime/generator.py b/torchgen/static_runtime/generator.py
index e709450b48d3..b068af7728aa 100644
--- a/torchgen/static_runtime/generator.py
+++ b/torchgen/static_runtime/generator.py
@@ -222,6 +222,17 @@ BLOCKED_OPS = frozenset(
         "special_spherical_bessel_j0",
         "_foobar",
         "_nested_tensor_strides",
+        "_nested_tensor_storage_offsets",
+        "_nested_get_values",  # no CPU backend
+        "_nested_get_values_copy",  # no CPU backend
+        "_nested_view_from_jagged",  # testing needs to be patched
+        "_nested_view_from_jagged_copy",  # testing needs to be patched
+        "_nested_view_from_buffer",  # testing needs to be patched
+        "_nested_view_from_buffer_copy",  # testing needs to be patched
+        "_int_mm",  # testing needs to be patched
+        "_to_sparse_csc",  # testing needs to be patched
+        "_to_sparse_csr",  # testing needs to be patched
+        "segment_reduce",  # testing needs to be patched
     )
 )
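
Note (not part of the patch): every functor added or removed in generated_ops.cpp above follows the same out-variant dispatch shape. The skeleton below is only an illustrative sketch of that pattern; "aten::some_op" and at::native::some_op / some_op_out are placeholder names, not real operators, and the snippet assumes the surrounding includes and helpers of generated_ops.cpp.

// Sketch of the generated out-variant pattern used by the registrations above.
REGISTER_OPERATOR_FUNCTOR(aten::some_op, aten_some_op, [](Node* n) -> SROperator {
  if (n->matches(torch::schema(
          "aten::some_op(Tensor self, Tensor other) -> Tensor"))) {
    return [](ProcessedNode* p_node) {
      const auto& self = p_node->Input(0).toTensor();
      const auto& other = p_node->Input(1).toTensor();
      if (p_node->Output(0).isNone()) {
        // First run: no managed output tensor exists yet, so call the
        // functional variant and let it allocate the result.
        p_node->Output(0) = at::native::some_op(self, other);
        return;
      }
      // Later runs: reuse the existing output. Resizing it to zero first lets
      // the out= kernel resize to the new shape without copying stale data.
      auto& out = p_node->Output(0).toTensor();
      fastResizeToZero(out);
      at::native::some_op_out(self, other, out);
    };
  }
  // No schema overload matched: log it and fall back to the interpreter path.
  LogAndDumpSchema(n);
  return nullptr;
});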