Mirror of https://github.com/pytorch/pytorch.git, synced 2025-10-20 21:14:14 +08:00
[Static Runtime] Fix & run gen_static_runtime_ops (#128299)
gen_static_runtime_ops hasn't been updated in a while. In preparation for https://github.com/pytorch/pytorch/pull/127675, in which I need to re-run the codegen step for cumprod, I want to land these changes beforehand in case any other issues arise.

I added a number of ops to the blocklist:

```
+ "_nested_tensor_storage_offsets",
+ "_nested_get_values", # no CPU backend
+ "_nested_get_values_copy", # no CPU backend
+ "_nested_view_from_jagged", # testing needs to be patched
+ "_nested_view_from_jagged_copy", # testing needs to be patched
+ "_nested_view_from_buffer", # testing needs to be patched
+ "_nested_view_from_buffer_copy", # testing needs to be patched
+ "_int_mm", # testing needs to be patched
+ "_to_sparse_csc", # testing needs to be patched
+ "_to_sparse_csr", # testing needs to be patched
+ "segment_reduce", # testing needs to be patched
```

Most of these were added only because their tests don't currently work. Additionally, a few `fft` ops seem to have been removed from native_functions.yaml; it's unlikely FFT would have been used in many real models, though.

Differential Revision: [D58329403](https://our.internmc.facebook.com/intern/diff/D58329403/)

Pull Request resolved: https://github.com/pytorch/pytorch/pull/128299
Approved by: https://github.com/YuqingJ
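For readers unfamiliar with the codegen flow, the sketch below shows how a blocklist like the one added here is typically applied when deciding which ops get generated wrappers and tests. This is a minimal illustration, not the actual torchgen/static_runtime implementation: the `ops_to_generate` helper and its input format are assumptions; only the `BLOCKED_OPS` entries themselves come from this change.

```python
# Minimal sketch, not the real gen_static_runtime_ops code: the helper name and
# its input format are illustrative assumptions. Only the BLOCKED_OPS entries
# mirror the ones added in this PR.
from typing import Iterable, Iterator

BLOCKED_OPS = frozenset(
    (
        "_nested_get_values",  # no CPU backend
        "_int_mm",  # testing needs to be patched
        "segment_reduce",  # testing needs to be patched
    )
)


def ops_to_generate(op_names: Iterable[str]) -> Iterator[str]:
    """Yield only the ops the codegen should emit Static Runtime wrappers/tests for."""
    for name in op_names:
        if name in BLOCKED_OPS:
            continue  # skip ops with no CPU kernel or with currently broken autogen tests
        yield name


# Example: "cumprod" passes through, "_int_mm" is filtered out.
print(list(ops_to_generate(["cumprod", "_int_mm"])))  # -> ['cumprod']
```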
committed by: PyTorch MergeBot
parent: f8c45996d5
commit: 29081059b6
@@ -272,6 +272,38 @@ TEST(StaticRuntime, autogen_addr) {
       /*check_resize=*/true);
 }
 
+TEST(StaticRuntime, autogen__test_functorch_fallback) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %other: Tensor):
+        %bias: None = prim::Constant()
+        %ret = aten::_test_functorch_fallback(%self, %other)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto other0 = at::rand({6, 6, 6});
+  std::vector<IValue> args{self0, other0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto other1 = at::rand({22, 22, 22});
+  std::vector<IValue> args2{self1, other1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
 TEST(StaticRuntime, autogen_argmax) {
   const std::string script = R"IR(
     graph(%self: Tensor, %dim: int?, %keepdim: bool):
@@ -4440,6 +4472,40 @@ TEST(StaticRuntime, autogen_masked_select) {
       /*check_resize=*/true);
 }
 
+TEST(StaticRuntime, autogen_nonzero_static) {
+  const std::string script = R"IR(
+    graph(%self: Tensor, %size: int, %fill_value: int):
+        %bias: None = prim::Constant()
+        %ret = aten::nonzero_static(%self, %size, %fill_value)
+        %cloned = aten::clone(%ret, %bias)
+        return (%cloned)
+  )IR";
+
+  auto self0 = at::rand({6, 6, 6});
+  auto size0 = 1;
+  auto fill_value0 = 1;
+  std::vector<IValue> args{self0, size0, fill_value0};
+  testStaticRuntime(
+      script,
+      args,
+      {},
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+
+  auto self1 = at::rand({22, 22, 22});
+  auto size1 = 1;
+  auto fill_value1 = 1;
+  std::vector<IValue> args2{self1, size1, fill_value1};
+  testStaticRuntime(
+      script,
+      args,
+      args2,
+      /*use_allclose=*/false,
+      /*use_equalnan=*/false,
+      /*check_resize=*/true);
+}
+
 TEST(StaticRuntime, autogen_gather) {
   const std::string script = R"IR(
     graph(%self: Tensor, %dim: int, %index: Tensor, %sparse_grad: bool):
@@ -7106,222 +7172,6 @@ TEST(StaticRuntime, autogen_special_multigammaln) {
       /*check_resize=*/true);
 }
 
-TEST(StaticRuntime, autogen_fft_fft) {
-  const std::string script = R"IR(
-    graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
-        %bias: None = prim::Constant()
-        %ret = aten::fft_fft(%self, %n, %dim, %norm)
-        %cloned = aten::clone(%ret, %bias)
-        return (%cloned)
-  )IR";
-
-  auto self0 = at::rand({6, 6, 6});
-  auto n0 = 1;
-  auto dim0 = 1;
-  auto norm0 = "forward";
-  std::vector<IValue> args{self0, n0, dim0, norm0};
-  testStaticRuntime(
-      script,
-      args,
-      {},
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-
-  auto self1 = at::rand({22, 22, 22});
-  auto n1 = 1;
-  auto dim1 = 1;
-  auto norm1 = "forward";
-  std::vector<IValue> args2{self1, n1, dim1, norm1};
-  testStaticRuntime(
-      script,
-      args,
-      args2,
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-}
-
-TEST(StaticRuntime, autogen_fft_ifft) {
-  const std::string script = R"IR(
-    graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
-        %bias: None = prim::Constant()
-        %ret = aten::fft_ifft(%self, %n, %dim, %norm)
-        %cloned = aten::clone(%ret, %bias)
-        return (%cloned)
-  )IR";
-
-  auto self0 = at::rand({6, 6, 6});
-  auto n0 = 1;
-  auto dim0 = 1;
-  auto norm0 = "forward";
-  std::vector<IValue> args{self0, n0, dim0, norm0};
-  testStaticRuntime(
-      script,
-      args,
-      {},
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-
-  auto self1 = at::rand({22, 22, 22});
-  auto n1 = 1;
-  auto dim1 = 1;
-  auto norm1 = "forward";
-  std::vector<IValue> args2{self1, n1, dim1, norm1};
-  testStaticRuntime(
-      script,
-      args,
-      args2,
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-}
-
-TEST(StaticRuntime, autogen_fft_rfft) {
-  const std::string script = R"IR(
-    graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
-        %bias: None = prim::Constant()
-        %ret = aten::fft_rfft(%self, %n, %dim, %norm)
-        %cloned = aten::clone(%ret, %bias)
-        return (%cloned)
-  )IR";
-
-  auto self0 = at::rand({6, 6, 6});
-  auto n0 = 1;
-  auto dim0 = 1;
-  auto norm0 = "forward";
-  std::vector<IValue> args{self0, n0, dim0, norm0};
-  testStaticRuntime(
-      script,
-      args,
-      {},
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-
-  auto self1 = at::rand({22, 22, 22});
-  auto n1 = 1;
-  auto dim1 = 1;
-  auto norm1 = "forward";
-  std::vector<IValue> args2{self1, n1, dim1, norm1};
-  testStaticRuntime(
-      script,
-      args,
-      args2,
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-}
-
-TEST(StaticRuntime, autogen_fft_irfft) {
-  const std::string script = R"IR(
-    graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
-        %bias: None = prim::Constant()
-        %ret = aten::fft_irfft(%self, %n, %dim, %norm)
-        %cloned = aten::clone(%ret, %bias)
-        return (%cloned)
-  )IR";
-
-  auto self0 = at::rand({6, 6, 6});
-  auto n0 = 1;
-  auto dim0 = 1;
-  auto norm0 = "forward";
-  std::vector<IValue> args{self0, n0, dim0, norm0};
-  testStaticRuntime(
-      script,
-      args,
-      {},
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-
-  auto self1 = at::rand({22, 22, 22});
-  auto n1 = 1;
-  auto dim1 = 1;
-  auto norm1 = "forward";
-  std::vector<IValue> args2{self1, n1, dim1, norm1};
-  testStaticRuntime(
-      script,
-      args,
-      args2,
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-}
-
-TEST(StaticRuntime, autogen_fft_hfft) {
-  const std::string script = R"IR(
-    graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
-        %bias: None = prim::Constant()
-        %ret = aten::fft_hfft(%self, %n, %dim, %norm)
-        %cloned = aten::clone(%ret, %bias)
-        return (%cloned)
-  )IR";
-
-  auto self0 = at::rand({6, 6, 6});
-  auto n0 = 1;
-  auto dim0 = 1;
-  auto norm0 = "forward";
-  std::vector<IValue> args{self0, n0, dim0, norm0};
-  testStaticRuntime(
-      script,
-      args,
-      {},
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-
-  auto self1 = at::rand({22, 22, 22});
-  auto n1 = 1;
-  auto dim1 = 1;
-  auto norm1 = "forward";
-  std::vector<IValue> args2{self1, n1, dim1, norm1};
-  testStaticRuntime(
-      script,
-      args,
-      args2,
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-}
-
-TEST(StaticRuntime, autogen_fft_ihfft) {
-  const std::string script = R"IR(
-    graph(%self: Tensor, %n: int?, %dim: int, %norm: str?):
-        %bias: None = prim::Constant()
-        %ret = aten::fft_ihfft(%self, %n, %dim, %norm)
-        %cloned = aten::clone(%ret, %bias)
-        return (%cloned)
-  )IR";
-
-  auto self0 = at::rand({6, 6, 6});
-  auto n0 = 1;
-  auto dim0 = 1;
-  auto norm0 = "forward";
-  std::vector<IValue> args{self0, n0, dim0, norm0};
-  testStaticRuntime(
-      script,
-      args,
-      {},
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-
-  auto self1 = at::rand({22, 22, 22});
-  auto n1 = 1;
-  auto dim1 = 1;
-  auto norm1 = "forward";
-  std::vector<IValue> args2{self1, n1, dim1, norm1};
-  testStaticRuntime(
-      script,
-      args,
-      args2,
-      /*use_allclose=*/false,
-      /*use_equalnan=*/false,
-      /*check_resize=*/true);
-}
-
 TEST(StaticRuntime, autogen_linalg_cross) {
   const std::string script = R"IR(
     graph(%self: Tensor, %other: Tensor, %dim: int):
@@ -36,7 +36,8 @@
 #include <torch/csrc/jit/tensorexpr/llvm_codegen.h>
 #include <torch/csrc/jit/tensorexpr/loopnest.h>
 
-namespace torch::jit {
+namespace torch {
+namespace jit {
 
 REGISTER_OPERATOR_FUNCTOR(
     aten::absolute,
@@ -190,6 +191,29 @@ REGISTER_OPERATOR_FUNCTOR(aten::addr, aten_addr, [](Node* n) -> SROperator {
   return nullptr;
 });
 
+REGISTER_OPERATOR_FUNCTOR(
+    aten::_test_functorch_fallback,
+    aten__test_functorch_fallback,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::_test_functorch_fallback(Tensor self, Tensor other) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto& other = p_node->Input(1).toTensor();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) =
+                at::native::_test_functorch_fallback(self, other);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::native::_test_functorch_fallback_out(self, other, out);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
 REGISTER_OPERATOR_FUNCTOR(aten::argmax, aten_argmax, [](Node* n) -> SROperator {
   if (n->matches(torch::schema(
           "aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor"))) {
@@ -2430,6 +2454,25 @@ REGISTER_OPERATOR_FUNCTOR(aten::addbmm, aten_addbmm, [](Node* n) -> SROperator {
   return nullptr;
 });
 
+REGISTER_OPERATOR_FUNCTOR(aten::diag, aten_diag, [](Node* n) -> SROperator {
+  if (n->matches(
+          torch::schema("aten::diag(Tensor self, int diagonal=0) -> Tensor"))) {
+    return [](ProcessedNode* p_node) {
+      const auto& self = p_node->Input(0).toTensor();
+      const auto diagonal = p_node->Input(1).toInt();
+      if (p_node->Output(0).isNone()) {
+        p_node->Output(0) = at::native::diag(self, diagonal);
+        return;
+      }
+      auto& out = p_node->Output(0).toTensor();
+      fastResizeToZero(out);
+      at::native::diag_out(self, diagonal, out);
+    };
+  }
+  LogAndDumpSchema(n);
+  return nullptr;
+});
+
 REGISTER_OPERATOR_FUNCTOR(aten::cross, aten_cross, [](Node* n) -> SROperator {
   if (n->matches(torch::schema(
           "aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor"))) {
@@ -2684,6 +2727,30 @@ REGISTER_OPERATOR_FUNCTOR(
   return nullptr;
 });
 
+REGISTER_OPERATOR_FUNCTOR(
+    aten::nonzero_static,
+    aten_nonzero_static,
+    [](Node* n) -> SROperator {
+      if (n->matches(torch::schema(
+              "aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor"))) {
+        return [](ProcessedNode* p_node) {
+          const auto& self = p_node->Input(0).toTensor();
+          const auto size = p_node->Input(1).toInt();
+          const auto fill_value = p_node->Input(2).toInt();
+          if (p_node->Output(0).isNone()) {
+            p_node->Output(0) =
+                at::native::nonzero_static_cpu(self, size, fill_value);
+            return;
+          }
+          auto& out = p_node->Output(0).toTensor();
+          fastResizeToZero(out);
+          at::native::nonzero_static_out_cpu(self, size, fill_value, out);
+        };
+      }
+      LogAndDumpSchema(n);
+      return nullptr;
+    });
+
 REGISTER_OPERATOR_FUNCTOR(aten::gather, aten_gather, [](Node* n) -> SROperator {
   if (n->matches(torch::schema(
           "aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor"))) {
@@ -4463,132 +4530,6 @@ REGISTER_OPERATOR_FUNCTOR(
   return nullptr;
 });
 
-REGISTER_OPERATOR_FUNCTOR(aten::fft_fft, aten_fft_fft, [](Node* n) -> SROperator {
-  if (n->matches(torch::schema(
-          "aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"))) {
-    return [](ProcessedNode* p_node) {
-      const auto& self = p_node->Input(0).toTensor();
-      const auto n = p_node->Input(1).toOptional<c10::SymInt>();
-      const auto dim = p_node->Input(2).toInt();
-      const auto norm = p_node->Input(3).toOptional<c10::string_view>();
-      if (p_node->Output(0).isNone()) {
-        p_node->Output(0) = at::native::fft_fft_symint(self, n, dim, norm);
-        return;
-      }
-      auto& out = p_node->Output(0).toTensor();
-      fastResizeToZero(out);
-      at::native::fft_fft_symint_out(self, n, dim, norm, out);
-    };
-  }
-  LogAndDumpSchema(n);
-  return nullptr;
-});
-
-REGISTER_OPERATOR_FUNCTOR(aten::fft_ifft, aten_fft_ifft, [](Node* n) -> SROperator {
-  if (n->matches(torch::schema(
-          "aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"))) {
-    return [](ProcessedNode* p_node) {
-      const auto& self = p_node->Input(0).toTensor();
-      const auto n = p_node->Input(1).toOptional<c10::SymInt>();
-      const auto dim = p_node->Input(2).toInt();
-      const auto norm = p_node->Input(3).toOptional<c10::string_view>();
-      if (p_node->Output(0).isNone()) {
-        p_node->Output(0) = at::native::fft_ifft_symint(self, n, dim, norm);
-        return;
-      }
-      auto& out = p_node->Output(0).toTensor();
-      fastResizeToZero(out);
-      at::native::fft_ifft_symint_out(self, n, dim, norm, out);
-    };
-  }
-  LogAndDumpSchema(n);
-  return nullptr;
-});
-
-REGISTER_OPERATOR_FUNCTOR(aten::fft_rfft, aten_fft_rfft, [](Node* n) -> SROperator {
-  if (n->matches(torch::schema(
-          "aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"))) {
-    return [](ProcessedNode* p_node) {
-      const auto& self = p_node->Input(0).toTensor();
-      const auto n = p_node->Input(1).toOptional<c10::SymInt>();
-      const auto dim = p_node->Input(2).toInt();
-      const auto norm = p_node->Input(3).toOptional<c10::string_view>();
-      if (p_node->Output(0).isNone()) {
-        p_node->Output(0) = at::native::fft_rfft_symint(self, n, dim, norm);
-        return;
-      }
-      auto& out = p_node->Output(0).toTensor();
-      fastResizeToZero(out);
-      at::native::fft_rfft_symint_out(self, n, dim, norm, out);
-    };
-  }
-  LogAndDumpSchema(n);
-  return nullptr;
-});
-
-REGISTER_OPERATOR_FUNCTOR(aten::fft_irfft, aten_fft_irfft, [](Node* n) -> SROperator {
-  if (n->matches(torch::schema(
-          "aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"))) {
-    return [](ProcessedNode* p_node) {
-      const auto& self = p_node->Input(0).toTensor();
-      const auto n = p_node->Input(1).toOptional<c10::SymInt>();
-      const auto dim = p_node->Input(2).toInt();
-      const auto norm = p_node->Input(3).toOptional<c10::string_view>();
-      if (p_node->Output(0).isNone()) {
-        p_node->Output(0) = at::native::fft_irfft_symint(self, n, dim, norm);
-        return;
-      }
-      auto& out = p_node->Output(0).toTensor();
-      fastResizeToZero(out);
-      at::native::fft_irfft_symint_out(self, n, dim, norm, out);
-    };
-  }
-  LogAndDumpSchema(n);
-  return nullptr;
-});
-
-REGISTER_OPERATOR_FUNCTOR(aten::fft_hfft, aten_fft_hfft, [](Node* n) -> SROperator {
-  if (n->matches(torch::schema(
-          "aten::fft_hfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"))) {
-    return [](ProcessedNode* p_node) {
-      const auto& self = p_node->Input(0).toTensor();
-      const auto n = p_node->Input(1).toOptional<c10::SymInt>();
-      const auto dim = p_node->Input(2).toInt();
-      const auto norm = p_node->Input(3).toOptional<c10::string_view>();
-      if (p_node->Output(0).isNone()) {
-        p_node->Output(0) = at::native::fft_hfft_symint(self, n, dim, norm);
-        return;
-      }
-      auto& out = p_node->Output(0).toTensor();
-      fastResizeToZero(out);
-      at::native::fft_hfft_symint_out(self, n, dim, norm, out);
-    };
-  }
-  LogAndDumpSchema(n);
-  return nullptr;
-});
-
-REGISTER_OPERATOR_FUNCTOR(aten::fft_ihfft, aten_fft_ihfft, [](Node* n) -> SROperator {
-  if (n->matches(torch::schema(
-          "aten::fft_ihfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor"))) {
-    return [](ProcessedNode* p_node) {
-      const auto& self = p_node->Input(0).toTensor();
-      const auto n = p_node->Input(1).toOptional<c10::SymInt>();
-      const auto dim = p_node->Input(2).toInt();
-      const auto norm = p_node->Input(3).toOptional<c10::string_view>();
-      if (p_node->Output(0).isNone()) {
-        p_node->Output(0) = at::native::fft_ihfft_symint(self, n, dim, norm);
-        return;
-      }
-      auto& out = p_node->Output(0).toTensor();
-      fastResizeToZero(out);
-      at::native::fft_ihfft_symint_out(self, n, dim, norm, out);
-    };
-  }
-  LogAndDumpSchema(n);
-  return nullptr;
-});
-
 REGISTER_OPERATOR_FUNCTOR(
     aten::linalg_cross,
     aten_linalg_cross,
@@ -5281,4 +5222,5 @@ REGISTER_NATIVE_OPERATOR_FUNCTOR(
   return nullptr;
 });
 
-} // namespace torch::jit
+} // namespace jit
+} // namespace torch
@@ -383,6 +383,6 @@ def override_test_values(arg_map: Dict[str, str], op_name: str, index: int) -> None:
         return
     if op_name in ("diagonal", "linalg_diagonal"):
         arg_map["offset"] = "0"
-        arg_map["dim0"] = "1"
         arg_map["dim1"] = "2"
+        arg_map["dim2"] = "1"
         return
@@ -222,6 +222,17 @@ BLOCKED_OPS = frozenset(
         "special_spherical_bessel_j0",
         "_foobar",
         "_nested_tensor_strides",
+        "_nested_tensor_storage_offsets",
+        "_nested_get_values", # no CPU backend
+        "_nested_get_values_copy", # no CPU backend
+        "_nested_view_from_jagged", # testing needs to be patched
+        "_nested_view_from_jagged_copy", # testing needs to be patched
+        "_nested_view_from_buffer", # testing needs to be patched
+        "_nested_view_from_buffer_copy", # testing needs to be patched
+        "_int_mm", # testing needs to be patched
+        "_to_sparse_csc", # testing needs to be patched
+        "_to_sparse_csr", # testing needs to be patched
+        "segment_reduce", # testing needs to be patched
     )
 )
 