Make string serialization of C++ FunctionSchema consistent with torchgen.model.FunctionSchema

Pull Request resolved: https://github.com/pytorch/pytorch/pull/77926

There is a discrepancy between the string representations of the C++ FunctionSchema and torchgen.model.FunctionSchema.
The latter does not add parentheses around the return type when a single item is returned,
but the C++ FunctionSchema always adds the parentheses.

Make them consistent so we can convert one type to the other via its string representation and parse method.
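
For illustration, a minimal sketch of the torchgen side of this round trip, assuming a PyTorch source checkout where torchgen is importable (the relu schema is just an example):

    from torchgen.model import FunctionSchema

    # torchgen's printer never wraps a single return type in parentheses,
    # so after this change str() and parse() round-trip on both sides.
    s = FunctionSchema.parse("relu(Tensor self) -> Tensor")
    assert str(s) == "relu(Tensor self) -> Tensor"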

Differential Revision: [D36535924](https://our.internmc.facebook.com/intern/diff/D36535924/)

Approved by: https://github.com/bdhirsh
Author: Shunting Zhang
Date: 2022-05-24 00:19:56 -07:00
Committed by: PyTorch MergeBot
Parent: c083489f46
Commit: 26d9386f67

4 changed files with 109 additions and 66 deletions


@@ -35,7 +35,48 @@ inline std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema)
   out << ") -> ";
   const auto& returns = schema.returns();
-  out << "(";
+  /*
+   * We should skip the parenthesis if we return a single item and it's not varret,
+   * or if we return nothing but varret.
+   *
+   * Special handling is needed for a schema like
+   *   aten::items.str(Dict(str, t) self) -> (str,t)[]
+   * Even though this schema returns a single item, we still need to add the
+   * parenthesis. This is necessary so the printed schema can be parsed by the
+   * C++ SchemaParser. Without the extra parenthesis, the parser sees the first
+   * parenthesis in '(str,t)' and mistakenly treats the return type as a tuple.
+   * An alternative would be to enhance the Lexer to look ahead multiple tokens
+   * to decide accurately whether the return type is a tuple.
+   */
+  bool need_paren = !(
+      (returns.size() == 1 && !schema.is_varret()) ||
+      (returns.size() == 0 && schema.is_varret()));
+  if (returns.size() == 1 && !schema.is_varret()) {
+    std::stringstream return_ss;
+    return_ss << returns.at(0);
+    auto return_str = return_ss.str();
+
+    // Enclose the single return item in parenthesis if the return type
+    // starts with a left parenthesis.
+    //
+    // There are two cases:
+    // 1. Something like 'aten::items.str(Dict(str, t) self) -> ((str, t)[])'.
+    //    Without the extra parenthesis, the C++ schema parser cannot parse it.
+    // 2. Something like '-> ((str, str))'. The extra parenthesis is needed so
+    //    the return type is a single tuple rather than two strings.
+    // PR (https://github.com/pytorch/pytorch/pull/23204) has more context about
+    // this. test_serialize_and_deserialize (https://github.com/pytorch/pytorch/blob/master/test/test_function_schema.py#L15)
+    // also covers this case.
+    if (return_str.size() > 0 && return_str.front() == '(') {
+      need_paren = true;
+    }
+  }
+  if (need_paren) {
+    out << "(";
+  }
   for (const auto i : c10::irange(returns.size())) {
     if (i > 0) {
       out << ", ";
@@ -48,7 +89,9 @@ inline std::ostream& operator<<(std::ostream& out, const FunctionSchema& schema)
     }
     out << "...";
   }
-  out << ")";
+  if (need_paren) {
+    out << ")";
+  }
   return out;
 }
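
To see the printer's two interesting cases from the Python side, here is a minimal sketch; it assumes torch._C.parse_schema, the existing binding around the C++ schema parser and printer, and uses foo::bar as a made-up operator name:

    import torch

    # Case 1: a single plain return type is printed without parentheses.
    s = torch._C.parse_schema("foo::bar(Tensor x) -> Tensor")
    print(s)  # expected: foo::bar(Tensor x) -> Tensor

    # Case 2: a single return type that itself starts with '(' keeps the
    # enclosing parentheses so the printed schema stays parseable.
    s = torch._C.parse_schema("aten::items.str(Dict(str, t) self) -> ((str, t)[])")
    print(s)  # expected: aten::items.str(Dict(str, t) self) -> ((str, t)[])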


@@ -1303,7 +1303,7 @@ TEST(AliasRegistrationTest, ConservativeWithAliasingAnnotationsShouldError) {
   // registration.
   expectThrows<c10::Error>(
       [&graph] { AliasDb aliasDb(graph); },
-      "Tried to register operator foo::rand3(Tensor(a) arg1) -> (Tensor(b)) with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA");
+      "Tried to register operator foo::rand3(Tensor(a) arg1) -> Tensor(b) with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA");
 }

 TEST(AliasRegistrationTest, ConservativeWithAliasingAnnotationsShouldError2) {
@@ -1323,7 +1323,7 @@ TEST(AliasRegistrationTest, ConservativeWithAliasingAnnotationsShouldError2) {
   // registration.
   expectThrows<c10::Error>(
       [&graph] { AliasDb aliasDb(graph); },
-      "Tried to register operator foo::rand4(Tensor(a) arg1) -> (Tensor(a)) with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA");
+      "Tried to register operator foo::rand4(Tensor(a) arg1) -> Tensor(a) with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA");
 }

 TEST(AliasRegistrationTest, FromSchemaWithInferredSchemaShouldError) {
@@ -1337,7 +1337,7 @@ TEST(AliasRegistrationTest, FromSchemaWithInferredSchemaShouldError) {
             })
             .aliasAnalysis(AliasAnalysisKind::FROM_SCHEMA));
       },
-      "Tried to register operator foo::rand5(Tensor _0) -> (Tensor _0) with AliasAnalysisKind::FROM_SCHEMA, but the schema is inferred");
+      "Tried to register operator foo::rand5(Tensor _0) -> Tensor _0 with AliasAnalysisKind::FROM_SCHEMA, but the schema is inferred");
 }

 TEST(AliasRegistrationTest, FromSchemaInferredPure) {
@@ -1438,7 +1438,7 @@ TEST(AliasRegistrationTest, PureWithAnnotationsShouldError) {
   // registration.
   expectThrows<c10::Error>(
       [&graph] { AliasDb aliasDb(graph); },
-      "Tried to register operator foo::rand11(Tensor(a) arg1) -> (Tensor(a)) with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA");
+      "Tried to register operator foo::rand11(Tensor(a) arg1) -> Tensor(a) with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA");
 }

 TEST(AliasRegistrationTest, AliasMoveAtenListOp) {
@@ -1605,7 +1605,7 @@ TEST(AliasRegistrationTest, PureWithAnnotationsShouldError2) {
   // registration.
   expectThrows<c10::Error>(
       [&graph] { AliasDb aliasDb(graph); },
-      "Tried to register operator foo::rand12(Tensor(a) arg1) -> (Tensor(b)) with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA");
+      "Tried to register operator foo::rand12(Tensor(a) arg1) -> Tensor(b) with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA");
 }

 } // namespace jit
 } // namespace torch


@@ -1 +1 @@
-foo(Tensor x, (Tensor, Tensor, Tensor) y, (Tensor, (Tensor, Tensor)) z) -> (Tensor)
+foo(Tensor x, (Tensor, Tensor, Tensor) y, (Tensor, (Tensor, Tensor)) z) -> Tensor
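
The expect file above is produced by a serialize/deserialize round trip. A minimal sketch of that round trip, assuming torch._C.parse_schema (the binding exercised by test_function_schema.py):

    import torch

    schema_str = ("foo(Tensor x, (Tensor, Tensor, Tensor) y, "
                  "(Tensor, (Tensor, Tensor)) z) -> Tensor")
    parsed = torch._C.parse_schema(schema_str)

    # With this change the single return is printed without parentheses,
    # so the printed form parses back to an equal schema.
    assert str(parsed) == schema_str
    assert torch._C.parse_schema(str(parsed)) == parsed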


@@ -243,13 +243,13 @@ class TestDispatch(TestCase):
         ]).state
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor x) -> (Tensor)
+schema: test::foo(Tensor x) -> Tensor
 debug: registered at /dev/null:0
 alias analysis kind: FROM_SCHEMA
-CPU: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-AutogradCPU: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-Autograd[alias]: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+CPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+AutogradCPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+Autograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 ''')

     def test_def_impl_schema_mismatch(self):
@@ -264,9 +264,9 @@ CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed
         self.assertExpectedInline(state, '''\
 Inferred operator schema for a C++ kernel function doesn't match the expected function schema.
 operator: test::foo
-expected schema: test::foo(Tensor x, Tensor y) -> (Tensor)
+expected schema: test::foo(Tensor x, Tensor y) -> Tensor
 registered at /dev/null:0
-inferred schema: (Tensor _0) -> (Tensor _0)
+inferred schema: (Tensor _0) -> Tensor _0
 impl_t_t
 reason: The number of arguments is different. 2 vs 1.''')
@@ -283,13 +283,13 @@ Inferred operator schema for a C++ kernel function doesn't match the expected fu
         ]).state
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor _0) -> (Tensor _0)
+schema: test::foo(Tensor _0) -> Tensor _0
 debug: registered at /dev/null:0
 alias analysis kind: CONSERVATIVE
-CPU: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-AutogradCPU: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-Autograd[alias]: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-CompositeImplicitAutograd[alias]: default_def_name_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+CPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+AutogradCPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+Autograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+CompositeImplicitAutograd[alias]: default_def_name_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 ''')

     def test_def_only(self):
@@ -299,7 +299,7 @@ CompositeImplicitAutograd[alias]: default_def_name_t_t :: (Tensor _0) -> (Tensor
         ]).state
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor x, Tensor y) -> (Tensor)
+schema: test::foo(Tensor x, Tensor y) -> Tensor
 debug: registered at /dev/null:0
 alias analysis kind: FROM_SCHEMA
 ''')
@@ -318,10 +318,10 @@ alias analysis kind: FROM_SCHEMA
         self.assertExpectedInline(state, '''\
 name: test::foo
 schema: (none)
-CPU: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-AutogradCPU: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-Autograd[alias]: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+CPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+AutogradCPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+Autograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 ''')

     def test_computed_table(self):
@@ -340,14 +340,14 @@ CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed
         state, table = result.state, result.table
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor _0) -> (Tensor _0)
+schema: test::foo(Tensor _0) -> Tensor _0
 debug: registered at /dev/null:0
 alias analysis kind: CONSERVATIVE
-CPU: fn_cpu :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-XLA: fn_xla :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-AutogradCPU: fn_autogradcpu :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-Autograd[alias]: fn_autograd :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-CompositeImplicitAutograd[alias]: default_def_name_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+XLA: fn_xla :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+AutogradCPU: fn_autogradcpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+Autograd[alias]: fn_autograd :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+CompositeImplicitAutograd[alias]: default_def_name_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 ''')

         # computed dispatch table is too big, so we only check on a few entries we're interested in.
@@ -375,11 +375,11 @@ AutogradXLA: fn_autograd [autograd kernel]
         state, table = result.state, result.table
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor _0) -> (Tensor _0)
+schema: test::foo(Tensor _0) -> Tensor _0
 debug: registered at /dev/null:0
 alias analysis kind: CONSERVATIVE
-CPU: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-CompositeImplicitAutograd[alias]: default_def_name_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+CPU: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+CompositeImplicitAutograd[alias]: default_def_name_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 ''')

         # computed dispatch table is too big, so we only check on a few entries we're interested in.
@@ -407,10 +407,10 @@ AutogradXLA: default_def_name_t_t [math kernel]
         state, table = result.state, result.table
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor x) -> (Tensor)
+schema: test::foo(Tensor x) -> Tensor
 debug: registered at /dev/null:0
 alias analysis kind: FROM_SCHEMA
-CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+CompositeImplicitAutograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 ''')

         # computed dispatch table is too big, so we only check on a few entries we're interested in.
@@ -440,11 +440,11 @@ AutogradXLA: impl_t_t [math kernel]
         state, table = result.state, result.table
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor x) -> (Tensor)
+schema: test::foo(Tensor x) -> Tensor
 debug: registered at /dev/null:0
 alias analysis kind: FROM_SCHEMA
-CPU: fn_cpu :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 ''')

         # computed dispatch table is too big, so we only check on a few entries we're interested in.
@@ -472,10 +472,10 @@ AutogradXLA: fn_math [math kernel]
         state, table = result.state, result.table
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor x) -> (Tensor)
+schema: test::foo(Tensor x) -> Tensor
 debug: registered at /dev/null:0
 alias analysis kind: FROM_SCHEMA
-Autograd[alias]: impl_t_t :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+Autograd[alias]: impl_t_t :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 ''')

         # computed dispatch table is too big, so we only check on a few entries we're interested in.
@@ -504,12 +504,12 @@ AutogradXLA: impl_t_t [autograd kernel]
         state, table = result.state, result.table
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor x) -> (Tensor)
+schema: test::foo(Tensor x) -> Tensor
 debug: registered at /dev/null:0
 alias analysis kind: FROM_SCHEMA
-CPU: fn_cpu :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-Autograd[alias]: fn_autograd :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+Autograd[alias]: fn_autograd :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 ''')

         # computed dispatch table is too big, so we only check on a few entries we're interested in.
@@ -538,11 +538,11 @@ AutogradXLA: fn_math [math kernel]
         state, table = result.state, result.table
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor x) -> (Tensor)
+schema: test::foo(Tensor x) -> Tensor
 debug: registered at /dev/null:0
 alias analysis kind: FROM_SCHEMA
-FPGA: fn_fpga :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+FPGA: fn_fpga :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 ''')

         # computed dispatch table is too big, so we only check on a few entries we're interested in.
@@ -572,11 +572,11 @@ FPGA: fn_fpga [kernel]
         state, table = result.state, result.table
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor x) -> (Tensor)
+schema: test::foo(Tensor x) -> Tensor
 debug: registered at /dev/null:0
 alias analysis kind: FROM_SCHEMA
-CPU: fn_cpu :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-CompositeExplicitAutograd[alias]: fn_defaultbackend :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+CompositeExplicitAutograd[alias]: fn_defaultbackend :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 ''')

         # computed dispatch table is too big, so we only check on a few entries we're interested in.
@@ -607,12 +607,12 @@ AutogradXLA: fallthrough registered in pytorch framework [backend fallback]
         state, table = result.state, result.table
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor x) -> (Tensor)
+schema: test::foo(Tensor x) -> Tensor
 debug: registered at /dev/null:0
 alias analysis kind: FROM_SCHEMA
-CPU: fn_cpu :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-Autograd[alias]: fn_autograd :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-CompositeExplicitAutograd[alias]: fn_defaultbackend :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+Autograd[alias]: fn_autograd :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+CompositeExplicitAutograd[alias]: fn_defaultbackend :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 ''')

         # computed dispatch table is too big, so we only check on a few entries we're interested in.
@@ -646,13 +646,13 @@ FPGA: fn_defaultbackend [default backend kernel]
         state, table = result.state, result.table
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor x) -> (Tensor)
+schema: test::foo(Tensor x) -> Tensor
 debug: registered at /dev/null:0
 alias analysis kind: FROM_SCHEMA
-CPU: fn_cpu :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-Autograd[alias]: fn_autograd :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-CompositeExplicitAutograd[alias]: fn_defaultbackend :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+CPU: fn_cpu :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+Autograd[alias]: fn_autograd :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+CompositeImplicitAutograd[alias]: fn_math :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+CompositeExplicitAutograd[alias]: fn_defaultbackend :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 ''')

         # computed dispatch table is too big, so we only check on a few entries we're interested in.
@@ -678,7 +678,7 @@ AutogradXLA: fn_autograd [autograd kernel]
         ]
         self.assertExpectedInline(
             self.commute("foo", ops, expect_raises=True).state,
-            '''Tried to register an operator (test::foo(Tensor x, Tensor y) -> (Tensor)) with the same name and overload '''
+            '''Tried to register an operator (test::foo(Tensor x, Tensor y) -> Tensor) with the same name and overload '''
             '''name multiple times. Each overload's schema should only be registered with a single call to def(). '''
             '''Duplicate registration: registered at /dev/null:0. Original registration: registered at /dev/null:0'''
         )
@@ -693,7 +693,7 @@ AutogradXLA: fn_autograd [autograd kernel]
         ]).state
         self.assertExpectedInline(state, '''\
 name: test::foo
-schema: test::foo(Tensor x, Tensor y) -> (Tensor)
+schema: test::foo(Tensor x, Tensor y) -> Tensor
 debug: registered at /dev/null:0
 alias analysis kind: PURE_FUNCTION
 ''')
@@ -708,7 +708,7 @@ alias analysis kind: PURE_FUNCTION
         ]
         self.assertExpectedInline(
             self.commute("foo", ops, expect_raises=True).state,
-            '''Tried to register an operator (test::foo(Tensor x) -> (Tensor)) with the same name and overload '''
+            '''Tried to register an operator (test::foo(Tensor x) -> Tensor) with the same name and overload '''
            '''name multiple times. Each overload's schema should only be registered with a single call to def(). '''
             '''Duplicate registration: registered at /dev/null:0. Original registration: registered at /dev/null:0'''
         )
@@ -724,7 +724,7 @@ alias analysis kind: PURE_FUNCTION
         ]
         self.assertExpectedInline(
             self.commute("foo", ops, expect_raises=True).state,
-            '''Tried to register an operator (test::foo(Tensor x) -> (Tensor)) with the same name and overload '''
+            '''Tried to register an operator (test::foo(Tensor x) -> Tensor) with the same name and overload '''
             '''name multiple times. Each overload's schema should only be registered with a single call to def(). '''
             '''Duplicate registration: registered at /dev/null:0. Original registration: registered at /dev/null:0'''
         )
@@ -754,8 +754,8 @@ alias analysis kind: PURE_FUNCTION
             '''\
 name: test::foo
 schema: (none)
-CompositeImplicitAutograd[alias]: fn2 :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
-CompositeImplicitAutograd[alias] (inactive): fn1 :: (Tensor _0) -> (Tensor _0) [ boxed unboxed ]
+CompositeImplicitAutograd[alias]: fn2 :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
+CompositeImplicitAutograd[alias] (inactive): fn1 :: (Tensor _0) -> Tensor _0 [ boxed unboxed ]
 '''
         )
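
The dispatcher state strings asserted above can also be inspected directly. A minimal sketch, assuming a recent PyTorch where torch.library.Library and the private torch._C._dispatch_dump helper (the one this test file builds on) are available; test_ns::foo is a made-up operator name:

    import torch
    from torch.library import Library

    # Define a toy operator, then dump its dispatcher state string.
    lib = Library("test_ns", "DEF")
    lib.define("foo(Tensor x) -> Tensor")
    print(torch._C._dispatch_dump("test_ns::foo"))
    # expected to include a line like: schema: test_ns::foo(Tensor x) -> Tensor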