Mirror of https://github.com/pytorch/pytorch.git (synced 2025-10-20 21:14:14 +08:00)
Add aten::softmax NNAPI converter (#58539)
Summary: Add support for the aten::softmax op in the NNAPI model converter, including flexible-size inputs.
Pull Request resolved: https://github.com/pytorch/pytorch/pull/58539
Test Plan: pytest test/test_nnapi.py::TestNNAPI::test_softmax
Reviewed By: anshuljain1
Differential Revision: D28531946
fbshipit-source-id: 8633f3e3f7f52795f9866ff16ad0867ea36a19e8
Committed by: Facebook GitHub Bot
Parent: 45ce26c397
Commit: 14d604a13e
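As an illustration of the new converter path (not part of this patch), the sketch below shows roughly what the test harness's self.check does for the new test: trace a Softmax module and convert it with torch.backends._nnapi.prepare.convert_model_to_nnapi, passing a zero-sized template tensor to request flexible input sizes. Details outside the diff (the exact harness behavior) are assumptions based on test/test_nnapi.py.

import torch
from torch.backends._nnapi.prepare import convert_model_to_nnapi

# Trace a Softmax module on a concrete example input.
model = torch.nn.Softmax().eval()
inp = torch.tensor([[-2.0, -0.5], [0.5, 2.0]])
traced = torch.jit.trace(model, inp)

# Fixed-size conversion: the NNAPI model is specialized to inp's shape.
nnapi_model = convert_model_to_nnapi(traced, [inp])

# Flexible-size conversion: a zero-sized template marks both dimensions as
# flexible, matching convert_args=[torch.zeros(0, 0)] in test_softmax.
nnapi_model_flex = convert_model_to_nnapi(traced, [torch.zeros(0, 0)])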
test/test_nnapi.py:

@@ -227,6 +227,17 @@ class TestNNAPI(TestCase):
         with self.assertRaisesRegex(Exception, "hardtanh with args"):
             self.check(torch.nn.Hardtanh(0.0, 5.0), inp)
 
+    def test_softmax(self):
+        inp = torch.tensor([[-2.0, -0.5], [0.5, 2.0]])
+        self.check(torch.nn.Softmax(), inp)
+        self.check(torch.nn.Softmax(dim=0), inp)
+        # Test flexible size
+        self.check(
+            torch.nn.Softmax(),
+            inp,
+            convert_args=[torch.zeros(0, 0)],
+        )
+
     def test_mean(self):
         class MeanModule(torch.nn.Module):
             def __init__(self, dim, keep=False):
torch/backends/_nnapi/serializer.py:

@@ -759,6 +759,8 @@ class _NnapiSerializer(object):
             self.add_pointwise_simple_unary_op(node, NNAPI_OperationCode.RELU),
         "aten::sigmoid": lambda self, node:
             self.add_pointwise_simple_unary_op(node, NNAPI_OperationCode.LOGISTIC),
+        "aten::softmax": lambda self, node:
+            self.add_softmax(node),
         "aten::hardtanh": lambda self, node:
             self.add_hardtanh(node),
         "aten::avg_pool2d": lambda self, node:
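For context (not part of this patch): tracing a Softmax module shows the aten::softmax call that the new ADDER_MAP entry dispatches on. The op takes three inputs (self, dim, dtype), which is why add_softmax in the hunk below asserts node.inputsSize() == 3 and reads the dim constant from input 1.

import torch

# Trace a Softmax module and print the TorchScript graph the serializer walks.
traced = torch.jit.trace(torch.nn.Softmax(dim=1), torch.zeros(2, 2))
print(traced.graph)
# Expect a node along the lines of:
#   %out : Tensor = aten::softmax(%input, %dim, %dtype)
# where %dim is an int constant and %dtype is a None constant.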
@@ -1131,6 +1133,27 @@ class _NnapiSerializer(object):
 
         self._do_add_binary(node, opcode, fuse_code, qparams=(scale, zero_point))
 
+    def add_softmax(self, node):
+        assert node.inputsSize() == 3
+        in_id, in_oper = self.get_tensor_operand_by_jitval(node.inputsAt(0))
+
+        _, softmax_dim = self.get_constant_value(node.inputsAt(1), "IntType")
+
+        out_id = self.add_tensor_operand(node.outputsAt(0), in_oper)
+        for dim, size in enumerate(in_oper.shape):
+            if size == 0:
+                self.forward_operand_shape(out_id, dim, in_id, dim)
+
+        inputs = [None] * 3
+        inputs[0] = in_id
+        inputs[1] = self.add_immediate_float_scalar(1.0)  # positive scaling factor of exponent, beta
+        inputs[2] = self.add_immediate_int_scalar(softmax_dim)
+
+        outputs = [None] * 1
+        outputs[0] = out_id
+
+        self.add_operation(NNAPI_OperationCode.SOFTMAX, inputs, outputs)
+
     def add_hardtanh(self, node):
         assert node.inputsSize() == 3
         assert node.outputsSize() == 1
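A quick numeric illustration (an assumption-labelled sketch, not from the patch) of the beta input that add_softmax hard-codes to 1.0: NNAPI's SOFTMAX computes exp(beta * x_i) / sum_j exp(beta * x_j) along the given axis, so beta = 1.0 reproduces the standard torch softmax.

import torch

inp = torch.tensor([[-2.0, -0.5], [0.5, 2.0]])
beta, dim = 1.0, 1

# Softmax with an explicit beta, the form used by the NNAPI SOFTMAX operation.
scaled = torch.exp(beta * inp)
manual = scaled / scaled.sum(dim=dim, keepdim=True)

# With beta = 1.0 this matches torch's own softmax.
print(torch.allclose(manual, torch.nn.functional.softmax(inp, dim=dim)))  # True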