From 720af7dab61afe57002b2609db65f65c5f4a614a Mon Sep 17 00:00:00 2001
From: PuQing
Date: Thu, 11 May 2023 22:31:07 +0800
Subject: [PATCH 1/2] add paddle ops convert

- affine_channel
- p_norm
- roi_align
- softmax_with_cross_entropy

---
 python/tvm/relay/frontend/paddlepaddle.py | 103 +++++++++++++++++
 .../frontend/paddlepaddle/test_forward.py | 107 ++++++++++++++++++
 2 files changed, 210 insertions(+)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index adff28187e5b..48d11e7fd5a0 100755
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -141,6 +141,26 @@ def convert_addmm(g, op, block):
     g.add_node(op.output("Out")[0], out)


+def convert_affine_channel(g, op, block):
+    """Operator converter for affine_channel."""
+
+    if "data_layout" in op.attr_names:
+        assert (
+            op.attr("data_layout") == "NCHW" or op.attr("data_layout") == "AnyLayout"
+        ), "Only NCHW and AnyLayout are supported for affine_channel. But received: {}".format(
+            op.attr("data_layout")
+        )
+
+    x = g.get_node(op.input("X")[0])
+    bias = g.get_node(op.input("Bias")[0])
+    scale = g.get_node(op.input("Scale")[0])
+    scale = _op.reshape(scale, [-1, 1, 1])
+    bias = _op.reshape(bias, [-1, 1, 1])
+    x = _op.multiply(x, scale)
+    out = _op.add(x, bias)
+    g.add_node(op.output("Out")[0], out)
+
+
 def convert_arg_max_min(g, op, block):
     """Operator converter for arg_max and arg_min."""

@@ -1394,6 +1414,22 @@ def convert_one_hot_v2(g, op, block):
     g.add_node(op.output("Out")[0], out)


+def convert_p_norm(g, op, block):
+    """Operator converter for p_norm."""
+
+    x = g.get_node(op.input("X")[0])
+    axis = op.attr("axis")
+    p = op.attr("porder")
+    keepdim = op.attr("keepdim")
+    p_node = _expr.const(p, dtype="float32")
+    abs_node = _op.abs(x)
+    pow_node = _op.power(abs_node, p_node)
+    reduce_sum = _op.sum(pow_node, axis=[axis], keepdims=keepdim)
+    p_node1 = _expr.const(1.0 / p, dtype="float32")
+    out = _op.power(reduce_sum, p_node1)
+    g.add_node(op.output("Out")[0], out)
+
+
 def convert_padding(g, op, block):
     """Operator converter for padding."""

@@ -1645,6 +1681,30 @@ def convert_reshape(g, op, block):
     g.add_node(op.output("Out")[0], out)


+def convert_roi_align(g, op, block):
+    """Operator converter for roi_align."""
+
+    rois = g.get_node(op.input("ROIs")[0])
+    spatial_scale = op.attr("spatial_scale")
+    if op.attr("aligned"):
+        offset = _expr.const(0.5, dtype="float32")
+        roi_offset = _op.divide(offset, _expr.const(spatial_scale, dtype="float32"))
+        rois = _op.subtract(rois, roi_offset)
+    num_rois = infer_shape(rois)[0]
+    zero_node = _expr.const(0, dtype="int32")
+    batch_index = _op.full(zero_node, [num_rois, 1], dtype="float32")
+    rois = _op.concatenate([batch_index, rois], axis=1)
+    out = _op.vision.roi_align(
+        g.get_node(op.input("X")[0]),
+        rois,
+        pooled_size=[op.attr("pooled_height"), op.attr("pooled_width")],
+        spatial_scale=spatial_scale,
+        sample_ratio=op.attr("sampling_ratio"),
+        mode="avg",
+    )
+    g.add_node(op.output("Out")[0], out)
+
+
 def convert_rnn(g, op, block):
     """Operator converter for rnn."""

@@ -2166,6 +2226,45 @@ def convert_softmax(g, op, block):
     g.add_node(op.output("Out")[0], out)


+def convert_softmax_with_cross_entropy(g, op, block):
+    """Operator converter for softmax_with_cross_entropy."""
+
+    logits = g.get_node(op.input("Logits")[0])
+    labels = g.get_node(op.input("Label")[0])
+    ignore_index = op.attr("ignore_index")
+    axis = op.attr("axis")
+    if axis < 0:
+        axis = len(infer_shape(logits)) + axis
+
+    softmax = _op.nn.softmax(logits, axis=axis)
+
+    g.add_node(op.output("Softmax")[0], softmax)
+
+    softmax = _op.log(softmax)
+    soft_label = op.attr("soft_label")
+    if soft_label:
+        loss = _op.sum(-labels * softmax, axis=axis)
+    else:
+        labels_one = _op.one_hot(
+            labels,
+            on_value=_expr.const(1.0, dtype="float32"),
+            off_value=_expr.const(0.0, dtype="float32"),
+            depth=infer_shape(logits)[axis],
+            axis=axis + 1,
+            dtype="float32",
+        )
+        labels_one = _op.squeeze(labels_one, axis=axis)
+        loss = _op.sum(-labels_one * softmax, axis=axis)
+        loss = _op.expand_dims(loss, axis=axis)
+    if ignore_index != -100:  # only when soft_label is False
+        assert not soft_label, "soft_label and ignore_index cannot be set at the same time."
+        ignore_mask = _op.not_equal(labels, _expr.const(ignore_index, dtype="int64"))
+        ignore_mask = _op.cast(ignore_mask, "float32")
+        loss = _op.multiply(loss, ignore_mask)
+
+    g.add_node(op.output("Loss")[0], loss)
+
+
 def convert_softplus(g, op, block):
     """Operator converter for softplus."""

@@ -2460,6 +2559,7 @@ def convert_where_index(g, op, block):
     "abs": convert_unary_op,
     "acos": convert_unary_op,
     "addmm": convert_addmm,
+    "affine_channel": convert_affine_channel,
     "arg_max": convert_arg_max_min,
     "arg_min": convert_arg_max_min,
     "argsort": convert_argsort,
@@ -2556,6 +2656,7 @@ def convert_where_index(g, op, block):
     "norm": convert_norm,
     "not_equal": convert_elementwise_op,
     "one_hot_v2": convert_one_hot_v2,
+    "p_norm": convert_p_norm,
     "pad1d": convert_padding,
     "pad2d": convert_padding,
     "pad3d": convert_padding,
@@ -2568,6 +2669,7 @@ def convert_where_index(g, op, block):
     "relu6": convert_relu6,
     "reshape2": convert_reshape,
     "round": convert_unary_op,
+    "roi_align": convert_roi_align,
     "reciprocal": convert_reciprocal,
     "reduce_all": convert_reduce,
     "reduce_any": convert_reduce,
@@ -2591,6 +2693,7 @@ def convert_where_index(g, op, block):
     "size": convert_size,
     "slice": convert_slice,
     "softmax": convert_softmax,
+    "softmax_with_cross_entropy": convert_softmax_with_cross_entropy,
     "softplus": convert_softplus,
     "softsign": convert_softsign,
     "softshrink": convert_softshrink,
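The hard-label branch of convert_softmax_with_cross_entropy above composes log(softmax), a one_hot mask, a negated sum, and an optional ignore_index mask. A minimal NumPy sketch of the same decomposition for 2-D logits with axis=-1 (illustrative only, not part of the patch; the max-shift for numerical stability is an extra step the converter itself does not perform):

    import numpy as np

    def softmax_xent_hard(logits, labels, ignore_index=-100):
        # logits: [N, C] float, labels: [N, 1] integer class ids, axis fixed at -1.
        shifted = logits - logits.max(axis=-1, keepdims=True)  # numerical stability
        log_sm = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
        one_hot = np.eye(logits.shape[-1])[labels[:, 0]]        # one-hot mask, [N, C]
        loss = -(one_hot * log_sm).sum(axis=-1, keepdims=True)  # [N, 1]
        if ignore_index != -100:
            # Zero out rows whose label equals ignore_index, as the converter does.
            loss = loss * (labels != ignore_index).astype(log_sm.dtype)
        return loss

    logits = np.random.rand(5, 3).astype("float32")
    labels = np.random.randint(0, 3, size=(5, 1))
    print(softmax_xent_hard(logits, labels))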
diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py
index 289fc0faa3ef..3d8d476eb6a1 100755
--- a/tests/python/frontend/paddlepaddle/test_forward.py
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -2302,5 +2302,112 @@ def forward(self, x, y):
     verify_model(Dist(), input_data=[y, v])


+@tvm.testing.uses_gpu
+def test_forward_affine_channel():
+    class AffineChannel(nn.Layer):
+        def __init__(self):
+            super(AffineChannel, self).__init__()
+
+        @paddle.jit.to_static
+        def forward(self, inputs, scale, bias):
+            return paddle.fluid.layers.affine_channel(inputs, scale, bias)
+
+    input_shape = [2, 3, 4, 5]
+    input_data = paddle.rand(input_shape, dtype="float32")
+    scale_data = paddle.rand([input_shape[1]], dtype="float32")
+    bias_data = paddle.rand([input_shape[1]], dtype="float32")
+    verify_model(
+        AffineChannel(),
+        [
+            input_data,
+            scale_data,
+            bias_data,
+        ],
+    )
+
+
+@tvm.testing.uses_gpu
+def test_forward_p_norm():
+    class PNorm(nn.Layer):
+        def __init__(self, axis, keepdim, p=1):
+            super(PNorm, self).__init__()
+            self.p = p
+            self.axis = axis
+            self.keepdim = keepdim
+
+        @paddle.jit.to_static
+        def forward(self, input_data):
+            return paddle.norm(input_data, p=self.p, axis=self.axis, keepdim=self.keepdim)
+
+    input_data = paddle.rand((2, 2, 3), dtype="float32")
+    verify_model(PNorm(axis=0, keepdim=True), input_data=input_data)
+    verify_model(PNorm(axis=0, keepdim=False), input_data=input_data)
+    verify_model(PNorm(axis=1, keepdim=True, p=1.5), input_data=input_data)
+    verify_model(PNorm(axis=-1, keepdim=True, p=3.4), input_data=input_data)
+
+
+@tvm.testing.uses_gpu
+def test_forward_roi_align():
+    class RoiAlign(nn.Layer):
+        def __init__(self, spatial_scale=1.0, sampling_ratio=-1, aligned=False):
+            super(RoiAlign, self).__init__()
+            self.spatial_scale = spatial_scale
+            self.sampling_ratio = sampling_ratio
+            self.aligned = aligned
+
+        @paddle.jit.to_static
+        def forward(self, input_data, rois, rois_num):
+            return paddle.vision.ops.roi_align(
+                input_data, rois, rois_num, 3, self.spatial_scale, self.sampling_ratio, self.aligned
+            )
+
+    input_data = paddle.rand((1, 128, 32, 32), dtype="float32")
+    boxes = paddle.rand([3, 4])
+    boxes[:, 2] += boxes[:, 0] + 3
+    boxes[:, 3] += boxes[:, 1] + 4
+    boxes_num = paddle.to_tensor([3]).astype("int32")
+    verify_model(RoiAlign(), input_data=[input_data, boxes, boxes_num])
+    verify_model(RoiAlign(aligned=True), input_data=[input_data, boxes, boxes_num])
+    verify_model(
+        RoiAlign(spatial_scale=2.0, aligned=True), input_data=[input_data, boxes, boxes_num]
+    )
+
+
+@tvm.testing.uses_gpu
+def test_forward_softmax_with_cross_entropy():
+    class SoftmaxWithCrossEntropy(nn.Layer):
+        def __init__(self, soft_label=False, ignore_index=-100, return_softmax=False, axis=-1):
+            super(SoftmaxWithCrossEntropy, self).__init__()
+            self.soft_label = soft_label
+            self.ignore_index = ignore_index
+            self.return_softmax = return_softmax
+            self.axis = axis
+
+        @paddle.jit.to_static
+        def forward(self, input_data, label):
+            return paddle.nn.functional.softmax_with_cross_entropy(
+                input_data,
+                label,
+                soft_label=self.soft_label,
+                ignore_index=self.ignore_index,
+                return_softmax=self.return_softmax,
+                axis=self.axis,
+            )
+
+    input_data = paddle.rand([5, 3], dtype="float32")
+    label = paddle.randint(0, 2, [5, 1])
+    verify_model(SoftmaxWithCrossEntropy(), input_data=[input_data, label])
+    verify_model(SoftmaxWithCrossEntropy(return_softmax=True), input_data=[input_data, label])
+    verify_model(
+        SoftmaxWithCrossEntropy(return_softmax=True, ignore_index=1), input_data=[input_data, label]
+    )
+    input_data = paddle.rand([5, 4, 3], dtype="float32")
+    label = paddle.randint(0, 2, [5, 1, 3])
+    verify_model(SoftmaxWithCrossEntropy(axis=1), input_data=[input_data, label])
+    label = paddle.randint(0, 2, [5, 4, 3]).astype("float32")
+    verify_model(SoftmaxWithCrossEntropy(soft_label=True), input_data=[input_data, label])
+    verify_model(SoftmaxWithCrossEntropy(soft_label=True, axis=0), input_data=[input_data, label])
+
+
 if __name__ == "__main__":
     tvm.testing.main()
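verify_model in the tests above exports each decorated layer and runs it through the Relay PaddlePaddle frontend. A rough standalone sketch of that conversion path (the layer, save path, and shapes are placeholders; assumes paddlepaddle and a TVM build that includes this frontend):

    import paddle
    import tvm
    from tvm import relay

    class PNormLayer(paddle.nn.Layer):
        def forward(self, x):
            return paddle.norm(x, p=1.5, axis=-1)

    layer = PNormLayer()
    layer.eval()
    # Export a static graph, reload it as a TranslatedLayer, then convert.
    spec = [paddle.static.InputSpec(shape=[2, 2, 3], dtype="float32", name="x")]
    paddle.jit.save(layer, "/tmp/pnorm", input_spec=spec)  # placeholder path
    translated = paddle.jit.load("/tmp/pnorm")
    mod, params = relay.frontend.from_paddle(translated, shape_dict={"x": [2, 2, 3]})
    with tvm.transform.PassContext(opt_level=3):
        lib = relay.build(mod, target="llvm", params=params)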
From cbdb3051d767f05682112489a1adee8df4d41714 Mon Sep 17 00:00:00 2001
From: PuQing
Date: Fri, 12 May 2023 20:49:26 +0800
Subject: [PATCH 2/2] delete affine_channel op convert

---
 python/tvm/relay/frontend/paddlepaddle.py | 21 ----------------
 .../frontend/paddlepaddle/test_forward.py | 24 -------------------
 2 files changed, 45 deletions(-)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index 48d11e7fd5a0..5d0e9dca725f 100755
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -141,26 +141,6 @@ def convert_addmm(g, op, block):
     g.add_node(op.output("Out")[0], out)


-def convert_affine_channel(g, op, block):
-    """Operator converter for affine_channel."""
-
-    if "data_layout" in op.attr_names:
-        assert (
-            op.attr("data_layout") == "NCHW" or op.attr("data_layout") == "AnyLayout"
-        ), "Only NCHW and AnyLayout are supported for affine_channel. But received: {}".format(
-            op.attr("data_layout")
-        )
-
-    x = g.get_node(op.input("X")[0])
-    bias = g.get_node(op.input("Bias")[0])
-    scale = g.get_node(op.input("Scale")[0])
-    scale = _op.reshape(scale, [-1, 1, 1])
-    bias = _op.reshape(bias, [-1, 1, 1])
-    x = _op.multiply(x, scale)
-    out = _op.add(x, bias)
-    g.add_node(op.output("Out")[0], out)
-
-
 def convert_arg_max_min(g, op, block):
     """Operator converter for arg_max and arg_min."""

@@ -2559,7 +2539,6 @@ def convert_where_index(g, op, block):
     "abs": convert_unary_op,
     "acos": convert_unary_op,
     "addmm": convert_addmm,
-    "affine_channel": convert_affine_channel,
     "arg_max": convert_arg_max_min,
     "arg_min": convert_arg_max_min,
     "argsort": convert_argsort,
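The converter removed above lowered affine_channel to reshape -> multiply -> add over the channel dimension. For reference, the equivalent NCHW computation is plain broadcasting; a NumPy sketch (not part of the patch):

    import numpy as np

    def affine_channel_nchw(x, scale, bias):
        # Per-channel scale and shift broadcast over H and W, matching the
        # deleted reshape([-1, 1, 1]) -> multiply -> add chain.
        return x * scale.reshape(-1, 1, 1) + bias.reshape(-1, 1, 1)

    x = np.random.rand(2, 3, 4, 5).astype("float32")
    scale = np.random.rand(3).astype("float32")
    bias = np.random.rand(3).astype("float32")
    assert affine_channel_nchw(x, scale, bias).shape == x.shape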
op.attr("data_layout") == "AnyLayout" - ), "Only NCHW and AnyLayout are supported for affine_channel. But received: {}".format( - op.attr("data_layout") - ) - - x = g.get_node(op.input("X")[0]) - bias = g.get_node(op.input("Bias")[0]) - scale = g.get_node(op.input("Scale")[0]) - scale = _op.reshape(scale, [-1, 1, 1]) - bias = _op.reshape(bias, [-1, 1, 1]) - x = _op.multiply(x, scale) - out = _op.add(x, bias) - g.add_node(op.output("Out")[0], out) - - def convert_arg_max_min(g, op, block): """Operator converter for arg_max and arg_min.""" @@ -2559,7 +2539,6 @@ def convert_where_index(g, op, block): "abs": convert_unary_op, "acos": convert_unary_op, "addmm": convert_addmm, - "affine_channel": convert_affine_channel, "arg_max": convert_arg_max_min, "arg_min": convert_arg_max_min, "argsort": convert_argsort, diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py index 3d8d476eb6a1..df95935cfb4d 100755 --- a/tests/python/frontend/paddlepaddle/test_forward.py +++ b/tests/python/frontend/paddlepaddle/test_forward.py @@ -2302,30 +2302,6 @@ def forward(self, x, y): verify_model(Dist(), input_data=[y, v]) -@tvm.testing.uses_gpu -def test_forward_affine_channel(): - class AffineChannel(nn.Layer): - def __init__(self): - super(AffineChannel, self).__init__() - - @paddle.jit.to_static - def forward(self, inputs, scale, bias): - return paddle.fluid.layers.affine_channel(inputs, scale, bias) - - input_shape = [2, 3, 4, 5] - input_data = paddle.rand(input_shape, dtype="float32") - scale_data = paddle.rand([input_shape[1]], dtype="float32") - bias_data = paddle.rand([input_shape[1]], dtype="float32") - verify_model( - AffineChannel(), - [ - input_data, - scale_data, - bias_data, - ], - ) - - @tvm.testing.uses_gpu def test_forward_p_norm(): class PNorm(nn.Layer):