From 6583152d40f0e6aedf075624141017a661dfcc4a Mon Sep 17 00:00:00 2001
From: jiangjiajun
Date: Fri, 5 Nov 2021 15:53:57 +0000
Subject: [PATCH 1/9] add interpolate and flatten

---
 python/tvm/relay/frontend/paddlepaddle.py | 160 ++++++++++++++++--
 .../frontend/paddlepaddle/test_forward.py |  89 ++++++++++
 2 files changed, 235 insertions(+), 14 deletions(-)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index fa7c80c912d9..0880f5986c08 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -39,7 +39,6 @@
     infer_type,
     infer_value,
     shape_of,
-    try_infer_value,
     new_var,
 )
 
@@ -382,10 +381,15 @@ def convert_expand(g, op, block):
     x = g.get_node(op.input("X")[0])
     if op.input("Shape"):
         sizes = g.get_node(op.input("Shape")[0])
-        sizes = try_infer_value(sizes, g.get_params())[0]
     else:
         sizes = op.attr("shape")
 
+    if isinstance(sizes, _expr.Expr):
+        try:
+            sizes = infer_value(sizes, g.get_params()).numpy()
+        except Exception:
+            pass
+
     if isinstance(sizes, np.ndarray):
         sizes = sizes.tolist()
 
@@ -447,10 +451,14 @@ def convert_fill_constant(g, op, block):
     value = _expr.const(value).astype(dtype)
     if "ValueTensor" in op.input_names and op.input("ValueTensor"):
         shape = g.get_node(op.input("ValueTensor")[0])
-        shape = try_infer_value(shape, g.get_params())[0]
     if "ShapeTensor" in op.input_names and op.input("ShapeTensor"):
         shape = g.get_node(op.input("ShapeTensor")[0])
-        shape = try_infer_value(shape, g.get_params())[0]
+
+    if isinstance(shape, _expr.Expr):
+        try:
+            shape = infer_value(shape, g.get_params()).numpy()
+        except Exception:
+            pass
 
     if isinstance(shape, np.ndarray):
         shape = shape.tolist()
@@ -459,6 +467,33 @@ def convert_fill_constant(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
+def convert_flatten(g, op, block):
+    """Operator converter for flatten."""
+
+    x = g.get_node(op.input("X")[0])
+    input_shape = list(infer_shape(x))
+
+    start = op.attr("start_axis")
+    end = op.attr("stop_axis")
+    ndim = len(input_shape)
+    if end < 0:
+        end += ndim
+    new_shape = [0] * start
+
+    new_shape.append(-1)
+    squeeze_axes = []
+    for i in range(start + 1, end + 1):
+        new_shape.append(1)
+        squeeze_axes.append(i)
+    for _ in range(end + 1, ndim):
+        new_shape.append(0)
+    out = _op.reshape(x, new_shape)
+    if squeeze_axes:
+        out = _op.squeeze(out, axis=squeeze_axes)
+
+    g.add_node(op.output("Out")[0], out)
+
+
 def convert_gather(g, op, block):
     """Operator converter for gather."""
 
@@ -552,6 +587,102 @@ def convert_hard_swish(g, op, block):
     g.add_node(op.output("Out")[0], out)
 
 
+def convert_interpolate(g, op, block):
+    """Operator converter for interpolate."""
+
+    def get_interpolate_mode(op):
+        """Get parameters for interpolate methos."""
+
+        interp_method = op.attr("interp_method")
+        align_corners = op.attr("align_corners")
+        align_mode = op.attr("align_mode")
+
+        rounding_method = ""
+        if interp_method == "nearest":
+            interp_method = "nearest_neighbor"
+            coordinate_transformation_mode = "asymmetric"
+            rounding_method = "floor"
+        elif interp_method == "bilinear":
+            interp_method = "linear"
+            if not align_corners and align_mode == 0:
+                coordinate_transformation_mode = "half_pixel"
+            else:
+                if align_corners:
+                    coordinate_transformation_mode = "align_corners"
+                else:
+                    coordinate_transformation_mode = "asymmetric"
+        elif interp_method == "bicubic":
+            interp_method = "cubic"
+            if align_corners:
+                coordinate_transformation_mode = "align_corners"
+            else:
+                coordinate_transformation_mode = "half_pixel"
+        else:
+            msg = "interp_method {} is not supported for PaddlePaddle's interpolate"
"interp_method {} is not supported for PaddlePaddle's interpolate" + raise tvm.error.OpAttributeInvalid(msg.format(interp_method)) + return rounding_method, interp_method, coordinate_transformation_mode + + layout = op.attr("data_layout") + out_h = op.attr("out_h") + out_w = op.attr("out_w") + + x = g.get_node(op.input("X")[0]) + x_shape = infer_shape(x) + assert len(x_shape) == 4, "Only 4D input tensor is supported for PaddlePaddle's interpolate" + input_out_size = op.input("OutSize") + input_size_tensor = op.input("SizeTensor") + input_scale = op.input("Scale") + rounding_method, interp_method, coordinate_transformation_mode = get_interpolate_mode(op) + + if input_out_size: + # if out_size is a tensor + out_size = g.get_node(input_out_size[0]) + try: + out_size = infer_value(out_size, g.get_params()).numpy().tolist() + except Exception: + pass + elif input_size_tensor: + # if out_size is a list of tensor + out_size = list() + for name in input_size_tensor: + size = g.get_node(name) + if len(infer_shape(size)) == 0: + shape = _op.reshape(shape, [-1]) + out_size.append(size) + out_size = _op.concatenate(out_size, axis=0) + try: + out_size = infer_value(out_size, g.get_params()).numpy().tolist() + except Exception: + pass + elif input_scale: + # if out_size is not defined, but scale is defined + input_scale = g.get_node(input_scale[0]) + input_shape = shape_of(x).astype("float32") + if layout.startswith("NC"): + out_size = _op.strided_slice(input_shape, begin=[2], end=[4]) * input_scale + else: + out_size = _op.strided_slice(input_shape, begin=[1], end=[3]) * input_scale + out_size = out_size.astype("int32") + try: + out_size = infer_value(out_size, g.get_params()).numpy().tolist() + except Exception: + pass + else: + # if out_size is a constant value + out_size = [out_h, out_w] + + out = _op.image.resize2d( + x, + size=out_size, + layout=layout, + method=interp_method, + coordinate_transformation_mode=coordinate_transformation_mode, + rounding_method=rounding_method, + cubic_alpha=-0.75, + ) + g.add_node(op.output("Out")[0], out) + + def convert_layer_norm(g, op, block): """Operator converter for layer_norm.""" @@ -939,18 +1070,17 @@ def convert_reshape(g, op, block): if input_shape: new_shape = g.get_node(input_shape[0]) elif input_shape_tensor: - tmp_shape = [] + new_shape = [] for shape_name in input_shape_tensor: shape = g.get_node(shape_name) if len(infer_shape(shape)) == 0: shape = _op.reshape(shape, [-1]) - if isinstance(shape, _expr.Constant): - tmp_shape.append(shape) - elif isinstance(shape, _expr.Expr): - tmp_shape.append(shape) - else: - tmp_shape.append(_expr.const(np.array(shape).astype("int64"))) - new_shape = _op.concatenate(tmp_shape, axis=0) + new_shape.append(shape) + new_shape = _op.concatenate(new_shape, axis=0) + try: + new_shape = infer_value(new_shape, g.get_params()).numpy().tolist() + except Exception: + pass else: new_shape = op.attr("shape") out = _op.reshape(data, new_shape) @@ -1184,6 +1314,8 @@ def convert_unsqueeze(g, op, block): "assign_value": convert_assign_value, "atan": convert_unary_op, "batch_norm": convert_batch_norm, + "bicubic_interp_v2": convert_interpolate, + "bilinear_interp_v2": convert_interpolate, "bmm": convert_bmm, "brelu": convert_brelu, "cast": convert_cast, @@ -1201,7 +1333,6 @@ def convert_unsqueeze(g, op, block): "elementwise_floordiv": convert_elementwise_op, "elementwise_max": convert_elementwise_op, "elementwise_min": convert_elementwise_op, - "elementwise_mod": convert_elementwise_op, "elementwise_mul": convert_elementwise_op, 
"elementwise_pow": convert_elementwise_op, "elementwise_prod": convert_elementwise_op, @@ -1214,6 +1345,7 @@ def convert_unsqueeze(g, op, block): "feed": convert_feed, "fill_any_like": convert_fill_any_like, "fill_constant": convert_fill_constant, + "flatten_contiguous_range": convert_flatten, "floor": convert_unary_op, "floor_mod": convert_elementwise_op, "gather": convert_gather, @@ -1243,7 +1375,7 @@ def convert_unsqueeze(g, op, block): "matmul": convert_matmul, "matmul_v2": convert_matmul, "mul": convert_mul, - "not_equal": convert_elementwise_op, + "nearest_interp_v2": convert_interpolate, "pad1d": convert_padding, "pad2d": convert_padding, "pad3d": convert_padding, diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py index b8d4c1150238..0cb5e94cc013 100644 --- a/tests/python/frontend/paddlepaddle/test_forward.py +++ b/tests/python/frontend/paddlepaddle/test_forward.py @@ -585,6 +585,24 @@ def forward(self, x, y): verify_model(ExpandAs(), [x_data, y_data]) +@tvm.testing.uses_gpu +def test_forward_flatten(): + class Flatten(nn.Layer): + def __init__(self, start_axis=0, stop_axis=-1): + super(Flatten, self).__init__() + self.start_axis = start_axis + self.stop_axis = stop_axis + + @paddle.jit.to_static + def forward(self, x): + return paddle.flatten(x, start_axis=self.start_axis, stop_axis=self.stop_axis) + + input_data = paddle.rand([2, 3, 4, 5, 2], dtype="float32") + verify_model(Flatten(), input_data=input_data) + verify_model(Flatten(2), input_data=input_data) + verify_model(Flatten(2, -2), input_data=input_data) + + @tvm.testing.uses_gpu def test_forward_gather(): class Gather(nn.Layer): @@ -764,6 +782,77 @@ def hard_swish(inputs): verify_model(hard_swish, input_data=input_data) +@tvm.testing.uses_gpu +def test_forward_interpolate(): + class Interpolate0(nn.Layer): + def __init__( + self, + mode="nearest", + align_corners=False, + align_mode=0, + data_format="NCHW", + use_scale=False, + use_list=False, + use_const=False, + ): + super(Interpolate0, self).__init__() + self.mode = mode + self.align_corners = align_corners + self.align_mode = align_mode + self.data_format = data_format + self.use_scale = use_scale + self.use_list = use_list + self.use_const = use_const + + @paddle.jit.to_static + def forward(self, x): + size = np.array([15, 19]).astype("int32") + scale = np.array([2.0, 1.0]).astype("float32") + if not self.use_list and not self.use_const: + size = paddle.to_tensor(size) + scale = paddle.to_tensor(scale) + elif not self.use_const: + size0 = paddle.to_tensor(size[0:1]) + size = [size0, int(size[1])] + else: + size = size.tolist() + scale = scale.tolist() + if not self.use_scale: + return paddle.nn.functional.interpolate( + x, + size=size, + mode=self.mode, + align_corners=self.align_corners, + align_mode=self.align_mode, + data_format=self.data_format, + ) + else: + return paddle.nn.functional.interpolate( + x, + scale_factor=scale, + mode=self.mode, + align_corners=self.align_corners, + align_mode=self.align_mode, + data_format=self.data_format, + ) + + input_data = paddle.rand([1, 2, 8, 12]).astype("float32") + verify_model(Interpolate0(), input_data) + verify_model(Interpolate0(use_list=True), input_data) + verify_model(Interpolate0(use_scale=True), input_data) + verify_model(Interpolate0("bilinear", use_scale=True), input_data) + verify_model(Interpolate0("bilinear", use_scale=True, align_corners=True), input_data) + verify_model( + Interpolate0( + "bilinear", use_scale=True, align_corners=True, 
+            "bilinear", use_scale=True, align_corners=True, align_mode=1, data_format="NHWC"
+        ),
+        input_data,
+    )
+    verify_model(
+        Interpolate0("bicubic", use_scale=True, align_corners=True, align_mode=1), input_data
+    )
+
+
 @tvm.testing.uses_gpu
 def test_forward_layer_norm():
     @paddle.jit.to_static

From a8f6835c437333bba177e9100b0e13c50916f72f Mon Sep 17 00:00:00 2001
From: jiangjiajun
Date: Fri, 5 Nov 2021 16:10:54 +0000
Subject: [PATCH 2/9] fix spelling

---
 python/tvm/relay/frontend/paddlepaddle.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index 0880f5986c08..56dd80654ac4 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -591,7 +591,7 @@ def convert_interpolate(g, op, block):
     """Operator converter for interpolate."""
 
     def get_interpolate_mode(op):
-        """Get parameters for interpolate methos."""
+        """Get parameters for interpolation methods."""
 
         interp_method = op.attr("interp_method")
         align_corners = op.attr("align_corners")

From 208c498c034bea67a31a7b87b5b8248362419058 Mon Sep 17 00:00:00 2001
From: jiangjiajun
Date: Fri, 5 Nov 2021 16:14:22 +0000
Subject: [PATCH 3/9] fix diff

---
 python/tvm/relay/frontend/paddlepaddle.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index 56dd80654ac4..e093937f03a2 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -1333,6 +1333,7 @@ def convert_unsqueeze(g, op, block):
     "elementwise_floordiv": convert_elementwise_op,
     "elementwise_max": convert_elementwise_op,
     "elementwise_min": convert_elementwise_op,
+    "elementwise_mod": convert_elementwise_op,
     "elementwise_mul": convert_elementwise_op,
     "elementwise_pow": convert_elementwise_op,
     "elementwise_prod": convert_elementwise_op,

From 44b169ee6b39f785b700735f09ffc29d7e675191 Mon Sep 17 00:00:00 2001
From: jiangjiajun
Date: Fri, 5 Nov 2021 16:16:51 +0000
Subject: [PATCH 4/9] rename unit test name

---
 .../frontend/paddlepaddle/test_forward.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/tests/python/frontend/paddlepaddle/test_forward.py b/tests/python/frontend/paddlepaddle/test_forward.py
index 0cb5e94cc013..e427d6f563f9 100644
--- a/tests/python/frontend/paddlepaddle/test_forward.py
+++ b/tests/python/frontend/paddlepaddle/test_forward.py
@@ -784,7 +784,7 @@ def hard_swish(inputs):
 
 @tvm.testing.uses_gpu
 def test_forward_interpolate():
-    class Interpolate0(nn.Layer):
+    class Interpolate(nn.Layer):
         def __init__(
             self,
             mode="nearest",
@@ -795,7 +795,7 @@ def __init__(
             use_list=False,
             use_const=False,
         ):
-            super(Interpolate0, self).__init__()
+            super(Interpolate, self).__init__()
             self.mode = mode
             self.align_corners = align_corners
             self.align_mode = align_mode
@@ -837,19 +837,19 @@ def forward(self, x):
                 )
 
     input_data = paddle.rand([1, 2, 8, 12]).astype("float32")
-    verify_model(Interpolate0(), input_data)
-    verify_model(Interpolate0(use_list=True), input_data)
-    verify_model(Interpolate0(use_scale=True), input_data)
-    verify_model(Interpolate0("bilinear", use_scale=True), input_data)
-    verify_model(Interpolate0("bilinear", use_scale=True, align_corners=True), input_data)
+    verify_model(Interpolate(), input_data)
+    verify_model(Interpolate(use_list=True), input_data)
+    verify_model(Interpolate(use_scale=True), input_data)
+    verify_model(Interpolate("bilinear", use_scale=True), input_data)
+    verify_model(Interpolate("bilinear", use_scale=True, align_corners=True), input_data)
     verify_model(
-        Interpolate0(
+        Interpolate(
             "bilinear", use_scale=True, align_corners=True, align_mode=1, data_format="NHWC"
         ),
         input_data,
     )
     verify_model(
-        Interpolate0("bicubic", use_scale=True, align_corners=True, align_mode=1), input_data
+        Interpolate("bicubic", use_scale=True, align_corners=True, align_mode=1), input_data
     )
 
 

From 4f4ddcc76ff295109e7fc1846ff516b045f805f2 Mon Sep 17 00:00:00 2001
From: jiangjiajun
Date: Fri, 5 Nov 2021 16:47:59 +0000
Subject: [PATCH 5/9] add parameters for common:try_infer_value

---
 python/tvm/relay/frontend/common.py       |  4 ++--
 python/tvm/relay/frontend/paddlepaddle.py | 39 +++++++++--------------
 2 files changed, 17 insertions(+), 26 deletions(-)

diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py
index cf579923e301..fb64f4c58c86 100755
--- a/python/tvm/relay/frontend/common.py
+++ b/python/tvm/relay/frontend/common.py
@@ -578,14 +578,14 @@ def infer_value_simulated(input_val, params):
     return output_value
 
 
-def try_infer_value(val, on_success=None, on_failure=None):
+def try_infer_value(val, on_success=None, on_failure=None, parameters={}):
     """Try running infer_value on the input val, and if successful, return the inferred value or
     pass it to on_success callback if provided. Otherwise, run on_failure callback if it is
     provided, or return the input val as output. In each case, the second return value
     indicates whether infer_value has succeeded or not.
     """
     try:
-        ret = infer_value(val, {}).numpy()
+        ret = infer_value(val, parameters).numpy()
         if on_success:
             return on_success(ret), True
         return ret, True
diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index e093937f03a2..ab92df0c7407 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -38,6 +38,7 @@
     infer_shape,
     infer_type,
     infer_value,
+    try_infer_value,
     shape_of,
     new_var,
 )
@@ -385,10 +386,7 @@ def convert_expand(g, op, block):
         sizes = op.attr("shape")
 
     if isinstance(sizes, _expr.Expr):
-        try:
-            sizes = infer_value(sizes, g.get_params()).numpy()
-        except Exception:
-            pass
+        sizes, infered = try_infer_value(sizes, parameters=g.get_params())
 
     if isinstance(sizes, np.ndarray):
         sizes = sizes.tolist()
@@ -455,10 +453,7 @@ def convert_fill_constant(g, op, block):
         shape = g.get_node(op.input("ShapeTensor")[0])
 
     if isinstance(shape, _expr.Expr):
-        try:
-            shape = infer_value(shape, g.get_params()).numpy()
-        except Exception:
-            pass
+        shape, infered = try_infer_value(shape, parameters=g.get_params())
 
     if isinstance(shape, np.ndarray):
         shape = shape.tolist()
@@ -637,36 +632,33 @@ def get_interpolate_mode(op):
     if input_out_size:
         # if out_size is a tensor
         out_size = g.get_node(input_out_size[0])
-        try:
-            out_size = infer_value(out_size, g.get_params()).numpy().tolist()
-        except Exception:
-            pass
+        out_size, infered = try_infer_value(out_size, parameters=g.get_params())
+        if infered:
+            out_size = out_size.tolist()
     elif input_size_tensor:
         # if out_size is a list of tensor
         out_size = list()
         for name in input_size_tensor:
             size = g.get_node(name)
             if len(infer_shape(size)) == 0:
                 size = _op.reshape(size, [-1])
             out_size.append(size)
         out_size = _op.concatenate(out_size, axis=0)
-        try:
-            out_size = infer_value(out_size, g.get_params()).numpy().tolist()
-        except Exception:
-            pass
+        out_size, infered = try_infer_value(out_size, parameters=g.get_params())
+        if infered:
+            out_size = out_size.tolist()
     elif input_scale:
         # if out_size is not defined, but scale is defined
         input_scale = g.get_node(input_scale[0])
         input_shape = shape_of(x).astype("float32")
         if layout.startswith("NC"):
             out_size = _op.strided_slice(input_shape, begin=[2], end=[4]) * input_scale
         else:
             out_size = _op.strided_slice(input_shape, begin=[1], end=[3]) * input_scale
         out_size = out_size.astype("int32")
-        try:
-            out_size = infer_value(out_size, g.get_params()).numpy().tolist()
-        except Exception:
-            pass
+        out_size, infered = try_infer_value(out_size, parameters=g.get_params())
+        if infered:
+            out_size = out_size.tolist()
     else:
         # if out_size is a constant value
         out_size = [out_h, out_w]
@@ -1077,10 +1069,9 @@ def convert_reshape(g, op, block):
                 shape = _op.reshape(shape, [-1])
             new_shape.append(shape)
         new_shape = _op.concatenate(new_shape, axis=0)
-        try:
-            new_shape = infer_value(new_shape, g.get_params()).numpy().tolist()
-        except Exception:
-            pass
+        new_shape, infered = try_infer_value(new_shape, parameters=g.get_params())
+        if infered:
+            new_shape = new_shape.tolist()
     else:
         new_shape = op.attr("shape")
     out = _op.reshape(data, new_shape)

From a5fd9f66c79f7e45c76e98b5ac6d64a30eb8df2d Mon Sep 17 00:00:00 2001
From: jiangjiajun
Date: Fri, 5 Nov 2021 16:55:00 +0000
Subject: [PATCH 6/9] eliminate unnecessary diff

---
 python/tvm/relay/frontend/paddlepaddle.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index ab92df0c7407..76e74b4d8b31 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -38,8 +38,8 @@
     infer_shape,
     infer_type,
     infer_value,
-    try_infer_value,
     shape_of,
+    try_infer_value,
     new_var,
 )
 

From 43bfb3edd3a2fdd0e3889df9b9c77392540a0efe Mon Sep 17 00:00:00 2001
From: jiangjiajun
Date: Fri, 5 Nov 2021 16:59:39 +0000
Subject: [PATCH 7/9] fix pylint problem

---
 python/tvm/relay/frontend/paddlepaddle.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index 76e74b4d8b31..dea641161409 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -386,7 +386,7 @@ def convert_expand(g, op, block):
         sizes = op.attr("shape")
 
     if isinstance(sizes, _expr.Expr):
-        sizes, infered = try_infer_value(sizes, parameters=g.get_params())
+        sizes = try_infer_value(sizes, parameters=g.get_params())[0]
 
     if isinstance(sizes, np.ndarray):
         sizes = sizes.tolist()
@@ -453,7 +453,7 @@ def convert_fill_constant(g, op, block):
         shape = g.get_node(op.input("ShapeTensor")[0])
 
     if isinstance(shape, _expr.Expr):
-        shape, infered = try_infer_value(shape, parameters=g.get_params())
+        shape = try_infer_value(shape, parameters=g.get_params())[0]
 
     if isinstance(shape, np.ndarray):
         shape = shape.tolist()

From 677b5d4d16fdf0812bccc9cbda33b7829804d1e9 Mon Sep 17 00:00:00 2001
From: jiangjiajun
Date: Fri, 5 Nov 2021 17:05:42 +0000
Subject: [PATCH 8/9] fix pylint problem

---
 python/tvm/relay/frontend/common.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py
index fb64f4c58c86..737830986d74 100755
--- a/python/tvm/relay/frontend/common.py
+++ b/python/tvm/relay/frontend/common.py
@@ -578,14 +578,15 @@ def infer_value_simulated(input_val, params):
     return output_value
 
 
-def try_infer_value(val, on_success=None, on_failure=None, parameters={}):
+def try_infer_value(val, on_success=None, on_failure=None, parameters=None):
     """Try running infer_value on the input val, and if successful, return the inferred value or
     pass it to on_success callback if provided. Otherwise, run on_failure callback if it is
     provided, or return the input val as output. In each case, the second return value
     indicates whether infer_value has succeeded or not.
     """
     try:
-        ret = infer_value(val, parameters).numpy()
+        params = parameters if parameters is not None else {}
+        ret = infer_value(val, params).numpy()
         if on_success:
             return on_success(ret), True
         return ret, True

From 16c346448ef48bc6d70c60cfd93c26c04da5c4bb Mon Sep 17 00:00:00 2001
From: jiangjiajun
Date: Fri, 5 Nov 2021 17:33:24 +0000
Subject: [PATCH 9/9] eliminate unnecessary diff

---
 python/tvm/relay/frontend/paddlepaddle.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/python/tvm/relay/frontend/paddlepaddle.py b/python/tvm/relay/frontend/paddlepaddle.py
index dea641161409..967238552b24 100644
--- a/python/tvm/relay/frontend/paddlepaddle.py
+++ b/python/tvm/relay/frontend/paddlepaddle.py
@@ -1368,6 +1368,7 @@ def convert_unsqueeze(g, op, block):
     "matmul_v2": convert_matmul,
     "mul": convert_mul,
     "nearest_interp_v2": convert_interpolate,
+    "not_equal": convert_elementwise_op,
     "pad1d": convert_padding,
     "pad2d": convert_padding,
     "pad3d": convert_padding,
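
A short usage sketch, not part of the patches themselves: it illustrates how the reworked try_infer_value behaves once the `parameters` argument from PATCH 5/8 is available, assuming a TVM build that includes this series. The names `n` and `shape_expr` are invented for the example; in the PaddlePaddle frontend the bindings come from `g.get_params()`.

import numpy as np

import tvm
from tvm import relay
from tvm.relay.frontend.common import try_infer_value

# A shape expression that depends on a free variable; it can only be folded
# to a constant once a concrete value for "n" is supplied via `parameters`.
n = relay.var("n", shape=(2,), dtype="int32")
shape_expr = n * relay.const(np.array([2, 2], dtype="int32"))

# Without bindings, infer_value fails internally and the expression is
# returned unchanged, with the success flag set to False.
value, inferred = try_infer_value(shape_expr)
print(inferred)  # False

# With bindings (what the PaddlePaddle converters pass as g.get_params()),
# the expression is evaluated and a numpy array comes back.
value, inferred = try_infer_value(
    shape_expr, parameters={"n": tvm.nd.array(np.array([3, 4], dtype="int32"))}
)
print(inferred, value.tolist())  # True [6, 8]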