From 354f31c83409967afb7b17c2b2f68cd3ee03f307 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 28 Jun 2021 21:19:09 +0000 Subject: [PATCH 01/12] [Relay to Onnx conversion] * added support for Sigmoid op * added unit test --- python/tvm/contrib/target/onnx.py | 1 + tests/python/contrib/test_onnx.py | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/python/tvm/contrib/target/onnx.py b/python/tvm/contrib/target/onnx.py index b05265fa976a..cf53307ef503 100644 --- a/python/tvm/contrib/target/onnx.py +++ b/python/tvm/contrib/target/onnx.py @@ -667,6 +667,7 @@ def convert_attributes(cls, attrs): "clip": Clip, "expand_dims": Expand, "nn.lrn": LRN, + "sigmoid": rename("Sigmoid"), } diff --git a/tests/python/contrib/test_onnx.py b/tests/python/contrib/test_onnx.py index 3636409f8a06..2bbe23e1b896 100644 --- a/tests/python/contrib/test_onnx.py +++ b/tests/python/contrib/test_onnx.py @@ -530,6 +530,21 @@ def verify_lrn(xshape, size, dtype="float32"): verify_lrn(i, s) +def test_sigmoid(): + """Sigmoid unit test.""" + def verify_sigmoid(dshape, dtype="float32"): + x = relay.var("x", relay.ty.TensorType(dshape, dtype)) + y = relay.sigmoid(x) + func = relay.Function([x], y) + x_data = np.random.uniform(size=dshape).astype(dtype) + verify_results(func, [x_data], "test_sigmoid", rtol=1e-4, atol=1e-4) + + isize = [(1,3,480,640), (1,3,224,224)] + + for i in isize: + verify_sigmoid(i) + + if __name__ == "__main__": test_add() test_bias_add() @@ -554,3 +569,4 @@ def verify_lrn(xshape, size, dtype="float32"): test_clip() test_expand_dims() test_lrn() + test_sigmoid() From acb27e7d1bfdd0fb81c71ac34054332c0c424a58 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 28 Jun 2021 21:24:28 +0000 Subject: [PATCH 02/12] [Relay to Onnx conversion][Copy] * added support for Copy op * added unit test --- python/tvm/contrib/target/onnx.py | 1 + tests/python/contrib/test_onnx.py | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/python/tvm/contrib/target/onnx.py b/python/tvm/contrib/target/onnx.py index cf53307ef503..1d1e13dd0516 100644 --- a/python/tvm/contrib/target/onnx.py +++ b/python/tvm/contrib/target/onnx.py @@ -668,6 +668,7 @@ def convert_attributes(cls, attrs): "expand_dims": Expand, "nn.lrn": LRN, "sigmoid": rename("Sigmoid"), + "copy": rename("Identity"), } diff --git a/tests/python/contrib/test_onnx.py b/tests/python/contrib/test_onnx.py index 2bbe23e1b896..332751d09e6d 100644 --- a/tests/python/contrib/test_onnx.py +++ b/tests/python/contrib/test_onnx.py @@ -545,6 +545,21 @@ def verify_sigmoid(dshape, dtype="float32"): verify_sigmoid(i) +def test_copy(): + """Copy unit test.""" + def verify_copy(dshape, dtype="float32"): + x = relay.var("x", relay.ty.TensorType(dshape, dtype)) + y = relay.copy(x) + func = relay.Function([x], y) + x_data = np.random.uniform(size=dshape).astype(dtype) + verify_results(func, [x_data], "test_copy", rtol=1e-4, atol=1e-4) + + isize = [(1,3,480,640), (1,3,224,224)] + + for i in isize: + verify_copy(i) + + if __name__ == "__main__": test_add() test_bias_add() @@ -570,3 +585,4 @@ def verify_sigmoid(dshape, dtype="float32"): test_expand_dims() test_lrn() test_sigmoid() + test_copy() From 2e323850e49c0edbcf3dcb0ee0aa6c5021b59519 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 28 Jun 2021 21:31:11 +0000 Subject: [PATCH 03/12] [Relay to Onnx conversion][Pool] * added missing ceil_mode in average pool and max pool conversion --- python/tvm/contrib/target/onnx.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/tvm/contrib/target/onnx.py 
b/python/tvm/contrib/target/onnx.py index 1d1e13dd0516..89c575c5ffd8 100644 --- a/python/tvm/contrib/target/onnx.py +++ b/python/tvm/contrib/target/onnx.py @@ -147,6 +147,7 @@ def convert_attributes(cls, attrs): "pads": attrs.get_int_tuple("padding"), "strides": attrs.get_int_tuple("strides"), "kernel_shape": attrs.get_int_tuple("pool_size"), + "ceil_mode": 1 if attrs.ceil_mode else 0, } From 88792dfbbcabf0402be2d7dbce0036137cc13ca6 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 28 Jun 2021 21:37:51 +0000 Subject: [PATCH 04/12] [Relay to Onnx conversion][Round] * added support for Round op * added unit test --- python/tvm/contrib/target/onnx.py | 1 + tests/python/contrib/test_onnx.py | 16 ++++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/python/tvm/contrib/target/onnx.py b/python/tvm/contrib/target/onnx.py index 89c575c5ffd8..4eb8341e40e4 100644 --- a/python/tvm/contrib/target/onnx.py +++ b/python/tvm/contrib/target/onnx.py @@ -670,6 +670,7 @@ def convert_attributes(cls, attrs): "nn.lrn": LRN, "sigmoid": rename("Sigmoid"), "copy": rename("Identity"), + "round": rename("Round"), } diff --git a/tests/python/contrib/test_onnx.py b/tests/python/contrib/test_onnx.py index 332751d09e6d..b9151947d235 100644 --- a/tests/python/contrib/test_onnx.py +++ b/tests/python/contrib/test_onnx.py @@ -560,6 +560,21 @@ def verify_copy(dshape, dtype="float32"): verify_copy(i) +def test_round(): + """Round unit test.""" + def verify_round(dshape, dtype="float32"): + x = relay.var("x", relay.ty.TensorType(dshape, dtype)) + y = relay.round(x) + func = relay.Function([x], y) + x_data = np.random.uniform(size=dshape).astype(dtype) + verify_results(func, [x_data], "test_round", rtol=1e-4, atol=1e-4) + + isize = [(1,3,480,640), (1,3,224,224)] + + for i in isize: + verify_round(i) + + if __name__ == "__main__": test_add() test_bias_add() @@ -586,3 +601,4 @@ def verify_copy(dshape, dtype="float32"): test_lrn() test_sigmoid() test_copy() + test_round() From 38040fe75c7b391e61d86ac99bc4e4cb71cf4638 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 28 Jun 2021 21:48:22 +0000 Subject: [PATCH 05/12] [Relay to Onnx conversion][Cast] * added support for Cast op * added unit test --- python/tvm/contrib/target/onnx.py | 12 ++++++++++++ tests/python/contrib/test_onnx.py | 18 ++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/python/tvm/contrib/target/onnx.py b/python/tvm/contrib/target/onnx.py index 4eb8341e40e4..8264e4cc407e 100644 --- a/python/tvm/contrib/target/onnx.py +++ b/python/tvm/contrib/target/onnx.py @@ -24,6 +24,7 @@ import onnx import onnx.utils from onnx import numpy_helper, OperatorSetIdProto, defs +from onnx import TensorProto import tvm from tvm import relay import tvm._ffi @@ -634,6 +635,16 @@ def convert_attributes(cls, attrs): return {"alpha": attrs.alpha, "beta": attrs.beta, "bias": attrs.bias, "size": attrs.size} +class Cast(OpConverter): + """ Operator converter for Cast.""" + + @classmethod + def convert_attributes(cls, attrs): + return { + 'to': getattr(TensorProto, attrs.dtype.upper()) + } + + relay_to_onnx_op_mapping = { "reshape": Reshape, "nn.conv2d": Conv, @@ -671,6 +682,7 @@ def convert_attributes(cls, attrs): "sigmoid": rename("Sigmoid"), "copy": rename("Identity"), "round": rename("Round"), + "cast": Cast, } diff --git a/tests/python/contrib/test_onnx.py b/tests/python/contrib/test_onnx.py index b9151947d235..cd79aaba54b0 100644 --- a/tests/python/contrib/test_onnx.py +++ b/tests/python/contrib/test_onnx.py @@ -575,6 +575,23 @@ def verify_round(dshape, dtype="float32"): 
verify_round(i) +def test_cast(): + """Cast unit test.""" + def verify_cast(dshape, dtype): + x = relay.var("x", relay.ty.TensorType(dshape, "float32")) + y = relay.cast(x, dtype) + func = relay.Function([x], y) + x_data = np.random.uniform(size=dshape).astype("float32") + verify_results(func, [x_data], "test_cast", rtol=1e-4, atol=1e-4) + + isize = [(1,3,480,640), (1,3,224,224)] + out_dtypes = ['int8', 'int16', 'uint8', 'uint16'] + + for i in isize: + for o_dtype in out_dtypes: + verify_cast(i, o_dtype) + + if __name__ == "__main__": test_add() test_bias_add() @@ -602,3 +619,4 @@ def verify_round(dshape, dtype="float32"): test_sigmoid() test_copy() test_round() + test_cast() From 86d44b83564aff8a720b9b8be99448ed255f1811 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 28 Jun 2021 21:50:09 +0000 Subject: [PATCH 06/12] [Relay to Onnx testing] * fixed formatting --- tests/python/contrib/test_onnx.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/python/contrib/test_onnx.py b/tests/python/contrib/test_onnx.py index cd79aaba54b0..32ee4de5f1f8 100644 --- a/tests/python/contrib/test_onnx.py +++ b/tests/python/contrib/test_onnx.py @@ -516,6 +516,7 @@ def verify_expand_dims(dshape, axis, num_newaxis, dtype="float32"): def test_lrn(): + """LRN unit test.""" def verify_lrn(xshape, size, dtype="float32"): x = relay.var("x", relay.ty.TensorType(xshape, dtype)) y = relay.nn.lrn(x, size=size, axis=1, alpha=1.0, beta=1.0, bias=1.0) From f9c70c56b7396a9a9b4b7e81dd11f5e38dcd93e3 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 28 Jun 2021 22:01:21 +0000 Subject: [PATCH 07/12] * fixed formatting issues --- tests/python/contrib/test_onnx.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/tests/python/contrib/test_onnx.py b/tests/python/contrib/test_onnx.py index 32ee4de5f1f8..b90e63006e27 100644 --- a/tests/python/contrib/test_onnx.py +++ b/tests/python/contrib/test_onnx.py @@ -517,6 +517,7 @@ def verify_expand_dims(dshape, axis, num_newaxis, dtype="float32"): def test_lrn(): """LRN unit test.""" + def verify_lrn(xshape, size, dtype="float32"): x = relay.var("x", relay.ty.TensorType(xshape, dtype)) y = relay.nn.lrn(x, size=size, axis=1, alpha=1.0, beta=1.0, bias=1.0) @@ -533,6 +534,7 @@ def verify_lrn(xshape, size, dtype="float32"): def test_sigmoid(): """Sigmoid unit test.""" + def verify_sigmoid(dshape, dtype="float32"): x = relay.var("x", relay.ty.TensorType(dshape, dtype)) y = relay.sigmoid(x) @@ -540,7 +542,7 @@ def verify_sigmoid(dshape, dtype="float32"): x_data = np.random.uniform(size=dshape).astype(dtype) verify_results(func, [x_data], "test_sigmoid", rtol=1e-4, atol=1e-4) - isize = [(1,3,480,640), (1,3,224,224)] + isize = [(1, 3, 480, 640), (1, 3, 224, 224)] for i in isize: verify_sigmoid(i) @@ -548,6 +550,7 @@ def verify_sigmoid(dshape, dtype="float32"): def test_copy(): """Copy unit test.""" + def verify_copy(dshape, dtype="float32"): x = relay.var("x", relay.ty.TensorType(dshape, dtype)) y = relay.copy(x) @@ -555,7 +558,7 @@ def verify_copy(dshape, dtype="float32"): x_data = np.random.uniform(size=dshape).astype(dtype) verify_results(func, [x_data], "test_copy", rtol=1e-4, atol=1e-4) - isize = [(1,3,480,640), (1,3,224,224)] + isize = [(1, 3, 480, 640), (1, 3, 224, 224)] for i in isize: verify_copy(i) @@ -563,6 +566,7 @@ def verify_copy(dshape, dtype="float32"): def test_round(): """Round unit test.""" + def verify_round(dshape, dtype="float32"): x = relay.var("x", relay.ty.TensorType(dshape, dtype)) y = relay.round(x) @@ -570,7 +574,7 @@ def 
verify_round(dshape, dtype="float32"): x_data = np.random.uniform(size=dshape).astype(dtype) verify_results(func, [x_data], "test_round", rtol=1e-4, atol=1e-4) - isize = [(1,3,480,640), (1,3,224,224)] + isize = [(1, 3, 480, 640), (1, 3, 224, 224)] for i in isize: verify_round(i) @@ -578,6 +582,7 @@ def verify_round(dshape, dtype="float32"): def test_cast(): """Cast unit test.""" + def verify_cast(dshape, dtype): x = relay.var("x", relay.ty.TensorType(dshape, "float32")) y = relay.cast(x, dtype) @@ -585,8 +590,8 @@ def verify_cast(dshape, dtype): x_data = np.random.uniform(size=dshape).astype("float32") verify_results(func, [x_data], "test_cast", rtol=1e-4, atol=1e-4) - isize = [(1,3,480,640), (1,3,224,224)] - out_dtypes = ['int8', 'int16', 'uint8', 'uint16'] + isize = [(1, 3, 480, 640), (1, 3, 224, 224)] + out_dtypes = ["int8", "int16", "uint8", "uint16"] for i in isize: for o_dtype in out_dtypes: From 02b6e858853e8e1250757274dfdf6f158c659003 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 28 Jun 2021 22:05:55 +0000 Subject: [PATCH 08/12] * fixed formatting issue in onnx.py --- python/tvm/contrib/target/onnx.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/python/tvm/contrib/target/onnx.py b/python/tvm/contrib/target/onnx.py index 8264e4cc407e..e18448ad61e0 100644 --- a/python/tvm/contrib/target/onnx.py +++ b/python/tvm/contrib/target/onnx.py @@ -640,9 +640,7 @@ class Cast(OpConverter): @classmethod def convert_attributes(cls, attrs): - return { - 'to': getattr(TensorProto, attrs.dtype.upper()) - } + return {"to": getattr(TensorProto, attrs.dtype.upper())} relay_to_onnx_op_mapping = { From ee528176a4d32063f360c10e514b85d740ec1fc1 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 29 Jun 2021 18:05:09 +0000 Subject: [PATCH 09/12] [Relay to Onnx conversion][Conv2d Transpose] * Added support for conv2d transpose operator * Added unit test case. Unit test is similar to the conv2d unit test. 
--- python/tvm/contrib/target/onnx.py | 15 +++++++++ tests/python/contrib/test_onnx.py | 52 +++++++++++++++++++++++++++++++ 2 files changed, 67 insertions(+) diff --git a/python/tvm/contrib/target/onnx.py b/python/tvm/contrib/target/onnx.py index e18448ad61e0..8e8b24e2c798 100644 --- a/python/tvm/contrib/target/onnx.py +++ b/python/tvm/contrib/target/onnx.py @@ -139,6 +139,20 @@ def convert_attributes(cls, attrs): } +class ConvTranspose(OpConverter): + """Operator converter for ConvTranspose.""" + + @classmethod + def convert_attributes(cls, attrs): + return { + "group": attrs.get_int("groups"), + "pads": attrs.get_int_tuple("padding"), + "strides": attrs.get_int_tuple("strides"), + "dilations": attrs.get_int_tuple("dilation"), + "kernel_shape": attrs.get_int_tuple("kernel_size"), + "output_padding": attrs.get_int_tuple("output_padding"), + } + class MaxPool(OpConverter): """Operator converter for MaxPool.""" @@ -646,6 +660,7 @@ def convert_attributes(cls, attrs): relay_to_onnx_op_mapping = { "reshape": Reshape, "nn.conv2d": Conv, + "nn.conv2d_transpose": ConvTranspose, "add": rename("Add"), "nn.relu": rename("Relu"), "transpose": Transpose, diff --git a/tests/python/contrib/test_onnx.py b/tests/python/contrib/test_onnx.py index b90e63006e27..b85519aa3cca 100644 --- a/tests/python/contrib/test_onnx.py +++ b/tests/python/contrib/test_onnx.py @@ -174,6 +174,57 @@ def verify_conv2d( verify_conv2d("float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(4, 4)) +def test_conv2d_transpose(): + """Conv2d_Transpose unit tests.""" + def verify_conv2d_transpose( + dtype, scale, dshape, kshape, + padding=(1, 1), groups=1, dilation=(1, 1), **attrs + ): + x = relay.var("x", shape=dshape, dtype=dtype) + w = relay.var("w", shape=kshape, dtype=dtype) + y = relay.nn.conv2d_transpose(x, w, padding=padding, + dilation=dilation, groups=groups, **attrs) + func = relay.Function([x, w], y) + data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) + kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) + verify_results(func, [data, kernel], "test_conv2d_transpose", rtol=1e-5, atol=1e-5) + + dshape = (1, 3, 224, 224) + kshape = (3, 10, 3, 3) + verify_conv2d_transpose( + "float32", 1, dshape, kshape, padding=(1, 1), + channels=10, kernel_size=(3, 3) + ) + + dshape = (1, 3, 224, 224) + kshape = (3, 10, 3, 3) + verify_conv2d_transpose( + "float32", 1, dshape, kshape, padding=(2, 2), + channels=10, kernel_size=(3, 3) + ) + + dshape = (1, 3, 18, 18) + kshape = (3, 10, 2, 2) + verify_conv2d_transpose( + "float32", 1, dshape, kshape, padding=(2, 2), + channels=10, kernel_size=(2, 2), dilation=(1, 1), + ) + + dshape = (1, 3, 18, 18) + kshape = (3, 10, 4, 4) + verify_conv2d_transpose( + "float32", 1, dshape, kshape, padding=(1, 1), + channels=10, kernel_size=(4, 4) + ) + + dshape = (1, 3, 18, 18) + kshape = (3, 10, 4, 4) + verify_conv2d_transpose( + "float32", 1, dshape, kshape, padding=(1, 1), + channels=10, kernel_size=(4, 4) + ) + + def test_reshape(): def verify_reshape(shape, newshape): x = relay.var("x", relay.TensorType(shape, "float32")) @@ -602,6 +653,7 @@ def verify_cast(dshape, dtype): test_add() test_bias_add() test_conv2d() + test_conv2d_transpose() test_reshape() test_transpose() test_dense() From 3e60da825ec551c9c5841da08f9d8f588ca1ac32 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 29 Jun 2021 19:00:19 +0000 Subject: [PATCH 10/12] * Fixed formatting errors --- python/tvm/contrib/target/onnx.py | 1 + tests/python/contrib/test_onnx.py | 28 ++++++++++++++++------------ 
2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/python/tvm/contrib/target/onnx.py b/python/tvm/contrib/target/onnx.py index 8e8b24e2c798..10a428120606 100644 --- a/python/tvm/contrib/target/onnx.py +++ b/python/tvm/contrib/target/onnx.py @@ -153,6 +153,7 @@ def convert_attributes(cls, attrs): "output_padding": attrs.get_int_tuple("output_padding"), } + class MaxPool(OpConverter): """Operator converter for MaxPool.""" diff --git a/tests/python/contrib/test_onnx.py b/tests/python/contrib/test_onnx.py index b85519aa3cca..1f95dae882a3 100644 --- a/tests/python/contrib/test_onnx.py +++ b/tests/python/contrib/test_onnx.py @@ -176,14 +176,16 @@ def verify_conv2d( def test_conv2d_transpose(): """Conv2d_Transpose unit tests.""" + def verify_conv2d_transpose( dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs ): x = relay.var("x", shape=dshape, dtype=dtype) w = relay.var("w", shape=kshape, dtype=dtype) - y = relay.nn.conv2d_transpose(x, w, padding=padding, - dilation=dilation, groups=groups, **attrs) + y = relay.nn.conv2d_transpose( + x, w, padding=padding, dilation=dilation, groups=groups, **attrs + ) func = relay.Function([x, w], y) data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) @@ -192,36 +194,38 @@ def verify_conv2d_transpose( dshape = (1, 3, 224, 224) kshape = (3, 10, 3, 3) verify_conv2d_transpose( - "float32", 1, dshape, kshape, padding=(1, 1), - channels=10, kernel_size=(3, 3) + "float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3, 3) ) dshape = (1, 3, 224, 224) kshape = (3, 10, 3, 3) verify_conv2d_transpose( - "float32", 1, dshape, kshape, padding=(2, 2), - channels=10, kernel_size=(3, 3) + "float32", 1, dshape, kshape, padding=(2, 2), channels=10, kernel_size=(3, 3) ) dshape = (1, 3, 18, 18) kshape = (3, 10, 2, 2) verify_conv2d_transpose( - "float32", 1, dshape, kshape, padding=(2, 2), - channels=10, kernel_size=(2, 2), dilation=(1, 1), + "float32", + 1, + dshape, + kshape, + padding=(2, 2), + channels=10, + kernel_size=(2, 2), + dilation=(1, 1), ) dshape = (1, 3, 18, 18) kshape = (3, 10, 4, 4) verify_conv2d_transpose( - "float32", 1, dshape, kshape, padding=(1, 1), - channels=10, kernel_size=(4, 4) + "float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(4, 4) ) dshape = (1, 3, 18, 18) kshape = (3, 10, 4, 4) verify_conv2d_transpose( - "float32", 1, dshape, kshape, padding=(1, 1), - channels=10, kernel_size=(4, 4) + "float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(4, 4) ) From beb305ef0f2e5aea0fd9b02b9699d965107e156e Mon Sep 17 00:00:00 2001 From: root Date: Tue, 29 Jun 2021 19:37:32 +0000 Subject: [PATCH 11/12] [Relay to Onnx][Pad] * Fixed issue in Pad conversion * Updated unit test * Known issue: Relay pad defn is missing pad_value arg --- python/tvm/contrib/target/onnx.py | 14 ++++++++++++-- tests/python/contrib/test_onnx.py | 3 +++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/python/tvm/contrib/target/onnx.py b/python/tvm/contrib/target/onnx.py index 10a428120606..590148e9cbf8 100644 --- a/python/tvm/contrib/target/onnx.py +++ b/python/tvm/contrib/target/onnx.py @@ -347,7 +347,12 @@ def convert_attributes(cls, attrs): after.append(axis_pads[1]) pads = before + after pads = numpy.asarray(pads, dtype=pads[0].dtype) - return {"pads": pads, "mode": attrs.get_str("pad_mode"), "constant_value": attrs.pad_value} + return { + "pads": pads, + "mode": attrs.get_str("pad_mode"), + 
#"constant_value": float(attrs.get_str("pad_value")), + "constant_value": 0, + } @classmethod def convert(cls, node_entry, model_container, node_dict): @@ -367,7 +372,12 @@ def convert(cls, node_entry, model_container, node_dict): add_input(value, name, "value", model_container), ] - node = onnx.helper.make_node(cls.__name__, input_names, node_entry["output_names"]) + node = onnx.helper.make_node( + cls.__name__, + input_names, + node_entry["output_names"], + mode=attrs["mode"] + ) model_container.add_nodes([node]) diff --git a/tests/python/contrib/test_onnx.py b/tests/python/contrib/test_onnx.py index 1f95dae882a3..12b44c758c00 100644 --- a/tests/python/contrib/test_onnx.py +++ b/tests/python/contrib/test_onnx.py @@ -27,6 +27,7 @@ import tvm from tvm import relay from tvm.contrib.target.onnx import to_onnx +from tvm.relay.testing import run_infer_type def func_to_onnx(func, name): @@ -325,12 +326,14 @@ def verify_batch_norm(axis=1): def test_pad(): + """Pad unit test.""" def verify_pad(): for dtype in ["float16", "float32"]: dshape = (4, 10, 7, 7) x = relay.var("x", shape=dshape, dtype=dtype) y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4))) func = relay.Function([x], y) + func = run_infer_type(func) x_data = np.random.uniform(size=dshape).astype(dtype) verify_results(func, [x_data], "test_pad", rtol=1e-5, atol=1e-5) From f67214ac3c3bbcd01bdb4bb99822d08f74d20b37 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 29 Jun 2021 20:35:25 +0000 Subject: [PATCH 12/12] [Relay to Onnx conversion][Pad] * Changed pad_value to input instead of attrs * See PR: https://github.com/apache/tvm/pull/7860 * Fixed some formatting errors --- python/tvm/contrib/target/onnx.py | 14 ++++---------- tests/python/contrib/test_onnx.py | 19 +++++++++---------- 2 files changed, 13 insertions(+), 20 deletions(-) diff --git a/python/tvm/contrib/target/onnx.py b/python/tvm/contrib/target/onnx.py index 590148e9cbf8..e442c806a2f3 100644 --- a/python/tvm/contrib/target/onnx.py +++ b/python/tvm/contrib/target/onnx.py @@ -350,8 +350,6 @@ def convert_attributes(cls, attrs): return { "pads": pads, "mode": attrs.get_str("pad_mode"), - #"constant_value": float(attrs.get_str("pad_value")), - "constant_value": 0, } @classmethod @@ -363,20 +361,16 @@ def convert(cls, node_entry, model_container, node_dict): attrs = cls.convert_attributes(node_entry["relay_node"].attrs) name = node_entry["name"] - data = numpy.asarray(attrs["pads"], dtype=attrs["pads"][0].dtype).astype(numpy.int64) - value = numpy.dtype(node_entry["types"][0].dtype).type(attrs["constant_value"]) + pad_data = numpy.asarray(attrs["pads"], dtype=attrs["pads"][0].dtype).astype(numpy.int64) input_names = [ node_entry["input_names"][0], - add_input(data, name, "pads", model_container), - add_input(value, name, "value", model_container), + add_input(pad_data, name, "pads", model_container), + node_entry["input_names"][1], ] node = onnx.helper.make_node( - cls.__name__, - input_names, - node_entry["output_names"], - mode=attrs["mode"] + cls.__name__, input_names, node_entry["output_names"], mode=attrs["mode"] ) model_container.add_nodes([node]) diff --git a/tests/python/contrib/test_onnx.py b/tests/python/contrib/test_onnx.py index 12b44c758c00..8567f2c814cf 100644 --- a/tests/python/contrib/test_onnx.py +++ b/tests/python/contrib/test_onnx.py @@ -179,8 +179,7 @@ def test_conv2d_transpose(): """Conv2d_Transpose unit tests.""" def verify_conv2d_transpose( - dtype, scale, dshape, kshape, - padding=(1, 1), groups=1, dilation=(1, 1), **attrs + dtype, scale, dshape, kshape, 
padding=(1, 1), groups=1, dilation=(1, 1), **attrs ): x = relay.var("x", shape=dshape, dtype=dtype) w = relay.var("w", shape=kshape, dtype=dtype) @@ -327,15 +326,15 @@ def verify_batch_norm(axis=1): def test_pad(): """Pad unit test.""" + def verify_pad(): - for dtype in ["float16", "float32"]: - dshape = (4, 10, 7, 7) - x = relay.var("x", shape=dshape, dtype=dtype) - y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4))) - func = relay.Function([x], y) - func = run_infer_type(func) - x_data = np.random.uniform(size=dshape).astype(dtype) - verify_results(func, [x_data], "test_pad", rtol=1e-5, atol=1e-5) + dshape = (4, 10, 7, 7) + x = relay.var("x", shape=dshape, dtype="int32") + y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4))) + func = relay.Function([x], y) + func = run_infer_type(func) + x_data = np.random.randint(low=-255, high=255, size=dshape).astype(np.int32) + verify_results(func, [x_data], "test_pad", rtol=1e-5, atol=1e-5) verify_pad()
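
A minimal usage sketch, not part of the patches above, showing how the converters added in this series might be exercised end to end. It assumes that to_onnx(relay_ir, params, name) from tvm.contrib.target.onnx accepts a Relay function and returns an ONNX ModelProto, as the func_to_onnx helper imported in tests/python/contrib/test_onnx.py suggests; the input shape, the "sigmoid_cast_example" name, and the float16 target dtype are arbitrary illustration choices, not values taken from the patches.

import onnx
from tvm import relay
from tvm.contrib.target.onnx import to_onnx

# Build a small Relay function that chains two of the newly mapped ops:
# relay.sigmoid -> ONNX Sigmoid and relay.cast -> ONNX Cast.
dshape = (1, 3, 224, 224)
x = relay.var("x", shape=dshape, dtype="float32")
y = relay.cast(relay.sigmoid(x), "float16")
func = relay.Function([x], y)

# Convert the Relay function to an ONNX model (no bound params, hence the
# empty dict) and run the ONNX structural checker on the result.
onnx_model = to_onnx(func, {}, "sigmoid_cast_example")
onnx.checker.check_model(onnx_model)

# The exported graph is expected to contain Sigmoid and Cast nodes.
print([node.op_type for node in onnx_model.graph.node])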