Skip to content
46 changes: 40 additions & 6 deletions python/tvm/contrib/target/onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
import onnx
import onnx.utils
from onnx import numpy_helper, OperatorSetIdProto, defs
from onnx import TensorProto
import tvm
from tvm import relay
import tvm._ffi
Expand Down Expand Up @@ -138,6 +139,21 @@ def convert_attributes(cls, attrs):
}


class ConvTranspose(OpConverter):
    """Operator converter for ConvTranspose."""

    @classmethod
    def convert_attributes(cls, attrs):
        """Translate Relay conv2d_transpose attrs into ONNX ConvTranspose attrs."""
        # "group" is the only scalar attribute; the rest are int tuples whose
        # Relay names differ slightly from their ONNX counterparts.
        onnx_attrs = {"group": attrs.get_int("groups")}
        tuple_attr_names = (
            ("pads", "padding"),
            ("strides", "strides"),
            ("dilations", "dilation"),
            ("kernel_shape", "kernel_size"),
            ("output_padding", "output_padding"),
        )
        for onnx_name, relay_name in tuple_attr_names:
            onnx_attrs[onnx_name] = attrs.get_int_tuple(relay_name)
        return onnx_attrs


class MaxPool(OpConverter):
"""Operator converter for MaxPool."""

Expand All @@ -147,6 +163,7 @@ def convert_attributes(cls, attrs):
"pads": attrs.get_int_tuple("padding"),
"strides": attrs.get_int_tuple("strides"),
"kernel_shape": attrs.get_int_tuple("pool_size"),
"ceil_mode": 1 if attrs.ceil_mode else 0,
}


Expand Down Expand Up @@ -330,7 +347,10 @@ def convert_attributes(cls, attrs):
after.append(axis_pads[1])
pads = before + after
pads = numpy.asarray(pads, dtype=pads[0].dtype)
return {"pads": pads, "mode": attrs.get_str("pad_mode"), "constant_value": attrs.pad_value}
return {
"pads": pads,
"mode": attrs.get_str("pad_mode"),
}

@classmethod
def convert(cls, node_entry, model_container, node_dict):
Expand All @@ -341,16 +361,17 @@ def convert(cls, node_entry, model_container, node_dict):
attrs = cls.convert_attributes(node_entry["relay_node"].attrs)

name = node_entry["name"]
data = numpy.asarray(attrs["pads"], dtype=attrs["pads"][0].dtype).astype(numpy.int64)
value = numpy.dtype(node_entry["types"][0].dtype).type(attrs["constant_value"])
pad_data = numpy.asarray(attrs["pads"], dtype=attrs["pads"][0].dtype).astype(numpy.int64)

input_names = [
node_entry["input_names"][0],
add_input(data, name, "pads", model_container),
add_input(value, name, "value", model_container),
add_input(pad_data, name, "pads", model_container),
node_entry["input_names"][1],
]

node = onnx.helper.make_node(cls.__name__, input_names, node_entry["output_names"])
node = onnx.helper.make_node(
cls.__name__, input_names, node_entry["output_names"], mode=attrs["mode"]
)
model_container.add_nodes([node])


Expand Down Expand Up @@ -633,9 +654,18 @@ def convert_attributes(cls, attrs):
return {"alpha": attrs.alpha, "beta": attrs.beta, "bias": attrs.bias, "size": attrs.size}


class Cast(OpConverter):
    """Operator converter for Cast."""

    # numpy dtype names whose upper-cased form does not match the
    # TensorProto.DataType enum name ("float32".upper() is not a member;
    # ONNX calls these FLOAT and DOUBLE).
    _NP_TO_TENSORPROTO_NAME = {"float32": "FLOAT", "float64": "DOUBLE"}

    @classmethod
    def convert_attributes(cls, attrs):
        """Map a Relay cast target dtype string to an ONNX TensorProto enum.

        attrs.dtype is a numpy-style dtype name ("int8", "uint16", ...).
        Most names map to TensorProto by simple upper-casing; float32 and
        float64 need an explicit translation or getattr would raise
        AttributeError.
        """
        dtype = attrs.dtype
        enum_name = cls._NP_TO_TENSORPROTO_NAME.get(dtype, dtype.upper())
        return {"to": getattr(TensorProto, enum_name)}


relay_to_onnx_op_mapping = {
"reshape": Reshape,
"nn.conv2d": Conv,
"nn.conv2d_transpose": ConvTranspose,
"add": rename("Add"),
"nn.relu": rename("Relu"),
"transpose": Transpose,
Expand Down Expand Up @@ -667,6 +697,10 @@ def convert_attributes(cls, attrs):
"clip": Clip,
"expand_dims": Expand,
"nn.lrn": LRN,
"sigmoid": rename("Sigmoid"),
"copy": rename("Identity"),
"round": rename("Round"),
"cast": Cast,
}


Expand Down
144 changes: 137 additions & 7 deletions tests/python/contrib/test_onnx.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
import tvm
from tvm import relay
from tvm.contrib.target.onnx import to_onnx
from tvm.relay.testing import run_infer_type


def func_to_onnx(func, name):
Expand Down Expand Up @@ -174,6 +175,60 @@ def verify_conv2d(
verify_conv2d("float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(4, 4))


def test_conv2d_transpose():
    """Conv2d_Transpose unit tests."""

    def verify_conv2d_transpose(
        dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs
    ):
        """Build a conv2d_transpose Relay func and check the exported ONNX model."""
        x = relay.var("x", shape=dshape, dtype=dtype)
        w = relay.var("w", shape=kshape, dtype=dtype)
        y = relay.nn.conv2d_transpose(
            x, w, padding=padding, dilation=dilation, groups=groups, **attrs
        )
        func = relay.Function([x, w], y)
        data = np.random.uniform(-scale, scale, size=dshape).astype(dtype)
        kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype)
        verify_results(func, [data, kernel], "test_conv2d_transpose", rtol=1e-5, atol=1e-5)

    # (dshape, kshape, padding, kernel_size) configurations. The original
    # listed the (4, 4)-kernel case twice verbatim; it is run only once here.
    configs = [
        ((1, 3, 224, 224), (3, 10, 3, 3), (1, 1), (3, 3)),
        ((1, 3, 224, 224), (3, 10, 3, 3), (2, 2), (3, 3)),
        ((1, 3, 18, 18), (3, 10, 2, 2), (2, 2), (2, 2)),
        ((1, 3, 18, 18), (3, 10, 4, 4), (1, 1), (4, 4)),
    ]
    for dshape, kshape, padding, ksize in configs:
        verify_conv2d_transpose(
            "float32", 1, dshape, kshape, padding=padding, channels=10, kernel_size=ksize
        )


def test_reshape():
def verify_reshape(shape, newshape):
x = relay.var("x", relay.TensorType(shape, "float32"))
Expand Down Expand Up @@ -270,14 +325,16 @@ def verify_batch_norm(axis=1):


def test_pad():
"""Pad unit test."""

def verify_pad():
for dtype in ["float16", "float32"]:
dshape = (4, 10, 7, 7)
x = relay.var("x", shape=dshape, dtype=dtype)
y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4)))
func = relay.Function([x], y)
x_data = np.random.uniform(size=dshape).astype(dtype)
verify_results(func, [x_data], "test_pad", rtol=1e-5, atol=1e-5)
dshape = (4, 10, 7, 7)
x = relay.var("x", shape=dshape, dtype="int32")
y = relay.nn.pad(x, ((1, 1), (2, 2), (3, 3), (4, 4)))
func = relay.Function([x], y)
func = run_infer_type(func)
x_data = np.random.randint(low=-255, high=255, size=dshape).astype(np.int32)
verify_results(func, [x_data], "test_pad", rtol=1e-5, atol=1e-5)

verify_pad()

Expand Down Expand Up @@ -516,6 +573,8 @@ def verify_expand_dims(dshape, axis, num_newaxis, dtype="float32"):


def test_lrn():
"""LRN unit test."""

def verify_lrn(xshape, size, dtype="float32"):
x = relay.var("x", relay.ty.TensorType(xshape, dtype))
y = relay.nn.lrn(x, size=size, axis=1, alpha=1.0, beta=1.0, bias=1.0)
Expand All @@ -530,10 +589,77 @@ def verify_lrn(xshape, size, dtype="float32"):
verify_lrn(i, s)


def test_sigmoid():
    """Sigmoid unit test."""

    def verify_sigmoid(dshape, dtype="float32"):
        # Export sigmoid over a single input tensor and compare ONNX vs Relay.
        inp = relay.var("x", relay.ty.TensorType(dshape, dtype))
        func = relay.Function([inp], relay.sigmoid(inp))
        data = np.random.uniform(size=dshape).astype(dtype)
        verify_results(func, [data], "test_sigmoid", rtol=1e-4, atol=1e-4)

    for shape in [(1, 3, 480, 640), (1, 3, 224, 224)]:
        verify_sigmoid(shape)


def test_copy():
    """Copy unit test."""

    def verify_copy(dshape, dtype="float32"):
        # relay.copy maps to ONNX Identity; output must equal the input.
        inp = relay.var("x", relay.ty.TensorType(dshape, dtype))
        func = relay.Function([inp], relay.copy(inp))
        data = np.random.uniform(size=dshape).astype(dtype)
        verify_results(func, [data], "test_copy", rtol=1e-4, atol=1e-4)

    for shape in [(1, 3, 480, 640), (1, 3, 224, 224)]:
        verify_copy(shape)


def test_round():
    """Round unit test."""

    def verify_round(dshape, dtype="float32"):
        # Round element-wise and compare the exported ONNX model's output.
        inp = relay.var("x", relay.ty.TensorType(dshape, dtype))
        func = relay.Function([inp], relay.round(inp))
        data = np.random.uniform(size=dshape).astype(dtype)
        verify_results(func, [data], "test_round", rtol=1e-4, atol=1e-4)

    for shape in [(1, 3, 480, 640), (1, 3, 224, 224)]:
        verify_round(shape)


def test_cast():
    """Cast unit test."""

    def verify_cast(dshape, dtype):
        # Input is always float32; `dtype` is the cast target.
        inp = relay.var("x", relay.ty.TensorType(dshape, "float32"))
        func = relay.Function([inp], relay.cast(inp, dtype))
        data = np.random.uniform(size=dshape).astype("float32")
        verify_results(func, [data], "test_cast", rtol=1e-4, atol=1e-4)

    for shape in [(1, 3, 480, 640), (1, 3, 224, 224)]:
        for target_dtype in ("int8", "int16", "uint8", "uint16"):
            verify_cast(shape, target_dtype)


if __name__ == "__main__":
test_add()
test_bias_add()
test_conv2d()
test_conv2d_transpose()
test_reshape()
test_transpose()
test_dense()
Expand All @@ -554,3 +680,7 @@ def verify_lrn(xshape, size, dtype="float32"):
test_clip()
test_expand_dims()
test_lrn()
test_sigmoid()
test_copy()
test_round()
test_cast()