From 0f9e667c3881c584b1a9b33100be07d64fe7bc80 Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Tue, 15 Oct 2019 17:07:53 -0700 Subject: [PATCH 01/23] Added slice v10 --- python/tvm/relay/frontend/onnx.py | 57 +++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index b007b41e61fe..e446422176ef 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -654,6 +654,7 @@ class Slice(OnnxOpConverter): """ @classmethod def _impl_v1(cls, inputs, attr, params): + print(attr) if isinstance(attr['starts'], int): attr['starts'] = (attr['starts'],) attr['ends'] = (attr['ends'],) @@ -689,6 +690,34 @@ def _impl_v1(cls, inputs, attr, params): 'ends': 'end'}, ignores=['axes'])(inputs, attr) + @classmethod + def _impl_v10(cls, inputs, attr, params): + starts = params[get_name(inputs[1])].asnumpy() + ends = params[get_name(inputs[2])].asnumpy() + + # Update the starts and ends according to axes if required. + if len(inputs) >= 4: + axes = params[get_name(inputs[3])].asnumpy() + + if (max(axes + 1) != len(axes)): + new_axes = [] + new_starts = [] + new_ends = [] + pop_index = 0 + for i in range(max(axes) + 1): + if i in axes: + new_axes.append(i) + new_starts.append(starts[pop_index]) + new_ends.append(ends[pop_index]) + pop_index += 1 + else: + new_axes.append(i) + new_starts.append(0) + new_ends.append(np.iinfo(np.int32).max) + starts = new_starts + ends = new_ends + return _op.strided_slice(inputs[0], begin=starts, end=ends) + class Gather(OnnxOpConverter): """ Operator converter for Gather. """ @@ -847,6 +876,18 @@ def _impl_v1(cls, inputs, attr, params): attr['axis'] = 1 return AttrCvt('softmax', transforms={'axis': ('axis', 1)})(inputs, attr, params) +class OneHot(OnnxOpConverter): + """ Operator converter for OneHot. + """ + @classmethod + def _impl_v9(cls, inputs, attr, params): + print(inputs) + # set default value when axis is not set in the model + if 'axis' not in attr: + attr['axis'] = -1 + print("UHOH ONEHOT") + return AttrCvt('one_hot', transforms={'axis': ('axis', 1)})(inputs, attr, params) + class ConstantFill(OnnxOpConverter): """ Operator converter for ConstantFill. """ @@ -875,6 +916,20 @@ def _impl_v1(cls, inputs, attr, params): shape = shape + attr.pop('extra_shape') return _op.full(inputs[0], shape) +class ConstantOfShape(OnnxOpConverter): + """ Operator converter for ConstantOfShape. + """ + @classmethod + def _impl_v9(cls, inputs, attr, params): + shape = params[get_name(inputs[0])].asnumpy() + if 'value' in attr: + value = attr.pop('value') + dtype = value.dtype + else: + value = 0 + dtype = 'float32' + return _op.full(value, shape=shape, dtype=dtype) + class Sign(OnnxOpConverter): """ Operator converter for Sign. 
""" @@ -948,6 +1003,7 @@ def _get_convert_map(opset): 'ScaledTanh': ScaledTanh.get_converter(opset), 'ParametricSoftplus': ParametricSoftPlus.get_converter(opset), 'ConstantFill': ConstantFill.get_converter(opset), + 'ConstantOfShape': ConstantOfShape.get_converter(opset), # 'GivenTensorFill' 'FC': AttrCvt('dense', ignores=['axis', 'axis_w']), 'Scale': Scale.get_converter(opset), @@ -1001,6 +1057,7 @@ def _get_convert_map(opset): # softmax default axis is different in onnx 'Softmax': Softmax.get_converter(opset), 'LogSoftmax': AttrCvt('log_softmax', {'axis': ('axis', 1)}), + 'OneHot': OneHot.get_converter(opset), # 'Hardmax' 'Softsign': Softsign.get_converter(opset), 'SoftPlus': SoftPlus.get_converter(opset), From d7d0de39862b1e27428bff0f698afc4b5d70840d Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Wed, 16 Oct 2019 11:37:27 -0700 Subject: [PATCH 02/23] Added constantofshape operation and small refactor. --- python/tvm/relay/frontend/common.py | 16 ++++++++++++++++ python/tvm/relay/frontend/onnx.py | 16 ++++++---------- python/tvm/relay/frontend/tensorflow.py | 14 +------------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py index d4b9162d6f3d..8fa8b5bd0f83 100644 --- a/python/tvm/relay/frontend/common.py +++ b/python/tvm/relay/frontend/common.py @@ -24,6 +24,7 @@ from .. import module as _module from .. import transform as _transform from .. import op as _op +from .. import analysis class RequiredAttr(object): @@ -472,6 +473,21 @@ def infer_channels(inputs, transpose=False): out_shapes = [get_const_tuple(out_type.checked_type.shape)] channels = out_shapes[0][0] if not transpose else out_shapes[0][1] return channels + + +def infer_value(input_val, params): + from tvm.contrib import graph_runtime + # Check that all free variables have associated parameters. + assert all(var.name_hint in params.keys() for var in analysis.free_vars( + input_val)), "All inputs to infer must be available in params." + func = _expr.Function(analysis.free_vars(input_val), input_val) + with tvm.relay.build_config(opt_level=0): + graph, lib, params = tvm.relay.build(func, target="llvm", params=params) + ctx = tvm.context("llvm", 0) + m = graph_runtime.create(graph, lib, ctx) + m.set_input(**params) + m.run() + return m.get_output(0) def new_var(name_hint, diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index e446422176ef..7314d6db80a4 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -29,6 +29,7 @@ from .. import op as _op from .common import AttrCvt, Renamer from .common import get_relay_op, new_var, infer_shape, infer_channels, get_name +from onnx.numpy_helper import to_array __all__ = ['from_onnx'] @@ -921,14 +922,14 @@ class ConstantOfShape(OnnxOpConverter): """ @classmethod def _impl_v9(cls, inputs, attr, params): - shape = params[get_name(inputs[0])].asnumpy() if 'value' in attr: - value = attr.pop('value') - dtype = value.dtype + np_value = to_array(attr.pop('value'))[0] + value = _expr.const(np_value) + dtype = np_value.dtype.name else: - value = 0 + value = _expr.const(0) dtype = 'float32' - return _op.full(value, shape=shape, dtype=dtype) + return _op.full_like(_op.cast(inputs[0], dtype), value) class Sign(OnnxOpConverter): """ Operator converter for Sign. 
@@ -1271,11 +1272,6 @@ def _parse_dtype(self, value_proto, dtype): def _parse_array(self, tensor_proto): """Grab data in TensorProto and convert to numpy array.""" - try: - from onnx.numpy_helper import to_array - except ImportError as e: - raise ImportError( - "Unable to import onnx which is required {}".format(e)) np_array = to_array(tensor_proto).reshape(tuple(tensor_proto.dims)) return _nd.array(np_array) diff --git a/python/tvm/relay/frontend/tensorflow.py b/python/tvm/relay/frontend/tensorflow.py index bfa3431ba29e..2ef8d15fe291 100644 --- a/python/tvm/relay/frontend/tensorflow.py +++ b/python/tvm/relay/frontend/tensorflow.py @@ -39,22 +39,10 @@ from .common import infer_type as _infer_type from .common import infer_shape as _infer_shape from .common import infer_channels as _infer_channels +from .common import infer_value as _infer_value __all__ = ['from_tensorflow'] -def _infer_value(input_val, params): - from tvm.contrib import graph_runtime - # Check that all free variables have associated parameters. - assert all(var.name_hint in params.keys() for var in analysis.free_vars( - input_val)), "All inputs to infer must be available in params." - func = _expr.Function(analysis.free_vars(input_val), input_val) - with tvm.relay.build_config(opt_level=0): - graph, lib, params = tvm.relay.build(func, target="llvm", params=params) - ctx = tvm.context("llvm", 0) - m = graph_runtime.create(graph, lib, ctx) - m.set_input(**params) - m.run() - return m.get_output(0) def _get_pad_pair(input1d, kernel1d, stride1d): if input1d % stride1d == 0: From 019190c2b1d926ae4f59071a9474ca9685b683fa Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Thu, 17 Oct 2019 11:26:22 -0700 Subject: [PATCH 03/23] Finished one_hot implementation. --- python/tvm/relay/frontend/onnx.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 7314d6db80a4..14eb720743ed 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -28,7 +28,7 @@ from .. import module as _module from .. import op as _op from .common import AttrCvt, Renamer -from .common import get_relay_op, new_var, infer_shape, infer_channels, get_name +from .common import get_relay_op, new_var, infer_shape, infer_channels, infer_type, infer_value, get_name from onnx.numpy_helper import to_array __all__ = ['from_onnx'] @@ -877,17 +877,24 @@ def _impl_v1(cls, inputs, attr, params): attr['axis'] = 1 return AttrCvt('softmax', transforms={'axis': ('axis', 1)})(inputs, attr, params) + class OneHot(OnnxOpConverter): """ Operator converter for OneHot. """ @classmethod def _impl_v9(cls, inputs, attr, params): - print(inputs) + # Extract relay one_hot inputs. + indices, depth, values = inputs + # Split onnx on off values into two separate expressions. + on_value, off_value = _op.split(values, 2) + # Extract the datatype of the output from on_value. + dtype = infer_type(on_value).checked_type.dtype + # Convert depth into an integer. + depth = infer_value(depth, params).asnumpy()[0] # set default value when axis is not set in the model if 'axis' not in attr: attr['axis'] = -1 - print("UHOH ONEHOT") - return AttrCvt('one_hot', transforms={'axis': ('axis', 1)})(inputs, attr, params) + return _op.one_hot(indices, on_value, off_value, depth, attr['axis'], dtype=dtype) class ConstantFill(OnnxOpConverter): """ Operator converter for ConstantFill. 
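
[Aside, not part of the series: a NumPy reference for the OneHot semantics the converter above targets. ONNX packs the off and on values into a single two-element 'values' input, which the converter unpacks before calling relay's one_hot; indices and depth behave as below.]

    import numpy as np

    def onehot_reference(indices, depth, values, axis=-1):
        # values = [off_value, on_value], per the ONNX OneHot spec.
        off_value, on_value = values
        hot = np.eye(depth, dtype=bool)[indices.reshape(-1)]
        out = np.where(hot, on_value, off_value).reshape(indices.shape + (depth,))
        return np.moveaxis(out, -1, axis) if axis != -1 else out

    print(onehot_reference(np.array([0, 2, 1]), depth=3, values=np.array([0.0, 9.0])))
    # [[9. 0. 0.]
    #  [0. 0. 9.]
    #  [0. 9. 0.]]
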
From 3826216f7ec3209e1da1759c494fa21a9f612e3d Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Fri, 18 Oct 2019 11:38:14 -0700 Subject: [PATCH 04/23] Reshape working across all bert layers. --- python/tvm/relay/frontend/onnx.py | 43 ++++++++++++++----------------- 1 file changed, 20 insertions(+), 23 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 14eb720743ed..db884a375a58 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -427,35 +427,32 @@ class Reshape(OnnxOpConverter): @classmethod def _impl_v1(cls, inputs, attr, params): - if 'shape' in attr: - return _op.reshape(inputs[0], attr['shape']) + return _op.reshape(inputs[0], attr['shape']) + @classmethod + def _impl_v5(cls, inputs, attr, params): if get_name(inputs[1]) in params: shape = tuple(params[inputs[1].name_hint].asnumpy()) out = _op.reshape(inputs[0], shape) else: data, shape = inputs - logging.warning("Constant evaluating Reshape's shape argument, may reduce performance") shape_params = analysis.free_vars(shape) - func = _expr.Function(shape_params, shape) - mod = _module.Module.from_expr(func) - seq = _transform.Sequential([_transform.InferType(), - _transform.FoldConstant(), - _transform.FuseOps(0), - _transform.InferType()]) - with tvm.relay.PassContext(opt_level=2): - mod = seq(mod) - with tvm.relay.build_config(opt_level=0): - ex = tvm.relay.create_executor("debug", mod=mod) - inputs = [] - for sp in shape_params: - if not sp.name_hint in params: - sh = [int(i) for i in sp.type_annotation.shape] - inputs.append( - tvm.nd.array(np.random.rand(*sh).astype('float32'))) - static_shape = ex.evaluate()(*inputs, **params) + fake_params = [] + for sp in shape_params: + if sp.name_hint not in params: + sp_dtype = sp.type_annotation.dtype + sp_shape = [s.value for s in sp.type_annotation.shape] + # Add a fake copy of this parameter for shape inference. + fake_params.append(sp) + params[sp.name_hint] = tvm.nd.array( + np.random.rand(*sp_shape).astype(sp_dtype)) + # Infer value of shape + print("\n\nShape Graph: ", infer_type(shape)) + static_shape = infer_value(shape, params) + # Remove fake params from param dictionary. + for sp in fake_params: + params.pop(sp.name_hint, None) out = _op.reshape(data, newshape=tuple(static_shape.asnumpy())) - return out class Concat(OnnxOpConverter): @@ -655,7 +652,6 @@ class Slice(OnnxOpConverter): """ @classmethod def _impl_v1(cls, inputs, attr, params): - print(attr) if isinstance(attr['starts'], int): attr['starts'] = (attr['starts'],) attr['ends'] = (attr['ends'],) @@ -886,7 +882,8 @@ def _impl_v9(cls, inputs, attr, params): # Extract relay one_hot inputs. indices, depth, values = inputs # Split onnx on off values into two separate expressions. - on_value, off_value = _op.split(values, 2) + on_value, off_value = _op.take( + values, _op.const(0)), _op.take(values, _op.const(1)) # Extract the datatype of the output from on_value. dtype = infer_type(on_value).checked_type.dtype # Convert depth into an integer. From 9442f6aa9819bcc8ecafce116728a75e1bdff8db Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Fri, 18 Oct 2019 15:03:56 -0700 Subject: [PATCH 05/23] Fixed constantofshape and removed code duplication. 
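
[Commit-note sketches, illustrative rather than part of the diff below. First, the deduplicated helper as a frontend would call it; convert_dynamic_reshape is a hypothetical caller mirroring the Reshape converter:]

    from tvm.relay import op as _op
    from tvm.relay.frontend.common import infer_value_simulated

    def convert_dynamic_reshape(data, shape_expr, params):
        # Fold the shape subgraph to a concrete value; free variables missing
        # from params are temporarily simulated with random tensors.
        static_shape = infer_value_simulated(shape_expr, params)
        return _op.reshape(data, newshape=tuple(static_shape.asnumpy().astype('int32')))

[Second, the attribute decoding that the fixed ConstantOfShape keeps relying on: ONNX carries the fill value as a one-element tensor, and onnx.numpy_helper.to_array recovers both the scalar and the dtype name:]

    from onnx import helper, TensorProto
    from onnx.numpy_helper import to_array

    value_tensor = helper.make_tensor('value', TensorProto.FLOAT, dims=[1], vals=[7.0])
    np_value = to_array(value_tensor)[0]     # numpy float32 scalar 7.0
    assert np_value.dtype.name == 'float32'  # the dtype string handed to _op.full
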
--- python/tvm/relay/frontend/common.py | 21 +++++++++++++ python/tvm/relay/frontend/onnx.py | 49 ++++++++++++++++------------- 2 files changed, 48 insertions(+), 22 deletions(-) diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py index 8fa8b5bd0f83..c5f6102dc54b 100644 --- a/python/tvm/relay/frontend/common.py +++ b/python/tvm/relay/frontend/common.py @@ -19,6 +19,7 @@ import logging import tvm +import numpy as np from topi.util import get_const_tuple from .. import expr as _expr from .. import module as _module @@ -490,6 +491,26 @@ def infer_value(input_val, params): return m.get_output(0) +def infer_value_simulated(input_val, params): + # Keep track of which params we need to simulate + fake_params = [] + # Add a fake copy of all missing params. + for free_param in analysis.free_vars(input_val): + if free_param.name_hint not in params: + fp_dtype = free_param.type_annotation.dtype + fp_shape = [s.value for s in free_param.type_annotation.shape] + fake_params.append(free_param) + params[free_param.name_hint] = tvm.nd.array( + np.random.rand(*fp_shape).astype(fp_dtype) + ) + # Now infer the value. + output_value = infer_value(input_val, params) + # Clean fake params out of param dictionary. + for fp in fake_params: + params.pop(fp.name_hint, None) + return output_value + + def new_var(name_hint, type_annotation=None, shape=None, diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index db884a375a58..d28e307dbb8f 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -28,7 +28,7 @@ from .. import module as _module from .. import op as _op from .common import AttrCvt, Renamer -from .common import get_relay_op, new_var, infer_shape, infer_channels, infer_type, infer_value, get_name +from .common import get_relay_op, new_var, infer_shape, infer_channels, infer_type, infer_value, infer_value_simulated, get_name from onnx.numpy_helper import to_array __all__ = ['from_onnx'] @@ -120,7 +120,7 @@ def _impl_v1(cls, inputs, attr, params): axis = int(attr.get('axis', 0)) inputs[1] = _op.expand_dims(inputs[1], axis=axis, num_newaxis=2) return get_relay_op(op_name)(*inputs) - + class Pool(OnnxOpConverter): """ A helper class for pool op converters. @@ -280,8 +280,24 @@ class MatMul(OnnxOpConverter): @classmethod def _impl_v1(cls, inputs, attr, params): assert len(inputs) == 2, "MatMul op take 2 inputs, {} given".format(len(inputs)) - input_1_t = _op.transpose(inputs[1], axes=(1, 0)) - return _op.nn.dense(inputs[0], input_1_t) + # Need to check input shape as batch matmul must be supported. + a_shape = infer_shape(inputs[0]) + # When performing a batch matmul, we need to properly handle N-dim shapes. + if len(a_shape) > 2: + b_shape = infer_shape(inputs[1]) + # Convert a and b into 3 dimensional tensors. + a = _op.reshape(inputs[0], [-1, a_shape[-2], a_shape[-1]]) + b = _op.reshape(inputs[1], [-1, b_shape[-2], b_shape[-1]]) + # Transpose matrix dimensions of b. + b = _op.transpose(b, [0, 2, 1]) + # Perform a batch matmul. + output = _op.nn.batch_matmul(a, b) + # Reshape output to original dimensions. + return _op.reshape(output, [*a_shape[:-2], a_shape[-2], b_shape[-1]]) + # Otherwise a simple dense op will get the job done. 
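+        # nn.dense expects its second operand in (N, K) layout and computes
+        # data * weight^T, hence the transpose of inputs[1] below.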
+ else: + input_1_t = _op.transpose(inputs[1], axes=(1, 0)) + return _op.nn.dense(inputs[0], input_1_t) class MaxPool(Pool): @@ -436,23 +452,9 @@ def _impl_v5(cls, inputs, attr, params): out = _op.reshape(inputs[0], shape) else: data, shape = inputs - shape_params = analysis.free_vars(shape) - fake_params = [] - for sp in shape_params: - if sp.name_hint not in params: - sp_dtype = sp.type_annotation.dtype - sp_shape = [s.value for s in sp.type_annotation.shape] - # Add a fake copy of this parameter for shape inference. - fake_params.append(sp) - params[sp.name_hint] = tvm.nd.array( - np.random.rand(*sp_shape).astype(sp_dtype)) - # Infer value of shape - print("\n\nShape Graph: ", infer_type(shape)) - static_shape = infer_value(shape, params) - # Remove fake params from param dictionary. - for sp in fake_params: - params.pop(sp.name_hint, None) - out = _op.reshape(data, newshape=tuple(static_shape.asnumpy())) + static_shape = infer_value_simulated(shape, params) + out = _op.reshape(data, newshape=tuple( + static_shape.asnumpy().astype('int32'))) return out class Concat(OnnxOpConverter): @@ -933,7 +935,10 @@ def _impl_v9(cls, inputs, attr, params): else: value = _expr.const(0) dtype = 'float32' - return _op.full_like(_op.cast(inputs[0], dtype), value) + static_shape = infer_value_simulated(inputs[0], params) + output = _op.full( + value, shape=tuple(static_shape.asnumpy().astype('int32')), dtype=dtype) + return output class Sign(OnnxOpConverter): """ Operator converter for Sign. From c558ef7f1052d6364fb0a008201c8af83d743688 Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Fri, 18 Oct 2019 15:44:48 -0700 Subject: [PATCH 06/23] onnx model fully ingested. --- python/tvm/relay/frontend/onnx.py | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index d28e307dbb8f..918694ef47fa 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -639,11 +639,17 @@ class Split(OnnxOpConverter): @classmethod def _impl_v1(cls, inputs, attr, params): - attr['indices_or_sections'] = [] - index = 0 - for i in attr['split'][:-1]: - index += i - attr['indices_or_sections'].append(index) + splits = attr.get('split', False) + if splits: + attr['indices_or_sections'] = [] + index = 0 + for i in splits[:-1]: + index += i + attr['indices_or_sections'].append(index) + # When splits isnt specified divide evenly over axis. + else: + in_shape = infer_shape(inputs[0]) + attr['indices_or_sections'] = in_shape[attr['axis']] return AttrCvt( 'split', ignores=['split'])(inputs, attr, params) From 01d2145928dc1c81a3e310684f429c65cee46dab Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Tue, 22 Oct 2019 10:30:14 -0700 Subject: [PATCH 07/23] Working on improving onnx tests. --- python/tvm/relay/frontend/onnx.py | 16 +++++++++++----- tests/python/frontend/onnx/test_forward.py | 7 ++++--- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 918694ef47fa..e8da40840f61 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -1368,7 +1368,8 @@ def _fix_outputs(self, op_name, outputs): def from_onnx(model, shape=None, - dtype="float32"): + dtype="float32", + opset=None): """Convert a ONNX model into an equivalent Relay Function. ONNX graphs are represented as Python Protobuf objects. 
@@ -1389,6 +1390,10 @@ def from_onnx(model, dtype : str or dict of str to str The input types to the graph + opset : int + Optional override to autodetected opset. + This can be helpful for some testing. + Returns ------- mod : tvm.relay.Module @@ -1411,9 +1416,10 @@ def from_onnx(model, pass g = GraphProto(shape, dtype) graph = model.graph - try: - opset = model.opset_import[0].version if model.opset_import else 1 - except AttributeError: - opset = 1 + if opset is None: + try: + opset = model.opset_import[0].version if model.opset_import else 1 + except AttributeError: + opset = 1 mod, params = g.from_onnx(graph, opset) return mod, params diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 3d1262f436bb..3c02603acd81 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -30,7 +30,7 @@ import unittest import scipy -def get_tvm_output(graph_def, input_data, target, ctx, output_shape=None, output_dtype='float32'): +def get_tvm_output(graph_def, input_data, target, ctx, output_shape=None, output_dtype='float32', opset=None): """ Generic function to execute and get tvm output""" target = 'llvm' if isinstance(input_data, list): @@ -46,7 +46,7 @@ def get_tvm_output(graph_def, input_data, target, ctx, output_shape=None, output shape_dict = {input_names: input_data.shape} dtype_dict = {input_names: input_data.dtype} - mod, params = relay.frontend.from_onnx(graph_def, shape_dict) + mod, params = relay.frontend.from_onnx(graph_def, shape_dict, opset=opset) with relay.build_config(opt_level=1): graph, lib, params = relay.build(mod, target, @@ -306,7 +306,8 @@ def _test_slice_iteration(indata, outdata, starts, ends, axes=None): model = helper.make_model(graph, producer_name='slice_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32') + tvm_out = get_tvm_output( + model, indata, target, ctx, outdata.shape, 'float32', opset=1) tvm.testing.assert_allclose(outdata, tvm_out) From 05d8905621e1123906dbe35e077d6ea62e242261 Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Tue, 22 Oct 2019 14:29:05 -0700 Subject: [PATCH 08/23] Changed onnx testing to use onnxruntime instead of caffe2, also formatted. --- tests/python/frontend/onnx/test_forward.py | 602 ++++++++++++--------- 1 file changed, 357 insertions(+), 245 deletions(-) diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 3c02603acd81..6b0b79bf7d06 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -14,7 +14,6 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
-import attr import numpy as np import math import torch @@ -27,9 +26,9 @@ from nnvm.testing.config import ctx_list import onnx from onnx import helper, TensorProto -import unittest import scipy + def get_tvm_output(graph_def, input_data, target, ctx, output_shape=None, output_dtype='float32', opset=None): """ Generic function to execute and get tvm output""" target = 'llvm' @@ -53,14 +52,15 @@ def get_tvm_output(graph_def, input_data, target, ctx, output_shape=None, output params=params) ctx = tvm.cpu(0) - from tvm.contrib import graph_runtime m = graph_runtime.create(graph, lib, ctx) # set inputs if isinstance(input_data, list): for i, e in enumerate(input_names): - m.set_input(input_names[i], tvm.nd.array(input_data[i].astype(input_data[i].dtype))) + m.set_input(input_names[i], tvm.nd.array( + input_data[i].astype(input_data[i].dtype))) else: - m.set_input(input_names, tvm.nd.array(input_data.astype(input_data.dtype))) + m.set_input(input_names, tvm.nd.array( + input_data.astype(input_data.dtype))) m.set_input(**params) # execute @@ -76,11 +76,11 @@ def get_tvm_output(graph_def, input_data, target, ctx, output_shape=None, output tvm_output = m.get_output(0) return tvm_output.asnumpy() -def get_caffe2_output(model, x, dtype='float32'): - import caffe2.python.onnx.backend - prepared_backend = caffe2.python.onnx.backend.prepare(model) - W = {model.graph.input[0].name: x.astype(dtype)} - c2_out = prepared_backend.run(W)[0] + +def get_onnx_output(model, x, dtype='float32'): + import onnxruntime.backend as backend + prepared_backend = backend.prepare(model) + c2_out = prepared_backend.run(x.astype(dtype))[0] return c2_out @@ -88,20 +88,25 @@ def verify_onnx_forward_impl(graph_file, data_shape, out_shape): dtype = 'float32' x = np.random.uniform(size=data_shape) model = onnx.load_model(graph_file) - c2_out = get_caffe2_output(model, x, dtype) + c2_out = get_onnx_output(model, x, dtype) for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, x, target, ctx, out_shape, dtype) tvm.testing.assert_allclose(c2_out, tvm_out, rtol=1e-5, atol=1e-5) + def verify_super_resolution_example(): - verify_onnx_forward_impl(super_resolution, (1, 1, 224, 224), (1, 1, 672, 672)) + verify_onnx_forward_impl( + super_resolution, (1, 1, 224, 224), (1, 1, 672, 672)) + def verify_squeezenet1_1(): verify_onnx_forward_impl(squeezenet1_1, (1, 3, 224, 224), (1, 1000)) + def verify_lenet(): verify_onnx_forward_impl(lenet, (1, 1, 28, 28), (1, 10)) + def verify_resnet18(): verify_onnx_forward_impl(resnet18_1_0, (1, 3, 224, 224), (1, 1000)) @@ -112,20 +117,20 @@ def test_reshape(): ref_array = np.array(ref_shape) ref_node = onnx.helper.make_node('Constant', - inputs=[], - outputs=['ref_in'], - value=onnx.helper.make_tensor(name = 'const_tensor', - data_type = onnx.TensorProto.INT32, - dims = ref_array.shape, - vals = ref_array.flatten().astype(int))) + inputs=[], + outputs=['ref_in'], + value=onnx.helper.make_tensor(name='const_tensor', + data_type=onnx.TensorProto.INT32, + dims=ref_array.shape, + vals=ref_array.flatten().astype(int))) reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"]) graph = helper.make_graph([ref_node, reshape_node], "reshape_test", - inputs = [helper.make_tensor_value_info("in", - TensorProto.FLOAT, list(in_shape))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(ref_shape))]) + inputs=[helper.make_tensor_value_info("in", + TensorProto.FLOAT, list(in_shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(ref_shape))]) model 
= helper.make_model(graph, producer_name='reshape_test') @@ -135,28 +140,29 @@ def test_reshape(): tvm.testing.assert_allclose(ref_shape, tvm_out.shape) + def test_shape(): in_shape = (4, 3, 3, 4) ref_shape = (6, 2, 4, 3) ref_array = np.array(ref_shape) ref_node = onnx.helper.make_node('Constant', - inputs=[], - outputs=['ref_in'], - value=onnx.helper.make_tensor(name = 'const_tensor', - data_type = onnx.TensorProto.INT32, - dims = ref_array.shape, - vals = ref_array.flatten().astype(int))) + inputs=[], + outputs=['ref_in'], + value=onnx.helper.make_tensor(name='const_tensor', + data_type=onnx.TensorProto.INT32, + dims=ref_array.shape, + vals=ref_array.flatten().astype(int))) reshape_node = helper.make_node("Reshape", ["in", "ref_in"], ["out"]) shape_node = helper.make_node("Shape", ['out'], ['final_out']) graph = helper.make_graph([ref_node, reshape_node, shape_node], "shape_test", - inputs = [helper.make_tensor_value_info("in", - TensorProto.FLOAT, list(in_shape))], - outputs = [helper.make_tensor_value_info("final_out", - TensorProto.FLOAT, list(ref_shape))]) + inputs=[helper.make_tensor_value_info("in", + TensorProto.FLOAT, list(in_shape))], + outputs=[helper.make_tensor_value_info("final_out", + TensorProto.FLOAT, list(ref_shape))]) model = helper.make_model(graph, producer_name='shape_test') @@ -166,6 +172,7 @@ def test_shape(): tvm.testing.assert_allclose(ref_shape, tvm_out) + def _test_power_iteration(x_shape, y_shape): if isinstance(y_shape, int): y_shape = [y_shape] @@ -179,12 +186,12 @@ def _test_power_iteration(x_shape, y_shape): graph = helper.make_graph([res], 'power_test', - inputs = [helper.make_tensor_value_info("x", - TensorProto.FLOAT, list(x_shape)), - helper.make_tensor_value_info("y", - TensorProto.FLOAT, list(y_shape))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(np_res.shape))]) + inputs=[helper.make_tensor_value_info("x", + TensorProto.FLOAT, list(x_shape)), + helper.make_tensor_value_info("y", + TensorProto.FLOAT, list(y_shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(np_res.shape))]) model = helper.make_model(graph, producer_name='power_test') @@ -192,11 +199,13 @@ def _test_power_iteration(x_shape, y_shape): tvm_out = get_tvm_output(model, [x, y], target, ctx, np_res.shape) tvm.testing.assert_allclose(np_res, tvm_out, rtol=1e-5, atol=1e-5) + def test_power(): _test_power_iteration((1, 3), (1)) _test_power_iteration((2, 3), (2, 3)) _test_power_iteration((2, 3), (1, 3)) + def test_squeeze(): in_shape = (1, 3, 1, 3, 1, 1) out_shape = (3, 3) @@ -204,10 +213,10 @@ def test_squeeze(): graph = helper.make_graph([y], 'squeeze_test', - inputs = [helper.make_tensor_value_info("in", - TensorProto.FLOAT, list(in_shape))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(out_shape))]) + inputs=[helper.make_tensor_value_info("in", + TensorProto.FLOAT, list(in_shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(out_shape))]) model = helper.make_model(graph, producer_name='squeeze_test') @@ -217,20 +226,21 @@ def test_squeeze(): tvm.testing.assert_allclose(out_shape, tvm_out.shape) + def test_flatten(): in_shape = (1, 3, 4, 4) axis = 1 ref_shape = (1, 48) - flatten_node = helper.make_node("Flatten", ["in"], ["out"], axis = axis) + flatten_node = helper.make_node("Flatten", ["in"], ["out"], axis=axis) graph = helper.make_graph([flatten_node], "flatten_test", - inputs = [helper.make_tensor_value_info("in", - TensorProto.FLOAT, list(in_shape))], - outputs 
= [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(ref_shape))]) + inputs=[helper.make_tensor_value_info("in", + TensorProto.FLOAT, list(in_shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(ref_shape))]) model = helper.make_model(graph, producer_name='flatten_test') @@ -240,6 +250,7 @@ def test_flatten(): tvm.testing.assert_allclose(ref_shape, tvm_out.shape) + def test_unsqueeze(): in_shape = (3, 3) axis = (0, 3, 4) @@ -248,10 +259,10 @@ def test_unsqueeze(): graph = helper.make_graph([y], 'squeeze_test', - inputs = [helper.make_tensor_value_info("in", - TensorProto.FLOAT, list(in_shape))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(out_shape))]) + inputs=[helper.make_tensor_value_info("in", + TensorProto.FLOAT, list(in_shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(out_shape))]) model = helper.make_model(graph, producer_name='squeeze_test') @@ -261,6 +272,7 @@ def test_unsqueeze(): tvm.testing.assert_allclose(out_shape, tvm_out.shape) + def verify_gather(in_shape, indices, axis, dtype): x = np.random.uniform(size=in_shape).astype(dtype) indices = np.array(indices, dtype="int32") @@ -270,38 +282,43 @@ def verify_gather(in_shape, indices, axis, dtype): graph = helper.make_graph([y], 'gather_test', - inputs = [helper.make_tensor_value_info("in", - TensorProto.FLOAT, list(in_shape)), - helper.make_tensor_value_info("indices", - TensorProto.INT32, list(indices.shape))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(out_np.shape))]) + inputs=[helper.make_tensor_value_info("in", + TensorProto.FLOAT, list(in_shape)), + helper.make_tensor_value_info("indices", + TensorProto.INT32, list(indices.shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(out_np.shape))]) model = helper.make_model(graph, producer_name='gather_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, [x, indices], target, ctx, out_np.shape) + tvm_out = get_tvm_output( + model, [x, indices], target, ctx, out_np.shape) tvm.testing.assert_allclose(out_np, tvm_out) + def test_gather(): verify_gather((4,), [1], 0, 'int32') - verify_gather((1,4), [0], 0, 'int32') - verify_gather((4,), [[[1,0],[0,1]]], 0, 'float32') - verify_gather((2,2), [[[1,0],[0,1]]], 1, 'int32') - verify_gather((3,3,3), [[[1,0]]], -1, 'int32') - verify_gather((4,3,5,6), [[2,1,0,0]], 0, 'float32') + verify_gather((1, 4), [0], 0, 'int32') + verify_gather((4,), [[[1, 0], [0, 1]]], 0, 'float32') + verify_gather((2, 2), [[[1, 0], [0, 1]]], 1, 'int32') + verify_gather((3, 3, 3), [[[1, 0]]], -1, 'int32') + verify_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, 'float32') + def _test_slice_iteration(indata, outdata, starts, ends, axes=None): if axes: - y = helper.make_node("Slice", ['in'], ['out'], axes=axes, starts=starts, ends=ends) + y = helper.make_node( + "Slice", ['in'], ['out'], axes=axes, starts=starts, ends=ends) else: - y = helper.make_node("Slice", ['in'], ['out'], starts=starts, ends=ends) + y = helper.make_node( + "Slice", ['in'], ['out'], starts=starts, ends=ends) graph = helper.make_graph([y], 'slice_test', - inputs = [helper.make_tensor_value_info("in", - TensorProto.FLOAT, list(indata.shape))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(outdata.shape))]) + inputs=[helper.make_tensor_value_info("in", + TensorProto.FLOAT, list(indata.shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(outdata.shape))]) 
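    # opset=1 below pins the attribute-based Slice-1 converter; opset 10
    # moved starts/ends from attributes to explicit inputs.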
model = helper.make_model(graph, producer_name='slice_test') @@ -311,6 +328,7 @@ def _test_slice_iteration(indata, outdata, starts, ends, axes=None): tvm.testing.assert_allclose(outdata, tvm_out) + def test_slice(): x = np.random.randn(20, 10, 5).astype(np.float32) _test_slice_iteration(x, x[0:3, 0:10], (0, 0), (3, 10), (0, 1)) @@ -318,6 +336,7 @@ def test_slice(): _test_slice_iteration(x, x[:, 1:1000], (1), (1000), (1)) _test_slice_iteration(x, x[:, 0:-1], (0), (-1), (1)) + def _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs): indata = np.random.uniform(-1, 1, size=inshape).astype(dtype) outdata = outfunc(indata, **npargs) @@ -326,24 +345,29 @@ def _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs): graph = helper.make_graph([y], opname+'_test', - inputs = [helper.make_tensor_value_info("in", - TensorProto.FLOAT, list(indata.shape))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(outdata.shape))]) + inputs=[helper.make_tensor_value_info("in", + TensorProto.FLOAT, list(indata.shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(outdata.shape))]) model = helper.make_model(graph, producer_name=opname+'_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, dtype) + tvm_out = get_tvm_output( + model, indata, target, ctx, outdata.shape, dtype) tvm.testing.assert_allclose(outdata, tvm_out) + def test_floor(): - _test_onnx_op_elementwise((2, 4, 5, 6), np.floor, {}, 'float32', 'Floor', {}) + _test_onnx_op_elementwise((2, 4, 5, 6), np.floor, + {}, 'float32', 'Floor', {}) + def test_ceil(): _test_onnx_op_elementwise((2, 4, 5, 6), np.ceil, {}, 'float32', 'Ceil', {}) + def test_clip(): _test_onnx_op_elementwise((2, 4, 5, 6), np.clip, @@ -352,6 +376,7 @@ def test_clip(): 'Clip', {'min': -1.0, 'max': 1.0}) + def test_matmul(): a_shape = (4, 3) b_shape = (3, 4) @@ -364,52 +389,57 @@ def test_matmul(): graph = helper.make_graph([mul_node], "matmul_test", - inputs = [helper.make_tensor_value_info("a", - TensorProto.FLOAT, list(a_shape)), - helper.make_tensor_value_info("b", - TensorProto.FLOAT, list(b_shape))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(out_np.shape))]) + inputs=[helper.make_tensor_value_info("a", + TensorProto.FLOAT, list(a_shape)), + helper.make_tensor_value_info("b", + TensorProto.FLOAT, list(b_shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(out_np.shape))]) model = helper.make_model(graph, producer_name='matmul_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, [a_array, b_array], target, ctx, out_np.shape) + tvm_out = get_tvm_output( + model, [a_array, b_array], target, ctx, out_np.shape) tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5) + def verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None): in_array = np.random.uniform(size=shape).astype(dtype) - if alpha == None and beta == None and bias==None: + if alpha == None and beta == None and bias == None: alpha = 0.0001 beta = 0.75 bias = 1.0 - node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], size=nsize) + node = onnx.helper.make_node( + 'LRN', inputs=['in'], outputs=['out'], size=nsize) else: node = onnx.helper.make_node('LRN', inputs=['in'], outputs=['out'], alpha=alpha, beta=beta, bias=bias, size=nsize) graph = helper.make_graph([node], "lrn_test", - inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(shape))], - 
outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))]) + inputs=[helper.make_tensor_value_info( + "in", TensorProto.FLOAT, list(shape))], + outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(shape))]) model = helper.make_model(graph, producer_name='lrn_test') def _get_python_lrn(): square_sum = np.zeros(shape).astype(dtype) for n, c, h, w in np.ndindex(in_array.shape): square_sum[n, c, h, w] = sum(in_array[n, - max(0, c - int(math.floor((nsize - 1) / 2))): \ - min(5, c + int(math.ceil((nsize - 1) / 2)) + 1), - h, - w] ** 2) + max(0, c - int(math.floor((nsize - 1) / 2))): + min(5, c + int(math.ceil((nsize - 1) / 2)) + 1), + h, + w] ** 2) py_out = in_array / ((bias + (alpha / nsize) * square_sum) ** beta) return py_out for target, ctx in ctx_list(): input_name = model.graph.input[0].name py_out = _get_python_lrn() - tvm_out = get_tvm_output(model, in_array, target, ctx, py_out.shape, 'float32') + tvm_out = get_tvm_output( + model, in_array, target, ctx, py_out.shape, 'float32') tvm.testing.assert_allclose(py_out, tvm_out, rtol=1e-5, atol=1e-5) @@ -437,20 +467,22 @@ def _get_python_instance_norm(x, gamma, beta, epsilon=1e-5): y = _get_python_instance_norm(x, gamma, beta, epsilon).astype(np.float32) node = onnx.helper.make_node( - 'InstanceNormalization', - inputs=['x', 'gamma', 'beta'], - outputs=['y'], - epsilon=epsilon, - ) + 'InstanceNormalization', + inputs=['x', 'gamma', 'beta'], + outputs=['y'], + epsilon=epsilon, + ) graph = helper.make_graph([node], "instance_norm_test", inputs=[helper.make_tensor_value_info("x", TensorProto.FLOAT, list(shape)), - helper.make_tensor_value_info("gamma", TensorProto.FLOAT, (shape[1],)), + helper.make_tensor_value_info( + "gamma", TensorProto.FLOAT, (shape[1],)), helper.make_tensor_value_info("beta", TensorProto.FLOAT, (shape[1],))], outputs=[helper.make_tensor_value_info("y", TensorProto.FLOAT, list(shape))]) model = helper.make_model(graph, producer_name='instance_norm_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, [x, gamma, beta], target, ctx, shape, 'float32') + tvm_out = get_tvm_output( + model, [x, gamma, beta], target, ctx, shape, 'float32') tvm.testing.assert_allclose(y, tvm_out, rtol=1e-5, atol=1e-5) @@ -465,103 +497,122 @@ def _test_upsample_nearest(): scale = 2 in_shape = (1, 1, 3, 3) out_shape = (1, 1, 3*scale, 3*scale) - y = helper.make_node("Upsample", ['in'], ['out'], mode='nearest', scales=[1.0, 1.0, 2.0, 2.0]) + y = helper.make_node("Upsample", ['in'], [ + 'out'], mode='nearest', scales=[1.0, 1.0, 2.0, 2.0]) in_array = np.random.uniform(size=in_shape).astype(np.float32) - out_array = topi.testing.upsampling_python(in_array, (scale, scale), "NCHW") + out_array = topi.testing.upsampling_python( + in_array, (scale, scale), "NCHW") graph = helper.make_graph([y], 'upsample_nearest_test', - inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))], - outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) + inputs=[helper.make_tensor_value_info( + "in", TensorProto.FLOAT, list(in_shape))], + outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) model = helper.make_model(graph, producer_name='upsample_nearest_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, in_array, target, ctx, out_shape, 'float32') + tvm_out = get_tvm_output( + model, in_array, target, ctx, out_shape, 'float32') tvm.testing.assert_allclose(out_array, tvm_out) + def _test_upsample_bilinear(): scale = 
2 in_shape = (1, 1, 3, 3) out_shape = (1, 1, 3*scale, 3*scale) - y = helper.make_node("Upsample", ['in'], ['out'], mode='linear', scales=[1.0, 1.0, 2.0, 2.0]) + y = helper.make_node("Upsample", ['in'], [ + 'out'], mode='linear', scales=[1.0, 1.0, 2.0, 2.0]) in_array = np.random.uniform(size=in_shape).astype(np.float32) - out_array = topi.testing.bilinear_resize_python(in_array, (3*scale, 3*scale), "NCHW") + out_array = topi.testing.bilinear_resize_python( + in_array, (3*scale, 3*scale), "NCHW") graph = helper.make_graph([y], 'upsample_bilinear_test', - inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))], - outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) + inputs=[helper.make_tensor_value_info( + "in", TensorProto.FLOAT, list(in_shape))], + outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) model = helper.make_model(graph, producer_name='upsample_bilinear_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, in_array, target, ctx, out_shape, 'float32') + tvm_out = get_tvm_output( + model, in_array, target, ctx, out_shape, 'float32') tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5) + def _test_upsample_bilinear_opset9(): scale = 2 in_shape = (1, 1, 3, 3) out_shape = (1, 1, 3*scale, 3*scale) - y = helper.make_node("Upsample", ['in','scales'], ['out'], mode='linear') - scales=[1.0, 1.0, 2.0, 2.0] + y = helper.make_node("Upsample", ['in', 'scales'], ['out'], mode='linear') + scales = [1.0, 1.0, 2.0, 2.0] in_array = np.random.uniform(size=in_shape).astype(np.float32) - out_array = topi.testing.bilinear_resize_python(in_array, (3*scale, 3*scale), "NCHW") + out_array = topi.testing.bilinear_resize_python( + in_array, (3*scale, 3*scale), "NCHW") ref_array = np.array(scales) ref_node = helper.make_node('Constant', - inputs=[], - outputs=['scales'], - value=onnx.helper.make_tensor(name = 'const_tensor', - data_type = TensorProto.FLOAT, - dims = ref_array.shape, - vals = ref_array.flatten().astype(float))) + inputs=[], + outputs=['scales'], + value=onnx.helper.make_tensor(name='const_tensor', + data_type=TensorProto.FLOAT, + dims=ref_array.shape, + vals=ref_array.flatten().astype(float))) graph = helper.make_graph([ref_node, y], 'upsample_bilinear_opset9_test', - inputs = [helper.make_tensor_value_info("in", TensorProto.FLOAT, list(in_shape))], - outputs = [helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) + inputs=[helper.make_tensor_value_info( + "in", TensorProto.FLOAT, list(in_shape))], + outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(out_shape))]) - model = helper.make_model(graph, producer_name='upsample_bilinear_opset9_test') + model = helper.make_model( + graph, producer_name='upsample_bilinear_opset9_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, in_array, target, ctx, out_shape, 'float32') + tvm_out = get_tvm_output( + model, in_array, target, ctx, out_shape, 'float32') tvm.testing.assert_allclose(out_array, tvm_out, rtol=1e-5, atol=1e-5) + def test_upsample(): _test_upsample_nearest() _test_upsample_bilinear() _test_upsample_bilinear_opset9() + def _test_softmax(inshape, axis): opname = 'Softmax' indata = np.random.uniform(size=inshape).astype(np.float32) outshape = inshape outdata = topi.testing.softmax_python(indata) if isinstance(axis, int): - y = helper.make_node(opname, ['in'], ['out'], axis = axis) + y = helper.make_node(opname, ['in'], ['out'], axis=axis) elif axis is None: 
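        # Omitting the axis attribute exercises the converter's default of
        # axis=1 for ONNX Softmax.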
y = helper.make_node(opname, ['in'], ['out']) graph = helper.make_graph([y], opname+'_test', - inputs = [helper.make_tensor_value_info("in", - TensorProto.FLOAT, list(indata.shape))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(outdata.shape))]) + inputs=[helper.make_tensor_value_info("in", + TensorProto.FLOAT, list(indata.shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(outdata.shape))]) model = helper.make_model(graph, producer_name=opname+'_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, indata, target, ctx, outshape, 'float32') + tvm_out = get_tvm_output( + model, indata, target, ctx, outshape, 'float32') tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5) + def test_softmax(): _test_softmax((1, 10), None) _test_softmax((1, 10), 1) + def verify_min(input_dim): dtype = 'float32' @@ -575,25 +626,28 @@ def verify_min(input_dim): graph = helper.make_graph([min_node], "Min_test", - inputs = [helper.make_tensor_value_info("a_np1", - TensorProto.FLOAT, list(input_dim)), - helper.make_tensor_value_info("a_np2", - TensorProto.FLOAT, list(input_dim)), - helper.make_tensor_value_info("a_np3", - TensorProto.FLOAT, list(input_dim))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(b_np.shape))]) + inputs=[helper.make_tensor_value_info("a_np1", + TensorProto.FLOAT, list(input_dim)), + helper.make_tensor_value_info("a_np2", + TensorProto.FLOAT, list(input_dim)), + helper.make_tensor_value_info("a_np3", + TensorProto.FLOAT, list(input_dim))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(b_np.shape))]) model = helper.make_model(graph, producer_name='Min_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape) + tvm_out = get_tvm_output( + model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape) tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) + def test_forward_min(): verify_min((1, 3, 20, 20)) verify_min((20, 20)) + def verify_max(input_dim): dtype = 'float32' @@ -607,25 +661,28 @@ def verify_max(input_dim): graph = helper.make_graph([max_node], "Max_test", - inputs = [helper.make_tensor_value_info("a_np1", - TensorProto.FLOAT, list(input_dim)), - helper.make_tensor_value_info("a_np2", - TensorProto.FLOAT, list(input_dim)), - helper.make_tensor_value_info("a_np3", - TensorProto.FLOAT, list(input_dim))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(b_np.shape))]) + inputs=[helper.make_tensor_value_info("a_np1", + TensorProto.FLOAT, list(input_dim)), + helper.make_tensor_value_info("a_np2", + TensorProto.FLOAT, list(input_dim)), + helper.make_tensor_value_info("a_np3", + TensorProto.FLOAT, list(input_dim))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(b_np.shape))]) model = helper.make_model(graph, producer_name='Max_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape) + tvm_out = get_tvm_output( + model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape) tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) + def test_forward_max(): verify_max((1, 3, 20, 20)) verify_max((20, 20)) + def verify_mean(input_dim): dtype = 'float32' @@ -639,25 +696,28 @@ def verify_mean(input_dim): graph = helper.make_graph([mean_node], "Mean_test", - inputs = [helper.make_tensor_value_info("a_np1", - TensorProto.FLOAT, list(input_dim)), - 
helper.make_tensor_value_info("a_np2", - TensorProto.FLOAT, list(input_dim)), - helper.make_tensor_value_info("a_np3", - TensorProto.FLOAT, list(input_dim))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(b_np.shape))]) + inputs=[helper.make_tensor_value_info("a_np1", + TensorProto.FLOAT, list(input_dim)), + helper.make_tensor_value_info("a_np2", + TensorProto.FLOAT, list(input_dim)), + helper.make_tensor_value_info("a_np3", + TensorProto.FLOAT, list(input_dim))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(b_np.shape))]) model = helper.make_model(graph, producer_name='Mean_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape) + tvm_out = get_tvm_output( + model, [a_np1, a_np2, a_np3], target, ctx, b_np.shape) tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) + def test_forward_mean(): verify_mean((1, 3, 20, 20)) verify_mean((20, 20)) + def verify_hardsigmoid(input_dim, alpha, beta): dtype = 'float32' @@ -665,14 +725,15 @@ def verify_hardsigmoid(input_dim, alpha, beta): b_np = np.clip(a_np1 * alpha + beta, 0, 1) - hardsigmoid_node = helper.make_node("HardSigmoid", ["a_np1"], ["out"], alpha=alpha, beta=beta) + hardsigmoid_node = helper.make_node("HardSigmoid", ["a_np1"], [ + "out"], alpha=alpha, beta=beta) graph = helper.make_graph([hardsigmoid_node], "HardSigmoid_test", - inputs = [helper.make_tensor_value_info("a_np1", - TensorProto.FLOAT, list(input_dim))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(b_np.shape))]) + inputs=[helper.make_tensor_value_info("a_np1", + TensorProto.FLOAT, list(input_dim))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(b_np.shape))]) model = helper.make_model(graph, producer_name='HardSigmoid_test') @@ -680,10 +741,12 @@ def verify_hardsigmoid(input_dim, alpha, beta): tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape) tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) + def test_forward_hardsigmoid(): verify_hardsigmoid((1, 3, 20, 20), 0.5, 0.6) verify_hardsigmoid((20, 20), 0.3, 0.4) + def verify_argmin(input_dim, axis=None, keepdims=None): def _argmin_numpy(data, axis=0, keepdims=True): result = np.argmin(data, axis=axis) @@ -718,17 +781,19 @@ def _argmin_numpy(data, axis=0, keepdims=True): keepdims=keepdims) graph = helper.make_graph([node], "argmin_test", - inputs = [helper.make_tensor_value_info("a_np1", - TensorProto.INT32, list(a_np1.shape))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.INT32, list(b_np.shape))]) + inputs=[helper.make_tensor_value_info("a_np1", + TensorProto.INT32, list(a_np1.shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.INT32, list(b_np.shape))]) model = helper.make_model(graph, producer_name='argmin_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape, b_np.dtype) + tvm_out = get_tvm_output( + model, [a_np1], target, ctx, b_np.shape, b_np.dtype) tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) + def verify_argmax(input_dim, axis=None, keepdims=None): def _argmax_numpy(data, axis=0, keepdims=True): result = np.argmax(data, axis=axis) @@ -764,29 +829,32 @@ def _argmax_numpy(data, axis=0, keepdims=True): graph = helper.make_graph([node], "argmax_test", - inputs = [helper.make_tensor_value_info("a_np1", - TensorProto.INT32, list(a_np1.shape))], - outputs = [helper.make_tensor_value_info("out", - 
TensorProto.INT32, list(b_np.shape))]) + inputs=[helper.make_tensor_value_info("a_np1", + TensorProto.INT32, list(a_np1.shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.INT32, list(b_np.shape))]) model = helper.make_model(graph, producer_name='argmax_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, [a_np1], target, ctx, b_np.shape, b_np.dtype) + tvm_out = get_tvm_output( + model, [a_np1], target, ctx, b_np.shape, b_np.dtype) tvm.testing.assert_allclose(b_np, tvm_out, rtol=1e-5, atol=1e-5) + def test_forward_arg_min_max(): '''Verify argmin and argmax''' - verify_argmin([3,4,4]) - verify_argmax([3,4,4]) - verify_argmin([3,4,4], axis=1) - verify_argmax([3,4,4], axis=0) - verify_argmin([3,4,4], keepdims=0) - verify_argmax([3,4,4], keepdims=1) - for axis in [None, 0,1,2]: - for keepdims in [None, True,False]: - verify_argmin([3,4,4], axis, keepdims) - verify_argmax([3,4,4], axis, keepdims) + verify_argmin([3, 4, 4]) + verify_argmax([3, 4, 4]) + verify_argmin([3, 4, 4], axis=1) + verify_argmax([3, 4, 4], axis=0) + verify_argmin([3, 4, 4], keepdims=0) + verify_argmax([3, 4, 4], keepdims=1) + for axis in [None, 0, 1, 2]: + for keepdims in [None, True, False]: + verify_argmin([3, 4, 4], axis, keepdims) + verify_argmax([3, 4, 4], axis, keepdims) + def verify_constantfill(is_shape, input_dim, out_dim, value, dtype, **kwargs): input_a = np.random.uniform(size=input_dim).astype(dtype) @@ -794,21 +862,23 @@ def verify_constantfill(is_shape, input_dim, out_dim, value, dtype, **kwargs): out.fill(value) if is_shape == True: - fill_node = helper.make_node("ConstantFill", [], ["out"], shape=input_dim, value=value, **kwargs) + fill_node = helper.make_node( + "ConstantFill", [], ["out"], shape=input_dim, value=value, **kwargs) else: - fill_node = helper.make_node("ConstantFill", ["input_a"], ["out"], value=value, dtype=dtype, **kwargs) + fill_node = helper.make_node("ConstantFill", ["input_a"], [ + "out"], value=value, dtype=dtype, **kwargs) if is_shape == True: inputs = [] else: inputs = [helper.make_tensor_value_info("input_a", - TensorProto.FLOAT, list(input_dim))] + TensorProto.FLOAT, list(input_dim))] graph = helper.make_graph([fill_node], "fill_test", inputs, - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(out.shape))]) + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(out.shape))]) model = helper.make_model(graph, producer_name='fill_test') @@ -820,10 +890,12 @@ def verify_constantfill(is_shape, input_dim, out_dim, value, dtype, **kwargs): tvm.testing.assert_allclose(out, tvm_out, rtol=1e-5, atol=1e-5) + def test_constantfill(): verify_constantfill(True, (2, 3, 4, 5), (2, 3, 4, 5), 10, 'float32') verify_constantfill(False, (2, 3, 4, 5), (2, 3, 4, 5), 10, 'float32') - verify_constantfill(True, (2, 3, 4, 5), (2, 3, 4, 5, 4, 5, 6), 10, 'float32', extra_shape=(4, 5, 6)) + verify_constantfill(True, (2, 3, 4, 5), (2, 3, 4, 5, 4, + 5, 6), 10, 'float32', extra_shape=(4, 5, 6)) def verify_pad(indata, pads, mode='constant', value=0.0): @@ -842,7 +914,8 @@ def verify_pad(indata, pads, mode='constant', value=0.0): pads=pads, ) else: - outdata = np.pad(indata, pad_width=np_pads, mode='constant', constant_values=value) + outdata = np.pad(indata, pad_width=np_pads, + mode='constant', constant_values=value) node = helper.make_node( 'Pad', inputs=['input'], @@ -853,22 +926,30 @@ def verify_pad(indata, pads, mode='constant', value=0.0): ) graph = helper.make_graph([node], 'pad_test', - inputs = 
[helper.make_tensor_value_info("input", - TensorProto.FLOAT, list(indata.shape))], - outputs = [helper.make_tensor_value_info("output", - TensorProto.FLOAT, list(outdata.shape))]) + inputs=[helper.make_tensor_value_info("input", + TensorProto.FLOAT, list(indata.shape))], + outputs=[helper.make_tensor_value_info("output", + TensorProto.FLOAT, list(outdata.shape))]) model = helper.make_model(graph, producer_name='pad_test') # tvm result for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32') + tvm_out = get_tvm_output( + model, indata, target, ctx, outdata.shape, 'float32') tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5) + def test_pad(): - verify_pad(np.random.randn(2, 2).astype(np.float32), [0, 1, 0, 0], 'constant', 0.0) - verify_pad(np.random.randn(2, 3).astype(np.float32), [1, 0, 0, 1], 'constant', 0.0) - verify_pad(np.random.randn(3, 2).astype(np.float32), [0, 0, 1, 0], 'constant', 5.0) - verify_pad(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], 'edge') - verify_pad(np.random.randn(1, 3, 4, 5).astype(np.float32), [0, 0, 1, 1, 0, 0, 1, 1], 'reflect') + verify_pad(np.random.randn(2, 2).astype( + np.float32), [0, 1, 0, 0], 'constant', 0.0) + verify_pad(np.random.randn(2, 3).astype( + np.float32), [1, 0, 0, 1], 'constant', 0.0) + verify_pad(np.random.randn(3, 2).astype( + np.float32), [0, 0, 1, 0], 'constant', 5.0) + verify_pad(np.random.randn(1, 3, 4, 5).astype( + np.float32), [0, 0, 1, 1, 0, 0, 1, 1], 'edge') + verify_pad(np.random.randn(1, 3, 4, 5).astype( + np.float32), [0, 0, 1, 1, 0, 0, 1, 1], 'reflect') + def verify_reduce_x(name, indata, axis, keepdims): indata = np.array(indata).astype(np.float32) @@ -894,16 +975,18 @@ def verify_reduce_x(name, indata, axis, keepdims): axes=axis, keepdims=keepdims) graph = helper.make_graph([node], '{}_test'.format(name), - inputs = [helper.make_tensor_value_info("input", - TensorProto.FLOAT, list(indata.shape))], - outputs = [helper.make_tensor_value_info("output", - TensorProto.FLOAT, list(outdata.shape))]) + inputs=[helper.make_tensor_value_info("input", + TensorProto.FLOAT, list(indata.shape))], + outputs=[helper.make_tensor_value_info("output", + TensorProto.FLOAT, list(outdata.shape))]) model = helper.make_model(graph, producer_name='{}_test'.format(name)) # tvm result for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, indata, target, ctx, outdata.shape, 'float32') + tvm_out = get_tvm_output( + model, indata, target, ctx, outdata.shape, 'float32') tvm.testing.assert_allclose(outdata, tvm_out, rtol=1e-5, atol=1e-5) + def test_reduce_max(): verify_reduce_x("ReduceMax", np.random.randn(3, 2, 2).astype(np.float32), @@ -915,6 +998,7 @@ def test_reduce_max(): np.random.randn(3, 3, 3).astype(np.float32), axis=(1,), keepdims=1) + def test_reduce_min(): verify_reduce_x("ReduceMin", np.random.randn(3, 2, 2).astype(np.float32), @@ -926,6 +1010,7 @@ def test_reduce_min(): np.random.randn(3, 3, 3).astype(np.float32), axis=(1,), keepdims=1) + def test_reduce_sum(): verify_reduce_x("ReduceSum", np.random.randn(3, 2, 2).astype(np.float32), @@ -937,6 +1022,7 @@ def test_reduce_sum(): np.random.randn(3, 3, 3).astype(np.float32), axis=(1,), keepdims=1) + def test_reduce_mean(): verify_reduce_x("ReduceMean", np.random.randn(3, 2, 2).astype(np.float32), @@ -948,6 +1034,7 @@ def test_reduce_mean(): np.random.randn(3, 3, 3).astype(np.float32), axis=(1,), keepdims=1) + def verify_split(indata, outdatas, split, axis=0): indata = 
np.array(indata).astype(np.float32) outdatas = [np.array(o).astype(np.float32) for o in outdatas] @@ -960,29 +1047,34 @@ def verify_split(indata, outdatas, split, axis=0): ) graph = helper.make_graph([node], 'split_test', - inputs = [helper.make_tensor_value_info("input", - TensorProto.FLOAT, list(indata.shape))], - outputs = [helper.make_tensor_value_info("output_{}".format(i), - TensorProto.FLOAT, list(outdatas[i].shape)) - for i in range(len(split)) - ]) + inputs=[helper.make_tensor_value_info("input", + TensorProto.FLOAT, list(indata.shape))], + outputs=[helper.make_tensor_value_info("output_{}".format(i), + TensorProto.FLOAT, list(outdatas[i].shape)) + for i in range(len(split)) + ]) model = helper.make_model(graph, producer_name='split_test') for target, ctx in ctx_list(): output_shape = [o.shape for o in outdatas] output_type = ['float32', 'float32', 'float32'] - tvm_out = get_tvm_output(model, indata, target, ctx, output_shape, output_type) + tvm_out = get_tvm_output( + model, indata, target, ctx, output_shape, output_type) for o, t in zip(outdatas, tvm_out): tvm.testing.assert_allclose(o, t) + def test_split(): # 1D - verify_split([1., 2., 3., 4., 5., 6.], [[1., 2.], [3., 4.], [5., 6.]], [2, 2, 2], 0) - verify_split([1., 2., 3., 4., 5., 6.], [[1., 2.], [3.], [4., 5., 6.]], [2, 1, 3], 0) + verify_split([1., 2., 3., 4., 5., 6.], [ + [1., 2.], [3., 4.], [5., 6.]], [2, 2, 2], 0) + verify_split([1., 2., 3., 4., 5., 6.], [ + [1., 2.], [3.], [4., 5., 6.]], [2, 1, 3], 0) # 2D verify_split([[1., 2., 3., 4.], [7., 8., 9., 10.]], [[[1., 2.], [7., 8.]], [[3., 4.], [9., 10.]]], [2, 2], 1) + def test_binary_ops(): in_shape = (1, 2, 3, 3) dtype = "float32" @@ -994,13 +1086,13 @@ def verify_binary_ops(op, x, y, out_np, broadcast=None): else: z = helper.make_node(op, ['in1', 'in2'], ['out'], broadcast=1) graph = helper.make_graph([z], - '_test', - inputs = [helper.make_tensor_value_info("in1", - TensorProto.FLOAT, list(in_shape)), - helper.make_tensor_value_info("in2", - TensorProto.FLOAT, list(in_shape))], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(out_shape))]) + '_test', + inputs=[helper.make_tensor_value_info("in1", + TensorProto.FLOAT, list(in_shape)), + helper.make_tensor_value_info("in2", + TensorProto.FLOAT, list(in_shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(out_shape))]) model = helper.make_model(graph, producer_name='_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, [x, y], target, ctx) @@ -1009,11 +1101,11 @@ def verify_binary_ops(op, x, y, out_np, broadcast=None): x = np.random.uniform(size=in_shape).astype(dtype) y = np.random.uniform(size=in_shape).astype(dtype) z = np.random.uniform(size=(3,)).astype(dtype) - verify_binary_ops("Add",x, y, x + y, broadcast=None) + verify_binary_ops("Add", x, y, x + y, broadcast=None) verify_binary_ops("Add", x, z, x + z, broadcast=True) verify_binary_ops("Sub", x, y, x - y, broadcast=None) verify_binary_ops("Sub", x, z, x - z, broadcast=True) - verify_binary_ops("Mul",x, y, x * y, broadcast=None) + verify_binary_ops("Mul", x, y, x * y, broadcast=None) verify_binary_ops("Mul", x, z, x * z, broadcast=True) verify_binary_ops("Div", x, y, x / y, broadcast=None) verify_binary_ops("Div", x, z, x / z, broadcast=True) @@ -1022,6 +1114,7 @@ def verify_binary_ops(op, x, y, out_np, broadcast=None): verify_binary_ops("Less", x, y, x < y, broadcast=True) verify_binary_ops("Equal", x, y, x == y, broadcast=True) + def test_single_ops(): in_shape = (1, 2, 3, 3) dtype = 
"float32" @@ -1030,29 +1123,30 @@ def test_single_ops(): def verify_single_ops(op, x, out_np, rtol=1e-5, atol=1e-5): z = helper.make_node(op, ['in1'], ['out']) graph = helper.make_graph([z], - '_test', - inputs = [helper.make_tensor_value_info("in1", - TensorProto.FLOAT, list(in_shape)),], - outputs = [helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(out_shape))]) + '_test', + inputs=[helper.make_tensor_value_info("in1", + TensorProto.FLOAT, list(in_shape)), ], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(out_shape))]) model = helper.make_model(graph, producer_name='_test') for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, [x], target, ctx) tvm.testing.assert_allclose(out_np, tvm_out, rtol=rtol, atol=atol) x = np.random.uniform(size=in_shape).astype(dtype) - verify_single_ops("Neg",x, -x) - verify_single_ops("Abs",x, np.abs(x)) - verify_single_ops("Reciprocal",x, 1/x) - verify_single_ops("Sqrt",x, np.sqrt(x)) - verify_single_ops("Relu",x, np.maximum(x, 0)) - verify_single_ops("Exp",x, np.exp(x)) - verify_single_ops("Log",x, np.log(x)) - verify_single_ops("Log",x, np.log(x)) - verify_single_ops("Tanh",x, np.tanh(x)) - verify_single_ops("Sigmoid",x, 1 / (1 + np.exp(-x))) - verify_single_ops("Softsign",x, x / (1 + np.abs(x))) - verify_single_ops("SoftPlus",x, np.log(1 + np.exp(x))) + verify_single_ops("Neg", x, -x) + verify_single_ops("Abs", x, np.abs(x)) + verify_single_ops("Reciprocal", x, 1/x) + verify_single_ops("Sqrt", x, np.sqrt(x)) + verify_single_ops("Relu", x, np.maximum(x, 0)) + verify_single_ops("Exp", x, np.exp(x)) + verify_single_ops("Log", x, np.log(x)) + verify_single_ops("Log", x, np.log(x)) + verify_single_ops("Tanh", x, np.tanh(x)) + verify_single_ops("Sigmoid", x, 1 / (1 + np.exp(-x))) + verify_single_ops("Softsign", x, x / (1 + np.abs(x))) + verify_single_ops("SoftPlus", x, np.log(1 + np.exp(x))) + def test_leaky_relu(): def leaky_relu_x(x, alpha): @@ -1064,6 +1158,7 @@ def leaky_relu_x(x, alpha): 'LeakyRelu', {'alpha': 0.25}) + def test_elu(): def elu_x(x, alpha): return np.where(x > 0, x, alpha * (np.exp(x) - 1.0)) @@ -1074,6 +1169,7 @@ def elu_x(x, alpha): 'Elu', {'alpha': 0.25}) + def test_selu(): def selu_x(x, alpha, gamma): return gamma * np.where(x > 0, x, alpha * (np.exp(x) - 1.0)) @@ -1084,6 +1180,7 @@ def selu_x(x, alpha, gamma): 'Selu', {'alpha': 0.25, 'gamma': 0.3}) + def test_ThresholdedRelu(): def ThresholdedRelu_x(x, alpha): out_np = np.clip(x, alpha, np.inf) @@ -1096,6 +1193,7 @@ def ThresholdedRelu_x(x, alpha): 'ThresholdedRelu', {'alpha': 0.25}) + def test_ScaledTanh(): def ScaledTanh_x(x, alpha, beta): return alpha * np.tanh(beta * x) @@ -1106,6 +1204,7 @@ def ScaledTanh_x(x, alpha, beta): 'ScaledTanh', {'alpha': 0.25, 'beta': 0.3}) + def test_ParametricSoftplus(): def ParametricSoftplus_x(x, alpha, beta): return alpha * np.log(np.exp(beta * x) + 1) @@ -1116,6 +1215,7 @@ def ParametricSoftplus_x(x, alpha, beta): 'ParametricSoftplus', {'alpha': 0.25, 'beta': 0.3}) + def test_Scale(): def Scale_x(x, scale): return scale * x @@ -1126,6 +1226,7 @@ def Scale_x(x, scale): 'Scale', {'scale': 0.25}) + def test_LogSoftmax(): _test_onnx_op_elementwise((1, 4), topi.testing.log_softmax_python, @@ -1139,16 +1240,18 @@ def check_torch_conversion(model, input_size): dummy_input = torch.randn(*input_size) file_name = '{}.onnx'.format(model.__name__) # Set verbose=True for more output - torch.onnx.export(model(), dummy_input, file_name, export_params=True, verbose=False) + torch.onnx.export(model(), dummy_input, 
file_name, + export_params=True, verbose=False) onnx_model = onnx.load(file_name) for target, ctx in ctx_list(): input_data = np.random.uniform(size=input_size).astype('int32') - c2_out = get_caffe2_output(onnx_model, input_data) + c2_out = get_onnx_output(onnx_model, input_data) tvm_out = get_tvm_output(onnx_model, input_data, target, ctx) tvm.testing.assert_allclose(c2_out, tvm_out) + def test_resnet(): - check_torch_conversion(torchvision.models.resnet18, (1,3,224,224)) + check_torch_conversion(torchvision.models.resnet18, (1, 3, 224, 224)) # check_torch_conversion(torchvision.models.resnet101, (1,3,224,224)) # def test_alexnet(): @@ -1164,11 +1267,13 @@ def test_resnet(): # # Torch's ONNX export does not support the max pooling used by Squezenet # check_torch_conversion(torchvision.models.squeezenet1_0, (1,3,224,224)) + def test_densenet(): - check_torch_conversion(torchvision.models.densenet161, (1,3,224,224)) + check_torch_conversion(torchvision.models.densenet161, (1, 3, 224, 224)) + def test_inception(): - check_torch_conversion(torchvision.models.inception_v3, (1,3,224,224)) + check_torch_conversion(torchvision.models.inception_v3, (1, 3, 224, 224)) # TODO(@jroesch): Update Torch + ONNX to support this import. # def test_googlenet(): @@ -1178,6 +1283,7 @@ def test_inception(): # def test_shufflenetv2(): # check_torch_conversion(torchvision.models.shufflenetv2, (1,3,224,224)) + def test_sign(): def Sign_x(x): return np.sign(x) @@ -1197,7 +1303,8 @@ def verify_not(indata, dtype): graph = helper.make_graph([node], 'not_test', - inputs=[helper.make_tensor_value_info("in", TensorProto.BOOL, list(x.shape))], + inputs=[helper.make_tensor_value_info( + "in", TensorProto.BOOL, list(x.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.BOOL, list(outdata.shape))]) model = helper.make_model(graph, producer_name='not_test') @@ -1267,7 +1374,8 @@ def verify_tile(indata, outdata, **kwargs): node = helper.make_node('Tile', inputs=['in'], outputs=['out'], **kwargs) graph = helper.make_graph([node], 'tile_test', - inputs=[helper.make_tensor_value_info("in", TensorProto.FLOAT, list(indata.shape))], + inputs=[helper.make_tensor_value_info( + "in", TensorProto.FLOAT, list(indata.shape))], outputs=[helper.make_tensor_value_info("out", TensorProto.FLOAT, list(outdata.shape))]) model = helper.make_model(graph, producer_name='tile_test') @@ -1279,15 +1387,18 @@ def verify_tile(indata, outdata, **kwargs): def test_tile(): x = np.random.rand(2, 3, 4, 5).astype(np.float32) - repeats = np.random.randint(low=1, high=10, size=(np.ndim(x),)).astype(np.int64) + repeats = np.random.randint( + low=1, high=10, size=(np.ndim(x),)).astype(np.int64) z = np.tile(x, repeats) verify_tile(x, z, repeats=repeats) + def verify_erf(indata, outdata): node = helper.make_node('Erf', inputs=['in'], outputs=['out']) graph = helper.make_graph([node], 'erf_test', - inputs=[helper.make_tensor_value_info('in', TensorProto.FLOAT, list(indata.shape))], + inputs=[helper.make_tensor_value_info( + 'in', TensorProto.FLOAT, list(indata.shape))], outputs=[helper.make_tensor_value_info('out', TensorProto.FLOAT, list(outdata.shape))]) model = helper.make_model(graph, producer_name='erf_test') @@ -1295,6 +1406,7 @@ def verify_erf(indata, outdata): tvm_out = get_tvm_output(model, [indata], target, ctx, outdata.shape) tvm.testing.assert_allclose(outdata, tvm_out) + def test_erf(): x = np.random.rand(2, 3, 4, 6).astype(np.float32) z = scipy.special.erf(x) From bffad89d73a94b112db19ec18035b5c31fd2608f Mon Sep 17 00:00:00 2001 From: 
Josh Fromm Date: Tue, 22 Oct 2019 14:36:58 -0700 Subject: [PATCH 09/23] Add arbitrary output nodes to onnx frontend. --- python/tvm/relay/frontend/onnx.py | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index e8da40840f61..8c15a3141a71 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -1149,7 +1149,7 @@ def __init__(self, shape, dtype): self._shape = shape if shape else {} self._dtype = dtype - def from_onnx(self, graph, opset): + def from_onnx(self, graph, opset, outputs): """Construct Relay expression from ONNX graph. Onnx graph is a python protobuf object. @@ -1166,6 +1166,10 @@ def from_onnx(self, graph, opset): opset : opset version + outputs : list of str + List of names to output, if not provided + then default graph outputs are used. + Returns ------- mod : tvm.relay.Module @@ -1264,7 +1268,11 @@ def from_onnx(self, graph, opset): self._nodes[k] = op[i] # now return the outputs - outputs = [self._nodes[self._parse_value_proto(i)] for i in graph.output] + # If we havent prespecified output nodes, + # just use the default graph outputs. + if outputs is None: + outputs = [ + self._nodes[self._parse_value_proto(i)] for i in graph.output] outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs) func = _expr.Function(analysis.free_vars(outputs), outputs) return _module.Module.from_expr(func), self._params @@ -1369,7 +1377,8 @@ def _fix_outputs(self, op_name, outputs): def from_onnx(model, shape=None, dtype="float32", - opset=None): + opset=None, + outputs=None): """Convert a ONNX model into an equivalent Relay Function. ONNX graphs are represented as Python Protobuf objects. @@ -1390,10 +1399,14 @@ def from_onnx(model, dtype : str or dict of str to str The input types to the graph - opset : int - Optional override to autodetected opset. + opset : int, optional + Override to autodetected opset. This can be helpful for some testing. + outputs : list of str, optional + Names of output nodes. If not defined then the default + outputs of the onnx graph will be used. + Returns ------- mod : tvm.relay.Module @@ -1421,5 +1434,5 @@ def from_onnx(model, opset = model.opset_import[0].version if model.opset_import else 1 except AttributeError: opset = 1 - mod, params = g.from_onnx(graph, opset) + mod, params = g.from_onnx(graph, opset, outputs) return mod, params From 94654af364d5f229e92b6a9cda468040f1110a8a Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Tue, 22 Oct 2019 15:26:40 -0700 Subject: [PATCH 10/23] Added v6 tiling for bert squad 8 support. --- python/tvm/relay/frontend/onnx.py | 8 ++++++++ tests/python/frontend/onnx/test_forward.py | 16 ++++++++-------- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 8c15a3141a71..4708a48ac863 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -986,6 +986,12 @@ def _impl_v1(cls, inputs, attr, params): reps = attr.pop('repeats') # The number of times repeating the tensor data. 
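        # In opset 1 'repeats' arrives as a node attribute; the v6 converter
        # added below reads it from inputs[1] instead, which is why that path
        # has to recover a static value through infer_value_simulated.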
return _op.tile(inputs[0], reps) + @classmethod + def _impl_v6(cls, inputs, attr, params): + reps = tuple(infer_value_simulated( + inputs[1], params).asnumpy().astype('int32')) + return _op.tile(inputs[0], reps) + class Erf(OnnxOpConverter): """Operator converter for Erf """ @@ -1273,6 +1279,8 @@ def from_onnx(self, graph, opset, outputs): if outputs is None: outputs = [ self._nodes[self._parse_value_proto(i)] for i in graph.output] + else: + outputs = [self._nodes[i] for i in outputs] outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs) func = _expr.Function(analysis.free_vars(outputs), outputs) return _module.Module.from_expr(func), self._params diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 6b0b79bf7d06..2fb340f36bb7 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -305,7 +305,7 @@ def test_gather(): verify_gather((4, 3, 5, 6), [[2, 1, 0, 0]], 0, 'float32') -def _test_slice_iteration(indata, outdata, starts, ends, axes=None): +def _test_slice_iteration_v1(indata, outdata, starts, ends, axes=None): if axes: y = helper.make_node( "Slice", ['in'], ['out'], axes=axes, starts=starts, ends=ends) @@ -331,10 +331,10 @@ def _test_slice_iteration(indata, outdata, starts, ends, axes=None): def test_slice(): x = np.random.randn(20, 10, 5).astype(np.float32) - _test_slice_iteration(x, x[0:3, 0:10], (0, 0), (3, 10), (0, 1)) - _test_slice_iteration(x, x[:, :, 3:4], (0, 0, 3), (20, 10, 4)) - _test_slice_iteration(x, x[:, 1:1000], (1), (1000), (1)) - _test_slice_iteration(x, x[:, 0:-1], (0), (-1), (1)) + _test_slice_iteration_v1(x, x[0:3, 0:10], (0, 0), (3, 10), (0, 1)) + _test_slice_iteration_v1(x, x[:, :, 3:4], (0, 0, 3), (20, 10, 4)) + _test_slice_iteration_v1(x, x[:, 1:1000], (1), (1000), (1)) + _test_slice_iteration_v1(x, x[:, 0:-1], (0), (-1), (1)) def _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs): @@ -1370,7 +1370,7 @@ def test_and(): verify_and(indata=[x, y], dtype=bool) -def verify_tile(indata, outdata, **kwargs): +def verify_tile_v1(indata, outdata, **kwargs): node = helper.make_node('Tile', inputs=['in'], outputs=['out'], **kwargs) graph = helper.make_graph([node], 'tile_test', @@ -1381,7 +1381,7 @@ def verify_tile(indata, outdata, **kwargs): model = helper.make_model(graph, producer_name='tile_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, [indata], target, ctx, outdata.shape) + tvm_out = get_tvm_output(model, [indata], target, ctx, outdata.shape, opset=1) tvm.testing.assert_allclose(outdata, tvm_out) @@ -1390,7 +1390,7 @@ def test_tile(): repeats = np.random.randint( low=1, high=10, size=(np.ndim(x),)).astype(np.int64) z = np.tile(x, repeats) - verify_tile(x, z, repeats=repeats) + verify_tile_v1(x, z, repeats=repeats) def verify_erf(indata, outdata): From e9a2591c37c11ffd3e2d7aa52fad2e9ab7dc7d6c Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Wed, 23 Oct 2019 16:33:14 -0700 Subject: [PATCH 11/23] Small syntax fixes --- python/tvm/relay/frontend/onnx.py | 23 ++++------------------- 1 file changed, 4 insertions(+), 19 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 4708a48ac863..0776ab89e454 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -1155,7 +1155,7 @@ def __init__(self, shape, dtype): self._shape = shape if shape else {} self._dtype = dtype - def from_onnx(self, graph, opset, outputs): + def from_onnx(self, 
graph, opset): """Construct Relay expression from ONNX graph. Onnx graph is a python protobuf object. @@ -1172,10 +1172,6 @@ def from_onnx(self, graph, opset, outputs): opset : opset version - outputs : list of str - List of names to output, if not provided - then default graph outputs are used. - Returns ------- mod : tvm.relay.Module @@ -1274,13 +1270,7 @@ def from_onnx(self, graph, opset, outputs): self._nodes[k] = op[i] # now return the outputs - # If we havent prespecified output nodes, - # just use the default graph outputs. - if outputs is None: - outputs = [ - self._nodes[self._parse_value_proto(i)] for i in graph.output] - else: - outputs = [self._nodes[i] for i in outputs] + outputs = [self._nodes[self._parse_value_proto(i)] for i in graph.output] outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs) func = _expr.Function(analysis.free_vars(outputs), outputs) return _module.Module.from_expr(func), self._params @@ -1385,8 +1375,7 @@ def _fix_outputs(self, op_name, outputs): def from_onnx(model, shape=None, dtype="float32", - opset=None, - outputs=None): + opset=None): """Convert a ONNX model into an equivalent Relay Function. ONNX graphs are represented as Python Protobuf objects. @@ -1411,10 +1400,6 @@ def from_onnx(model, Override to autodetected opset. This can be helpful for some testing. - outputs : list of str, optional - Names of output nodes. If not defined then the default - outputs of the onnx graph will be used. - Returns ------- mod : tvm.relay.Module @@ -1442,5 +1427,5 @@ def from_onnx(model, opset = model.opset_import[0].version if model.opset_import else 1 except AttributeError: opset = 1 - mod, params = g.from_onnx(graph, opset, outputs) + mod, params = g.from_onnx(graph, opset) return mod, params From cb25dad2fe68d4fff73c49b1697e654a536da315 Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Wed, 23 Oct 2019 16:59:46 -0700 Subject: [PATCH 12/23] Reduced code duplication in split opset versions. --- python/tvm/relay/frontend/onnx.py | 59 +++++++++++++++---------------- 1 file changed, 28 insertions(+), 31 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 0776ab89e454..9862f9e54f6c 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -658,6 +658,25 @@ def _impl_v1(cls, inputs, attr, params): class Slice(OnnxOpConverter): """ Operator converter for Slice. """ + + @classmethod + def _common(cls, starts, ends, axes): + new_axes = [] + new_starts = [] + new_ends = [] + pop_index = 0 + for i in range(max(axes) + 1): + if i in axes: + new_axes.append(i) + new_starts.append(starts[pop_index]) + new_ends.append(ends[pop_index]) + pop_index += 1 + else: + new_axes.append(i) + new_starts.append(0) + new_ends.append(np.iinfo(np.int32).max) + return new_starts, new_ends, new_axes + @classmethod def _impl_v1(cls, inputs, attr, params): if isinstance(attr['starts'], int): @@ -668,22 +687,9 @@ def _impl_v1(cls, inputs, attr, params): # Update the starts and ends according to axes if required. 
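        # _common pads the sparse (starts, ends, axes) triples out to a dense
        # per-axis list: any axis the ONNX node omitted gets the full range
        # [0, INT32_MAX), since strided_slice expects one entry for every
        # leading dimension.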
if isinstance(attr['axes'], int): attr['axes'] = (attr['axes'],) - if (max(attr['axes']) + 1) != len(attr['axes']): - new_axes = [] - new_starts = [] - new_ends = [] - pop_index = 0 - for i in range(max(attr['axes']) + 1): - if i in attr['axes']: - new_axes.append(i) - new_starts.append(attr['starts'][pop_index]) - new_ends.append(attr['ends'][pop_index]) - pop_index += 1 - else: - new_axes.append(i) - new_starts.append(0) - new_ends.append(np.iinfo(np.int32).max) + new_starts, new_ends, new_axes = cls._common( + cls, attr['starts'], attr['ends'], attr['axes']) attr['axes'] = new_axes attr['starts'] = new_starts attr['ends'] = new_ends @@ -705,24 +711,13 @@ def _impl_v10(cls, inputs, attr, params): axes = params[get_name(inputs[3])].asnumpy() if (max(axes + 1) != len(axes)): - new_axes = [] - new_starts = [] - new_ends = [] - pop_index = 0 - for i in range(max(axes) + 1): - if i in axes: - new_axes.append(i) - new_starts.append(starts[pop_index]) - new_ends.append(ends[pop_index]) - pop_index += 1 - else: - new_axes.append(i) - new_starts.append(0) - new_ends.append(np.iinfo(np.int32).max) + new_starts, new_ends, new_axes = cls._common( + cls, starts, ends, axes) starts = new_starts ends = new_ends return _op.strided_slice(inputs[0], begin=starts, end=ends) + class Gather(OnnxOpConverter): """ Operator converter for Gather. """ @@ -731,7 +726,6 @@ def _impl_v1(cls, inputs, attr, params): axis = attr.get('axis', 0) return AttrCvt('take', extras={'axis':axis})(inputs, {}) - #return _op.take(inputs[0], inputs[1], axis) class Greater(OnnxOpConverter): @@ -901,6 +895,7 @@ def _impl_v9(cls, inputs, attr, params): attr['axis'] = -1 return _op.one_hot(indices, on_value, off_value, depth, attr['axis'], dtype=dtype) + class ConstantFill(OnnxOpConverter): """ Operator converter for ConstantFill. """ @@ -929,6 +924,7 @@ def _impl_v1(cls, inputs, attr, params): shape = shape + attr.pop('extra_shape') return _op.full(inputs[0], shape) + class ConstantOfShape(OnnxOpConverter): """ Operator converter for ConstantOfShape. """ @@ -946,6 +942,7 @@ def _impl_v9(cls, inputs, attr, params): value, shape=tuple(static_shape.asnumpy().astype('int32')), dtype=dtype) return output + class Sign(OnnxOpConverter): """ Operator converter for Sign. 
""" @@ -1035,7 +1032,7 @@ def _get_convert_map(opset): # 'MeanVarianceNormalization' # 'Crop' # 'Embedding' - 'Upsample' : Upsample.get_converter(opset), + 'Upsample': Upsample.get_converter(opset), 'SpatialBN': BatchNorm.get_converter(opset), # defs/generator From 123cae8a8714c659ce4e78f9dc16b55aeea46072 Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Wed, 23 Oct 2019 17:09:26 -0700 Subject: [PATCH 13/23] Added batch matmul test --- python/tvm/relay/frontend/onnx.py | 4 ++-- tests/python/frontend/onnx/test_forward.py | 28 ++++++++++++++++++++++ 2 files changed, 30 insertions(+), 2 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 9862f9e54f6c..2e500b5bed0e 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -689,7 +689,7 @@ def _impl_v1(cls, inputs, attr, params): attr['axes'] = (attr['axes'],) if (max(attr['axes']) + 1) != len(attr['axes']): new_starts, new_ends, new_axes = cls._common( - cls, attr['starts'], attr['ends'], attr['axes']) + attr['starts'], attr['ends'], attr['axes']) attr['axes'] = new_axes attr['starts'] = new_starts attr['ends'] = new_ends @@ -712,7 +712,7 @@ def _impl_v10(cls, inputs, attr, params): if (max(axes + 1) != len(axes)): new_starts, new_ends, new_axes = cls._common( - cls, starts, ends, axes) + starts, ends, axes) starts = new_starts ends = new_ends return _op.strided_slice(inputs[0], begin=starts, end=ends) diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 2fb340f36bb7..c943b736a3cc 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -404,6 +404,33 @@ def test_matmul(): tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5) +def test_batch_matmul(): + a_shape = (2, 3, 4, 3) + b_shape = (2, 3, 3, 4) + + a_array = np.random.uniform(size=a_shape).astype('float32') + b_array = np.random.uniform(size=b_shape).astype('float32') + out_np = np.matmul(a_array, b_array) + + mul_node = helper.make_node("MatMul", ["a", "b"], ["out"]) + + graph = helper.make_graph([mul_node], + "matmul_test", + inputs=[helper.make_tensor_value_info("a", + TensorProto.FLOAT, list(a_shape)), + helper.make_tensor_value_info("b", + TensorProto.FLOAT, list(b_shape))], + outputs=[helper.make_tensor_value_info("out", + TensorProto.FLOAT, list(out_np.shape))]) + + model = helper.make_model(graph, producer_name='matmul_test') + + for target, ctx in ctx_list(): + tvm_out = get_tvm_output( + model, [a_array, b_array], target, ctx, out_np.shape) + tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5) + + def verify_lrn(shape, nsize, dtype, alpha=None, beta=None, bias=None): in_array = np.random.uniform(size=shape).astype(dtype) @@ -1451,6 +1478,7 @@ def test_where(): test_ceil() test_clip() test_matmul() + test_batch_matmul() test_gather() test_lrn() test_instance_norm() From 7f07ffa2c9ac4957c35287d17dec82b51d25fe7f Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Wed, 23 Oct 2019 17:18:42 -0700 Subject: [PATCH 14/23] Added unstack split testing. 
--- tests/python/frontend/onnx/test_forward.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index c943b736a3cc..b273ec138b13 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -1065,10 +1065,14 @@ def test_reduce_mean(): def verify_split(indata, outdatas, split, axis=0): indata = np.array(indata).astype(np.float32) outdatas = [np.array(o).astype(np.float32) for o in outdatas] + if split: + split_index = range(len(split)) + else: + split_index = range(len(outdatas)) node = helper.make_node( 'Split', inputs=['input'], - outputs=['output_{}'.format(i) for i in range(len(split))], + outputs=['output_{}'.format(i) for i in range(len(split_index))], axis=axis, split=split ) @@ -1078,7 +1082,7 @@ def verify_split(indata, outdatas, split, axis=0): TensorProto.FLOAT, list(indata.shape))], outputs=[helper.make_tensor_value_info("output_{}".format(i), TensorProto.FLOAT, list(outdatas[i].shape)) - for i in range(len(split)) + for i in range(len(split_index)) ]) model = helper.make_model(graph, producer_name='split_test') @@ -1100,6 +1104,8 @@ def test_split(): # 2D verify_split([[1., 2., 3., 4.], [7., 8., 9., 10.]], [[[1., 2.], [7., 8.]], [[3., 4.], [9., 10.]]], [2, 2], 1) + # Split evenly (unstack) + verify_split([1, 2, 3], [[1], [2], [3]], False) def test_binary_ops(): From 77031981f070b657b347692c48972fbf89ac22e6 Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Wed, 23 Oct 2019 18:25:33 -0700 Subject: [PATCH 15/23] Adde onehot test, needs a little cleanup probably. --- python/tvm/relay/frontend/onnx.py | 2 +- tests/python/frontend/onnx/test_forward.py | 32 ++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 2e500b5bed0e..520c90a6d5ca 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -884,7 +884,7 @@ def _impl_v9(cls, inputs, attr, params): # Extract relay one_hot inputs. indices, depth, values = inputs # Split onnx on off values into two separate expressions. - on_value, off_value = _op.take( + off_value, on_value = _op.take( values, _op.const(0)), _op.take(values, _op.const(1)) # Extract the datatype of the output from on_value. 
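        # (per the ONNX OneHot spec, values[0] holds the "off" value and
        # values[1] the "on" value, which is the ordering the swap above
        # restores.)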
dtype = infer_type(on_value).checked_type.dtype diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index b273ec138b13..f61666a47e7a 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -377,6 +377,37 @@ def test_clip(): {'min': -1.0, 'max': 1.0}) +def test_onehot(): + indices_shape = [10] + indices_array = np.random.randint( + low=0, high=9, size=indices_shape, dtype='int32') + depth = 10 + values = np.asarray([0, 1]) + out_np = np.eye(depth)[indices_array.reshape(-1)] + + onehot_node = helper.make_node( + "OneHot", ["indices", "depth", "values"], ["out"]) + + graph = helper.make_graph([onehot_node], + "onehot_test", + inputs=[helper.make_tensor_value_info("indices", + TensorProto.INT32, indices_shape), + helper.make_tensor_value_info("depth", + TensorProto.INT32, [1]), + helper.make_tensor_value_info("values", + TensorProto.INT32, values.shape)], + initializer=[helper.make_tensor("depth", TensorProto.INT32, [1], [depth]), + helper.make_tensor("values", TensorProto.INT32, values.shape, values)], + outputs=[helper.make_tensor_value_info("out", TensorProto.INT32, out_np.shape)]) + + model = helper.make_model(graph, producer_name="onehot_test") + + for target, ctx in ctx_list(): + tvm_out = get_tvm_output( + model, [indices_array], target, ctx, out_np.shape) + tvm.testing.assert_allclose(out_np, tvm_out, rtol=1e-5, atol=1e-5) + + def test_matmul(): a_shape = (4, 3) b_shape = (3, 4) @@ -1483,6 +1514,7 @@ def test_where(): test_floor() test_ceil() test_clip() + test_onehot() test_matmul() test_batch_matmul() test_gather() From 4988d33a5af8840eb3578333cd44416342af0ea5 Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Thu, 24 Oct 2019 10:52:40 -0700 Subject: [PATCH 16/23] Replaced deprecated constant fill with constantofshape and updated tests accordingly. --- python/tvm/relay/frontend/onnx.py | 38 ------------ tests/python/frontend/onnx/test_forward.py | 70 +++++++++++----------- 2 files changed, 35 insertions(+), 73 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 520c90a6d5ca..e7a185c352c0 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -896,35 +896,6 @@ def _impl_v9(cls, inputs, attr, params): return _op.one_hot(indices, on_value, off_value, depth, attr['axis'], dtype=dtype) -class ConstantFill(OnnxOpConverter): - """ Operator converter for ConstantFill. - """ - @classmethod - def _impl_v1(cls, inputs, attr, params): - num_inputs = len(inputs) - if 'shape' in attr: - if num_inputs > 1: - raise ImportError( - "Can't set shape and input tensor at a time") - shape = attr.pop('shape') - else: - if num_inputs == 1: - raise ImportError( - "Either shape attribute or input should be set") - if 'input_as_shape' in attr and attr['input_as_shape']: - shape = params[get_name(inputs[0])].asnumpy() - else: - if 'extra_shape' in attr: - raise tvm.error.OpAttributeInvalid('Attribute "extra_shape" not ' - 'supported with "fill_like" for ' - 'operator ConstantFill.') - return _op.full_like(inputs[0], inputs[1]) - - if 'extra_shape' in attr: - shape = shape + attr.pop('extra_shape') - return _op.full(inputs[0], shape) - - class ConstantOfShape(OnnxOpConverter): """ Operator converter for ConstantOfShape. 
""" @@ -1021,7 +992,6 @@ def _get_convert_map(opset): 'ThresholdedRelu': ThresholdedRelu.get_converter(opset), 'ScaledTanh': ScaledTanh.get_converter(opset), 'ParametricSoftplus': ParametricSoftPlus.get_converter(opset), - 'ConstantFill': ConstantFill.get_converter(opset), 'ConstantOfShape': ConstantOfShape.get_converter(opset), # 'GivenTensorFill' 'FC': AttrCvt('dense', ignores=['axis', 'axis_w']), @@ -1239,14 +1209,6 @@ def from_onnx(self, graph, opset): shape=list(t_proto.dims), dtype=array.dtype) else: - if op_name == "ConstantFill": - fill_value = attr.get('value', 0.0) - dtype = attr.get('dtype', b'int32').decode("utf-8") - i_name = node.output[0] - self._params[i_name] = fill_value - self._nodes[i_name] = new_var(node.output[0], shape=(), dtype=dtype) - inputs.append(self._nodes[i_name]) - i_name = self._parse_value_proto(node) attr['tvm_custom'] = {} attr['tvm_custom']['name'] = i_name diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index f61666a47e7a..49ef0d72c394 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -25,7 +25,7 @@ from tvm.contrib import graph_runtime from nnvm.testing.config import ctx_list import onnx -from onnx import helper, TensorProto +from onnx import helper, TensorProto, mapping import scipy @@ -914,46 +914,45 @@ def test_forward_arg_min_max(): verify_argmax([3, 4, 4], axis, keepdims) -def verify_constantfill(is_shape, input_dim, out_dim, value, dtype, **kwargs): - input_a = np.random.uniform(size=input_dim).astype(dtype) - out = np.empty(shape=out_dim, dtype=dtype) +def verify_constantofshape(input_dim, value, dtype): + out = np.empty(shape=input_dim, dtype=dtype) out.fill(value) - if is_shape == True: - fill_node = helper.make_node( - "ConstantFill", [], ["out"], shape=input_dim, value=value, **kwargs) - else: - fill_node = helper.make_node("ConstantFill", ["input_a"], [ - "out"], value=value, dtype=dtype, **kwargs) - - if is_shape == True: - inputs = [] - else: - inputs = [helper.make_tensor_value_info("input_a", - TensorProto.FLOAT, list(input_dim))] - - graph = helper.make_graph([fill_node], - "fill_test", - inputs, - outputs=[helper.make_tensor_value_info("out", - TensorProto.FLOAT, list(out.shape))]) + fill_node = helper.make_node("ConstantOfShape", ["input"], ["output"], + value=helper.make_tensor( + 'value', + mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(dtype)], + (1, ), (value, ))) + + inputs = [ + helper.make_tensor_value_info("input", TensorProto.FLOAT, input_dim) + ] + + graph = helper.make_graph( + [fill_node], + "fill_test", + inputs, + outputs=[ + helper.make_tensor_value_info("output", TensorProto.FLOAT, + list(out.shape)) + ], + initializer=[ + helper.make_tensor("input", TensorProto.INT32, (len(input_dim), ), + input_dim) + ]) model = helper.make_model(graph, producer_name='fill_test') for target, ctx in ctx_list(): - if is_shape == True: - tvm_out = get_tvm_output(model, [], target, ctx, out.shape) - else: - tvm_out = get_tvm_output(model, [input_a], target, ctx, out.shape) + tvm_out = get_tvm_output(model, [], target, ctx, out.shape) tvm.testing.assert_allclose(out, tvm_out, rtol=1e-5, atol=1e-5) -def test_constantfill(): - verify_constantfill(True, (2, 3, 4, 5), (2, 3, 4, 5), 10, 'float32') - verify_constantfill(False, (2, 3, 4, 5), (2, 3, 4, 5), 10, 'float32') - verify_constantfill(True, (2, 3, 4, 5), (2, 3, 4, 5, 4, - 5, 6), 10, 'float32', extra_shape=(4, 5, 6)) +def test_constantofshape(): + verify_constantofshape((2, 3, 4, 5), 
10, 'float32') + verify_constantofshape((3, 3), 0, 'int32') + verify_constantofshape((1, 2, 3), -1, 'float32') def verify_pad(indata, pads, mode='constant', value=0.0): @@ -1319,8 +1318,8 @@ def test_resnet(): # check_torch_conversion(torchvision.models.resnet101, (1,3,224,224)) # def test_alexnet(): - # Torch's ONNX export does not support the adaptive pooling used by AlexNet? - # check_torch_conversion(torchvision.models.alexnet, (1,3,224,224)) +# Torch's ONNX export does not support the adaptive pooling used by AlexNet? +# check_torch_conversion(torchvision.models.alexnet, (1,3,224,224)) # Torch's ONNX export does not support the adaptive pooling used by vgg16? # def test_vgg16(): @@ -1445,7 +1444,8 @@ def verify_tile_v1(indata, outdata, **kwargs): model = helper.make_model(graph, producer_name='tile_test') for target, ctx in ctx_list(): - tvm_out = get_tvm_output(model, [indata], target, ctx, outdata.shape, opset=1) + tvm_out = get_tvm_output( + model, [indata], target, ctx, outdata.shape, opset=1) tvm.testing.assert_allclose(outdata, tvm_out) @@ -1527,7 +1527,7 @@ def test_where(): test_forward_hardsigmoid() test_forward_arg_min_max() test_softmax() - test_constantfill() + test_constantofshape() test_reduce_max() test_reduce_min() test_reduce_sum() From b7e2644371ca4d919f309d691d2d22a7c1caba56 Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Thu, 24 Oct 2019 11:28:52 -0700 Subject: [PATCH 17/23] Added tests for new opset version of slice and tile. --- tests/python/frontend/onnx/test_forward.py | 97 ++++++++++++++++++++++ 1 file changed, 97 insertions(+) diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 49ef0d72c394..0a3ff44527c2 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -329,12 +329,75 @@ def _test_slice_iteration_v1(indata, outdata, starts, ends, axes=None): tvm.testing.assert_allclose(outdata, tvm_out) +def _test_slice_iteration_v10(indata, outdata, starts, ends, axes=None): + if isinstance(starts, int): + starts = (starts, ) + if isinstance(ends, int): + ends = (ends, ) + if isinstance(axes, int): + axes = (axes, ) + starts = np.asarray(starts) + ends = np.asarray(ends) + inputs = [ + helper.make_tensor_value_info("data", TensorProto.FLOAT, + list(indata.shape)), + helper.make_tensor_value_info("starts", TensorProto.INT32, + list(starts.shape)), + helper.make_tensor_value_info("ends", TensorProto.INT32, + list(ends.shape)) + ] + initializer = [ + helper.make_tensor("starts", TensorProto.INT32, list(starts.shape), + starts), + helper.make_tensor("ends", TensorProto.INT32, list(ends.shape), ends) + ] + + if axes: + axes = np.asarray(axes) + y = helper.make_node("Slice", ["data", "starts", "ends", "axes"], + ["out"]) + inputs.append( + helper.make_tensor_value_info("axes", TensorProto.INT32, + list(axes.shape))) + initializer.append( + helper.make_tensor("axes", TensorProto.INT32, list(axes.shape), + axes)) + else: + y = helper.make_node("Slice", ["data", "starts", "ends"], ["out"]) + + graph = helper.make_graph([y], + 'slice_test', + inputs=inputs, + outputs=[ + helper.make_tensor_value_info( + "out", TensorProto.FLOAT, + list(outdata.shape)) + ], + initializer=initializer) + model = helper.make_model(graph, producer_name='slice_test') + + for target, ctx in ctx_list(): + tvm_out = get_tvm_output(model, + indata, + target, + ctx, + outdata.shape, + 'float32', + opset=10) + + tvm.testing.assert_allclose(outdata, tvm_out) + + def test_slice(): x = 
np.random.randn(20, 10, 5).astype(np.float32) _test_slice_iteration_v1(x, x[0:3, 0:10], (0, 0), (3, 10), (0, 1)) _test_slice_iteration_v1(x, x[:, :, 3:4], (0, 0, 3), (20, 10, 4)) _test_slice_iteration_v1(x, x[:, 1:1000], (1), (1000), (1)) _test_slice_iteration_v1(x, x[:, 0:-1], (0), (-1), (1)) + _test_slice_iteration_v10(x, x[0:3, 0:10], (0, 0), (3, 10), (0, 1)) + _test_slice_iteration_v10(x, x[:, :, 3:4], (0, 0, 3), (20, 10, 4)) + _test_slice_iteration_v10(x, x[:, 1:1000], (1), (1000), (1)) + _test_slice_iteration_v10(x, x[:, 0:-1], (0), (-1), (1)) def _test_onnx_op_elementwise(inshape, outfunc, npargs, dtype, opname, kwargs): @@ -1449,12 +1512,46 @@ def verify_tile_v1(indata, outdata, **kwargs): tvm.testing.assert_allclose(outdata, tvm_out) +def verify_tile_v6(indata, repeats, outdata): + node = helper.make_node('Tile', + inputs=['input', 'repeats'], + outputs=['out']) + graph = helper.make_graph( + [node], + 'tile_test', + inputs=[ + helper.make_tensor_value_info("input", TensorProto.FLOAT, + list(indata.shape)), + helper.make_tensor_value_info("repeats", TensorProto.INT64, + list(repeats.shape)) + ], + outputs=[ + helper.make_tensor_value_info("out", TensorProto.FLOAT, + list(outdata.shape)) + ], + initializer=[ + helper.make_tensor("repeats", TensorProto.INT64, + list(repeats.shape), repeats) + ]) + + model = helper.make_model(graph, producer_name='tile_test') + + for target, ctx in ctx_list(): + tvm_out = get_tvm_output(model, [indata], + target, + ctx, + outdata.shape, + opset=6) + tvm.testing.assert_allclose(outdata, tvm_out) + + def test_tile(): x = np.random.rand(2, 3, 4, 5).astype(np.float32) repeats = np.random.randint( low=1, high=10, size=(np.ndim(x),)).astype(np.int64) z = np.tile(x, repeats) verify_tile_v1(x, z, repeats=repeats) + verify_tile_v6(x, repeats, z) def verify_erf(indata, outdata): From 14737d290b0c1e371fe386c66630b176ba08e295 Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Thu, 24 Oct 2019 11:41:57 -0700 Subject: [PATCH 18/23] lint clean up --- python/tvm/relay/frontend/common.py | 17 +++++++++++++---- python/tvm/relay/frontend/onnx.py | 13 ++++++------- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py index c5f6102dc54b..2be8f296a92b 100644 --- a/python/tvm/relay/frontend/common.py +++ b/python/tvm/relay/frontend/common.py @@ -474,9 +474,13 @@ def infer_channels(inputs, transpose=False): out_shapes = [get_const_tuple(out_type.checked_type.shape)] channels = out_shapes[0][0] if not transpose else out_shapes[0][1] return channels - + def infer_value(input_val, params): + """A hack for getting the value of an expression by evaluating a + portion of the relay graph. This is often needed for functions that + whose output shape depends on the value of a tensor. + """ from tvm.contrib import graph_runtime # Check that all free variables have associated parameters. assert all(var.name_hint in params.keys() for var in analysis.free_vars( @@ -492,7 +496,12 @@ def infer_value(input_val, params): def infer_value_simulated(input_val, params): - # Keep track of which params we need to simulate + """Extention to infer_value that can be used when some input + values are missing. This function creats dummy inputs with the same + shape and random values then calls infer_value. This is helpful when + implementing certain onnx operators where we need to evaluate the graph + to determine a static shape. + """ fake_params = [] # Add a fake copy of all missing params. 
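    # (the dummy entries are popped back out of params below, so the
    # caller's dictionary is left exactly as it was passed in.)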
for free_param in analysis.free_vars(input_val): @@ -506,8 +515,8 @@ def infer_value_simulated(input_val, params): # Now infer the value. output_value = infer_value(input_val, params) # Clean fake params out of param dictionary. - for fp in fake_params: - params.pop(fp.name_hint, None) + for fake_p in fake_params: + params.pop(fake_p.name_hint, None) return output_value diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index e7a185c352c0..cb7e0da68972 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -20,6 +20,7 @@ import logging import numpy as np +from onnx.numpy_helper import to_array import tvm from ... import nd as _nd from .. import analysis @@ -29,7 +30,6 @@ from .. import op as _op from .common import AttrCvt, Renamer from .common import get_relay_op, new_var, infer_shape, infer_channels, infer_type, infer_value, infer_value_simulated, get_name -from onnx.numpy_helper import to_array __all__ = ['from_onnx'] @@ -120,7 +120,7 @@ def _impl_v1(cls, inputs, attr, params): axis = int(attr.get('axis', 0)) inputs[1] = _op.expand_dims(inputs[1], axis=axis, num_newaxis=2) return get_relay_op(op_name)(*inputs) - + class Pool(OnnxOpConverter): """ A helper class for pool op converters. @@ -295,9 +295,8 @@ def _impl_v1(cls, inputs, attr, params): # Reshape output to original dimensions. return _op.reshape(output, [*a_shape[:-2], a_shape[-2], b_shape[-1]]) # Otherwise a simple dense op will get the job done. - else: - input_1_t = _op.transpose(inputs[1], axes=(1, 0)) - return _op.nn.dense(inputs[0], input_1_t) + input_1_t = _op.transpose(inputs[1], axes=(1, 0)) + return _op.nn.dense(inputs[0], input_1_t) class MaxPool(Pool): @@ -710,8 +709,8 @@ def _impl_v10(cls, inputs, attr, params): if len(inputs) >= 4: axes = params[get_name(inputs[3])].asnumpy() - if (max(axes + 1) != len(axes)): - new_starts, new_ends, new_axes = cls._common( + if max(axes + 1) != len(axes): + new_starts, new_ends, _ = cls._common( starts, ends, axes) starts = new_starts ends = new_ends From eea6fc4a0e8803c9ed69d16dc04b2831ec03ced3 Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Thu, 24 Oct 2019 12:01:22 -0700 Subject: [PATCH 19/23] Lint fixes --- python/tvm/relay/frontend/onnx.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index cb7e0da68972..312a55a2ea64 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -18,18 +18,17 @@ """ONNX: Open Neural Network Exchange frontend for Relay.""" from __future__ import absolute_import as _abs -import logging import numpy as np from onnx.numpy_helper import to_array import tvm from ... import nd as _nd from .. import analysis -from .. import transform as _transform from .. import expr as _expr from .. import module as _module from .. 
import op as _op from .common import AttrCvt, Renamer -from .common import get_relay_op, new_var, infer_shape, infer_channels, infer_type, infer_value, infer_value_simulated, get_name +from .common import get_relay_op, new_var, infer_shape, infer_channels +from .common import infer_type, infer_value, infer_value_simulated, get_name __all__ = ['from_onnx'] From 0c108a77b99d2e03f1c650e2eb03b6dbf6e187dc Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Thu, 24 Oct 2019 12:38:07 -0700 Subject: [PATCH 20/23] Changed onnx dependency --- python/tvm/relay/frontend/onnx.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 312a55a2ea64..3be58f3c071b 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -19,7 +19,6 @@ from __future__ import absolute_import as _abs import numpy as np -from onnx.numpy_helper import to_array import tvm from ... import nd as _nd from .. import analysis @@ -32,6 +31,17 @@ __all__ = ['from_onnx'] + +def get_numpy(tensor_proto): + """Grab data in TensorProto and convert to numpy array.""" + try: + from onnx.numpy_helper import to_array + except ImportError as e: + raise ImportError( + "Unable to import onnx which is required {}".format(e)) + return to_array(tensor_proto) + + def dimension_picker(prefix, surfix=''): def _impl(attr): kernel = attr['kernel_shape'] @@ -43,6 +53,7 @@ def _impl(attr): return _impl + def revert_caffe2_pad(pads): """Caffe2 requires two times the normal padding.""" if len(pads) == 4: @@ -900,7 +911,7 @@ class ConstantOfShape(OnnxOpConverter): @classmethod def _impl_v9(cls, inputs, attr, params): if 'value' in attr: - np_value = to_array(attr.pop('value'))[0] + np_value = get_numpy(attr.pop('value'))[0] value = _expr.const(np_value) dtype = np_value.dtype.name else: @@ -1249,8 +1260,7 @@ def _parse_dtype(self, value_proto, dtype): return dtype def _parse_array(self, tensor_proto): - """Grab data in TensorProto and convert to numpy array.""" - np_array = to_array(tensor_proto).reshape(tuple(tensor_proto.dims)) + np_array = get_numpy(tensor_proto).reshape(tuple(tensor_proto.dims)) return _nd.array(np_array) def _parse_attr(self, attr_proto): From 89876cbd696c024cbcd3db3cb1514d63867a4161 Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Fri, 25 Oct 2019 09:36:49 -0700 Subject: [PATCH 21/23] Went back to caffe2 runtime for CI integration. 
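The CI images do not currently ship onnxruntime, so reference outputs are
produced with caffe2's ONNX backend again. The comparison pattern the tests
rely on, sketched for a model whose first graph input is the only tensor
that needs to be fed:

    import caffe2.python.onnx.backend

    def reference_output(model, x, dtype='float32'):
        # Run the ONNX model through caffe2 and return its first output,
        # keyed by the input name the graph declares.
        prepared = caffe2.python.onnx.backend.prepare(model)
        feed = {model.graph.input[0].name: x.astype(dtype)}
        return prepared.run(feed)[0]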
--- tests/python/frontend/onnx/test_forward.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 0a3ff44527c2..2d2265b57b95 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -77,10 +77,11 @@ def get_tvm_output(graph_def, input_data, target, ctx, output_shape=None, output return tvm_output.asnumpy() -def get_onnx_output(model, x, dtype='float32'): - import onnxruntime.backend as backend - prepared_backend = backend.prepare(model) - c2_out = prepared_backend.run(x.astype(dtype))[0] +def get_caffe2_output(model, x, dtype='float32'): + import caffe2.python.onnx.backend + prepared_backend = caffe2.python.onnx.backend.prepare(model) + W = {model.graph.input[0].name: x.astype(dtype)} + c2_out = prepared_backend.run(W)[0] return c2_out @@ -88,7 +89,7 @@ def verify_onnx_forward_impl(graph_file, data_shape, out_shape): dtype = 'float32' x = np.random.uniform(size=data_shape) model = onnx.load_model(graph_file) - c2_out = get_onnx_output(model, x, dtype) + c2_out = get_caffe2_output(model, x, dtype) for target, ctx in ctx_list(): tvm_out = get_tvm_output(model, x, target, ctx, out_shape, dtype) tvm.testing.assert_allclose(c2_out, tvm_out, rtol=1e-5, atol=1e-5) @@ -1371,7 +1372,7 @@ def check_torch_conversion(model, input_size): onnx_model = onnx.load(file_name) for target, ctx in ctx_list(): input_data = np.random.uniform(size=input_size).astype('int32') - c2_out = get_onnx_output(onnx_model, input_data) + c2_out = get_caffe2_output(onnx_model, input_data) tvm_out = get_tvm_output(onnx_model, input_data, target, ctx) tvm.testing.assert_allclose(c2_out, tvm_out) From b18de8b39d0316dab100a35927f379b65de788c7 Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Mon, 28 Oct 2019 11:30:21 -0700 Subject: [PATCH 22/23] Rebase and small typo/syntax changes. --- python/tvm/relay/frontend/common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/tvm/relay/frontend/common.py b/python/tvm/relay/frontend/common.py index 2be8f296a92b..25ba0ef31d72 100644 --- a/python/tvm/relay/frontend/common.py +++ b/python/tvm/relay/frontend/common.py @@ -488,7 +488,7 @@ def infer_value(input_val, params): func = _expr.Function(analysis.free_vars(input_val), input_val) with tvm.relay.build_config(opt_level=0): graph, lib, params = tvm.relay.build(func, target="llvm", params=params) - ctx = tvm.context("llvm", 0) + ctx = tvm.cpu(0) m = graph_runtime.create(graph, lib, ctx) m.set_input(**params) m.run() @@ -497,7 +497,7 @@ def infer_value(input_val, params): def infer_value_simulated(input_val, params): """Extention to infer_value that can be used when some input - values are missing. This function creats dummy inputs with the same + values are missing. This function creates dummy inputs with the same shape and random values then calls infer_value. This is helpful when implementing certain onnx operators where we need to evaluate the graph to determine a static shape. From bbf203c8e3047ca7f5d6daeed1dcea6db7bf36ea Mon Sep 17 00:00:00 2001 From: Josh Fromm Date: Mon, 28 Oct 2019 11:38:30 -0700 Subject: [PATCH 23/23] Added hard casting of onehot attributes to int. 
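relay's one_hot is handed plain python ints for depth and axis here: the
depth recovered through infer_value comes back as a numpy scalar
(asnumpy()[0] yields e.g. np.int64 rather than int), so it is forced
through int(), and the axis attribute gets the same cast for safety. A
small illustration of the mismatch being guarded against:

    import numpy as np

    raw_depth = np.array([16], dtype='int64')[0]  # np.int64 scalar, not a
                                                  # python int
    depth = int(raw_depth)                        # what the converter now
                                                  # passes to relay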
--- python/tvm/relay/frontend/onnx.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 3be58f3c071b..9fdc4292b35f 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -898,11 +898,16 @@ def _impl_v9(cls, inputs, attr, params): # Extract the datatype of the output from on_value. dtype = infer_type(on_value).checked_type.dtype # Convert depth into an integer. - depth = infer_value(depth, params).asnumpy()[0] + depth = int(infer_value(depth, params).asnumpy()[0]) # set default value when axis is not set in the model if 'axis' not in attr: attr['axis'] = -1 - return _op.one_hot(indices, on_value, off_value, depth, attr['axis'], dtype=dtype) + return _op.one_hot(indices, + on_value, + off_value, + depth, + int(attr['axis']), + dtype=dtype) class ConstantOfShape(OnnxOpConverter):
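For reference, a minimal end-to-end exercise of the opset 10 Slice path
added in this series (names and shapes are illustrative). starts and ends
are supplied as initializers because the converter looks them up in params:

    import numpy as np
    from onnx import helper, TensorProto
    from tvm import relay

    starts = np.array([0, 1], dtype='int32')
    ends = np.array([2, 4], dtype='int32')

    node = helper.make_node('Slice', ['data', 'starts', 'ends'], ['out'])
    graph = helper.make_graph(
        [node], 'slice10_example',
        inputs=[helper.make_tensor_value_info('data', TensorProto.FLOAT, [4, 4]),
                helper.make_tensor_value_info('starts', TensorProto.INT32, [2]),
                helper.make_tensor_value_info('ends', TensorProto.INT32, [2])],
        outputs=[helper.make_tensor_value_info('out', TensorProto.FLOAT, [2, 3])],
        initializer=[helper.make_tensor('starts', TensorProto.INT32, [2], starts),
                     helper.make_tensor('ends', TensorProto.INT32, [2], ends)])
    model = helper.make_model(graph, producer_name='slice10_example')

    # Yields x[0:2, 1:4] for a (4, 4) input once compiled and run.
    mod, params = relay.frontend.from_onnx(model, shape={'data': (4, 4)}, opset=10)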