From a73e42169686e41e712c777fc976248869fbc85b Mon Sep 17 00:00:00 2001 From: Matthew Date: Wed, 28 Apr 2021 13:12:42 -0600 Subject: [PATCH 1/7] support same lower and maxpool in autopad --- python/tvm/relay/frontend/onnx.py | 51 +++++++++++++++++++--- tests/python/frontend/onnx/test_forward.py | 2 - 2 files changed, 44 insertions(+), 9 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index deb29480d807..e7f0f059304c 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -276,6 +276,7 @@ class Pool(OnnxOpConverter): def _impl_v1(cls, inputs, attr, params): data = inputs[0] input_shape = infer_shape(data) + input_dtype = infer_type(data).checked_type.dtype ndim = len(input_shape) if "auto_pad" in attr: attr["auto_pad"] = attr["auto_pad"].decode("utf-8") @@ -293,7 +294,19 @@ def _impl_v1(cls, inputs, attr, params): else: # Warning: Pool does not yet support dynamic shapes, # one will need to run dynamic_to_static on this model after import - data = autopad(data, attr["strides"], attr["kernel_shape"], [1] * ndim, ndim) + if "int" in input_dtype: + pad_val = np.iinfo(np.dtype(input_dtype)).min + else: + pad_val = np.finfo(np.dtype(input_dtype)).min + data = autopad( + data, + attr.get("strides", [1] * (ndim - 2)), + attr["kernel_shape"], + [1] * ndim, + ndim, + pad_value=pad_val, + mode=attr["auto_pad"], + ) elif attr["auto_pad"] == "VALID": attr["pads"] = tuple([0 for i in range(ndim - 2)]) elif attr["auto_pad"] == "NOTSET": @@ -356,7 +369,17 @@ def _impl_v1(cls, inputs, attr, params): return AttrCvt(op_name="instance_norm")(inputs, attr, params) -def autopad(data, strides, kernel_shape, dilations, ndim, pad_type="constant", deconv=False): +def autopad( + data, + strides, + kernel_shape, + dilations, + ndim, + pad_type="constant", + deconv=False, + mode="SAME_UPPER", + pad_value=0.0, +): """ Perform autopadding with dynamic input shapes """ @@ -391,14 +414,19 @@ def autopad(data, strides, kernel_shape, dilations, ndim, pad_type="constant", d pad_after = total_pad - pad_before # combine - pad = _op.concatenate( - [_op.reshape(pad_before, [-1, 1]), _op.reshape(pad_after, [-1, 1])], axis=1 - ) + if "LOWER" in mode: + pad = _op.concatenate( + [_op.reshape(pad_after, [-1, 1]), _op.reshape(pad_before, [-1, 1])], axis=1 + ) + else: + pad = _op.concatenate( + [_op.reshape(pad_before, [-1, 1]), _op.reshape(pad_after, [-1, 1])], axis=1 + ) # pad N and C with zeros pad = _op.concatenate([_op.const(np.zeros([2, 2], dtype="int64"), dtype="int64"), pad], axis=0) - return _op.nn.pad(data, fold_constant(pad), _op.const(0.0), pad_type) + return _op.nn.pad(data, fold_constant(pad), _op.const(pad_value), pad_type) class Conv(OnnxOpConverter): @@ -427,6 +455,7 @@ def _impl_v1(cls, inputs, attr, params): attr["kernel_shape"], attr.get("dilations", [1] * (ndim - 2)), ndim, + mode=attr["auto_pad"], ) elif attr["auto_pad"] == "VALID": attr["pads"] = tuple([0 for i in range(ndim - 2)]) @@ -485,6 +514,7 @@ def _impl_v1(cls, inputs, attr, params): attr.get("dilations", [1] * (ndim - 2)), ndim, deconv=True, + mode=attr["auto_pad"], ) elif attr["auto_pad"] == "VALID": attr["pads"] = tuple([0 for i in range(ndim - 2)]) @@ -757,7 +787,14 @@ def _impl_v1(cls, inputs, attr, params): if attr["auto_pad"] in ("SAME_UPPER", "SAME_LOWER"): # Warning: LpPool does not yet support dynamic shapes, # one will need to run dynamic_to_static on this model after import - data = autopad(data, attr["strides"], attr["kernel_shape"], [1] * ndim, ndim) + data = 
autopad( + data, + attr["strides"], + attr["kernel_shape"], + [1] * ndim, + ndim, + mode=attr["auto_pad"], + ) elif attr["auto_pad"] == "VALID": attr["pads"] = tuple([0 for i in range(ndim - 2)]) elif attr["auto_pad"] == "NOTSET": diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index e11689cc1232..aa43af4ecffc 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -4213,8 +4213,6 @@ def verify_cumsum(indata, axis, exclusive=0, reverse=0, type="float32"): "test_isinf_positive/", "test_matmulinteger/", "test_maxpool_2d_dilations/", - "test_maxpool_2d_same_lower/", - "test_maxpool_2d_same_upper/", "test_maxpool_with_argmax_2d_precomputed_pads/", "test_maxpool_with_argmax_2d_precomputed_strides/", "test_maxunpool_export_with_output_shape/", From 308581c0ca6bab848dad3d880e8255aebb89681f Mon Sep 17 00:00:00 2001 From: Matthew Brookhart Date: Thu, 29 Apr 2021 14:39:55 -0600 Subject: [PATCH 2/7] fix isinf tests --- python/tvm/relay/frontend/onnx.py | 18 +++++++++++++++++- tests/python/frontend/onnx/test_forward.py | 4 ++-- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index a695e0002b34..48f5b59377f8 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -2234,6 +2234,22 @@ def _impl_v1(cls, inputs, attr, params): ) +class IsInf(OnnxOpConverter): + """Operator converter for IsInf.""" + + @classmethod + def _impl_v10(cls, inputs, attr, params): + detect_negative = attr.get("detect_negative", 1) + detect_positive = attr.get("detect_positive", 1) + dtype = infer_type(inputs[0]).checked_type.dtype + isinf = _op.isinf(inputs[0]) + if not detect_negative: + isinf = isinf * (inputs[0] > _op.const(0, dtype)) + if not detect_positive: + isinf = isinf * (inputs[0] < _op.const(0, dtype)) + return isinf + + class MaxRoiPool(OnnxOpConverter): """Operator converter for MaxRoiPool.""" @classmethod def _impl_v1(cls, inputs, attr, params): @@ -2777,7 +2793,7 @@ def _get_convert_map(opset): "Floor": Renamer("floor"), "Ceil": Renamer("ceil"), "Round": Renamer("round"), - "IsInf": Renamer("isinf"), + "IsInf": IsInf.get_converter(opset), "IsNaN": Renamer("isnan"), "Sqrt": Renamer("sqrt"), "Relu": Renamer("relu"), diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 1a3d0d4ac6e0..047926128e18 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -4209,8 +4209,8 @@ def verify_cumsum(indata, axis, exclusive=0, reverse=0, type="float32"): "test_eyelike_populate_off_main_diagonal/", "test_eyelike_with_dtype/", "test_eyelike_without_dtype/", - "test_isinf_negative/", - "test_isinf_positive/", + # "test_isinf_negative/", + # "test_isinf_positive/", "test_matmulinteger/", "test_maxpool_2d_dilations/", "test_maxpool_2d_same_lower/", From f8c07278813010335c32263a11f4425ac5b7d7d3 Mon Sep 17 00:00:00 2001 From: Matthew Date: Thu, 29 Apr 2021 16:56:53 -0600 Subject: [PATCH 3/7] lower tolerance on roialign test because the onnx result is cropped to 4 decimal places --- tests/python/frontend/onnx/test_forward.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 9d0d87949553..d5ad21892814 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -4209,8 +4209,6 @@ def 
verify_cumsum(indata, axis, exclusive=0, reverse=0, type="float32"): "test_eyelike_populate_off_main_diagonal/", "test_eyelike_with_dtype/", "test_eyelike_without_dtype/", - # "test_isinf_negative/", - # "test_isinf_positive/", "test_matmulinteger/", "test_maxpool_2d_dilations/", "test_maxpool_with_argmax_2d_precomputed_pads/", @@ -4231,7 +4229,6 @@ def verify_cumsum(indata, axis, exclusive=0, reverse=0, type="float32"): "test_reversesequence_batch/", "test_reversesequence_time/", "test_rnn_seq_length/", - "test_roialign/", "test_round/", "test_scan9_sum/", "test_scan_sum/", @@ -4266,6 +4263,12 @@ def test_onnx_nodes(test): if failure in test: pytest.skip() break + atol = 1e-5 + rtol = 1e-5 + if "roialign" in test: + # for some reason the ONNX test crops the + # roialign results to 4 decimal places + atol = 1e-4 onnx_model = onnx.load(test + "/model.onnx") inputs = [] outputs = [] @@ -4283,10 +4286,10 @@ def test_onnx_nodes(test): raise ImportError(str(tensor) + " not labeled as an import or an output") tvm_val = get_tvm_output_with_vm(onnx_model, inputs, "llvm", tvm.cpu(0)) if len(outputs) == 1: - tvm.testing.assert_allclose(outputs[0], tvm_val, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(outputs[0], tvm_val, rtol=rtol, atol=atol) else: for output, val in zip(outputs, tvm_val): - tvm.testing.assert_allclose(output, val, rtol=1e-5, atol=1e-5) + tvm.testing.assert_allclose(output, val, rtol=rtol, atol=atol) def test_wrong_input(): From 7fac8008f9b010a4190cedeea9431b4aff3e3985 Mon Sep 17 00:00:00 2001 From: Matthew Date: Thu, 29 Apr 2021 17:19:51 -0600 Subject: [PATCH 4/7] slow support for bottom-k --- python/tvm/relay/frontend/onnx.py | 27 +++++++++++++++++++++- tests/python/frontend/onnx/test_forward.py | 1 - 2 files changed, 26 insertions(+), 2 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 9a900bdbf557..b4aaa4a4cf36 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -2265,7 +2265,32 @@ def _impl_v1(cls, inputs, attr, params): largest = attr.get("largest", 1) if largest == 0: - raise NotImplementedError("TVM only supports finding TopK largest elements") + # TODO(mbrookhart): optimize this by adding a smallest attribute to topi if this + # ever becomes a bottleneck + ndim = len(infer_shape(inputs[0])) + if axis < 0: + axis += ndim + sort = _op.sort(inputs[0], axis=axis) + argsort = _op.argsort(inputs[0], axis=axis, dtype="int64") + begin = [0] * ndim + stride = [1] * ndim + end = _op.concatenate( + [ + _op.const([np.iinfo(np.int64).max] * axis, dtype="int64"), + inputs[1], + _op.const([np.iinfo(np.int64).max] * (ndim - axis - 1), dtype="int64"), + ], + axis=0, + ) + return _expr.TupleWrapper( + _expr.Tuple( + [ + _op.strided_slice(sort, begin, end, stride), + _op.strided_slice(argsort, begin, end, stride), + ] + ), + 2, + ) return _op.topk(inputs[0], inputs[1], axis=axis, dtype="int64") diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index d5ad21892814..c25dc0165162 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -4247,7 +4247,6 @@ def verify_cumsum(indata, axis, exclusive=0, reverse=0, type="float32"): "test_tfidfvectorizer_tf_onlybigrams_levelempty/", "test_tfidfvectorizer_tf_onlybigrams_skip5/", "test_tfidfvectorizer_tf_uniandbigrams_skip5/", - "test_top_k_smallest/", "test_unique_not_sorted_without_axis/", "test_unique_sorted_with_axis/", "test_unique_sorted_with_axis_3d/", 
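[Reviewer note, not part of the patch series] The bottom-k path PATCH 4 adds (sort + argsort + strided_slice, since topi's topk only returns the largest elements) is, semantically, the computation below. This is a minimal NumPy sketch of the graph the converter emits, not code from the patch; `bottom_k` is a name invented here for illustration, and tie-breaking order is not guaranteed to match ONNX exactly.

    import numpy as np

    def bottom_k(x, k, axis=-1):
        # Sort values and indices along `axis`, then keep the first k of
        # each -- the same sort/argsort/strided_slice pattern the converter
        # emits, modeled eagerly in NumPy.
        axis = axis % x.ndim
        vals = np.sort(x, axis=axis)
        inds = np.argsort(x, axis=axis).astype("int64")
        keep = tuple(slice(0, k) if i == axis else slice(None) for i in range(x.ndim))
        return vals[keep], inds[keep]

    # bottom_k(np.array([[3.0, 1.0, 2.0]]), k=2) -> ([[1.0, 2.0]], [[1, 2]])

As the TODO in the diff notes, this does a full sort where a selection would suffice; it trades speed for coverage until topi grows a smallest-k attribute.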
From cbfb88902661e1c653a28053579f47fa610d1cc4 Mon Sep 17 00:00:00 2001 From: Matthew Date: Fri, 30 Apr 2021 10:22:03 -0600 Subject: [PATCH 5/7] throw with nullptr in gathernd and scatternd, fix typo --- python/tvm/relay/frontend/onnx.py | 2 +- src/relay/op/tensor/transform.cc | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index b4aaa4a4cf36..1f5ac445f7ea 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -1414,7 +1414,7 @@ def _impl_v1(cls, inputs, attr, params): class ScatterND(OnnxOpConverter): - """Operator converter for Scatter.""" + """Operator converter for ScatterND.""" @classmethod def _impl_v11(cls, inputs, attr, params): diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index e937cb0c7b1f..5d9c45b0498d 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -1120,6 +1120,7 @@ bool ScatterNDRel(const Array<Type>& types, int num_inputs, const Attrs& attrs, const auto out_shape = data->shape; const IntImmNode* mdim = indices->shape[0].as<IntImmNode>(); + ICHECK(mdim) << "GatherND needs a static shape for the first axis of indices, got " << indices->shape; const size_t kdim = indices->shape.size() - 1; const size_t ndim = out_shape.size(); ICHECK_LE(size_t(mdim->value), ndim) @@ -3327,6 +3328,7 @@ bool GatherNDRel(const Array<Type>& types, int num_inputs, const Attrs& attrs, } const size_t ndim = data->shape.size(); const IntImmNode* mdim = indices->shape[0].as<IntImmNode>(); + ICHECK(mdim) << "GatherND needs a static shape for the first axis of indices, got " << indices->shape; const size_t kdim = indices->shape.size() - 1; ICHECK(size_t(mdim->value) <= ndim) << "GatherND: indices shape does satisfy."; From 5b3f25eb6ade7a22d9caf6c37f9cd59c09db268b Mon Sep 17 00:00:00 2001 From: Matthew Date: Fri, 30 Apr 2021 10:54:54 -0600 Subject: [PATCH 6/7] fix lint --- src/relay/op/tensor/transform.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 5d9c45b0498d..6c5999351e37 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -1120,7 +1120,8 @@ bool ScatterNDRel(const Array<Type>& types, int num_inputs, const Attrs& attrs, const auto out_shape = data->shape; const IntImmNode* mdim = indices->shape[0].as<IntImmNode>(); - ICHECK(mdim) << "GatherND needs a static shape for the first axis of indices, got " << indices->shape; + ICHECK(mdim) << "GatherND needs a static shape for the first axis of indices, got " + << indices->shape; const size_t kdim = indices->shape.size() - 1; const size_t ndim = out_shape.size(); ICHECK_LE(size_t(mdim->value), ndim) @@ -3328,7 +3329,8 @@ bool GatherNDRel(const Array<Type>& types, int num_inputs, const Attrs& attrs, } const size_t ndim = data->shape.size(); const IntImmNode* mdim = indices->shape[0].as<IntImmNode>(); - ICHECK(mdim) << "GatherND needs a static shape for the first axis of indices, got " << indices->shape; + ICHECK(mdim) << "GatherND needs a static shape for the first axis of indices, got " + << indices->shape; const size_t kdim = indices->shape.size() - 1; ICHECK(size_t(mdim->value) <= ndim) << "GatherND: indices shape does satisfy."; From 77ca7072d7a921512c3030783e25849b657737ff Mon Sep 17 00:00:00 2001 From: Matthew Date: Sat, 1 May 2021 17:53:57 -0600 Subject: [PATCH 7/7] fix a copy typo --- src/relay/op/tensor/transform.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 6c5999351e37..e7266488ee00 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -1120,7 +1120,7 @@ bool ScatterNDRel(const Array<Type>& types, int num_inputs, const Attrs& attrs, const auto out_shape = data->shape; const IntImmNode* mdim = indices->shape[0].as<IntImmNode>(); - ICHECK(mdim) << "GatherND needs a static shape for the first axis of indices, got " + ICHECK(mdim) << "ScatterND needs a static shape for the first axis of indices, got " << indices->shape; const size_t kdim = indices->shape.size() - 1; const size_t ndim = out_shape.size();
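[Reviewer note, not part of the patch series] The ICHECKs added in patches 5 through 7 guard the case where the first axis of `indices` has a symbolic extent, so `indices->shape[0].as<IntImmNode>()` returns nullptr and the later `mdim->value` dereference would crash. Below is a hypothetical repro sketch written against the standard Relay Python API; the model, shapes, and variable names are invented here for illustration and have not been run against this branch.

    import tvm
    from tvm import relay

    # `indices` with a dynamic (relay.Any) first axis: its shape[0] is not
    # an IntImm, which is exactly the nullptr case the new ICHECK reports.
    data = relay.var("data", shape=(4, 4), dtype="float32")
    indices = relay.var("indices", shape=(relay.Any(), 2), dtype="int64")
    func = relay.Function([data, indices], relay.gather_nd(data, indices))
    mod = tvm.IRModule.from_expr(func)
    # With the patch, type inference should fail with the readable diagnostic
    # "GatherND needs a static shape for the first axis of indices, ..."
    # instead of dereferencing a null IntImmNode pointer.
    mod = relay.transform.InferType()(mod)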