From ee4bba2ad7f70e0d0e3771dba3d2b565ea9c69eb Mon Sep 17 00:00:00 2001 From: Egor Churaev Date: Tue, 2 Feb 2021 17:50:18 +0300 Subject: [PATCH 1/9] [ONNX] Add CumSum operator to ONNX frontend --- python/tvm/relay/frontend/onnx.py | 20 ++++++++++++- tests/python/frontend/onnx/test_forward.py | 33 ++++++++++++++++++++++ 2 files changed, 52 insertions(+), 1 deletion(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index c423598a2ee7..2ea8049c7ad1 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -34,7 +34,7 @@ from .. import ty as _ty from .common import AttrCvt, Renamer -from .common import get_relay_op, new_var, infer_shape, infer_channels +from .common import get_relay_op, new_var, infer_shape, infer_channels, infer_value from .common import infer_type, get_name @@ -1075,6 +1075,23 @@ def _impl_v1(cls, inputs, attr, params): return _op.shape_of(inputs[0], "int64") +class CumSum(OnnxOpConverter): + """Operator converter for CumSum.""" + + @classmethod + def _impl_v1(cls, inputs, attr, params): + data = inputs[0] + dim = inputs[1] + dtype = inputs[2] + + if dim is not None: + dim = int(infer_value(dim, params).asnumpy()) + if inputs[2] is not None: + dtype = infer_type(inputs[2]).checked_type.dtype + + return _op.cumsum(data, axis=dim, dtype=dtype) + + class Cast(OnnxOpConverter): """Operator converter for Cast.""" @@ -2736,6 +2753,7 @@ def _get_convert_map(opset): "Resize": Resize.get_converter(opset), "NonZero": NonZero.get_converter(opset), "Range": Range.get_converter(opset), + "CumSum": CumSum.get_converter(opset), # defs/control_flow "Loop": Loop.get_converter(opset), "If": If.get_converter(opset), diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 515fc32ef88d..589f853daabe 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -3964,6 +3964,38 @@ def verify_softplus(indata): verify_softplus(input_data) +def test_cumsum(): + def verify_cumsum(indata, axis): + node = onnx.helper.make_node( + 'CumSum', + inputs=['X', 'axis'], + outputs=['Y'], + ) + + graph = helper.make_graph( + [node], + "cumsum_test", + inputs=[ + helper.make_tensor_value_info("X", TensorProto.FLOAT, list(indata.shape)), + helper.make_tensor_value_info("axis", TensorProto.INT32, [1]), + ], + outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(indata.shape))], + ) + + model = helper.make_model(graph, producer_name="cumsum_test") + + verify_with_ort_with_inputs(model, [indata, axis], dtype="float32", use_vm=True, opset=11) + + data = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float64).reshape((2, 3)) + axis = np.int32(0) + verify_cumsum(data, axis) + axis = np.int32(1) + verify_cumsum(data, axis) + data = np.random.randn(1, 32, 32, 3).astype("float32") + axis = np.int32(1) + verify_cumsum(data, axis) + + if __name__ == "__main__": test_flatten() test_reshape() @@ -4040,3 +4072,4 @@ def verify_softplus(indata): test_size() test_maxunpool() test_softplus() + test_cumsum() From fb34f8fe68bf9f846eb2fd1949a7c350fa6d94f4 Mon Sep 17 00:00:00 2001 From: Egor Churaev Date: Wed, 3 Feb 2021 10:31:26 +0300 Subject: [PATCH 2/9] Fix lint and add attributes to CumSum --- python/tvm/relay/frontend/onnx.py | 12 ++++++++---- tests/python/frontend/onnx/test_forward.py | 10 +++++----- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 
2ea8049c7ad1..d45a94e61d51 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -1082,14 +1082,18 @@ class CumSum(OnnxOpConverter): def _impl_v1(cls, inputs, attr, params): data = inputs[0] dim = inputs[1] - dtype = inputs[2] if dim is not None: dim = int(infer_value(dim, params).asnumpy()) - if inputs[2] is not None: - dtype = infer_type(inputs[2]).checked_type.dtype - return _op.cumsum(data, axis=dim, dtype=dtype) + exclusive = attr.get("exclusive", 0) + reverse = attr.get("reverse", 0) + if exclusive != 0: + raise NotImplementedError("Exclusive CumSum not yet supported.") + if reverse != 0: + raise NotImplementedError("Reverse CumSum not yet supported.") + + return _op.cumsum(data, axis=dim) class Cast(OnnxOpConverter): diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 589f853daabe..c9b0c063ad72 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -3967,9 +3967,9 @@ def verify_softplus(indata): def test_cumsum(): def verify_cumsum(indata, axis): node = onnx.helper.make_node( - 'CumSum', - inputs=['X', 'axis'], - outputs=['Y'], + "CumSum", + inputs=["X", "axis"], + outputs=["Y"], ) graph = helper.make_graph( @@ -3978,7 +3978,7 @@ def verify_cumsum(indata, axis): inputs=[ helper.make_tensor_value_info("X", TensorProto.FLOAT, list(indata.shape)), helper.make_tensor_value_info("axis", TensorProto.INT32, [1]), - ], + ], outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(indata.shape))], ) @@ -3986,7 +3986,7 @@ def verify_cumsum(indata, axis): verify_with_ort_with_inputs(model, [indata, axis], dtype="float32", use_vm=True, opset=11) - data = np.array([1., 2., 3., 4., 5., 6.]).astype(np.float64).reshape((2, 3)) + data = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float64).reshape((2, 3)) axis = np.int32(0) verify_cumsum(data, axis) axis = np.int32(1) From e8c92b390d856ee116ac12628ff712fcccfb5f06 Mon Sep 17 00:00:00 2001 From: Egor Churaev Date: Wed, 3 Feb 2021 16:54:45 +0300 Subject: [PATCH 3/9] Fix CumSum test --- tests/python/frontend/onnx/test_forward.py | 29 +++++++++++----------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index c9b0c063ad72..a34a196343bc 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -3966,34 +3966,33 @@ def verify_softplus(indata): def test_cumsum(): def verify_cumsum(indata, axis): - node = onnx.helper.make_node( - "CumSum", - inputs=["X", "axis"], - outputs=["Y"], - ) + nodes = [ + make_constant_node("axis", onnx.TensorProto.INT32, [1], [axis]), + onnx.helper.make_node( + "CumSum", + inputs=["X", "axis"], + outputs=["Y"], + ), + ] graph = helper.make_graph( - [node], + nodes, "cumsum_test", inputs=[ helper.make_tensor_value_info("X", TensorProto.FLOAT, list(indata.shape)), - helper.make_tensor_value_info("axis", TensorProto.INT32, [1]), ], outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(indata.shape))], ) model = helper.make_model(graph, producer_name="cumsum_test") - verify_with_ort_with_inputs(model, [indata, axis], dtype="float32", use_vm=True, opset=11) + verify_with_ort_with_inputs(model, [indata], dtype="float32", use_vm=True, opset=11) - data = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float64).reshape((2, 3)) - axis = np.int32(0) - verify_cumsum(data, axis) - axis = np.int32(1) - 
verify_cumsum(data, axis) + data = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32).reshape((2, 3)) + verify_cumsum(data, 0) + verify_cumsum(data, 1) data = np.random.randn(1, 32, 32, 3).astype("float32") - axis = np.int32(1) - verify_cumsum(data, axis) + verify_cumsum(data, 1) if __name__ == "__main__": From 36a4177c3a781c84facf167857a6d38ac1265d32 Mon Sep 17 00:00:00 2001 From: Egor Churaev Date: Mon, 8 Feb 2021 16:46:50 +0300 Subject: [PATCH 4/9] Add support exclusive attribute --- include/tvm/relay/attrs/transform.h | 4 +++ python/tvm/relay/frontend/onnx.py | 6 +---- python/tvm/relay/op/_transform.py | 2 +- python/tvm/relay/op/strategy/generic.py | 2 +- python/tvm/relay/op/transform.py | 13 ++++++++-- python/tvm/topi/cumsum.py | 21 ++++++++++++++- src/relay/op/tensor/transform.cc | 4 ++- tests/python/frontend/onnx/test_forward.py | 30 +++++++++++++++++----- 8 files changed, 64 insertions(+), 18 deletions(-) diff --git a/include/tvm/relay/attrs/transform.h b/include/tvm/relay/attrs/transform.h index 43166249638a..36f7334ffe18 100644 --- a/include/tvm/relay/attrs/transform.h +++ b/include/tvm/relay/attrs/transform.h @@ -442,9 +442,13 @@ struct MatrixSetDiagAttrs : public tvm::AttrsNode { struct CumsumAttrs : public tvm::AttrsNode { Integer axis; DataType dtype; + Integer exclusive; + Integer reverse; TVM_DECLARE_ATTRS(CumsumAttrs, "relay.attrs.CumsumAttrs") { TVM_ATTR_FIELD(axis).describe("The axis to sum over").set_default(NullValue()); TVM_ATTR_FIELD(dtype).describe("Output data type").set_default(NullValue()); + TVM_ATTR_FIELD(exclusive).describe("The top element is not included").set_default(NullValue()); + TVM_ATTR_FIELD(reverse).describe("Perform the sums in reverse direction").set_default(NullValue()); } }; diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index d45a94e61d51..2cfb1620a85c 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -1088,12 +1088,8 @@ def _impl_v1(cls, inputs, attr, params): exclusive = attr.get("exclusive", 0) reverse = attr.get("reverse", 0) - if exclusive != 0: - raise NotImplementedError("Exclusive CumSum not yet supported.") - if reverse != 0: - raise NotImplementedError("Reverse CumSum not yet supported.") - return _op.cumsum(data, axis=dim) + return _op.cumsum(data, axis=dim, exclusive=exclusive, reverse=reverse) class Cast(OnnxOpConverter): diff --git a/python/tvm/relay/op/_transform.py b/python/tvm/relay/op/_transform.py index fd07c98ddc1f..3927ab2b5bfd 100644 --- a/python/tvm/relay/op/_transform.py +++ b/python/tvm/relay/op/_transform.py @@ -116,7 +116,7 @@ def compute_scatter_nd(attrs, inputs, output_type): @_reg.register_compute("cumsum") def compute_cumsum(attrs, inputs, output_type): """Compute definition of cumsum""" - return [topi.cumsum(inputs[0], attrs.axis, attrs.dtype)] + return [topi.cumsum(inputs[0], attrs.axis, attrs.dtype, attrs.exclusive, attrs.reverse)] _reg.register_strategy("cumsum", strategy.cumsum_strategy) diff --git a/python/tvm/relay/op/strategy/generic.py b/python/tvm/relay/op/strategy/generic.py index 3ad75faf4bc1..c0b4d00e90a8 100644 --- a/python/tvm/relay/op/strategy/generic.py +++ b/python/tvm/relay/op/strategy/generic.py @@ -1367,7 +1367,7 @@ def wrap_compute_cumsum(topi_compute): """Wrap cumsum topi compute""" def _compute_cumsum(attrs, inputs, _): - return [topi_compute(inputs[0], attrs.axis, attrs.dtype)] + return [topi_compute(inputs[0], attrs.axis, attrs.dtype, attrs.exclusive, attrs.reverse)] return _compute_cumsum diff --git 
a/python/tvm/relay/op/transform.py b/python/tvm/relay/op/transform.py index 6785ff248612..39789491b458 100644 --- a/python/tvm/relay/op/transform.py +++ b/python/tvm/relay/op/transform.py @@ -1322,7 +1322,7 @@ def adv_index(inputs): return _make.adv_index(Tuple(inputs)) -def cumsum(data, axis=None, dtype=None): +def cumsum(data, axis=None, dtype=None, exclusive=None, reverse=None): """Numpy style cumsum op. Return the cumulative inclusive sum of the elements along a given axis. @@ -1339,6 +1339,15 @@ def cumsum(data, axis=None, dtype=None): Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of data. + exclusive : int, optional + If set to 1 will return exclusive sum in which the top element is not + included. In other terms, if set to 1, the j-th output element would be + the sum of the first (j-1) elements. Otherwise, it would be the sum of + the first j elements. + + reverse : int, optional + If set to 1 will perform the sums in reverse direction. + Returns ------- result : relay.Expr @@ -1368,4 +1377,4 @@ def cumsum(data, axis=None, dtype=None): cumsum(a, dtype=int32) # dtype should be provided to get the expected results -> [1, 1, 2, 2, 3, 4, 4] """ - return _make.cumsum(data, axis, dtype) + return _make.cumsum(data, axis, dtype, exclusive, reverse) diff --git a/python/tvm/topi/cumsum.py b/python/tvm/topi/cumsum.py index 855427b1c619..e3329c35e709 100644 --- a/python/tvm/topi/cumsum.py +++ b/python/tvm/topi/cumsum.py @@ -22,7 +22,7 @@ from .math import cast -def cumsum(data, axis=None, dtype=None): +def cumsum(data, axis=None, dtype=None, exclusive=None, reverse=None): """Numpy style cumsum op. Return the cumulative sum of the elements along a given axis. Parameters @@ -38,6 +38,15 @@ def cumsum(data, axis=None, dtype=None): Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of data. + exclusive : int, optional + If set to 1 will return exclusive sum in which the top element is not + included. In other terms, if set to 1, the j-th output element would be + the sum of the first (j-1) elements. Otherwise, it would be the sum of + the first j elements. + + reverse : int, optional + If set to 1 will perform the sums in reverse direction. 
+ Returns ------- result : tvm.te.Tensor @@ -75,6 +84,12 @@ def maybe_cast(x): elif i > axis: axis_mul_after *= value + if exclusive is None: + exclusive = 0 + + if reverse is None: + reverse = 0 + def gen_ir(data_buf, out_buf): ib = ir_builder.create() data_buf = ib.buffer_ptr(data_buf) @@ -90,6 +105,10 @@ def gen_ir(data_buf, out_buf): cur_idx = base_idx + k * axis_mul_after prev_idx = base_idx + (k - 1) * axis_mul_after out_buf[cur_idx] = out_buf[prev_idx] + maybe_cast(data_buf[cur_idx]) + if exclusive != 0: + with ib.for_range(0, cumsum_axis_len, "_k") as k: + cur_idx = base_idx + k * axis_mul_after + out_buf[cur_idx] = out_buf[cur_idx] - maybe_cast(data_buf[cur_idx]) return ib.get() diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index d44bfe6959ca..7e6b7aad6467 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -3705,10 +3705,12 @@ bool CumsumRel(const Array& types, int num_inputs, const Attrs& attrs, return true; } -Expr MakeCumsum(Expr data, Integer axis, DataType dtype) { +Expr MakeCumsum(Expr data, Integer axis, DataType dtype, Integer exclusive, Integer reverse) { auto attrs = make_object(); attrs->dtype = dtype; attrs->axis = axis; + attrs->exclusive = exclusive; + attrs->reverse = reverse; static const Op& op = Op::Get("cumsum"); return Call(op, {data}, Attrs(attrs), {}); } diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index a34a196343bc..63c7e7f52d18 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -3965,14 +3965,21 @@ def verify_softplus(indata): def test_cumsum(): - def verify_cumsum(indata, axis): - nodes = [ - make_constant_node("axis", onnx.TensorProto.INT32, [1], [axis]), - onnx.helper.make_node( + def verify_cumsum(indata, axis, exclusive=0, reverse=0): + cumsum_node = onnx.helper.make_node( "CumSum", inputs=["X", "axis"], outputs=["Y"], - ), + ) + if exclusive != 0: + exclusive_attr = helper.make_attribute("exclusive", exclusive) + cumsum_node.attribute.append(exclusive_attr) + if reverse != 0: + reverse_attr = helper.make_attribute("reverse", reverse) + cumsum_node.attribute.append(reverse_attr) + nodes = [ + make_constant_node("axis", onnx.TensorProto.INT32, [1], [axis]), + cumsum_node, ] graph = helper.make_graph( @@ -3986,11 +3993,20 @@ def verify_cumsum(indata, axis): model = helper.make_model(graph, producer_name="cumsum_test") - verify_with_ort_with_inputs(model, [indata], dtype="float32", use_vm=True, opset=11) + verify_with_ort_with_inputs(model, [indata], dtype="float32", use_vm=True, opset=11, targets=['llvm']) - data = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]).astype(np.float32).reshape((2, 3)) + data = np.array([ + 1.0, 2.0, 3.0, 4.0, + 5.0, 6.0, 7.0, 8.0, + 9.0, 10.0, 11.0, 12.0, + ]).astype(np.float32).reshape((3, 4)) verify_cumsum(data, 0) verify_cumsum(data, 1) + verify_cumsum(data, 0, 1, 0) + verify_cumsum(data, 1, 1, 0) + #verify_cumsum(data, 0, 0, 1) + #verify_cumsum(data, 1, 0, 1) + #verify_cumsum(data, 1, 1, 1) data = np.random.randn(1, 32, 32, 3).astype("float32") verify_cumsum(data, 1) From b0b155b46db618609e2e543d19d9f04a94baec2e Mon Sep 17 00:00:00 2001 From: Egor Churaev Date: Mon, 8 Feb 2021 16:57:39 +0300 Subject: [PATCH 5/9] Add support reverse attribute --- include/tvm/relay/attrs/transform.h | 2 -- python/tvm/relay/op/_transform.py | 2 +- python/tvm/relay/op/strategy/generic.py | 2 +- python/tvm/relay/op/transform.py | 9 ++++++++- 
src/relay/op/tensor/transform.cc | 1 - tests/python/frontend/onnx/test_forward.py | 8 ++++---- 6 files changed, 14 insertions(+), 10 deletions(-) diff --git a/include/tvm/relay/attrs/transform.h b/include/tvm/relay/attrs/transform.h index 36f7334ffe18..0de801a733e5 100644 --- a/include/tvm/relay/attrs/transform.h +++ b/include/tvm/relay/attrs/transform.h @@ -443,12 +443,10 @@ struct CumsumAttrs : public tvm::AttrsNode { Integer axis; DataType dtype; Integer exclusive; - Integer reverse; TVM_DECLARE_ATTRS(CumsumAttrs, "relay.attrs.CumsumAttrs") { TVM_ATTR_FIELD(axis).describe("The axis to sum over").set_default(NullValue()); TVM_ATTR_FIELD(dtype).describe("Output data type").set_default(NullValue()); TVM_ATTR_FIELD(exclusive).describe("The top element is not included").set_default(NullValue()); - TVM_ATTR_FIELD(reverse).describe("Perform the sums in reverse direction").set_default(NullValue()); } }; diff --git a/python/tvm/relay/op/_transform.py b/python/tvm/relay/op/_transform.py index 3927ab2b5bfd..ba2416ff8950 100644 --- a/python/tvm/relay/op/_transform.py +++ b/python/tvm/relay/op/_transform.py @@ -116,7 +116,7 @@ def compute_scatter_nd(attrs, inputs, output_type): @_reg.register_compute("cumsum") def compute_cumsum(attrs, inputs, output_type): """Compute definition of cumsum""" - return [topi.cumsum(inputs[0], attrs.axis, attrs.dtype, attrs.exclusive, attrs.reverse)] + return [topi.cumsum(inputs[0], attrs.axis, attrs.dtype, attrs.exclusive)] _reg.register_strategy("cumsum", strategy.cumsum_strategy) diff --git a/python/tvm/relay/op/strategy/generic.py b/python/tvm/relay/op/strategy/generic.py index c0b4d00e90a8..af1d2552fab7 100644 --- a/python/tvm/relay/op/strategy/generic.py +++ b/python/tvm/relay/op/strategy/generic.py @@ -1367,7 +1367,7 @@ def wrap_compute_cumsum(topi_compute): """Wrap cumsum topi compute""" def _compute_cumsum(attrs, inputs, _): - return [topi_compute(inputs[0], attrs.axis, attrs.dtype, attrs.exclusive, attrs.reverse)] + return [topi_compute(inputs[0], attrs.axis, attrs.dtype, attrs.exclusive)] return _compute_cumsum diff --git a/python/tvm/relay/op/transform.py b/python/tvm/relay/op/transform.py index 39789491b458..4f9aec59e309 100644 --- a/python/tvm/relay/op/transform.py +++ b/python/tvm/relay/op/transform.py @@ -1377,4 +1377,11 @@ def cumsum(data, axis=None, dtype=None, exclusive=None, reverse=None): cumsum(a, dtype=int32) # dtype should be provided to get the expected results -> [1, 1, 2, 2, 3, 4, 4] """ - return _make.cumsum(data, axis, dtype, exclusive, reverse) + if reverse is None: + reverse = 0 + if reverse == 0: + return _make.cumsum(data, axis, dtype, exclusive, reverse) + else: + out = _make.reverse(data, axis) + out = _make.cumsum(out, axis, dtype, exclusive, reverse) + return _make.reverse(out, axis) diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 7e6b7aad6467..1e9ca7d54d3d 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -3710,7 +3710,6 @@ Expr MakeCumsum(Expr data, Integer axis, DataType dtype, Integer exclusive, Inte attrs->dtype = dtype; attrs->axis = axis; attrs->exclusive = exclusive; - attrs->reverse = reverse; static const Op& op = Op::Get("cumsum"); return Call(op, {data}, Attrs(attrs), {}); } diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 63c7e7f52d18..1bbbd2797ee7 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -3993,7 +3993,7 @@ def 
verify_cumsum(indata, axis, exclusive=0, reverse=0): model = helper.make_model(graph, producer_name="cumsum_test") - verify_with_ort_with_inputs(model, [indata], dtype="float32", use_vm=True, opset=11, targets=['llvm']) + verify_with_ort_with_inputs(model, [indata], dtype="float32", use_vm=True, opset=11) data = np.array([ 1.0, 2.0, 3.0, 4.0, @@ -4004,9 +4004,9 @@ def verify_cumsum(indata, axis, exclusive=0, reverse=0): verify_cumsum(data, 1) verify_cumsum(data, 0, 1, 0) verify_cumsum(data, 1, 1, 0) - #verify_cumsum(data, 0, 0, 1) - #verify_cumsum(data, 1, 0, 1) - #verify_cumsum(data, 1, 1, 1) + verify_cumsum(data, 0, 0, 1) + verify_cumsum(data, 1, 0, 1) + verify_cumsum(data, 1, 1, 1) data = np.random.randn(1, 32, 32, 3).astype("float32") verify_cumsum(data, 1) From defacbc7a48b1b4f857421b4dcf1eaaa03c79b34 Mon Sep 17 00:00:00 2001 From: Egor Churaev Date: Mon, 8 Feb 2021 17:10:58 +0300 Subject: [PATCH 6/9] Fix clang-format --- include/tvm/relay/attrs/transform.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/include/tvm/relay/attrs/transform.h b/include/tvm/relay/attrs/transform.h index 0de801a733e5..07726faec25d 100644 --- a/include/tvm/relay/attrs/transform.h +++ b/include/tvm/relay/attrs/transform.h @@ -446,7 +446,9 @@ struct CumsumAttrs : public tvm::AttrsNode { TVM_DECLARE_ATTRS(CumsumAttrs, "relay.attrs.CumsumAttrs") { TVM_ATTR_FIELD(axis).describe("The axis to sum over").set_default(NullValue()); TVM_ATTR_FIELD(dtype).describe("Output data type").set_default(NullValue()); - TVM_ATTR_FIELD(exclusive).describe("The top element is not included").set_default(NullValue()); + TVM_ATTR_FIELD(exclusive) + .describe("The top element is not included") + .set_default(NullValue()); } }; From 5f5b1b94cf23a26a25ae61a863b4fb706c92e35c Mon Sep 17 00:00:00 2001 From: Egor Churaev Date: Mon, 8 Feb 2021 17:16:26 +0300 Subject: [PATCH 7/9] Fix lint --- python/tvm/relay/frontend/onnx.py | 2 +- python/tvm/relay/op/transform.py | 13 ++++----- tests/python/frontend/onnx/test_forward.py | 34 ++++++++++++++++------ 3 files changed, 31 insertions(+), 18 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 2cfb1620a85c..77cbd0ad058f 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -1089,7 +1089,7 @@ def _impl_v1(cls, inputs, attr, params): exclusive = attr.get("exclusive", 0) reverse = attr.get("reverse", 0) - return _op.cumsum(data, axis=dim, exclusive=exclusive, reverse=reverse) + return _op.cumsum(data, axis=dim, exclusive=exclusive, rev=reverse) class Cast(OnnxOpConverter): diff --git a/python/tvm/relay/op/transform.py b/python/tvm/relay/op/transform.py index 4f9aec59e309..e636d9221287 100644 --- a/python/tvm/relay/op/transform.py +++ b/python/tvm/relay/op/transform.py @@ -1322,7 +1322,7 @@ def adv_index(inputs): return _make.adv_index(Tuple(inputs)) -def cumsum(data, axis=None, dtype=None, exclusive=None, reverse=None): +def cumsum(data, axis=None, dtype=None, exclusive=None, rev=None): """Numpy style cumsum op. Return the cumulative inclusive sum of the elements along a given axis. @@ -1345,7 +1345,7 @@ def cumsum(data, axis=None, dtype=None, exclusive=None, reverse=None): the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements. - reverse : int, optional + rev : int, optional If set to 1 will perform the sums in reverse direction. 
Returns @@ -1377,11 +1377,8 @@ def cumsum(data, axis=None, dtype=None, exclusive=None, reverse=None): cumsum(a, dtype=int32) # dtype should be provided to get the expected results -> [1, 1, 2, 2, 3, 4, 4] """ - if reverse is None: - reverse = 0 - if reverse == 0: - return _make.cumsum(data, axis, dtype, exclusive, reverse) - else: + if rev is not None and rev != 0: out = _make.reverse(data, axis) - out = _make.cumsum(out, axis, dtype, exclusive, reverse) + out = _make.cumsum(out, axis, dtype, exclusive) return _make.reverse(out, axis) + return _make.cumsum(data, axis, dtype, exclusive) diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index 1bbbd2797ee7..e890d218c621 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -3967,10 +3967,10 @@ def verify_softplus(indata): def test_cumsum(): def verify_cumsum(indata, axis, exclusive=0, reverse=0): cumsum_node = onnx.helper.make_node( - "CumSum", - inputs=["X", "axis"], - outputs=["Y"], - ) + "CumSum", + inputs=["X", "axis"], + outputs=["Y"], + ) if exclusive != 0: exclusive_attr = helper.make_attribute("exclusive", exclusive) cumsum_node.attribute.append(exclusive_attr) @@ -3995,11 +3995,27 @@ def verify_cumsum(indata, axis, exclusive=0, reverse=0): verify_with_ort_with_inputs(model, [indata], dtype="float32", use_vm=True, opset=11) - data = np.array([ - 1.0, 2.0, 3.0, 4.0, - 5.0, 6.0, 7.0, 8.0, - 9.0, 10.0, 11.0, 12.0, - ]).astype(np.float32).reshape((3, 4)) + data = ( + np.array( + [ + 1.0, + 2.0, + 3.0, + 4.0, + 5.0, + 6.0, + 7.0, + 8.0, + 9.0, + 10.0, + 11.0, + 12.0, + ] + ) + .astype(np.float32) + .reshape((3, 4)) + ) + verify_cumsum(data, 0) verify_cumsum(data, 1) verify_cumsum(data, 0, 1, 0) From 5e63126f6d875224c92b97ed94254e0702a0185e Mon Sep 17 00:00:00 2001 From: Egor Churaev Date: Tue, 9 Feb 2021 10:19:35 +0300 Subject: [PATCH 8/9] Move reverse calculation to ONNX frontend and add exclusive to GPU --- python/tvm/relay/frontend/onnx.py | 7 ++++++- python/tvm/relay/op/transform.py | 9 +-------- python/tvm/topi/cuda/scan.py | 10 +++++++++- python/tvm/topi/cumsum.py | 22 +++++++++------------- src/relay/op/tensor/transform.cc | 2 +- 5 files changed, 26 insertions(+), 24 deletions(-) diff --git a/python/tvm/relay/frontend/onnx.py b/python/tvm/relay/frontend/onnx.py index 77cbd0ad058f..c9140d782a2d 100644 --- a/python/tvm/relay/frontend/onnx.py +++ b/python/tvm/relay/frontend/onnx.py @@ -1089,7 +1089,12 @@ def _impl_v1(cls, inputs, attr, params): exclusive = attr.get("exclusive", 0) reverse = attr.get("reverse", 0) - return _op.cumsum(data, axis=dim, exclusive=exclusive, rev=reverse) + if reverse != 0: + out = _op.reverse(data, axis=dim) + out = _op.cumsum(out, axis=dim, exclusive=exclusive) + return _op.reverse(out, axis=dim) + + return _op.cumsum(data, axis=dim, exclusive=exclusive) class Cast(OnnxOpConverter): diff --git a/python/tvm/relay/op/transform.py b/python/tvm/relay/op/transform.py index e636d9221287..d1ec5120ac54 100644 --- a/python/tvm/relay/op/transform.py +++ b/python/tvm/relay/op/transform.py @@ -1322,7 +1322,7 @@ def adv_index(inputs): return _make.adv_index(Tuple(inputs)) -def cumsum(data, axis=None, dtype=None, exclusive=None, rev=None): +def cumsum(data, axis=None, dtype=None, exclusive=None): """Numpy style cumsum op. Return the cumulative inclusive sum of the elements along a given axis. @@ -1345,9 +1345,6 @@ def cumsum(data, axis=None, dtype=None, exclusive=None, rev=None): the sum of the first (j-1) elements. 
Otherwise, it would be the sum of the first j elements. - rev : int, optional - If set to 1 will perform the sums in reverse direction. - Returns ------- result : relay.Expr @@ -1377,8 +1374,4 @@ def cumsum(data, axis=None, dtype=None, exclusive=None, rev=None): cumsum(a, dtype=int32) # dtype should be provided to get the expected results -> [1, 1, 2, 2, 3, 4, 4] """ - if rev is not None and rev != 0: - out = _make.reverse(data, axis) - out = _make.cumsum(out, axis, dtype, exclusive) - return _make.reverse(out, axis) return _make.cumsum(data, axis, dtype, exclusive) diff --git a/python/tvm/topi/cuda/scan.py b/python/tvm/topi/cuda/scan.py index 232d679840fd..0986c79d0733 100644 --- a/python/tvm/topi/cuda/scan.py +++ b/python/tvm/topi/cuda/scan.py @@ -488,7 +488,7 @@ def traverse(op): return s -def cumsum(data, axis=None, dtype=None): +def cumsum(data, axis=None, dtype=None, exclusive=None): """Numpy style cumsum op. Return the cumulative sum of the elements along a given axis. Parameters @@ -504,6 +504,12 @@ def cumsum(data, axis=None, dtype=None): Type of the returned array and of the accumulator in which the elements are summed. If dtype is not specified, it defaults to the dtype of data. + exclusive : int, optional + If set to 1 will return exclusive sum in which the top element is not + included. In other terms, if set to 1, the j-th output element would be + the sum of the first (j-1) elements. Otherwise, it would be the sum of + the first j elements. + Returns ------- result : tvm.te.Tensor @@ -514,4 +520,6 @@ def cumsum(data, axis=None, dtype=None): axis = 0 data = reshape(data, (prod(data.shape),)) axis = get_const_int(axis) + if exclusive is not None and exclusive != 0: + return exclusive_scan(data, axis, output_dtype=dtype, binop=tvm.tir.generic.add) return inclusive_scan(data, axis, output_dtype=dtype, binop=tvm.tir.generic.add) diff --git a/python/tvm/topi/cumsum.py b/python/tvm/topi/cumsum.py index e3329c35e709..d148974601b5 100644 --- a/python/tvm/topi/cumsum.py +++ b/python/tvm/topi/cumsum.py @@ -22,7 +22,7 @@ from .math import cast -def cumsum(data, axis=None, dtype=None, exclusive=None, reverse=None): +def cumsum(data, axis=None, dtype=None, exclusive=None): """Numpy style cumsum op. Return the cumulative sum of the elements along a given axis. Parameters @@ -44,9 +44,6 @@ def cumsum(data, axis=None, dtype=None, exclusive=None, reverse=None): the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements. - reverse : int, optional - If set to 1 will perform the sums in reverse direction. 
- Returns ------- result : tvm.te.Tensor @@ -87,9 +84,6 @@ def maybe_cast(x): if exclusive is None: exclusive = 0 - if reverse is None: - reverse = 0 - def gen_ir(data_buf, out_buf): ib = ir_builder.create() data_buf = ib.buffer_ptr(data_buf) @@ -99,16 +93,18 @@ def gen_ir(data_buf, out_buf): i = fused // axis_mul_after j = fused % axis_mul_after base_idx = i * cumsum_axis_len * axis_mul_after + j - out_buf[base_idx] = maybe_cast(data_buf[base_idx]) + if exclusive == 0: + out_buf[base_idx] = maybe_cast(data_buf[base_idx]) + else: + out_buf[base_idx] = 0.0 with ib.for_range(0, cumsum_axis_len - 1, "_k") as _k: k = _k + 1 cur_idx = base_idx + k * axis_mul_after prev_idx = base_idx + (k - 1) * axis_mul_after - out_buf[cur_idx] = out_buf[prev_idx] + maybe_cast(data_buf[cur_idx]) - if exclusive != 0: - with ib.for_range(0, cumsum_axis_len, "_k") as k: - cur_idx = base_idx + k * axis_mul_after - out_buf[cur_idx] = out_buf[cur_idx] - maybe_cast(data_buf[cur_idx]) + if exclusive == 0: + out_buf[cur_idx] = out_buf[prev_idx] + maybe_cast(data_buf[cur_idx]) + else: + out_buf[cur_idx] = out_buf[prev_idx] + maybe_cast(data_buf[prev_idx]) return ib.get() diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 1e9ca7d54d3d..5e39b409615d 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -3705,7 +3705,7 @@ bool CumsumRel(const Array& types, int num_inputs, const Attrs& attrs, return true; } -Expr MakeCumsum(Expr data, Integer axis, DataType dtype, Integer exclusive, Integer reverse) { +Expr MakeCumsum(Expr data, Integer axis, DataType dtype, Integer exclusive) { auto attrs = make_object(); attrs->dtype = dtype; attrs->axis = axis; From eb02bc125c8e8439659f2d32ce76514eb63380ff Mon Sep 17 00:00:00 2001 From: Egor Churaev Date: Tue, 9 Feb 2021 13:36:43 +0300 Subject: [PATCH 9/9] Add test for int type --- include/tvm/relay/attrs/transform.h | 2 +- python/tvm/relay/op/transform.py | 2 +- python/tvm/topi/cuda/scan.py | 2 +- python/tvm/topi/cumsum.py | 4 ++-- tests/python/frontend/onnx/test_forward.py | 21 +++++++++++++++++---- 5 files changed, 22 insertions(+), 9 deletions(-) diff --git a/include/tvm/relay/attrs/transform.h b/include/tvm/relay/attrs/transform.h index 07726faec25d..45a1caf2bd79 100644 --- a/include/tvm/relay/attrs/transform.h +++ b/include/tvm/relay/attrs/transform.h @@ -447,7 +447,7 @@ struct CumsumAttrs : public tvm::AttrsNode { TVM_ATTR_FIELD(axis).describe("The axis to sum over").set_default(NullValue()); TVM_ATTR_FIELD(dtype).describe("Output data type").set_default(NullValue()); TVM_ATTR_FIELD(exclusive) - .describe("The top element is not included") + .describe("The first element is not included") .set_default(NullValue()); } }; diff --git a/python/tvm/relay/op/transform.py b/python/tvm/relay/op/transform.py index d1ec5120ac54..e9d081eb5fb6 100644 --- a/python/tvm/relay/op/transform.py +++ b/python/tvm/relay/op/transform.py @@ -1340,7 +1340,7 @@ def cumsum(data, axis=None, dtype=None, exclusive=None): If dtype is not specified, it defaults to the dtype of data. exclusive : int, optional - If set to 1 will return exclusive sum in which the top element is not + If set to 1 will return exclusive sum in which the first element is not included. In other terms, if set to 1, the j-th output element would be the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements. 
diff --git a/python/tvm/topi/cuda/scan.py b/python/tvm/topi/cuda/scan.py index 0986c79d0733..0bdab100b429 100644 --- a/python/tvm/topi/cuda/scan.py +++ b/python/tvm/topi/cuda/scan.py @@ -505,7 +505,7 @@ def cumsum(data, axis=None, dtype=None, exclusive=None): If dtype is not specified, it defaults to the dtype of data. exclusive : int, optional - If set to 1 will return exclusive sum in which the top element is not + If set to 1 will return exclusive sum in which the first element is not included. In other terms, if set to 1, the j-th output element would be the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements. diff --git a/python/tvm/topi/cumsum.py b/python/tvm/topi/cumsum.py index d148974601b5..2013a352874d 100644 --- a/python/tvm/topi/cumsum.py +++ b/python/tvm/topi/cumsum.py @@ -39,7 +39,7 @@ def cumsum(data, axis=None, dtype=None, exclusive=None): If dtype is not specified, it defaults to the dtype of data. exclusive : int, optional - If set to 1 will return exclusive sum in which the top element is not + If set to 1 will return exclusive sum in which the first element is not included. In other terms, if set to 1, the j-th output element would be the sum of the first (j-1) elements. Otherwise, it would be the sum of the first j elements. @@ -96,7 +96,7 @@ def gen_ir(data_buf, out_buf): if exclusive == 0: out_buf[base_idx] = maybe_cast(data_buf[base_idx]) else: - out_buf[base_idx] = 0.0 + out_buf[base_idx] = cast(0, dtype) with ib.for_range(0, cumsum_axis_len - 1, "_k") as _k: k = _k + 1 cur_idx = base_idx + k * axis_mul_after diff --git a/tests/python/frontend/onnx/test_forward.py b/tests/python/frontend/onnx/test_forward.py index e890d218c621..27b91dd38f8e 100644 --- a/tests/python/frontend/onnx/test_forward.py +++ b/tests/python/frontend/onnx/test_forward.py @@ -3965,7 +3965,7 @@ def verify_softplus(indata): def test_cumsum(): - def verify_cumsum(indata, axis, exclusive=0, reverse=0): + def verify_cumsum(indata, axis, exclusive=0, reverse=0, type="float32"): cumsum_node = onnx.helper.make_node( "CumSum", inputs=["X", "axis"], @@ -3981,19 +3981,24 @@ def verify_cumsum(indata, axis, exclusive=0, reverse=0): make_constant_node("axis", onnx.TensorProto.INT32, [1], [axis]), cumsum_node, ] + if type == "float32": + tensor_type = TensorProto.FLOAT + else: + tensor_type = TensorProto.INT32 + type = "int32" graph = helper.make_graph( nodes, "cumsum_test", inputs=[ - helper.make_tensor_value_info("X", TensorProto.FLOAT, list(indata.shape)), + helper.make_tensor_value_info("X", tensor_type, list(indata.shape)), ], - outputs=[helper.make_tensor_value_info("Y", TensorProto.FLOAT, list(indata.shape))], + outputs=[helper.make_tensor_value_info("Y", tensor_type, list(indata.shape))], ) model = helper.make_model(graph, producer_name="cumsum_test") - verify_with_ort_with_inputs(model, [indata], dtype="float32", use_vm=True, opset=11) + verify_with_ort_with_inputs(model, [indata], dtype=type, use_vm=True, opset=11) data = ( np.array( @@ -4025,6 +4030,14 @@ def verify_cumsum(indata, axis, exclusive=0, reverse=0): verify_cumsum(data, 1, 1, 1) data = np.random.randn(1, 32, 32, 3).astype("float32") verify_cumsum(data, 1) + data = np.random.randn(1, 32, 32, 3).astype("int32") + verify_cumsum(data, 0, type="int32") + verify_cumsum(data, 1, type="int32") + verify_cumsum(data, 0, 1, 0, type="int32") + verify_cumsum(data, 1, 1, 0, type="int32") + verify_cumsum(data, 0, 0, 1, type="int32") + verify_cumsum(data, 1, 0, 1, type="int32") + verify_cumsum(data, 1, 1, 1, 
type="int32") if __name__ == "__main__":