From c1ea9b9a05810123fa686418eef98e782a8135a5 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Tue, 7 Jul 2020 12:01:47 -0700 Subject: [PATCH 01/29] Dynamic BroadcastTo --- python/tvm/relay/op/_transform.py | 2 +- python/tvm/relay/op/dyn/__init__.py | 1 + python/tvm/relay/op/dyn/_tensor.py | 29 ++++++ python/tvm/relay/op/dyn/_transform.py | 1 + python/tvm/relay/op/dyn/transform.py | 93 +++++++++++++++++++ python/tvm/relay/op/transform.py | 6 +- src/relay/op/dyn/tensor/transform.cc | 53 +++++++++++ src/relay/op/tensor/transform.cc | 62 ++++++------- src/relay/transforms/dynamic_to_static.cc | 29 +++++- src/relay/transforms/pattern_util.h | 28 +++++- src/runtime/library_module.cc | 4 +- .../relay/dyn/test_dynamic_op_level10.py | 57 ++++++++++++ tests/python/relay/test_op_level10.py | 10 +- .../relay/test_pass_dynamic_to_static.py | 22 +++++ 14 files changed, 353 insertions(+), 44 deletions(-) create mode 100644 python/tvm/relay/op/dyn/_tensor.py create mode 100644 python/tvm/relay/op/dyn/transform.py create mode 100644 tests/python/relay/dyn/test_dynamic_op_level10.py diff --git a/python/tvm/relay/op/_transform.py b/python/tvm/relay/op/_transform.py index 878b82a19a36..e3b3955c9331 100644 --- a/python/tvm/relay/op/_transform.py +++ b/python/tvm/relay/op/_transform.py @@ -56,7 +56,7 @@ _reg.register_injective_schedule("gather") _reg.register_injective_schedule("gather_nd") _reg.register_injective_schedule("sequence_mask") -_reg.register_injective_schedule("one_hot") + _reg.register_reduce_schedule("collapse_sum_like") _reg.register_reduce_schedule("collapse_sum_to") _reg.register_injective_schedule("unravel_index") diff --git a/python/tvm/relay/op/dyn/__init__.py b/python/tvm/relay/op/dyn/__init__.py index f4d47a6d780c..967ecbc36bad 100644 --- a/python/tvm/relay/op/dyn/__init__.py +++ b/python/tvm/relay/op/dyn/__init__.py @@ -19,3 +19,4 @@ from . import _algorithm from . import _transform +from . import _tensor diff --git a/python/tvm/relay/op/dyn/_tensor.py b/python/tvm/relay/op/dyn/_tensor.py new file mode 100644 index 000000000000..6e56a440227d --- /dev/null +++ b/python/tvm/relay/op/dyn/_tensor.py @@ -0,0 +1,29 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
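A minimal usage sketch of the dynamic broadcast_to path this patch introduces; it assumes the Expr dispatch that the transform.py hunk below adds to relay.broadcast_to, and mirrors the new test_dyn_broadcast_to test:

.. code-block:: python

    import numpy as np
    import tvm
    from tvm import relay

    x = relay.var("x", shape=(1,), dtype="float32")
    # the target shape is itself a relay expression, only known at runtime
    s = relay.var("s", shape=(3,), dtype="int64")
    z = relay.broadcast_to(x, s)            # lowers to dyn.broadcast_to
    func = relay.Function([x, s], z)

    mod = tvm.ir.IRModule.from_expr(func)
    intrp = relay.create_executor("vm", mod=mod, ctx=tvm.cpu(), target="llvm")
    res = intrp.evaluate(func)(np.zeros((1,), "float32"),
                               np.array([2, 3, 4], dtype="int64"))
    # res.asnumpy().shape == (2, 3, 4)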
+#pylint: disable=invalid-name, unused-argument, len-as-condition +"""Backend compiler related feature registration""" + +from tvm.te.hybrid import script +import topi + +from ..op import register_compute, register_shape_func +from ..op import register_broadcast_schedule, register_injective_schedule +from ..op import register_pattern, OpPattern + +from .._tensor import full_shape_func + +register_shape_func("dyn.broadcast_to", True, full_shape_func) diff --git a/python/tvm/relay/op/dyn/_transform.py b/python/tvm/relay/op/dyn/_transform.py index 8279b1249ced..e2704bc24e62 100644 --- a/python/tvm/relay/op/dyn/_transform.py +++ b/python/tvm/relay/op/dyn/_transform.py @@ -22,6 +22,7 @@ from tvm.te.hybrid import script from .. import op as _reg +_reg.register_broadcast_schedule("dyn.broadcast_to") _reg.register_injective_schedule("dyn.reshape") _reg.register_broadcast_schedule("dyn.tile") diff --git a/python/tvm/relay/op/dyn/transform.py b/python/tvm/relay/op/dyn/transform.py new file mode 100644 index 000000000000..9f6b84380219 --- /dev/null +++ b/python/tvm/relay/op/dyn/transform.py @@ -0,0 +1,93 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +# pylint: disable=import-outside-toplevel +"""Dynamic Transform operators.""" + +from . import _make + + +def reshape(data, newshape): + """Reshape the input array based on the values of the newshape tensor. + + To give user more convenience in without doing manual shape inference, + some dimensions of the shape can take special values from the set {0, -1, -3}. + The significance of each is explained below: + + ``0`` copy this dimension from the input to the output shape. + + .. code-block:: python + + data.shape = (2,3,4), newshape = (4,0,2), result.shape = (4,3,2) + data.shape = (2,3,4), newshape = (2,0,0), result.shape = (2,3,4) + + ``-1`` infers the dimension of the output shape by using the remainder of + the input dimensions keeping the size of the new array same as that of the input array. + At most one dimension of shape can be -1. + + .. code-block:: python + + data.shape = (2,3,4), newshape = (6,1,-1), result.shape = (6,1,4) + data.shape = (2,3,4), newshape = (3,-1,8), result.shape = (3,1,8) + data.shape = (2,3,4), newshape = (-1,), result.shape = (24,) + + ``-3`` use the product of two consecutive dimensions of the input shape + as the output dimension. + + .. code-block:: python + + data.shape = (2,3,4), newshape = (-3,4), result.shape = (6,4) + data.shape = (2,3,4,5), newshape = (-3,-3), result.shape = (6,20) + data.shape = (2,3,4), newshape = (0,-3), result.shape = (2,12) + + Special values -2 and -4 from the standard reshape op would introduce dynamic rank + in this op. Thus, they are not permitted. + + Parameters + ---------- + data : relay.Expr + The input data to the operator. 
+ + newshape : relay.Expr + The new shape. Should be compatible with the original shape. + + Returns + ------- + result : relay.Expr + The reshaped result. + """ + return _make.reshape(data, newshape) + +def broadcast_to(data, shape): + + """Return a scalar value array with the same type, broadcast to + the provided shape. + + Parameters + ---------- + data : relay.Expr + The input tensor. + + shape : a relay.Expr, cannot be a tuple of consts + Provide the shape to broadcast to. + + Returns + ------- + result : relay.Expr + The resulting tensor. + """ + return _make.broadcast_to(data, shape) diff --git a/python/tvm/relay/op/transform.py b/python/tvm/relay/op/transform.py index 173db64de258..83008a9c1cc5 100644 --- a/python/tvm/relay/op/transform.py +++ b/python/tvm/relay/op/transform.py @@ -661,8 +661,12 @@ def broadcast_to(data, shape): result : relay.Expr The resulting tensor. """ + if isinstance(shape, Expr): + return _dyn_make.broadcast_to(data, shape) + if isinstance(shape, int): + shape = [shape] if isinstance(shape, (list, tuple)): - shape = const(list(shape), "int32") + shape = list(shape) return _make.broadcast_to(data, shape) def broadcast_to_like(data, broadcast_type): diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index 0b8a15676cc3..57aa8524b2f2 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -23,6 +23,7 @@ */ #include "transform.h" +#include #include #include #include @@ -36,6 +37,8 @@ namespace relay { namespace dyn { /* relay.dyn.reshape */ +// TVM_REGISTER_NODE_TYPE(ReshapeAttrs); + bool ReshapeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types: [data, newshape, result] @@ -195,6 +198,56 @@ RELAY_REGISTER_OP("dyn.tile") .set_attr("FTVMCompute", TileCompute) .set_attr("TOpPattern", kInjective); + +// broadcast_to operator +bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs, + const TypeReporter& reporter) { + // types = [data_type, broadcast_shape_type, ret_type] + + CHECK_EQ(types.size(), 3); + + const auto* target_shape = types[1].as(); + DataType out_dtype = types[0].as()->dtype; + // rank must be static + const IntImmNode* rank = target_shape->shape[0].as(); + CHECK(rank) << "Target shape must have static rank"; // rank must be static even in dyn pass + // could add support for dyn rank in futures + + std::vector oshape; + for (int i = 0; i < rank->value; ++i) { + oshape.push_back(Any()); + } + + reporter->Assign(types[2], TensorType(oshape, out_dtype)); + return true; +} + +Expr MakeBroadCastTo(Expr data, Expr shape) { + static const Op& op = Op::Get("dyn.broadcast_to"); + auto attrs = make_object(); + return Call(op, {data, shape}, Attrs(attrs), {}); +} + +Array BroadCastToCompute(const Attrs& attrs, const Array& inputs, + const Type& out_type) { + + const auto* out_ttype = out_type.as(); + return {topi::broadcast_to(inputs[0], out_ttype->shape)}; +} + +TVM_REGISTER_GLOBAL("relay.op.dyn._make.broadcast_to").set_body_typed(MakeBroadCastTo); + +RELAY_REGISTER_OP("dyn.broadcast_to") + .describe(R"code(Broadcast the first input to match the shape argument. 
+)code" TVM_ADD_FILELINE) + .set_num_inputs(2) + .add_argument("data", "Tensor", "The input tensor.") + .add_argument("shape", "Tensor", "Target shape.") + .set_support_level(4) + .add_type_rel("DynamicBroadCastTo", BroadCastToRel) + .set_attr("FTVMCompute", BroadCastToCompute) + .set_attr("TOpPattern", kBroadcast); + } // namespace dyn } // namespace relay } // namespace tvm diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 9d5f248cb229..7c9bc35a1b01 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -1782,25 +1782,28 @@ RELAY_REGISTER_OP("collapse_sum_like") // CollapseSumTo: -> B where Broadcast(A, B) = A bool CollapseSumToRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { + CHECK_EQ(types.size(), 3); - const InitOpAttrs* param = attrs.as(); + const InitOpAttrs* param = attrs.as(); + const auto* target_shape = types[1].as(); DataType out_dtype = types[0].as()->dtype; - const IntImmNode* shape_shape = target_shape->shape[0].as(); - CHECK(shape_shape) << "Parameter shape must have static shape"; + const IntImmNode* rank = target_shape->shape[0].as(); + CHECK(rank) << "Parameter must have static rank"; std::vector oshape; - if (param->shape) { - const Array& cshape_array = param->shape.value(); - for (size_t i = 0; i < cshape_array.size(); ++i) { + if(param->shape) { + const Array& cshape_array = param->shape.value(); + for (size_t i = 0; i < cshape_array.size(); i++) { oshape.push_back(cshape_array[i]); } } else { - for (int i = 0; i < shape_shape->value; ++i) { + for (int i = 0; i < rank->value; i++) { oshape.push_back(Any()); } } + reporter->Assign(types[2], TensorType(oshape, out_dtype)); return BroadcastRel({types[0], types[2], types[0]}, 2, Attrs(), reporter); } @@ -1827,39 +1830,33 @@ RELAY_REGISTER_OP("collapse_sum_to") .set_attr("FTVMCompute", CollapseSumLikeCompute) .set_attr("TOpPattern", kCommReduce); -// BroadCastTo: -> B where BroadCast(A, B) = B bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); - const InitOpAttrs* param = attrs.as(); - const auto* target_shape = types[1].as(); - DataType out_dtype = types[0].as()->dtype; - const IntImmNode* shape_shape = target_shape->shape[0].as(); - CHECK(shape_shape) << "Parameter shape must have static shape"; + // types = [data_type, ret_type], broadcast_to_type is in attrs bc static - std::vector oshape; - if (param->shape) { - const Array& cshape_array = param->shape.value(); + + const InitOpAttrs* param = attrs.as(); + CHECK(param); + + DataType out_dtype = types[0].as()->dtype; + std::vector oshape; + + const Array& cshape_array = param->shape.value(); for (size_t i = 0; i < cshape_array.size(); ++i) { - oshape.push_back(cshape_array[i]); - } - } else { - for (int i = 0; i < shape_shape->value; ++i) { - oshape.push_back(Any()); - } + oshape.push_back(cshape_array[i]); } - reporter->Assign(types[2], TensorType(oshape, out_dtype)); - return BroadcastRel({types[0], types[2], types[2]}, 2, Attrs(), reporter); + reporter->Assign(types[1], TensorType(oshape, out_dtype)); + return BroadcastRel({types[0], types[1], types[1]}, 2, Attrs(), reporter); + } -Expr MakeBroadCastTo(Expr data, Expr shape) { +Expr MakeBroadCastTo(Expr data, Array shape) { static const Op& op = Op::Get("broadcast_to"); auto attrs = make_object(); - if (const auto* cshape = shape.as()) { - attrs->shape = ToVector(cshape->data); - } - return Call(op, {data, shape}, 
Attrs(attrs), {}); + + attrs->shape = std::move(shape); + return Call(op, {data}, Attrs(attrs), {}); } Array BroadCastToCompute(const Attrs& attrs, const Array& inputs, @@ -1873,9 +1870,8 @@ TVM_REGISTER_GLOBAL("relay.op._make.broadcast_to").set_body_typed(MakeBroadCastT RELAY_REGISTER_OP("broadcast_to") .describe(R"code(Broadcast the first input to match the shape argument. )code" TVM_ADD_FILELINE) - .set_num_inputs(2) + .set_num_inputs(1) .add_argument("data", "Tensor", "The input tensor.") - .add_argument("shape", "Tensor", "Target shape.") .set_support_level(4) .add_type_rel("BroadCastTo", BroadCastToRel) .set_attr("FTVMCompute", BroadCastToCompute) @@ -3049,4 +3045,4 @@ RELAY_REGISTER_OP("sparse_to_dense") .set_attr("FTVMCompute", SparseToDenseCompute); } // namespace relay -} // namespace tvm +} // namespace tvm \ No newline at end of file diff --git a/src/relay/transforms/dynamic_to_static.cc b/src/relay/transforms/dynamic_to_static.cc index 359e1d335bfa..a531625aa290 100644 --- a/src/relay/transforms/dynamic_to_static.cc +++ b/src/relay/transforms/dynamic_to_static.cc @@ -33,7 +33,15 @@ namespace relay { class DynamicToStaticMutator : public MixedModeMutator { public: +<<<<<<< HEAD DynamicToStaticMutator() {} +======= + DynamicToStaticMutator() + : dyn_reshape_op_(Op::Get("dyn.reshape")), + dyn_tile_op_(Op::Get("dyn.tile")), + dyn_topk_op_(Op::Get("dyn.topk")), + dyn_broadcast_to_op_(Op::Get("dyn.broadcast_to")) {} +>>>>>>> bdc05de53... Dynamic BroadcastTo private: Expr Rewrite_(const CallNode* pre, const Expr& post) override { @@ -56,6 +64,18 @@ class DynamicToStaticMutator : public MixedModeMutator { param->ret_type, param->is_ascend, param->dtype); } } + if (call_node->op == dyn_broadcast_to_op_) { + if (const ConstantNode* shape = call_node->args[1].as()) { + auto attrs = make_object(); + CHECK_EQ(shape->data->ndim, 1); + + // put shape in attrs + attrs->shape = ToVector(shape->data); + static const Op& broadcast_to = Op::Get("broadcast_to"); + // pass in one arg to static broadcast to + return Call(broadcast_to, {call_node->args[0]}, Attrs(attrs), {}); + } + } return post; } @@ -66,6 +86,14 @@ class DynamicToStaticMutator : public MixedModeMutator { } return post; } +<<<<<<< HEAD +======= + + const Op& dyn_reshape_op_; + const Op& dyn_tile_op_; + const Op& dyn_topk_op_; + const Op& dyn_broadcast_to_op_; +>>>>>>> bdc05de53... Dynamic BroadcastTo }; Expr DynamicToStatic(Function f, IRModule m) { @@ -106,6 +134,5 @@ TVM_REGISTER_GLOBAL("relay._transform.DynamicToStatic").set_body_typed([]() { }); } // namespace transform - } // namespace relay } // namespace tvm diff --git a/src/relay/transforms/pattern_util.h b/src/relay/transforms/pattern_util.h index 62a58d2b7ffb..1164ebc18151 100644 --- a/src/relay/transforms/pattern_util.h +++ b/src/relay/transforms/pattern_util.h @@ -314,7 +314,8 @@ static inline Constant MakeConstantTensor(DataType dtype, std::vector s } /*! - * \brief Check whether a shape is static and create corresponding Constant. + * \brief Check whether a shape is static and create corresponding Constant. + Eventually this will be removed and replaced with CheckConstantShapeArrayInteger * * \param shape The Array of the shape values. * \return A Constant. @@ -332,6 +333,28 @@ static inline Constant CheckConstantShape(const Array& shape) { return Constant(shape_array); } +/*! + * \brief Check whether a shape is static and create corresponding Array. 
Will replace CheckConstantShape after dynamic refactorization is complete + * + * \param shape The Array of the shape values. + * \return A Constant. + */ +static inline Array CheckConstantShapeArrayInteger(const Array& shape) { + Array constShape; + //auto shape_array = + // runtime::NDArray::Empty({int64_t(shape.size())}, DataType::Int(64), {kDLCPU, 0}); + //auto* shape_data = static_cast(shape_array->data); + for (size_t i = 0; i < shape.size(); ++i) { + const auto& dim_val = shape[i].as(); + CHECK(dim_val) << "Do not support symbolic shape for " + "Array format. Pass shape as Expr instead."; + + //shape_data[i] = dim_val->value; + constShape.push_back(dim_val->value); + } + return constShape; +} + /*! * \brief Check if two expressions are equal scalars. * \param a The expression to be checked. @@ -603,9 +626,10 @@ static inline Expr Pad(Expr data, Array> pad_width, double pad_ } static inline Expr Tile(Expr data, Array reps) { return MakeTile(data, reps); } +Expr MakeBroadCastTo(Expr data, Array shape); static inline Expr BroadCastTo(Expr data, Array shape) { - return MakeBroadCastTo(data, CheckConstantShape(shape)); + return MakeBroadCastTo(data, CheckConstantShapeArrayInteger(shape)); } Expr StopFusion(Expr data); diff --git a/src/runtime/library_module.cc b/src/runtime/library_module.cc index b12a9d195e2e..427c7ed31c18 100644 --- a/src/runtime/library_module.cc +++ b/src/runtime/library_module.cc @@ -74,8 +74,8 @@ PackedFunc WrapPackedFunc(TVMBackendPackedCFunc faddr, const ObjectPtr& TVMValue ret_value; int ret_type_code = kTVMNullptr; int ret = (*faddr)(const_cast(args.values), const_cast(args.type_codes), - args.num_args, &ret_value, &ret_type_code, NULL); - CHECK_EQ(ret, 0) << TVMGetLastError(); + args.num_args, &ret_value, &ret_type_code); + CHECK_EQ(ret, 0) << TVMGetLastError(); if (ret_type_code != kTVMNullptr) { *rv = TVMRetValue::MoveFromCHost(ret_value, ret_type_code); } diff --git a/tests/python/relay/dyn/test_dynamic_op_level10.py b/tests/python/relay/dyn/test_dynamic_op_level10.py new file mode 100644 index 000000000000..688236b175d1 --- /dev/null +++ b/tests/python/relay/dyn/test_dynamic_op_level10.py @@ -0,0 +1,57 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +""" +Support level10 operator test cases. 
+ +""" + + +import numpy as np +import tvm +from tvm import te +import topi.testing +from tvm import relay +from tvm.relay import transform +from tvm.relay.testing import ctx_list, run_infer_type +import topi +import topi.testing +import random + +def test_dyn_broadcast_to(): + dtype = 'uint8' + rank = 3 + dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), 'int64')) + x_shape = (1,) + x = relay.Var("x", relay.ty.TensorType(x_shape, dtype)) + z = relay.broadcast_to(x, dyn_shape) + zz = run_infer_type(z) + + assert zz.checked_type == relay.ty.TensorType((relay.Any(),) * rank, dtype) + + func = relay.Function([x, dyn_shape], z) + + x = np.random.uniform(size=x_shape).astype(dtype) + dyn_shape = (1,)*rank + ref_res = np.broadcast_to(x, dyn_shape) + for target, ctx in ctx_list(): + for kind in ["vm", "debug"]: + mod = tvm.ir.IRModule.from_expr(func) + intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) + op_res = intrp.evaluate(func)(x,np.array(dyn_shape)) + tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + +test_dyn_broadcast_to() diff --git a/tests/python/relay/test_op_level10.py b/tests/python/relay/test_op_level10.py index 7528267cc3dd..fcea447cd827 100644 --- a/tests/python/relay/test_op_level10.py +++ b/tests/python/relay/test_op_level10.py @@ -213,6 +213,7 @@ def test_broadcast_to_like(): x = relay.Var("x", relay.ty.TensorType(shape , dtype)) y = relay.Var("y", relay.ty.TensorType(shape_like, dtype)) z = relay.broadcast_to_like(x, y) + zz = run_infer_type(z) assert zz.checked_type == relay.ty.TensorType(shape_like, dtype) @@ -220,6 +221,7 @@ def test_broadcast_to_like(): x = np.random.uniform(size=shape).astype(dtype) y = np.random.uniform(size=shape_like).astype(dtype) ref_res = np.broadcast_to(x, shape_like) + for target, ctx in ctx_list(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, ctx=ctx, target=target) @@ -455,13 +457,13 @@ def _verify(indices_shape, depth, on_value, off_value, axis, dtype): func = relay.Function([indices], out) indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32") out_np = topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype) - + for target, ctx in ctx_list(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, ctx=ctx, target=target) out_relay = intrp.evaluate(func)(indices_np) tvm.testing.assert_allclose(out_relay.asnumpy(), out_np) - + _verify((3,), 3, 1, 0, -1, "int32") _verify((3,), 3, 1.0, 0.0, -1, "float32") _verify((2, 2), 5, 2, -2, 0, "int32") @@ -472,11 +474,11 @@ def _verify(indices_shape, depth, on_value, off_value, axis, dtype): if __name__ == "__main__": test_adaptive_pool() test_collapse_sum_like() + test_broadcast_to() test_broadcast_to_like() test_slice_like() test_reverse_reshape() test_batch_matmul() test_shape_of() test_sequence_mask() - test_ndarray_size() - test_one_hot() + test_ndarray_size() \ No newline at end of file diff --git a/tests/python/relay/test_pass_dynamic_to_static.py b/tests/python/relay/test_pass_dynamic_to_static.py index bcd8a644e807..b13770d7d446 100644 --- a/tests/python/relay/test_pass_dynamic_to_static.py +++ b/tests/python/relay/test_pass_dynamic_to_static.py @@ -181,10 +181,32 @@ def verify_topk(k, axis, ret_type, is_ascend, dtype): for ret_type in ["both", "values", "indices"]: verify_topk(k, axis, ret_type, True, "int64") verify_topk(k, axis, ret_type, False, "float32") +def test_dynamic_to_static_broadcast_to(): + def verify_broadcast_to(shape, broadcast_shape): + x = relay.var("x", 
relay.TensorType(shape, "float32")) + y = relay.var("y", relay.TensorType(broadcast_shape, "float32")) + z = relay.broadcast_to(x, shape=relay.shape_of(y)) + + func = run_infer_type(relay.Function([x, y], z)) + func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType()) + + zz = func2.body + assert isinstance(zz, relay.Call) + assert zz.op == relay.op.get("broadcast_to") + assert zz.checked_type == relay.ty.TensorType(broadcast_shape, "float32") + + x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32") + y_data = np.random.uniform(low=-1, high=1, size=broadcast_shape).astype("float32") + + ref_res = np.broadcast_to(x_data, y_data.shape) + verify_func(func2, [x_data, y_data], ref_res) + verify_broadcast_to((3, 1), (3, 3)) + if __name__=="__main__": test_dynamic_to_static_reshape() test_dynamic_to_static_double_reshape() test_dynamic_to_static_quad_reshape() test_dynamic_to_static_tile() test_dynamic_to_static_topk() + test_dynamic_to_static_broadcast_to() From 5e1b5c50b99677ba59d78e0af0ce47d522b42b91 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Tue, 7 Jul 2020 12:45:25 -0700 Subject: [PATCH 02/29] fixed lint! --- python/tvm/relay/op/dyn/_tensor.py | 7 +---- src/relay/op/dyn/tensor/transform.cc | 11 ++++---- src/relay/op/tensor/transform.cc | 32 ++++++++++------------- src/relay/transforms/dynamic_to_static.cc | 2 +- src/relay/transforms/pattern_util.h | 17 ++++++------ 5 files changed, 30 insertions(+), 39 deletions(-) diff --git a/python/tvm/relay/op/dyn/_tensor.py b/python/tvm/relay/op/dyn/_tensor.py index 6e56a440227d..4370b8ffac5f 100644 --- a/python/tvm/relay/op/dyn/_tensor.py +++ b/python/tvm/relay/op/dyn/_tensor.py @@ -17,12 +17,7 @@ #pylint: disable=invalid-name, unused-argument, len-as-condition """Backend compiler related feature registration""" -from tvm.te.hybrid import script -import topi - -from ..op import register_compute, register_shape_func -from ..op import register_broadcast_schedule, register_injective_schedule -from ..op import register_pattern, OpPattern +from ..op import register_shape_func from .._tensor import full_shape_func diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index 57aa8524b2f2..a1e4da26a67d 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -207,19 +207,19 @@ bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs CHECK_EQ(types.size(), 3); const auto* target_shape = types[1].as(); - DataType out_dtype = types[0].as()->dtype; + DataType out_dtype = types[0].as()->dtype; // rank must be static const IntImmNode* rank = target_shape->shape[0].as(); CHECK(rank) << "Target shape must have static rank"; // rank must be static even in dyn pass - // could add support for dyn rank in futures + // could add support for dyn rank in futures std::vector oshape; for (int i = 0; i < rank->value; ++i) { oshape.push_back(Any()); } - reporter->Assign(types[2], TensorType(oshape, out_dtype)); - return true; + reporter->Assign(types[2], TensorType(oshape, out_dtype)); + return true; } Expr MakeBroadCastTo(Expr data, Expr shape) { @@ -229,8 +229,7 @@ Expr MakeBroadCastTo(Expr data, Expr shape) { } Array BroadCastToCompute(const Attrs& attrs, const Array& inputs, - const Type& out_type) { - + const Type& out_type) { const auto* out_ttype = out_type.as(); return {topi::broadcast_to(inputs[0], out_ttype->shape)}; } diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 
7c9bc35a1b01..0d97c81aa71d 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -1782,19 +1782,18 @@ RELAY_REGISTER_OP("collapse_sum_like") // CollapseSumTo: -> B where Broadcast(A, B) = A bool CollapseSumToRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); - const InitOpAttrs* param = attrs.as(); + const InitOpAttrs* param = attrs.as(); const auto* target_shape = types[1].as(); DataType out_dtype = types[0].as()->dtype; - const IntImmNode* rank = target_shape->shape[0].as(); + const IntImmNode* rank = target_shape->shape[0].as(); CHECK(rank) << "Parameter must have static rank"; std::vector oshape; - if(param->shape) { - const Array& cshape_array = param->shape.value(); + if (param->shape) { + const Array& cshape_array = param->shape.value(); for (size_t i = 0; i < cshape_array.size(); i++) { oshape.push_back(cshape_array[i]); } @@ -1803,7 +1802,7 @@ bool CollapseSumToRel(const Array& types, int num_inputs, const Attrs& att oshape.push_back(Any()); } } - + reporter->Assign(types[2], TensorType(oshape, out_dtype)); return BroadcastRel({types[0], types[2], types[0]}, 2, Attrs(), reporter); } @@ -1832,23 +1831,20 @@ RELAY_REGISTER_OP("collapse_sum_to") bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { + // types = [data_type, ret_type], broadcast_to_type is in attrs bc static - // types = [data_type, ret_type], broadcast_to_type is in attrs bc static - - const InitOpAttrs* param = attrs.as(); CHECK(param); - - DataType out_dtype = types[0].as()->dtype; - std::vector oshape; - - const Array& cshape_array = param->shape.value(); - for (size_t i = 0; i < cshape_array.size(); ++i) { - oshape.push_back(cshape_array[i]); + + DataType out_dtype = types[0].as()->dtype; + std::vector oshape; + + const Array& cshape_array = param->shape.value(); + for (size_t i = 0; i < cshape_array.size(); ++i) { + oshape.push_back(cshape_array[i]); } reporter->Assign(types[1], TensorType(oshape, out_dtype)); return BroadcastRel({types[0], types[1], types[1]}, 2, Attrs(), reporter); - } Expr MakeBroadCastTo(Expr data, Array shape) { @@ -3045,4 +3041,4 @@ RELAY_REGISTER_OP("sparse_to_dense") .set_attr("FTVMCompute", SparseToDenseCompute); } // namespace relay -} // namespace tvm \ No newline at end of file +} // namespace tvm diff --git a/src/relay/transforms/dynamic_to_static.cc b/src/relay/transforms/dynamic_to_static.cc index a531625aa290..b914a18fc769 100644 --- a/src/relay/transforms/dynamic_to_static.cc +++ b/src/relay/transforms/dynamic_to_static.cc @@ -73,7 +73,7 @@ class DynamicToStaticMutator : public MixedModeMutator { attrs->shape = ToVector(shape->data); static const Op& broadcast_to = Op::Get("broadcast_to"); // pass in one arg to static broadcast to - return Call(broadcast_to, {call_node->args[0]}, Attrs(attrs), {}); + return Call(broadcast_to, {call_node->args[0]}, Attrs(attrs), {}); } } return post; diff --git a/src/relay/transforms/pattern_util.h b/src/relay/transforms/pattern_util.h index 1164ebc18151..4ef65c46333a 100644 --- a/src/relay/transforms/pattern_util.h +++ b/src/relay/transforms/pattern_util.h @@ -314,7 +314,7 @@ static inline Constant MakeConstantTensor(DataType dtype, std::vector s } /*! - * \brief Check whether a shape is static and create corresponding Constant. + * \brief Check whether a shape is static and create corresponding Constant. 
Eventually this will be removed and replaced with CheckConstantShapeArrayInteger * * \param shape The Array of the shape values. @@ -334,22 +334,23 @@ static inline Constant CheckConstantShape(const Array& shape) { } /*! - * \brief Check whether a shape is static and create corresponding Array. Will replace CheckConstantShape after dynamic refactorization is complete + * \brief Check whether a shape is static and create corresponding Array. Will replace + * CheckConstantShape after dynamic refactorization is complete * * \param shape The Array of the shape values. * \return A Constant. */ static inline Array CheckConstantShapeArrayInteger(const Array& shape) { - Array constShape; - //auto shape_array = - // runtime::NDArray::Empty({int64_t(shape.size())}, DataType::Int(64), {kDLCPU, 0}); - //auto* shape_data = static_cast(shape_array->data); + Array constShape; + // auto shape_array = + // runtime::NDArray::Empty({int64_t(shape.size())}, DataType::Int(64), {kDLCPU, 0}); + // auto* shape_data = static_cast(shape_array->data); for (size_t i = 0; i < shape.size(); ++i) { const auto& dim_val = shape[i].as(); CHECK(dim_val) << "Do not support symbolic shape for " "Array format. Pass shape as Expr instead."; - - //shape_data[i] = dim_val->value; + + // shape_data[i] = dim_val->value; constShape.push_back(dim_val->value); } return constShape; From 8e115b901fcfc8e08c0b99af0b98ee1c928ae27e Mon Sep 17 00:00:00 2001 From: Lily Orth-Smith Date: Tue, 7 Jul 2020 12:04:55 -0700 Subject: [PATCH 03/29] add test_one_hot() back --- tests/python/relay/test_op_level10.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/python/relay/test_op_level10.py b/tests/python/relay/test_op_level10.py index fcea447cd827..f40af2825901 100644 --- a/tests/python/relay/test_op_level10.py +++ b/tests/python/relay/test_op_level10.py @@ -481,4 +481,5 @@ def _verify(indices_shape, depth, on_value, off_value, axis, dtype): test_batch_matmul() test_shape_of() test_sequence_mask() - test_ndarray_size() \ No newline at end of file + test_one_hot() + test_ndarray_size() From 988fe4c1a6e68babe047505297e68ff0638a4c79 Mon Sep 17 00:00:00 2001 From: Lily Orth-Smith Date: Tue, 7 Jul 2020 12:06:20 -0700 Subject: [PATCH 04/29] add one_hot registration back --- python/tvm/relay/op/_transform.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/tvm/relay/op/_transform.py b/python/tvm/relay/op/_transform.py index e3b3955c9331..878b82a19a36 100644 --- a/python/tvm/relay/op/_transform.py +++ b/python/tvm/relay/op/_transform.py @@ -56,7 +56,7 @@ _reg.register_injective_schedule("gather") _reg.register_injective_schedule("gather_nd") _reg.register_injective_schedule("sequence_mask") - +_reg.register_injective_schedule("one_hot") _reg.register_reduce_schedule("collapse_sum_like") _reg.register_reduce_schedule("collapse_sum_to") _reg.register_injective_schedule("unravel_index") From 2e3550887cb66ac3e7b5fb2d1a962cf03d7ca472 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Tue, 7 Jul 2020 12:01:47 -0700 Subject: [PATCH 05/29] Dynamic BroadcastTo --- python/tvm/relay/op/_transform.py | 2 +- python/tvm/relay/op/dyn/_tensor.py | 1 - src/relay/op/tensor/transform.cc | 4 ++-- src/relay/transforms/dynamic_to_static.cc | 12 ++++++++++++ 4 files changed, 15 insertions(+), 4 deletions(-) diff --git a/python/tvm/relay/op/_transform.py b/python/tvm/relay/op/_transform.py index 878b82a19a36..e3b3955c9331 100644 --- a/python/tvm/relay/op/_transform.py +++ b/python/tvm/relay/op/_transform.py @@ -56,7 +56,7 @@ 
_reg.register_injective_schedule("gather") _reg.register_injective_schedule("gather_nd") _reg.register_injective_schedule("sequence_mask") -_reg.register_injective_schedule("one_hot") + _reg.register_reduce_schedule("collapse_sum_like") _reg.register_reduce_schedule("collapse_sum_to") _reg.register_injective_schedule("unravel_index") diff --git a/python/tvm/relay/op/dyn/_tensor.py b/python/tvm/relay/op/dyn/_tensor.py index 4370b8ffac5f..948e99a40f78 100644 --- a/python/tvm/relay/op/dyn/_tensor.py +++ b/python/tvm/relay/op/dyn/_tensor.py @@ -18,7 +18,6 @@ """Backend compiler related feature registration""" from ..op import register_shape_func - from .._tensor import full_shape_func register_shape_func("dyn.broadcast_to", True, full_shape_func) diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 0d97c81aa71d..281be4f09512 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -1782,6 +1782,7 @@ RELAY_REGISTER_OP("collapse_sum_like") // CollapseSumTo: -> B where Broadcast(A, B) = A bool CollapseSumToRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { + CHECK_EQ(types.size(), 3); const InitOpAttrs* param = attrs.as(); @@ -1802,7 +1803,6 @@ bool CollapseSumToRel(const Array& types, int num_inputs, const Attrs& att oshape.push_back(Any()); } } - reporter->Assign(types[2], TensorType(oshape, out_dtype)); return BroadcastRel({types[0], types[2], types[0]}, 2, Attrs(), reporter); } @@ -3041,4 +3041,4 @@ RELAY_REGISTER_OP("sparse_to_dense") .set_attr("FTVMCompute", SparseToDenseCompute); } // namespace relay -} // namespace tvm +} // namespace tvm \ No newline at end of file diff --git a/src/relay/transforms/dynamic_to_static.cc b/src/relay/transforms/dynamic_to_static.cc index b914a18fc769..d20fb7758b19 100644 --- a/src/relay/transforms/dynamic_to_static.cc +++ b/src/relay/transforms/dynamic_to_static.cc @@ -76,6 +76,18 @@ class DynamicToStaticMutator : public MixedModeMutator { return Call(broadcast_to, {call_node->args[0]}, Attrs(attrs), {}); } } + if (call_node->op == dyn_broadcast_to_op_) { + if (const ConstantNode* shape = call_node->args[1].as()) { + auto attrs = make_object(); + CHECK_EQ(shape->data->ndim, 1); + + // put shape in attrs + attrs->shape = ToVector(shape->data); + static const Op& broadcast_to = Op::Get("broadcast_to"); + // pass in one arg to static broadcast to + return Call(broadcast_to, {call_node->args[0]}, Attrs(attrs), {}); + } + } return post; } From 0852f42272c6d4c2277c6b049aac92c3b2d5247a Mon Sep 17 00:00:00 2001 From: electriclilies Date: Tue, 7 Jul 2020 12:45:25 -0700 Subject: [PATCH 06/29] fixed lint! 
--- src/relay/op/dyn/tensor/transform.cc | 10 ++++------ src/relay/op/tensor/transform.cc | 3 +-- src/relay/transforms/dynamic_to_static.cc | 2 +- 3 files changed, 6 insertions(+), 9 deletions(-) diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index a1e4da26a67d..2976e39a40e0 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -197,13 +197,11 @@ RELAY_REGISTER_OP("dyn.tile") .add_type_rel("DynamicTile", TileRel) .set_attr("FTVMCompute", TileCompute) .set_attr("TOpPattern", kInjective); - - -// broadcast_to operator -bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs, - const TypeReporter& reporter) { - // types = [data_type, broadcast_shape_type, ret_type] +bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs, + const TypeReporter& reporter) { + // types = [data_type, broadcast_shape_type, ret_type] + CHECK_EQ(types.size(), 3); const auto* target_shape = types[1].as(); diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 281be4f09512..5a6612a81ce9 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -1782,7 +1782,6 @@ RELAY_REGISTER_OP("collapse_sum_like") // CollapseSumTo: -> B where Broadcast(A, B) = A bool CollapseSumToRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 3); const InitOpAttrs* param = attrs.as(); @@ -3041,4 +3040,4 @@ RELAY_REGISTER_OP("sparse_to_dense") .set_attr("FTVMCompute", SparseToDenseCompute); } // namespace relay -} // namespace tvm \ No newline at end of file +} // namespace tvm diff --git a/src/relay/transforms/dynamic_to_static.cc b/src/relay/transforms/dynamic_to_static.cc index d20fb7758b19..0ab137837fd2 100644 --- a/src/relay/transforms/dynamic_to_static.cc +++ b/src/relay/transforms/dynamic_to_static.cc @@ -85,7 +85,7 @@ class DynamicToStaticMutator : public MixedModeMutator { attrs->shape = ToVector(shape->data); static const Op& broadcast_to = Op::Get("broadcast_to"); // pass in one arg to static broadcast to - return Call(broadcast_to, {call_node->args[0]}, Attrs(attrs), {}); + return Call(broadcast_to, {call_node->args[0]}, Attrs(attrs), {}); } } return post; From 6977134883421c5aac99dd3758035935dbcad957 Mon Sep 17 00:00:00 2001 From: Lily Orth-Smith Date: Tue, 7 Jul 2020 12:06:20 -0700 Subject: [PATCH 07/29] add one_hot registration back --- python/tvm/relay/op/_transform.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/tvm/relay/op/_transform.py b/python/tvm/relay/op/_transform.py index e3b3955c9331..878b82a19a36 100644 --- a/python/tvm/relay/op/_transform.py +++ b/python/tvm/relay/op/_transform.py @@ -56,7 +56,7 @@ _reg.register_injective_schedule("gather") _reg.register_injective_schedule("gather_nd") _reg.register_injective_schedule("sequence_mask") - +_reg.register_injective_schedule("one_hot") _reg.register_reduce_schedule("collapse_sum_like") _reg.register_reduce_schedule("collapse_sum_to") _reg.register_injective_schedule("unravel_index") From 9e11e24d8d8687eca08f719a51b4bbfef6ec76e6 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Tue, 7 Jul 2020 13:15:24 -0700 Subject: [PATCH 08/29] fixed lint.. 
again --- src/relay/op/dyn/tensor/transform.cc | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index 2976e39a40e0..d8fa89607577 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -197,11 +197,12 @@ RELAY_REGISTER_OP("dyn.tile") .add_type_rel("DynamicTile", TileRel) .set_attr("FTVMCompute", TileCompute) .set_attr("TOpPattern", kInjective); - -bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs, - const TypeReporter& reporter) { - // types = [data_type, broadcast_shape_type, ret_type] + +// broadcast_to operator +bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs, + const TypeReporter& reporter) { + // types = [data_type, broadcast_shape_type, ret_type] CHECK_EQ(types.size(), 3); const auto* target_shape = types[1].as(); From 40cea20f8289d9e5f323aab7a9bb3e7c5476e973 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Tue, 7 Jul 2020 13:23:11 -0700 Subject: [PATCH 09/29] fixed lint --- src/relay/op/dyn/tensor/transform.cc | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index d8fa89607577..580b57ac9c14 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -198,11 +198,10 @@ RELAY_REGISTER_OP("dyn.tile") .set_attr("FTVMCompute", TileCompute) .set_attr("TOpPattern", kInjective); - // broadcast_to operator bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs, - const TypeReporter& reporter) { - // types = [data_type, broadcast_shape_type, ret_type] + const TypeReporter& reporter) { + // types = [data_type, broadcast_shape_type, ret_type] CHECK_EQ(types.size(), 3); const auto* target_shape = types[1].as(); From 42831f799d720969499a956a3e05395424e9a6ea Mon Sep 17 00:00:00 2001 From: electriclilies Date: Tue, 7 Jul 2020 13:27:54 -0700 Subject: [PATCH 10/29] lint --- src/runtime/library_module.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/runtime/library_module.cc b/src/runtime/library_module.cc index 427c7ed31c18..7c3323c56229 100644 --- a/src/runtime/library_module.cc +++ b/src/runtime/library_module.cc @@ -75,7 +75,7 @@ PackedFunc WrapPackedFunc(TVMBackendPackedCFunc faddr, const ObjectPtr& int ret_type_code = kTVMNullptr; int ret = (*faddr)(const_cast(args.values), const_cast(args.type_codes), args.num_args, &ret_value, &ret_type_code); - CHECK_EQ(ret, 0) << TVMGetLastError(); + CHECK_EQ(ret, 0) << TVMGetLastError(); if (ret_type_code != kTVMNullptr) { *rv = TVMRetValue::MoveFromCHost(ret_value, ret_type_code); } From 1cc1b8f8c85654f63c6c15aba010f44729c30377 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Tue, 7 Jul 2020 14:44:16 -0700 Subject: [PATCH 11/29] responding to comments --- python/tvm/relay/op/dyn/transform.py | 93 ------------------- src/relay/op/dyn/tensor/transform.cc | 1 - src/relay/transforms/dynamic_to_static.cc | 12 --- src/relay/transforms/pattern_util.h | 5 +- .../relay/dyn/test_dynamic_op_level10.py | 5 - tests/python/relay/test_op_level10.py | 4 +- 6 files changed, 3 insertions(+), 117 deletions(-) delete mode 100644 python/tvm/relay/op/dyn/transform.py diff --git a/python/tvm/relay/op/dyn/transform.py b/python/tvm/relay/op/dyn/transform.py deleted file mode 100644 index 9f6b84380219..000000000000 --- a/python/tvm/relay/op/dyn/transform.py +++ /dev/null @@ -1,93 +0,0 @@ -# Licensed to the 
Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -# pylint: disable=import-outside-toplevel -"""Dynamic Transform operators.""" - -from . import _make - - -def reshape(data, newshape): - """Reshape the input array based on the values of the newshape tensor. - - To give user more convenience in without doing manual shape inference, - some dimensions of the shape can take special values from the set {0, -1, -3}. - The significance of each is explained below: - - ``0`` copy this dimension from the input to the output shape. - - .. code-block:: python - - data.shape = (2,3,4), newshape = (4,0,2), result.shape = (4,3,2) - data.shape = (2,3,4), newshape = (2,0,0), result.shape = (2,3,4) - - ``-1`` infers the dimension of the output shape by using the remainder of - the input dimensions keeping the size of the new array same as that of the input array. - At most one dimension of shape can be -1. - - .. code-block:: python - - data.shape = (2,3,4), newshape = (6,1,-1), result.shape = (6,1,4) - data.shape = (2,3,4), newshape = (3,-1,8), result.shape = (3,1,8) - data.shape = (2,3,4), newshape = (-1,), result.shape = (24,) - - ``-3`` use the product of two consecutive dimensions of the input shape - as the output dimension. - - .. code-block:: python - - data.shape = (2,3,4), newshape = (-3,4), result.shape = (6,4) - data.shape = (2,3,4,5), newshape = (-3,-3), result.shape = (6,20) - data.shape = (2,3,4), newshape = (0,-3), result.shape = (2,12) - - Special values -2 and -4 from the standard reshape op would introduce dynamic rank - in this op. Thus, they are not permitted. - - Parameters - ---------- - data : relay.Expr - The input data to the operator. - - newshape : relay.Expr - The new shape. Should be compatible with the original shape. - - Returns - ------- - result : relay.Expr - The reshaped result. - """ - return _make.reshape(data, newshape) - -def broadcast_to(data, shape): - - """Return a scalar value array with the same type, broadcast to - the provided shape. - - Parameters - ---------- - data : relay.Expr - The input tensor. - - shape : a relay.Expr, cannot be a tuple of consts - Provide the shape to broadcast to. - - Returns - ------- - result : relay.Expr - The resulting tensor. 
- """ - return _make.broadcast_to(data, shape) diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index 580b57ac9c14..8b045754e0f1 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -37,7 +37,6 @@ namespace relay { namespace dyn { /* relay.dyn.reshape */ -// TVM_REGISTER_NODE_TYPE(ReshapeAttrs); bool ReshapeRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { diff --git a/src/relay/transforms/dynamic_to_static.cc b/src/relay/transforms/dynamic_to_static.cc index 0ab137837fd2..b914a18fc769 100644 --- a/src/relay/transforms/dynamic_to_static.cc +++ b/src/relay/transforms/dynamic_to_static.cc @@ -76,18 +76,6 @@ class DynamicToStaticMutator : public MixedModeMutator { return Call(broadcast_to, {call_node->args[0]}, Attrs(attrs), {}); } } - if (call_node->op == dyn_broadcast_to_op_) { - if (const ConstantNode* shape = call_node->args[1].as()) { - auto attrs = make_object(); - CHECK_EQ(shape->data->ndim, 1); - - // put shape in attrs - attrs->shape = ToVector(shape->data); - static const Op& broadcast_to = Op::Get("broadcast_to"); - // pass in one arg to static broadcast to - return Call(broadcast_to, {call_node->args[0]}, Attrs(attrs), {}); - } - } return post; } diff --git a/src/relay/transforms/pattern_util.h b/src/relay/transforms/pattern_util.h index 4ef65c46333a..68921511d315 100644 --- a/src/relay/transforms/pattern_util.h +++ b/src/relay/transforms/pattern_util.h @@ -342,15 +342,12 @@ static inline Constant CheckConstantShape(const Array& shape) { */ static inline Array CheckConstantShapeArrayInteger(const Array& shape) { Array constShape; - // auto shape_array = - // runtime::NDArray::Empty({int64_t(shape.size())}, DataType::Int(64), {kDLCPU, 0}); - // auto* shape_data = static_cast(shape_array->data); + for (size_t i = 0; i < shape.size(); ++i) { const auto& dim_val = shape[i].as(); CHECK(dim_val) << "Do not support symbolic shape for " "Array format. 
Pass shape as Expr instead."; - // shape_data[i] = dim_val->value; constShape.push_back(dim_val->value); } return constShape; diff --git a/tests/python/relay/dyn/test_dynamic_op_level10.py b/tests/python/relay/dyn/test_dynamic_op_level10.py index 688236b175d1..ffde83b88673 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level10.py +++ b/tests/python/relay/dyn/test_dynamic_op_level10.py @@ -22,13 +22,8 @@ import numpy as np import tvm -from tvm import te -import topi.testing from tvm import relay -from tvm.relay import transform from tvm.relay.testing import ctx_list, run_infer_type -import topi -import topi.testing import random def test_dyn_broadcast_to(): diff --git a/tests/python/relay/test_op_level10.py b/tests/python/relay/test_op_level10.py index f40af2825901..a79f1a514fa7 100644 --- a/tests/python/relay/test_op_level10.py +++ b/tests/python/relay/test_op_level10.py @@ -457,13 +457,13 @@ def _verify(indices_shape, depth, on_value, off_value, axis, dtype): func = relay.Function([indices], out) indices_np = np.random.randint(0, depth, size=indices_shape).astype("int32") out_np = topi.testing.one_hot(indices_np, on_value, off_value, depth, axis, dtype) - + for target, ctx in ctx_list(): for kind in ["graph", "debug"]: intrp = relay.create_executor(kind, ctx=ctx, target=target) out_relay = intrp.evaluate(func)(indices_np) tvm.testing.assert_allclose(out_relay.asnumpy(), out_np) - + _verify((3,), 3, 1, 0, -1, "int32") _verify((3,), 3, 1.0, 0.0, -1, "float32") _verify((2, 2), 5, 2, -2, 0, "int32") From 81b7d5cd50110fffa58dd907bed9c6e56a6bbb9c Mon Sep 17 00:00:00 2001 From: electriclilies Date: Tue, 7 Jul 2020 14:56:23 -0700 Subject: [PATCH 12/29] skipping cuda in dynamic test --- tests/python/relay/dyn/test_dynamic_op_level10.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/python/relay/dyn/test_dynamic_op_level10.py b/tests/python/relay/dyn/test_dynamic_op_level10.py index ffde83b88673..5223e8bc3fe5 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level10.py +++ b/tests/python/relay/dyn/test_dynamic_op_level10.py @@ -43,10 +43,11 @@ def test_dyn_broadcast_to(): dyn_shape = (1,)*rank ref_res = np.broadcast_to(x, dyn_shape) for target, ctx in ctx_list(): - for kind in ["vm", "debug"]: - mod = tvm.ir.IRModule.from_expr(func) - intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) - op_res = intrp.evaluate(func)(x,np.array(dyn_shape)) - tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + if (target is not 'cuda'): #skip cuda because we don't have dynamic support for GPU + for kind in ["vm", "debug"]: + mod = tvm.ir.IRModule.from_expr(func) + intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) + op_res = intrp.evaluate(func)(x,np.array(dyn_shape)) + tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) test_dyn_broadcast_to() From 4fd8a84adb6db201bef2ead47863915ff784be83 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Tue, 7 Jul 2020 15:03:51 -0700 Subject: [PATCH 13/29] skipping cuda in dynamic test --- tests/python/relay/dyn/test_dynamic_op_level10.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/python/relay/dyn/test_dynamic_op_level10.py b/tests/python/relay/dyn/test_dynamic_op_level10.py index 5223e8bc3fe5..ab6f43d1a184 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level10.py +++ b/tests/python/relay/dyn/test_dynamic_op_level10.py @@ -29,7 +29,8 @@ def test_dyn_broadcast_to(): dtype = 'uint8' rank = 3 - dyn_shape = relay.Var("shape", 
relay.ty.TensorType((rank,), 'int64')) + shape_type = 'int64' + dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), shape_type)) x_shape = (1,) x = relay.Var("x", relay.ty.TensorType(x_shape, dtype)) z = relay.broadcast_to(x, dyn_shape) @@ -47,7 +48,7 @@ def test_dyn_broadcast_to(): for kind in ["vm", "debug"]: mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) - op_res = intrp.evaluate(func)(x,np.array(dyn_shape)) + op_res = intrp.evaluate(func)(x,np.array(dyn_shape).as(shape_type)) tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) test_dyn_broadcast_to() From da104ac8a29706b96d868a6097c5cfba14f7aa21 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Tue, 7 Jul 2020 15:05:31 -0700 Subject: [PATCH 14/29] fixed i386 test and GPU test --- tests/python/relay/dyn/test_dynamic_op_level10.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/python/relay/dyn/test_dynamic_op_level10.py b/tests/python/relay/dyn/test_dynamic_op_level10.py index ab6f43d1a184..347a5c7c0804 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level10.py +++ b/tests/python/relay/dyn/test_dynamic_op_level10.py @@ -48,7 +48,7 @@ def test_dyn_broadcast_to(): for kind in ["vm", "debug"]: mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) - op_res = intrp.evaluate(func)(x,np.array(dyn_shape).as(shape_type)) + op_res = intrp.evaluate(func)(x,np.array(dyn_shape).astype(shape_type)) tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) test_dyn_broadcast_to() From e47bca41ad21c77ad3d8763d7600dc4e0934d5c9 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Tue, 7 Jul 2020 16:13:34 -0700 Subject: [PATCH 15/29] lint --- tests/python/relay/dyn/test_dynamic_op_level10.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/python/relay/dyn/test_dynamic_op_level10.py b/tests/python/relay/dyn/test_dynamic_op_level10.py index 347a5c7c0804..d9b23a717f65 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level10.py +++ b/tests/python/relay/dyn/test_dynamic_op_level10.py @@ -44,7 +44,7 @@ def test_dyn_broadcast_to(): dyn_shape = (1,)*rank ref_res = np.broadcast_to(x, dyn_shape) for target, ctx in ctx_list(): - if (target is not 'cuda'): #skip cuda because we don't have dynamic support for GPU + if (target != 'cuda'): #skip cuda because we don't have dynamic support for GPU for kind in ["vm", "debug"]: mod = tvm.ir.IRModule.from_expr(func) intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) From dd1ec39c983a8e1fc63e7de738a38bebe66de693 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Tue, 7 Jul 2020 17:16:21 -0700 Subject: [PATCH 16/29] starting ones and zeros --- python/tvm/relay/op/dyn/_tensor.py | 4 +- python/tvm/relay/op/tensor.py | 8 ++- src/relay/op/dyn/tensor/transform.cc | 66 +++++++++++++++++++ .../relay/dyn/test_dynamic_op_level3.py | 15 +++++ 4 files changed, 90 insertions(+), 3 deletions(-) diff --git a/python/tvm/relay/op/dyn/_tensor.py b/python/tvm/relay/op/dyn/_tensor.py index 948e99a40f78..f4ce5198e34d 100644 --- a/python/tvm/relay/op/dyn/_tensor.py +++ b/python/tvm/relay/op/dyn/_tensor.py @@ -15,9 +15,11 @@ # specific language governing permissions and limitations # under the License. 
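A short sketch of what the dynamic ones/zeros support started here is meant to enable, assuming the Expr dispatch to the new dyn ops that this and the following commits wire into relay.zeros and relay.ones:

.. code-block:: python

    from tvm import relay
    from tvm.relay.testing import run_infer_type

    s = relay.var("s", relay.TensorType((2,), "int64"))  # runtime target shape
    z = relay.zeros(s, "float32")     # dispatches to dyn.zeros
    o = relay.ones(s, "float32")      # dispatches to dyn.ones

    zz = run_infer_type(z)
    # the rank comes from the shape tensor, the dimensions stay unknown
    assert zz.checked_type == relay.TensorType((relay.Any(), relay.Any()), "float32")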
#pylint: disable=invalid-name, unused-argument, len-as-condition -"""Backend compiler related feature registration""" +"""Backend compiler related feature registration for dynamic ops""" from ..op import register_shape_func from .._tensor import full_shape_func register_shape_func("dyn.broadcast_to", True, full_shape_func) +register_shape_func("dyn.ones", True, no_data_full_shape_func) +register_shape_func("dyn.zeros", True, no_data_full_shape_func) diff --git a/python/tvm/relay/op/tensor.py b/python/tvm/relay/op/tensor.py index c60dbee6dd64..c267accc79cc 100644 --- a/python/tvm/relay/op/tensor.py +++ b/python/tvm/relay/op/tensor.py @@ -939,8 +939,10 @@ def zeros(shape, dtype): result : relay.Expr The resulting tensor. """ + if isinstance(shape, Expr): + return _dyn.make.zeros(shape, dtype) if isinstance(shape, (list, tuple)): - shape = const(list(shape), "int32") + shape = const(list(shape), "int32") # change mes return _make.zeros(shape, dtype) @@ -976,8 +978,10 @@ def ones(shape, dtype): result : relay.Expr The resulting tensor. """ + if isinstance(shape, Expr): + return _dyn.make.ones(shape, dtype) if isinstance(shape, (list, tuple)): - shape = const(list(shape), "int32") + shape = const(list(shape), "int32") # change me return _make.ones(shape, dtype) diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index 8b045754e0f1..3c91e55b6fc0 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -244,6 +244,72 @@ RELAY_REGISTER_OP("dyn.broadcast_to") .set_attr("FTVMCompute", BroadCastToCompute) .set_attr("TOpPattern", kBroadcast); +// zeros and ones operator +bool InitOpRel(const Array& types, int num_inputs, const Attrs& attrs, + const TypeReporter& reporter) { + // types = [zeros_shape, ret_type] + CHECK_EQ(types.size(), 2); + const InitOpAttrs* param = attrs.as(); + const auto* fill_shape = types[0].as(); + DataType out_dtype = param->dtype; + + const IntImmNode* shape_shape = fill_shape->shape[0].as(); + CHECK(shape_shape) << "Parameter shape must have static rank"; + + std::vector oshape; + for (int i = 0; i < shape_shape->value; ++i) { + oshape.push_back(Any()); + } + + reporter->Assign(types[1], TensorType(oshape, out_dtype)); + return true; +} + +Expr MakeZeros(Expr shape, DataType dtype) { + auto attrs = make_object(); + attrs->dtype = std::move(dtype); + static const Op& op = Op::Get("dyn.zeros"); + return Call(op, {shape}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op.dyn_make.zeros").set_body_typed(MakeZeros); + +RELAY_REGISTER_OP("dyn.zeros") + .describe(R"code(Fill array with zeros. + +)code" TVM_ADD_FILELINE) + .set_attrs_type() + .set_num_inputs(1) + .add_argument("shape", "Tensor", "Target shape.") + .set_support_level(3) + .add_type_rel("DynamicInitOp", InitOpRel); + +Expr MakeOnes(Expr shape, DataType dtype) { + auto attrs = make_object(); + attrs->dtype = std::move(dtype); + static const Op& op = Op::Get("dyn.ones"); + return Call(op, {shape}, Attrs(attrs), {}); +} + +TVM_REGISTER_GLOBAL("relay.op.dyn._make.ones").set_body_typed(MakeOnes); + +RELAY_REGISTER_OP("dyn.ones") + .describe(R"code(Fill array with ones. 
+ +)code" TVM_ADD_FILELINE) + .set_attrs_type() + .set_num_inputs(1) + .add_argument("shape", "Tensor", "Target shape.") + .set_support_level(3) + .add_type_rel("DynamicInitOp", InitOpRel); + + + + + + + + } // namespace dyn } // namespace relay } // namespace tvm diff --git a/tests/python/relay/dyn/test_dynamic_op_level3.py b/tests/python/relay/dyn/test_dynamic_op_level3.py index 2f473c9de070..56ddb9113936 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level3.py +++ b/tests/python/relay/dyn/test_dynamic_op_level3.py @@ -84,7 +84,22 @@ def verify_tile(dshape, reps): verify_tile((2, 3, 4), (1, 2)) verify_tile((2, 3), (3, 2, 1)) + +def test_dyn_zeros_ones(): + def verify_zeros_ones(shape, dtype): + for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]: + shape = relay.var("x", relay.TensorType(shape, "float32")) + y = op(shape, dtype) + yy = run_infer_type(y) + assert yy.checked_type == relay.TensorType(shape, dtype) + intrp = create_executor() + intrp_res = intrp.evaluate(y).asnumpy() + np.testing.assert_allclose(intrp_res, ref(shape, dtype)) + + verify_zeros_ones((124, 50), "float64") + if __name__ == "__main__": test_dyn_reshape() test_dyn_shape_reshape() test_dyn_tile() + test_dyn_zeros_ones() \ No newline at end of file From c1b03037e1e8a32c7e0970ef7df1227c1e276787 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Wed, 8 Jul 2020 12:30:30 -0700 Subject: [PATCH 17/29] fixed dynamic ones and zeros, wrote dyn ones and zeros test --- python/tvm/relay/op/dyn/__init__.py | 2 +- python/tvm/relay/op/dyn/_tensor.py | 27 +++++++++++++++++-- python/tvm/relay/op/tensor.py | 7 ++--- src/relay/op/dyn/tensor/transform.cc | 4 +-- .../relay/dyn/test_dynamic_op_level3.py | 26 ++++++++++++------ 5 files changed, 50 insertions(+), 16 deletions(-) diff --git a/python/tvm/relay/op/dyn/__init__.py b/python/tvm/relay/op/dyn/__init__.py index 967ecbc36bad..d1c5dbb4804d 100644 --- a/python/tvm/relay/op/dyn/__init__.py +++ b/python/tvm/relay/op/dyn/__init__.py @@ -19,4 +19,4 @@ from . import _algorithm from . import _transform -from . import _tensor +from . 
import _tensor \ No newline at end of file diff --git a/python/tvm/relay/op/dyn/_tensor.py b/python/tvm/relay/op/dyn/_tensor.py index f4ce5198e34d..6b2307f2c5eb 100644 --- a/python/tvm/relay/op/dyn/_tensor.py +++ b/python/tvm/relay/op/dyn/_tensor.py @@ -17,8 +17,31 @@ #pylint: disable=invalid-name, unused-argument, len-as-condition """Backend compiler related feature registration for dynamic ops""" -from ..op import register_shape_func -from .._tensor import full_shape_func +import topi + +from ..op import register_shape_func, register_compute +from ..op import register_broadcast_schedule +from ..op import register_pattern, OpPattern +from .._tensor import full_shape_func, no_data_full_shape_func +from .._tensor import zeros_compute, ones_compute + +# ones +@register_compute("dyn.ones") +def ones_compute(attrs, inputs, output_type): + assert len(inputs) == 1 + return [topi.full(output_type.shape, output_type.dtype, 1.0)] + +register_broadcast_schedule("dyn.ones") +register_pattern("dyn.ones", OpPattern.ELEMWISE) + +# zeros -- copied from static op, maybe import with a wrapper but I think they might need to be different +@register_compute("dyn.zeros") +def zeros_compute(attrs, inputs, output_type): + assert len(inputs) == 1 + return [topi.full(output_type.shape, output_type.dtype, 0.0)] + +register_broadcast_schedule("dyn.zeros") +register_pattern("dyn.zeros", OpPattern.ELEMWISE) register_shape_func("dyn.broadcast_to", True, full_shape_func) register_shape_func("dyn.ones", True, no_data_full_shape_func) diff --git a/python/tvm/relay/op/tensor.py b/python/tvm/relay/op/tensor.py index c267accc79cc..117872422f81 100644 --- a/python/tvm/relay/op/tensor.py +++ b/python/tvm/relay/op/tensor.py @@ -20,7 +20,8 @@ from tvm.runtime import TVMContext as _TVMContext from . import _make -from ..expr import Tuple, const +from .dyn import _make as _dyn_make +from ..expr import Tuple, const, Expr # We create a wrapper function for each operator in the @@ -940,7 +941,7 @@ def zeros(shape, dtype): The resulting tensor. """ if isinstance(shape, Expr): - return _dyn.make.zeros(shape, dtype) + return _dyn_make.zeros(shape, dtype) if isinstance(shape, (list, tuple)): shape = const(list(shape), "int32") # change mes return _make.zeros(shape, dtype) @@ -979,7 +980,7 @@ def ones(shape, dtype): The resulting tensor. """ if isinstance(shape, Expr): - return _dyn.make.ones(shape, dtype) + return _dyn_make.ones(shape, dtype) if isinstance(shape, (list, tuple)): shape = const(list(shape), "int32") # change me return _make.ones(shape, dtype) diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index 3c91e55b6fc0..7885c0f74173 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -265,14 +265,14 @@ bool InitOpRel(const Array& types, int num_inputs, const Attrs& attrs, return true; } -Expr MakeZeros(Expr shape, DataType dtype) { +Expr MakeZeros(Expr shape, DataType dtype) { auto attrs = make_object(); attrs->dtype = std::move(dtype); static const Op& op = Op::Get("dyn.zeros"); return Call(op, {shape}, Attrs(attrs), {}); } -TVM_REGISTER_GLOBAL("relay.op.dyn_make.zeros").set_body_typed(MakeZeros); +TVM_REGISTER_GLOBAL("relay.op.dyn._make.zeros").set_body_typed(MakeZeros); RELAY_REGISTER_OP("dyn.zeros") .describe(R"code(Fill array with zeros. 
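
(Illustrative note, not part of the patch series: the tensor.py change above makes relay.zeros / relay.ones dispatch on the type of the shape argument — a Python int, list or tuple goes to the static op, whose shape now lives in the InitOpAttrs and which takes no tensor inputs, while a relay.Expr goes to the new dyn.zeros / dyn.ones ops that take the shape as a 1-D tensor argument. A minimal sketch of the two entry points, assuming the registrations in this commit:)

    import tvm
    from tvm import relay

    # Static form: the shape is a Python tuple, so relay.zeros stores it in the
    # attributes of the zero-input "zeros" op.
    z_static = relay.zeros((2, 3), "float32")

    # Dynamic form: the shape is a relay expression, so relay.zeros builds a
    # call to "dyn.zeros" with the shape tensor as its single argument.
    shape = relay.var("shape", relay.TensorType((2,), "int64"))
    z_dyn = relay.zeros(shape, "float32")
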
diff --git a/tests/python/relay/dyn/test_dynamic_op_level3.py b/tests/python/relay/dyn/test_dynamic_op_level3.py index 56ddb9113936..189857121976 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level3.py +++ b/tests/python/relay/dyn/test_dynamic_op_level3.py @@ -88,15 +88,25 @@ def verify_tile(dshape, reps): def test_dyn_zeros_ones(): def verify_zeros_ones(shape, dtype): for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]: - shape = relay.var("x", relay.TensorType(shape, "float32")) - y = op(shape, dtype) + + rank = len(shape) + dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), 'int64')) + y = op(dyn_shape, dtype) yy = run_infer_type(y) - assert yy.checked_type == relay.TensorType(shape, dtype) - intrp = create_executor() - intrp_res = intrp.evaluate(y).asnumpy() - np.testing.assert_allclose(intrp_res, ref(shape, dtype)) - - verify_zeros_ones((124, 50), "float64") + assert yy.checked_type == relay.ty.TensorType((relay.Any(),) * rank, dtype) + + func = relay.Function([dyn_shape], y) + ref_res = ref(shape, dtype) + for target, ctx in ctx_list(): + if (target != 'cuda'): #skip cuda because no dynamic support for GPU + for kind in ["vm", "debug"]: + mod = tvm.ir.IRModule.from_expr(func) + intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target) + op_res = intrp.evaluate(func)(np.array(shape).astype('int64')) + tvm.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=1e-5) + + + verify_zeros_ones((124, 50), 'float64') if __name__ == "__main__": test_dyn_reshape() From 2c3380297492c061a2a9fb6db36907a21a284dfb Mon Sep 17 00:00:00 2001 From: electriclilies Date: Wed, 8 Jul 2020 13:02:57 -0700 Subject: [PATCH 18/29] added static version of zeros, ones and added a check for size of types to static BroadCastToRel --- python/tvm/relay/op/_tensor.py | 4 +- python/tvm/relay/op/tensor.py | 8 +++- src/relay/op/dyn/tensor/transform.cc | 7 ---- src/relay/op/tensor/transform.cc | 49 +++++++++-------------- src/relay/transforms/dynamic_to_static.cc | 27 +------------ src/relay/transforms/pattern_util.h | 5 ++- 6 files changed, 32 insertions(+), 68 deletions(-) diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index cd9e4ed050d2..97cac919d9ad 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -92,7 +92,7 @@ # zeros @register_compute("zeros") def zeros_compute(attrs, inputs, output_type): - assert len(inputs) == 1 + assert len(inputs) == 0 return [topi.full(output_type.shape, output_type.dtype, 0.0)] register_broadcast_schedule("zeros") @@ -109,7 +109,7 @@ def zeros_like_compute(attrs, inputs, output_type): # ones @register_compute("ones") def ones_compute(attrs, inputs, output_type): - assert len(inputs) == 1 + assert len(inputs) == 0 return [topi.full(output_type.shape, output_type.dtype, 1.0)] register_broadcast_schedule("ones") diff --git a/python/tvm/relay/op/tensor.py b/python/tvm/relay/op/tensor.py index 117872422f81..fb992051f4d1 100644 --- a/python/tvm/relay/op/tensor.py +++ b/python/tvm/relay/op/tensor.py @@ -942,8 +942,10 @@ def zeros(shape, dtype): """ if isinstance(shape, Expr): return _dyn_make.zeros(shape, dtype) + if isinstance(shape, int): + shape = [shape] if isinstance(shape, (list, tuple)): - shape = const(list(shape), "int32") # change mes + shape = list(shape) return _make.zeros(shape, dtype) @@ -981,8 +983,10 @@ def ones(shape, dtype): """ if isinstance(shape, Expr): return _dyn_make.ones(shape, dtype) + if isinstance(shape, int): + shape = [shape] if isinstance(shape, (list, tuple)): - 
shape = const(list(shape), "int32") # change me + shape = list(shape) return _make.ones(shape, dtype) diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index 7885c0f74173..928d5a4f2ad4 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -303,13 +303,6 @@ RELAY_REGISTER_OP("dyn.ones") .set_support_level(3) .add_type_rel("DynamicInitOp", InitOpRel); - - - - - - - } // namespace dyn } // namespace relay } // namespace tvm diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 5a6612a81ce9..64d20410e27b 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -979,37 +979,29 @@ RELAY_REGISTER_OP("full") bool InitOpRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { - CHECK_EQ(types.size(), 2); + // types = [ret_type] + CHECK_EQ(types.size(), 1); + const InitOpAttrs* param = attrs.as(); - const auto* fill_shape = types[0].as(); + CHECK(param); + DataType out_dtype = param->dtype; + std::vector oshape; - const IntImmNode* shape_shape = fill_shape->shape[0].as(); - CHECK(shape_shape) << "Parameter shape must have static shape"; - - std::vector oshape; - if (param->shape) { - const Array& cshape_array = param->shape.value(); - for (size_t i = 0; i < cshape_array.size(); ++i) { - oshape.push_back(cshape_array[i]); - } - } else { - for (int i = 0; i < shape_shape->value; ++i) { - oshape.push_back(Any()); - } + const Array& cshape_array = param->shape.value(); + for (size_t i = 0; i < cshape_array.size(); ++i) { + oshape.push_back(cshape_array[i]); } - reporter->Assign(types[1], TensorType(oshape, out_dtype)); + reporter->Assign(types[0], TensorType(oshape, out_dtype)); return true; } -Expr MakeZeros(Expr shape, DataType dtype) { +Expr MakeZeros(Array shape, DataType dtype) { auto attrs = make_object(); - if (const auto* cshape = shape.as()) { - attrs->shape = ToVector(cshape->data); - } + attrs->shape = std::move(shape); attrs->dtype = std::move(dtype); static const Op& op = Op::Get("zeros"); - return Call(op, {shape}, Attrs(attrs), {}); + return Call(op, {}, Attrs(attrs), {}); } TVM_REGISTER_GLOBAL("relay.op._make.zeros").set_body_typed(MakeZeros); @@ -1019,19 +1011,16 @@ RELAY_REGISTER_OP("zeros") )code" TVM_ADD_FILELINE) .set_attrs_type() - .set_num_inputs(1) - .add_argument("shape", "Tensor", "Target shape.") + .set_num_inputs(0) .set_support_level(3) .add_type_rel("InitOp", InitOpRel); -Expr MakeOnes(Expr shape, DataType dtype) { +Expr MakeOnes(Array shape, DataType dtype) { auto attrs = make_object(); - if (const auto* cshape = shape.as()) { - attrs->shape = ToVector(cshape->data); - } + attrs->shape = std::move(shape); attrs->dtype = std::move(dtype); static const Op& op = Op::Get("ones"); - return Call(op, {shape}, Attrs(attrs), {}); + return Call(op, {}, Attrs(attrs), {}); } TVM_REGISTER_GLOBAL("relay.op._make.ones").set_body_typed(MakeOnes); @@ -1041,8 +1030,7 @@ RELAY_REGISTER_OP("ones") )code" TVM_ADD_FILELINE) .set_attrs_type() - .set_num_inputs(1) - .add_argument("shape", "Tensor", "Target shape.") + .set_num_inputs(0) .set_support_level(3) .add_type_rel("InitOp", InitOpRel); @@ -1831,6 +1819,7 @@ RELAY_REGISTER_OP("collapse_sum_to") bool BroadCastToRel(const Array& types, int num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types = [data_type, ret_type], broadcast_to_type is in attrs bc static + CHECK_EQ(types.size(), 2); const InitOpAttrs* param = attrs.as(); CHECK(param); diff --git 
a/src/relay/transforms/dynamic_to_static.cc b/src/relay/transforms/dynamic_to_static.cc index b914a18fc769..3888ac3dcddb 100644 --- a/src/relay/transforms/dynamic_to_static.cc +++ b/src/relay/transforms/dynamic_to_static.cc @@ -33,15 +33,7 @@ namespace relay { class DynamicToStaticMutator : public MixedModeMutator { public: -<<<<<<< HEAD DynamicToStaticMutator() {} -======= - DynamicToStaticMutator() - : dyn_reshape_op_(Op::Get("dyn.reshape")), - dyn_tile_op_(Op::Get("dyn.tile")), - dyn_topk_op_(Op::Get("dyn.topk")), - dyn_broadcast_to_op_(Op::Get("dyn.broadcast_to")) {} ->>>>>>> bdc05de53... Dynamic BroadcastTo private: Expr Rewrite_(const CallNode* pre, const Expr& post) override { @@ -63,17 +55,10 @@ class DynamicToStaticMutator : public MixedModeMutator { return MakeTopK(call_node->args[0], static_cast(ToScalar(k->data, 0)), param->axis, param->ret_type, param->is_ascend, param->dtype); } - } - if (call_node->op == dyn_broadcast_to_op_) { + } else if (call_node->op == Op::Get("dyn.broadcast_to")) { if (const ConstantNode* shape = call_node->args[1].as()) { - auto attrs = make_object(); CHECK_EQ(shape->data->ndim, 1); - - // put shape in attrs - attrs->shape = ToVector(shape->data); - static const Op& broadcast_to = Op::Get("broadcast_to"); - // pass in one arg to static broadcast to - return Call(broadcast_to, {call_node->args[0]}, Attrs(attrs), {}); + return MakeBroadCastTo(call_node->args[0], ToVector(shape->data)); } } return post; @@ -86,14 +71,6 @@ class DynamicToStaticMutator : public MixedModeMutator { } return post; } -<<<<<<< HEAD -======= - - const Op& dyn_reshape_op_; - const Op& dyn_tile_op_; - const Op& dyn_topk_op_; - const Op& dyn_broadcast_to_op_; ->>>>>>> bdc05de53... Dynamic BroadcastTo }; Expr DynamicToStatic(Function f, IRModule m) { diff --git a/src/relay/transforms/pattern_util.h b/src/relay/transforms/pattern_util.h index 68921511d315..8ce495e33f5d 100644 --- a/src/relay/transforms/pattern_util.h +++ b/src/relay/transforms/pattern_util.h @@ -526,7 +526,7 @@ inline Expr ZerosLike(Expr e) { } inline Expr Zeros(Array shape, DataType dtype) { - return MakeZeros(CheckConstantShape(shape), dtype); + return MakeZeros(CheckConstantShapeArrayInteger(shape), dtype); } inline Expr OnesLike(Expr e) { @@ -535,7 +535,7 @@ inline Expr OnesLike(Expr e) { } inline Expr Ones(Array shape, DataType dtype) { - return MakeOnes(CheckConstantShape(shape), dtype); + return MakeOnes(CheckConstantShapeArrayInteger(shape), dtype); } inline Expr CollapseSumLike(Expr e) { @@ -624,6 +624,7 @@ static inline Expr Pad(Expr data, Array> pad_width, double pad_ } static inline Expr Tile(Expr data, Array reps) { return MakeTile(data, reps); } + Expr MakeBroadCastTo(Expr data, Array shape); static inline Expr BroadCastTo(Expr data, Array shape) { From 79d7e8aa3de7ddb69e5061c65e8b12dc53b9533e Mon Sep 17 00:00:00 2001 From: electriclilies Date: Wed, 8 Jul 2020 16:43:39 -0700 Subject: [PATCH 19/29] added dynamic to static pass for zeros and ones, dynamic test and dynamic to static test --- src/relay/transforms/dynamic_to_static.cc | 13 ++++++++++++ .../relay/dyn/test_dynamic_op_level3.py | 1 - .../relay/test_pass_dynamic_to_static.py | 20 +++++++++++++++++++ 3 files changed, 33 insertions(+), 1 deletion(-) diff --git a/src/relay/transforms/dynamic_to_static.cc b/src/relay/transforms/dynamic_to_static.cc index 3888ac3dcddb..d59669c4d20e 100644 --- a/src/relay/transforms/dynamic_to_static.cc +++ b/src/relay/transforms/dynamic_to_static.cc @@ -58,8 +58,21 @@ class DynamicToStaticMutator : public 
MixedModeMutator { } else if (call_node->op == Op::Get("dyn.broadcast_to")) { if (const ConstantNode* shape = call_node->args[1].as()) { CHECK_EQ(shape->data->ndim, 1); + return MakeBroadCastTo(call_node->args[0], ToVector(shape->data)); } + } else if (call_node->op == Op::Get("dyn.zeros")) { + if (const ConstantNode* shape = call_node->args[0].as()) { + const InitOpAttrs* param = call_node->attrs.as(); + CHECK(param); + return MakeZeros(ToVector(shape->data), param->dtype); + } + } else if (call_node-> op == Op::Get("dyn.ones")) { + if (const ConstantNode* shape = call_node->args[0].as()) { + const InitOpAttrs* param = call_node->attrs.as(); + CHECK(param); + return MakeOnes(ToVector(shape->data), param->dtype); + } } return post; } diff --git a/tests/python/relay/dyn/test_dynamic_op_level3.py b/tests/python/relay/dyn/test_dynamic_op_level3.py index 189857121976..e63f9b8cd722 100644 --- a/tests/python/relay/dyn/test_dynamic_op_level3.py +++ b/tests/python/relay/dyn/test_dynamic_op_level3.py @@ -88,7 +88,6 @@ def verify_tile(dshape, reps): def test_dyn_zeros_ones(): def verify_zeros_ones(shape, dtype): for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]: - rank = len(shape) dyn_shape = relay.Var("shape", relay.ty.TensorType((rank,), 'int64')) y = op(dyn_shape, dtype) diff --git a/tests/python/relay/test_pass_dynamic_to_static.py b/tests/python/relay/test_pass_dynamic_to_static.py index b13770d7d446..e6d0299c1114 100644 --- a/tests/python/relay/test_pass_dynamic_to_static.py +++ b/tests/python/relay/test_pass_dynamic_to_static.py @@ -201,6 +201,26 @@ def verify_broadcast_to(shape, broadcast_shape): ref_res = np.broadcast_to(x_data, y_data.shape) verify_func(func2, [x_data, y_data], ref_res) verify_broadcast_to((3, 1), (3, 3)) + +def test_dynamic_to_static_zeros_ones(): + def verify_ones_zeros(shape, dtype): + for op, ref, op_str in [(relay.zeros, np.zeros, "zeros"), (relay.ones, np.ones, "ones")]: + x = relay.var("x", relay.TensorType(shape, dtype)) + y = op(relay.shape_of(x), dtype) + + func = run_infer_type(relay.Function([x], y)) + func2 = run_opt_pass(run_opt_pass(func, transform.DynamicToStatic()), transform.InferType()) + + zz = func2.body + assert isinstance(zz, relay.Constant) + assert zz.checked_type == relay.ty.TensorType(shape, dtype) + + x_data = np.random.uniform(low=1, high=1, size=shape) + ref_res = ref(x_data.shape) + verify_func(func2, [x_data], ref_res) + + verify_ones_zeros((1, 2, 3), 'int64') + verify_ones_zeros((9, 8, 3, 4), 'float32') if __name__=="__main__": test_dynamic_to_static_reshape() From f6d7765d979fadca38ee484cb3dc04b727f869f4 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Wed, 8 Jul 2020 16:58:17 -0700 Subject: [PATCH 20/29] removed op_str in dyn to static pass test --- tests/python/relay/test_pass_dynamic_to_static.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/python/relay/test_pass_dynamic_to_static.py b/tests/python/relay/test_pass_dynamic_to_static.py index e6d0299c1114..62d4fd88bbd7 100644 --- a/tests/python/relay/test_pass_dynamic_to_static.py +++ b/tests/python/relay/test_pass_dynamic_to_static.py @@ -204,7 +204,7 @@ def verify_broadcast_to(shape, broadcast_shape): def test_dynamic_to_static_zeros_ones(): def verify_ones_zeros(shape, dtype): - for op, ref, op_str in [(relay.zeros, np.zeros, "zeros"), (relay.ones, np.ones, "ones")]: + for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]: x = relay.var("x", relay.TensorType(shape, dtype)) y = op(relay.shape_of(x), dtype) From 
2192ff9bf695dbf67d624439553ee4c5fc2d80e9 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Thu, 9 Jul 2020 11:57:21 -0700 Subject: [PATCH 21/29] fixed lint --- python/tvm/relay/op/_tensor.py | 1 - python/tvm/relay/op/dyn/__init__.py | 2 +- python/tvm/relay/op/dyn/_tensor.py | 2 -- src/relay/op/dyn/tensor/transform.cc | 3 ++- src/relay/op/tensor/transform.cc | 8 ++++---- 5 files changed, 7 insertions(+), 9 deletions(-) diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index 97cac919d9ad..a989ef23efc3 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -17,7 +17,6 @@ #pylint: disable=invalid-name, unused-argument, len-as-condition """Backend compiler related feature registration""" -from tvm.te.hybrid import script import topi from .op import register_compute, register_shape_func diff --git a/python/tvm/relay/op/dyn/__init__.py b/python/tvm/relay/op/dyn/__init__.py index d1c5dbb4804d..967ecbc36bad 100644 --- a/python/tvm/relay/op/dyn/__init__.py +++ b/python/tvm/relay/op/dyn/__init__.py @@ -19,4 +19,4 @@ from . import _algorithm from . import _transform -from . import _tensor \ No newline at end of file +from . import _tensor diff --git a/python/tvm/relay/op/dyn/_tensor.py b/python/tvm/relay/op/dyn/_tensor.py index 6b2307f2c5eb..dc2835977fb9 100644 --- a/python/tvm/relay/op/dyn/_tensor.py +++ b/python/tvm/relay/op/dyn/_tensor.py @@ -23,7 +23,6 @@ from ..op import register_broadcast_schedule from ..op import register_pattern, OpPattern from .._tensor import full_shape_func, no_data_full_shape_func -from .._tensor import zeros_compute, ones_compute # ones @register_compute("dyn.ones") @@ -34,7 +33,6 @@ def ones_compute(attrs, inputs, output_type): register_broadcast_schedule("dyn.ones") register_pattern("dyn.ones", OpPattern.ELEMWISE) -# zeros -- copied from static op, maybe import with a wrapper but I think they might need to be different @register_compute("dyn.zeros") def zeros_compute(attrs, inputs, output_type): assert len(inputs) == 1 diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index 928d5a4f2ad4..82a94c78b33c 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -31,6 +31,7 @@ #include #include +#include namespace tvm { namespace relay { @@ -260,7 +261,7 @@ bool InitOpRel(const Array& types, int num_inputs, const Attrs& attrs, for (int i = 0; i < shape_shape->value; ++i) { oshape.push_back(Any()); } - + reporter->Assign(types[1], TensorType(oshape, out_dtype)); return true; } diff --git a/src/relay/op/tensor/transform.cc b/src/relay/op/tensor/transform.cc index 64d20410e27b..85e8671cf8d5 100644 --- a/src/relay/op/tensor/transform.cc +++ b/src/relay/op/tensor/transform.cc @@ -984,9 +984,9 @@ bool InitOpRel(const Array& types, int num_inputs, const Attrs& attrs, const InitOpAttrs* param = attrs.as(); CHECK(param); - + DataType out_dtype = param->dtype; - std::vector oshape; + std::vector oshape; const Array& cshape_array = param->shape.value(); for (size_t i = 0; i < cshape_array.size(); ++i) { @@ -998,7 +998,7 @@ bool InitOpRel(const Array& types, int num_inputs, const Attrs& attrs, Expr MakeZeros(Array shape, DataType dtype) { auto attrs = make_object(); - attrs->shape = std::move(shape); + attrs->shape = std::move(shape); attrs->dtype = std::move(dtype); static const Op& op = Op::Get("zeros"); return Call(op, {}, Attrs(attrs), {}); @@ -1819,7 +1819,7 @@ RELAY_REGISTER_OP("collapse_sum_to") bool BroadCastToRel(const Array& types, int 
num_inputs, const Attrs& attrs, const TypeReporter& reporter) { // types = [data_type, ret_type], broadcast_to_type is in attrs bc static - CHECK_EQ(types.size(), 2); + CHECK_EQ(types.size(), 2); const InitOpAttrs* param = attrs.as(); CHECK(param); From 3f71e4d605f747d1aa6c76467551e0fee1ec2b01 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Thu, 9 Jul 2020 12:40:42 -0700 Subject: [PATCH 22/29] fix lint hopefully --- src/relay/op/dyn/tensor/transform.cc | 2 +- tests/python/relay/test_pass_dynamic_to_static.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/relay/op/dyn/tensor/transform.cc b/src/relay/op/dyn/tensor/transform.cc index 82a94c78b33c..007b3dd86028 100644 --- a/src/relay/op/dyn/tensor/transform.cc +++ b/src/relay/op/dyn/tensor/transform.cc @@ -30,8 +30,8 @@ #include #include -#include #include +#include namespace tvm { namespace relay { diff --git a/tests/python/relay/test_pass_dynamic_to_static.py b/tests/python/relay/test_pass_dynamic_to_static.py index 62d4fd88bbd7..8ca788212ff3 100644 --- a/tests/python/relay/test_pass_dynamic_to_static.py +++ b/tests/python/relay/test_pass_dynamic_to_static.py @@ -229,4 +229,4 @@ def verify_ones_zeros(shape, dtype): test_dynamic_to_static_tile() test_dynamic_to_static_topk() test_dynamic_to_static_broadcast_to() - + test_dynamic_to_static_zeros_ones() From 1272a71a36f4d92a27b82e60a6b8a8a6f4265875 Mon Sep 17 00:00:00 2001 From: electriclilies Date: Thu, 9 Jul 2020 12:58:52 -0700 Subject: [PATCH 23/29] removed import const --- python/tvm/relay/op/tensor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/tvm/relay/op/tensor.py b/python/tvm/relay/op/tensor.py index fb992051f4d1..a02e08d2deb7 100644 --- a/python/tvm/relay/op/tensor.py +++ b/python/tvm/relay/op/tensor.py @@ -21,7 +21,7 @@ from . 
import _make from .dyn import _make as _dyn_make -from ..expr import Tuple, const, Expr +from ..expr import Tuple, Expr # We create a wrapper function for each operator in the From 7ad825e7c7f80832f3fcc1992b974694867366d6 Mon Sep 17 00:00:00 2001 From: Lily Orth-Smith Date: Thu, 9 Jul 2020 13:41:38 -0700 Subject: [PATCH 24/29] removed import that was actually used --- python/tvm/relay/op/_tensor.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index a989ef23efc3..97cac919d9ad 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -17,6 +17,7 @@ #pylint: disable=invalid-name, unused-argument, len-as-condition """Backend compiler related feature registration""" +from tvm.te.hybrid import script import topi from .op import register_compute, register_shape_func From 9ae620e6c40478279ed12a3ac8a31b7003e9b2e8 Mon Sep 17 00:00:00 2001 From: Lily Orth-Smith Date: Thu, 9 Jul 2020 15:30:22 -0700 Subject: [PATCH 25/29] copy all attributes from broadcast_to, ones, zeros, full --- python/tvm/relay/_parser.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/tvm/relay/_parser.py b/python/tvm/relay/_parser.py index ac60a1f7bb51..0d3f86f6262d 100644 --- a/python/tvm/relay/_parser.py +++ b/python/tvm/relay/_parser.py @@ -116,8 +116,6 @@ def __call__(self, args, attrs, type_args): attrs = {} if self.operator in (op.strided_slice,): x = self.operator(*args) - elif self.operator in (op.zeros, op.ones, op.full, op.broadcast_to): - x = self.operator(*args, dtype=attrs["dtype"]) else: x = self.operator(*args, **{k: self.convert(v) for k, v in attrs.items()}) if isinstance(x, expr.TupleWrapper): From b9c87679d13e0a188679e396502d3a43cbd9e8ad Mon Sep 17 00:00:00 2001 From: Lily Orth-Smith Date: Mon, 13 Jul 2020 10:04:52 -0700 Subject: [PATCH 26/29] responding to comments --- python/tvm/relay/op/_tensor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index 97cac919d9ad..d4911d95e90d 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -92,7 +92,7 @@ # zeros @register_compute("zeros") def zeros_compute(attrs, inputs, output_type): - assert len(inputs) == 0 + assert not inputs return [topi.full(output_type.shape, output_type.dtype, 0.0)] register_broadcast_schedule("zeros") @@ -109,7 +109,7 @@ def zeros_like_compute(attrs, inputs, output_type): # ones @register_compute("ones") def ones_compute(attrs, inputs, output_type): - assert len(inputs) == 0 + assert not inputs return [topi.full(output_type.shape, output_type.dtype, 1.0)] register_broadcast_schedule("ones") From 53bab6c2c41d82c39668a2ea7ad9f018ffa6e9f5 Mon Sep 17 00:00:00 2001 From: Lily Orth-Smith Date: Mon, 13 Jul 2020 10:32:28 -0700 Subject: [PATCH 27/29] fixed build error --- src/runtime/library_module.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/runtime/library_module.cc b/src/runtime/library_module.cc index 7c3323c56229..b12a9d195e2e 100644 --- a/src/runtime/library_module.cc +++ b/src/runtime/library_module.cc @@ -74,7 +74,7 @@ PackedFunc WrapPackedFunc(TVMBackendPackedCFunc faddr, const ObjectPtr& TVMValue ret_value; int ret_type_code = kTVMNullptr; int ret = (*faddr)(const_cast(args.values), const_cast(args.type_codes), - args.num_args, &ret_value, &ret_type_code); + args.num_args, &ret_value, &ret_type_code, NULL); CHECK_EQ(ret, 0) << TVMGetLastError(); if (ret_type_code != kTVMNullptr) { *rv = 
TVMRetValue::MoveFromCHost(ret_value, ret_type_code); From 4c0129c063d78ee9082a66cd497ff15fb59c9d33 Mon Sep 17 00:00:00 2001 From: Lily Orth-Smith Date: Tue, 14 Jul 2020 11:55:27 -0700 Subject: [PATCH 28/29] finishing rebase --- src/relay/op/make_op.h | 6 +++--- src/relay/transforms/pattern_util.h | 2 -- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/relay/op/make_op.h b/src/relay/op/make_op.h index b5c7a526c658..3b5e9a195957 100644 --- a/src/relay/op/make_op.h +++ b/src/relay/op/make_op.h @@ -36,7 +36,7 @@ namespace tvm { namespace relay { -Expr MakeBroadCastTo(Expr data, Expr shape); +Expr MakeBroadCastTo(Expr data, Array shape); Expr MakeCast(Expr data, DataType dtype); @@ -52,7 +52,7 @@ Expr MakeFull(Expr fill_value, Expr shape, DataType dtype); Expr MakeLayoutTransform(Expr data, String src_layout, String dst_layout); -Expr MakeOnes(Expr shape, DataType dtype); +Expr MakeOnes(Array shape, DataType dtype); Expr MakePad(Expr data, Array> pad_width, double pad_value, String pad_mode); @@ -76,7 +76,7 @@ Expr MakeTopK(Expr data, int k, int axis, String ret_type, bool is_ascend, DataT Expr MakeVariance(Expr data, Expr mean, Array axis, bool keepdims, bool exclude); -Expr MakeZeros(Expr shape, DataType dtype); +Expr MakeZeros(Array shape, DataType dtype); } // namespace relay } // namespace tvm diff --git a/src/relay/transforms/pattern_util.h b/src/relay/transforms/pattern_util.h index 8ce495e33f5d..adbd1bd44431 100644 --- a/src/relay/transforms/pattern_util.h +++ b/src/relay/transforms/pattern_util.h @@ -625,8 +625,6 @@ static inline Expr Pad(Expr data, Array> pad_width, double pad_ static inline Expr Tile(Expr data, Array reps) { return MakeTile(data, reps); } -Expr MakeBroadCastTo(Expr data, Array shape); - static inline Expr BroadCastTo(Expr data, Array shape) { return MakeBroadCastTo(data, CheckConstantShapeArrayInteger(shape)); } From 1fc7f7f8de976b9e9bb396d53c0917aeef985934 Mon Sep 17 00:00:00 2001 From: Lily Orth-Smith Date: Tue, 14 Jul 2020 12:07:30 -0700 Subject: [PATCH 29/29] fix lint --- src/relay/transforms/dynamic_to_static.cc | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/relay/transforms/dynamic_to_static.cc b/src/relay/transforms/dynamic_to_static.cc index d59669c4d20e..d4de15c6ee5a 100644 --- a/src/relay/transforms/dynamic_to_static.cc +++ b/src/relay/transforms/dynamic_to_static.cc @@ -58,7 +58,6 @@ class DynamicToStaticMutator : public MixedModeMutator { } else if (call_node->op == Op::Get("dyn.broadcast_to")) { if (const ConstantNode* shape = call_node->args[1].as()) { CHECK_EQ(shape->data->ndim, 1); - return MakeBroadCastTo(call_node->args[0], ToVector(shape->data)); } } else if (call_node->op == Op::Get("dyn.zeros")) { @@ -67,7 +66,7 @@ class DynamicToStaticMutator : public MixedModeMutator { CHECK(param); return MakeZeros(ToVector(shape->data), param->dtype); } - } else if (call_node-> op == Op::Get("dyn.ones")) { + } else if (call_node->op == Op::Get("dyn.ones")) { if (const ConstantNode* shape = call_node->args[0].as()) { const InitOpAttrs* param = call_node->attrs.as(); CHECK(param);
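
(Illustrative note, not part of the patch series: the DynamicToStatic handling added above rewrites dyn.broadcast_to, dyn.zeros and dyn.ones back to their static forms whenever the shape argument can be reduced to a constant, which is what test_dynamic_to_static_zeros_ones exercises. A rough sketch of that flow, assuming the ops and pass registered in these commits:)

    import tvm
    from tvm import relay
    from tvm.relay import transform

    x = relay.var("x", relay.TensorType((2, 3), "float32"))
    # shape_of(x) is a relay expression, so this builds dyn.ones(shape_of(x), ...).
    y = relay.ones(relay.shape_of(x), "float32")
    func = relay.Function([x], y)

    mod = tvm.ir.IRModule.from_expr(func)
    mod = transform.InferType()(mod)
    # With a static input type, shape_of(x) folds to a constant, so the pass can
    # replace dyn.ones with the static ones op; the test in this series then
    # checks that the resulting body is a constant of shape (2, 3).
    mod = transform.DynamicToStatic()(mod)
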