From 9627a18cee310977e30e13a2ae3d0982605da8b0 Mon Sep 17 00:00:00 2001 From: Jared Roesch Date: Thu, 1 Nov 2018 00:43:34 -0700 Subject: [PATCH 01/13] Add compute and schedule attributes for all ops in relay/op/tensor.py --- python/tvm/relay/op/_tensor.py | 289 ++++++++++++++++++++++++++++++--- python/tvm/relay/op/tensor.py | 32 ++-- 2 files changed, 282 insertions(+), 39 deletions(-) diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index 6ccb394ef8db..987bc16bebef 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -1,49 +1,294 @@ #pylint: disable=invalid-name, unused-argument """Backend compiler related feature registration""" +from __future__ import absolute_import import tvm import topi +import topi.cuda from . import register +def register_schedule(op_name, schedule): + register(op_name, "FTVMSchedule", schedule) + +def register_compute(op_name, compute): + register(op_name, "FTVMCompute", compute) + +def schedule_injective(outputs, target): + """Generic schedule for binary broadcast.""" + with tvm.target.create(target): + return topi.generic.schedule_injective(outputs) + +schedule_broadcast = schedule_injective +schedule_elemwise = schedule_injective + +# log +def log_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.log(inputs[0])] + +register_schedule("log", schedule_broadcast) + +# exp +def exp_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.exp(inputs[0])] + +register_compute("exp", exp_compute) +register_schedule("exp", schedule_broadcast) + +# sqrt +def sqrt_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.sqrt(inputs[0])] + +register_compute("sqrt", sqrt_compute) +register_schedule("sqrt", schedule_broadcast) + +# sigmoid +def sigmoid_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.sigmoid(inputs[0])] + +register_compute("sigmoid", sigmoid_compute) +register_schedule("sigmoid", schedule_broadcast) + +# floor +def floor_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.floor(inputs[0])] + +register_compute("floor", floor_compute) +register_schedule("floor", schedule_broadcast) + +# ceil +def ceil_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.ceil(inputs[0])] + +register_compute("ceil", ceil_compute) +register_schedule("ceil", schedule_broadcast) + +# trunc +def trunc_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.trunc(inputs[0])] + +register_compute("trunc", trunc_compute) +register_schedule("trunc", schedule_broadcast) + +# round +def round_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.round(inputs[0])] + +register_compute("round", round_compute) +register_schedule("round", schedule_broadcast) + +# abs +def abs_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.abs(inputs[0])] + +register_compute("abs", abs_compute) +register_schedule("abs", schedule_broadcast) + +# tanh +def tanh_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.tanh(inputs[0])] + +register_compute("tanh", tanh_compute) +register_schedule("tanh", schedule_broadcast) + +# negative +def negative_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.negative(inputs[0])] + +register_compute("negative", negative_compute) +register_schedule("negative", 
schedule_broadcast) + +# add def add_compute(attrs, inputs, output_type, target): assert len(inputs) == 2 return [topi.add(inputs[0], inputs[1])] -def add_schedule(outputs, target): - assert len(outputs) == 1 - return tvm.create_schedule(outputs[0].op) - -register("add", "FTVMCompute", add_compute) -register("add", "FTVMSchedule", add_schedule) +register_compute("add", add_compute) +register_schedule("add", schedule_injective) +# subtract def subtract_compute(attrs, inputs, output_type, target): assert len(inputs) == 2 return [topi.subtract(inputs[0], inputs[1])] -def subtract_schedule(outputs, target): - assert len(outputs) == 1 - return tvm.create_schedule(outputs[0].op) - -register("subtract", "FTVMCompute", subtract_compute) -register("subtract", "FTVMSchedule", subtract_schedule) +register_compute("subtract", subtract_compute) +register_schedule("subtract", schedule_broadcast) +# multiply def multiply_compute(attrs, inputs, output_type, target): assert len(inputs) == 2 return [topi.multiply(inputs[0], inputs[1])] -def multiply_schedule(outputs, target): - assert len(outputs) == 1 - return tvm.create_schedule(outputs[0].op) +register_compute("multiply", multiply_compute) +register_schedule("multiply", schedule_broadcast) + +# divide +def divide_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.divide(inputs[0], inputs[1])] + +register_compute("divide", divide_compute) +register_schedule("divide", schedule_broadcast) + +# pow +def pow_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.power(inputs[0], inputs[1])] + +register_compute("pow", pow_compute) +register_schedule("pow", schedule_injective) + +# mod +def mod_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.mod(inputs[0], inputs[1])] -register("multiply", "FTVMCompute", multiply_compute) -register("multiply", "FTVMSchedule", multiply_schedule) +register_compute("mod", mod_compute) +register_schedule("mod", schedule_broadcast) +# equal def equal_compute(attrs, inputs, output_type, target): assert len(inputs) == 2 return [topi.equal(inputs[0], inputs[1])] -def equal_schedule(outputs, target): - assert len(outputs) == 1 - return tvm.create_schedule(outputs[0].op) +register_compute("equal", equal_compute) +register_schedule("equal", schedule_broadcast) + +# not_equal +def not_equal_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.not_equal(inputs[0], inputs[1])] + +register_compute("not_equal", not_equal_compute) +register_schedule("not_equal", schedule_broadcast) + +# less +def less_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.less(inputs[0], inputs[1])] + +register_compute("less", less_compute) +register_schedule("less", schedule_broadcast) + +# less equal +def less_equal_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.less_equal(inputs[0], inputs[1])] + +register_compute("less_equal", less_equal_compute) +register_schedule("less_equal", schedule_broadcast) + +# greater +def greater_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.greater(inputs[0], inputs[1])] + +register_compute("greater", greater_compute) +register_schedule("greater", schedule_broadcast) + +# greater equal +def greater_equal_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.greater_equal(inputs[0], inputs[1])] + +register_compute("greater_equal", greater_equal_compute) 
+register_schedule("greater_equal", schedule_broadcast) + +# maximum +def maximum_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.maximum(inputs[0], inputs[1])] + +register_compute("maximum_compute", maximum_compute) +register_schedule("maximum_compute", schedule_injective) + +# minimum +def minimum_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.minimum(inputs[0], inputs[1])] + +register_compute("minimum", minimum_compute) +register_schedule("minimum", schedule_injective) + +# right shift +def right_shift_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.right_shift(inputs[0], inputs[1])] + +register_compute("right_shift", right_shift_compute) +register_schedule("right_shift", schedule_injective) + +# lift shift +def left_shift_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.left_shift(inputs[0], inputs[1])] + +register_compute("left_shift", left_shift_compute) +register_schedule("left_shift", schedule_injective) + +# zeros +def zeros_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.full(inputs[0], inputs[1], 0.0)] + +register_compute("zeros_compute", zeros_compute) +register_schedule("zeros_compute", schedule_injective) + +# zeros_like +def zeros_like_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.full_like(inputs[0], 0.0)] + +register_compute("zeros_like", zeros_like_compute) +register_schedule("zeros_like", schedule_injective) + +# ones +def ones_compute(attrs, inputs, output_type, target): + assert len(inputs) == 2 + return [topi.full(inputs[0], inputs[1], 1.0)] + +register_compute("ones", ones_compute) +register_schedule("ones", schedule_injective) + +# ones_like +def ones_like(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.full_like(inputs[0], 1.0)] + +register_compute("ones_like", ones_like) +register_schedule("ones_like", schedule_injective) + +# clip +def clip_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.clip(inputs[0], inputs[1], inputs[2])] + + +register_compute("clip", clip_compute) +register_schedule("clip", schedule_injective) + +# concatenate +def concatenate_compute(attrs, inputs, output_type, target): + assert len(inputs) == 1 + return [topi.concatenate(*inputs[0], attrs.axis)] + +register_compute("concatenate", concatenate_compute) +register_schedule("concatenate", schedule_injective) + +# # copy +# TODO(@jroesch): How to implement copy. +# def copy_compute(attrs, inputs, output_type, target): +# assert len(inputs) == 1 +# return [topi.copy(inputs[0])] -register("equal", "FTVMCompute", equal_compute) -register("equal", "FTVMSchedule", equal_schedule) +# register_compute("copy", copy_compute) +# register_schedule("copy", schedule_injective) diff --git a/python/tvm/relay/op/tensor.py b/python/tvm/relay/op/tensor.py index 3c432b58092d..c450b3b825eb 100644 --- a/python/tvm/relay/op/tensor.py +++ b/python/tvm/relay/op/tensor.py @@ -213,9 +213,8 @@ def add(lhs, rhs): """ return _make.add(lhs, rhs) - -def multiply(lhs, rhs): - """Multiplication with numpy-style broadcasting. +def subtract(lhs, rhs): + """Subtraction with numpy-style broadcasting. Parameters ---------- @@ -229,11 +228,10 @@ def multiply(lhs, rhs): result : relay.Expr The computed result. 
""" - return _make.multiply(lhs, rhs) - + return _make.subtract(lhs, rhs) -def divide(lhs, rhs): - """Division with numpy-style broadcasting. +def multiply(lhs, rhs): + """Multiplication with numpy-style broadcasting. Parameters ---------- @@ -247,11 +245,11 @@ def divide(lhs, rhs): result : relay.Expr The computed result. """ - return _make.divide(lhs, rhs) + return _make.multiply(lhs, rhs) -def pow(lhs, rhs): - """Power with numpy-style broadcasting. +def divide(lhs, rhs): + """Division with numpy-style broadcasting. Parameters ---------- @@ -265,11 +263,11 @@ def pow(lhs, rhs): result : relay.Expr The computed result. """ - return _make.pow(lhs, rhs) + return _make.divide(lhs, rhs) -def mod(lhs, rhs): - """Mod with numpy-style broadcasting. +def pow(lhs, rhs): + """Power with numpy-style broadcasting. Parameters ---------- @@ -283,11 +281,11 @@ def mod(lhs, rhs): result : relay.Expr The computed result. """ - return _make.mod(lhs, rhs) + return _make.pow(lhs, rhs) -def subtract(lhs, rhs): - """Subtraction with numpy-style broadcasting. +def mod(lhs, rhs): + """Mod with numpy-style broadcasting. Parameters ---------- @@ -301,7 +299,7 @@ def subtract(lhs, rhs): result : relay.Expr The computed result. """ - return _make.subtract(lhs, rhs) + return _make.mod(lhs, rhs) def equal(lhs, rhs): From b120240ccac53a0cd170d93e09cab27b786f41d9 Mon Sep 17 00:00:00 2001 From: Jared Roesch Date: Fri, 2 Nov 2018 11:37:52 -0700 Subject: [PATCH 02/13] Add test --- python/tvm/relay/op/_tensor.py | 6 ------ tests/python/relay/test_op_level1.py | 26 +++++++++++++++++--------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index 987bc16bebef..02666c567903 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -6,12 +6,6 @@ import topi.cuda from . 
import register -def register_schedule(op_name, schedule): - register(op_name, "FTVMSchedule", schedule) - -def register_compute(op_name, compute): - register(op_name, "FTVMCompute", compute) - def schedule_injective(outputs, target): """Generic schedule for binary broadcast.""" with tvm.target.create(target): diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py index a622dfc2cbd4..ca0e1564c59c 100644 --- a/tests/python/relay/test_op_level1.py +++ b/tests/python/relay/test_op_level1.py @@ -1,25 +1,33 @@ import tvm import numpy as np from tvm import relay +from tvm.relay.interpreter import create_executor +def ref_log(x): + return np.log(x) def test_unary_op(): - def check_single_op(opfunc): - tp = relay.TensorType((10, 4), "float32") + def check_single_op(opfunc, ref): + shape = (10, 4) + dtype = 'float32' + tp = relay.TensorType(shape, dtype) x = relay.var("x", tp) y = opfunc(x) # test printer assert ("%0 = {}(%x)".format(y.op.name)) in y.astext() # test type inference assert relay.ir_pass.infer_type(y).checked_type == tp + intrp = create_executor() - for opfunc in [tvm.relay.log, - tvm.relay.exp, - tvm.relay.sqrt, - tvm.relay.sigmoid, - tvm.relay.tanh, - relay.nn.relu]: - check_single_op(opfunc) + + + for opfunc, ref in [(tvm.relay.log, ref_log), + (tvm.relay.exp, None), + (tvm.relay.sqrt, None) + (tvm.relay.sigmoid, None) + (tvm.relay.tanh, None) + (relay.nn.relu, None)]: + check_single_op(opfunc, ref) def test_binary_op(): From 632df8c2a46d07087dc952a063c43413f31d3a1c Mon Sep 17 00:00:00 2001 From: Jared Roesch Date: Fri, 2 Nov 2018 12:14:26 -0700 Subject: [PATCH 03/13] Add test case for log --- python/tvm/relay/interpreter.py | 9 +++++++-- python/tvm/relay/op/__init__.py | 2 +- python/tvm/relay/op/_tensor.py | 5 +++-- python/tvm/relay/op/op.py | 5 +++++ src/relay/pass/lower_ops.cc | 5 +++-- tests/python/relay/test_op_level1.py | 14 +++++++++----- 6 files changed, 28 insertions(+), 12 deletions(-) diff --git a/python/tvm/relay/interpreter.py b/python/tvm/relay/interpreter.py index 4dfe3e02989e..85072fa7b343 100644 --- a/python/tvm/relay/interpreter.py +++ b/python/tvm/relay/interpreter.py @@ -138,7 +138,8 @@ def evaluate(self, expr, params=None): """ if params: scope_builder = ScopeBuilder() - for key, value in params: + for key in params: + value = params[key] scope_builder.let(key, value) scope_builder.ret(expr) expr = scope_builder.get() @@ -168,10 +169,14 @@ def _interp_wrapper(*args): self.mod._add(expr, func, True) opt_expr = Call(expr, relay_args) return _interpreter.evaluate(self.mod, opt_expr) - else: + elif isinstance(expr, Function): call = Call(expr, relay_args) opt_expr = self.optimize(call) return _interpreter.evaluate(self.mod, opt_expr) + else: + assert len(args) == 0 + opt_expr = self.optimize(expr) + return _interpreter.evaluate(self.env, opt_expr) return _interp_wrapper diff --git a/python/tvm/relay/op/__init__.py b/python/tvm/relay/op/__init__.py index 7b61fd10f5b0..9b581486608b 100644 --- a/python/tvm/relay/op/__init__.py +++ b/python/tvm/relay/op/__init__.py @@ -1,7 +1,7 @@ #pylint: disable=wildcard-import, redefined-builtin """Relay core operators.""" # operator defs -from .op import get, register, Op +from .op import get, register, register_schedule, register_compute, Op # Operators from .reduce import * diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index 02666c567903..131ba288fbc5 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -4,7 +4,7 @@ import tvm import 
topi import topi.cuda -from . import register +from . import register, register_schedule, register_compute def schedule_injective(outputs, target): """Generic schedule for binary broadcast.""" @@ -19,7 +19,8 @@ def log_compute(attrs, inputs, output_type, target): assert len(inputs) == 1 return [topi.log(inputs[0])] -register_schedule("log", schedule_broadcast) +register_compute("log", log_compute) +register_schedule("log", schedule_broadcast) # exp def exp_compute(attrs, inputs, output_type, target): diff --git a/python/tvm/relay/op/op.py b/python/tvm/relay/op/op.py index 0c09f39a3c83..91523f65f6b7 100644 --- a/python/tvm/relay/op/op.py +++ b/python/tvm/relay/op/op.py @@ -74,6 +74,11 @@ def _register(v): return v return _register(value) if value else _register +def register_schedule(op_name, schedule): + register(op_name, "FTVMSchedule", schedule) + +def register_compute(op_name, compute): + register(op_name, "FTVMCompute", compute) _init_api("relay.op", __name__) diff --git a/src/relay/pass/lower_ops.cc b/src/relay/pass/lower_ops.cc index f2c8ceba866d..8042a5aa232b 100644 --- a/src/relay/pass/lower_ops.cc +++ b/src/relay/pass/lower_ops.cc @@ -8,6 +8,7 @@ */ #include #include +#include #include #include #include @@ -155,8 +156,8 @@ struct LiveFunctions : ExprVisitor { }; using FCompute = TypedPackedFunc( - const Attrs&, const Array&, Type, std::string)>; -using FSchedule = TypedPackedFunc&, std::string)>; + const Attrs&, const Array&, Type, tvm::Target)>; +using FSchedule = TypedPackedFunc&, tvm::Target)>; /*! \brief Return the set of operators in their TVM format. */ Array LowerOps(const Module& mod, const Expr& e, diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py index ca0e1564c59c..586dde5a8dfe 100644 --- a/tests/python/relay/test_op_level1.py +++ b/tests/python/relay/test_op_level1.py @@ -17,15 +17,19 @@ def check_single_op(opfunc, ref): assert ("%0 = {}(%x)".format(y.op.name)) in y.astext() # test type inference assert relay.ir_pass.infer_type(y).checked_type == tp - intrp = create_executor() - + if ref is not None: + data = np.random.rand(*shape).astype(dtype) + intrp = create_executor() + op_res = intrp.evaluate(y, { x: relay.const(data) })() + ref_res = ref(data) + np.testing.assert_allclose(op_res.asnumpy(), ref_res) for opfunc, ref in [(tvm.relay.log, ref_log), (tvm.relay.exp, None), - (tvm.relay.sqrt, None) - (tvm.relay.sigmoid, None) - (tvm.relay.tanh, None) + (tvm.relay.sqrt, None), + (tvm.relay.sigmoid, None), + (tvm.relay.tanh, None), (relay.nn.relu, None)]: check_single_op(opfunc, ref) From 3e09f5b4effe601b4f3e544ebf2586b701f7d811 Mon Sep 17 00:00:00 2001 From: Jared Roesch Date: Fri, 2 Nov 2018 12:30:30 -0700 Subject: [PATCH 04/13] Use target node --- include/tvm/attrs.h | 16 ++++++++-------- include/tvm/build_module.h | 2 +- src/relay/pass/lower_ops.cc | 5 +++-- 3 files changed, 12 insertions(+), 11 deletions(-) diff --git a/include/tvm/attrs.h b/include/tvm/attrs.h index 51d916ca488d..cc1abe6e57de 100644 --- a/include/tvm/attrs.h +++ b/include/tvm/attrs.h @@ -735,12 +735,12 @@ template class AttrsNode : public BaseAttrsNode { public: void VisitAttrs(AttrVisitor* v) final { - detail::AttrNormalVisitor vis(v); + ::tvm::detail::AttrNormalVisitor vis(v); self()->__VisitAttrs__(vis); } void VisitNonDefaultAttrs(AttrVisitor* v) final { - detail::AttrNonDefaultVisitor vis(v); + ::tvm::detail::AttrNonDefaultVisitor vis(v); self()->__VisitAttrs__(vis); } @@ -761,7 +761,7 @@ class AttrsNode : public BaseAttrsNode { } return false; }; - auto 
vis = detail::CreateInitVisitor(DerivedType::_type_key, ffind); + auto vis = ::tvm::detail::CreateInitVisitor(DerivedType::_type_key, ffind); self()->__VisitAttrs__(vis); hit_count = vis.hit_count_; } else { @@ -779,14 +779,14 @@ class AttrsNode : public BaseAttrsNode { } return false; }; - auto vis = detail::CreateInitVisitor(DerivedType::_type_key, ffind); + auto vis = ::tvm::detail::CreateInitVisitor(DerivedType::_type_key, ffind); self()->__VisitAttrs__(vis); hit_count = vis.hit_count_; } // error handling, slow path if (hit_count * 2 != args.size() && !allow_unknown) { for (int i = 0; i < args.size(); i += 2) { - detail::AttrExistVisitor visitor; + ::tvm::detail::AttrExistVisitor visitor; visitor.key_ = args[i].operator std::string(); self()->__VisitAttrs__(visitor); if (!visitor.exist_) { @@ -803,7 +803,7 @@ class AttrsNode : public BaseAttrsNode { } Array ListFieldInfo() const final { - detail::AttrDocVisitor visitor; + ::tvm::detail::AttrDocVisitor visitor; self()->__VisitAttrs__(visitor); return visitor.fields_; } @@ -813,13 +813,13 @@ class AttrsNode : public BaseAttrsNode { if (pself == other) return true; if (other == nullptr) return false; if (pself->type_index() != other->type_index()) return false; - detail::AttrsEqualVisitor visitor(pself, other, equal); + ::tvm::detail::AttrsEqualVisitor visitor(pself, other, equal); self()->__VisitAttrs__(visitor); return visitor.result_; } size_t ContentHash(AttrsHash hasher) const final { - detail::AttrsHashVisitor visitor(hasher); + ::tvm::detail::AttrsHashVisitor visitor(hasher); visitor.result_ = std::hash()(this->type_key()); self()->__VisitAttrs__(visitor); return visitor.result_; diff --git a/include/tvm/build_module.h b/include/tvm/build_module.h index 7aafad4216e1..ddd54f604a68 100644 --- a/include/tvm/build_module.h +++ b/include/tvm/build_module.h @@ -417,7 +417,7 @@ inline TVMRetValue GenericFunc::operator()(Args&& ...args) const { const int kArraySize = kNumArgs > 0 ? 
kNumArgs : 1; TVMValue values[kArraySize]; int type_codes[kArraySize]; - detail::for_each(TVMArgsSetter(values, type_codes), + runtime::detail::for_each(TVMArgsSetter(values, type_codes), std::forward(args)...); TVMRetValue rv; CallPacked(TVMArgs(values, type_codes, kNumArgs), &rv); diff --git a/src/relay/pass/lower_ops.cc b/src/relay/pass/lower_ops.cc index 8042a5aa232b..75888fff0e1c 100644 --- a/src/relay/pass/lower_ops.cc +++ b/src/relay/pass/lower_ops.cc @@ -199,9 +199,10 @@ Array LowerOps(const Module& mod, const Expr& e, } auto output_tt = op->op_type->ret_type; + auto target_node = Target::create(target); Array outputs = - compute_reg[op](call->attrs, inputs, output_tt, target); - auto schedule = schedule_reg[op](outputs, target); + compute_reg[op](call->attrs, inputs, output_tt, target_node); + auto schedule = schedule_reg[op](outputs, target_node); size_t hash = StructuralHash()(func); LoweredFunc lf = flower(op->name + std::to_string(hash), schedule, inputs, outputs); From 5798d75125b08f5073c2357621cc7d868bfb2b62 Mon Sep 17 00:00:00 2001 From: Jared Roesch Date: Fri, 2 Nov 2018 13:34:55 -0700 Subject: [PATCH 05/13] Fix lint --- python/tvm/relay/interpreter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/tvm/relay/interpreter.py b/python/tvm/relay/interpreter.py index 85072fa7b343..b259e58575db 100644 --- a/python/tvm/relay/interpreter.py +++ b/python/tvm/relay/interpreter.py @@ -174,7 +174,7 @@ def _interp_wrapper(*args): opt_expr = self.optimize(call) return _interpreter.evaluate(self.mod, opt_expr) else: - assert len(args) == 0 + assert not args opt_expr = self.optimize(expr) return _interpreter.evaluate(self.env, opt_expr) From 52f33a743cbcae1a9823d21f8054590f574b7da3 Mon Sep 17 00:00:00 2001 From: Jared Roesch Date: Fri, 2 Nov 2018 14:21:38 -0700 Subject: [PATCH 06/13] Fix unused import and typo --- python/tvm/relay/op/_tensor.py | 2 +- src/relay/pass/lower_ops.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index 131ba288fbc5..117c3151cce2 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -4,7 +4,7 @@ import tvm import topi import topi.cuda -from . import register, register_schedule, register_compute +from . 
import register_schedule, register_compute def schedule_injective(outputs, target): """Generic schedule for binary broadcast.""" diff --git a/src/relay/pass/lower_ops.cc b/src/relay/pass/lower_ops.cc index 75888fff0e1c..4e141aff45ea 100644 --- a/src/relay/pass/lower_ops.cc +++ b/src/relay/pass/lower_ops.cc @@ -180,7 +180,7 @@ Array LowerOps(const Module& mod, const Expr& e, auto func = mod->Lookup(func_name); auto call = Downcast(func->body); auto op_node = call->op.as(); - CHECK(op_node) << "violated invariant that primtiive calls contain a single op call"; + CHECK(op_node) << "violated invariant that primtive calls contain a single op call"; auto op = GetRef(op_node); RELAY_LOG(INFO) << "LowerOps: Lowering " << op->name; From 698c2291295e0b0c0020a03a2bfeded51740d8f5 Mon Sep 17 00:00:00 2001 From: Jared Roesch Date: Fri, 2 Nov 2018 14:41:05 -0700 Subject: [PATCH 07/13] Add binary tests --- python/tvm/relay/interpreter.py | 7 +++- tests/python/relay/test_op_level1.py | 51 ++++++++++++++++++---------- 2 files changed, 39 insertions(+), 19 deletions(-) diff --git a/python/tvm/relay/interpreter.py b/python/tvm/relay/interpreter.py index b259e58575db..02956de886c1 100644 --- a/python/tvm/relay/interpreter.py +++ b/python/tvm/relay/interpreter.py @@ -147,7 +147,12 @@ def evaluate(self, expr, params=None): if isinstance(expr, Function): assert not ir_pass.free_vars(expr) - return self._make_executor(expr) + executor = self._make_executor(expr) + + if isinstance(expr, Function): + return executor + else: + return executor() class Interpreter(Executor): diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py index 586dde5a8dfe..7a4370b1269a 100644 --- a/tests/python/relay/test_op_level1.py +++ b/tests/python/relay/test_op_level1.py @@ -1,10 +1,17 @@ +import math import tvm import numpy as np from tvm import relay from tvm.relay.interpreter import create_executor -def ref_log(x): - return np.log(x) +def sigmoid(x): + one = np.ones_like(x) + return one / (one + np.exp(-x)) + +def relu(x): + x_copy = np.copy(x) + np.maximum(x_copy, 0, x_copy) + return x_copy def test_unary_op(): def check_single_op(opfunc, ref): @@ -21,21 +28,21 @@ def check_single_op(opfunc, ref): if ref is not None: data = np.random.rand(*shape).astype(dtype) intrp = create_executor() - op_res = intrp.evaluate(y, { x: relay.const(data) })() + op_res = intrp.evaluate(y, { x: relay.const(data) }) ref_res = ref(data) - np.testing.assert_allclose(op_res.asnumpy(), ref_res) - - for opfunc, ref in [(tvm.relay.log, ref_log), - (tvm.relay.exp, None), - (tvm.relay.sqrt, None), - (tvm.relay.sigmoid, None), - (tvm.relay.tanh, None), - (relay.nn.relu, None)]: + np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) + + for opfunc, ref in [(tvm.relay.log, np.log), + (tvm.relay.exp, np.exp), + (tvm.relay.sqrt, np.sqrt), + (tvm.relay.sigmoid, sigmoid), + (tvm.relay.tanh, np.tanh), + (relay.nn.relu, None)]: # Just add RELU here after registering. 
check_single_op(opfunc, ref) def test_binary_op(): - def check_binary_op(opfunc): + def check_binary_op(opfunc, ref): n = tvm.var("n") t1 = relay.TensorType((5, n, 5)) t2 = relay.TensorType((n, 1)) @@ -46,12 +53,20 @@ def check_binary_op(opfunc): assert ("%0 = {}(%x, %y)".format(z.op.name)) in z.astext() assert relay.ir_pass.infer_type(z).checked_type == t1 - for opfunc in [relay.add, - relay.subtract, - relay.mod, - relay.multiply, - relay.divide]: - check_binary_op(opfunc) + if ref is not None: + x = np.random.rand(*t1.shape).astype(t1.dtype) + y = np.random.rand(*t2.shape).astype(t2.dtype) + intrp = create_executor() + op_res = intrp.evaluate(z, { x: relay.const(x), y: relay.const(y) }) + ref_res = ref(x, y) + np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) + + for opfunc, ref in [(relay.add, np.add) + (relay.subtract, np.subtract), + (relay.mod, np.mod), + (relay.multiply, np.multiply), + (relay.divide, np.divide)]: + check_binary_op(opfunc, ref) def test_bias_add(): From ba24889d92c6fbb62c6ae1bb22199cae9cfe6186 Mon Sep 17 00:00:00 2001 From: Jared Roesch Date: Fri, 2 Nov 2018 15:41:50 -0700 Subject: [PATCH 08/13] Fix binary ops --- python/tvm/relay/interpreter.py | 2 +- tests/python/relay/test_op_level1.py | 25 ++++++++++++++++++------- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/python/tvm/relay/interpreter.py b/python/tvm/relay/interpreter.py index 02956de886c1..15c5ec842930 100644 --- a/python/tvm/relay/interpreter.py +++ b/python/tvm/relay/interpreter.py @@ -181,7 +181,7 @@ def _interp_wrapper(*args): else: assert not args opt_expr = self.optimize(expr) - return _interpreter.evaluate(self.env, opt_expr) + return _interpreter.evaluate(self.mod, opt_expr) return _interp_wrapper diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py index 7a4370b1269a..2d36b463c29d 100644 --- a/tests/python/relay/test_op_level1.py +++ b/tests/python/relay/test_op_level1.py @@ -42,10 +42,16 @@ def check_single_op(opfunc, ref): def test_binary_op(): + def inst(vars, sh): + return [vars.get(s, s) for s in sh] + def check_binary_op(opfunc, ref): + # TODO(@jroesch): this piece of code improperly uses type variables. 
n = tvm.var("n") - t1 = relay.TensorType((5, n, 5)) - t2 = relay.TensorType((n, 1)) + s1 = (5, n, 5) + s2 = (n, 1) + t1 = relay.TensorType(s1) + t2 = relay.TensorType(s2) x = relay.var("x", t1) y = relay.var("y", t2) z = opfunc(x, y) @@ -54,14 +60,19 @@ def check_binary_op(opfunc, ref): assert relay.ir_pass.infer_type(z).checked_type == t1 if ref is not None: - x = np.random.rand(*t1.shape).astype(t1.dtype) - y = np.random.rand(*t2.shape).astype(t2.dtype) + t1 = relay.TensorType((5, 10, 5)) + t2 = relay.TensorType((5, 10, 5)) + x = relay.var("x", t1) + y = relay.var("y", t2) + z = opfunc(x, y) + x_data = np.random.rand(5, 10, 5).astype(t1.dtype) + y_data = np.random.rand(5, 10, 5).astype(t2.dtype) intrp = create_executor() - op_res = intrp.evaluate(z, { x: relay.const(x), y: relay.const(y) }) - ref_res = ref(x, y) + op_res = intrp.evaluate(z, { x: relay.const(x_data), y: relay.const(y_data) }) + ref_res = ref(x_data, y_data) np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) - for opfunc, ref in [(relay.add, np.add) + for opfunc, ref in [(relay.add, np.add), (relay.subtract, np.subtract), (relay.mod, np.mod), (relay.multiply, np.multiply), From e2862f4d5950b169c9ec04b554f03a2ee8e31774 Mon Sep 17 00:00:00 2001 From: Jared Roesch Date: Fri, 2 Nov 2018 16:16:37 -0700 Subject: [PATCH 09/13] Add more tests --- python/tvm/relay/op/_tensor.py | 12 +++---- python/tvm/relay/op/tensor.py | 8 ++++- src/relay/pass/lower_ops.cc | 2 +- tests/python/relay/test_op_level3.py | 37 ++++++++++++++-------- tests/python/relay/test_op_level4.py | 47 ++++++++++++++++++++++------ 5 files changed, 76 insertions(+), 30 deletions(-) diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index 117c3151cce2..bd428089525a 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -232,11 +232,11 @@ def left_shift_compute(attrs, inputs, output_type, target): # zeros def zeros_compute(attrs, inputs, output_type, target): - assert len(inputs) == 2 - return [topi.full(inputs[0], inputs[1], 0.0)] + assert len(inputs) == 0 + return [topi.full(output_type.shape, output_type.dtype, 0.0)] -register_compute("zeros_compute", zeros_compute) -register_schedule("zeros_compute", schedule_injective) +register_compute("zeros", zeros_compute) +register_schedule("zeros", schedule_injective) # zeros_like def zeros_like_compute(attrs, inputs, output_type, target): @@ -248,8 +248,8 @@ def zeros_like_compute(attrs, inputs, output_type, target): # ones def ones_compute(attrs, inputs, output_type, target): - assert len(inputs) == 2 - return [topi.full(inputs[0], inputs[1], 1.0)] + assert len(inputs) == 0 + return [topi.full(output_type.shape, output_type.dtype, 1.0)] register_compute("ones", ones_compute) register_schedule("ones", schedule_injective) diff --git a/python/tvm/relay/op/tensor.py b/python/tvm/relay/op/tensor.py index c450b3b825eb..88f396cae33e 100644 --- a/python/tvm/relay/op/tensor.py +++ b/python/tvm/relay/op/tensor.py @@ -410,6 +410,7 @@ def greater_equal(lhs, rhs): return _make.greater_equal(lhs, rhs) +# TODO TEST def maximum(lhs, rhs): """Maximum with numpy-style broadcasting. @@ -428,6 +429,7 @@ def maximum(lhs, rhs): return _make.maximum(lhs, rhs) +# TODO TEST def minimum(lhs, rhs): """Minimum with numpy-style broadcasting. @@ -446,6 +448,7 @@ def minimum(lhs, rhs): return _make.minimum(lhs, rhs) +# TODO TEST def right_shift(lhs, rhs): """Right shift with numpy-style broadcasting. 
@@ -464,6 +467,7 @@ def right_shift(lhs, rhs): return _make.right_shift(lhs, rhs) +# TODO TEST def left_shift(lhs, rhs): """Left shift with numpy-style broadcasting. @@ -551,7 +555,7 @@ def ones_like(data): """ return _make.ones_like(data) - +# TODO TEST def clip(a, a_min, a_max): """Clip the elements in `a` between `a_min` and `a_max`. `a_min` and `a_max` are cast to `a`'s dtype. @@ -580,6 +584,7 @@ def clip(a, a_min, a_max): return _make.clip(a, a_min, a_max) +# TODO TEST def concatenate(data, axis): """Concatenate the input tensors along the given axis. @@ -603,6 +608,7 @@ def concatenate(data, axis): return _make.concatenate(Tuple(data), axis) +# TODO TEST def copy(data): """Copy a tensor. diff --git a/src/relay/pass/lower_ops.cc b/src/relay/pass/lower_ops.cc index 4e141aff45ea..55102fe5cf67 100644 --- a/src/relay/pass/lower_ops.cc +++ b/src/relay/pass/lower_ops.cc @@ -198,7 +198,7 @@ Array LowerOps(const Module& mod, const Expr& e, i++; } - auto output_tt = op->op_type->ret_type; + auto output_tt = call->checked_type(); auto target_node = Target::create(target); Array outputs = compute_reg[op](call->attrs, inputs, output_tt, target_node); diff --git a/tests/python/relay/test_op_level3.py b/tests/python/relay/test_op_level3.py index 6f06c8698e3f..a826213f2ef3 100644 --- a/tests/python/relay/test_op_level3.py +++ b/tests/python/relay/test_op_level3.py @@ -3,29 +3,40 @@ import tvm import numpy as np from tvm import relay +from tvm.relay import create_executor from nose.tools import raises def test_zeros_ones(): - for op in [relay.zeros, relay.ones]: + for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]: y = op(shape=(124, 50), dtype="float64") yy = relay.ir_pass.infer_type(y) assert yy.checked_type == relay.TensorType((124, 50), "float64") + intrp = create_executor() + intrp_res = intrp.evaluate(y).asnumpy() + np.testing.assert_allclose(intrp_res, ref((124, 50), 'float64')) def test_unary_identity(): - for op in [relay.zeros_like, - relay.ones_like, - relay.ceil, - relay.floor, - relay.trunc, - relay.round, - relay.abs, - relay.copy, - relay.negative]: - x = relay.var("x", relay.TensorType((8, 9, 4), "float32")) + for op, ref in [(relay.zeros_like, np.zeros_like), + (relay.ones_like, np.ones_like), + (relay.ceil, np.ceil), + (relay.floor, np.floor), + (relay.trunc, np.trunc), + (relay.round, np.round), + (relay.abs, np.abs), + (relay.copy, None), # np.copy + (relay.negative, np.negative)]: + shape = (8, 9, 4) + x = relay.var("x", relay.TensorType(shape, "float32")) y = op(x) yy = relay.ir_pass.infer_type(y) - assert yy.checked_type == relay.TensorType((8, 9, 4), "float32") - + assert yy.checked_type == relay.TensorType(shape, "float32") + + if ref is not None: + data = np.random.rand(*shape).astype('float32') + intrp = create_executor() + op_res = intrp.evaluate(y, { x: relay.const(data) }) + ref_res = ref(data) + np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) def test_cast(): x = relay.var("x", relay.TensorType((8, 9, 4), "float32")) diff --git a/tests/python/relay/test_op_level4.py b/tests/python/relay/test_op_level4.py index 2dc643cfd7e4..20cf000ca354 100644 --- a/tests/python/relay/test_op_level4.py +++ b/tests/python/relay/test_op_level4.py @@ -1,10 +1,11 @@ import tvm import numpy as np from tvm import relay +from tvm.relay import create_executor def test_binary_op(): - def check_binary_op(opfunc): + def check_binary_op(opfunc, ref): n = tvm.var("n") t1 = relay.TensorType((5, n, 5)) t2 = relay.TensorType((n, 1)) @@ -15,17 +16,30 @@ def 
check_binary_op(opfunc): assert ("%0 = {}(%x, %y)".format(z.op.name)) in z.astext() assert relay.ir_pass.infer_type(z).checked_type == t1 - for opfunc in [relay.pow]: - check_binary_op(opfunc) + if ref is not None: + t1 = relay.TensorType((5, 10, 5)) + t2 = relay.TensorType((5, 10, 5)) + x = relay.var("x", t1) + y = relay.var("y", t2) + z = opfunc(x, y) + x_data = np.random.rand(5, 10, 5).astype(t1.dtype) + y_data = np.random.rand(5, 10, 5).astype(t2.dtype) + intrp = create_executor() + op_res = intrp.evaluate(z, { x: relay.const(x_data), y: relay.const(y_data) }) + ref_res = ref(x_data, y_data) + np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) + + for opfunc, ref in [(relay.pow, np.power)]: + check_binary_op(opfunc, ref) def test_cmp_type(): - for op in (relay.greater, - relay.greater_equal, - relay.less, - relay.less_equal, - relay.equal, - relay.not_equal): + for op, ref in ((relay.greater, np.greater), + (relay.greater_equal, np.greater_equal), + (relay.less, np.less), + (relay.less_equal, np.less_equal), + (relay.equal, np.equal), + (relay.not_equal, np.not_equal)): x = relay.var("x", relay.TensorType((10, 4), "float32")) y = relay.var("y", relay.TensorType((5, 10, 1), "float32")) z = op(x, y) @@ -33,6 +47,21 @@ def test_cmp_type(): zz = relay.ir_pass.infer_type(z) assert zz.checked_type == relay.TensorType((5, 10, 4), "bool") + if ref is not None: + x_shape = (10, 4) + y_shape = (5, 10, 1) + t1 = relay.TensorType(x_shape) + t2 = relay.TensorType(y_shape) + x = relay.var("x", t1) + y = relay.var("y", t2) + z = op(x, y) + x_data = np.random.rand(*x_shape).astype(t1.dtype) + y_data = np.random.rand(*y_shape).astype(t2.dtype) + intrp = create_executor() + op_res = intrp.evaluate(z, { x: relay.const(x_data), y: relay.const(y_data) }) + ref_res = ref(x_data, y_data) + np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) + def test_binary_int_broadcast(): for op in [relay.right_shift, From a32813b1fbe8222731e99c9a392a2143203f594b Mon Sep 17 00:00:00 2001 From: Jared Roesch Date: Sat, 3 Nov 2018 01:38:21 -0700 Subject: [PATCH 10/13] Add tests for remaining ops --- python/tvm/relay/op/_tensor.py | 10 +--------- python/tvm/relay/op/tensor.py | 7 ------- tests/python/relay/test_op_level1.py | 9 +++++++++ tests/python/relay/test_op_level3.py | 12 ++++++++++-- tests/python/relay/test_op_level4.py | 19 +++++++++++++++---- 5 files changed, 35 insertions(+), 22 deletions(-) diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index bd428089525a..a04d25b9bfb6 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -265,7 +265,7 @@ def ones_like(attrs, inputs, output_type, target): # clip def clip_compute(attrs, inputs, output_type, target): assert len(inputs) == 1 - return [topi.clip(inputs[0], inputs[1], inputs[2])] + return [topi.clip(inputs[0], attrs.a_min, attrs.a_max)] register_compute("clip", clip_compute) @@ -279,11 +279,3 @@ def concatenate_compute(attrs, inputs, output_type, target): register_compute("concatenate", concatenate_compute) register_schedule("concatenate", schedule_injective) -# # copy -# TODO(@jroesch): How to implement copy. 
-# def copy_compute(attrs, inputs, output_type, target): -# assert len(inputs) == 1 -# return [topi.copy(inputs[0])] - -# register_compute("copy", copy_compute) -# register_schedule("copy", schedule_injective) diff --git a/python/tvm/relay/op/tensor.py b/python/tvm/relay/op/tensor.py index 88f396cae33e..2505da8f1dfd 100644 --- a/python/tvm/relay/op/tensor.py +++ b/python/tvm/relay/op/tensor.py @@ -410,7 +410,6 @@ def greater_equal(lhs, rhs): return _make.greater_equal(lhs, rhs) -# TODO TEST def maximum(lhs, rhs): """Maximum with numpy-style broadcasting. @@ -429,7 +428,6 @@ def maximum(lhs, rhs): return _make.maximum(lhs, rhs) -# TODO TEST def minimum(lhs, rhs): """Minimum with numpy-style broadcasting. @@ -448,7 +446,6 @@ def minimum(lhs, rhs): return _make.minimum(lhs, rhs) -# TODO TEST def right_shift(lhs, rhs): """Right shift with numpy-style broadcasting. @@ -467,7 +464,6 @@ def right_shift(lhs, rhs): return _make.right_shift(lhs, rhs) -# TODO TEST def left_shift(lhs, rhs): """Left shift with numpy-style broadcasting. @@ -555,7 +551,6 @@ def ones_like(data): """ return _make.ones_like(data) -# TODO TEST def clip(a, a_min, a_max): """Clip the elements in `a` between `a_min` and `a_max`. `a_min` and `a_max` are cast to `a`'s dtype. @@ -584,7 +579,6 @@ def clip(a, a_min, a_max): return _make.clip(a, a_min, a_max) -# TODO TEST def concatenate(data, axis): """Concatenate the input tensors along the given axis. @@ -608,7 +602,6 @@ def concatenate(data, axis): return _make.concatenate(Tuple(data), axis) -# TODO TEST def copy(data): """Copy a tensor. diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py index 2d36b463c29d..7ab13409cc43 100644 --- a/tests/python/relay/test_op_level1.py +++ b/tests/python/relay/test_op_level1.py @@ -134,6 +134,15 @@ def test_concatenate_infer_type(): zz = relay.ir_pass.infer_type(z) assert zz.checked_type == relay.TensorType((n, t + t, 100)) + # x = relay.var("x", shape=(10, 5)) + # y = relay.var("y", shape=(10, 5)) + # z = relay.concatenate((x, y), axis=1) + # intrp = create_executor() + # x_data = np.random.rand(10, 5).astype('float32') + # y_data = np.random.rand(10, 5).astype('float32') + # op_res = intrp.evaluate(z, { x: relay.const(x_data), y: relay.const(y_data) }) + # ref_res = np.concatenate(x_data, y_data, axis=1) + # np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) def test_dropout(): n, t, d = tvm.var("n"), tvm.var("t"), tvm.var("d") diff --git a/tests/python/relay/test_op_level3.py b/tests/python/relay/test_op_level3.py index a826213f2ef3..26eccf991d0e 100644 --- a/tests/python/relay/test_op_level3.py +++ b/tests/python/relay/test_op_level3.py @@ -46,12 +46,20 @@ def test_cast(): assert yy.checked_type == relay.TensorType((8, 9, 4), "int32") -def test_clip_type(): +def test_clip(): a = relay.var("a", relay.TensorType((10, 4), "float32")) y = relay.clip(a, 1., 4.) yy = relay.ir_pass.infer_type(y) assert yy.checked_type == relay.TensorType((10, 4), "float32") + data = np.random.rand(10, 4).astype('float32') + intrp = create_executor() + op_res = intrp.evaluate(y, { a: relay.const(data) }) + ref_res = np.clip(data, 1., 4.) 
+ np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) + + + def test_transpose_infer_type(): n, t, d = tvm.var("n"), tvm.var("t"), 100 @@ -237,7 +245,7 @@ def test_infer_type_prelu(): test_cast() test_zeros_ones() test_unary_identity() - test_clip_type() + test_clip() test_transpose_infer_type() test_reshape_infer_type() test_reshape_like() diff --git a/tests/python/relay/test_op_level4.py b/tests/python/relay/test_op_level4.py index 20cf000ca354..f16a4bad7902 100644 --- a/tests/python/relay/test_op_level4.py +++ b/tests/python/relay/test_op_level4.py @@ -64,16 +64,27 @@ def test_cmp_type(): def test_binary_int_broadcast(): - for op in [relay.right_shift, - relay.left_shift, - relay.maximum, - relay.minimum]: + for op, ref in [(relay.right_shift, np.right_shift), + (relay.left_shift, np.left_shift), + (relay.maximum, np.maximum), + (relay.minimum, np.minimum)]: x = relay.var("x", relay.TensorType((10, 4), "int32")) y = relay.var("y", relay.TensorType((5, 10, 1), "int32")) z = op(x, y) zz = relay.ir_pass.infer_type(z) assert zz.checked_type == relay.TensorType((5, 10, 4), "int32") + if ref is not None: + x_shape = (10, 4) + y_shape = (5, 10, 1) + t1 = relay.TensorType(x_shape) + t2 = relay.TensorType(y_shape) + x_data = np.random.rand(*x_shape).astype(t1.dtype) + y_data = np.random.rand(*y_shape).astype(t2.dtype) + intrp = create_executor() + op_res = intrp.evaluate(z, { x: relay.const(x_data), y: relay.const(y_data) }) + ref_res = ref(x_data, y_data) + np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01) def test_where(): cond = relay.var("cond", relay.TensorType((3, 4), "float32")) From 4cc1f91edca4b925f67fb02c65b3f7a83ffe7a2d Mon Sep 17 00:00:00 2001 From: Jared Roesch Date: Sat, 3 Nov 2018 01:44:38 -0700 Subject: [PATCH 11/13] Fix lint --- python/tvm/relay/interpreter.py | 7 ++++++- python/tvm/relay/op/_tensor.py | 4 ++-- tests/python/relay/test_op_level4.py | 4 ++-- 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/python/tvm/relay/interpreter.py b/python/tvm/relay/interpreter.py index 15c5ec842930..4321a0f28cd6 100644 --- a/python/tvm/relay/interpreter.py +++ b/python/tvm/relay/interpreter.py @@ -149,7 +149,12 @@ def evaluate(self, expr, params=None): executor = self._make_executor(expr) - if isinstance(expr, Function): + # If we are evaluating a function or top-level defintion + # the user must call the function themselves. + # + # If we are evaluating an open term with parameters we will + # just return them the result. 
+ if isinstance(expr, Function) or isinstance(expr, GlobalVar): return executor else: return executor() diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index a04d25b9bfb6..206e2aebf012 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -232,7 +232,7 @@ def left_shift_compute(attrs, inputs, output_type, target): # zeros def zeros_compute(attrs, inputs, output_type, target): - assert len(inputs) == 0 + assert not inputs return [topi.full(output_type.shape, output_type.dtype, 0.0)] register_compute("zeros", zeros_compute) @@ -248,7 +248,7 @@ def zeros_like_compute(attrs, inputs, output_type, target): # ones def ones_compute(attrs, inputs, output_type, target): - assert len(inputs) == 0 + assert not inputs return [topi.full(output_type.shape, output_type.dtype, 1.0)] register_compute("ones", ones_compute) diff --git a/tests/python/relay/test_op_level4.py b/tests/python/relay/test_op_level4.py index f16a4bad7902..d20997010b4c 100644 --- a/tests/python/relay/test_op_level4.py +++ b/tests/python/relay/test_op_level4.py @@ -77,8 +77,8 @@ def test_binary_int_broadcast(): if ref is not None: x_shape = (10, 4) y_shape = (5, 10, 1) - t1 = relay.TensorType(x_shape) - t2 = relay.TensorType(y_shape) + t1 = relay.TensorType(x_shape, 'int32') + t2 = relay.TensorType(y_shape, 'int32') x_data = np.random.rand(*x_shape).astype(t1.dtype) y_data = np.random.rand(*y_shape).astype(t2.dtype) intrp = create_executor() From d8ecf74e9aab7dd3a1f79795d419aea9005b184b Mon Sep 17 00:00:00 2001 From: Jared Roesch Date: Sat, 3 Nov 2018 01:55:51 -0700 Subject: [PATCH 12/13] Fix lint --- python/tvm/relay/interpreter.py | 2 +- python/tvm/relay/op/_tensor.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/python/tvm/relay/interpreter.py b/python/tvm/relay/interpreter.py index 4321a0f28cd6..bd8ef0d14415 100644 --- a/python/tvm/relay/interpreter.py +++ b/python/tvm/relay/interpreter.py @@ -154,7 +154,7 @@ def evaluate(self, expr, params=None): # # If we are evaluating an open term with parameters we will # just return them the result. - if isinstance(expr, Function) or isinstance(expr, GlobalVar): + if isinstance(expr, (Function, GlobalVar)): return executor else: return executor() diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index 206e2aebf012..bf655333afb5 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -278,4 +278,3 @@ def concatenate_compute(attrs, inputs, output_type, target): register_compute("concatenate", concatenate_compute) register_schedule("concatenate", schedule_injective) - From 3da8ee8644e1e7c6c3d06ed42caa25a627d26600 Mon Sep 17 00:00:00 2001 From: Jared Roesch Date: Sat, 3 Nov 2018 17:07:00 -0700 Subject: [PATCH 13/13] Fix error in concat --- python/tvm/relay/op/_tensor.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py index bf655333afb5..5841d278378a 100644 --- a/python/tvm/relay/op/_tensor.py +++ b/python/tvm/relay/op/_tensor.py @@ -270,11 +270,3 @@ def clip_compute(attrs, inputs, output_type, target): register_compute("clip", clip_compute) register_schedule("clip", schedule_injective) - -# concatenate -def concatenate_compute(attrs, inputs, output_type, target): - assert len(inputs) == 1 - return [topi.concatenate(*inputs[0], attrs.axis)] - -register_compute("concatenate", concatenate_compute) -register_schedule("concatenate", schedule_injective)
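
Usage sketch (illustrative only, not taken from the patches above): the register_compute/register_schedule helpers that this series adds to python/tvm/relay/op/op.py and re-exports from tvm.relay.op attach an FTVMCompute function (attrs, inputs, output type, target -> list of TOPI tensors) and an FTVMSchedule function (outputs, target -> schedule) to an existing Relay op. The op name "my_op" and the use of topi.exp as its kernel are hypothetical placeholders; the op itself is assumed to already be declared on the C++ side.

    import tvm
    import topi
    from tvm.relay.op import register_compute, register_schedule

    def my_op_compute(attrs, inputs, output_type, target):
        # FTVMCompute: lower the Relay call into TOPI tensors.
        assert len(inputs) == 1
        return [topi.exp(inputs[0])]

    def my_op_schedule(outputs, target):
        # FTVMSchedule: build a target-specific schedule for those tensors,
        # following the same injective pattern used in _tensor.py above.
        with tvm.target.create(target):
            return topi.generic.schedule_injective(outputs)

    # "my_op" is a hypothetical name; register() looks the op up by name,
    # so it must already exist as a Relay operator.
    register_compute("my_op", my_op_compute)
    register_schedule("my_op", my_op_schedule)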