From 99df46231446c76a96eb1c5fc7ffaf5bcd8c9fb2 Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Wed, 8 Jan 2020 17:14:42 -0800 Subject: [PATCH 01/17] [arith] add ShapeVar representing non-neg valued variable in a tensor shape --- docs/api/python/tvm.rst | 2 ++ include/tvm/expr.h | 39 ++++++++++++++++++++++++++++++- include/tvm/ir_functor_ext.h | 4 ++++ python/tvm/api.py | 19 +++++++++++++++ python/tvm/build_module.py | 8 +++++-- python/tvm/expr.py | 17 ++++++++++++++ python/tvm/hybrid/preprocessor.py | 2 +- src/api/api_ir.cc | 5 ++++ src/arithmetic/bound_deducer.cc | 2 +- src/arithmetic/const_int_bound.cc | 10 ++++++++ src/arithmetic/int_set.cc | 17 ++++++++++++++ src/lang/expr.cc | 10 ++++++++ src/lang/ir.cc | 4 ++++ src/schedule/bound.cc | 2 +- topi/python/topi/nn/conv2d.py | 10 ++++---- topi/python/topi/util.py | 18 +++++++++++++- topi/python/topi/x86/conv2d.py | 4 ++-- topi/python/topi/x86/dense.py | 22 ++++++++--------- 18 files changed, 170 insertions(+), 25 deletions(-) diff --git a/docs/api/python/tvm.rst b/docs/api/python/tvm.rst index b517195db9e4..1a92f3eb63ae 100644 --- a/docs/api/python/tvm.rst +++ b/docs/api/python/tvm.rst @@ -24,6 +24,7 @@ The user facing API for computation declaration. tvm.load_json tvm.save_json tvm.var + tvm.shape_var tvm.const tvm.convert tvm.placeholder @@ -49,6 +50,7 @@ The user facing API for computation declaration. .. autofunction:: tvm.load_json .. autofunction:: tvm.save_json .. autofunction:: tvm.var +.. autofunction:: tvm.shape_var .. autofunction:: tvm.const .. autofunction:: tvm.convert .. autofunction:: tvm.placeholder diff --git a/include/tvm/expr.h b/include/tvm/expr.h index 64d7547dbad5..11bdcc3c4964 100644 --- a/include/tvm/expr.h +++ b/include/tvm/expr.h @@ -118,7 +118,7 @@ class VarNode : public ExprNode { } static constexpr const char* _type_key = "Variable"; - TVM_DECLARE_FINAL_OBJECT_INFO(VarNode, ExprNode); + TVM_DECLARE_BASE_OBJECT_INFO(VarNode, ExprNode); }; /*! \brief a named variable in TVM */ @@ -153,6 +153,43 @@ class Var : public Expr { using ContainerType = VarNode; }; +class ShapeVar; +/*! + * \brief A variable node represent a tensor shape size, + * whose value must be non-negative. + */ +class ShapeVarNode : public VarNode { + public: + static ShapeVar make(DataType dtype, std::string name_hint); + + static constexpr const char* _type_key = "ShapeVar"; + TVM_DECLARE_FINAL_OBJECT_INFO(ShapeVarNode, VarNode); +}; + +/*! \brief a named variable represents a tensor shape size */ +class ShapeVar : public Var { + public: + explicit ShapeVar(ObjectPtr n) : Var(n) {} + TVM_DLL explicit ShapeVar(std::string name_hint = "s", + DataType t = DataType::Int(32)); + /*! + * \brief Get pointer to the internal value. + * \return the corresponding Variable. + */ + const ShapeVarNode* operator->() const { + return get(); + } + /*! + * \brief Get pointer to the internal value. + * \return the corresponding Variable. + */ + const ShapeVarNode* get() const { + return static_cast(data_.get()); + } + /*! \brief type indicate the container type */ + using ContainerType = ShapeVarNode; +}; + // Backward compatibility, will be removed later. using VarExpr = Var; using BaseExprNode = ExprNode; diff --git a/include/tvm/ir_functor_ext.h b/include/tvm/ir_functor_ext.h index d70c8dec7689..a033978ef08d 100644 --- a/include/tvm/ir_functor_ext.h +++ b/include/tvm/ir_functor_ext.h @@ -133,6 +133,9 @@ class ExprFunctor { } // Functions that can be overriden by subclass virtual R VisitExpr_(const VarNode* op, Args... 
args) EXPR_FUNCTOR_DEFAULT; + virtual R VisitExpr_(const ShapeVarNode* op, Args... args) { + return VisitExpr_(static_cast(op), std::forward(args)...); + } virtual R VisitExpr_(const LoadNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const LetNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; virtual R VisitExpr_(const CallNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; @@ -175,6 +178,7 @@ class ExprFunctor { FType vtable; // Set dispatch IR_EXPR_FUNCTOR_DISPATCH(VarNode); + IR_EXPR_FUNCTOR_DISPATCH(ShapeVarNode); IR_EXPR_FUNCTOR_DISPATCH(LoadNode); IR_EXPR_FUNCTOR_DISPATCH(LetNode); IR_EXPR_FUNCTOR_DISPATCH(CallNode); diff --git a/python/tvm/api.py b/python/tvm/api.py index ef121bc880b2..7d4fdb171536 100644 --- a/python/tvm/api.py +++ b/python/tvm/api.py @@ -190,6 +190,25 @@ def var(name="tindex", dtype=int32): return _api_internal._Var(name, dtype) +def shape_var(name="tindex", dtype=int32): + """Create a new variable represents a tensor shape size, which is non-negative. + + Parameters + ---------- + name : str + The name + + dtype : str + The data type + + Returns + ------- + var : ShapeVar + The result symbolic shape variable. + """ + return _api_internal._ShapeVar(name, dtype) + + def any(*args): """Create a new experssion of the union of all conditions in the arguments diff --git a/python/tvm/build_module.py b/python/tvm/build_module.py index f96e28323595..1698e145dbe2 100644 --- a/python/tvm/build_module.py +++ b/python/tvm/build_module.py @@ -292,9 +292,13 @@ def get_binds(args, compact=False, binds=None): binds = {} if binds is None else binds.copy() cfg = current_build_config() arg_list = [] + + def is_var(idx): + return isinstance(idx, expr.Var) or isinstance(idx, expr.ShapeVar) + for x in args: if isinstance(x, tensor.Tensor): - any_dim = any(isinstance(i, expr.Var) for i in x.shape) + any_dim = any(is_var(i) for i in x.shape) buffer_type = "auto_broadcast" if any_dim and not compact else "" if x not in binds: buf = api.decl_buffer(x.shape, @@ -309,7 +313,7 @@ def get_binds(args, compact=False, binds=None): arg_list.append(binds[x]) elif isinstance(x, schedule.Buffer): arg_list.append(x) - elif isinstance(x, expr.Var): + elif is_var(x): arg_list.append(x) else: raise ValueError("args must be Tensor, Buffer or Var") diff --git a/python/tvm/expr.py b/python/tvm/expr.py index 733f57a68c56..17e4e9a21a44 100644 --- a/python/tvm/expr.py +++ b/python/tvm/expr.py @@ -278,6 +278,23 @@ def __init__(self, name, dtype): _api_internal._Var, name, dtype) +@register_node +class ShapeVar(Var): + """Symbolic variable to represent a tensor shape size. + + Parameters + ---------- + name : str + The name + + dtype : int + The data type + """ + def __init__(self, name, dtype): + self.__init_handle_by_constructor__( + _api_internal._ShapeVar, name, dtype) + + @register_node class Reduce(Expr): """Reduce node. 
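For illustration, a minimal usage sketch of the new Python-side API added above (not part of the diff; assumes an LLVM-enabled TVM build, and the size 1024 is an arbitrary example value):

import tvm
import numpy as np

n = tvm.shape_var("n")                      # symbolic extent, known to be >= 0
A = tvm.placeholder((n,), name="A")
B = tvm.compute((n,), lambda i: A[i] + 1.0, name="B")
s = tvm.create_schedule(B.op)
f = tvm.build(s, [A, B], "llvm")

a = tvm.nd.array(np.random.rand(1024).astype("float32"))
b = tvm.nd.array(np.zeros(1024, dtype="float32"))
f(a, b)                                     # n is bound to 1024 at call time
np.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1.0)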
diff --git a/python/tvm/hybrid/preprocessor.py b/python/tvm/hybrid/preprocessor.py index 1a9de4e3f801..035e8a40f245 100644 --- a/python/tvm/hybrid/preprocessor.py +++ b/python/tvm/hybrid/preprocessor.py @@ -63,7 +63,7 @@ def visit_Call(self, node): _internal_assert(func_id in list(HYBRID_GLOBALS.keys()) + \ ['range', 'max', 'min', 'len'] + \ list(self.symbols.keys()), \ - "Function call id not in intrinsics' list") + "Function call id " + func_id + " not in intrinsics' list") for elem in node.args: self.visit(elem) diff --git a/src/api/api_ir.cc b/src/api/api_ir.cc index ba04239dabb7..39195f571cad 100644 --- a/src/api/api_ir.cc +++ b/src/api/api_ir.cc @@ -36,6 +36,11 @@ TVM_REGISTER_GLOBAL("_Var") return VarNode::make(t, s); }); +TVM_REGISTER_GLOBAL("_ShapeVar") +.set_body_typed([](std::string s, DataType t) { + return ShapeVarNode::make(t, s); + }); + TVM_REGISTER_GLOBAL("make.abs") .set_body_typed(tvm::abs); diff --git a/src/arithmetic/bound_deducer.cc b/src/arithmetic/bound_deducer.cc index 40f86de7561a..d8a5a2df39ba 100644 --- a/src/arithmetic/bound_deducer.cc +++ b/src/arithmetic/bound_deducer.cc @@ -86,7 +86,7 @@ class BoundDeducer: public ExprVisitor { void VisitExpr(const Expr& e) final { if (!success_) return; - if (e.get() == path_[iter_++]) { + if (iter_ < path_.size() && e.get() == path_[iter_++]) { ExprVisitor::VisitExpr(e); } else { success_ = false; diff --git a/src/arithmetic/const_int_bound.cc b/src/arithmetic/const_int_bound.cc index d3f885ae8d8c..02b80a93bad8 100644 --- a/src/arithmetic/const_int_bound.cc +++ b/src/arithmetic/const_int_bound.cc @@ -292,6 +292,16 @@ class ConstIntBoundAnalyzer::Impl : } } + Entry VisitExpr_(const ShapeVarNode* op) final { + ShapeVar v = GetRef(op); + auto it = var_map_.find(v); + if (it != var_map_.end()) { + return it->second; + } else { + return MakeBound(0, kPosInf); + } + } + Entry VisitRightShift(const CallNode* op) { Entry a = VisitExpr(op->args[0]); Entry b = VisitExpr(op->args[1]); diff --git a/src/arithmetic/int_set.cc b/src/arithmetic/int_set.cc index c60c8254c80c..7f1feaf69306 100644 --- a/src/arithmetic/int_set.cc +++ b/src/arithmetic/int_set.cc @@ -405,6 +405,23 @@ class IntervalSetEvaluator : } } + IntervalSet VisitExpr_(const ShapeVarNode* op) final { + Var var = GetRef(op); + auto it = dom_map_.find(var); + if (it != dom_map_.end()) { + IntervalSet res = ToIntervalSet((*it).second); + if (res->min_value.same_as(var) && + res->max_value.same_as(var)) { + return res; + } + // recursively evaluate mapped result + // in case the domain contains variables to be relaxed. + return Eval(res); + } else { + return IntervalSet(0, GetRef(op)); + } + } + IntervalSet VisitExpr_(const AddNode* op) final { return VisitBinaryExpr_(op); } diff --git a/src/lang/expr.cc b/src/lang/expr.cc index 58a97ed91742..21addb04d533 100644 --- a/src/lang/expr.cc +++ b/src/lang/expr.cc @@ -48,6 +48,16 @@ Var VarNode::make(DataType t, std::string name_hint) { return Var(node); } +ShapeVar::ShapeVar(std::string name_hint, DataType t) + : ShapeVar(ShapeVarNode::make(t, name_hint)) {} + +ShapeVar ShapeVarNode::make(DataType t, std::string name_hint) { + ObjectPtr node = make_object(); + node->dtype = t; + node->name_hint = std::move(name_hint); + return ShapeVar(node); +} + Range::Range(Expr begin, Expr end) : Range(make_object( begin, diff --git a/src/lang/ir.cc b/src/lang/ir.cc index 6b777cc5e887..d10a339c0a99 100644 --- a/src/lang/ir.cc +++ b/src/lang/ir.cc @@ -605,6 +605,10 @@ TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable) // stream << op->name << "." 
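The ConstIntBound and IntervalSet handlers above are what make the non-negativity of a ShapeVar visible to the arithmetic analyzer. A small sketch of the expected behaviour, assuming the existing tvm.arith.Analyzer Python binding (the printed representation may differ):

import tvm

analyzer = tvm.arith.Analyzer()
n = tvm.shape_var("n")                 # non-negative by construction
m = tvm.var("m")                       # plain Var, nothing is known about it

print(analyzer.const_int_bound(n))     # expected: min_value == 0, max_value == +inf
print(analyzer.const_int_bound(m))     # expected: min_value == -inf, max_value == +inf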
<< op->type; p->stream << op->name_hint; }) +.set_dispatch([](const ObjectRef& node, NodePrinter* p) { + auto* op = static_cast(node.get()); + p->stream << "{" << op->name_hint << "|" << op->name_hint << ">=0}"; + }) .set_dispatch([](const ObjectRef& node, NodePrinter* p) { auto* op = static_cast(node.get()); p->stream << '('; diff --git a/src/schedule/bound.cc b/src/schedule/bound.cc index ce2397b1d4f7..5f363dbc126e 100644 --- a/src/schedule/bound.cc +++ b/src/schedule/bound.cc @@ -237,7 +237,7 @@ Map InferBound(const Schedule& sch) { InferRootBound(stage, ctx, &ret); // bind bound of root iter vars. - for (auto iv : stage->op->root_iter_vars()) { + for (auto iv : stage->op->root_iter_vars()) { auto it = ret.find(iv); if (it != ret.end()) { analyzer.Bind(iv->var, it->second); diff --git a/topi/python/topi/nn/conv2d.py b/topi/python/topi/nn/conv2d.py index 169878c11a85..664a293d56ae 100644 --- a/topi/python/topi/nn/conv2d.py +++ b/topi/python/topi/nn/conv2d.py @@ -142,18 +142,18 @@ def conv2d_infer_layout(workload, cfg): def _get_workload(data, kernel, stride, padding, out_dtype, data_layout='NCHW'): """ Get the workload structure. """ if data_layout == 'NCHW': - _, CI, IH, IW = [x.value for x in data.shape] + _, CI, IH, IW = get_const_tuple(data.shape) elif data_layout == 'NHWC': - _, IH, IW, CI = [x.value for x in data.shape] + _, IH, IW, CI = get_const_tuple(data.shape) elif data_layout == 'HWCN': - IH, IW, CI, _ = [x.value for x in data.shape] + IH, IW, CI, _ = get_const_tuple(data.shape) else: raise ValueError("not support this layout {} yet".format(data_layout)) if data_layout == 'NCHW': - CO, CIG, KH, KW = [x.value for x in kernel.shape] + CO, CIG, KH, KW = get_const_tuple(kernel.shape) else: - KH, KW, CIG, CO = [x.value for x in kernel.shape] + KH, KW, CIG, CO = get_const_tuple(kernel.shape) HPAD, WPAD, _, _ = get_pad_tuple(padding, (get_const_int(KH), get_const_int(KW))) GRPS = CI // CIG diff --git a/topi/python/topi/util.py b/topi/python/topi/util.py index 079dda5d0b0e..6f436ab6a0af 100644 --- a/topi/python/topi/util.py +++ b/topi/python/topi/util.py @@ -143,6 +143,22 @@ def equal_const_int(expr, value): return expr.value == value +def is_var(expr): + """Check whether the input is tvm.expr.Var or tvm.expr.ShapeVar + Parameters + ---------- + expr : tvm.Expr + The input expression. + Returns + ------- + equal : bool + Whether it is tvm.expr.Var or + tvm_assert_bound intrinsic (which provides the boundary information of a Var). + """ + return isinstance(expr, tvm.expr.Var) \ + or isinstance(expr, tvm.expr.ShapeVar) + + def get_const_tuple(in_tuple): """Verifies input tuple is IntImm or Var, returns tuple of int or Var. @@ -158,7 +174,7 @@ def get_const_tuple(in_tuple): """ ret = [] for elem in in_tuple: - if isinstance(elem, tvm.expr.Var): + if is_var(elem): ret.append(elem) elif not isinstance(elem, (tvm.expr.IntImm, tvm.expr.UIntImm, int)): elem = tvm.ir_pass.Simplify(elem) diff --git a/topi/python/topi/x86/conv2d.py b/topi/python/topi/x86/conv2d.py index 8a6b57eb9e66..f05550c8143b 100644 --- a/topi/python/topi/x86/conv2d.py +++ b/topi/python/topi/x86/conv2d.py @@ -31,7 +31,7 @@ from ..nn.depthwise_conv2d import _get_workload as _get_depthwise_conv2d_workload from ..nn.pad import pad from ..nn.util import get_pad_tuple -from ..util import get_const_tuple +from ..util import get_const_tuple, is_var from . 
import conv2d_avx_1x1, conv2d_avx_common @@ -44,7 +44,7 @@ def _get_default_config(cfg, data, kernel, strides, padding, out_dtype, is_depth """ static_data_shape = [] for dim in get_const_tuple(data.shape): - if isinstance(dim, tvm.expr.Var): + if is_var(dim): static_data_shape.append(1) else: static_data_shape.append(dim) diff --git a/topi/python/topi/x86/dense.py b/topi/python/topi/x86/dense.py index b7a3d6d5a330..b3c162ffb95f 100644 --- a/topi/python/topi/x86/dense.py +++ b/topi/python/topi/x86/dense.py @@ -24,7 +24,7 @@ from .util import get_fp32_len from .. import generic, tag, nn -from ..util import traverse_inline, get_const_tuple +from ..util import traverse_inline, get_const_tuple, is_var @autotvm.register_topi_compute(nn.dense, "cpu", "direct") def _declaration_dense(cfg, data, weight, bias=None, out_dtype=None): @@ -40,7 +40,7 @@ def _declaration_dense(cfg, data, weight, bias=None, out_dtype=None): # Always use dense_nopack for dynamic input. # This is a temporary for CV models. # TODO(kevinthesun): use kernel dispatcher instead. - if isinstance(M, tvm.expr.Var): + if is_var(M): return _declaration_dense_nopack(cfg, data, weight, bias, out_dtype) # For small batch sizes, don't pack weight into cache-friendly layout @@ -59,9 +59,9 @@ def _declaration_dense_pack(cfg, data, weight, bias=None, out_dtype=None): M, K = get_const_tuple(data.shape) # batch, in_dim N, _ = get_const_tuple(weight.shape) # out_dim # create tuning space - cfg.define_split("tile_y", 32 if isinstance(M, tvm.expr.Var) else M, num_outputs=3) - cfg.define_split("tile_x", 32 if isinstance(N, tvm.expr.Var) else N, num_outputs=3) - cfg.define_split("tile_k", 32 if isinstance(K, tvm.expr.Var) else K, num_outputs=2) + cfg.define_split("tile_y", 32 if is_var(M) else M, num_outputs=3) + cfg.define_split("tile_x", 32 if is_var(N) else N, num_outputs=3) + cfg.define_split("tile_k", 32 if is_var(K) else K, num_outputs=2) if cfg.is_fallback: _default_dense_pack_config(cfg, M, N, K) @@ -93,9 +93,9 @@ def _declaration_dense_nopack(cfg, data, weight, bias=None, out_dtype=None): M, K = get_const_tuple(data.shape) N, _ = get_const_tuple(weight.shape) # create tuning space - cfg.define_split("tile_y", 32 if isinstance(M, tvm.expr.Var) else M, num_outputs=2) - cfg.define_split("tile_x", 32 if isinstance(N, tvm.expr.Var) else N, num_outputs=2) - cfg.define_split("tile_k", 32 if isinstance(K, tvm.expr.Var) else K, num_outputs=2) + cfg.define_split("tile_y", 32 if is_var(M) else M, num_outputs=2) + cfg.define_split("tile_x", 32 if is_var(N) else N, num_outputs=2) + cfg.define_split("tile_k", 32 if is_var(K) else K, num_outputs=2) if cfg.is_fallback: _default_dense_nopack_config(cfg, M, N, K) @@ -218,11 +218,11 @@ def _schedule_dense_nopack_template(cfg, s, C): def _default_dense_pack_config(cfg, M, N, K): # Generate default schedule for dynamic shape. 
- if isinstance(M, tvm.expr.Var): + if is_var(M): M = 16 - if isinstance(N, tvm.expr.Var): + if is_var(N): N = 16 - if isinstance(K, tvm.expr.Var): + if is_var(K): K = 16 vec_width = get_fp32_len() From dd1eb766408f5216ddfe13c8a6f724000ff70356 Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Thu, 9 Jan 2020 15:30:53 -0800 Subject: [PATCH 02/17] bounder remover; deal with div in int_set differently --- src/arithmetic/bound_deducer.cc | 48 +++++++++++++++++++ src/arithmetic/int_set.cc | 29 +++++++++-- src/lang/ir.cc | 1 + tests/python/unittest/test_hybrid_script.py | 6 +-- .../unittest/test_pass_loop_partition.py | 37 +++++++------- 5 files changed, 96 insertions(+), 25 deletions(-) diff --git a/src/arithmetic/bound_deducer.cc b/src/arithmetic/bound_deducer.cc index d8a5a2df39ba..aa88b325b9cc 100644 --- a/src/arithmetic/bound_deducer.cc +++ b/src/arithmetic/bound_deducer.cc @@ -70,6 +70,39 @@ std::vector GetPath(Expr target, Expr expr) { return v.path_; } +class BoundRemover : public ExprMutator { + public: + Expr Remove(const Expr& e) { + remove_bounded_ = true; + return ExprMutator::VisitExpr(e); + } + + Expr Reset(const Expr& e) { + remove_bounded_ = false; + return ExprMutator::VisitExpr(e); + } + + Expr VisitExpr_(const ShapeVarNode* op) final { + if (remove_bounded_) { + Expr var = VarNode::make(op->dtype, op->name_hint); + bounded_var_map_[op] = var; + return var; + } + return GetRef(op); + } + + Expr VisitExpr_(const VarNode* op) final { + if (!remove_bounded_ && bounded_var_map_.count(op)) { + return bounded_var_map_[op]; + } + return GetRef(op); + } + + private: + bool remove_bounded_ = false; + std::unordered_map bounded_var_map_; +}; + enum CompareOp {kGreater, kLess, kEqual}; // a visitor to deduce the bound of a variable from a expression @@ -297,6 +330,18 @@ void BoundDeducer::Transform() { void BoundDeducer::Deduce() { Init(); if (!success_) return; + + // Any variable appears in both expr and result, + // they should not be eagerly simplified according to its bound + // e.g., i + n/4 >= n + // => i >= n - n/4 + // If we eagerly simplified the left side given ShapeVar({n | n >= 0}) + // we would get i + 0 >= n => i >= n, which is obviously incorrect. + // Thus we remove assert_bound here and reset later. 
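A concrete instance of the pitfall described in the comment above, using the hypothetical value n = 8 (plain Python, just to check the arithmetic):

n = 8                                  # stands in for a ShapeVar with n >= 0
correct_lower = n - n // 4             # i >= 6 satisfies i + n/4 >= n
wrong_lower = n                        # the over-eager result i >= n, i.e. i >= 8
assert all(i + n // 4 >= n for i in range(correct_lower, n))
assert wrong_lower > correct_lower     # the solutions i = 6, 7 would be lost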
+ BoundRemover bound_remover; + expr_ = bound_remover.Remove(expr_); + result_ = bound_remover.Remove(result_); + Relax(); if (!success_) return; // get the path @@ -308,6 +353,9 @@ void BoundDeducer::Deduce() { expr_map_ = EvalSetForEachSubExpr(expr_, hint_map_); this->VisitExpr(expr_); + + expr_ = bound_remover.Reset(expr_); + result_ = bound_remover.Reset(result_); } void BoundDeducer::Relax() { diff --git a/src/arithmetic/int_set.cc b/src/arithmetic/int_set.cc index 7f1feaf69306..ba5d12ac7980 100644 --- a/src/arithmetic/int_set.cc +++ b/src/arithmetic/int_set.cc @@ -435,19 +435,19 @@ class IntervalSetEvaluator : } IntervalSet VisitExpr_(const DivNode* op) final { - return VisitBinaryExpr_(op); + return VisitDivExpr_(op); } IntervalSet VisitExpr_(const ModNode* op) final { - return VisitBinaryExpr_(op); + return VisitDivExpr_(op); } IntervalSet VisitExpr_(const FloorDivNode* op) final { - return VisitBinaryExpr_(op); + return VisitDivExpr_(op); } IntervalSet VisitExpr_(const FloorModNode* op) final { - return VisitBinaryExpr_(op); + return VisitDivExpr_(op); } IntervalSet VisitExpr_(const MinNode* op) final { @@ -536,6 +536,11 @@ class IntervalSetEvaluator : return set->min_value.same_as(value) && set->max_value.same_as(value); } + bool IsVar(const Expr& op) { + // Var or ShapeVar + return op.as(); + } + template inline IntervalSet VisitBinaryExpr_(const T* op) { IntervalSet a = this->Eval(op->a); @@ -546,6 +551,22 @@ class IntervalSetEvaluator : return Combine(analyzer_, a, b); } + template + inline IntervalSet VisitDivExpr_(const T* op) { + IntervalSet a = this->Eval(op->a); + IntervalSet b = this->Eval(op->b); + if ((MatchPoint(a, op->a) && (MatchPoint(b, op->b) || IsVar(op->b))) + || (IsVar(op->a) && IsVar(op->b))) { + // e.g., + // div(10, 5) evaluates to 2 + // div(10, {n|n>=0}) evaluates to itself + // div({m|m>=0}, {n|n>=0}) evaluates to itself + return IntervalSet::SinglePoint(GetRef(op)); + } + // e.g., div({m|m>=0}, 2) goes here + return Combine(analyzer_, a, b); + } + // recursive depth int recur_depth_{0}; // analyzer diff --git a/src/lang/ir.cc b/src/lang/ir.cc index d10a339c0a99..cd8a79400c0d 100644 --- a/src/lang/ir.cc +++ b/src/lang/ir.cc @@ -1161,6 +1161,7 @@ TVM_REGISTER_NODE_TYPE(UIntImmNode); TVM_REGISTER_NODE_TYPE(StringImmNode); TVM_REGISTER_NODE_TYPE(CastNode); TVM_REGISTER_NODE_TYPE(VarNode); +TVM_REGISTER_NODE_TYPE(ShapeVarNode); TVM_REGISTER_NODE_TYPE(AddNode); TVM_REGISTER_NODE_TYPE(SubNode); TVM_REGISTER_NODE_TYPE(MulNode); diff --git a/tests/python/unittest/test_hybrid_script.py b/tests/python/unittest/test_hybrid_script.py index c3c40cf740ad..fc4a29045d3a 100644 --- a/tests/python/unittest/test_hybrid_script.py +++ b/tests/python/unittest/test_hybrid_script.py @@ -98,8 +98,8 @@ def outer_product(n, m, a, b): #Test global function #Test bridge between frontend and backend def test_outer_product(): - n = tvm.var('n') - m = tvm.var('m') + n = tvm.shape_var('n') + m = tvm.shape_var('m') a = tvm.placeholder((n, ), name='a') b = tvm.placeholder((m, ), name='b') @@ -167,7 +167,7 @@ def fanout(n, a): b[i] = sigma return b - n = tvm.var('n') + n = tvm.shape_var('n') a = tvm.placeholder((n, ), 'float32', name='a') try: b = fanout(n, a) diff --git a/tests/python/unittest/test_pass_loop_partition.py b/tests/python/unittest/test_pass_loop_partition.py index c58b2f6dd298..99332c6033f1 100644 --- a/tests/python/unittest/test_pass_loop_partition.py +++ b/tests/python/unittest/test_pass_loop_partition.py @@ -52,7 +52,7 @@ def lower(sch, args): return stmt def test_basic(): 
- n = tvm.var('n') + n = tvm.shape_var('n') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((n, ), name='B') @@ -65,6 +65,7 @@ def test_basic(): stmt = tvm.ir_pass.LoopPartition(stmt, False) stmt = tvm.ir_pass.Simplify(stmt) assert('if' not in str(stmt.body.body.body[0])) + assert('if' in str(stmt.body.body.body[1])) def test_const_loop(): n = 21 @@ -83,8 +84,8 @@ def test_const_loop(): def test_multi_loop(): ib = tvm.ir_builder.create() - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') with ib.for_range(0, 4, "i") as i: with ib.for_range(0, n, "j") as j: with ib.for_range(0, m, "k") as k: @@ -99,8 +100,8 @@ def test_multi_loop(): def test_multi_if(): ib = tvm.ir_builder.create() - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') with ib.for_range(0, 4, 'i') as i: with ib.for_range(0, n, 'j') as j: with ib.for_range(0, m, 'k') as k: @@ -118,8 +119,8 @@ def test_multi_if(): assert('if' not in str(stmt.body[0])) def test_thread_axis(): - m = tvm.var('m') - l = tvm.var('l') + m = tvm.shape_var('m') + l = tvm.shape_var('l') A = tvm.placeholder((m, l), name='A') B = tvm.compute((m, l), lambda i, j: A[i, j] + 3, name='B') s = tvm.create_schedule(B.op) @@ -137,11 +138,11 @@ def test_thread_axis(): assert('if' not in str(stmt.body.body.body[0])) def test_vectorize(): - n = tvm.var('n') + n = tvm.shape_var('n') A = tvm.placeholder((n,), name='A') B = tvm.placeholder((n,), name='B') - bias = tvm.var("bias", dtype="float32") - scale = tvm.var("scale", dtype="float32") + bias = tvm.shape_var("bias", dtype="float32") + scale = tvm.shape_var("scale", dtype="float32") C = tvm.compute(A.shape, lambda *i: A(*i) + B(*i) * scale + bias, name='C') # schedule s = tvm.create_schedule(C.op) @@ -160,8 +161,8 @@ def test_vectorize(): def test_condition(): ib = tvm.ir_builder.create() - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') with ib.for_range(0, tvm.truncdiv(n+3,4), 'i') as i: with ib.for_range(0, 4, 'j') as j: ib.emit(tvm.make.Evaluate( @@ -173,8 +174,8 @@ def test_condition(): def test_condition_EQ(): ib = tvm.ir_builder.create() - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') with ib.for_range(0, 10, 'i') as i: ib.emit(tvm.make.Evaluate( tvm.make.Select(ib.likely(tvm.expr.EQ(i, 5)), m, n))) @@ -185,7 +186,7 @@ def test_condition_EQ(): def test_thread_axis2(): n = tvm.convert(4096) - m = tvm.var('m') + m = tvm.shape_var('m') A = tvm.placeholder((n,), name='A') B = tvm.placeholder((n,), name='B') C = tvm.compute(A.shape, lambda i: A[i] + B[i], name='C') @@ -201,8 +202,8 @@ def test_thread_axis2(): assert('threadIdx' not in str(for_body.extent)) def test_everything_during_deduction(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') ib = tvm.ir_builder.create() with ib.for_range(0, n, 'i') as i: with ib.for_range(0, 32, 'j') as j: @@ -252,7 +253,7 @@ def test_multi_likely(): assert(not any(collect_visit(stmt, lambda x: isinstance(x, tvm.stmt.IfThenElse)))) def test_oneD_pool(): - m = tvm.var('m') + m = tvm.shape_var('m') ib = tvm.ir_builder.create() #data = tvm.placeholder((16,), name = 'data') data = ib.pointer("float32", name="A") From 181712153de5f84a16a5c33a0713ed33319c5042 Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Thu, 9 Jan 2020 19:56:36 -0800 Subject: [PATCH 03/17] fix bounder_remover --- src/arithmetic/bound_deducer.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git 
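The migrated loop-partition tests above rely on the extra information a ShapeVar carries. A rough sketch of the intended effect (the exact lowered IR depends on the pass pipeline, so the expected output below is an assumption rather than a guarantee):

import tvm

n = tvm.shape_var("n")
A = tvm.placeholder((n,), name="A")
B = tvm.compute((n,), lambda i: A[i] + 1.0, name="B")
s = tvm.create_schedule(B.op)
xo, xi = s[B].split(B.op.axis[0], factor=4)
stmt = tvm.lower(s, [A, B], simple_mode=True)
print(stmt)   # with n known to be >= 0, the partitioned main loop should carry
              # no boundary "if"; with a plain tvm.var('n') the guard remains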
a/src/arithmetic/bound_deducer.cc b/src/arithmetic/bound_deducer.cc index aa88b325b9cc..1c496b026bac 100644 --- a/src/arithmetic/bound_deducer.cc +++ b/src/arithmetic/bound_deducer.cc @@ -83,12 +83,13 @@ class BoundRemover : public ExprMutator { } Expr VisitExpr_(const ShapeVarNode* op) final { + Expr shape_var = GetRef(op); if (remove_bounded_) { Expr var = VarNode::make(op->dtype, op->name_hint); - bounded_var_map_[op] = var; + bounded_var_map_[var.as()] = shape_var; return var; } - return GetRef(op); + return shape_var; } Expr VisitExpr_(const VarNode* op) final { From 22c45496a5aa251b6ea8a4ff966cf88af33b2aa4 Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Thu, 9 Jan 2020 20:31:08 -0800 Subject: [PATCH 04/17] migrate unittest to use shape_var --- .../unittest/test_arith_stmt_simplify.py | 9 ++-- tests/python/unittest/test_build_lower.py | 6 +-- tests/python/unittest/test_codegen_arm.py | 4 +- tests/python/unittest/test_codegen_c_host.py | 2 +- tests/python/unittest/test_codegen_device.py | 2 +- tests/python/unittest/test_codegen_llvm.py | 4 +- tests/python/unittest/test_codegen_rocm.py | 4 +- .../unittest/test_codegen_static_init.py | 8 +-- .../python/unittest/test_codegen_vm_basic.py | 12 ++--- tests/python/unittest/test_ir_builder.py | 8 +-- tests/python/unittest/test_lang_buffer.py | 46 ++++++++--------- tests/python/unittest/test_lang_group.py | 12 ++--- tests/python/unittest/test_lang_schedule.py | 30 +++++------ tests/python/unittest/test_lang_tag.py | 30 +++++------ tests/python/unittest/test_lang_tensor.py | 50 +++++++++---------- .../unittest/test_lang_tensor_overload_op.py | 2 +- .../unittest/test_lang_verify_compute.py | 4 +- tests/python/unittest/test_module_load.py | 2 +- .../unittest/test_pass_bound_checkers.py | 44 ++++++++-------- .../test_pass_decorate_device_scope.py | 4 +- tests/python/unittest/test_pass_inline.py | 4 +- tests/python/unittest/test_pass_makeapi.py | 2 +- .../unittest/test_pass_split_host_device.py | 2 +- .../unittest/test_pass_storage_flatten.py | 8 +-- .../python/unittest/test_pass_storage_sync.py | 10 ++-- tests/python/unittest/test_pass_unroll.py | 6 +-- 26 files changed, 158 insertions(+), 157 deletions(-) diff --git a/tests/python/unittest/test_arith_stmt_simplify.py b/tests/python/unittest/test_arith_stmt_simplify.py index 272893e20c12..2668b93f1b26 100644 --- a/tests/python/unittest/test_arith_stmt_simplify.py +++ b/tests/python/unittest/test_arith_stmt_simplify.py @@ -20,7 +20,7 @@ def test_stmt_simplify(): ib = tvm.ir_builder.create() A = ib.pointer("float32", name="A") C = ib.pointer("float32", name="C") - n = tvm.var("n") + n = tvm.shape_var("n") with ib.for_range(0, n, name="i") as i: with ib.if_scope(i < 12): A[i] = C[i] @@ -34,7 +34,7 @@ def test_thread_extent_simplify(): ib = tvm.ir_builder.create() A = ib.pointer("float32", name="A") C = ib.pointer("float32", name="C") - n = tvm.var("n") + n = tvm.shape_var("n") tx = tvm.thread_axis("threadIdx.x") ty = tvm.thread_axis("threadIdx.y") ib.scope_attr(tx, "thread_extent", n) @@ -48,7 +48,7 @@ def test_thread_extent_simplify(): def test_basic_likely_elimination(): - n = tvm.var('n') + n = tvm.shape_var('n') X = tvm.placeholder(shape=(n,), name="x") W = tvm.placeholder(shape=(n + 1,), dtype="int32", name="w") @@ -87,7 +87,8 @@ def sls(n, d): return tvm.compute(oshape, sls) - m, n, d, i, l = tvm.var('m'), tvm.var('n'), tvm.var('d'), tvm.var('i'), tvm.var('l') + m, n, d, i, l = tvm.shape_var('m'), tvm.shape_var('n'), tvm.shape_var('d'),\ + tvm.shape_var('i'), tvm.shape_var('l') data_ph = 
tvm.placeholder((m, d * 32), name="data") indices_ph = tvm.placeholder((i,), name="indices", dtype="int32") lengths_ph = tvm.placeholder((n,), name="lengths", dtype="int32") diff --git a/tests/python/unittest/test_build_lower.py b/tests/python/unittest/test_build_lower.py index 090120c1c921..106a9dab1a2e 100644 --- a/tests/python/unittest/test_build_lower.py +++ b/tests/python/unittest/test_build_lower.py @@ -17,8 +17,8 @@ import tvm def test_lower_rfactor(): - n = tvm.var("n") - m = tvm.var("m") + n = tvm.shape_var("n") + m = tvm.shape_var("m") A = tvm.placeholder((n, m), name='A') k = tvm.reduce_axis((0, m), "k") B = tvm.compute((n,), lambda i: tvm.sum(A[i, k], axis=k), name="B") @@ -33,7 +33,7 @@ def test_lower_rfactor(): fapi = tvm.lower(s, [A, B]) def test_dependent_output_shape(): - n, m, x = tvm.var('n'), tvm.var('m'), tvm.var('x') + n, m, x = tvm.shape_var('n'), tvm.shape_var('m'), tvm.shape_var('x') A = tvm.placeholder((n, m)) B = tvm.compute((m, n//x), lambda i, j: A[i,j] , name='B') s = tvm.create_schedule(B.op) diff --git a/tests/python/unittest/test_codegen_arm.py b/tests/python/unittest/test_codegen_arm.py index 2385f2ffb59a..4aaa0ef7381f 100644 --- a/tests/python/unittest/test_codegen_arm.py +++ b/tests/python/unittest/test_codegen_arm.py @@ -47,7 +47,7 @@ def test_vmlal_s16(): target = 'llvm -target=armv7l-none-linux-gnueabihf -mcpu=cortex-a53 -mattr=+neon' def check_correct_assembly(N): - K = tvm.var("K") + K = tvm.shape_var("K") A = tvm.placeholder((K, N), dtype="int8", name='A') B = tvm.placeholder((K, N), dtype="int8", name='B') k = tvm.reduce_axis((0, K)) @@ -67,7 +67,7 @@ def check_correct_assembly(N): check_correct_assembly(64) def check_broadcast_correct_assembly(N): - K = tvm.var("K") + K = tvm.shape_var("K") A = tvm.placeholder((K, N), dtype="int8", name='A') B = tvm.placeholder((K,), dtype="int8", name='B') k = tvm.reduce_axis((0, K)) diff --git a/tests/python/unittest/test_codegen_c_host.py b/tests/python/unittest/test_codegen_c_host.py index 92baca25bf11..27f9ba1f14e8 100644 --- a/tests/python/unittest/test_codegen_c_host.py +++ b/tests/python/unittest/test_codegen_c_host.py @@ -67,7 +67,7 @@ def check_c(): # Specifically allow offset to test codepath when offset is available Ab = tvm.decl_buffer( A.shape, A.dtype, - elem_offset=tvm.var('Aoffset'), + elem_offset=tvm.shape_var('Aoffset'), offset_factor=8, name='A') binds = {A : Ab} diff --git a/tests/python/unittest/test_codegen_device.py b/tests/python/unittest/test_codegen_device.py index 45ecf9539337..03b3c4f1cec7 100644 --- a/tests/python/unittest/test_codegen_device.py +++ b/tests/python/unittest/test_codegen_device.py @@ -19,7 +19,7 @@ import numpy as np def test_add_pipeline(): - n = tvm.var('n') + n = tvm.shape_var('n') A = tvm.placeholder((n,), name='A') B = tvm.placeholder((), name='B') C = tvm.compute(A.shape, lambda *i: A(*i) + B(), name='C') diff --git a/tests/python/unittest/test_codegen_llvm.py b/tests/python/unittest/test_codegen_llvm.py index 0e595cd79c97..2cb7a14febcc 100644 --- a/tests/python/unittest/test_codegen_llvm.py +++ b/tests/python/unittest/test_codegen_llvm.py @@ -79,7 +79,7 @@ def check_llvm(use_file): def test_llvm_lookup_intrin(): ib = tvm.ir_builder.create() - m = tvm.var("m") + m = tvm.shape_var("m") A = ib.pointer("uint8x8", name="A") x = tvm.call_llvm_intrin("uint8x8", "llvm.ctpop.i8", tvm.const(1, 'uint32'), A) ib.emit(x) @@ -112,7 +112,7 @@ def check_llvm(): # Specifically allow offset to test codepath when offset is available Ab = tvm.decl_buffer( A.shape, A.dtype, - 
elem_offset=tvm.var('Aoffset'), + elem_offset=tvm.shape_var('Aoffset'), offset_factor=8, name='A') binds = {A : Ab} diff --git a/tests/python/unittest/test_codegen_rocm.py b/tests/python/unittest/test_codegen_rocm.py index bba72e053142..7a92dad5d335 100644 --- a/tests/python/unittest/test_codegen_rocm.py +++ b/tests/python/unittest/test_codegen_rocm.py @@ -26,8 +26,8 @@ @unittest.skipIf(not tvm.rocm(0).exist or not tvm.module.enabled("rocm"), "skip because rocm is not enabled..") def test_rocm_cross_thread_reduction(): # based on the reduction tutorial - n = tvm.var("n") - m = tvm.var("m") + n = tvm.shape_var("n") + m = tvm.shape_var("m") A = tvm.placeholder((n, m), name='A') k = tvm.reduce_axis((0, m), "k") B = tvm.compute((n,), lambda i: tvm.sum(A[i, k], axis=k), name="B") diff --git a/tests/python/unittest/test_codegen_static_init.py b/tests/python/unittest/test_codegen_static_init.py index b1092309e70a..4090ff5ed6d2 100644 --- a/tests/python/unittest/test_codegen_static_init.py +++ b/tests/python/unittest/test_codegen_static_init.py @@ -20,9 +20,9 @@ def test_static_callback(): dtype = 'int64' - n = tvm.var('n') + n = tvm.shape_var('n') Ab = tvm.decl_buffer((n, ), dtype) - i = tvm.var('i') + i = tvm.shape_var('i') ib = tvm.ir_builder.create() A = ib.buffer_ptr(Ab) cp = tvm.thread_axis((0, 1), "cop") @@ -41,9 +41,9 @@ def test_static_callback(): def test_static_init(): dtype = 'int64' - n = tvm.var('n') + n = tvm.shape_var('n') Ab = tvm.decl_buffer((n, ), dtype) - i = tvm.var('i') + i = tvm.shape_var('i') ib = tvm.ir_builder.create() handle = tvm.call_intrin("handle", "tvm_static_handle") ib.emit( diff --git a/tests/python/unittest/test_codegen_vm_basic.py b/tests/python/unittest/test_codegen_vm_basic.py index 7ff217728034..93740c7a27f2 100644 --- a/tests/python/unittest/test_codegen_vm_basic.py +++ b/tests/python/unittest/test_codegen_vm_basic.py @@ -32,7 +32,7 @@ def tvm_call_back_get_shape(shape0): print(shape0) assert shape0 == a.shape[0] - n = tvm.var('n') + n = tvm.shape_var('n') Ab = tvm.decl_buffer((n, ), tvm.float32) stmt = tvm.make.Evaluate(tvm.call_packed("tvm_call_back_get_shape", Ab.shape[0])) fapi = tvm.ir_pass.MakeAPI(stmt, "print_shape", [Ab], 0, True) @@ -47,9 +47,9 @@ def tvm_stack_vm_print(*x): def test_stack_vm_loop(): dtype = 'int64' - n = tvm.var('n') + n = tvm.shape_var('n') Ab = tvm.decl_buffer((n, ), dtype) - i = tvm.var('i') + i = tvm.shape_var('i') ib = tvm.ir_builder.create() A = ib.buffer_ptr(Ab) @@ -69,7 +69,7 @@ def check(f): def test_stack_vm_cond(): dtype = 'int64' - n = tvm.var('n') + n = tvm.shape_var('n') Ab = tvm.decl_buffer((n, ), dtype) ib = tvm.ir_builder.create() @@ -93,9 +93,9 @@ def check(f): def test_vm_parallel(): dtype = 'int64' - n = tvm.var('n') + n = tvm.shape_var('n') Ab = tvm.decl_buffer((n, ), dtype) - i = tvm.var('i') + i = tvm.shape_var('i') ib = tvm.ir_builder.create() A = ib.buffer_ptr(Ab) with ib.for_range(0, n, "i", for_type="parallel") as i: diff --git a/tests/python/unittest/test_ir_builder.py b/tests/python/unittest/test_ir_builder.py index 8b9da90c914c..dc54cfd20b99 100644 --- a/tests/python/unittest/test_ir_builder.py +++ b/tests/python/unittest/test_ir_builder.py @@ -19,7 +19,7 @@ def test_for(): ib = tvm.ir_builder.create() - n = tvm.var("n") + n = tvm.shape_var("n") A = ib.allocate("float32", n, name="A", scope="global") with ib.for_range(0, n, name="i") as i: A[i] = A[i] + 1 @@ -39,7 +39,7 @@ def test_for(): def test_if(): ib = tvm.ir_builder.create() - n = tvm.var("n") + n = tvm.shape_var("n") A = ib.pointer("float32", 
name="A") tmod = tvm.truncmod with ib.for_range(0, n, name="i") as i: @@ -60,7 +60,7 @@ def test_if(): def test_prefetch(): A = tvm.placeholder((10, 20), name="A") ib = tvm.ir_builder.create() - n = tvm.var("n") + n = tvm.shape_var("n") with ib.for_range(0, n, name="i") as i: ib.emit( @@ -105,7 +105,7 @@ def check_target(target): check_target("llvm") def test_gpu(): - n = tvm.var('n') + n = tvm.shape_var('n') dtype = "float32" A = tvm.placeholder((n,), name='A') B = tvm.placeholder((n,), name='B') diff --git a/tests/python/unittest/test_lang_buffer.py b/tests/python/unittest/test_lang_buffer.py index f0f246139beb..a465b9e6ca07 100644 --- a/tests/python/unittest/test_lang_buffer.py +++ b/tests/python/unittest/test_lang_buffer.py @@ -19,9 +19,9 @@ import numpy as np def test_buffer(): - m = tvm.var('m') - n = tvm.var('n') - l = tvm.var('l') + m = tvm.shape_var('m') + n = tvm.shape_var('n') + l = tvm.shape_var('l') Ab = tvm.decl_buffer((m, n), tvm.float32) Bb = tvm.decl_buffer((n, l), tvm.float32) @@ -31,8 +31,8 @@ def test_buffer(): def test_buffer_access_ptr(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') Ab = tvm.decl_buffer((m, n), tvm.float32, strides=[n + 1 , 1]) aptr = Ab.access_ptr("rw") assert tvm.ir_pass.Equal(aptr.args[3], Ab.strides[0] * m) @@ -43,14 +43,14 @@ def test_buffer_access_ptr(): def test_buffer_access_ptr_offset(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') Ab = tvm.decl_buffer((m, n), tvm.float32) aptr = Ab.access_ptr("rw", offset=100) offset = tvm.ir_pass.Simplify(aptr.args[2]) assert tvm.ir_pass.Equal(offset, 100) assert aptr.args[4].value == Buffer.READ | Buffer.WRITE - v = tvm.var('int32') + v = tvm.shape_var('int32') aptr = Ab.access_ptr("rw", offset=100 + 100 + v) offset = tvm.ir_pass.Simplify(aptr.args[2]) assert tvm.ir_pass.Equal(offset, 200 + v) @@ -62,8 +62,8 @@ def test_buffer_access_ptr_offset(): def test_buffer_access_ptr_extent(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') Ab = tvm.decl_buffer((m, n), tvm.float32) aptr = Ab.access_ptr("rw") assert tvm.ir_pass.Equal(aptr.args[3], m * n) @@ -75,8 +75,8 @@ def test_buffer_access_ptr_extent(): def test_buffer_vload(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') Ab = tvm.decl_buffer((m, n), tvm.float32, elem_offset=100) load = Ab.vload([2, 3]) offset = tvm.ir_pass.Simplify(load.index) @@ -84,11 +84,11 @@ def test_buffer_vload(): def test_buffer_index_merge_mult_mod(): - m = tvm.var('m') - n = tvm.var('n') - s = tvm.var('s') - k0 = tvm.var('k0') - k1 = tvm.var('k1') + m = tvm.shape_var('m') + n = tvm.shape_var('n') + s = tvm.shape_var('s') + k0 = tvm.shape_var('k0') + k1 = tvm.shape_var('k1') A = tvm.decl_buffer((m, n), tvm.float32) A_stride = tvm.decl_buffer((m, n), tvm.float32, strides=(s, 1)) def assert_simplified_equal(index_simplified, index_direct): @@ -123,9 +123,9 @@ def assert_simplified_equal(index_simplified, index_direct): def test_buffer_broadcast(): - m0, m1, m2 = tvm.var("m0"), tvm.var("m1"), tvm.var("m2") - n0, n1, n2 = tvm.var("n0"), tvm.var("n1"), tvm.var("n2") - o0, o1, o2 = tvm.var("o0"), tvm.var("o1"), tvm.var("o2") + m0, m1, m2 = tvm.shape_var("m0"), tvm.shape_var("m1"), tvm.shape_var("m2") + n0, n1, n2 = tvm.shape_var("n0"), tvm.shape_var("n1"), tvm.shape_var("n2") + o0, o1, o2 = tvm.shape_var("o0"), tvm.shape_var("o1"), tvm.shape_var("o2") A = tvm.placeholder((m0, m1, m2), name='A') B = tvm.placeholder((n0, n1, 
n2), name='B') @@ -151,9 +151,9 @@ def check(): def test_buffer_broadcast_expr(): - n0, m0, x = tvm.var('n0'), tvm.var('m0'), tvm.var('x') - n1, m1 = tvm.var('n1'), tvm.var('m1') - o0, o1 = tvm.var('o0'), tvm.var('o1') + n0, m0, x = tvm.shape_var('n0'), tvm.shape_var('m0'), tvm.shape_var('x') + n1, m1 = tvm.shape_var('n1'), tvm.shape_var('m1') + o0, o1 = tvm.shape_var('o0'), tvm.shape_var('o1') A = tvm.placeholder((m0, n0), name='A') B = tvm.placeholder((m1, n1), name='B') diff --git a/tests/python/unittest/test_lang_group.py b/tests/python/unittest/test_lang_group.py index dc6837e2be46..ab4d7a47a9a6 100644 --- a/tests/python/unittest/test_lang_group.py +++ b/tests/python/unittest/test_lang_group.py @@ -18,8 +18,8 @@ import tvm def test_scan_group(): - m = tvm.var("m") - n = tvm.var("n") + m = tvm.shape_var("m") + n = tvm.shape_var("n") x = tvm.compute((m, n), lambda i, j: tvm.const(1, "float32"), name="x") s_state = tvm.placeholder((m, n)) s_init = tvm.compute((1, n), lambda _, i: x[0, i]) @@ -50,8 +50,8 @@ def test_scan_group(): pass def test_compute_group(): - m = tvm.var("m") - n = tvm.var("n") + m = tvm.shape_var("m") + n = tvm.shape_var("n") x = tvm.compute((m, n), lambda i, j: tvm.const(1, "float32"), name="x") x1 = tvm.compute(x.shape, lambda *i: x(*i) + 1, name="x1") x2 = tvm.compute(x.shape, lambda *i: x1(*i) + 2, name="x2") @@ -64,8 +64,8 @@ def test_compute_group(): assert g.num_child_stages == 2 def test_nest_group(): - m = tvm.var("m") - n = tvm.var("n") + m = tvm.shape_var("m") + n = tvm.shape_var("n") x = tvm.compute((m, n), lambda i, j: tvm.const(1, "float32"), name="x") x1 = tvm.compute(x.shape, lambda *i: x(*i) + 1, name="x1") x2 = tvm.compute(x.shape, lambda *i: x1(*i) + 2, name="x2") diff --git a/tests/python/unittest/test_lang_schedule.py b/tests/python/unittest/test_lang_schedule.py index 0a653066bff7..2ed627906cb7 100644 --- a/tests/python/unittest/test_lang_schedule.py +++ b/tests/python/unittest/test_lang_schedule.py @@ -19,9 +19,9 @@ import pickle as pkl def test_schedule_create(): - m = tvm.var('m') - n = tvm.var('n') - l = tvm.var('l') + m = tvm.shape_var('m') + n = tvm.shape_var('n') + l = tvm.shape_var('l') A = tvm.placeholder((m, l), name='A') B = tvm.placeholder((n, l), name='B') AA = tvm.compute((m, l), lambda i, j: A[i, j]) @@ -49,7 +49,7 @@ def test_schedule_create(): def test_reorder(): - m = tvm.var('m') + m = tvm.shape_var('m') A = tvm.placeholder((m,), name='A') T = tvm.compute(m, lambda i: A[i+1]) @@ -69,7 +69,7 @@ def test_reorder(): pass def test_split(): - m = tvm.var('m') + m = tvm.shape_var('m') A = tvm.placeholder((m,), name='A') T = tvm.compute((m,), lambda i: A[i]) @@ -79,8 +79,8 @@ def test_split(): def test_tile(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') A = tvm.placeholder((m, n), name='A') T = tvm.compute((m, n), lambda i, j: A[i, j]) @@ -90,8 +90,8 @@ def test_tile(): def test_fuse(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') A = tvm.placeholder((m, n), name='A') T = tvm.compute((m, n), lambda i, j: A[i, j]) @@ -119,8 +119,8 @@ def test_singleton(): print("test singleton fin") def test_vectorize(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') A = tvm.placeholder((m, n), name='A') T = tvm.compute((m, n), lambda i, j: A[i, j]) @@ -156,7 +156,7 @@ def test_pragma(): def test_rfactor(): - n = tvm.var('n') + n = tvm.shape_var('n') k1 = tvm.reduce_axis((0, n), name="k1") k2 = tvm.reduce_axis((0, n), 
name="k2") A = tvm.placeholder((n, n, n), name='A') @@ -214,10 +214,10 @@ def intrin_func(ins, outs): assert(s[z].iter_var_attrs[xi].iter_type == tvm.schedule.IterVar.Tensorized) def test_tensor_intrin_scalar_params(): - n = tvm.var("n") + n = tvm.shape_var("n") x = tvm.placeholder((n,), name='x') - v = tvm.var("v") - w = tvm.var("w") + v = tvm.shape_var("v") + w = tvm.shape_var("w") z = tvm.compute((n,), lambda i: x[i]*v + w, name='z') def intrin_func(ins, outs, sp): diff --git a/tests/python/unittest/test_lang_tag.py b/tests/python/unittest/test_lang_tag.py index a87971657a3f..1644a2ed55ee 100644 --- a/tests/python/unittest/test_lang_tag.py +++ b/tests/python/unittest/test_lang_tag.py @@ -33,9 +33,9 @@ def compute_conv(data, weight): axis=[ic, dh, dw])) def test_with(): - n = tvm.var('n') - m = tvm.var('m') - l = tvm.var('l') + n = tvm.shape_var('n') + m = tvm.shape_var('m') + l = tvm.shape_var('l') A = tvm.placeholder((n, l), name='A') B = tvm.placeholder((m, l), name='B') @@ -56,12 +56,12 @@ def test_with(): def test_decorator(): - n = tvm.var('n') - c = tvm.var('c') - h = tvm.var('h') - w = tvm.var('w') - kh = tvm.var('kh') - kw = tvm.var('kw') + n = tvm.shape_var('n') + c = tvm.shape_var('c') + h = tvm.shape_var('h') + w = tvm.shape_var('w') + kh = tvm.shape_var('kh') + kw = tvm.shape_var('kw') A = tvm.placeholder((n, c, h, w), name='A') B = tvm.placeholder((c, c, kh, kw), name='B') @@ -70,12 +70,12 @@ def test_decorator(): assert len(C.op.attrs) == 0 def test_nested(): - n = tvm.var('n') - c = tvm.var('c') - h = tvm.var('h') - w = tvm.var('w') - kh = tvm.var('kh') - kw = tvm.var('kw') + n = tvm.shape_var('n') + c = tvm.shape_var('c') + h = tvm.shape_var('h') + w = tvm.shape_var('w') + kh = tvm.shape_var('kh') + kw = tvm.shape_var('kw') A = tvm.placeholder((n, c, h, w), name='A') B = tvm.placeholder((c, c, kh, kw), name='B') diff --git a/tests/python/unittest/test_lang_tensor.py b/tests/python/unittest/test_lang_tensor.py index 7e9f59bf348d..9b64f4678d4d 100644 --- a/tests/python/unittest/test_lang_tensor.py +++ b/tests/python/unittest/test_lang_tensor.py @@ -18,9 +18,9 @@ from topi.nn.pooling import pool def test_tensor(): - m = tvm.var('m') - n = tvm.var('n') - l = tvm.var('l') + m = tvm.shape_var('m') + n = tvm.shape_var('n') + l = tvm.shape_var('l') A = tvm.placeholder((m, l), name='A') B = tvm.placeholder((n, l), name='B') T = tvm.compute((m, n, l), lambda i, j, k: A[i, k] * B[j, k]) @@ -37,7 +37,7 @@ def test_tensor(): def test_rank_zero(): - m = tvm.var('m') + m = tvm.shape_var('m') A = tvm.placeholder((m,), name='A') scale = tvm.placeholder((), name='s') k = tvm.reduce_axis((0, m), name="k") @@ -48,7 +48,7 @@ def test_rank_zero(): def test_conv1d(): - n = tvm.var('n') + n = tvm.shape_var('n') A = tvm.placeholder((n+2), name='A') def computeB(ii): i = ii + 1 @@ -57,14 +57,14 @@ def computeB(ii): def test_tensor_slice(): - n = tvm.var('n') + n = tvm.shape_var('n') A = tvm.compute((n, n), lambda i, j: 1) B = tvm.compute((n,), lambda i: A[0][i] + A[0][i]) def test_tensor_reduce_multi_axis(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') A = tvm.placeholder((m, n), name='A') k1 = tvm.reduce_axis((0, n), "k") k2 = tvm.reduce_axis((0, m), "k") @@ -73,23 +73,23 @@ def test_tensor_reduce_multi_axis(): def test_tensor_comm_reducer(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') A = tvm.placeholder((m, n), name='A') k = tvm.reduce_axis((0, n), "k") mysum = tvm.comm_reducer(lambda x, y: x+y, lambda t: 
tvm.const(0, dtype=t)) C = tvm.compute((m,), lambda i: mysum(A[i, k], axis=k)) def test_tensor_comm_reducer_overload(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') mysum = tvm.comm_reducer(lambda x, y: x+y, lambda t: tvm.const(0, dtype=t)) sum_res = mysum(m, n) def test_tensor_reduce(): - m = tvm.var('m') - n = tvm.var('n') - l = tvm.var('l') + m = tvm.shape_var('m') + n = tvm.shape_var('n') + l = tvm.shape_var('l') A = tvm.placeholder((m, l), name='A') B = tvm.placeholder((n, l), name='B') T = tvm.compute((m, n, l), lambda i, j, k: A[i, k] * B[j, k]) @@ -175,8 +175,8 @@ def intrin_func(ins, outs): assert isinstance(stmt.body.body.body[1].body, tvm.stmt.Evaluate) def test_tensor_scan(): - m = tvm.var("m") - n = tvm.var("n") + m = tvm.shape_var("m") + n = tvm.shape_var("n") x = tvm.placeholder((m, n)) s = tvm.placeholder((m, n)) res = tvm.scan(tvm.compute((1, n), lambda _, i: x[0, i]), @@ -185,8 +185,8 @@ def test_tensor_scan(): assert tuple(res.shape) == (m, n) def test_scan_multi_out(): - m = tvm.var("m") - n = tvm.var("n") + m = tvm.shape_var("m") + n = tvm.shape_var("n") x1 = tvm.placeholder((m, n)) s1 = tvm.placeholder((m, n)) x2 = tvm.placeholder((m, n)) @@ -206,7 +206,7 @@ def test_scan_multi_out(): assert isinstance(zz, tvm.tensor.ScanOp) def test_extern(): - m = tvm.var('m') + m = tvm.shape_var('m') A = tvm.placeholder((m,), name='A') def extern_func(ins, outs): @@ -217,7 +217,7 @@ def extern_func(ins, outs): def test_extern_multi_out(): - m = tvm.var('m') + m = tvm.shape_var('m') A = tvm.placeholder((m,), name='A') B = tvm.compute((m,), lambda i: A[i] * 10) @@ -230,8 +230,8 @@ def extern_func(ins, outs): assert(res[1].value_index == 1) def test_tuple_inputs(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') A0 = tvm.placeholder((m, n), name='A0') A1 = tvm.placeholder((m, n), name='A1') T0, T1 = tvm.compute((m, n), lambda i, j: (A0[i, j] * 2, A1[i, j] * 3), name='T') @@ -244,8 +244,8 @@ def test_tuple_inputs(): assert(T1.value_index == 1) def test_tuple_with_different_deps(): - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') A0 = tvm.placeholder((m, n), name='A1') A1 = tvm.placeholder((m, n), name='A2') B0, B1 = tvm.compute((m, n), lambda i, j: (A0[i, j] * 2, A1[i, j] * 3), name='B') diff --git a/tests/python/unittest/test_lang_tensor_overload_op.py b/tests/python/unittest/test_lang_tensor_overload_op.py index 3cb6e652dc28..9a8cd873b38f 100644 --- a/tests/python/unittest/test_lang_tensor_overload_op.py +++ b/tests/python/unittest/test_lang_tensor_overload_op.py @@ -87,7 +87,7 @@ def test_combination(): def verify_tensor_scalar_bop(shape, typ="add"): """Verify non-constant Tensor and scalar binary operations.""" - sh = [tvm.var('n%d' % i) for i in range(0, len(shape))] + sh = [tvm.shape_var('n%d' % i) for i in range(0, len(shape))] k = tvm.var('k') A = tvm.placeholder(sh, name='A') if typ == "add": diff --git a/tests/python/unittest/test_lang_verify_compute.py b/tests/python/unittest/test_lang_verify_compute.py index f06131ceb8cd..18344e073987 100644 --- a/tests/python/unittest/test_lang_verify_compute.py +++ b/tests/python/unittest/test_lang_verify_compute.py @@ -17,8 +17,8 @@ import tvm def test_verify_compute(): - n = tvm.var("n") - m = tvm.var("m") + n = tvm.shape_var("n") + m = tvm.shape_var("m") A = tvm.placeholder((n, m), name='A') k = tvm.reduce_axis((0, m), "k") k_ = tvm.reduce_axis((0, m-1), "k_") diff --git a/tests/python/unittest/test_module_load.py 
b/tests/python/unittest/test_module_load.py index ba5044825308..b0250b666f3d 100644 --- a/tests/python/unittest/test_module_load.py +++ b/tests/python/unittest/test_module_load.py @@ -46,7 +46,7 @@ def test_dso_module_load(): temp = util.tempdir() def save_object(names): - n = tvm.var('n') + n = tvm.shape_var('n') Ab = tvm.decl_buffer((n, ), dtype) i = tvm.var('i') # for i in 0 to n-1: diff --git a/tests/python/unittest/test_pass_bound_checkers.py b/tests/python/unittest/test_pass_bound_checkers.py index ada81690d05d..e7b649cc3bb2 100644 --- a/tests/python/unittest/test_pass_bound_checkers.py +++ b/tests/python/unittest/test_pass_bound_checkers.py @@ -46,7 +46,7 @@ def lower(sch, args): @pytest.mark.xfail def test_out_of_bounds_llvm(index_a, index_b): - n = tvm.var("n") + n = tvm.shape_var("n") A = tvm.placeholder ((n,), name='A') B = tvm.placeholder ((n,), name='B') C = tvm.compute(A.shape, lambda i: A[i + index_a] + B[i + index_b], name='C') @@ -63,7 +63,7 @@ def test_out_of_bounds_llvm(index_a, index_b): fadd (a, b, c) def test_in_bounds_llvm(): - n = tvm.var("n") + n = tvm.shape_var("n") A = tvm.placeholder ((n,), name='A') B = tvm.placeholder ((n,), name='B') C = tvm.compute(A.shape, lambda i: A[i] + B[i], name='C') @@ -128,7 +128,7 @@ def test_in_bounds_vectorize_llvm(): tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1) def test_in_bounds_loop_partition_basic_llvm(): - n = tvm.var('n') + n = tvm.shape_var('n') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((n, ), name='B') @@ -147,7 +147,7 @@ def test_in_bounds_loop_partition_basic_llvm(): @pytest.mark.xfail def test_out_of_bounds_loop_partition_basic_llvm(index_a, index_b): - n = tvm.var('n') + n = tvm.shape_var('n') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((n, ), name='B') @@ -331,9 +331,9 @@ def test_out_of_bounds_conv_llvm(data_offsets, kernel_offsets, loop_tiling=False f(data_input, kernel_input, conv_out) def test_in_bounds_tensors_with_same_shapes1D_llvm(): - n = tvm.var('n') - k = tvm.var('k') - m = tvm.var('m') + n = tvm.shape_var('n') + k = tvm.shape_var('k') + m = tvm.shape_var('m') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((k, ), name='B') @@ -351,9 +351,9 @@ def test_in_bounds_tensors_with_same_shapes1D_llvm(): @pytest.mark.xfail def test_out_of_bounds_tensors_with_diff_shapes1D_llvm(a_shape, b_shape, c_shape): - n = tvm.var('n') - k = tvm.var('k') - m = tvm.var('m') + n = tvm.shape_var('n') + k = tvm.shape_var('k') + m = tvm.shape_var('m') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((k, ), name='B') @@ -370,9 +370,9 @@ def test_out_of_bounds_tensors_with_diff_shapes1D_llvm(a_shape, b_shape, c_shape f(a, b, t) def test_in_bounds_tensors_with_same_shapes2D_llvm(): - n = tvm.var('n') - k = tvm.var('k') - m = tvm.var('m') + n = tvm.shape_var('n') + k = tvm.shape_var('k') + m = tvm.shape_var('m') A = tvm.placeholder((n, n), name='A') B = tvm.placeholder((k, k), name='B') @@ -390,9 +390,9 @@ def test_in_bounds_tensors_with_same_shapes2D_llvm(): @pytest.mark.xfail def test_out_of_bounds_tensors_with_diff_shapes2D_llvm(a_shape, b_shape, c_shape): - n = tvm.var('n') - k = tvm.var('k') - m = tvm.var('m') + n = tvm.shape_var('n') + k = tvm.shape_var('k') + m = tvm.shape_var('m') A = tvm.placeholder((n, n), name='A') B = tvm.placeholder((k, k), name='B') @@ -409,9 +409,9 @@ def test_out_of_bounds_tensors_with_diff_shapes2D_llvm(a_shape, b_shape, c_shape f(a, b, t) def test_in_bounds_tensors_with_same_shapes3D_llvm(): - n = tvm.var('n') - k = tvm.var('k') - m = 
tvm.var('m') + n = tvm.shape_var('n') + k = tvm.shape_var('k') + m = tvm.shape_var('m') A = tvm.placeholder((n, n, n), name='A') B = tvm.placeholder((k, k, k), name='B') @@ -429,9 +429,9 @@ def test_in_bounds_tensors_with_same_shapes3D_llvm(): @pytest.mark.xfail def test_out_of_bounds_tensors_with_diff_shapes3D_llvm(a_shape, b_shape, c_shape): - n = tvm.var('n') - k = tvm.var('k') - m = tvm.var('m') + n = tvm.shape_var('n') + k = tvm.shape_var('k') + m = tvm.shape_var('m') A = tvm.placeholder((n, n, n), name='A') B = tvm.placeholder((k, k, k), name='B') diff --git a/tests/python/unittest/test_pass_decorate_device_scope.py b/tests/python/unittest/test_pass_decorate_device_scope.py index d36fe8d37964..f2cac34b7970 100644 --- a/tests/python/unittest/test_pass_decorate_device_scope.py +++ b/tests/python/unittest/test_pass_decorate_device_scope.py @@ -17,8 +17,8 @@ import tvm def test_decorate_device(): - m = tvm.var('m') - l = tvm.var('l') + m = tvm.shape_var('m') + l = tvm.shape_var('l') A = tvm.placeholder((m, l), name='A') A1 = tvm.compute((m, l), lambda i, j: A[i, j], name='A1') diff --git a/tests/python/unittest/test_pass_inline.py b/tests/python/unittest/test_pass_inline.py index e8b86fc75d7e..fa48d6cb548b 100644 --- a/tests/python/unittest/test_pass_inline.py +++ b/tests/python/unittest/test_pass_inline.py @@ -17,7 +17,7 @@ import tvm def test_inline(): - m = tvm.var('m') + m = tvm.shape_var('m') A = tvm.placeholder((m,), name='A') T = tvm.compute((m,), lambda i,: A[i] + 10, name='T') stmt = tvm.make.Evaluate(T[10] + 11 * T[100]) @@ -36,7 +36,7 @@ def test_inline(): pass def test_inline2(): - m = tvm.var('m') + m = tvm.shape_var('m') A = tvm.placeholder((m,), name='A') T = tvm.compute((m,), lambda i,: A[i] + 10, name='T') stmt = tvm.make.Evaluate(tvm.exp(T[10]) + 11 * T[100]) diff --git a/tests/python/unittest/test_pass_makeapi.py b/tests/python/unittest/test_pass_makeapi.py index 77a97d8bffa8..77e1e5852443 100644 --- a/tests/python/unittest/test_pass_makeapi.py +++ b/tests/python/unittest/test_pass_makeapi.py @@ -19,7 +19,7 @@ def test_makeapi(): """Not yet working, mock design""" - n = tvm.var('n') + n = tvm.shape_var('n') A = tvm.placeholder((n,), name='A') B = tvm.placeholder((n,), name='B') C = tvm.compute(A.shape, lambda *i: A(*i) + B(*i), name='C') diff --git a/tests/python/unittest/test_pass_split_host_device.py b/tests/python/unittest/test_pass_split_host_device.py index c32485d3973b..4956332d3161 100644 --- a/tests/python/unittest/test_pass_split_host_device.py +++ b/tests/python/unittest/test_pass_split_host_device.py @@ -19,7 +19,7 @@ @pytest.mark.xfail def test_loop_dependent_allocate(): - N = tvm.var("N") + N = tvm.shape_var("N") A = tvm.placeholder((2*N,), "float32", "A") C = tvm.compute((N, ), lambda i: A[2*i] + A[i+1], name='C') s = tvm.create_schedule(C.op) diff --git a/tests/python/unittest/test_pass_storage_flatten.py b/tests/python/unittest/test_pass_storage_flatten.py index 02edfe7d3261..3f0d31718ffb 100644 --- a/tests/python/unittest/test_pass_storage_flatten.py +++ b/tests/python/unittest/test_pass_storage_flatten.py @@ -17,8 +17,8 @@ import tvm def test_flatten2(): - m = tvm.var('m') - l = tvm.var('l') + m = tvm.shape_var('m') + l = tvm.shape_var('l') A = tvm.placeholder((m, l), name='A') A1 = tvm.compute((m, l), lambda i, j: A[i, j], name='A1') A2 = tvm.compute((m, l), lambda i, j: A1[i, j] + 3, name='A2') @@ -38,8 +38,8 @@ def test_flatten2(): def test_flatten_prefetch(): A = tvm.placeholder((25, 100, 4), name = 'A') _A= tvm.decl_buffer(A.shape, A.dtype, name 
= 'A'); - i = tvm.var('i') - j = tvm.var('j') + i = tvm.shape_var('i') + j = tvm.shape_var('j') region = [tvm.make.range_by_min_extent(i[0], i[1]) for i in [(i, 2), (j, 8), (0, 4)]] stmt = tvm.make.Prefetch(A.op, 0, A.dtype, region) stmt = tvm.ir_pass.StorageFlatten(stmt, {A: _A}, 64) diff --git a/tests/python/unittest/test_pass_storage_sync.py b/tests/python/unittest/test_pass_storage_sync.py index 3202d7b7d3a8..a9ce5ddca710 100644 --- a/tests/python/unittest/test_pass_storage_sync.py +++ b/tests/python/unittest/test_pass_storage_sync.py @@ -17,8 +17,8 @@ import tvm def test_storage_sync(): - m = tvm.var('m') - l = tvm.var('l') + m = tvm.shape_var('m') + l = tvm.shape_var('l') A = tvm.placeholder((m, l), name='A') A1 = tvm.compute((m, l), lambda i, j: A[i, j], name='A1') @@ -54,7 +54,7 @@ def meminfo_cache(): max_num_bits=128, head_address=tvm.call_extern("handle", "global_cache")) ib = tvm.ir_builder.create() - n = tvm.var("n") + n = tvm.shape_var("n") cp = tvm.thread_axis((0, 1), "cop") A = ib.allocate("float32", 128, name="A", scope="global.cache") with ib.for_range(0, n, name="i") as i: @@ -76,7 +76,7 @@ def meminfo_cache(): def test_coproc_sync2(): ib = tvm.ir_builder.create() - n = tvm.var("n") + n = tvm.shape_var("n") cp = tvm.thread_axis((0, 1), "cop") ty = tvm.thread_axis("cthread") A = ib.allocate("float32", 128, name="A") @@ -102,7 +102,7 @@ def __check_list(tvm_array, py_list): return True ib = tvm.ir_builder.create() - n = tvm.var("n") + n = tvm.shape_var("n") cp = tvm.thread_axis((0, 1), "cop") A = ib.allocate("float32", 128, name="A", scope="global.cache") with ib.for_range(0, n, name="i") as i: diff --git a/tests/python/unittest/test_pass_unroll.py b/tests/python/unittest/test_pass_unroll.py index c94ffe0bde14..856daa5e89bf 100644 --- a/tests/python/unittest/test_pass_unroll.py +++ b/tests/python/unittest/test_pass_unroll.py @@ -21,7 +21,7 @@ def test_unroll_loop(): ib = tvm.ir_builder.create() dtype = 'int64' - n = tvm.var('n') + n = tvm.shape_var('n') Ab = tvm.decl_buffer((n, ), dtype) Aptr = ib.buffer_ptr(Ab) # for i in 0 to n-1: @@ -54,7 +54,7 @@ def test_unroll_loop(): def test_unroll_fake_loop(): ib = tvm.ir_builder.create() dtype = 'int32' - n = tvm.var('n') + n = tvm.shape_var('n') Ab = tvm.decl_buffer((n, ), dtype) Aptr = ib.buffer_ptr(Ab) # for i in 0 to n-1: @@ -68,7 +68,7 @@ def test_unroll_fake_loop(): assert isinstance(ret[0], tvm.stmt.Store) def test_unroll_single_count_loops(): - n = tvm.var('n') + n = tvm.shape_var('n') A = tvm.placeholder((n,), name='A') B = tvm.compute((n,), lambda *i: A(*i), name='B') s = tvm.create_schedule(B.op) From ecac35a3054e0d5f1421b79097efc7891fda9ff4 Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Fri, 10 Jan 2020 11:19:14 -0800 Subject: [PATCH 05/17] use tvm.shape_var in integration & relay tests --- tests/python/contrib/test_sparse.py | 8 ++-- tests/python/integration/test_ewise.py | 6 +-- tests/python/integration/test_gemm.py | 1 - tests/python/integration/test_reduce.py | 8 ++-- tests/python/integration/test_scan.py | 4 +- tests/python/relay/test_ir_text_printer.py | 2 +- tests/python/relay/test_op_level1.py | 18 ++++---- tests/python/relay/test_op_level10.py | 2 +- tests/python/relay/test_op_level2.py | 49 +++++++++++----------- tests/python/relay/test_op_level3.py | 10 ++--- tests/python/relay/test_op_level4.py | 2 +- tests/python/relay/test_op_level5.py | 14 +++---- 12 files changed, 62 insertions(+), 62 deletions(-) diff --git a/tests/python/contrib/test_sparse.py b/tests/python/contrib/test_sparse.py index 
bc815f6a0718..9a89c8b9092e 100644 --- a/tests/python/contrib/test_sparse.py +++ b/tests/python/contrib/test_sparse.py @@ -25,8 +25,8 @@ def test_static_tensor(): stype = 'csr' target = 'llvm' ctx = tvm.context(target, 0) - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') A = tvmsp.placeholder(shape=(m, n), name='A', dtype=dtype) assert(A.stype == 'csr') n = 3 @@ -50,7 +50,7 @@ def test_dynamic_tensor(): stype = 'csr' target = 'llvm' ctx = tvm.context(target, 0) - nr, nc, n = tvm.var('nr'), tvm.var('nc'), tvm.var('n') + nr, nc, n = tvm.shape_var('nr'), tvm.shape_var('nc'), tvm.shape_var('n') A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name='A', dtype=dtype) assert(A.stype == 'csr') C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter') @@ -76,7 +76,7 @@ def test_sparse_array_tuple(): stype = 'csr' target = 'llvm' ctx = tvm.context(target, 0) - nr, nc, n = tvm.var('nr'), tvm.var('nc'), tvm.var('n') + nr, nc, n = tvm.shape_var('nr'), tvm.shape_var('nc'), tvm.shape_var('n') A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name='A', dtype=dtype) assert(A.stype == 'csr') C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter') diff --git a/tests/python/integration/test_ewise.py b/tests/python/integration/test_ewise.py index 31a39c40fa1f..02b6906f44b3 100644 --- a/tests/python/integration/test_ewise.py +++ b/tests/python/integration/test_ewise.py @@ -57,7 +57,7 @@ def check_device(device, host="stackvm"): def test_fmod(): # graph def run(dtype): - n = tvm.var('n') + n = tvm.shape_var('n') A = tvm.placeholder((n,), name='A', dtype=dtype) B = tvm.placeholder((n,), name='B', dtype=dtype) C = tvm.compute(A.shape, lambda *i: tvm.fmod(A(*i), B(*i)), name='C') @@ -140,7 +140,7 @@ def check_device(device, host="stackvm"): def test_log_pow_llvm(): # graph - n = tvm.var('n') + n = tvm.shape_var('n') A = tvm.placeholder((n,), name='A') B = tvm.compute(A.shape, lambda *i: tvm.power(tvm.log(A(*i)), 2.0), name='B') s = tvm.create_schedule(B.op) @@ -207,7 +207,7 @@ def check_device(device): def test_add(): def run(dtype): # graph - n = tvm.var('n') + n = tvm.shape_var('n') A = tvm.placeholder((n,), name='A', dtype=dtype) B = tvm.placeholder((n,), name='B', dtype=dtype) bias = tvm.var("bias", dtype=dtype) diff --git a/tests/python/integration/test_gemm.py b/tests/python/integration/test_gemm.py index 9b1a4bf10e19..d61335f68924 100644 --- a/tests/python/integration/test_gemm.py +++ b/tests/python/integration/test_gemm.py @@ -22,7 +22,6 @@ def test_gemm(): # graph nn = 1024 - n = tvm.var('n') n = tvm.convert(nn) m = n l = n diff --git a/tests/python/integration/test_reduce.py b/tests/python/integration/test_reduce.py index acbec36c510e..bdaed442f46a 100644 --- a/tests/python/integration/test_reduce.py +++ b/tests/python/integration/test_reduce.py @@ -21,8 +21,8 @@ def test_reduce_prims(): def test_prim(reducer, np_reducer): # graph - n = tvm.var('n') - m = tvm.var('m') + n = tvm.shape_var('n') + m = tvm.shape_var('m') A = tvm.placeholder((n, m), name='A') R = tvm.compute((n, ), lambda i: tvm.expr.Select((i > 1), 1, 0), name='R') k = tvm.reduce_axis((0, m)) @@ -242,8 +242,8 @@ def fidentity(t0, t1): argmax = tvm.comm_reducer(fcombine, fidentity, name='argmax') - m = tvm.var('m') - n = tvm.var('n') + m = tvm.shape_var('m') + n = tvm.shape_var('n') idx = tvm.placeholder((m, n), name='idx', dtype='int32') val = tvm.placeholder((m, n), name='val', dtype='float32') k = tvm.reduce_axis((0, n), 'k') diff --git 
a/tests/python/integration/test_scan.py b/tests/python/integration/test_scan.py index 61a78090be65..fba08f9a9f43 100644 --- a/tests/python/integration/test_scan.py +++ b/tests/python/integration/test_scan.py @@ -18,8 +18,8 @@ import numpy as np def test_scan(): - m = tvm.var("m") - n = tvm.var("n") + m = tvm.shape_var("m") + n = tvm.shape_var("n") X = tvm.placeholder((m, n), name="X") s_state = tvm.placeholder((m, n)) s_init = tvm.compute((1, n), lambda _, i: X[0, i]) diff --git a/tests/python/relay/test_ir_text_printer.py b/tests/python/relay/test_ir_text_printer.py index 6426bf3410c8..347d4b1b6d49 100644 --- a/tests/python/relay/test_ir_text_printer.py +++ b/tests/python/relay/test_ir_text_printer.py @@ -70,7 +70,7 @@ def test_env(): def test_meta_data(): - n, c, h, w = tvm.var("n"), 10, 224, 224 + n, c, h, w = tvm.shape_var("n"), 10, 224, 224 x = relay.var("x", shape=(n, c, h, w)) w = relay.var("w") z = relay.nn.conv2d(x, w, diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py index 33c8b90bc7d6..7f81a7a662dc 100644 --- a/tests/python/relay/test_op_level1.py +++ b/tests/python/relay/test_op_level1.py @@ -177,7 +177,7 @@ def test_bias_add(): def test_expand_dims_infer_type(): for dtype in ['float16', 'float32']: - n, t, d = tvm.var("n"), tvm.var("t"), 100 + n, t, d = tvm.shape_var("n"), tvm.shape_var("t"), 100 x = relay.var("x", shape=(n, t, d), dtype=dtype) y = relay.expand_dims(x, axis=2) assert "axis=2" in y.astext() @@ -227,7 +227,7 @@ def test_log_softmax(): def test_concatenate(): for dtype in ['float16', 'float32']: - n, t, d = tvm.var("n"), tvm.var("t"), 100 + n, t, d = tvm.shape_var("n"), tvm.shape_var("t"), 100 x = relay.var("x", shape=(n, t, d)) y = relay.var("y", shape=(n, t, d)) z = relay.concatenate((x, y), axis=-1) @@ -280,7 +280,7 @@ def test_concatenate(): def test_dropout(): for dtype in ['float16', 'float32']: - n, t, d = tvm.var("n"), tvm.var("t"), tvm.var("d") + n, t, d = tvm.shape_var("n"), tvm.shape_var("t"), tvm.shape_var("d") input_ty = relay.TensorType((n, t, d), dtype) x = relay.var("x", input_ty) y = relay.nn.dropout(x, rate=0.75) @@ -342,7 +342,7 @@ def test_dense(): # Dense accuracy for float16 is poor if dtype == 'float16': return - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), dtype)) w = relay.var("w", relay.TensorType((2, w), dtype)) y = relay.nn.dense(x, w, units=2) @@ -350,15 +350,15 @@ def test_dense(): yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype) - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2 + n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), 2 x = relay.var("x", relay.TensorType((n, c, h, w), dtype)) - wh, ww = tvm.var("wh"), tvm.var("ww") + wh, ww = tvm.shape_var("wh"), tvm.shape_var("ww") w = relay.var("w", relay.TensorType((ww, wh), dtype)) y = relay.nn.dense(x, w) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype) - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), 2 + n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), 2 x = relay.var("x", relay.TensorType((n, c, h, w), dtype)) w = relay.var("w", relay.IncompleteType()) y = relay.nn.dense(x, w, units=2) @@ -388,7 +388,7 @@ def test_dense_dtype(): data_dtype = 'uint8' weight_dtype = 'int8' out_dtype = 'uint8' - n, c , h, w = tvm.var("n"), tvm.var("c"), 
tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), data_dtype)) w = relay.var("w", relay.TensorType((2, w), weight_dtype)) y = relay.nn.dense(x, w, units=2, out_dtype=out_dtype) @@ -400,7 +400,7 @@ def test_dense_dtype(): def test_bitserial_dense(): - m, k = tvm.var("m"), tvm.var("k") + m, k = tvm.shape_var("m"), tvm.shape_var("k") x = relay.var("x", relay.TensorType((m, k), "int16")) w = relay.var("w", relay.TensorType((k, 32), "int16")) y = relay.nn.bitserial_dense(x, w, units=32) diff --git a/tests/python/relay/test_op_level10.py b/tests/python/relay/test_op_level10.py index d9e29d8bbd9f..42dd12d18a3f 100644 --- a/tests/python/relay/test_op_level10.py +++ b/tests/python/relay/test_op_level10.py @@ -309,7 +309,7 @@ def verify_batch_matmul(x_shape, y_shape, out_shape, dtype="float32"): tvm.testing.assert_allclose(z.asnumpy(), z_np, rtol=1e-5) def test_batch_matmul(): - b, m, n, k = tvm.var("b"), tvm.var("m"), tvm.var("n"), tvm.var("k") + b, m, n, k = tvm.shape_var("b"), tvm.shape_var("m"), tvm.shape_var("n"), tvm.shape_var("k") x = relay.var("x", relay.TensorType((b, m, k), "float32")) y = relay.var("y", relay.TensorType((b, n, k), "float32")) z = relay.nn.batch_matmul(x, y) diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py index ceb5d093533e..044685697430 100644 --- a/tests/python/relay/test_op_level2.py +++ b/tests/python/relay/test_op_level2.py @@ -33,7 +33,7 @@ def run_infer_type(expr): def test_conv2d_infer_type(): # symbolic in batch dimension - n, c, h, w = tvm.var("n"), 10, 224, 224 + n, c, h, w = tvm.shape_var("n"), 10, 224, 224 x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32")) w = relay.var("w") y = relay.nn.conv2d(x, w, @@ -47,7 +47,7 @@ def test_conv2d_infer_type(): (2, 10, 3, 3), "float32") # infer by shape of w, mixed precision - n, c, h, w = tvm.var("n"), 10, 224, 224 + n, c, h, w = tvm.shape_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), "int8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8")) y = relay.nn.conv2d(x, w, out_dtype="int32") @@ -57,7 +57,7 @@ def test_conv2d_infer_type(): (n, 2, 222, 222), "int32") # infer shape in case of different dtypes for input and weight. - n, c, h, w = tvm.var("n"), 10, 224, 224 + n, c, h, w = tvm.shape_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), "uint8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8")) y = relay.nn.conv2d(x, w, out_dtype="int32") @@ -296,7 +296,7 @@ def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape, def test_conv3d_infer_type(): # symbolic in batch dimension - n, c, d, h, w = tvm.var("n"), 10, 224, 224, 224 + n, c, d, h, w = tvm.shape_var("n"), 10, 224, 224, 224 x = relay.var("x", relay.ty.TensorType((n, c, d, h, w), "float32")) w = relay.var("w") y = relay.nn.conv3d(x, w, @@ -310,7 +310,7 @@ def test_conv3d_infer_type(): (2, 10, 3, 3, 3), "float32") # infer by shape of w, mixed precision - n, c, d, h, w = tvm.var("n"), 10, 224, 224, 224 + n, c, d, h, w = tvm.shape_var("n"), 10, 224, 224, 224 x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8")) y = relay.nn.conv3d(x, w, out_dtype="int32") @@ -320,7 +320,7 @@ def test_conv3d_infer_type(): (n, 2, 222, 222, 222), "int32") # infer shape in case of different dtypes for input and weight. 
- n, c, d, h, w = tvm.var("n"), 10, 224, 224, 224 + n, c, d, h, w = tvm.shape_var("n"), 10, 224, 224, 224 x = relay.var("x", relay.TensorType((n, c, d, h, w), "uint8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8")) y = relay.nn.conv3d(x, w, out_dtype="int32") @@ -435,7 +435,7 @@ def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape, def test_conv2d_transpose_infer_type(): # symbolic in batch dimension - n, c, h, w = tvm.var("n"), 10, 10, 12 + n, c, h, w = tvm.shape_var("n"), 10, 10, 12 x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) w = relay.var("w", relay.IncompleteType()) y = relay.nn.conv2d_transpose(x, w, @@ -450,7 +450,7 @@ def test_conv2d_transpose_infer_type(): (10, 15, 3, 3), "float32") # infer by shape of w, mixed precision - n, h, w, c = tvm.var("n"), 10, 10, 12 + n, h, w, c = tvm.shape_var("n"), 10, 10, 12 x = relay.var("x", relay.TensorType((n, h, w, c), "float32")) w = relay.var("w", relay.TensorType((12, 11, 5, 5), "float32")) y = relay.nn.conv2d_transpose(x, w, @@ -535,7 +535,7 @@ def test_conv1d_transpose_ncw_run(): def test_upsampling_infer_type(): - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") scale = tvm.const(2.0, "float64") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear") @@ -544,14 +544,15 @@ def test_upsampling_infer_type(): assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast("int32", tvm.round(h*scale)), tvm.expr.Cast("int32", tvm.round(w*scale))), "float32") - n, c = tvm.var("n"), tvm.var("c") + n, c = tvm.shape_var("n"), tvm.shape_var("c") x = relay.var("x", relay.TensorType((n, c, 100, 200), "float32")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200, 400), "float32") def test_upsampling3d_infer_type(): - n, c, d, h, w = tvm.var("n"), tvm.var("c"), tvm.var("d"), tvm.var("h"), tvm.var("w") + n, c, d, h, w = tvm.shape_var("n"), tvm.shape_var("c"),\ + tvm.shape_var("d"), tvm.shape_var("h"), tvm.shape_var("w") scale = tvm.const(2.0, "float64") x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear") @@ -561,14 +562,14 @@ def test_upsampling3d_infer_type(): tvm.expr.Cast("int32", tvm.round(h*scale)), tvm.expr.Cast("int32", tvm.round(w*scale))), "float32") - n, c = tvm.var("n"), tvm.var("c") + n, c = tvm.shape_var("n"), tvm.shape_var("c") x = relay.var("x", relay.TensorType((n, c, 100, 100, 200), "float32")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200, 200, 400), "float32") def _test_pool2d(opfunc, reffunc): - n, c, h, w = tvm.var("n"), 10, 224, 224 + n, c, h, w = tvm.shape_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = opfunc(x, pool_size=(1, 1)) assert "pool_size=" in y.astext() @@ -588,7 +589,7 @@ def _test_pool2d(opfunc, reffunc): tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_pool2d_int(opfunc, reffunc, dtype): - n, c, h, w = tvm.var("n"), 10, 224, 224 + n, c, h, w = tvm.shape_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), dtype)) y = opfunc(x, 
pool_size=(1, 1)) assert "pool_size=" in y.astext() @@ -608,13 +609,13 @@ def _test_pool2d_int(opfunc, reffunc, dtype): tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_global_pool2d(opfunc, reffunc): - n, c, h, w = tvm.var("n"), tvm.var("c"), 224, 224 + n, c, h, w = tvm.shape_var("n"), tvm.shape_var("c"), 224, 224 x = relay.var("x", relay.TensorType((n, h, w, c), "float32")) y = opfunc(x, layout="NHWC") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 1, 1, c), "float32") - n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c, h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = opfunc(x) yy = run_infer_type(y) @@ -645,7 +646,7 @@ def test_pool2d(): def test_pool3d(): def _test_pool3d(opfunc): - n, c, d, h, w = tvm.var("n"), 10, 5, 224, 224 + n, c, d, h, w = tvm.shape_var("n"), 10, 5, 224, 224 x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32")) y = opfunc(x, pool_size=(1, 1, 1)) assert "pool_size=" in y.astext() @@ -705,7 +706,7 @@ def test_avg_pool2d_no_count_pad(): tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_flatten_infer_type(): - d1, d2, d3, d4 = tvm.var("d1"), tvm.var("d2"), tvm.var("d3"), tvm.var("d4") + d1, d2, d3, d4 = tvm.shape_var("d1"), tvm.shape_var("d2"), tvm.shape_var("d3"), tvm.shape_var("d4") x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32")) y = relay.nn.batch_flatten(x) yy = run_infer_type(y) @@ -750,7 +751,7 @@ def test_pad_infer_type(): assert yy.checked_type == relay.TensorType((3, 6, 9, 12), "float32") # some symbolic values - n, c, h, w = tvm.var("n"), 2, 3, tvm.var("w") + n, c, h, w = tvm.shape_var("n"), 2, 3, tvm.shape_var("w") t = relay.var("t", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4))) yy = run_infer_type(y) @@ -773,7 +774,7 @@ def _test_run(dtype): _test_run('int32') def test_lrn(): - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") x = relay.var("x", shape=(n, c , h, w)) y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75) "alpha=" in y.astext() @@ -804,7 +805,7 @@ def test_lrn(): tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_l2_normalize(): - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") x = relay.var("x", shape=(n, c , h, w)) y = relay.nn.l2_normalize(x, eps=0.001, axis=[1]) "axis=" in y.astext() @@ -854,7 +855,7 @@ def test_batch_flatten(): def _test_upsampling(layout, method, align_corners=False): - n, c, h, w = tvm.var("n"), 16, 32, 32 + n, c, h, w = tvm.shape_var("n"), 16, 32, 32 scale_h = 2.0 scale_w = 2.0 dtype = "float32" @@ -893,7 +894,7 @@ def test_upsampling(): _test_upsampling("NHWC", "bilinear", True) def _test_upsampling3d(layout, method, coordinate_transformation_mode="half_pixel"): - n, c, d, h, w = tvm.var("n"), 8, 16, 16, 16 + n, c, d, h, w = tvm.shape_var("n"), 8, 16, 16, 16 scale_d = 2.0 scale_h = 2.0 scale_w = 2.0 @@ -1060,7 +1061,7 @@ def _has_fast_int8_instructions(asm, target): def test_bitserial_conv2d_infer_type(): # Basic shape test with ambiguous batch. 
- n, c, h, w = tvm.var("n"), 32, 224, 224 + n, c, h, w = tvm.shape_var("n"), 32, 224, 224 x = relay.var("x", relay.ty.TensorType((n, c, h, w), "int16")) w = relay.var("w", relay.ty.TensorType((32, 32, 3, 3), "int16")) y = relay.nn.bitserial_conv2d( diff --git a/tests/python/relay/test_op_level3.py b/tests/python/relay/test_op_level3.py index 2d92489328af..48281712a7e9 100644 --- a/tests/python/relay/test_op_level3.py +++ b/tests/python/relay/test_op_level3.py @@ -171,7 +171,7 @@ def verify_squeeze(shape, dtype, axis): def test_transpose_infer_type(): - n, t, d = tvm.var("n"), tvm.var("t"), 100 + n, t, d = tvm.shape_var("n"), tvm.shape_var("t"), 100 x = relay.var("x", relay.TensorType((n, t, d), "float32")) y = relay.transpose(x, axes=(1, 0, 2)) assert "axes=" in y.astext() @@ -279,7 +279,7 @@ def test_reshape_like_infer_type(): assert zz.checked_type == relay.TensorType((1, 6), "float32") # symbolic shape - n, c, h, w = tvm.var("n"), 2, 3, tvm.var("w") + n, c, h, w = tvm.shape_var("n"), 2, 3, tvm.shape_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.var("y", relay.TensorType((1, 8, 8), "float32")) z = relay.reshape_like(x, y) @@ -452,7 +452,7 @@ def test_full_like_infer_type(): assert yy.checked_type == relay.TensorType((1, 2, 3), "float32") # symbolic shape - n, c, h, w = tvm.var("n"), 2, 3, tvm.var("w") + n, c, h, w = tvm.shape_var("n"), 2, 3, tvm.shape_var("w") base = relay.var("base", relay.TensorType((n, c, h, w), "float32")) fill = relay.var("fill", relay.TensorType((), "float32")) y = relay.full_like(base, fill) @@ -480,7 +480,7 @@ def verify_full_like(base, fill_value, dtype): def test_infer_type_leaky_relu(): - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.leaky_relu(x, alpha=0.1) "alpha=0.1" in y.astext() @@ -544,7 +544,7 @@ def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"): def test_infer_type_prelu(): - n, c , h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") verify_infer_type_prelu((n, c, h, w), (c,), 1, (n, c, h, w)) verify_infer_type_prelu((n, h, w, c), (c,), 3, (n, h, w, c)) verify_infer_type_prelu((n, c, h, w), None, 1, (n, c, h, w)) diff --git a/tests/python/relay/test_op_level4.py b/tests/python/relay/test_op_level4.py index 431f014c31a0..18b03c23a226 100644 --- a/tests/python/relay/test_op_level4.py +++ b/tests/python/relay/test_op_level4.py @@ -29,7 +29,7 @@ def run_infer_type(expr): def test_binary_op(): def check_binary_op(opfunc, ref): - n = tvm.var("n") + n = tvm.shape_var("n") t1 = relay.TensorType((5, n, 5)) t2 = relay.TensorType((n, 1)) x = relay.var("x", t1) diff --git a/tests/python/relay/test_op_level5.py b/tests/python/relay/test_op_level5.py index 2f2e8523161c..3ecbed732cee 100644 --- a/tests/python/relay/test_op_level5.py +++ b/tests/python/relay/test_op_level5.py @@ -31,7 +31,7 @@ def run_infer_type(expr): return entry if isinstance(expr, relay.Function) else entry.body def test_resize_infer_type(): - n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c, h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "int8")) th, tw = tvm.var("th"), tvm.var("tw") z = relay.image.resize(x, (th, tw)) @@ -146,7 +146,7 
@@ def verify_multibox_prior(x, dshape, ref_res, sizes=(1.0,), x = relay.var("x", relay.TensorType(dshape, "float32")) verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True) - y = relay.var("y", relay.TensorType((tvm.var("n"), 3, 56, 56), "float32")) + y = relay.var("y", relay.TensorType((tvm.shape_var("n"), 3, 56, 56), "float32")) verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True, check_type_only=True) @@ -154,7 +154,7 @@ def verify_multibox_prior(x, dshape, ref_res, sizes=(1.0,), ref_res = get_ref_result(dshape, clip=False) x = relay.var("x", relay.TensorType(dshape, "float32")) verify_multibox_prior(x, dshape, ref_res, clip=False) - y = relay.var("y", relay.TensorType((tvm.var("n"), 24, 32, 32), "float32")) + y = relay.var("y", relay.TensorType((tvm.shape_var("n"), 24, 32, 32), "float32")) verify_multibox_prior(x, dshape, ref_res, clip=False, check_type_only=True) @@ -246,7 +246,7 @@ def verify_nms(x0_data, x1_data, dshape, ref_res, ref_indices_res, np_indices_result = np.array([[3, 0, -1, -1, -1]]) num_anchors = 5 - dshape = (tvm.var("n"), num_anchors, 6) + dshape = (tvm.shape_var("n"), num_anchors, 6) verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result, force_suppress=True, top_k=2, check_type_only=True) dshape = (1, num_anchors, 6) @@ -257,7 +257,7 @@ def verify_nms(x0_data, x1_data, dshape, ref_res, ref_indices_res, [1, 0.7, 30, 60, 50, 80], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]]) np_indices_result = np.array([[3, 0, 1, -1, -1]]) - dshape = (tvm.var("n"), num_anchors, 6) + dshape = (tvm.shape_var("n"), num_anchors, 6) verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result, check_type_only=True) dshape = (1, num_anchors, 6) @@ -320,7 +320,7 @@ def test_default_value(): def test_threshold(): num_anchors = 5 num_classes = 5 - n = tvm.var("n") + n = tvm.shape_var("n") cls_prob = relay.var( "cls_prob", relay.ty.TensorType((n, num_anchors, num_classes), "float32")) @@ -486,7 +486,7 @@ def verify_yolo_reorg(shape, stride, out_shape): assert "stride=" in z.astext() assert zz.checked_type == relay.ty.TensorType(out_shape, "float32") - n, c, h, w = tvm.var("n"), tvm.var("c"), tvm.var("h"), tvm.var("w") + n, c, h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") idxd = tvm.indexdiv verify_yolo_reorg((n, c, 20, 20), 10, (n, c*10*10, 2, 2)) verify_yolo_reorg((n, c, h, w), 2, (n, c*2*2, idxd(h, 2), idxd(w, 2))) From 1b64a5434a874668e792d1fd7997af4780d09544 Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Fri, 10 Jan 2020 16:00:55 -0800 Subject: [PATCH 06/17] add test case; fix Var register --- include/tvm/expr.h | 2 +- src/arithmetic/bound_deducer.cc | 14 +++++++------- src/arithmetic/int_set.cc | 15 +++++++++------ tests/python/unittest/test_arith_intset.py | 15 +++++++++++++++ 4 files changed, 32 insertions(+), 14 deletions(-) diff --git a/include/tvm/expr.h b/include/tvm/expr.h index 40c71cea3614..c6be7bb758df 100644 --- a/include/tvm/expr.h +++ b/include/tvm/expr.h @@ -131,7 +131,7 @@ class VarNode : public PrimExprNode { } static constexpr const char* _type_key = "Variable"; - TVM_DECLARE_FINAL_OBJECT_INFO(VarNode, PrimExprNode); + TVM_DECLARE_BASE_OBJECT_INFO(VarNode, PrimExprNode); }; /*! 
\brief a named variable in TVM */ diff --git a/src/arithmetic/bound_deducer.cc b/src/arithmetic/bound_deducer.cc index 7d87cc5af580..d7069511c96d 100644 --- a/src/arithmetic/bound_deducer.cc +++ b/src/arithmetic/bound_deducer.cc @@ -72,27 +72,27 @@ std::vector GetPath(PrimExpr target, PrimExpr expr) { class BoundRemover : public ExprMutator { public: - Expr Remove(const Expr& e) { + PrimExpr Remove(const PrimExpr& e) { remove_bounded_ = true; return ExprMutator::VisitExpr(e); } - Expr Reset(const Expr& e) { + PrimExpr Reset(const PrimExpr& e) { remove_bounded_ = false; return ExprMutator::VisitExpr(e); } - Expr VisitExpr_(const ShapeVarNode* op) final { - Expr shape_var = GetRef(op); + PrimExpr VisitExpr_(const ShapeVarNode* op) final { + PrimExpr shape_var = GetRef(op); if (remove_bounded_) { - Expr var = VarNode::make(op->dtype, op->name_hint); + PrimExpr var = VarNode::make(op->dtype, op->name_hint); bounded_var_map_[var.as()] = shape_var; return var; } return shape_var; } - Expr VisitExpr_(const VarNode* op) final { + PrimExpr VisitExpr_(const VarNode* op) final { if (!remove_bounded_ && bounded_var_map_.count(op)) { return bounded_var_map_[op]; } @@ -101,7 +101,7 @@ class BoundRemover : public ExprMutator { private: bool remove_bounded_ = false; - std::unordered_map bounded_var_map_; + std::unordered_map bounded_var_map_; }; enum CompareOp {kGreater, kLess, kEqual}; diff --git a/src/arithmetic/int_set.cc b/src/arithmetic/int_set.cc index d87ed6350304..28651ed46f76 100644 --- a/src/arithmetic/int_set.cc +++ b/src/arithmetic/int_set.cc @@ -536,9 +536,12 @@ class IntervalSetEvaluator : return set->min_value.same_as(value) && set->max_value.same_as(value); } - bool IsVar(const Expr& op) { - // Var or ShapeVar - return op.as(); + bool SelfBoundedVar(const IntervalSet& set, + const PrimExpr& value) const { + if (value.as()) { + return set->min_value.same_as(value) || set->max_value.same_as(value); + } + return false; } template @@ -555,13 +558,13 @@ class IntervalSetEvaluator : inline IntervalSet VisitDivExpr_(const T* op) { IntervalSet a = this->Eval(op->a); IntervalSet b = this->Eval(op->b); - if ((MatchPoint(a, op->a) && (MatchPoint(b, op->b) || IsVar(op->b))) - || (IsVar(op->a) && IsVar(op->b))) { + if ((MatchPoint(a, op->a) && (MatchPoint(b, op->b) || SelfBoundedVar(b, op->b))) + || (SelfBoundedVar(a, op->a) && SelfBoundedVar(b, op->b))) { // e.g., // div(10, 5) evaluates to 2 // div(10, {n|n>=0}) evaluates to itself // div({m|m>=0}, {n|n>=0}) evaluates to itself - return IntervalSet::SinglePoint(GetRef(op)); + return IntervalSet::SinglePoint(GetRef(op)); } // e.g., div({m|m>=0}, 2) goes here return Combine(analyzer_, a, b); diff --git a/tests/python/unittest/test_arith_intset.py b/tests/python/unittest/test_arith_intset.py index 17cc6f1a712b..b74c6647b24f 100644 --- a/tests/python/unittest/test_arith_intset.py +++ b/tests/python/unittest/test_arith_intset.py @@ -60,6 +60,8 @@ def test_add_sub(): def test_mul_div(): ck = IntSetChecker() x, y = tvm.var("x"), tvm.var("y") + sx, sy = tvm.shape_var("sx"), tvm.shape_var("sy") + tdiv = tvm.truncdiv ck.analyzer.update(y, tvm.arith.ConstIntBound(1, 100), override=True) ck.verify(x * y, {x : tvm.arith.IntervalSet(0, 10)}, (0, 10 * y)) @@ -68,22 +70,35 @@ def test_mul_div(): ck.verify(tdiv(x, y), {x : tvm.arith.IntervalSet(0, 10)}, (0, tdiv(10, y))) ck.verify(tdiv(x, 2), {x : tvm.arith.IntervalSet(1, 10)}, (0, 5)) + ck.verify(tdiv(sx, 2), {}, (0, tdiv(sx, 2))) + ck.verify(tdiv(2, sy), {}, (tdiv(2, sy), tdiv(2, sy))) + ck.verify(tdiv(sx, sy), {}, 
(tdiv(sx, sy), tdiv(sx, sy))) fld = tvm.floordiv ck.verify(fld(x, y), {x : tvm.arith.IntervalSet(0, 10)}, (0, fld(10, y))) ck.verify(fld(x, 2), {x : tvm.arith.IntervalSet(-1, 10)}, (-1, 5)) + ck.verify(fld(sx, 2), {}, (0, fld(sx, 2))) + ck.verify(fld(2, sy), {}, (fld(2, sy), fld(2, sy))) + ck.verify(fld(sx, sy), {}, (fld(sx, sy), fld(sx, sy))) def test_mod(): ck = IntSetChecker() x, y = tvm.var("x"), tvm.var("y") + sx, sy = tvm.shape_var("sx"), tvm.shape_var("sy") tmod = tvm.truncmod ck.analyzer.update(y, tvm.arith.ConstIntBound(1, 100), override=True) ck.verify(tmod(x, y), {x : tvm.arith.IntervalSet(0, 10)}, (0, y - 1)) ck.verify(tmod(x, 10), {x : tvm.arith.IntervalSet(1, 10)}, (0, 9)) + ck.verify(tmod(sx, 2), {}, (0, 1)) + ck.verify(tmod(2, sy), {}, (tmod(2, sy), tmod(2, sy))) + ck.verify(tmod(sx, sy), {}, (tmod(sx, sy), tmod(sx, sy))) flm = tvm.floormod ck.verify(flm(x, 10), {x : tvm.arith.IntervalSet(-10, 10)}, (0, 9)) + ck.verify(flm(sx, 2), {}, (0, 1)) + ck.verify(flm(2, sy), {}, (flm(2, sy), flm(2, sy))) + ck.verify(flm(sx, sy), {}, (flm(sx, sy), flm(sx, sy))) def test_max_min(): From 4c33a0a40e3c069089afa627538124fbc155be2e Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Fri, 10 Jan 2020 16:15:08 -0800 Subject: [PATCH 07/17] fix lint --- python/tvm/build_module.py | 2 +- python/tvm/expr.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/python/tvm/build_module.py b/python/tvm/build_module.py index 1698e145dbe2..9a0b2c1e6143 100644 --- a/python/tvm/build_module.py +++ b/python/tvm/build_module.py @@ -294,7 +294,7 @@ def get_binds(args, compact=False, binds=None): arg_list = [] def is_var(idx): - return isinstance(idx, expr.Var) or isinstance(idx, expr.ShapeVar) + return isinstance(idx, (expr.ShapeVar, expr.Var)) for x in args: if isinstance(x, tensor.Tensor): diff --git a/python/tvm/expr.py b/python/tvm/expr.py index 9755c9915913..f7b1d8937604 100644 --- a/python/tvm/expr.py +++ b/python/tvm/expr.py @@ -291,6 +291,7 @@ class ShapeVar(Var): dtype : int The data type """ + # pylint: disable=super-init-not-called def __init__(self, name, dtype): self.__init_handle_by_constructor__( _api_internal._ShapeVar, name, dtype) From 1c8ba3dbe673166099996b529288b02125e393b0 Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Fri, 10 Jan 2020 16:22:15 -0800 Subject: [PATCH 08/17] fix lint again --- topi/python/topi/util.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/topi/python/topi/util.py b/topi/python/topi/util.py index 2cce5e81b1bb..c6098d4e3e4f 100644 --- a/topi/python/topi/util.py +++ b/topi/python/topi/util.py @@ -155,8 +155,7 @@ def is_var(expr): Whether it is tvm.expr.Var or tvm_assert_bound intrinsic (which provides the boundary information of a Var). 
""" - return isinstance(expr, tvm.expr.Var) \ - or isinstance(expr, tvm.expr.ShapeVar) + return isinstance(expr, (tvm.expr.ShapeVar, tvm.expr.Var)) def get_const_tuple(in_tuple): From 68871b826037d9b554c699ea14be0ab4875e187f Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Sat, 11 Jan 2020 14:16:43 -0800 Subject: [PATCH 09/17] add default ShapeVar visitor in Relay --- include/tvm/ir.h | 1 + src/lang/attr_functor.h | 4 ++++ src/lang/attrs.cc | 2 +- tests/python/relay/test_ir_text_printer.py | 4 ++-- 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/include/tvm/ir.h b/include/tvm/ir.h index 84039485ae69..ed52283b1cb0 100644 --- a/include/tvm/ir.h +++ b/include/tvm/ir.h @@ -38,6 +38,7 @@ namespace ir { using IntImmNode = tvm::IntImmNode; using VarNode = tvm::VarNode; +using ShapeVarNode = tvm::ShapeVarNode; /*! \brief constant unsigned integer. */ class UIntImmNode : public PrimExprNode { diff --git a/src/lang/attr_functor.h b/src/lang/attr_functor.h index 34ee4b3159a5..652f7161a477 100644 --- a/src/lang/attr_functor.h +++ b/src/lang/attr_functor.h @@ -82,6 +82,9 @@ class AttrFunctor { virtual R VisitAttr_(const ir::StringImmNode* op, Args... args) ATTR_FUNCTOR_DEFAULT; // deep comparison of symbolic integer expressions. virtual R VisitAttr_(const VarNode* op, Args... args) ATTR_FUNCTOR_DEFAULT; + virtual R VisitAttr_(const ShapeVarNode* op, Args... args) { + return VisitAttr_(static_cast(op), std::forward(args)...); + } virtual R VisitAttr_(const ir::AddNode* op, Args... args) ATTR_FUNCTOR_DEFAULT; virtual R VisitAttr_(const ir::SubNode* op, Args... args) ATTR_FUNCTOR_DEFAULT; virtual R VisitAttr_(const ir::MulNode* op, Args... args) ATTR_FUNCTOR_DEFAULT; @@ -117,6 +120,7 @@ class AttrFunctor { ATTR_FUNCTOR_DISPATCH(FloatImmNode); ATTR_FUNCTOR_DISPATCH(StringImmNode); ATTR_FUNCTOR_DISPATCH(VarNode); + ATTR_FUNCTOR_DISPATCH(ShapeVarNode); ATTR_FUNCTOR_DISPATCH(AddNode); ATTR_FUNCTOR_DISPATCH(SubNode); ATTR_FUNCTOR_DISPATCH(MulNode); diff --git a/src/lang/attrs.cc b/src/lang/attrs.cc index 1d3e767a5b71..5594a410b4ab 100644 --- a/src/lang/attrs.cc +++ b/src/lang/attrs.cc @@ -121,7 +121,7 @@ bool AttrsEqualHandler::VisitAttr_(const StringImmNode* lhs, const ObjectRef& ot bool AttrsEqualHandler::VisitAttr_(const ArrayNode* lhs, const ObjectRef& other) { if (const auto* rhs = other.as()) { if (rhs->data.size() != lhs->data.size()) return false; - for (size_t i = 0; i < lhs->data.size(); ++i) { + for (size_t i = 0; i < lhs->data.size(); ++i) { if (!Equal(lhs->data[i], rhs->data[i])) return false; } } diff --git a/tests/python/relay/test_ir_text_printer.py b/tests/python/relay/test_ir_text_printer.py index 347d4b1b6d49..3bb50f50fcdc 100644 --- a/tests/python/relay/test_ir_text_printer.py +++ b/tests/python/relay/test_ir_text_printer.py @@ -82,8 +82,8 @@ def test_meta_data(): text_no_meta = str(f) assert "channels=2" in text assert "channels=2" in text_no_meta - assert "meta[Variable][0]" in text - assert "meta[Variable][0]" in text_no_meta + assert "meta[ShapeVar][0]" in text + assert "meta[ShapeVar][0]" in text_no_meta assert "type_key" in text assert "type_key" not in text_no_meta From c9bdefc09cc0197a09cae2bbc9e6537d0a5e2264 Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Sat, 11 Jan 2020 15:31:34 -0800 Subject: [PATCH 10/17] fix override --- include/tvm/ir_functor_ext.h | 2 ++ src/pass/ir_functor.cc | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/include/tvm/ir_functor_ext.h b/include/tvm/ir_functor_ext.h index 8f6cf7293b2a..0be68089850f 100644 --- 
a/include/tvm/ir_functor_ext.h
+++ b/include/tvm/ir_functor_ext.h
@@ -303,6 +303,7 @@ class TVM_DLL ExprVisitor :
   using ExprFunctor::VisitExpr;
   // list of functions to override.
   void VisitExpr_(const VarNode* op) override;
+  void VisitExpr_(const ShapeVarNode* op) override;
   void VisitExpr_(const LoadNode* op) override;
   void VisitExpr_(const LetNode* op) override;
   void VisitExpr_(const CallNode* op) override;
@@ -348,6 +349,7 @@ class TVM_DLL ExprMutator :
   using ExprFunctor::VisitExpr;
   // list of functions to override.
   PrimExpr VisitExpr_(const VarNode* op) override;
+  PrimExpr VisitExpr_(const ShapeVarNode* op) override;
   PrimExpr VisitExpr_(const LoadNode* op) override;
   PrimExpr VisitExpr_(const LetNode* op) override;
   PrimExpr VisitExpr_(const CallNode* op) override;
diff --git a/src/pass/ir_functor.cc b/src/pass/ir_functor.cc
index 67acec674630..884457d89581 100644
--- a/src/pass/ir_functor.cc
+++ b/src/pass/ir_functor.cc
@@ -221,6 +221,8 @@ void StmtVisitor::VisitStmt_(const EvaluateNode* op) {
 
 void ExprVisitor::VisitExpr_(const VarNode* op) {}
 
+void ExprVisitor::VisitExpr_(const ShapeVarNode* op) {}
+
 void ExprVisitor::VisitExpr_(const LoadNode* op) {
   this->VisitExpr(op->index);
   this->VisitExpr(op->predicate);
@@ -597,6 +599,10 @@ PrimExpr ExprMutator::VisitExpr_(const VarNode* op) {
   return GetRef<PrimExpr>(op);
 }
 
+PrimExpr ExprMutator::VisitExpr_(const ShapeVarNode* op) {
+  return GetRef<PrimExpr>(op);
+}
+
 PrimExpr ExprMutator::VisitExpr_(const LoadNode* op) {
   PrimExpr index = this->VisitExpr(op->index);
   PrimExpr predicate = this->VisitExpr(op->predicate);

From d283e009b334e928b3f3fde7a4aecd0e51e3c248 Mon Sep 17 00:00:00 2001
From: Yizhi Liu
Date: Sat, 11 Jan 2020 18:32:10 -0800
Subject: [PATCH 11/17] fix ShapeVar visit bug

---
 src/pass/ir_functor.cc | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/pass/ir_functor.cc b/src/pass/ir_functor.cc
index 884457d89581..680e491e2ab7 100644
--- a/src/pass/ir_functor.cc
+++ b/src/pass/ir_functor.cc
@@ -221,7 +221,9 @@ void StmtVisitor::VisitStmt_(const EvaluateNode* op) {
 
 void ExprVisitor::VisitExpr_(const VarNode* op) {}
 
-void ExprVisitor::VisitExpr_(const ShapeVarNode* op) {}
+void ExprVisitor::VisitExpr_(const ShapeVarNode* op) {
+  this->VisitExpr_(static_cast<const VarNode*>(op));
+}
 
 void ExprVisitor::VisitExpr_(const LoadNode* op) {
   this->VisitExpr(op->index);
@@ -600,7 +602,7 @@ PrimExpr ExprMutator::VisitExpr_(const VarNode* op) {
 }
 
 PrimExpr ExprMutator::VisitExpr_(const ShapeVarNode* op) {
-  return GetRef<PrimExpr>(op);
+  return this->VisitExpr_(static_cast<const VarNode*>(op));
 }
 
 PrimExpr ExprMutator::VisitExpr_(const LoadNode* op) {

From 5589dbf3ef706dc599bdecc486ce0df4d809dba7 Mon Sep 17 00:00:00 2001
From: Yizhi Liu
Date: Mon, 13 Jan 2020 11:32:07 -0800
Subject: [PATCH 12/17] revert IntervalSet for shape_var

---
 src/arithmetic/int_set.cc                  | 40 +++-------------------
 tests/python/unittest/test_arith_intset.py | 14 --------
 2 files changed, 4 insertions(+), 50 deletions(-)

diff --git a/src/arithmetic/int_set.cc b/src/arithmetic/int_set.cc
index 28651ed46f76..3f0fcf3c61d0 100644
--- a/src/arithmetic/int_set.cc
+++ b/src/arithmetic/int_set.cc
@@ -405,22 +405,6 @@ class IntervalSetEvaluator :
     }
   }
 
-  IntervalSet VisitExpr_(const ShapeVarNode* op) final {
-    Var var = GetRef<Var>(op);
-    auto it = dom_map_.find(var);
-    if (it != dom_map_.end()) {
-      IntervalSet res = ToIntervalSet((*it).second);
-      if (res->min_value.same_as(var) &&
-          res->max_value.same_as(var)) {
-        return res;
-      }
-      // recursively evaluate mapped result
-      // in case the domain contains variables to be relaxed.
- return Eval(res); - } else { - return IntervalSet(0, GetRef(op)); - } - } IntervalSet VisitExpr_(const AddNode* op) final { return VisitBinaryExpr_(op); @@ -435,19 +419,19 @@ class IntervalSetEvaluator : } IntervalSet VisitExpr_(const DivNode* op) final { - return VisitDivExpr_(op); + return VisitBinaryExpr_(op); } IntervalSet VisitExpr_(const ModNode* op) final { - return VisitDivExpr_(op); + return VisitBinaryExpr_(op); } IntervalSet VisitExpr_(const FloorDivNode* op) final { - return VisitDivExpr_(op); + return VisitBinaryExpr_(op); } IntervalSet VisitExpr_(const FloorModNode* op) final { - return VisitDivExpr_(op); + return VisitBinaryExpr_(op); } IntervalSet VisitExpr_(const MinNode* op) final { @@ -554,22 +538,6 @@ class IntervalSetEvaluator : return Combine(analyzer_, a, b); } - template - inline IntervalSet VisitDivExpr_(const T* op) { - IntervalSet a = this->Eval(op->a); - IntervalSet b = this->Eval(op->b); - if ((MatchPoint(a, op->a) && (MatchPoint(b, op->b) || SelfBoundedVar(b, op->b))) - || (SelfBoundedVar(a, op->a) && SelfBoundedVar(b, op->b))) { - // e.g., - // div(10, 5) evaluates to 2 - // div(10, {n|n>=0}) evaluates to itself - // div({m|m>=0}, {n|n>=0}) evaluates to itself - return IntervalSet::SinglePoint(GetRef(op)); - } - // e.g., div({m|m>=0}, 2) goes here - return Combine(analyzer_, a, b); - } - // recursive depth int recur_depth_{0}; // analyzer diff --git a/tests/python/unittest/test_arith_intset.py b/tests/python/unittest/test_arith_intset.py index b74c6647b24f..20e3f573776e 100644 --- a/tests/python/unittest/test_arith_intset.py +++ b/tests/python/unittest/test_arith_intset.py @@ -60,7 +60,6 @@ def test_add_sub(): def test_mul_div(): ck = IntSetChecker() x, y = tvm.var("x"), tvm.var("y") - sx, sy = tvm.shape_var("sx"), tvm.shape_var("sy") tdiv = tvm.truncdiv ck.analyzer.update(y, tvm.arith.ConstIntBound(1, 100), override=True) @@ -70,35 +69,22 @@ def test_mul_div(): ck.verify(tdiv(x, y), {x : tvm.arith.IntervalSet(0, 10)}, (0, tdiv(10, y))) ck.verify(tdiv(x, 2), {x : tvm.arith.IntervalSet(1, 10)}, (0, 5)) - ck.verify(tdiv(sx, 2), {}, (0, tdiv(sx, 2))) - ck.verify(tdiv(2, sy), {}, (tdiv(2, sy), tdiv(2, sy))) - ck.verify(tdiv(sx, sy), {}, (tdiv(sx, sy), tdiv(sx, sy))) fld = tvm.floordiv ck.verify(fld(x, y), {x : tvm.arith.IntervalSet(0, 10)}, (0, fld(10, y))) ck.verify(fld(x, 2), {x : tvm.arith.IntervalSet(-1, 10)}, (-1, 5)) - ck.verify(fld(sx, 2), {}, (0, fld(sx, 2))) - ck.verify(fld(2, sy), {}, (fld(2, sy), fld(2, sy))) - ck.verify(fld(sx, sy), {}, (fld(sx, sy), fld(sx, sy))) def test_mod(): ck = IntSetChecker() x, y = tvm.var("x"), tvm.var("y") - sx, sy = tvm.shape_var("sx"), tvm.shape_var("sy") tmod = tvm.truncmod ck.analyzer.update(y, tvm.arith.ConstIntBound(1, 100), override=True) ck.verify(tmod(x, y), {x : tvm.arith.IntervalSet(0, 10)}, (0, y - 1)) ck.verify(tmod(x, 10), {x : tvm.arith.IntervalSet(1, 10)}, (0, 9)) - ck.verify(tmod(sx, 2), {}, (0, 1)) - ck.verify(tmod(2, sy), {}, (tmod(2, sy), tmod(2, sy))) - ck.verify(tmod(sx, sy), {}, (tmod(sx, sy), tmod(sx, sy))) flm = tvm.floormod ck.verify(flm(x, 10), {x : tvm.arith.IntervalSet(-10, 10)}, (0, 9)) - ck.verify(flm(sx, 2), {}, (0, 1)) - ck.verify(flm(2, sy), {}, (flm(2, sy), flm(2, sy))) - ck.verify(flm(sx, sy), {}, (flm(sx, sy), flm(sx, sy))) def test_max_min(): From c315f36d08482abcde53dcd958b3b9a6ae989f40 Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Mon, 13 Jan 2020 15:08:59 -0800 Subject: [PATCH 13/17] remove bound_remover --- src/arithmetic/bound_deducer.cc | 48 --------------------------------- 1 
file changed, 48 deletions(-) diff --git a/src/arithmetic/bound_deducer.cc b/src/arithmetic/bound_deducer.cc index d7069511c96d..5128096eb40f 100644 --- a/src/arithmetic/bound_deducer.cc +++ b/src/arithmetic/bound_deducer.cc @@ -70,40 +70,6 @@ std::vector GetPath(PrimExpr target, PrimExpr expr) { return v.path_; } -class BoundRemover : public ExprMutator { - public: - PrimExpr Remove(const PrimExpr& e) { - remove_bounded_ = true; - return ExprMutator::VisitExpr(e); - } - - PrimExpr Reset(const PrimExpr& e) { - remove_bounded_ = false; - return ExprMutator::VisitExpr(e); - } - - PrimExpr VisitExpr_(const ShapeVarNode* op) final { - PrimExpr shape_var = GetRef(op); - if (remove_bounded_) { - PrimExpr var = VarNode::make(op->dtype, op->name_hint); - bounded_var_map_[var.as()] = shape_var; - return var; - } - return shape_var; - } - - PrimExpr VisitExpr_(const VarNode* op) final { - if (!remove_bounded_ && bounded_var_map_.count(op)) { - return bounded_var_map_[op]; - } - return GetRef(op); - } - - private: - bool remove_bounded_ = false; - std::unordered_map bounded_var_map_; -}; - enum CompareOp {kGreater, kLess, kEqual}; // a visitor to deduce the bound of a variable from a expression @@ -332,17 +298,6 @@ void BoundDeducer::Deduce() { Init(); if (!success_) return; - // Any variable appears in both expr and result, - // they should not be eagerly simplified according to its bound - // e.g., i + n/4 >= n - // => i >= n - n/4 - // If we eagerly simplified the left side given ShapeVar({n | n >= 0}) - // we would get i + 0 >= n => i >= n, which is obviously incorrect. - // Thus we remove assert_bound here and reset later. - BoundRemover bound_remover; - expr_ = bound_remover.Remove(expr_); - result_ = bound_remover.Remove(result_); - Relax(); if (!success_) return; // get the path @@ -354,9 +309,6 @@ void BoundDeducer::Deduce() { expr_map_ = EvalSetForEachSubExpr(expr_, hint_map_); this->VisitExpr(expr_); - - expr_ = bound_remover.Reset(expr_); - result_ = bound_remover.Reset(result_); } void BoundDeducer::Relax() { From 6cb3c7f59d614f09d7c684059f76325d5d28fcaa Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Mon, 13 Jan 2020 19:37:01 -0800 Subject: [PATCH 14/17] remove is_var; use constructor for shapevar/var instead --- include/tvm/expr.h | 10 ++++++--- include/tvm/ir.h | 2 +- python/tvm/build_module.py | 7 ++---- src/api/api_ir.cc | 4 ++-- src/arithmetic/int_set.cc | 8 ------- src/lang/expr.cc | 20 ++++++----------- src/pass/ssa.cc | 8 +++---- src/pass/tensor_core.cc | 2 +- .../unittest/test_arith_const_int_bound.py | 9 ++++++++ topi/python/topi/util.py | 17 +------------- topi/python/topi/x86/conv2d.py | 4 ++-- topi/python/topi/x86/dense.py | 22 +++++++++---------- 12 files changed, 47 insertions(+), 66 deletions(-) diff --git a/include/tvm/expr.h b/include/tvm/expr.h index 41c7320c8a60..ab435dbf2f7e 100644 --- a/include/tvm/expr.h +++ b/include/tvm/expr.h @@ -66,14 +66,16 @@ class Var; */ class VarNode : public PrimExprNode { public: + /*! \brief constructor */ + VarNode() {} + VarNode(DataType dtype, std::string name_hint); + /*! * \brief The hint to the variable name. * \note Each variable is uniquely identified by its address. */ std::string name_hint; - static Var make(DataType dtype, std::string name_hint); - void VisitAttrs(AttrVisitor* v) { v->Visit("dtype", &dtype); v->Visit("name", &name_hint); @@ -122,7 +124,9 @@ class ShapeVar; */ class ShapeVarNode : public VarNode { public: - static ShapeVar make(DataType dtype, std::string name_hint); + /*! 
\brief constructor */ + ShapeVarNode() {} + ShapeVarNode(DataType dtype, std::string name_hint); static constexpr const char* _type_key = "ShapeVar"; TVM_DECLARE_FINAL_OBJECT_INFO(ShapeVarNode, VarNode); diff --git a/include/tvm/ir.h b/include/tvm/ir.h index ed52283b1cb0..abebadd94422 100644 --- a/include/tvm/ir.h +++ b/include/tvm/ir.h @@ -714,7 +714,7 @@ class AnyNode : public PrimExprNode { void VisitAttrs(AttrVisitor* v) {} /*! \brief Convert to var. */ Var ToVar() const { - return VarNode::make(DataType::Int(32), "any_dim"); + return Var("any_dim", DataType::Int(32)); } TVM_DLL static PrimExpr make(); diff --git a/python/tvm/build_module.py b/python/tvm/build_module.py index cabb7634657b..761da180406c 100644 --- a/python/tvm/build_module.py +++ b/python/tvm/build_module.py @@ -293,12 +293,9 @@ def get_binds(args, compact=False, binds=None): cfg = current_build_config() arg_list = [] - def is_var(idx): - return isinstance(idx, (expr.ShapeVar, expr.Var)) - for x in args: if isinstance(x, tensor.Tensor): - any_dim = any(is_var(i) for i in x.shape) + any_dim = any(isinstance(i, expr.Var) for i in x.shape) buffer_type = "auto_broadcast" if any_dim and not compact else "" if x not in binds: buf = api.decl_buffer(x.shape, @@ -313,7 +310,7 @@ def is_var(idx): arg_list.append(binds[x]) elif isinstance(x, schedule.Buffer): arg_list.append(x) - elif is_var(x): + elif isinstance(x, expr.Var): arg_list.append(x) else: raise ValueError("args must be Tensor, Buffer or Var") diff --git a/src/api/api_ir.cc b/src/api/api_ir.cc index d002667dd2ba..2cc01178615d 100644 --- a/src/api/api_ir.cc +++ b/src/api/api_ir.cc @@ -33,12 +33,12 @@ namespace ir { TVM_REGISTER_GLOBAL("_Var") .set_body_typed([](std::string s, DataType t) { - return VarNode::make(t, s); + return Var(s, t); }); TVM_REGISTER_GLOBAL("_ShapeVar") .set_body_typed([](std::string s, DataType t) { - return ShapeVarNode::make(t, s); + return ShapeVar(s, t); }); TVM_REGISTER_GLOBAL("make.abs") diff --git a/src/arithmetic/int_set.cc b/src/arithmetic/int_set.cc index 3f0fcf3c61d0..3786483d1ab3 100644 --- a/src/arithmetic/int_set.cc +++ b/src/arithmetic/int_set.cc @@ -520,14 +520,6 @@ class IntervalSetEvaluator : return set->min_value.same_as(value) && set->max_value.same_as(value); } - bool SelfBoundedVar(const IntervalSet& set, - const PrimExpr& value) const { - if (value.as()) { - return set->min_value.same_as(value) || set->max_value.same_as(value); - } - return false; - } - template inline IntervalSet VisitBinaryExpr_(const T* op) { IntervalSet a = this->Eval(op->a); diff --git a/src/lang/expr.cc b/src/lang/expr.cc index 300f7dd3ae4f..d7ded8d7beea 100644 --- a/src/lang/expr.cc +++ b/src/lang/expr.cc @@ -39,24 +39,18 @@ PrimExpr::PrimExpr(std::string str) : PrimExpr(ir::StringImmNode::make(str)) {} Var::Var(std::string name_hint, DataType t) - : Var(VarNode::make(t, name_hint)) {} + : Var(make_object(t, name_hint)) {} -Var VarNode::make(DataType t, std::string name_hint) { - ObjectPtr node = make_object(); - node->dtype = t; - node->name_hint = std::move(name_hint); - return Var(node); +VarNode::VarNode(DataType t, std::string name_hint) { + this->dtype = t; + this->name_hint = std::move(name_hint); } ShapeVar::ShapeVar(std::string name_hint, DataType t) - : ShapeVar(ShapeVarNode::make(t, name_hint)) {} + : ShapeVar(make_object(t, name_hint)) {} -ShapeVar ShapeVarNode::make(DataType t, std::string name_hint) { - ObjectPtr node = make_object(); - node->dtype = t; - node->name_hint = std::move(name_hint); - return ShapeVar(node); -} 
+ShapeVarNode::ShapeVarNode(DataType t, std::string name_hint) + : VarNode(t, std::move(name_hint)) {} Range::Range(PrimExpr begin, PrimExpr end) : Range(make_object( diff --git a/src/pass/ssa.cc b/src/pass/ssa.cc index 8375e806a006..50cdc528f207 100644 --- a/src/pass/ssa.cc +++ b/src/pass/ssa.cc @@ -87,7 +87,7 @@ class IRConvertSSA final : public StmtExprMutator { const Var& v = op->var; if (defined_.count(v.get())) { PrimExpr value = this->VisitExpr(op->value); - Var new_var = VarNode::make(v.dtype(), v->name_hint); + Var new_var(v->name_hint, v.dtype()); scope_[v.get()].push_back(new_var); PrimExpr body = this->VisitExpr(op->body); scope_[v.get()].pop_back(); @@ -123,7 +123,7 @@ class IRConvertSSA final : public StmtExprMutator { const Var& v = op->var; if (defined_.count(v.get())) { PrimExpr value = this->VisitExpr(op->value); - Var new_var = VarNode::make(v.dtype(), v->name_hint); + Var new_var(v->name_hint, v.dtype()); scope_[v.get()].push_back(new_var); Stmt body = this->VisitStmt(op->body); scope_[v.get()].pop_back(); @@ -136,7 +136,7 @@ class IRConvertSSA final : public StmtExprMutator { Stmt VisitStmt_(const ForNode* op) final { const Var& v = op->loop_var; if (defined_.count(v.get())) { - Var new_var = VarNode::make(v.dtype(), v->name_hint); + Var new_var(v->name_hint, v.dtype()); scope_[v.get()].push_back(new_var); Stmt stmt = StmtExprMutator::VisitStmt_(op); scope_[v.get()].pop_back(); @@ -151,7 +151,7 @@ class IRConvertSSA final : public StmtExprMutator { Stmt VisitStmt_(const AllocateNode* op) final { const Var& v = op->buffer_var; if (defined_.count(v.get())) { - Var new_var = VarNode::make(v.dtype(), v->name_hint); + Var new_var(v->name_hint, v.dtype()); scope_[v.get()].push_back(new_var); Stmt stmt = StmtExprMutator::VisitStmt_(op); scope_[v.get()].pop_back(); diff --git a/src/pass/tensor_core.cc b/src/pass/tensor_core.cc index bb57fe8c37d3..921af6e98174 100644 --- a/src/pass/tensor_core.cc +++ b/src/pass/tensor_core.cc @@ -1108,7 +1108,7 @@ class TensorCoreIRMutator : public StmtExprMutator { auto it2 = matrix_abc_.find(simplify_name(call->name)); CHECK(it2 != matrix_abc_.end()) << "Cannot find matrix info for " << call->name; - buffer_node->data = VarNode::make(DataType::Handle(), call->name); + buffer_node->data = Var(call->name, DataType::Handle()); buffer_node->name = call->name; buffer_node->scope = "wmma." 
+ it2->second; buffer_node->dtype = datatype; diff --git a/tests/python/unittest/test_arith_const_int_bound.py b/tests/python/unittest/test_arith_const_int_bound.py index 49448616a0fa..735ee35065fd 100644 --- a/tests/python/unittest/test_arith_const_int_bound.py +++ b/tests/python/unittest/test_arith_const_int_bound.py @@ -275,6 +275,14 @@ def test_mix_index_bound(): assert bd.max_value == (23 // 7) * 7 + 6 +def test_shape_var_bound(): + analyzer = tvm.arith.Analyzer() + x = tvm.shape_var("x") + bd = analyzer.const_int_bound(x) + assert bd.min_value == 0 + assert bd.max_value == bd.POS_INF + + if __name__ == "__main__": test_dtype_bound() test_cast_bound() @@ -288,3 +296,4 @@ def test_mix_index_bound(): test_select_bound() test_shift_and_bound() test_mix_index_bound() + test_shape_var_bound() diff --git a/topi/python/topi/util.py b/topi/python/topi/util.py index c6098d4e3e4f..8f32a297d719 100644 --- a/topi/python/topi/util.py +++ b/topi/python/topi/util.py @@ -143,21 +143,6 @@ def equal_const_int(expr, value): return expr.value == value -def is_var(expr): - """Check whether the input is tvm.expr.Var or tvm.expr.ShapeVar - Parameters - ---------- - expr : tvm.Expr - The input expression. - Returns - ------- - equal : bool - Whether it is tvm.expr.Var or - tvm_assert_bound intrinsic (which provides the boundary information of a Var). - """ - return isinstance(expr, (tvm.expr.ShapeVar, tvm.expr.Var)) - - def get_const_tuple(in_tuple): """Verifies input tuple is IntImm or Var, returns tuple of int or Var. @@ -173,7 +158,7 @@ def get_const_tuple(in_tuple): """ ret = [] for elem in in_tuple: - if is_var(elem): + if isinstance(elem, tvm.expr.Var): ret.append(elem) elif not isinstance(elem, (tvm.expr.IntImm, tvm.expr.UIntImm, int)): elem = tvm.ir_pass.Simplify(elem) diff --git a/topi/python/topi/x86/conv2d.py b/topi/python/topi/x86/conv2d.py index f05550c8143b..8a6b57eb9e66 100644 --- a/topi/python/topi/x86/conv2d.py +++ b/topi/python/topi/x86/conv2d.py @@ -31,7 +31,7 @@ from ..nn.depthwise_conv2d import _get_workload as _get_depthwise_conv2d_workload from ..nn.pad import pad from ..nn.util import get_pad_tuple -from ..util import get_const_tuple, is_var +from ..util import get_const_tuple from . import conv2d_avx_1x1, conv2d_avx_common @@ -44,7 +44,7 @@ def _get_default_config(cfg, data, kernel, strides, padding, out_dtype, is_depth """ static_data_shape = [] for dim in get_const_tuple(data.shape): - if is_var(dim): + if isinstance(dim, tvm.expr.Var): static_data_shape.append(1) else: static_data_shape.append(dim) diff --git a/topi/python/topi/x86/dense.py b/topi/python/topi/x86/dense.py index b3c162ffb95f..b7a3d6d5a330 100644 --- a/topi/python/topi/x86/dense.py +++ b/topi/python/topi/x86/dense.py @@ -24,7 +24,7 @@ from .util import get_fp32_len from .. import generic, tag, nn -from ..util import traverse_inline, get_const_tuple, is_var +from ..util import traverse_inline, get_const_tuple @autotvm.register_topi_compute(nn.dense, "cpu", "direct") def _declaration_dense(cfg, data, weight, bias=None, out_dtype=None): @@ -40,7 +40,7 @@ def _declaration_dense(cfg, data, weight, bias=None, out_dtype=None): # Always use dense_nopack for dynamic input. # This is a temporary for CV models. # TODO(kevinthesun): use kernel dispatcher instead. 
- if is_var(M): + if isinstance(M, tvm.expr.Var): return _declaration_dense_nopack(cfg, data, weight, bias, out_dtype) # For small batch sizes, don't pack weight into cache-friendly layout @@ -59,9 +59,9 @@ def _declaration_dense_pack(cfg, data, weight, bias=None, out_dtype=None): M, K = get_const_tuple(data.shape) # batch, in_dim N, _ = get_const_tuple(weight.shape) # out_dim # create tuning space - cfg.define_split("tile_y", 32 if is_var(M) else M, num_outputs=3) - cfg.define_split("tile_x", 32 if is_var(N) else N, num_outputs=3) - cfg.define_split("tile_k", 32 if is_var(K) else K, num_outputs=2) + cfg.define_split("tile_y", 32 if isinstance(M, tvm.expr.Var) else M, num_outputs=3) + cfg.define_split("tile_x", 32 if isinstance(N, tvm.expr.Var) else N, num_outputs=3) + cfg.define_split("tile_k", 32 if isinstance(K, tvm.expr.Var) else K, num_outputs=2) if cfg.is_fallback: _default_dense_pack_config(cfg, M, N, K) @@ -93,9 +93,9 @@ def _declaration_dense_nopack(cfg, data, weight, bias=None, out_dtype=None): M, K = get_const_tuple(data.shape) N, _ = get_const_tuple(weight.shape) # create tuning space - cfg.define_split("tile_y", 32 if is_var(M) else M, num_outputs=2) - cfg.define_split("tile_x", 32 if is_var(N) else N, num_outputs=2) - cfg.define_split("tile_k", 32 if is_var(K) else K, num_outputs=2) + cfg.define_split("tile_y", 32 if isinstance(M, tvm.expr.Var) else M, num_outputs=2) + cfg.define_split("tile_x", 32 if isinstance(N, tvm.expr.Var) else N, num_outputs=2) + cfg.define_split("tile_k", 32 if isinstance(K, tvm.expr.Var) else K, num_outputs=2) if cfg.is_fallback: _default_dense_nopack_config(cfg, M, N, K) @@ -218,11 +218,11 @@ def _schedule_dense_nopack_template(cfg, s, C): def _default_dense_pack_config(cfg, M, N, K): # Generate default schedule for dynamic shape. 
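    # When M, N or K is still symbolic here (a Var/SizeVar whose value is only
    # known at runtime), there is no concrete extent to derive tiling factors
    # from, so a small placeholder value is substituted and the fallback config
    # is computed from that instead.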
- if is_var(M): + if isinstance(M, tvm.expr.Var): M = 16 - if is_var(N): + if isinstance(N, tvm.expr.Var): N = 16 - if is_var(K): + if isinstance(K, tvm.expr.Var): K = 16 vec_width = get_fp32_len() From d92c86136a6a9c0de6f1f31004b77adbfe13232a Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Wed, 15 Jan 2020 13:07:53 -0800 Subject: [PATCH 15/17] ShapeVar -> SizeVar; add constructor comments --- include/tvm/expr.h | 42 ++++++++++------ include/tvm/ir.h | 2 +- include/tvm/ir_functor_ext.h | 8 +-- python/tvm/api.py | 6 +-- python/tvm/expr.py | 6 +-- src/api/api_ir.cc | 4 +- src/arithmetic/const_int_bound.cc | 4 +- src/lang/attr_functor.h | 4 +- src/lang/expr.cc | 6 +-- src/lang/ir.cc | 6 +-- src/pass/ir_functor.cc | 4 +- tests/python/contrib/test_sparse.py | 8 +-- tests/python/integration/test_ewise.py | 6 +-- tests/python/integration/test_reduce.py | 8 +-- tests/python/integration/test_scan.py | 4 +- tests/python/relay/test_ir_text_printer.py | 6 +-- tests/python/relay/test_op_level1.py | 18 +++---- tests/python/relay/test_op_level10.py | 2 +- tests/python/relay/test_op_level2.py | 50 +++++++++---------- tests/python/relay/test_op_level3.py | 10 ++-- tests/python/relay/test_op_level4.py | 2 +- tests/python/relay/test_op_level5.py | 14 +++--- .../unittest/test_arith_const_int_bound.py | 6 +-- .../unittest/test_arith_stmt_simplify.py | 10 ++-- tests/python/unittest/test_build_lower.py | 6 +-- tests/python/unittest/test_codegen_arm.py | 4 +- tests/python/unittest/test_codegen_c_host.py | 2 +- tests/python/unittest/test_codegen_device.py | 2 +- tests/python/unittest/test_codegen_llvm.py | 4 +- tests/python/unittest/test_codegen_rocm.py | 4 +- .../unittest/test_codegen_static_init.py | 8 +-- .../python/unittest/test_codegen_vm_basic.py | 12 ++--- tests/python/unittest/test_hybrid_script.py | 6 +-- tests/python/unittest/test_ir_builder.py | 8 +-- tests/python/unittest/test_lang_buffer.py | 46 ++++++++--------- tests/python/unittest/test_lang_group.py | 12 ++--- tests/python/unittest/test_lang_schedule.py | 30 +++++------ tests/python/unittest/test_lang_tag.py | 30 +++++------ tests/python/unittest/test_lang_tensor.py | 50 +++++++++---------- .../unittest/test_lang_tensor_overload_op.py | 2 +- .../unittest/test_lang_verify_compute.py | 4 +- tests/python/unittest/test_module_load.py | 2 +- .../unittest/test_pass_bound_checkers.py | 44 ++++++++-------- .../test_pass_decorate_device_scope.py | 4 +- tests/python/unittest/test_pass_inline.py | 4 +- .../unittest/test_pass_loop_partition.py | 36 ++++++------- tests/python/unittest/test_pass_makeapi.py | 2 +- .../unittest/test_pass_split_host_device.py | 2 +- .../unittest/test_pass_storage_flatten.py | 8 +-- .../python/unittest/test_pass_storage_sync.py | 10 ++-- tests/python/unittest/test_pass_unroll.py | 6 +-- 51 files changed, 298 insertions(+), 286 deletions(-) diff --git a/include/tvm/expr.h b/include/tvm/expr.h index ab435dbf2f7e..4b2162510f81 100644 --- a/include/tvm/expr.h +++ b/include/tvm/expr.h @@ -89,6 +89,10 @@ class VarNode : public PrimExprNode { class Var : public PrimExpr { public: explicit Var(ObjectPtr n) : PrimExpr(n) {} + /*! \brief constructor + * \param name_hint variable name + * \param t data type + */ TVM_DLL explicit Var(std::string name_hint = "v", DataType t = DataType::Int(32)); /*! @@ -117,43 +121,51 @@ class Var : public PrimExpr { using ContainerType = VarNode; }; -class ShapeVar; +class SizeVar; /*! 
- * \brief A variable node represent a tensor shape size, + * \brief A variable node represent a tensor index size, * whose value must be non-negative. */ -class ShapeVarNode : public VarNode { +class SizeVarNode : public VarNode { public: /*! \brief constructor */ - ShapeVarNode() {} - ShapeVarNode(DataType dtype, std::string name_hint); + SizeVarNode() {} + /*! \brief constructor + * \param dtype data type + * \param name_hint variable name + */ + SizeVarNode(DataType dtype, std::string name_hint); - static constexpr const char* _type_key = "ShapeVar"; - TVM_DECLARE_FINAL_OBJECT_INFO(ShapeVarNode, VarNode); + static constexpr const char* _type_key = "SizeVar"; + TVM_DECLARE_FINAL_OBJECT_INFO(SizeVarNode, VarNode); }; -/*! \brief a named variable represents a tensor shape size */ -class ShapeVar : public Var { +/*! \brief a named variable represents a tensor index size */ +class SizeVar : public Var { public: - explicit ShapeVar(ObjectPtr n) : Var(n) {} - TVM_DLL explicit ShapeVar(std::string name_hint = "s", + explicit SizeVar(ObjectPtr n) : Var(n) {} + /*! \brief constructor + * \param name_hint variable name + * \param t data type + */ + TVM_DLL explicit SizeVar(std::string name_hint = "s", DataType t = DataType::Int(32)); /*! * \brief Get pointer to the internal value. * \return the corresponding Variable. */ - const ShapeVarNode* operator->() const { + const SizeVarNode* operator->() const { return get(); } /*! * \brief Get pointer to the internal value. * \return the corresponding Variable. */ - const ShapeVarNode* get() const { - return static_cast(data_.get()); + const SizeVarNode* get() const { + return static_cast(data_.get()); } /*! \brief type indicate the container type */ - using ContainerType = ShapeVarNode; + using ContainerType = SizeVarNode; }; class Integer; diff --git a/include/tvm/ir.h b/include/tvm/ir.h index abebadd94422..f090a7022cd4 100644 --- a/include/tvm/ir.h +++ b/include/tvm/ir.h @@ -38,7 +38,7 @@ namespace ir { using IntImmNode = tvm::IntImmNode; using VarNode = tvm::VarNode; -using ShapeVarNode = tvm::ShapeVarNode; +using SizeVarNode = tvm::SizeVarNode; /*! \brief constant unsigned integer. */ class UIntImmNode : public PrimExprNode { diff --git a/include/tvm/ir_functor_ext.h b/include/tvm/ir_functor_ext.h index 0be68089850f..0d19423376fc 100644 --- a/include/tvm/ir_functor_ext.h +++ b/include/tvm/ir_functor_ext.h @@ -133,7 +133,7 @@ class ExprFunctor { } // Functions that can be overriden by subclass virtual R VisitExpr_(const VarNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; - virtual R VisitExpr_(const ShapeVarNode* op, Args... args) { + virtual R VisitExpr_(const SizeVarNode* op, Args... args) { return VisitExpr_(static_cast(op), std::forward(args)...); } virtual R VisitExpr_(const LoadNode* op, Args... args) EXPR_FUNCTOR_DEFAULT; @@ -178,7 +178,7 @@ class ExprFunctor { FType vtable; // Set dispatch IR_EXPR_FUNCTOR_DISPATCH(VarNode); - IR_EXPR_FUNCTOR_DISPATCH(ShapeVarNode); + IR_EXPR_FUNCTOR_DISPATCH(SizeVarNode); IR_EXPR_FUNCTOR_DISPATCH(LoadNode); IR_EXPR_FUNCTOR_DISPATCH(LetNode); IR_EXPR_FUNCTOR_DISPATCH(CallNode); @@ -303,7 +303,7 @@ class TVM_DLL ExprVisitor : using ExprFunctor::VisitExpr; // list of functions to override. 
void VisitExpr_(const VarNode* op) override; - void VisitExpr_(const ShapeVarNode* op) override; + void VisitExpr_(const SizeVarNode* op) override; void VisitExpr_(const LoadNode* op) override; void VisitExpr_(const LetNode* op) override; void VisitExpr_(const CallNode* op) override; @@ -349,7 +349,7 @@ class TVM_DLL ExprMutator : using ExprFunctor::VisitExpr; // list of functions to override. PrimExpr VisitExpr_(const VarNode* op) override; - PrimExpr VisitExpr_(const ShapeVarNode* op) override; + PrimExpr VisitExpr_(const SizeVarNode* op) override; PrimExpr VisitExpr_(const LoadNode* op) override; PrimExpr VisitExpr_(const LetNode* op) override; PrimExpr VisitExpr_(const CallNode* op) override; diff --git a/python/tvm/api.py b/python/tvm/api.py index ada3d34d2882..cb34c49da95e 100644 --- a/python/tvm/api.py +++ b/python/tvm/api.py @@ -189,7 +189,7 @@ def var(name="tindex", dtype=int32): return _api_internal._Var(name, dtype) -def shape_var(name="tindex", dtype=int32): +def size_var(name="tindex", dtype=int32): """Create a new variable represents a tensor shape size, which is non-negative. Parameters @@ -202,10 +202,10 @@ def shape_var(name="tindex", dtype=int32): Returns ------- - var : ShapeVar + var : SizeVar The result symbolic shape variable. """ - return _api_internal._ShapeVar(name, dtype) + return _api_internal._SizeVar(name, dtype) def any(*args): diff --git a/python/tvm/expr.py b/python/tvm/expr.py index 34670e5d197d..c6f9db3318b3 100644 --- a/python/tvm/expr.py +++ b/python/tvm/expr.py @@ -279,8 +279,8 @@ def __init__(self, name, dtype): @register_object -class ShapeVar(Var): - """Symbolic variable to represent a tensor shape size +class SizeVar(Var): + """Symbolic variable to represent a tensor index size which is greater or equal to zero Parameters @@ -294,7 +294,7 @@ class ShapeVar(Var): # pylint: disable=super-init-not-called def __init__(self, name, dtype): self.__init_handle_by_constructor__( - _api_internal._ShapeVar, name, dtype) + _api_internal._SizeVar, name, dtype) @register_object diff --git a/src/api/api_ir.cc b/src/api/api_ir.cc index 2cc01178615d..9b0a21a05963 100644 --- a/src/api/api_ir.cc +++ b/src/api/api_ir.cc @@ -36,9 +36,9 @@ TVM_REGISTER_GLOBAL("_Var") return Var(s, t); }); -TVM_REGISTER_GLOBAL("_ShapeVar") +TVM_REGISTER_GLOBAL("_SizeVar") .set_body_typed([](std::string s, DataType t) { - return ShapeVar(s, t); + return SizeVar(s, t); }); TVM_REGISTER_GLOBAL("make.abs") diff --git a/src/arithmetic/const_int_bound.cc b/src/arithmetic/const_int_bound.cc index f8b6c466a25a..da29dbe2395c 100644 --- a/src/arithmetic/const_int_bound.cc +++ b/src/arithmetic/const_int_bound.cc @@ -292,8 +292,8 @@ class ConstIntBoundAnalyzer::Impl : } } - Entry VisitExpr_(const ShapeVarNode* op) final { - ShapeVar v = GetRef(op); + Entry VisitExpr_(const SizeVarNode* op) final { + SizeVar v = GetRef(op); auto it = var_map_.find(v); if (it != var_map_.end()) { return it->second; diff --git a/src/lang/attr_functor.h b/src/lang/attr_functor.h index 652f7161a477..20e31477b922 100644 --- a/src/lang/attr_functor.h +++ b/src/lang/attr_functor.h @@ -82,7 +82,7 @@ class AttrFunctor { virtual R VisitAttr_(const ir::StringImmNode* op, Args... args) ATTR_FUNCTOR_DEFAULT; // deep comparison of symbolic integer expressions. virtual R VisitAttr_(const VarNode* op, Args... args) ATTR_FUNCTOR_DEFAULT; - virtual R VisitAttr_(const ShapeVarNode* op, Args... args) { + virtual R VisitAttr_(const SizeVarNode* op, Args... 
args) { return VisitAttr_(static_cast(op), std::forward(args)...); } virtual R VisitAttr_(const ir::AddNode* op, Args... args) ATTR_FUNCTOR_DEFAULT; @@ -120,7 +120,7 @@ class AttrFunctor { ATTR_FUNCTOR_DISPATCH(FloatImmNode); ATTR_FUNCTOR_DISPATCH(StringImmNode); ATTR_FUNCTOR_DISPATCH(VarNode); - ATTR_FUNCTOR_DISPATCH(ShapeVarNode); + ATTR_FUNCTOR_DISPATCH(SizeVarNode); ATTR_FUNCTOR_DISPATCH(AddNode); ATTR_FUNCTOR_DISPATCH(SubNode); ATTR_FUNCTOR_DISPATCH(MulNode); diff --git a/src/lang/expr.cc b/src/lang/expr.cc index d7ded8d7beea..b6ee5871e36b 100644 --- a/src/lang/expr.cc +++ b/src/lang/expr.cc @@ -46,10 +46,10 @@ VarNode::VarNode(DataType t, std::string name_hint) { this->name_hint = std::move(name_hint); } -ShapeVar::ShapeVar(std::string name_hint, DataType t) - : ShapeVar(make_object(t, name_hint)) {} +SizeVar::SizeVar(std::string name_hint, DataType t) + : SizeVar(make_object(t, name_hint)) {} -ShapeVarNode::ShapeVarNode(DataType t, std::string name_hint) +SizeVarNode::SizeVarNode(DataType t, std::string name_hint) : VarNode(t, std::move(name_hint)) {} Range::Range(PrimExpr begin, PrimExpr end) diff --git a/src/lang/ir.cc b/src/lang/ir.cc index 8252da21b9ab..c1918c4c65ba 100644 --- a/src/lang/ir.cc +++ b/src/lang/ir.cc @@ -605,8 +605,8 @@ TVM_STATIC_IR_FUNCTOR(NodePrinter, vtable) // stream << op->name << "." << op->type; p->stream << op->name_hint; }) -.set_dispatch([](const ObjectRef& node, NodePrinter* p) { - auto* op = static_cast(node.get()); +.set_dispatch([](const ObjectRef& node, NodePrinter* p) { + auto* op = static_cast(node.get()); p->stream << "{" << op->name_hint << "|" << op->name_hint << ">=0}"; }) .set_dispatch([](const ObjectRef& node, NodePrinter* p) { @@ -1161,7 +1161,7 @@ TVM_REGISTER_NODE_TYPE(UIntImmNode); TVM_REGISTER_NODE_TYPE(StringImmNode); TVM_REGISTER_NODE_TYPE(CastNode); TVM_REGISTER_NODE_TYPE(VarNode); -TVM_REGISTER_NODE_TYPE(ShapeVarNode); +TVM_REGISTER_NODE_TYPE(SizeVarNode); TVM_REGISTER_NODE_TYPE(AddNode); TVM_REGISTER_NODE_TYPE(SubNode); TVM_REGISTER_NODE_TYPE(MulNode); diff --git a/src/pass/ir_functor.cc b/src/pass/ir_functor.cc index 680e491e2ab7..24cd26781c08 100644 --- a/src/pass/ir_functor.cc +++ b/src/pass/ir_functor.cc @@ -221,7 +221,7 @@ void StmtVisitor::VisitStmt_(const EvaluateNode* op) { void ExprVisitor::VisitExpr_(const VarNode* op) {} -void ExprVisitor::VisitExpr_(const ShapeVarNode* op) { +void ExprVisitor::VisitExpr_(const SizeVarNode* op) { this->VisitExpr_(static_cast(op)); } @@ -601,7 +601,7 @@ PrimExpr ExprMutator::VisitExpr_(const VarNode* op) { return GetRef(op); } -PrimExpr ExprMutator::VisitExpr_(const ShapeVarNode* op) { +PrimExpr ExprMutator::VisitExpr_(const SizeVarNode* op) { return this->VisitExpr_(static_cast(op)); } diff --git a/tests/python/contrib/test_sparse.py b/tests/python/contrib/test_sparse.py index 9a89c8b9092e..5425b196574e 100644 --- a/tests/python/contrib/test_sparse.py +++ b/tests/python/contrib/test_sparse.py @@ -25,8 +25,8 @@ def test_static_tensor(): stype = 'csr' target = 'llvm' ctx = tvm.context(target, 0) - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A = tvmsp.placeholder(shape=(m, n), name='A', dtype=dtype) assert(A.stype == 'csr') n = 3 @@ -50,7 +50,7 @@ def test_dynamic_tensor(): stype = 'csr' target = 'llvm' ctx = tvm.context(target, 0) - nr, nc, n = tvm.shape_var('nr'), tvm.shape_var('nc'), tvm.shape_var('n') + nr, nc, n = tvm.size_var('nr'), tvm.size_var('nc'), tvm.size_var('n') A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name='A', 
dtype=dtype) assert(A.stype == 'csr') C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter') @@ -76,7 +76,7 @@ def test_sparse_array_tuple(): stype = 'csr' target = 'llvm' ctx = tvm.context(target, 0) - nr, nc, n = tvm.shape_var('nr'), tvm.shape_var('nc'), tvm.shape_var('n') + nr, nc, n = tvm.size_var('nr'), tvm.size_var('nc'), tvm.size_var('n') A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name='A', dtype=dtype) assert(A.stype == 'csr') C = tvm.compute(A.data.shape, lambda i: A.data[i] * 2., tag='cs_scatter') diff --git a/tests/python/integration/test_ewise.py b/tests/python/integration/test_ewise.py index 02b6906f44b3..e3a1087acea3 100644 --- a/tests/python/integration/test_ewise.py +++ b/tests/python/integration/test_ewise.py @@ -57,7 +57,7 @@ def check_device(device, host="stackvm"): def test_fmod(): # graph def run(dtype): - n = tvm.shape_var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A', dtype=dtype) B = tvm.placeholder((n,), name='B', dtype=dtype) C = tvm.compute(A.shape, lambda *i: tvm.fmod(A(*i), B(*i)), name='C') @@ -140,7 +140,7 @@ def check_device(device, host="stackvm"): def test_log_pow_llvm(): # graph - n = tvm.shape_var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A') B = tvm.compute(A.shape, lambda *i: tvm.power(tvm.log(A(*i)), 2.0), name='B') s = tvm.create_schedule(B.op) @@ -207,7 +207,7 @@ def check_device(device): def test_add(): def run(dtype): # graph - n = tvm.shape_var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A', dtype=dtype) B = tvm.placeholder((n,), name='B', dtype=dtype) bias = tvm.var("bias", dtype=dtype) diff --git a/tests/python/integration/test_reduce.py b/tests/python/integration/test_reduce.py index bdaed442f46a..1f094c274d01 100644 --- a/tests/python/integration/test_reduce.py +++ b/tests/python/integration/test_reduce.py @@ -21,8 +21,8 @@ def test_reduce_prims(): def test_prim(reducer, np_reducer): # graph - n = tvm.shape_var('n') - m = tvm.shape_var('m') + n = tvm.size_var('n') + m = tvm.size_var('m') A = tvm.placeholder((n, m), name='A') R = tvm.compute((n, ), lambda i: tvm.expr.Select((i > 1), 1, 0), name='R') k = tvm.reduce_axis((0, m)) @@ -242,8 +242,8 @@ def fidentity(t0, t1): argmax = tvm.comm_reducer(fcombine, fidentity, name='argmax') - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') idx = tvm.placeholder((m, n), name='idx', dtype='int32') val = tvm.placeholder((m, n), name='val', dtype='float32') k = tvm.reduce_axis((0, n), 'k') diff --git a/tests/python/integration/test_scan.py b/tests/python/integration/test_scan.py index fba08f9a9f43..366ed3d4f1a5 100644 --- a/tests/python/integration/test_scan.py +++ b/tests/python/integration/test_scan.py @@ -18,8 +18,8 @@ import numpy as np def test_scan(): - m = tvm.shape_var("m") - n = tvm.shape_var("n") + m = tvm.size_var("m") + n = tvm.size_var("n") X = tvm.placeholder((m, n), name="X") s_state = tvm.placeholder((m, n)) s_init = tvm.compute((1, n), lambda _, i: X[0, i]) diff --git a/tests/python/relay/test_ir_text_printer.py b/tests/python/relay/test_ir_text_printer.py index 3bb50f50fcdc..e84de6765177 100644 --- a/tests/python/relay/test_ir_text_printer.py +++ b/tests/python/relay/test_ir_text_printer.py @@ -70,7 +70,7 @@ def test_env(): def test_meta_data(): - n, c, h, w = tvm.shape_var("n"), 10, 224, 224 + n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", shape=(n, c, h, w)) w = relay.var("w") z = relay.nn.conv2d(x, w, @@ -82,8 +82,8 @@ def test_meta_data(): 
text_no_meta = str(f) assert "channels=2" in text assert "channels=2" in text_no_meta - assert "meta[ShapeVar][0]" in text - assert "meta[ShapeVar][0]" in text_no_meta + assert "meta[SizeVar][0]" in text + assert "meta[SizeVar][0]" in text_no_meta assert "type_key" in text assert "type_key" not in text_no_meta diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py index d5202d006f94..adfcbb193de7 100644 --- a/tests/python/relay/test_op_level1.py +++ b/tests/python/relay/test_op_level1.py @@ -177,7 +177,7 @@ def test_bias_add(): def test_expand_dims_infer_type(): for dtype in ['float16', 'float32']: - n, t, d = tvm.shape_var("n"), tvm.shape_var("t"), 100 + n, t, d = tvm.size_var("n"), tvm.size_var("t"), 100 x = relay.var("x", shape=(n, t, d), dtype=dtype) y = relay.expand_dims(x, axis=2) assert "axis=2" in y.astext() @@ -227,7 +227,7 @@ def test_log_softmax(): def test_concatenate(): for dtype in ['float16', 'float32']: - n, t, d = tvm.shape_var("n"), tvm.shape_var("t"), 100 + n, t, d = tvm.size_var("n"), tvm.size_var("t"), 100 x = relay.var("x", shape=(n, t, d)) y = relay.var("y", shape=(n, t, d)) z = relay.concatenate((x, y), axis=-1) @@ -280,7 +280,7 @@ def test_concatenate(): def test_dropout(): for dtype in ['float16', 'float32']: - n, t, d = tvm.shape_var("n"), tvm.shape_var("t"), tvm.shape_var("d") + n, t, d = tvm.size_var("n"), tvm.size_var("t"), tvm.size_var("d") input_ty = relay.TensorType((n, t, d), dtype) x = relay.var("x", input_ty) y = relay.nn.dropout(x, rate=0.75) @@ -342,7 +342,7 @@ def test_dense(): # Dense accuracy for float16 is poor if dtype == 'float16': return - n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), dtype)) w = relay.var("w", relay.TensorType((2, w), dtype)) y = relay.nn.dense(x, w, units=2) @@ -350,15 +350,15 @@ def test_dense(): yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, h, 2), dtype) - n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), 2 + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), 2 x = relay.var("x", relay.TensorType((n, c, h, w), dtype)) - wh, ww = tvm.shape_var("wh"), tvm.shape_var("ww") + wh, ww = tvm.size_var("wh"), tvm.size_var("ww") w = relay.var("w", relay.TensorType((ww, wh), dtype)) y = relay.nn.dense(x, w) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, h, ww), dtype) - n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), 2 + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), 2 x = relay.var("x", relay.TensorType((n, c, h, w), dtype)) w = relay.var("w", relay.IncompleteType()) y = relay.nn.dense(x, w, units=2) @@ -388,7 +388,7 @@ def test_dense_dtype(): data_dtype = 'uint8' weight_dtype = 'int8' out_dtype = 'uint8' - n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), data_dtype)) w = relay.var("w", relay.TensorType((2, w), weight_dtype)) y = relay.nn.dense(x, w, units=2, out_dtype=out_dtype) @@ -400,7 +400,7 @@ def test_dense_dtype(): def test_bitserial_dense(): - m, k = tvm.shape_var("m"), tvm.shape_var("k") + m, k = tvm.size_var("m"), tvm.size_var("k") x = relay.var("x", relay.TensorType((m, k), 
"int16")) w = relay.var("w", relay.TensorType((k, 32), "int16")) y = relay.nn.bitserial_dense(x, w, units=32) diff --git a/tests/python/relay/test_op_level10.py b/tests/python/relay/test_op_level10.py index 42dd12d18a3f..bb1d346ac6e0 100644 --- a/tests/python/relay/test_op_level10.py +++ b/tests/python/relay/test_op_level10.py @@ -309,7 +309,7 @@ def verify_batch_matmul(x_shape, y_shape, out_shape, dtype="float32"): tvm.testing.assert_allclose(z.asnumpy(), z_np, rtol=1e-5) def test_batch_matmul(): - b, m, n, k = tvm.shape_var("b"), tvm.shape_var("m"), tvm.shape_var("n"), tvm.shape_var("k") + b, m, n, k = tvm.size_var("b"), tvm.size_var("m"), tvm.size_var("n"), tvm.size_var("k") x = relay.var("x", relay.TensorType((b, m, k), "float32")) y = relay.var("y", relay.TensorType((b, n, k), "float32")) z = relay.nn.batch_matmul(x, y) diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py index 06a463947a6b..2f0ad4dd0557 100644 --- a/tests/python/relay/test_op_level2.py +++ b/tests/python/relay/test_op_level2.py @@ -128,7 +128,7 @@ def run_test_conv1d(dtype, out_dtype, scale, dshape, kshape, def test_conv2d_infer_type(): # symbolic in batch dimension - n, c, h, w = tvm.shape_var("n"), 10, 224, 224 + n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.ty.TensorType((n, c, h, w), "float32")) w = relay.var("w") y = relay.nn.conv2d(x, w, @@ -142,7 +142,7 @@ def test_conv2d_infer_type(): (2, 10, 3, 3), "float32") # infer by shape of w, mixed precision - n, c, h, w = tvm.shape_var("n"), 10, 224, 224 + n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), "int8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8")) y = relay.nn.conv2d(x, w, out_dtype="int32") @@ -152,7 +152,7 @@ def test_conv2d_infer_type(): (n, 2, 222, 222), "int32") # infer shape in case of different dtypes for input and weight. - n, c, h, w = tvm.shape_var("n"), 10, 224, 224 + n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), "uint8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3), "int8")) y = relay.nn.conv2d(x, w, out_dtype="int32") @@ -391,7 +391,7 @@ def run_test_conv2d_cuda(dtype, out_dtype, scale, dshape, kshape, def test_conv3d_infer_type(): # symbolic in batch dimension - n, c, d, h, w = tvm.shape_var("n"), 10, 224, 224, 224 + n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224 x = relay.var("x", relay.ty.TensorType((n, c, d, h, w), "float32")) w = relay.var("w") y = relay.nn.conv3d(x, w, @@ -405,7 +405,7 @@ def test_conv3d_infer_type(): (2, 10, 3, 3, 3), "float32") # infer by shape of w, mixed precision - n, c, d, h, w = tvm.shape_var("n"), 10, 224, 224, 224 + n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224 x = relay.var("x", relay.TensorType((n, c, d, h, w), "int8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8")) y = relay.nn.conv3d(x, w, out_dtype="int32") @@ -415,7 +415,7 @@ def test_conv3d_infer_type(): (n, 2, 222, 222, 222), "int32") # infer shape in case of different dtypes for input and weight. 
- n, c, d, h, w = tvm.shape_var("n"), 10, 224, 224, 224 + n, c, d, h, w = tvm.size_var("n"), 10, 224, 224, 224 x = relay.var("x", relay.TensorType((n, c, d, h, w), "uint8")) w = relay.var("w", relay.TensorType((2, 10, 3, 3, 3), "int8")) y = relay.nn.conv3d(x, w, out_dtype="int32") @@ -530,7 +530,7 @@ def run_test_conv3d(dtype, out_dtype, scale, dshape, kshape, def test_conv2d_transpose_infer_type(): # symbolic in batch dimension - n, c, h, w = tvm.shape_var("n"), 10, 10, 12 + n, c, h, w = tvm.size_var("n"), 10, 10, 12 x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) w = relay.var("w", relay.IncompleteType()) y = relay.nn.conv2d_transpose(x, w, @@ -545,7 +545,7 @@ def test_conv2d_transpose_infer_type(): (10, 15, 3, 3), "float32") # infer by shape of w, mixed precision - n, h, w, c = tvm.shape_var("n"), 10, 10, 12 + n, h, w, c = tvm.size_var("n"), 10, 10, 12 x = relay.var("x", relay.TensorType((n, h, w, c), "float32")) w = relay.var("w", relay.TensorType((12, 11, 5, 5), "float32")) y = relay.nn.conv2d_transpose(x, w, @@ -629,7 +629,7 @@ def test_conv1d_transpose_ncw_run(): def test_upsampling_infer_type(): - n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") scale = tvm.const(2.0, "float64") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear") @@ -638,15 +638,15 @@ def test_upsampling_infer_type(): assert yy.checked_type == relay.TensorType((n, c, tvm.expr.Cast("int32", tvm.round(h*scale)), tvm.expr.Cast("int32", tvm.round(w*scale))), "float32") - n, c = tvm.shape_var("n"), tvm.shape_var("c") + n, c = tvm.size_var("n"), tvm.size_var("c") x = relay.var("x", relay.TensorType((n, c, 100, 200), "float32")) y = relay.nn.upsampling(x, scale_h=2, scale_w=2, layout="NCHW", method="bilinear") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200, 400), "float32") def test_upsampling3d_infer_type(): - n, c, d, h, w = tvm.shape_var("n"), tvm.shape_var("c"),\ - tvm.shape_var("d"), tvm.shape_var("h"), tvm.shape_var("w") + n, c, d, h, w = tvm.size_var("n"), tvm.size_var("c"),\ + tvm.size_var("d"), tvm.size_var("h"), tvm.size_var("w") scale = tvm.const(2.0, "float64") x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear") @@ -656,14 +656,14 @@ def test_upsampling3d_infer_type(): tvm.expr.Cast("int32", tvm.round(h*scale)), tvm.expr.Cast("int32", tvm.round(w*scale))), "float32") - n, c = tvm.shape_var("n"), tvm.shape_var("c") + n, c = tvm.size_var("n"), tvm.size_var("c") x = relay.var("x", relay.TensorType((n, c, 100, 100, 200), "float32")) y = relay.nn.upsampling3d(x, scale_d=2, scale_h=2, scale_w=2, layout="NCDHW", method="trilinear") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, c, 200, 200, 400), "float32") def _test_pool2d(opfunc, reffunc): - n, c, h, w = tvm.shape_var("n"), 10, 224, 224 + n, c, h, w = tvm.size_var("n"), 10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = opfunc(x, pool_size=(1, 1)) assert "pool_size=" in y.astext() @@ -683,7 +683,7 @@ def _test_pool2d(opfunc, reffunc): tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_pool2d_int(opfunc, reffunc, dtype): - n, c, h, w = tvm.shape_var("n"), 10, 224, 224 + n, c, h, w = tvm.size_var("n"), 
10, 224, 224 x = relay.var("x", relay.TensorType((n, c, h, w), dtype)) y = opfunc(x, pool_size=(1, 1)) assert "pool_size=" in y.astext() @@ -703,13 +703,13 @@ def _test_pool2d_int(opfunc, reffunc, dtype): tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def _test_global_pool2d(opfunc, reffunc): - n, c, h, w = tvm.shape_var("n"), tvm.shape_var("c"), 224, 224 + n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), 224, 224 x = relay.var("x", relay.TensorType((n, h, w, c), "float32")) y = opfunc(x, layout="NHWC") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((n, 1, 1, c), "float32") - n, c, h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") + n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = opfunc(x) yy = run_infer_type(y) @@ -768,7 +768,7 @@ def _test_pool1d(opfunc): def test_pool3d(): def _test_pool3d(opfunc): - n, c, d, h, w = tvm.shape_var("n"), 10, 5, 224, 224 + n, c, d, h, w = tvm.size_var("n"), 10, 5, 224, 224 x = relay.var("x", relay.TensorType((n, c, d, h, w), "float32")) y = opfunc(x, pool_size=(1, 1, 1)) assert "pool_size=" in y.astext() @@ -828,7 +828,7 @@ def test_avg_pool2d_no_count_pad(): tvm.testing.assert_allclose(op_res1.asnumpy(), ref_res, rtol=1e-5, atol=1e-5) def test_flatten_infer_type(): - d1, d2, d3, d4 = tvm.shape_var("d1"), tvm.shape_var("d2"), tvm.shape_var("d3"), tvm.shape_var("d4") + d1, d2, d3, d4 = tvm.size_var("d1"), tvm.size_var("d2"), tvm.size_var("d3"), tvm.size_var("d4") x = relay.var("x", relay.TensorType((d1, d2, d3, d4), "float32")) y = relay.nn.batch_flatten(x) yy = run_infer_type(y) @@ -873,7 +873,7 @@ def test_pad_infer_type(): assert yy.checked_type == relay.TensorType((3, 6, 9, 12), "float32") # some symbolic values - n, c, h, w = tvm.shape_var("n"), 2, 3, tvm.shape_var("w") + n, c, h, w = tvm.size_var("n"), 2, 3, tvm.size_var("w") t = relay.var("t", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.pad(t, ((1, 1), (2, 2), (3, 3), (4, 4))) yy = run_infer_type(y) @@ -896,7 +896,7 @@ def _test_run(dtype): _test_run('int32') def test_lrn(): - n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", shape=(n, c , h, w)) y = relay.nn.lrn(x, size=10, axis=2, bias=0.5, alpha=.00001, beta=0.75) "alpha=" in y.astext() @@ -927,7 +927,7 @@ def test_lrn(): tvm.testing.assert_allclose(op_res2.asnumpy(), ref_res, rtol=1e-5) def test_l2_normalize(): - n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", shape=(n, c , h, w)) y = relay.nn.l2_normalize(x, eps=0.001, axis=[1]) "axis=" in y.astext() @@ -977,7 +977,7 @@ def test_batch_flatten(): def _test_upsampling(layout, method, align_corners=False): - n, c, h, w = tvm.shape_var("n"), 16, 32, 32 + n, c, h, w = tvm.size_var("n"), 16, 32, 32 scale_h = 2.0 scale_w = 2.0 dtype = "float32" @@ -1016,7 +1016,7 @@ def test_upsampling(): _test_upsampling("NHWC", "bilinear", True) def _test_upsampling3d(layout, method, coordinate_transformation_mode="half_pixel"): - n, c, d, h, w = tvm.shape_var("n"), 8, 16, 16, 16 + n, c, d, h, w = tvm.size_var("n"), 8, 16, 16, 16 scale_d = 2.0 scale_h = 2.0 scale_w = 2.0 @@ -1183,7 +1183,7 @@ def 
_has_fast_int8_instructions(asm, target): def test_bitserial_conv2d_infer_type(): # Basic shape test with ambiguous batch. - n, c, h, w = tvm.shape_var("n"), 32, 224, 224 + n, c, h, w = tvm.size_var("n"), 32, 224, 224 x = relay.var("x", relay.ty.TensorType((n, c, h, w), "int16")) w = relay.var("w", relay.ty.TensorType((32, 32, 3, 3), "int16")) y = relay.nn.bitserial_conv2d( diff --git a/tests/python/relay/test_op_level3.py b/tests/python/relay/test_op_level3.py index 48281712a7e9..13f17ca6713b 100644 --- a/tests/python/relay/test_op_level3.py +++ b/tests/python/relay/test_op_level3.py @@ -171,7 +171,7 @@ def verify_squeeze(shape, dtype, axis): def test_transpose_infer_type(): - n, t, d = tvm.shape_var("n"), tvm.shape_var("t"), 100 + n, t, d = tvm.size_var("n"), tvm.size_var("t"), 100 x = relay.var("x", relay.TensorType((n, t, d), "float32")) y = relay.transpose(x, axes=(1, 0, 2)) assert "axes=" in y.astext() @@ -279,7 +279,7 @@ def test_reshape_like_infer_type(): assert zz.checked_type == relay.TensorType((1, 6), "float32") # symbolic shape - n, c, h, w = tvm.shape_var("n"), 2, 3, tvm.shape_var("w") + n, c, h, w = tvm.size_var("n"), 2, 3, tvm.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.var("y", relay.TensorType((1, 8, 8), "float32")) z = relay.reshape_like(x, y) @@ -452,7 +452,7 @@ def test_full_like_infer_type(): assert yy.checked_type == relay.TensorType((1, 2, 3), "float32") # symbolic shape - n, c, h, w = tvm.shape_var("n"), 2, 3, tvm.shape_var("w") + n, c, h, w = tvm.size_var("n"), 2, 3, tvm.size_var("w") base = relay.var("base", relay.TensorType((n, c, h, w), "float32")) fill = relay.var("fill", relay.TensorType((), "float32")) y = relay.full_like(base, fill) @@ -480,7 +480,7 @@ def verify_full_like(base, fill_value, dtype): def test_infer_type_leaky_relu(): - n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.leaky_relu(x, alpha=0.1) "alpha=0.1" in y.astext() @@ -544,7 +544,7 @@ def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"): def test_infer_type_prelu(): - n, c , h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") + n, c , h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") verify_infer_type_prelu((n, c, h, w), (c,), 1, (n, c, h, w)) verify_infer_type_prelu((n, h, w, c), (c,), 3, (n, h, w, c)) verify_infer_type_prelu((n, c, h, w), None, 1, (n, c, h, w)) diff --git a/tests/python/relay/test_op_level4.py b/tests/python/relay/test_op_level4.py index 18b03c23a226..2b25d6a90af6 100644 --- a/tests/python/relay/test_op_level4.py +++ b/tests/python/relay/test_op_level4.py @@ -29,7 +29,7 @@ def run_infer_type(expr): def test_binary_op(): def check_binary_op(opfunc, ref): - n = tvm.shape_var("n") + n = tvm.size_var("n") t1 = relay.TensorType((5, n, 5)) t2 = relay.TensorType((n, 1)) x = relay.var("x", t1) diff --git a/tests/python/relay/test_op_level5.py b/tests/python/relay/test_op_level5.py index f5e88478b46b..d4abf3d82ced 100644 --- a/tests/python/relay/test_op_level5.py +++ b/tests/python/relay/test_op_level5.py @@ -31,7 +31,7 @@ def run_infer_type(expr): return entry if isinstance(expr, relay.Function) else entry.body def test_resize_infer_type(): - n, c, h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") + n, c, h, w = 
tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "int8")) th, tw = tvm.var("th"), tvm.var("tw") z = relay.image.resize(x, (th, tw)) @@ -187,7 +187,7 @@ def verify_multibox_prior(x, dshape, ref_res, sizes=(1.0,), x = relay.var("x", relay.TensorType(dshape, "float32")) verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True) - y = relay.var("y", relay.TensorType((tvm.shape_var("n"), 3, 56, 56), "float32")) + y = relay.var("y", relay.TensorType((tvm.size_var("n"), 3, 56, 56), "float32")) verify_multibox_prior(x, dshape, ref_res, sizes, ratios, steps, offsets, check_size=True, check_type_only=True) @@ -195,7 +195,7 @@ def verify_multibox_prior(x, dshape, ref_res, sizes=(1.0,), ref_res = get_ref_result(dshape, clip=False) x = relay.var("x", relay.TensorType(dshape, "float32")) verify_multibox_prior(x, dshape, ref_res, clip=False) - y = relay.var("y", relay.TensorType((tvm.shape_var("n"), 24, 32, 32), "float32")) + y = relay.var("y", relay.TensorType((tvm.size_var("n"), 24, 32, 32), "float32")) verify_multibox_prior(x, dshape, ref_res, clip=False, check_type_only=True) @@ -287,7 +287,7 @@ def verify_nms(x0_data, x1_data, dshape, ref_res, ref_indices_res, np_indices_result = np.array([[3, 0, -1, -1, -1]]) num_anchors = 5 - dshape = (tvm.shape_var("n"), num_anchors, 6) + dshape = (tvm.size_var("n"), num_anchors, 6) verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result, force_suppress=True, top_k=2, check_type_only=True) dshape = (1, num_anchors, 6) @@ -298,7 +298,7 @@ def verify_nms(x0_data, x1_data, dshape, ref_res, ref_indices_res, [1, 0.7, 30, 60, 50, 80], [-1, -1, -1, -1, -1, -1], [-1, -1, -1, -1, -1, -1]]]) np_indices_result = np.array([[3, 0, 1, -1, -1]]) - dshape = (tvm.shape_var("n"), num_anchors, 6) + dshape = (tvm.size_var("n"), num_anchors, 6) verify_nms(np_data, np_valid_count, dshape, np_result, np_indices_result, check_type_only=True) dshape = (1, num_anchors, 6) @@ -361,7 +361,7 @@ def test_default_value(): def test_threshold(): num_anchors = 5 num_classes = 5 - n = tvm.shape_var("n") + n = tvm.size_var("n") cls_prob = relay.var( "cls_prob", relay.ty.TensorType((n, num_anchors, num_classes), "float32")) @@ -527,7 +527,7 @@ def verify_yolo_reorg(shape, stride, out_shape): assert "stride=" in z.astext() assert zz.checked_type == relay.ty.TensorType(out_shape, "float32") - n, c, h, w = tvm.shape_var("n"), tvm.shape_var("c"), tvm.shape_var("h"), tvm.shape_var("w") + n, c, h, w = tvm.size_var("n"), tvm.size_var("c"), tvm.size_var("h"), tvm.size_var("w") idxd = tvm.indexdiv verify_yolo_reorg((n, c, 20, 20), 10, (n, c*10*10, 2, 2)) verify_yolo_reorg((n, c, h, w), 2, (n, c*2*2, idxd(h, 2), idxd(w, 2))) diff --git a/tests/python/unittest/test_arith_const_int_bound.py b/tests/python/unittest/test_arith_const_int_bound.py index 735ee35065fd..ae2837d6446f 100644 --- a/tests/python/unittest/test_arith_const_int_bound.py +++ b/tests/python/unittest/test_arith_const_int_bound.py @@ -275,9 +275,9 @@ def test_mix_index_bound(): assert bd.max_value == (23 // 7) * 7 + 6 -def test_shape_var_bound(): +def test_size_var_bound(): analyzer = tvm.arith.Analyzer() - x = tvm.shape_var("x") + x = tvm.size_var("x") bd = analyzer.const_int_bound(x) assert bd.min_value == 0 assert bd.max_value == bd.POS_INF @@ -296,4 +296,4 @@ def test_shape_var_bound(): test_select_bound() test_shift_and_bound() test_mix_index_bound() - test_shape_var_bound() + test_size_var_bound() diff --git 
a/tests/python/unittest/test_arith_stmt_simplify.py b/tests/python/unittest/test_arith_stmt_simplify.py index 2668b93f1b26..9e0b47749fee 100644 --- a/tests/python/unittest/test_arith_stmt_simplify.py +++ b/tests/python/unittest/test_arith_stmt_simplify.py @@ -20,7 +20,7 @@ def test_stmt_simplify(): ib = tvm.ir_builder.create() A = ib.pointer("float32", name="A") C = ib.pointer("float32", name="C") - n = tvm.shape_var("n") + n = tvm.size_var("n") with ib.for_range(0, n, name="i") as i: with ib.if_scope(i < 12): A[i] = C[i] @@ -34,7 +34,7 @@ def test_thread_extent_simplify(): ib = tvm.ir_builder.create() A = ib.pointer("float32", name="A") C = ib.pointer("float32", name="C") - n = tvm.shape_var("n") + n = tvm.size_var("n") tx = tvm.thread_axis("threadIdx.x") ty = tvm.thread_axis("threadIdx.y") ib.scope_attr(tx, "thread_extent", n) @@ -48,7 +48,7 @@ def test_thread_extent_simplify(): def test_basic_likely_elimination(): - n = tvm.shape_var('n') + n = tvm.size_var('n') X = tvm.placeholder(shape=(n,), name="x") W = tvm.placeholder(shape=(n + 1,), dtype="int32", name="w") @@ -87,8 +87,8 @@ def sls(n, d): return tvm.compute(oshape, sls) - m, n, d, i, l = tvm.shape_var('m'), tvm.shape_var('n'), tvm.shape_var('d'),\ - tvm.shape_var('i'), tvm.shape_var('l') + m, n, d, i, l = tvm.size_var('m'), tvm.size_var('n'), tvm.size_var('d'),\ + tvm.size_var('i'), tvm.size_var('l') data_ph = tvm.placeholder((m, d * 32), name="data") indices_ph = tvm.placeholder((i,), name="indices", dtype="int32") lengths_ph = tvm.placeholder((n,), name="lengths", dtype="int32") diff --git a/tests/python/unittest/test_build_lower.py b/tests/python/unittest/test_build_lower.py index 106a9dab1a2e..58312dc83932 100644 --- a/tests/python/unittest/test_build_lower.py +++ b/tests/python/unittest/test_build_lower.py @@ -17,8 +17,8 @@ import tvm def test_lower_rfactor(): - n = tvm.shape_var("n") - m = tvm.shape_var("m") + n = tvm.size_var("n") + m = tvm.size_var("m") A = tvm.placeholder((n, m), name='A') k = tvm.reduce_axis((0, m), "k") B = tvm.compute((n,), lambda i: tvm.sum(A[i, k], axis=k), name="B") @@ -33,7 +33,7 @@ def test_lower_rfactor(): fapi = tvm.lower(s, [A, B]) def test_dependent_output_shape(): - n, m, x = tvm.shape_var('n'), tvm.shape_var('m'), tvm.shape_var('x') + n, m, x = tvm.size_var('n'), tvm.size_var('m'), tvm.size_var('x') A = tvm.placeholder((n, m)) B = tvm.compute((m, n//x), lambda i, j: A[i,j] , name='B') s = tvm.create_schedule(B.op) diff --git a/tests/python/unittest/test_codegen_arm.py b/tests/python/unittest/test_codegen_arm.py index 4aaa0ef7381f..8e2ad7aa76e0 100644 --- a/tests/python/unittest/test_codegen_arm.py +++ b/tests/python/unittest/test_codegen_arm.py @@ -47,7 +47,7 @@ def test_vmlal_s16(): target = 'llvm -target=armv7l-none-linux-gnueabihf -mcpu=cortex-a53 -mattr=+neon' def check_correct_assembly(N): - K = tvm.shape_var("K") + K = tvm.size_var("K") A = tvm.placeholder((K, N), dtype="int8", name='A') B = tvm.placeholder((K, N), dtype="int8", name='B') k = tvm.reduce_axis((0, K)) @@ -67,7 +67,7 @@ def check_correct_assembly(N): check_correct_assembly(64) def check_broadcast_correct_assembly(N): - K = tvm.shape_var("K") + K = tvm.size_var("K") A = tvm.placeholder((K, N), dtype="int8", name='A') B = tvm.placeholder((K,), dtype="int8", name='B') k = tvm.reduce_axis((0, K)) diff --git a/tests/python/unittest/test_codegen_c_host.py b/tests/python/unittest/test_codegen_c_host.py index 27f9ba1f14e8..c08fcd6afbc1 100644 --- a/tests/python/unittest/test_codegen_c_host.py +++ 
b/tests/python/unittest/test_codegen_c_host.py @@ -67,7 +67,7 @@ def check_c(): # Specifically allow offset to test codepath when offset is available Ab = tvm.decl_buffer( A.shape, A.dtype, - elem_offset=tvm.shape_var('Aoffset'), + elem_offset=tvm.size_var('Aoffset'), offset_factor=8, name='A') binds = {A : Ab} diff --git a/tests/python/unittest/test_codegen_device.py b/tests/python/unittest/test_codegen_device.py index 03b3c4f1cec7..4d4aeb7118a8 100644 --- a/tests/python/unittest/test_codegen_device.py +++ b/tests/python/unittest/test_codegen_device.py @@ -19,7 +19,7 @@ import numpy as np def test_add_pipeline(): - n = tvm.shape_var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A') B = tvm.placeholder((), name='B') C = tvm.compute(A.shape, lambda *i: A(*i) + B(), name='C') diff --git a/tests/python/unittest/test_codegen_llvm.py b/tests/python/unittest/test_codegen_llvm.py index 2cb7a14febcc..c13805d7e2b3 100644 --- a/tests/python/unittest/test_codegen_llvm.py +++ b/tests/python/unittest/test_codegen_llvm.py @@ -79,7 +79,7 @@ def check_llvm(use_file): def test_llvm_lookup_intrin(): ib = tvm.ir_builder.create() - m = tvm.shape_var("m") + m = tvm.size_var("m") A = ib.pointer("uint8x8", name="A") x = tvm.call_llvm_intrin("uint8x8", "llvm.ctpop.i8", tvm.const(1, 'uint32'), A) ib.emit(x) @@ -112,7 +112,7 @@ def check_llvm(): # Specifically allow offset to test codepath when offset is available Ab = tvm.decl_buffer( A.shape, A.dtype, - elem_offset=tvm.shape_var('Aoffset'), + elem_offset=tvm.size_var('Aoffset'), offset_factor=8, name='A') binds = {A : Ab} diff --git a/tests/python/unittest/test_codegen_rocm.py b/tests/python/unittest/test_codegen_rocm.py index 7a92dad5d335..9f8ab772c5f7 100644 --- a/tests/python/unittest/test_codegen_rocm.py +++ b/tests/python/unittest/test_codegen_rocm.py @@ -26,8 +26,8 @@ @unittest.skipIf(not tvm.rocm(0).exist or not tvm.module.enabled("rocm"), "skip because rocm is not enabled..") def test_rocm_cross_thread_reduction(): # based on the reduction tutorial - n = tvm.shape_var("n") - m = tvm.shape_var("m") + n = tvm.size_var("n") + m = tvm.size_var("m") A = tvm.placeholder((n, m), name='A') k = tvm.reduce_axis((0, m), "k") B = tvm.compute((n,), lambda i: tvm.sum(A[i, k], axis=k), name="B") diff --git a/tests/python/unittest/test_codegen_static_init.py b/tests/python/unittest/test_codegen_static_init.py index 4090ff5ed6d2..80c4fa4df0e8 100644 --- a/tests/python/unittest/test_codegen_static_init.py +++ b/tests/python/unittest/test_codegen_static_init.py @@ -20,9 +20,9 @@ def test_static_callback(): dtype = 'int64' - n = tvm.shape_var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) - i = tvm.shape_var('i') + i = tvm.size_var('i') ib = tvm.ir_builder.create() A = ib.buffer_ptr(Ab) cp = tvm.thread_axis((0, 1), "cop") @@ -41,9 +41,9 @@ def test_static_callback(): def test_static_init(): dtype = 'int64' - n = tvm.shape_var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) - i = tvm.shape_var('i') + i = tvm.size_var('i') ib = tvm.ir_builder.create() handle = tvm.call_intrin("handle", "tvm_static_handle") ib.emit( diff --git a/tests/python/unittest/test_codegen_vm_basic.py b/tests/python/unittest/test_codegen_vm_basic.py index 93740c7a27f2..eebcb2e71653 100644 --- a/tests/python/unittest/test_codegen_vm_basic.py +++ b/tests/python/unittest/test_codegen_vm_basic.py @@ -32,7 +32,7 @@ def tvm_call_back_get_shape(shape0): print(shape0) assert shape0 == a.shape[0] - n = tvm.shape_var('n') + n = tvm.size_var('n') Ab = 
tvm.decl_buffer((n, ), tvm.float32) stmt = tvm.make.Evaluate(tvm.call_packed("tvm_call_back_get_shape", Ab.shape[0])) fapi = tvm.ir_pass.MakeAPI(stmt, "print_shape", [Ab], 0, True) @@ -47,9 +47,9 @@ def tvm_stack_vm_print(*x): def test_stack_vm_loop(): dtype = 'int64' - n = tvm.shape_var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) - i = tvm.shape_var('i') + i = tvm.size_var('i') ib = tvm.ir_builder.create() A = ib.buffer_ptr(Ab) @@ -69,7 +69,7 @@ def check(f): def test_stack_vm_cond(): dtype = 'int64' - n = tvm.shape_var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) ib = tvm.ir_builder.create() @@ -93,9 +93,9 @@ def check(f): def test_vm_parallel(): dtype = 'int64' - n = tvm.shape_var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) - i = tvm.shape_var('i') + i = tvm.size_var('i') ib = tvm.ir_builder.create() A = ib.buffer_ptr(Ab) with ib.for_range(0, n, "i", for_type="parallel") as i: diff --git a/tests/python/unittest/test_hybrid_script.py b/tests/python/unittest/test_hybrid_script.py index fc4a29045d3a..253f13e8d1eb 100644 --- a/tests/python/unittest/test_hybrid_script.py +++ b/tests/python/unittest/test_hybrid_script.py @@ -98,8 +98,8 @@ def outer_product(n, m, a, b): #Test global function #Test bridge between frontend and backend def test_outer_product(): - n = tvm.shape_var('n') - m = tvm.shape_var('m') + n = tvm.size_var('n') + m = tvm.size_var('m') a = tvm.placeholder((n, ), name='a') b = tvm.placeholder((m, ), name='b') @@ -167,7 +167,7 @@ def fanout(n, a): b[i] = sigma return b - n = tvm.shape_var('n') + n = tvm.size_var('n') a = tvm.placeholder((n, ), 'float32', name='a') try: b = fanout(n, a) diff --git a/tests/python/unittest/test_ir_builder.py b/tests/python/unittest/test_ir_builder.py index dc54cfd20b99..527f68669281 100644 --- a/tests/python/unittest/test_ir_builder.py +++ b/tests/python/unittest/test_ir_builder.py @@ -19,7 +19,7 @@ def test_for(): ib = tvm.ir_builder.create() - n = tvm.shape_var("n") + n = tvm.size_var("n") A = ib.allocate("float32", n, name="A", scope="global") with ib.for_range(0, n, name="i") as i: A[i] = A[i] + 1 @@ -39,7 +39,7 @@ def test_for(): def test_if(): ib = tvm.ir_builder.create() - n = tvm.shape_var("n") + n = tvm.size_var("n") A = ib.pointer("float32", name="A") tmod = tvm.truncmod with ib.for_range(0, n, name="i") as i: @@ -60,7 +60,7 @@ def test_if(): def test_prefetch(): A = tvm.placeholder((10, 20), name="A") ib = tvm.ir_builder.create() - n = tvm.shape_var("n") + n = tvm.size_var("n") with ib.for_range(0, n, name="i") as i: ib.emit( @@ -105,7 +105,7 @@ def check_target(target): check_target("llvm") def test_gpu(): - n = tvm.shape_var('n') + n = tvm.size_var('n') dtype = "float32" A = tvm.placeholder((n,), name='A') B = tvm.placeholder((n,), name='B') diff --git a/tests/python/unittest/test_lang_buffer.py b/tests/python/unittest/test_lang_buffer.py index a465b9e6ca07..e681bd9a5230 100644 --- a/tests/python/unittest/test_lang_buffer.py +++ b/tests/python/unittest/test_lang_buffer.py @@ -19,9 +19,9 @@ import numpy as np def test_buffer(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') - l = tvm.shape_var('l') + m = tvm.size_var('m') + n = tvm.size_var('n') + l = tvm.size_var('l') Ab = tvm.decl_buffer((m, n), tvm.float32) Bb = tvm.decl_buffer((n, l), tvm.float32) @@ -31,8 +31,8 @@ def test_buffer(): def test_buffer_access_ptr(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') Ab = tvm.decl_buffer((m, n), tvm.float32, strides=[n + 1 , 
1]) aptr = Ab.access_ptr("rw") assert tvm.ir_pass.Equal(aptr.args[3], Ab.strides[0] * m) @@ -43,14 +43,14 @@ def test_buffer_access_ptr(): def test_buffer_access_ptr_offset(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') Ab = tvm.decl_buffer((m, n), tvm.float32) aptr = Ab.access_ptr("rw", offset=100) offset = tvm.ir_pass.Simplify(aptr.args[2]) assert tvm.ir_pass.Equal(offset, 100) assert aptr.args[4].value == Buffer.READ | Buffer.WRITE - v = tvm.shape_var('int32') + v = tvm.size_var('int32') aptr = Ab.access_ptr("rw", offset=100 + 100 + v) offset = tvm.ir_pass.Simplify(aptr.args[2]) assert tvm.ir_pass.Equal(offset, 200 + v) @@ -62,8 +62,8 @@ def test_buffer_access_ptr_offset(): def test_buffer_access_ptr_extent(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') Ab = tvm.decl_buffer((m, n), tvm.float32) aptr = Ab.access_ptr("rw") assert tvm.ir_pass.Equal(aptr.args[3], m * n) @@ -75,8 +75,8 @@ def test_buffer_access_ptr_extent(): def test_buffer_vload(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') Ab = tvm.decl_buffer((m, n), tvm.float32, elem_offset=100) load = Ab.vload([2, 3]) offset = tvm.ir_pass.Simplify(load.index) @@ -84,11 +84,11 @@ def test_buffer_vload(): def test_buffer_index_merge_mult_mod(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') - s = tvm.shape_var('s') - k0 = tvm.shape_var('k0') - k1 = tvm.shape_var('k1') + m = tvm.size_var('m') + n = tvm.size_var('n') + s = tvm.size_var('s') + k0 = tvm.size_var('k0') + k1 = tvm.size_var('k1') A = tvm.decl_buffer((m, n), tvm.float32) A_stride = tvm.decl_buffer((m, n), tvm.float32, strides=(s, 1)) def assert_simplified_equal(index_simplified, index_direct): @@ -123,9 +123,9 @@ def assert_simplified_equal(index_simplified, index_direct): def test_buffer_broadcast(): - m0, m1, m2 = tvm.shape_var("m0"), tvm.shape_var("m1"), tvm.shape_var("m2") - n0, n1, n2 = tvm.shape_var("n0"), tvm.shape_var("n1"), tvm.shape_var("n2") - o0, o1, o2 = tvm.shape_var("o0"), tvm.shape_var("o1"), tvm.shape_var("o2") + m0, m1, m2 = tvm.size_var("m0"), tvm.size_var("m1"), tvm.size_var("m2") + n0, n1, n2 = tvm.size_var("n0"), tvm.size_var("n1"), tvm.size_var("n2") + o0, o1, o2 = tvm.size_var("o0"), tvm.size_var("o1"), tvm.size_var("o2") A = tvm.placeholder((m0, m1, m2), name='A') B = tvm.placeholder((n0, n1, n2), name='B') @@ -151,9 +151,9 @@ def check(): def test_buffer_broadcast_expr(): - n0, m0, x = tvm.shape_var('n0'), tvm.shape_var('m0'), tvm.shape_var('x') - n1, m1 = tvm.shape_var('n1'), tvm.shape_var('m1') - o0, o1 = tvm.shape_var('o0'), tvm.shape_var('o1') + n0, m0, x = tvm.size_var('n0'), tvm.size_var('m0'), tvm.size_var('x') + n1, m1 = tvm.size_var('n1'), tvm.size_var('m1') + o0, o1 = tvm.size_var('o0'), tvm.size_var('o1') A = tvm.placeholder((m0, n0), name='A') B = tvm.placeholder((m1, n1), name='B') diff --git a/tests/python/unittest/test_lang_group.py b/tests/python/unittest/test_lang_group.py index ab4d7a47a9a6..3efc9bc5096b 100644 --- a/tests/python/unittest/test_lang_group.py +++ b/tests/python/unittest/test_lang_group.py @@ -18,8 +18,8 @@ import tvm def test_scan_group(): - m = tvm.shape_var("m") - n = tvm.shape_var("n") + m = tvm.size_var("m") + n = tvm.size_var("n") x = tvm.compute((m, n), lambda i, j: tvm.const(1, "float32"), name="x") s_state = tvm.placeholder((m, n)) s_init = tvm.compute((1, n), lambda _, i: x[0, i]) @@ -50,8 +50,8 @@ def test_scan_group(): pass def 
test_compute_group(): - m = tvm.shape_var("m") - n = tvm.shape_var("n") + m = tvm.size_var("m") + n = tvm.size_var("n") x = tvm.compute((m, n), lambda i, j: tvm.const(1, "float32"), name="x") x1 = tvm.compute(x.shape, lambda *i: x(*i) + 1, name="x1") x2 = tvm.compute(x.shape, lambda *i: x1(*i) + 2, name="x2") @@ -64,8 +64,8 @@ def test_compute_group(): assert g.num_child_stages == 2 def test_nest_group(): - m = tvm.shape_var("m") - n = tvm.shape_var("n") + m = tvm.size_var("m") + n = tvm.size_var("n") x = tvm.compute((m, n), lambda i, j: tvm.const(1, "float32"), name="x") x1 = tvm.compute(x.shape, lambda *i: x(*i) + 1, name="x1") x2 = tvm.compute(x.shape, lambda *i: x1(*i) + 2, name="x2") diff --git a/tests/python/unittest/test_lang_schedule.py b/tests/python/unittest/test_lang_schedule.py index 2ed627906cb7..eeab81b965b4 100644 --- a/tests/python/unittest/test_lang_schedule.py +++ b/tests/python/unittest/test_lang_schedule.py @@ -19,9 +19,9 @@ import pickle as pkl def test_schedule_create(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') - l = tvm.shape_var('l') + m = tvm.size_var('m') + n = tvm.size_var('n') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') B = tvm.placeholder((n, l), name='B') AA = tvm.compute((m, l), lambda i, j: A[i, j]) @@ -49,7 +49,7 @@ def test_schedule_create(): def test_reorder(): - m = tvm.shape_var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') T = tvm.compute(m, lambda i: A[i+1]) @@ -69,7 +69,7 @@ def test_reorder(): pass def test_split(): - m = tvm.shape_var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') T = tvm.compute((m,), lambda i: A[i]) @@ -79,8 +79,8 @@ def test_split(): def test_tile(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A = tvm.placeholder((m, n), name='A') T = tvm.compute((m, n), lambda i, j: A[i, j]) @@ -90,8 +90,8 @@ def test_tile(): def test_fuse(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A = tvm.placeholder((m, n), name='A') T = tvm.compute((m, n), lambda i, j: A[i, j]) @@ -119,8 +119,8 @@ def test_singleton(): print("test singleton fin") def test_vectorize(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A = tvm.placeholder((m, n), name='A') T = tvm.compute((m, n), lambda i, j: A[i, j]) @@ -156,7 +156,7 @@ def test_pragma(): def test_rfactor(): - n = tvm.shape_var('n') + n = tvm.size_var('n') k1 = tvm.reduce_axis((0, n), name="k1") k2 = tvm.reduce_axis((0, n), name="k2") A = tvm.placeholder((n, n, n), name='A') @@ -214,10 +214,10 @@ def intrin_func(ins, outs): assert(s[z].iter_var_attrs[xi].iter_type == tvm.schedule.IterVar.Tensorized) def test_tensor_intrin_scalar_params(): - n = tvm.shape_var("n") + n = tvm.size_var("n") x = tvm.placeholder((n,), name='x') - v = tvm.shape_var("v") - w = tvm.shape_var("w") + v = tvm.size_var("v") + w = tvm.size_var("w") z = tvm.compute((n,), lambda i: x[i]*v + w, name='z') def intrin_func(ins, outs, sp): diff --git a/tests/python/unittest/test_lang_tag.py b/tests/python/unittest/test_lang_tag.py index 1644a2ed55ee..fc884ea5bc92 100644 --- a/tests/python/unittest/test_lang_tag.py +++ b/tests/python/unittest/test_lang_tag.py @@ -33,9 +33,9 @@ def compute_conv(data, weight): axis=[ic, dh, dw])) def test_with(): - n = tvm.shape_var('n') - m = tvm.shape_var('m') - l = tvm.shape_var('l') + n = tvm.size_var('n') + m = tvm.size_var('m') + l = tvm.size_var('l') A = tvm.placeholder((n, l), name='A') B 
= tvm.placeholder((m, l), name='B') @@ -56,12 +56,12 @@ def test_with(): def test_decorator(): - n = tvm.shape_var('n') - c = tvm.shape_var('c') - h = tvm.shape_var('h') - w = tvm.shape_var('w') - kh = tvm.shape_var('kh') - kw = tvm.shape_var('kw') + n = tvm.size_var('n') + c = tvm.size_var('c') + h = tvm.size_var('h') + w = tvm.size_var('w') + kh = tvm.size_var('kh') + kw = tvm.size_var('kw') A = tvm.placeholder((n, c, h, w), name='A') B = tvm.placeholder((c, c, kh, kw), name='B') @@ -70,12 +70,12 @@ def test_decorator(): assert len(C.op.attrs) == 0 def test_nested(): - n = tvm.shape_var('n') - c = tvm.shape_var('c') - h = tvm.shape_var('h') - w = tvm.shape_var('w') - kh = tvm.shape_var('kh') - kw = tvm.shape_var('kw') + n = tvm.size_var('n') + c = tvm.size_var('c') + h = tvm.size_var('h') + w = tvm.size_var('w') + kh = tvm.size_var('kh') + kw = tvm.size_var('kw') A = tvm.placeholder((n, c, h, w), name='A') B = tvm.placeholder((c, c, kh, kw), name='B') diff --git a/tests/python/unittest/test_lang_tensor.py b/tests/python/unittest/test_lang_tensor.py index 9b64f4678d4d..e363a2cf11be 100644 --- a/tests/python/unittest/test_lang_tensor.py +++ b/tests/python/unittest/test_lang_tensor.py @@ -18,9 +18,9 @@ from topi.nn.pooling import pool def test_tensor(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') - l = tvm.shape_var('l') + m = tvm.size_var('m') + n = tvm.size_var('n') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') B = tvm.placeholder((n, l), name='B') T = tvm.compute((m, n, l), lambda i, j, k: A[i, k] * B[j, k]) @@ -37,7 +37,7 @@ def test_tensor(): def test_rank_zero(): - m = tvm.shape_var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') scale = tvm.placeholder((), name='s') k = tvm.reduce_axis((0, m), name="k") @@ -48,7 +48,7 @@ def test_rank_zero(): def test_conv1d(): - n = tvm.shape_var('n') + n = tvm.size_var('n') A = tvm.placeholder((n+2), name='A') def computeB(ii): i = ii + 1 @@ -57,14 +57,14 @@ def computeB(ii): def test_tensor_slice(): - n = tvm.shape_var('n') + n = tvm.size_var('n') A = tvm.compute((n, n), lambda i, j: 1) B = tvm.compute((n,), lambda i: A[0][i] + A[0][i]) def test_tensor_reduce_multi_axis(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A = tvm.placeholder((m, n), name='A') k1 = tvm.reduce_axis((0, n), "k") k2 = tvm.reduce_axis((0, m), "k") @@ -73,23 +73,23 @@ def test_tensor_reduce_multi_axis(): def test_tensor_comm_reducer(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A = tvm.placeholder((m, n), name='A') k = tvm.reduce_axis((0, n), "k") mysum = tvm.comm_reducer(lambda x, y: x+y, lambda t: tvm.const(0, dtype=t)) C = tvm.compute((m,), lambda i: mysum(A[i, k], axis=k)) def test_tensor_comm_reducer_overload(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') mysum = tvm.comm_reducer(lambda x, y: x+y, lambda t: tvm.const(0, dtype=t)) sum_res = mysum(m, n) def test_tensor_reduce(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') - l = tvm.shape_var('l') + m = tvm.size_var('m') + n = tvm.size_var('n') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') B = tvm.placeholder((n, l), name='B') T = tvm.compute((m, n, l), lambda i, j, k: A[i, k] * B[j, k]) @@ -175,8 +175,8 @@ def intrin_func(ins, outs): assert isinstance(stmt.body.body.body[1].body, tvm.stmt.Evaluate) def test_tensor_scan(): - m = tvm.shape_var("m") - n = tvm.shape_var("n") + m = tvm.size_var("m") + n = 
tvm.size_var("n") x = tvm.placeholder((m, n)) s = tvm.placeholder((m, n)) res = tvm.scan(tvm.compute((1, n), lambda _, i: x[0, i]), @@ -185,8 +185,8 @@ def test_tensor_scan(): assert tuple(res.shape) == (m, n) def test_scan_multi_out(): - m = tvm.shape_var("m") - n = tvm.shape_var("n") + m = tvm.size_var("m") + n = tvm.size_var("n") x1 = tvm.placeholder((m, n)) s1 = tvm.placeholder((m, n)) x2 = tvm.placeholder((m, n)) @@ -206,7 +206,7 @@ def test_scan_multi_out(): assert isinstance(zz, tvm.tensor.ScanOp) def test_extern(): - m = tvm.shape_var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') def extern_func(ins, outs): @@ -217,7 +217,7 @@ def extern_func(ins, outs): def test_extern_multi_out(): - m = tvm.shape_var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') B = tvm.compute((m,), lambda i: A[i] * 10) @@ -230,8 +230,8 @@ def extern_func(ins, outs): assert(res[1].value_index == 1) def test_tuple_inputs(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A0 = tvm.placeholder((m, n), name='A0') A1 = tvm.placeholder((m, n), name='A1') T0, T1 = tvm.compute((m, n), lambda i, j: (A0[i, j] * 2, A1[i, j] * 3), name='T') @@ -244,8 +244,8 @@ def test_tuple_inputs(): assert(T1.value_index == 1) def test_tuple_with_different_deps(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') A0 = tvm.placeholder((m, n), name='A1') A1 = tvm.placeholder((m, n), name='A2') B0, B1 = tvm.compute((m, n), lambda i, j: (A0[i, j] * 2, A1[i, j] * 3), name='B') diff --git a/tests/python/unittest/test_lang_tensor_overload_op.py b/tests/python/unittest/test_lang_tensor_overload_op.py index 366005ffcbc0..3ccd6f9cd2ac 100644 --- a/tests/python/unittest/test_lang_tensor_overload_op.py +++ b/tests/python/unittest/test_lang_tensor_overload_op.py @@ -87,7 +87,7 @@ def test_combination(): def verify_tensor_scalar_bop(shape, typ="add"): """Verify non-constant Tensor and scalar binary operations.""" - sh = [tvm.shape_var('n%d' % i) for i in range(0, len(shape))] + sh = [tvm.size_var('n%d' % i) for i in range(0, len(shape))] k = tvm.var('k') A = tvm.placeholder(sh, name='A') if typ == "add": diff --git a/tests/python/unittest/test_lang_verify_compute.py b/tests/python/unittest/test_lang_verify_compute.py index 18344e073987..6d17a0ce2372 100644 --- a/tests/python/unittest/test_lang_verify_compute.py +++ b/tests/python/unittest/test_lang_verify_compute.py @@ -17,8 +17,8 @@ import tvm def test_verify_compute(): - n = tvm.shape_var("n") - m = tvm.shape_var("m") + n = tvm.size_var("n") + m = tvm.size_var("m") A = tvm.placeholder((n, m), name='A') k = tvm.reduce_axis((0, m), "k") k_ = tvm.reduce_axis((0, m-1), "k_") diff --git a/tests/python/unittest/test_module_load.py b/tests/python/unittest/test_module_load.py index b0250b666f3d..e8e43352987e 100644 --- a/tests/python/unittest/test_module_load.py +++ b/tests/python/unittest/test_module_load.py @@ -46,7 +46,7 @@ def test_dso_module_load(): temp = util.tempdir() def save_object(names): - n = tvm.shape_var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) i = tvm.var('i') # for i in 0 to n-1: diff --git a/tests/python/unittest/test_pass_bound_checkers.py b/tests/python/unittest/test_pass_bound_checkers.py index e7b649cc3bb2..2cefe22432b0 100644 --- a/tests/python/unittest/test_pass_bound_checkers.py +++ b/tests/python/unittest/test_pass_bound_checkers.py @@ -46,7 +46,7 @@ def lower(sch, args): @pytest.mark.xfail def test_out_of_bounds_llvm(index_a, 
index_b): - n = tvm.shape_var("n") + n = tvm.size_var("n") A = tvm.placeholder ((n,), name='A') B = tvm.placeholder ((n,), name='B') C = tvm.compute(A.shape, lambda i: A[i + index_a] + B[i + index_b], name='C') @@ -63,7 +63,7 @@ def test_out_of_bounds_llvm(index_a, index_b): fadd (a, b, c) def test_in_bounds_llvm(): - n = tvm.shape_var("n") + n = tvm.size_var("n") A = tvm.placeholder ((n,), name='A') B = tvm.placeholder ((n,), name='B') C = tvm.compute(A.shape, lambda i: A[i] + B[i], name='C') @@ -128,7 +128,7 @@ def test_in_bounds_vectorize_llvm(): tvm.testing.assert_allclose(c.asnumpy(), a.asnumpy() + 1) def test_in_bounds_loop_partition_basic_llvm(): - n = tvm.shape_var('n') + n = tvm.size_var('n') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((n, ), name='B') @@ -147,7 +147,7 @@ def test_in_bounds_loop_partition_basic_llvm(): @pytest.mark.xfail def test_out_of_bounds_loop_partition_basic_llvm(index_a, index_b): - n = tvm.shape_var('n') + n = tvm.size_var('n') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((n, ), name='B') @@ -331,9 +331,9 @@ def test_out_of_bounds_conv_llvm(data_offsets, kernel_offsets, loop_tiling=False f(data_input, kernel_input, conv_out) def test_in_bounds_tensors_with_same_shapes1D_llvm(): - n = tvm.shape_var('n') - k = tvm.shape_var('k') - m = tvm.shape_var('m') + n = tvm.size_var('n') + k = tvm.size_var('k') + m = tvm.size_var('m') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((k, ), name='B') @@ -351,9 +351,9 @@ def test_in_bounds_tensors_with_same_shapes1D_llvm(): @pytest.mark.xfail def test_out_of_bounds_tensors_with_diff_shapes1D_llvm(a_shape, b_shape, c_shape): - n = tvm.shape_var('n') - k = tvm.shape_var('k') - m = tvm.shape_var('m') + n = tvm.size_var('n') + k = tvm.size_var('k') + m = tvm.size_var('m') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((k, ), name='B') @@ -370,9 +370,9 @@ def test_out_of_bounds_tensors_with_diff_shapes1D_llvm(a_shape, b_shape, c_shape f(a, b, t) def test_in_bounds_tensors_with_same_shapes2D_llvm(): - n = tvm.shape_var('n') - k = tvm.shape_var('k') - m = tvm.shape_var('m') + n = tvm.size_var('n') + k = tvm.size_var('k') + m = tvm.size_var('m') A = tvm.placeholder((n, n), name='A') B = tvm.placeholder((k, k), name='B') @@ -390,9 +390,9 @@ def test_in_bounds_tensors_with_same_shapes2D_llvm(): @pytest.mark.xfail def test_out_of_bounds_tensors_with_diff_shapes2D_llvm(a_shape, b_shape, c_shape): - n = tvm.shape_var('n') - k = tvm.shape_var('k') - m = tvm.shape_var('m') + n = tvm.size_var('n') + k = tvm.size_var('k') + m = tvm.size_var('m') A = tvm.placeholder((n, n), name='A') B = tvm.placeholder((k, k), name='B') @@ -409,9 +409,9 @@ def test_out_of_bounds_tensors_with_diff_shapes2D_llvm(a_shape, b_shape, c_shape f(a, b, t) def test_in_bounds_tensors_with_same_shapes3D_llvm(): - n = tvm.shape_var('n') - k = tvm.shape_var('k') - m = tvm.shape_var('m') + n = tvm.size_var('n') + k = tvm.size_var('k') + m = tvm.size_var('m') A = tvm.placeholder((n, n, n), name='A') B = tvm.placeholder((k, k, k), name='B') @@ -429,9 +429,9 @@ def test_in_bounds_tensors_with_same_shapes3D_llvm(): @pytest.mark.xfail def test_out_of_bounds_tensors_with_diff_shapes3D_llvm(a_shape, b_shape, c_shape): - n = tvm.shape_var('n') - k = tvm.shape_var('k') - m = tvm.shape_var('m') + n = tvm.size_var('n') + k = tvm.size_var('k') + m = tvm.size_var('m') A = tvm.placeholder((n, n, n), name='A') B = tvm.placeholder((k, k, k), name='B') diff --git a/tests/python/unittest/test_pass_decorate_device_scope.py 
b/tests/python/unittest/test_pass_decorate_device_scope.py index f2cac34b7970..9ffd56544ebc 100644 --- a/tests/python/unittest/test_pass_decorate_device_scope.py +++ b/tests/python/unittest/test_pass_decorate_device_scope.py @@ -17,8 +17,8 @@ import tvm def test_decorate_device(): - m = tvm.shape_var('m') - l = tvm.shape_var('l') + m = tvm.size_var('m') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') A1 = tvm.compute((m, l), lambda i, j: A[i, j], name='A1') diff --git a/tests/python/unittest/test_pass_inline.py b/tests/python/unittest/test_pass_inline.py index fa48d6cb548b..511a1438f4be 100644 --- a/tests/python/unittest/test_pass_inline.py +++ b/tests/python/unittest/test_pass_inline.py @@ -17,7 +17,7 @@ import tvm def test_inline(): - m = tvm.shape_var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') T = tvm.compute((m,), lambda i,: A[i] + 10, name='T') stmt = tvm.make.Evaluate(T[10] + 11 * T[100]) @@ -36,7 +36,7 @@ def test_inline(): pass def test_inline2(): - m = tvm.shape_var('m') + m = tvm.size_var('m') A = tvm.placeholder((m,), name='A') T = tvm.compute((m,), lambda i,: A[i] + 10, name='T') stmt = tvm.make.Evaluate(tvm.exp(T[10]) + 11 * T[100]) diff --git a/tests/python/unittest/test_pass_loop_partition.py b/tests/python/unittest/test_pass_loop_partition.py index 99332c6033f1..9812660d2ad1 100644 --- a/tests/python/unittest/test_pass_loop_partition.py +++ b/tests/python/unittest/test_pass_loop_partition.py @@ -52,7 +52,7 @@ def lower(sch, args): return stmt def test_basic(): - n = tvm.shape_var('n') + n = tvm.size_var('n') A = tvm.placeholder((n, ), name='A') B = tvm.placeholder((n, ), name='B') @@ -84,8 +84,8 @@ def test_const_loop(): def test_multi_loop(): ib = tvm.ir_builder.create() - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') with ib.for_range(0, 4, "i") as i: with ib.for_range(0, n, "j") as j: with ib.for_range(0, m, "k") as k: @@ -100,8 +100,8 @@ def test_multi_loop(): def test_multi_if(): ib = tvm.ir_builder.create() - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') with ib.for_range(0, 4, 'i') as i: with ib.for_range(0, n, 'j') as j: with ib.for_range(0, m, 'k') as k: @@ -119,8 +119,8 @@ def test_multi_if(): assert('if' not in str(stmt.body[0])) def test_thread_axis(): - m = tvm.shape_var('m') - l = tvm.shape_var('l') + m = tvm.size_var('m') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') B = tvm.compute((m, l), lambda i, j: A[i, j] + 3, name='B') s = tvm.create_schedule(B.op) @@ -138,11 +138,11 @@ def test_thread_axis(): assert('if' not in str(stmt.body.body.body[0])) def test_vectorize(): - n = tvm.shape_var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A') B = tvm.placeholder((n,), name='B') - bias = tvm.shape_var("bias", dtype="float32") - scale = tvm.shape_var("scale", dtype="float32") + bias = tvm.size_var("bias", dtype="float32") + scale = tvm.size_var("scale", dtype="float32") C = tvm.compute(A.shape, lambda *i: A(*i) + B(*i) * scale + bias, name='C') # schedule s = tvm.create_schedule(C.op) @@ -161,8 +161,8 @@ def test_vectorize(): def test_condition(): ib = tvm.ir_builder.create() - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') with ib.for_range(0, tvm.truncdiv(n+3,4), 'i') as i: with ib.for_range(0, 4, 'j') as j: ib.emit(tvm.make.Evaluate( @@ -174,8 +174,8 @@ def test_condition(): def test_condition_EQ(): ib = tvm.ir_builder.create() - m = 
tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') with ib.for_range(0, 10, 'i') as i: ib.emit(tvm.make.Evaluate( tvm.make.Select(ib.likely(tvm.expr.EQ(i, 5)), m, n))) @@ -186,7 +186,7 @@ def test_condition_EQ(): def test_thread_axis2(): n = tvm.convert(4096) - m = tvm.shape_var('m') + m = tvm.size_var('m') A = tvm.placeholder((n,), name='A') B = tvm.placeholder((n,), name='B') C = tvm.compute(A.shape, lambda i: A[i] + B[i], name='C') @@ -202,8 +202,8 @@ def test_thread_axis2(): assert('threadIdx' not in str(for_body.extent)) def test_everything_during_deduction(): - m = tvm.shape_var('m') - n = tvm.shape_var('n') + m = tvm.size_var('m') + n = tvm.size_var('n') ib = tvm.ir_builder.create() with ib.for_range(0, n, 'i') as i: with ib.for_range(0, 32, 'j') as j: @@ -253,7 +253,7 @@ def test_multi_likely(): assert(not any(collect_visit(stmt, lambda x: isinstance(x, tvm.stmt.IfThenElse)))) def test_oneD_pool(): - m = tvm.shape_var('m') + m = tvm.size_var('m') ib = tvm.ir_builder.create() #data = tvm.placeholder((16,), name = 'data') data = ib.pointer("float32", name="A") diff --git a/tests/python/unittest/test_pass_makeapi.py b/tests/python/unittest/test_pass_makeapi.py index 77e1e5852443..34f32ef01c7c 100644 --- a/tests/python/unittest/test_pass_makeapi.py +++ b/tests/python/unittest/test_pass_makeapi.py @@ -19,7 +19,7 @@ def test_makeapi(): """Not yet working, mock design""" - n = tvm.shape_var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A') B = tvm.placeholder((n,), name='B') C = tvm.compute(A.shape, lambda *i: A(*i) + B(*i), name='C') diff --git a/tests/python/unittest/test_pass_split_host_device.py b/tests/python/unittest/test_pass_split_host_device.py index 4956332d3161..e8858b8aa41e 100644 --- a/tests/python/unittest/test_pass_split_host_device.py +++ b/tests/python/unittest/test_pass_split_host_device.py @@ -19,7 +19,7 @@ @pytest.mark.xfail def test_loop_dependent_allocate(): - N = tvm.shape_var("N") + N = tvm.size_var("N") A = tvm.placeholder((2*N,), "float32", "A") C = tvm.compute((N, ), lambda i: A[2*i] + A[i+1], name='C') s = tvm.create_schedule(C.op) diff --git a/tests/python/unittest/test_pass_storage_flatten.py b/tests/python/unittest/test_pass_storage_flatten.py index 6c6aaa563a33..2bee66c0a42e 100644 --- a/tests/python/unittest/test_pass_storage_flatten.py +++ b/tests/python/unittest/test_pass_storage_flatten.py @@ -17,8 +17,8 @@ import tvm def test_flatten2(): - m = tvm.shape_var('m') - l = tvm.shape_var('l') + m = tvm.size_var('m') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') A1 = tvm.compute((m, l), lambda i, j: A[i, j], name='A1') A2 = tvm.compute((m, l), lambda i, j: A1[i, j] + 3, name='A2') @@ -38,8 +38,8 @@ def test_flatten2(): def test_flatten_prefetch(): A = tvm.placeholder((25, 100, 4), name = 'A') _A= tvm.decl_buffer(A.shape, A.dtype, name = 'A'); - i = tvm.shape_var('i') - j = tvm.shape_var('j') + i = tvm.size_var('i') + j = tvm.size_var('j') region = [tvm.make.range_by_min_extent(i[0], i[1]) for i in [(i, 2), (j, 8), (0, 4)]] stmt = tvm.make.Prefetch(A.op, 0, A.dtype, region) stmt = tvm.ir_pass.StorageFlatten(stmt, {A: _A}, 64) diff --git a/tests/python/unittest/test_pass_storage_sync.py b/tests/python/unittest/test_pass_storage_sync.py index a9ce5ddca710..55596eea4579 100644 --- a/tests/python/unittest/test_pass_storage_sync.py +++ b/tests/python/unittest/test_pass_storage_sync.py @@ -17,8 +17,8 @@ import tvm def test_storage_sync(): - m = tvm.shape_var('m') - l = tvm.shape_var('l') + m = 
tvm.size_var('m') + l = tvm.size_var('l') A = tvm.placeholder((m, l), name='A') A1 = tvm.compute((m, l), lambda i, j: A[i, j], name='A1') @@ -54,7 +54,7 @@ def meminfo_cache(): max_num_bits=128, head_address=tvm.call_extern("handle", "global_cache")) ib = tvm.ir_builder.create() - n = tvm.shape_var("n") + n = tvm.size_var("n") cp = tvm.thread_axis((0, 1), "cop") A = ib.allocate("float32", 128, name="A", scope="global.cache") with ib.for_range(0, n, name="i") as i: @@ -76,7 +76,7 @@ def meminfo_cache(): def test_coproc_sync2(): ib = tvm.ir_builder.create() - n = tvm.shape_var("n") + n = tvm.size_var("n") cp = tvm.thread_axis((0, 1), "cop") ty = tvm.thread_axis("cthread") A = ib.allocate("float32", 128, name="A") @@ -102,7 +102,7 @@ def __check_list(tvm_array, py_list): return True ib = tvm.ir_builder.create() - n = tvm.shape_var("n") + n = tvm.size_var("n") cp = tvm.thread_axis((0, 1), "cop") A = ib.allocate("float32", 128, name="A", scope="global.cache") with ib.for_range(0, n, name="i") as i: diff --git a/tests/python/unittest/test_pass_unroll.py b/tests/python/unittest/test_pass_unroll.py index 856daa5e89bf..e5ef9d0aa2f4 100644 --- a/tests/python/unittest/test_pass_unroll.py +++ b/tests/python/unittest/test_pass_unroll.py @@ -21,7 +21,7 @@ def test_unroll_loop(): ib = tvm.ir_builder.create() dtype = 'int64' - n = tvm.shape_var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) Aptr = ib.buffer_ptr(Ab) # for i in 0 to n-1: @@ -54,7 +54,7 @@ def test_unroll_loop(): def test_unroll_fake_loop(): ib = tvm.ir_builder.create() dtype = 'int32' - n = tvm.shape_var('n') + n = tvm.size_var('n') Ab = tvm.decl_buffer((n, ), dtype) Aptr = ib.buffer_ptr(Ab) # for i in 0 to n-1: @@ -68,7 +68,7 @@ def test_unroll_fake_loop(): assert isinstance(ret[0], tvm.stmt.Store) def test_unroll_single_count_loops(): - n = tvm.shape_var('n') + n = tvm.size_var('n') A = tvm.placeholder((n,), name='A') B = tvm.compute((n,), lambda *i: A(*i), name='B') s = tvm.create_schedule(B.op) From 42ba28b94b54ce0895507566954b6baa82bfe665 Mon Sep 17 00:00:00 2001 From: Yizhi Liu Date: Wed, 15 Jan 2020 13:15:35 -0800 Subject: [PATCH 16/17] shape_var -> size_var in doc --- docs/api/python/tvm.rst | 4 ++-- python/tvm/build_module.py | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/docs/api/python/tvm.rst b/docs/api/python/tvm.rst index 1a92f3eb63ae..19762fb20d97 100644 --- a/docs/api/python/tvm.rst +++ b/docs/api/python/tvm.rst @@ -24,7 +24,7 @@ The user facing API for computation declaration. tvm.load_json tvm.save_json tvm.var - tvm.shape_var + tvm.size_var tvm.const tvm.convert tvm.placeholder @@ -50,7 +50,7 @@ The user facing API for computation declaration. .. autofunction:: tvm.load_json .. autofunction:: tvm.save_json .. autofunction:: tvm.var -.. autofunction:: tvm.shape_var +.. autofunction:: tvm.size_var .. autofunction:: tvm.const .. autofunction:: tvm.convert .. 
autofunction:: tvm.placeholder
diff --git a/python/tvm/build_module.py b/python/tvm/build_module.py
index 761da180406c..85d2b8514779 100644
--- a/python/tvm/build_module.py
+++ b/python/tvm/build_module.py
@@ -292,7 +292,6 @@ def get_binds(args, compact=False, binds=None):
     binds = {} if binds is None else binds.copy()
     cfg = current_build_config()
     arg_list = []
-
     for x in args:
         if isinstance(x, tensor.Tensor):
             any_dim = any(isinstance(i, expr.Var) for i in x.shape)

From a9d6310a4f73a897158699a2be61ffe0ad81a659 Mon Sep 17 00:00:00 2001
From: Yizhi Liu
Date: Wed, 15 Jan 2020 16:59:03 -0800
Subject: [PATCH 17/17] tindex -> size

---
 python/tvm/api.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/python/tvm/api.py b/python/tvm/api.py
index ac2826d6609c..4338b5564980 100644
--- a/python/tvm/api.py
+++ b/python/tvm/api.py
@@ -192,7 +192,7 @@ def var(name="tindex", dtype=int32):
     return _api_internal._Var(name, dtype)
 
 
-def size_var(name="tindex", dtype=int32):
-    """Create a new variable represents a tensor shape size, which is non-negative.
+def size_var(name="size", dtype=int32):
+    """Create a new variable representing a tensor shape size, which is non-negative.
 
     Parameters
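A minimal usage sketch, not part of any patch in this series: it assumes the Python frontend after the series exposes tvm.size_var alongside tvm.var, with the non-negative semantics described in the docstring above, and the workload and names below are made up for illustration.

    # Hypothetical example: symbolic, non-negative tensor extents via tvm.size_var.
    import tvm

    n = tvm.size_var("n")   # extents that arithmetic passes may assume to be >= 0
    m = tvm.size_var("m")

    A = tvm.placeholder((n, m), name="A")
    B = tvm.compute((n, m), lambda i, j: A[i, j] + 1.0, name="B")
    s = tvm.create_schedule(B.op)

    # Lowering proceeds exactly as with tvm.var extents; the difference is that
    # bound deduction and index simplification may now use n >= 0 and m >= 0.
    print(tvm.lower(s, [A, B], simple_mode=True))

With plain tvm.var extents the same program still builds, but passes such as constant-bound analysis cannot assume the shape values are non-negative, which is the motivation for introducing a dedicated variable kind.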