diff --git a/contrib/tvmop/basic/ufunc.py b/contrib/tvmop/basic/ufunc.py
index 87356428428f..912263ef4575 100644
--- a/contrib/tvmop/basic/ufunc.py
+++ b/contrib/tvmop/basic/ufunc.py
@@ -21,9 +21,9 @@ from .. import assign_by_req, reduce_axes
 
 
 def compute_add(dtype, ndim):
-    A = tvm.placeholder([tvm.var() for _ in range(ndim)], name='A', dtype=dtype)
-    B = tvm.placeholder([tvm.var() for _ in range(ndim)], name='B', dtype=dtype)
-    C = tvm.compute([tvm.var() for _ in range(ndim)],
+    A = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='A', dtype=dtype)
+    B = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='B', dtype=dtype)
+    C = tvm.compute([tvm.size_var() for _ in range(ndim)],
                     lambda *index: A[index] + B[index], name='C')
     s = tvm.create_schedule(C.op)
     return s, A, B, C
@@ -62,7 +62,7 @@ def compute_backward_vadd(dtype, ndim, reduce1st, req):
     # They compressed bit string is stored in `axes`. And `reduce1st` represents the first bit
     # of the compressed bit string. Credit to @junrushao1994 and @yzhliu.
     axes = ([reduce1st, 1 - reduce1st] * ndim)[:ndim]
-    X = tvm.placeholder([tvm.var() for _ in range(ndim)], name='X', dtype=dtype)
+    X = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='X', dtype=dtype)
     reducer = tvm.comm_reducer(lambda x, y: x + y,
                                lambda t: tvm.const(0, dtype=t), name="sum")
     ret = reduce_axes(X, axes, reducer)
@@ -101,13 +101,13 @@ def backward_vadd_gpu(dtype, ndim, reduce1st, req):
 
 
 def compute_degandrad(dtype, ndim, n):
-    A = tvm.placeholder([tvm.var() for _ in range(ndim)], name='A', dtype=dtype)
+    A = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='A', dtype=dtype)
     import math
     if n == 0:
-        B = tvm.compute([tvm.var() for _ in range(ndim)],
+        B = tvm.compute([tvm.size_var() for _ in range(ndim)],
                         lambda *index: A[index] * tvm.const(math.pi, dtype) / tvm.const(180, dtype), name='B')
     else:
-        B = tvm.compute([tvm.var() for _ in range(ndim)],
+        B = tvm.compute([tvm.size_var() for _ in range(ndim)],
                         lambda *index: A[index] / tvm.const(math.pi, dtype) * tvm.const(180, dtype), name='B')
     s = tvm.create_schedule(B.op)
     return s, A, B
@@ -160,7 +160,7 @@ def rad2deg_gpu(dtype, ndim):
 
 
 def compute_backward_degandrad(dtype, ndim, req, n):
-    ishape = [tvm.var() for _ in range(ndim)]
+    ishape = [tvm.size_var() for _ in range(ndim)]
     in_grad_tmp = tvm.placeholder(ishape, name='in_grad_tmp', dtype=dtype)
     in_grad = tvm.placeholder(ishape, name='in_grad', dtype=dtype)
     out_grad = tvm.placeholder(ishape, name='out_grad', dtype=dtype)
diff --git a/contrib/tvmop/core/fromnumeric.py b/contrib/tvmop/core/fromnumeric.py
index e6c4c2be0814..5b21cf894a90 100644
--- a/contrib/tvmop/core/fromnumeric.py
+++ b/contrib/tvmop/core/fromnumeric.py
@@ -23,7 +23,7 @@
 
 def _compute_sum(itype, otype, ndim, reduce1st_dim, req):
     axes = ([reduce1st_dim, 1 - reduce1st_dim] * ndim)[:ndim]
-    a = tvm.placeholder([tvm.var() for _ in range(ndim)], name='a', dtype=itype)
+    a = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='a', dtype=itype)
     reduce_output = reduce_axes(a, axes, tvm.sum, otype)
     output_placeholder, final_output = assign_by_req(reduce_output, req)
     s = tvm.create_schedule(final_output.op)
diff --git a/contrib/tvmop/core/multiarray.py b/contrib/tvmop/core/multiarray.py
index c8eed5b45368..baccba99369b 100644
--- a/contrib/tvmop/core/multiarray.py
+++ b/contrib/tvmop/core/multiarray.py
@@ -37,9 +37,9 @@ def dot(dtype, fallback):
     cfg = autotvm.get_config()
     cfg.define_knob("bn", [64] if fallback else [64, 32])
     cfg.define_knob("factor", [4] if fallback else [4])
-    M = tvm.var("M")
-    K = tvm.var("K")
-    N = tvm.var("N")
+    M = tvm.size_var("M")
+    K = tvm.size_var("K")
+    N = tvm.size_var("N")
     A = tvm.placeholder((M, K), name='A', dtype=dtype)
     B = tvm.placeholder((K, N), name='B', dtype=dtype)
     C = compute_dot(A, B)
diff --git a/contrib/tvmop/core/umath.py b/contrib/tvmop/core/umath.py
index ad099299aae5..30f2770d8868 100644
--- a/contrib/tvmop/core/umath.py
+++ b/contrib/tvmop/core/umath.py
@@ -29,9 +29,9 @@
 
 
 def _compute_binary_logic(op, dtype, ndim):
-    a = tvm.placeholder([tvm.var() for _ in range(ndim)], dtype=dtype, name='a')
-    b = tvm.placeholder([tvm.var() for _ in range(ndim)], dtype=dtype, name='b')
-    c = tvm.compute([tvm.var() for _ in range(ndim)],
+    a = tvm.placeholder([tvm.size_var() for _ in range(ndim)], dtype=dtype, name='a')
+    b = tvm.placeholder([tvm.size_var() for _ in range(ndim)], dtype=dtype, name='b')
+    c = tvm.compute([tvm.size_var() for _ in range(ndim)],
                     lambda *idx: _bin_logic_op_map[op](a, b, *idx), name='c')
     s = tvm.create_schedule(c.op)
     return s, a, b, c
@@ -91,9 +91,9 @@ def _binary_logic_gpu(compute_func, op, itype, ndim):
 
 
 def _compute_binary_scalar_logic(op, dtype, ndim):
-    a = tvm.placeholder([tvm.var() for _ in range(ndim)], name='a', dtype=dtype)
+    a = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='a', dtype=dtype)
     b = tvm.var('b', dtype='float64')
-    c = tvm.compute([tvm.var() for _ in range(ndim)],
+    c = tvm.compute([tvm.size_var() for _ in range(ndim)],
                     lambda *idx: _bin_scalar_logic_op_map[op](a, b, *idx), name='c')
     s = tvm.create_schedule(c.op)
     return s, a, b, c
diff --git a/contrib/tvmop/utils.py b/contrib/tvmop/utils.py
index 39d7a8092005..07eb748b4143 100644
--- a/contrib/tvmop/utils.py
+++ b/contrib/tvmop/utils.py
@@ -45,7 +45,7 @@ def get_index(idx, ridx):
 
     ishape = X.shape
     odim = (len(ishape) + 1 - axes[0]) // 2
-    oshape = [tvm.var() for _ in range(odim)]
+    oshape = [tvm.size_var() for _ in range(odim)]
     ridx = [tvm.reduce_axis((0, ishape[i])) for (i, val) in enumerate(axes) if val == 1]
     ret = tvm.compute(oshape, lambda *idx: reducer(X[get_index(idx, ridx)].astype(atype) if atype
                                                    else X[get_index(idx, ridx)],
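
The change is mechanical throughout: every symbolic shape dimension moves from tvm.var() to tvm.size_var(). A size_var is a symbolic integer that TVM additionally knows to be non-negative, which lets its bound and arithmetic analysis simplify shape expressions instead of guarding against negative extents. Note that the scalar operand b = tvm.var('b', dtype='float64') in umath.py correctly stays a plain var, since a scalar argument may be negative. Below is a minimal sketch of the pattern, written against the TVM 0.6-era top-level API this patch targets; the names shape, A, B and the doubling compute are illustrative, not taken from the patch.

import tvm

ndim = 2

# Shape extents are never negative, so size_var is the right choice here;
# the non-negativity constraint helps TVM's bound inference and simplifier.
shape = [tvm.size_var() for _ in range(ndim)]
A = tvm.placeholder(shape, name='A', dtype='float32')
B = tvm.compute(shape, lambda *index: A[index] * 2, name='B')
s = tvm.create_schedule(B.op)

# A plain tvm.var() remains appropriate for a scalar that may be negative,
# mirroring the untouched 'b' operand in umath.py above.
b = tvm.var('b', dtype='float64')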