16 changes: 8 additions & 8 deletions contrib/tvmop/basic/ufunc.py
@@ -21,9 +21,9 @@
 from .. import assign_by_req, reduce_axes
 
 def compute_add(dtype, ndim):
-    A = tvm.placeholder([tvm.var() for _ in range(ndim)], name='A', dtype=dtype)
-    B = tvm.placeholder([tvm.var() for _ in range(ndim)], name='B', dtype=dtype)
-    C = tvm.compute([tvm.var() for _ in range(ndim)],
+    A = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='A', dtype=dtype)
+    B = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='B', dtype=dtype)
+    C = tvm.compute([tvm.size_var() for _ in range(ndim)],
                     lambda *index: A[index] + B[index], name='C')
     s = tvm.create_schedule(C.op)
     return s, A, B, C
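For readers unfamiliar with the distinction, here is a minimal self-contained sketch of what the substitution buys, assuming the same pre-0.7 top-level TVM API that this PR targets (the kernel and the names n, m, A, B are illustrative, not part of the change): tvm.size_var() declares a symbolic extent that TVM treats as non-negative, which is the right contract for tensor shapes, while a plain tvm.var() carries no sign information.

    import tvm

    # Plain symbolic variable: signed integer, no sign guarantee for the analyzer.
    n = tvm.var("n")
    # Shape-sized variable: declared non-negative, intended for tensor extents.
    m = tvm.size_var("m")

    A = tvm.placeholder((m,), name="A", dtype="float32")
    B = tvm.compute((m,), lambda i: A[i] + 1.0, name="B")
    s = tvm.create_schedule(B.op)
    f = tvm.build(s, [A, B], target="llvm")  # one kernel, valid for any m >= 0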
@@ -62,7 +62,7 @@ def compute_backward_vadd(dtype, ndim, reduce1st, req):
     # They compressed bit string is stored in `axes`. And `reduce1st` represents the first bit
     # of the compressed bit string. Credit to @junrushao1994 and @yzhliu.
     axes = ([reduce1st, 1 - reduce1st] * ndim)[:ndim]
-    X = tvm.placeholder([tvm.var() for _ in range(ndim)], name='X', dtype=dtype)
+    X = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='X', dtype=dtype)
     reducer = tvm.comm_reducer(lambda x, y: x + y,
                                lambda t: tvm.const(0, dtype=t), name="sum")
     ret = reduce_axes(X, axes, reducer)
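As a concrete illustration of the compressed bit string described in the context comment above (derived from the shown code, not part of the diff), the expression expands into an alternating reduce/keep pattern:

    # Illustration only: expanding the compressed reduction pattern.
    ndim, reduce1st = 3, 1
    axes = ([reduce1st, 1 - reduce1st] * ndim)[:ndim]
    print(axes)  # [1, 0, 1] -> reduce dims 0 and 2, keep dim 1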
@@ -101,13 +101,13 @@ def backward_vadd_gpu(dtype, ndim, reduce1st, req):
 
 
 def compute_degandrad(dtype, ndim, n):
-    A = tvm.placeholder([tvm.var() for _ in range(ndim)], name='A', dtype=dtype)
+    A = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='A', dtype=dtype)
     import math
     if n == 0:
-        B = tvm.compute([tvm.var() for _ in range(ndim)],
+        B = tvm.compute([tvm.size_var() for _ in range(ndim)],
                         lambda *index: A[index] * tvm.const(math.pi, dtype) / tvm.const(180, dtype), name='B')
     else:
-        B = tvm.compute([tvm.var() for _ in range(ndim)],
+        B = tvm.compute([tvm.size_var() for _ in range(ndim)],
                         lambda *index: A[index] / tvm.const(math.pi, dtype) * tvm.const(180, dtype), name='B')
     s = tvm.create_schedule(B.op)
     return s, A, B
@@ -160,7 +160,7 @@ def rad2deg_gpu(dtype, ndim):
 
 
 def compute_backward_degandrad(dtype, ndim, req, n):
-    ishape = [tvm.var() for _ in range(ndim)]
+    ishape = [tvm.size_var() for _ in range(ndim)]
     in_grad_tmp = tvm.placeholder(ishape, name='in_grad_tmp', dtype=dtype)
     in_grad = tvm.placeholder(ishape, name='in_grad', dtype=dtype)
     out_grad = tvm.placeholder(ishape, name='out_grad', dtype=dtype)
2 changes: 1 addition & 1 deletion contrib/tvmop/core/fromnumeric.py
@@ -23,7 +23,7 @@
 
 def _compute_sum(itype, otype, ndim, reduce1st_dim, req):
     axes = ([reduce1st_dim, 1 - reduce1st_dim] * ndim)[:ndim]
-    a = tvm.placeholder([tvm.var() for _ in range(ndim)], name='a', dtype=itype)
+    a = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='a', dtype=itype)
     reduce_output = reduce_axes(a, axes, tvm.sum, otype)
     output_placeholder, final_output = assign_by_req(reduce_output, req)
     s = tvm.create_schedule(final_output.op)
6 changes: 3 additions & 3 deletions contrib/tvmop/core/multiarray.py
@@ -37,9 +37,9 @@ def dot(dtype, fallback):
     cfg = autotvm.get_config()
     cfg.define_knob("bn", [64] if fallback else [64, 32])
     cfg.define_knob("factor", [4] if fallback else [4])
-    M = tvm.var("M")
-    K = tvm.var("K")
-    N = tvm.var("N")
+    M = tvm.size_var("M")
+    K = tvm.size_var("K")
+    N = tvm.size_var("N")
     A = tvm.placeholder((M, K), name='A', dtype=dtype)
     B = tvm.placeholder((K, N), name='B', dtype=dtype)
     C = compute_dot(A, B)
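For context, a self-contained sketch of a matmul declared over the same kind of named symbolic extents (illustrative only; compute_dot and the autotvm knobs above are the PR's actual implementation and are not reproduced here). Naming the size vars M, K, N keeps them distinguishable in the lowered IR:

    import tvm

    M = tvm.size_var("M")
    K = tvm.size_var("K")
    N = tvm.size_var("N")
    A = tvm.placeholder((M, K), name="A", dtype="float32")
    B = tvm.placeholder((K, N), name="B", dtype="float32")
    k = tvm.reduce_axis((0, K), name="k")
    # A plain dense matmul over the shared K extent.
    C = tvm.compute((M, N), lambda i, j: tvm.sum(A[i, k] * B[k, j], axis=k), name="C")
    s = tvm.create_schedule(C.op)
    f = tvm.build(s, [A, B, C], target="llvm")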
10 changes: 5 additions & 5 deletions contrib/tvmop/core/umath.py
@@ -29,9 +29,9 @@
 
 
 def _compute_binary_logic(op, dtype, ndim):
-    a = tvm.placeholder([tvm.var() for _ in range(ndim)], dtype=dtype, name='a')
-    b = tvm.placeholder([tvm.var() for _ in range(ndim)], dtype=dtype, name='b')
-    c = tvm.compute([tvm.var() for _ in range(ndim)],
+    a = tvm.placeholder([tvm.size_var() for _ in range(ndim)], dtype=dtype, name='a')
+    b = tvm.placeholder([tvm.size_var() for _ in range(ndim)], dtype=dtype, name='b')
+    c = tvm.compute([tvm.size_var() for _ in range(ndim)],
                     lambda *idx: _bin_logic_op_map[op](a, b, *idx), name='c')
     s = tvm.create_schedule(c.op)
     return s, a, b, c
@@ -91,9 +91,9 @@ def _binary_logic_gpu(compute_func, op, itype, ndim):
 
 
 def _compute_binary_scalar_logic(op, dtype, ndim):
-    a = tvm.placeholder([tvm.var() for _ in range(ndim)], name='a', dtype=dtype)
+    a = tvm.placeholder([tvm.size_var() for _ in range(ndim)], name='a', dtype=dtype)
     b = tvm.var('b', dtype='float64')
-    c = tvm.compute([tvm.var() for _ in range(ndim)],
+    c = tvm.compute([tvm.size_var() for _ in range(ndim)],
                     lambda *idx: _bin_scalar_logic_op_map[op](a, b, *idx), name='c')
     s = tvm.create_schedule(c.op)
     return s, a, b, c
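Note that b = tvm.var('b', dtype='float64') is left untouched in this hunk: it holds the scalar operand's value, not a shape extent, so the non-negativity contract of size_var does not apply. A minimal contrast, for illustration only:

    b = tvm.var('b', dtype='float64')  # runtime scalar argument, may be negative or fractional
    n = tvm.size_var('n')              # tensor extent, assumed non-negative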
2 changes: 1 addition & 1 deletion contrib/tvmop/utils.py
@@ -45,7 +45,7 @@ def get_index(idx, ridx):
 
     ishape = X.shape
     odim = (len(ishape) + 1 - axes[0]) // 2
-    oshape = [tvm.var() for _ in range(odim)]
+    oshape = [tvm.size_var() for _ in range(odim)]
     ridx = [tvm.reduce_axis((0, ishape[i])) for (i, val) in enumerate(axes) if val == 1]
     ret = tvm.compute(oshape, lambda *idx: reducer(X[get_index(idx, ridx)].astype(atype)
                                                    if atype else X[get_index(idx, ridx)],
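A worked example of the odim computed in this hunk (derived from the shown code, not part of the diff): with three input dimensions and an alternating pattern that starts with a reduced axis, axes = [1, 0, 1], so odim = (3 + 1 - 1) // 2 = 1 and oshape holds a single size_var for the one surviving dimension.

    # Illustration only: output rank for an alternating reduce/keep pattern.
    ndim, axes = 3, [1, 0, 1]
    odim = (ndim + 1 - axes[0]) // 2
    print(odim)  # 1 -> only dimension 1 is kept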