5 changes: 1 addition & 4 deletions tests/python/topi/python/test_topi_argwhere.py
@@ -22,10 +22,7 @@
from tvm import topi
import tvm.topi.testing

_argwhere_schedule = {
"generic": topi.generic.schedule_argwhere,
"gpu": topi.cuda.schedule_argwhere,
}
_argwhere_schedule = {"generic": topi.generic.schedule_argwhere, "gpu": topi.cuda.schedule_argwhere}

_argwhere_compute = {"llvm": topi.argwhere, "cuda": topi.cuda.argwhere}

12 changes: 5 additions & 7 deletions tests/python/topi/python/test_topi_batch_matmul.py
@@ -110,14 +110,12 @@ def get_ref_data():
# get the test data
a_np, b_np, c_np = get_ref_data()

def check_device(device):
dev = tvm.device(device, 0)
if device == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
def check_device(target, dev):
if target == "cuda" and not tvm.contrib.nvcc.have_int8(dev.compute_version):
print("Skip because int8 intrinsics are not available")
return

print("Running on target: %s" % device)
with tvm.target.Target(device):
with tvm.target.Target(target):
out = topi.cuda.batch_matmul_int8(x, y, None, out_dtype)
s = topi.cuda.schedule_batch_matmul_int8([out])
a = tvm.nd.array(a_np, dev)
@@ -127,8 +125,8 @@ def check_device(device):
f(a, b, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)

for device in ["cuda"]:
check_device(device)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)


@tvm.testing.uses_gpu
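Note on the loop change above: tvm.testing.enabled_targets() yields (target, device) pairs for each target that is both built into TVM and usable on the current machine, so check_device no longer constructs the device itself. A minimal sketch of that pattern, using an illustrative my_check helper that is not part of this PR:

import tvm
import tvm.testing

def my_check(target, dev):
    # dev is already constructed by enabled_targets(), so no tvm.device(target, 0) call is needed.
    print("Running on target: %s" % target)

# Iterate over every target that is enabled in this build and requested for testing.
for target, dev in tvm.testing.enabled_targets():
    my_check(target, dev)
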
@@ -26,7 +26,7 @@
import tvm.testing

_batch_matmul_implement = {
"gpu": (topi.cuda.batch_matmul_tensorcore, topi.cuda.schedule_batch_matmul_tensorcore),
"gpu": (topi.cuda.batch_matmul_tensorcore, topi.cuda.schedule_batch_matmul_tensorcore)
}


2 changes: 1 addition & 1 deletion tests/python/topi/python/test_topi_batch_to_space_nd.py
@@ -44,7 +44,7 @@ def verify_batch_to_space_nd(input_shape, block_shape, crop_begin_list, crop_end

def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.create(target):
with tvm.target.Target(target):
s = tvm.topi.testing.get_injective_schedule(target)(B)
a = tvm.nd.array(a_np, dev)
b = tvm.nd.array(np.zeros(out_shape, dtype=dtype), dev)
25 changes: 3 additions & 22 deletions tests/python/topi/python/test_topi_broadcast.py
@@ -290,13 +290,7 @@ def test_shift():

@tvm.testing.uses_gpu
def test_logical_single_ele():
def test_apply(
func,
name,
f_numpy,
indata,
dtype="bool",
):
def test_apply(func, name, f_numpy, indata, dtype="bool"):
# Build the logic and compile the function
A = te.placeholder(shape=indata.shape, name="A", dtype=dtype)
B = func(A)
@@ -327,13 +321,7 @@ def check_target(target, dev):

@tvm.testing.uses_gpu
def test_bitwise_not():
def test_apply(
func,
name,
f_numpy,
shape,
dtype="int32",
):
def test_apply(func, name, f_numpy, shape, dtype="int32"):
# Build the logic and compile the function
A = te.placeholder(shape=shape, name="A", dtype=dtype)
B = func(A)
@@ -365,14 +353,7 @@ def check_target(target, dev):

@tvm.testing.uses_gpu
def test_logical_binary_ele():
def test_apply(
func,
name,
f_numpy,
lhs,
rhs,
dtype="bool",
):
def test_apply(func, name, f_numpy, lhs, rhs, dtype="bool"):
# Build the logic and compile the function
A = te.var("A", dtype=dtype)
B = te.var("B", dtype=dtype)
20 changes: 8 additions & 12 deletions tests/python/topi/python/test_topi_conv2d_NCHWc.py
@@ -52,6 +52,7 @@ def _transform_bias(bias, bn):
return bias


@tvm.testing.requires_llvm
def verify_conv2d_NCHWc(
batch,
in_channel,
@@ -115,13 +116,8 @@ def get_ref_data():

a_np, w_np, b_np, c_np = get_ref_data()

def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
def check_device(target, dev):
with tvm.target.Target(target):
C = topi.x86.conv2d_NCHWc(
A,
W,
@@ -146,7 +142,7 @@ def check_device(device):
func = tvm.build(
s,
[A, W, bias, C],
device,
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
@@ -155,17 +151,17 @@ def check_device(device):
func = tvm.build(
s,
[A, W, C],
device,
target,
name="relu_%d_%d_%d_%d_%d_%d_%d_%d"
% (batch, in_channel, in_size, num_filter, kernel, stride, padding_sum, dilation),
)
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-3)

# test llvm only for now since conv2d_NCHWc implement is missing in other backend.
for device in ["llvm"]:
with autotvm.tophub.context(device): # load tophub pre-tuned parameters
check_device(device)
for target, device in tvm.testing.enabled_targets():
with autotvm.tophub.context(target): # load tophub pre-tuned parameters
check_device(target, device)


def test_conv2d_NCHWc():
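The first hunk of this file also adds @tvm.testing.requires_llvm on verify_conv2d_NCHWc, alongside the switch to enabled_targets(), replacing the old "test llvm only for now" comment and hard-coded loop. A minimal, self-contained sketch of how such a decorator gates a test; the test body below is a placeholder, not code from this PR:

import tvm
import tvm.testing

@tvm.testing.requires_llvm
def test_llvm_only_example():
    # Skipped automatically when the LLVM backend is not enabled in this build,
    # so no manual tvm.testing.device_enabled("llvm") check is needed in the body.
    target = tvm.target.Target("llvm")
    assert target.kind.name == "llvm"
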
6 changes: 3 additions & 3 deletions tests/python/topi/python/test_topi_conv2d_int8.py
@@ -175,8 +175,7 @@ def get_ref_data():

a_np, w_np, b_np, c_np = get_ref_data()

def check_target(target):
dev = tvm.device(target, 0)
def check_target(target, dev):
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
@@ -222,7 +221,8 @@ def check_target(target):
func(a, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)

check_target("llvm")
for target, dev in tvm.testing.enabled_targets():
check_target(target, dev)


oc_block_factor = 4
22 changes: 3 additions & 19 deletions tests/python/topi/python/test_topi_conv2d_nchw.py
@@ -52,15 +52,7 @@ def bias_shape(num_filter):

@tvm.testing.fixture(cache_return_value=True)
def ref_data(
input_shape,
weight_shape,
bias_shape,
dtype,
stride,
padding,
dilation,
add_bias,
apply_relu,
input_shape, weight_shape, bias_shape, dtype, stride, padding, dilation, add_bias, apply_relu
):
a_np = np.random.uniform(size=input_shape).astype(dtype)
w_np = np.random.uniform(size=weight_shape).astype(dtype)
@@ -146,15 +138,7 @@ def test_conv2d_nchw(

@tvm.testing.parametrize_targets("llvm")
def test_workload_padding(
self,
target,
input_shape,
weight_shape,
stride,
padding,
dilation,
dtype,
ref_data,
self, target, input_shape, weight_shape, stride, padding, dilation, dtype, ref_data
):
a_np, w_np, b_np, c_np = ref_data
_, _, out_height, out_width = c_np.shape
@@ -282,7 +266,7 @@ class TestAsymmetricPadding(BaseConv2DTests):

class TestBatchSize(BaseConv2DTests):
in_channel, in_size, num_filter, kernel, stride, padding = tvm.testing.parameters(
(64, 56, 64, 3, 1, 1),
(64, 56, 64, 3, 1, 1)
)
batch = tvm.testing.parameter(1, 4, 9)

28 changes: 13 additions & 15 deletions tests/python/topi/python/test_topi_conv2d_nhwc.py
@@ -34,14 +34,8 @@
topi.arm_cpu.conv2d_nhwc_spatial_pack,
topi.arm_cpu.schedule_conv2d_nhwc_spatial_pack,
),
"mali": (
topi.mali.conv2d_nhwc_spatial_pack,
topi.mali.schedule_conv2d_nhwc_spatial_pack,
),
"bifrost": (
topi.mali.conv2d_nhwc_spatial_pack,
topi.mali.schedule_conv2d_nhwc_spatial_pack,
),
"mali": (topi.mali.conv2d_nhwc_spatial_pack, topi.mali.schedule_conv2d_nhwc_spatial_pack),
"bifrost": (topi.mali.conv2d_nhwc_spatial_pack, topi.mali.schedule_conv2d_nhwc_spatial_pack),
"hls": (topi.nn.conv2d_nhwc, topi.hls.schedule_conv2d_nhwc),
}

@@ -66,21 +60,25 @@ def get_ref_data():

a_np, w_np, b_np = get_ref_data()

def check_device(target, dev):
print("Running on target: %s" % target)
with tvm.target.Target(target):
fcompute, fschedule = tvm.topi.testing.dispatch(target, _conv2d_nhwc_implement)
def check_device(device):
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
return
print("Running on target: %s" % device)
with tvm.target.Target(device):
fcompute, fschedule = tvm.topi.testing.dispatch(device, _conv2d_nhwc_implement)
B = fcompute(A, W, stride, padding, dilation, dtype)
s = fschedule([B])
dev = tvm.device(device, 0)
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
func = tvm.build(s, [A, W, B], target)
func = tvm.build(s, [A, W, B], device)
func(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)

for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)
for device in ["llvm", "cuda"]:

Reviewer comment (Contributor): should these become @tvm.testing.requires_?

check_device(device)


@tvm.testing.uses_gpu
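On the reviewer question above: one way the hard-coded ["llvm", "cuda"] loop could be expressed with per-target requires_ decorators is sketched below. This is an assumption about the suggestion's intent, not code from this PR; check_device here is a stand-in stub for the closure inside the real test, and the sketch assumes tvm.testing.requires_cuda is available alongside the requires_llvm decorator used elsewhere in this diff.

import tvm
import tvm.testing

def check_device(device):
    # Stand-in for the closure inside the conv2d_nhwc test; real compute/schedule logic omitted.
    dev = tvm.device(device, 0)
    print("Running on target: %s" % device)

@tvm.testing.requires_llvm
def test_conv2d_nhwc_llvm_example():
    check_device("llvm")

@tvm.testing.requires_cuda
def test_conv2d_nhwc_cuda_example():
    check_device("cuda")
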
18 changes: 9 additions & 9 deletions tests/python/topi/python/test_topi_conv2d_nhwc_pack_int8.py
@@ -26,6 +26,7 @@
import tvm.topi.testing
from tvm.contrib.pickle_memoize import memoize
from tvm.topi.utils import get_const_tuple
import tvm.testing


def verify_conv2d_1x1_nhwc_pack_int8(
@@ -51,26 +52,25 @@ def get_ref_data():

a_np, w_np, b_np = get_ref_data()

def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
def check_device(target, dev):
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % device)
print("Running on target: %s" % target)

with tvm.target.Target(device):
with tvm.target.Target(target):
B = topi.nn.conv2d(A, W, stride, padding, dilation, layout="NHWC", out_dtype="int32")
s = topi.x86.schedule_conv2d_nhwc_pack_int8([B])
a = tvm.nd.array(a_np, dev)
w = tvm.nd.array(w_np, dev)
b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), dev)
func = tvm.build(s, [A, W, B], device)
func = tvm.build(s, [A, W, B], target)
func(a, w, b)
tvm.testing.assert_allclose(b.numpy(), b_np, rtol=1e-5)

# for device in ['llvm -mcpu=skylake-avx512']:
for device in ["llvm"]:
check_device(device)
for target, dev in tvm.testing.enabled_targets():

Reviewer comment (Contributor): same question -- possible to use tvm.testing.parametrize_targets?

check_device(target, dev)


# TODO(@llyfacebook): Please fix https://github.com/apache/tvm/issues/4122 to enable this test.
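On the reviewer question above: tvm.testing.parametrize_targets, already used in test_topi_conv2d_nchw.py in this same PR, turns each listed target into its own pytest case and supplies target and dev to the test. A rough, self-contained sketch under that assumption; the function name and the tiny round-trip check are illustrative only:

import numpy as np
import tvm
import tvm.testing

@tvm.testing.parametrize_targets("llvm", "cuda")
def test_roundtrip_example(target, dev):
    # Each enabled target from the decorator list runs as a separate test case.
    a_np = np.arange(16, dtype="float32").reshape(4, 4)
    a = tvm.nd.array(a_np, dev)
    tvm.testing.assert_allclose(a.numpy(), a_np)
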
11 changes: 1 addition & 10 deletions tests/python/topi/python/test_topi_conv2d_winograd.py
@@ -170,16 +170,7 @@ def test_conv2d_nchw():
verify_conv2d_nchw(1, 48, 35, 48, 5, 1, "VALID", devices=["cuda"])


def verify_conv2d_nhwc(
batch,
in_channel,
in_size,
num_filter,
kernel,
stride,
padding,
dilation=1,
):
def verify_conv2d_nhwc(batch, in_channel, in_size, num_filter, kernel, stride, padding, dilation=1):
# This version is intented to be used by the auto-scheduler,
# so we only test the correctness of compute declaration
# with the default naive schedule in cpu
2 changes: 1 addition & 1 deletion tests/python/topi/python/test_topi_conv3d_winograd.py
@@ -29,7 +29,7 @@


_conv3d_ncdhw_implement = {
"gpu": (topi.cuda.conv3d_ncdhw_winograd, topi.cuda.schedule_conv3d_ncdhw_winograd),
"gpu": (topi.cuda.conv3d_ncdhw_winograd, topi.cuda.schedule_conv3d_ncdhw_winograd)
}


21 changes: 10 additions & 11 deletions tests/python/topi/python/test_topi_deformable_conv2d.py
@@ -32,7 +32,7 @@
}

_deformable_conv2d_nhwc_implement = {
"generic": (topi.nn.deformable_conv2d_nhwc, topi.generic.schedule_deformable_conv2d_nhwc),
"generic": (topi.nn.deformable_conv2d_nhwc, topi.generic.schedule_deformable_conv2d_nhwc)
}


@@ -92,14 +92,13 @@ def get_ref_data():

a_np, offset_np, w_np, c_np = get_ref_data()

def check_device(device):
dev = tvm.device(device, 0)
if not tvm.testing.device_enabled(device):
print("Skip because %s is not enabled" % device)
def check_device(target, dev):
if not tvm.testing.device_enabled(target):
print("Skip because %s is not enabled" % target)
return
print("Running on target: %s" % device)
fcompute, fschedule = tvm.topi.testing.dispatch(device, _deformable_conv2d_nchw_implement)
with tvm.target.Target(device):
print("Running on target: %s" % target)
fcompute, fschedule = tvm.topi.testing.dispatch(target, _deformable_conv2d_nchw_implement)
with tvm.target.Target(target):
C = fcompute(A, Offset, W, stride, padding, dilation, deformable_groups, groups, dtype)
s = fschedule([C])

@@ -108,12 +107,12 @@ def check_device(device):
w = tvm.nd.array(w_np, dev)
c = tvm.nd.empty(c_np.shape, dtype=c_np.dtype, device=dev)

func = tvm.build(s, [A, Offset, W, C], device)
func = tvm.build(s, [A, Offset, W, C], target)
func(a, offset, w, c)
tvm.testing.assert_allclose(c.numpy(), c_np, rtol=1e-5)

for device in ["llvm", "cuda"]:
check_device(device)
for target, dev in tvm.testing.enabled_targets():
check_device(target, dev)


def verify_deformable_conv2d_nhwc(