From c259d854c8d57cf0f35e5a25b6d1f96474052160 Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Fri, 3 Nov 2023 22:35:00 +0800
Subject: [PATCH 01/19] Update pylint.sh
---
tests/lint/pylint.sh | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/tests/lint/pylint.sh b/tests/lint/pylint.sh
index 8bb492c6898b..9195e7f73592 100755
--- a/tests/lint/pylint.sh
+++ b/tests/lint/pylint.sh
@@ -31,6 +31,14 @@ python3 -m pylint tests/python/contrib/test_cblas.py --rcfile="$(dirname "$0")"/
python3 -m pylint tests/python/contrib/test_tflite_runtime.py --rcfile="$(dirname "$0")"/pylintrc
python3 -m pylint tests/python/contrib/test_thrust.py --rcfile="$(dirname "$0")"/pylintrc
python3 -m pylint tests/python/contrib/test_util.py --rcfile="$(dirname "$0")"/pylintrc
+python3 -m pylint tests/python/contrib/test_sort.py --rcfile="$(dirname "$0")"/pylintrc
+python3 -m pylint tests/python/contrib/test_sparse.py --rcfile="$(dirname "$0")"/pylintrc
+python3 -m pylint tests/python/contrib/test_tedd.py --rcfile="$(dirname "$0")"/pylintrc
+python3 -m pylint tests/python/contrib/test_rpc_tracker.py --rcfile="$(dirname "$0")"/pylintrc
+python3 -m pylint tests/python/contrib/test_rpc_server_device.py --rcfile="$(dirname "$0")"/pylintrc
+python3 -m pylint tests/python/contrib/test_rpc_proxy.py --rcfile="$(dirname "$0")"/pylintrc
+python3 -m pylint tests/python/contrib/test_rocblas.py --rcfile="$(dirname "$0")"/pylintrc
+python3 -m pylint tests/python/contrib/test_random.py --rcfile="$(dirname "$0")"/pylintrc
# tests/python/contrib/test_hexagon tests
python3 -m pylint tests/python/contrib/test_hexagon/*.py --rcfile="$(dirname "$0")"/pylintrc
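Note: each added line simply runs pylint on one test file against the rcfile that sits next to the script ("$(dirname "$0")"/pylintrc, i.e. tests/lint/pylintrc). A rough local equivalent, assuming the command is issued from the TVM repository root, is:

    python3 -m pylint tests/python/contrib/test_sparse.py --rcfile=tests/lint/pylintrc

Running ./tests/lint/pylint.sh from that same directory checks every listed file in one pass.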
From e4c50450b7927637b16120559b89fb76f5fa2a46 Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Fri, 3 Nov 2023 22:48:17 +0800
Subject: [PATCH 02/19] Update test_sparse.py
---
tests/python/contrib/test_sparse.py | 83 +++++++++++++++--------------
1 file changed, 42 insertions(+), 41 deletions(-)
diff --git a/tests/python/contrib/test_sparse.py b/tests/python/contrib/test_sparse.py
index b4fdbc8481ba..0714312c4227 100644
--- a/tests/python/contrib/test_sparse.py
+++ b/tests/python/contrib/test_sparse.py
@@ -14,33 +14,34 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
+"""Configure pytest"""
+from collections import namedtuple
+import numpy as np
import tvm
import tvm.testing
from tvm import te
import tvm.contrib.sparse as tvmsp
import tvm.runtime.ndarray as _nd
-import numpy as np
-from collections import namedtuple
def test_static_tensor():
+ """Tests static tensor"""
dtype = "float32"
- stype = "csr"
target = "llvm"
dev = tvm.device(target, 0)
m = te.size_var("m")
n = te.size_var("n")
- A = tvmsp.placeholder(shape=(m, n), name="A", dtype=dtype)
- assert A.stype == "csr"
+ input_a = tvmsp.placeholder(shape=(m, n), name="input_a", dtype=dtype)
+ assert input_a.stype == "csr"
n = 3
a = np.maximum(np.random.uniform(size=(n, n)).astype(dtype) - 0.6, 0.0)
a = tvmsp.array(a, dev)
- A.data = te.placeholder(a.data.shape, dtype, name="A_data")
- Ab = tvm.tir.decl_buffer(a.data.shape, dtype, name="A_data")
- binds = {A.data: Ab}
- C = te.compute(A.data.shape, lambda i: A.data[i] * 2.0, tag="cs_scatter")
- s = te.create_schedule(C.op)
- f = tvm.build(s, [A.data, C], target, binds=binds)
+ input_a.data = te.placeholder(a.data.shape, dtype, name="input_a_data")
+ result_b = tvm.tir.decl_buffer(a.data.shape, dtype, name="input_a_data")
+ binds = {input_a.data: result_b}
+ result_c = te.compute(input_a.data.shape, lambda i: input_a.data[i] * 2.0, tag="cs_scatter")
+ s = te.create_schedule(result_c.op)
+ f = tvm.build(s, [input_a.data, result_c], target, binds=binds)
c = tvmsp.array(np.zeros((n, n), dtype), dev)
c.data = tvm.nd.empty(a.data.shape, dtype)
c.indices = a.indices
@@ -50,25 +51,25 @@ def test_static_tensor():
def test_dynamic_tensor():
+ """Tests dynamic tensor"""
dtype = "float32"
- stype = "csr"
target = "llvm"
dev = tvm.device(target, 0)
- nr, nc, n = te.size_var("nr"), te.size_var("nc"), te.size_var("n")
- A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name="A", dtype=dtype)
- assert A.stype == "csr"
- C = te.compute(A.data.shape, lambda i: A.data[i] * 2.0, tag="cs_scatter")
- s = te.create_schedule(C.op)
- _nr, _nc = 3, 5
- a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - 0.6, 0.0)
+ n_row, n_col, n = te.size_var("n_row"), te.size_var("n_col"), te.size_var("n")
+ input_a = tvmsp.placeholder(shape=(n_row, n_col), nonzeros=n, name="input_a", dtype=dtype)
+ assert input_a.stype == "csr"
+ result_c = te.compute(input_a.data.shape, lambda i: input_a.data[i] * 2.0, tag="cs_scatter")
+ s = te.create_schedule(result_c.op)
+ _n_row, _n_col = 3, 5
+ a = np.maximum(np.random.uniform(size=(_n_row, _n_col)).astype(dtype) - 0.6, 0.0)
a = tvmsp.array(a, dev)
assert a.data.dtype == a.dtype
- Ab = namedtuple("CSRBuffer", ["data", "indices", "indptr"])
- Ab.data = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_data")
- Ab.indices = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_indices")
- binds = {A.data: Ab.data, A.indices: Ab.indices}
- f = tvm.build(s, [nr, A.data, C], target, binds=binds)
- c = tvmsp.array(np.zeros((_nr, _nc), dtype), dev)
+ result_b = namedtuple("CSRBuffer", ["data", "indices", "indptr"])
+ result_b.data = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="input_a_data")
+ result_b.indices = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="input_a_indices")
+ binds = {input_a.data: result_b.data, input_a.indices: result_b.indices}
+ f = tvm.build(s, [n_row, input_a.data, result_c], target, binds=binds)
+ c = tvmsp.array(np.zeros((_n_row, _n_col), dtype), dev)
c.data = tvm.nd.empty(a.data.shape, dtype)
c.indices = a.indices
c.indptr = a.indptr
@@ -77,21 +78,21 @@ def test_dynamic_tensor():
def test_sparse_array_tuple():
+ """Tests array when it is sparse"""
dtype, itype = "float32", "int32"
- stype = "csr"
target = "llvm"
dev = tvm.device(target, 0)
- nr, nc, n = te.size_var("nr"), te.size_var("nc"), te.size_var("n")
- A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name="A", dtype=dtype)
- assert A.stype == "csr"
- C = te.compute(A.data.shape, lambda i: A.data[i] * 2.0, tag="cs_scatter")
- s = te.create_schedule(C.op)
- _nr, _nc = 3, 5
- a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - 0.6, 0.0)
+ n_row, n_col, n = te.size_var("n_row"), te.size_var("n_col"), te.size_var("n")
+ input_a = tvmsp.placeholder(shape=(n_row, n_col), nonzeros=n, name="input_a", dtype=dtype)
+ assert input_a.stype == "csr"
+ result_c = te.compute(input_a.data.shape, lambda i: input_a.data[i] * 2.0, tag="cs_scatter")
+ s = te.create_schedule(result_c.op)
+ _n_row, _n_col = 3, 5
+ a = np.maximum(np.random.uniform(size=(_n_row, _n_col)).astype(dtype) - 0.6, 0.0)
# convert to sparse array tuple
source_array = a
- ridx, cidx = np.nonzero(source_array)
- data = source_array[ridx, cidx]
+ row_idx, col_idx = np.nonzero(source_array)
+ data = source_array[row_idx, col_idx]
a_data = _nd.array(data, dev)
indices = np.nonzero(source_array)[1].astype(itype)
a_indices = _nd.array(indices, dev)
@@ -102,12 +103,12 @@ def test_sparse_array_tuple():
# construct tvm sparse array with tuple
a = tvmsp.array(a_init, shape=source_array.shape, device=dev)
assert a.data.dtype == a.dtype
- Ab = namedtuple("CSRBuffer", ["data", "indices", "indptr"])
- Ab.data = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_data")
- Ab.indices = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_indices")
- binds = {A.data: Ab.data, A.indices: Ab.indices}
- f = tvm.build(s, [nr, A.data, C], target, binds=binds)
- c = tvmsp.array(np.zeros((_nr, _nc), dtype), dev)
+ result_b = namedtuple("CSRBuffer", ["data", "indices", "indptr"])
+ result_b.data = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="input_a_data")
+ result_b.indices = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="input_a_indices")
+ binds = {input_a.data: result_b.data, input_a.indices: result_b.indices}
+ f = tvm.build(s, [n_row, input_a.data, result_c], target, binds=binds)
+ c = tvmsp.array(np.zeros((_n_row, _n_col), dtype), dev)
c.data = tvm.nd.empty(a.data.shape, dtype)
c.indices = a.indices
c.indptr = a.indptr
From 9770484f75554f22a91a1a96b182d4a5ad6fe7ab Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Fri, 3 Nov 2023 22:49:17 +0800
Subject: [PATCH 03/19] Update test_sort.py
---
tests/python/contrib/test_sort.py | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/tests/python/contrib/test_sort.py b/tests/python/contrib/test_sort.py
index fda9a777576d..000caac38cc8 100644
--- a/tests/python/contrib/test_sort.py
+++ b/tests/python/contrib/test_sort.py
@@ -14,18 +14,20 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
+"""Configure pytest"""
+import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm.topi.cuda import sort_by_key
-import numpy as np
def test_sort():
+ """Tests sort function"""
n = 2
- l = 5
+ lens = 5
m = 3
- data = te.placeholder((n, l, m), name="data")
+ data = te.placeholder((n, lens, m), name="data")
sort_num = te.placeholder((n, m), name="sort_num", dtype="int32")
axis = 1
is_ascend = False
@@ -38,7 +40,7 @@ def test_sort():
dtype="int32",
name="sort_tensor",
)
- input = [
+ input_data = [
[[1, 2, 3], [2, 4.5, 3.5], [1.1, 0.5, 1], [3.2, -5, 0.5], [1.5, 0, 0]],
[[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]],
]
@@ -52,7 +54,7 @@ def test_sort():
target = "llvm"
s = te.create_schedule(out.op)
f = tvm.build(s, [data, sort_num, out], target)
- a = tvm.nd.array(np.array(input).astype(data.dtype), dev)
+ a = tvm.nd.array(np.array(input_data).astype(data.dtype), dev)
b = tvm.nd.array(np.array(sort_num_input).astype(sort_num.dtype), dev)
c = tvm.nd.array(np.zeros(a.shape, dtype=out.dtype), dev)
f(a, b, c)
@@ -60,6 +62,7 @@ def test_sort():
def test_sort_np():
+ """Tests sort function using numpy"""
dshape = (1, 2, 3, 4, 5, 6)
axis = 4
reduced_shape = (1, 2, 3, 4, 6)
@@ -92,6 +95,7 @@ def test_sort_np():
def test_sort_by_key_gpu():
+ """Tests sort function using gpu"""
size = 6
keys = te.placeholder((size,), name="keys", dtype="int32")
values = te.placeholder((size,), name="values", dtype="int32")
From 790c17a9a6af7487458b7913c6be0879e8f849ca Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Fri, 3 Nov 2023 22:50:33 +0800
Subject: [PATCH 04/19] Update test_rpc_tracker.py
---
tests/python/contrib/test_rpc_tracker.py | 30 +++++++++++++-----------
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/tests/python/contrib/test_rpc_tracker.py b/tests/python/contrib/test_rpc_tracker.py
index 105d1aa7b03f..63ce8be174b4 100644
--- a/tests/python/contrib/test_rpc_tracker.py
+++ b/tests/python/contrib/test_rpc_tracker.py
@@ -14,19 +14,20 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-import tvm
-from tvm import te
+"""Configure pytest"""
import logging
-import numpy as np
import time
-import multiprocessing
+import tvm
from tvm import rpc
def check_server_drop():
"""test when server drops"""
try:
+ # pylint: disable=import-outside-toplevel
from tvm.rpc import tracker, proxy, base
+
+ # pylint: disable=import-outside-toplevel
from tvm.rpc.base import TrackerCode
@tvm.register_func("rpc.test2.addone")
@@ -63,8 +64,8 @@ def _put(tclient, value):
def check_timeout(timeout, sleeptime):
def myfunc(remote):
time.sleep(sleeptime)
- f1 = remote.get_function("rpc.test2.addone")
- assert f1(10) == 11
+ test_f1 = remote.get_function("rpc.test2.addone")
+ assert test_f1(10) == 11
try:
tclient.request_and_run("xyz", myfunc, session_timeout=timeout)
@@ -75,18 +76,19 @@ def myfunc(remote):
remote = tclient.request("xyz", priority=0, session_timeout=timeout)
remote2 = tclient.request("xyz", session_timeout=timeout)
time.sleep(sleeptime)
- f1 = remote.get_function("rpc.test2.addone")
- assert f1(10) == 11
- f1 = remote2.get_function("rpc.test2.addone")
- assert f1(10) == 11
+ test_f1 = remote.get_function("rpc.test2.addone")
+ assert test_f1(10) == 11
+ test_f1 = remote2.get_function("rpc.test2.addone")
+ assert test_f1(10) == 11
- except tvm.error.TVMError as e:
+ except tvm.error.TVMError:
pass
remote3 = tclient.request("abc")
- f1 = remote3.get_function("rpc.test2.addone")
+ test_f1 = remote3.get_function("rpc.test2.addone")
+ assert test_f1(10) == 11
remote3 = tclient.request("xyz1")
- f1 = remote3.get_function("rpc.test2.addone")
- assert f1(10) == 11
+ test_f1 = remote3.get_function("rpc.test2.addone")
+ assert test_f1(10) == 11
check_timeout(0.01, 0.1)
check_timeout(2, 0)
From 4f9382c6185002b94f1c1e8ed84b8d46f7352142 Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Fri, 3 Nov 2023 23:07:50 +0800
Subject: [PATCH 05/19] Update test_rpc_server_device.py
---
tests/python/contrib/test_rpc_server_device.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/tests/python/contrib/test_rpc_server_device.py b/tests/python/contrib/test_rpc_server_device.py
index db5459edf2c5..f3d6a6a5eb05 100644
--- a/tests/python/contrib/test_rpc_server_device.py
+++ b/tests/python/contrib/test_rpc_server_device.py
@@ -16,7 +16,6 @@
# under the License.
"""iOS RPC Server tests."""
# pylint: disable=invalid-name, no-value-for-parameter, missing-function-docstring, import-error
-import sys
import multiprocessing
import pytest
import numpy as np
From 765ffcf1808d153033d1aecf5df38cfb226ca0d8 Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Fri, 3 Nov 2023 23:08:37 +0800
Subject: [PATCH 06/19] Update test_rpc_proxy.py
---
tests/python/contrib/test_rpc_proxy.py | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/tests/python/contrib/test_rpc_proxy.py b/tests/python/contrib/test_rpc_proxy.py
index 08da29b0af7b..740396004ff5 100644
--- a/tests/python/contrib/test_rpc_proxy.py
+++ b/tests/python/contrib/test_rpc_proxy.py
@@ -14,12 +14,11 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
-import tvm
-from tvm import te
+"""Configure pytest"""
import logging
-import numpy as np
import time
import multiprocessing
+import tvm
from tvm import rpc
@@ -35,6 +34,7 @@ def rpc_proxy_check():
"""
try:
+ # pylint: disable=import-outside-toplevel
from tvm.rpc import proxy
web_port = 8888
@@ -52,9 +52,9 @@ def check():
server.deamon = True
server.start()
client = rpc.connect(prox.host, prox.port, key="x1")
- f1 = client.get_function("testing.echo")
- assert f1(10) == 10
- assert f1("xyz") == "xyz"
+ test_f1 = client.get_function("testing.echo")
+ assert test_f1(10) == 10
+ assert test_f1("xyz") == "xyz"
check()
except ImportError:
From 0f6249a1c9d15e4538ffd903657d7d63ab759cec Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Fri, 3 Nov 2023 23:09:07 +0800
Subject: [PATCH 07/19] Update test_rocblas.py
---
tests/python/contrib/test_rocblas.py | 41 +++++++++++++++-------------
1 file changed, 22 insertions(+), 19 deletions(-)
diff --git a/tests/python/contrib/test_rocblas.py b/tests/python/contrib/test_rocblas.py
index 2defc9c81251..4f0b767d5bf7 100644
--- a/tests/python/contrib/test_rocblas.py
+++ b/tests/python/contrib/test_rocblas.py
@@ -14,34 +14,35 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
+"""Configure pytest"""
+import numpy as np
import tvm
import tvm.testing
from tvm import te
-import numpy as np
import tvm.topi.testing
-import tvm.testing
from tvm.contrib import rocblas
@tvm.testing.requires_rocm
def test_matmul():
+ """Tests matmul operation using roc"""
n = 1024
- l = 128
+ op_l = 128
m = 235
- A = te.placeholder((n, l), name="A")
- B = te.placeholder((l, m), name="B")
- C = rocblas.matmul(A, B)
- s = te.create_schedule(C.op)
+ input_a = te.placeholder((n, op_l), name="input_a")
+ input_b = te.placeholder((op_l, m), name="input_b")
+ result_c = rocblas.matmul(input_a, input_b)
+ s = te.create_schedule(result_c.op)
def verify(target="rocm"):
if not tvm.get_global_func("tvm.contrib.rocblas.matmul", True):
print("skip because extern function is not available")
return
dev = tvm.rocm(0)
- f = tvm.build(s, [A, B, C], target)
- a = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A.dtype), dev)
- b = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), dev)
- c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev)
+ f = tvm.build(s, [input_a, input_b, result_c], target)
+ a = tvm.nd.array(np.random.uniform(size=(n, op_l)).astype(input_a.dtype), dev)
+ b = tvm.nd.array(np.random.uniform(size=(op_l, m)).astype(input_b.dtype), dev)
+ c = tvm.nd.array(np.zeros((n, m), dtype=result_c.dtype), dev)
f(a, b, c)
tvm.testing.assert_allclose(c.numpy(), np.dot(a.numpy(), b.numpy()), rtol=1e-5)
@@ -49,12 +50,13 @@ def verify(target="rocm"):
def verify_batch_matmul(batch, m, k, n, lib, transa=False, transb=False, dtype="float32"):
+ """Tests matmul operation in batch using roc"""
ashape = (batch, k, m) if transa else (batch, m, k)
bshape = (batch, n, k) if transb else (batch, k, n)
- A = te.placeholder(ashape, name="A", dtype=dtype)
- B = te.placeholder(bshape, name="B", dtype=dtype)
- C = lib.batch_matmul(A, B, transa, transb)
- s = te.create_schedule(C.op)
+ input_a = te.placeholder(ashape, name="input_a", dtype=dtype)
+ input_b = te.placeholder(bshape, name="input_b", dtype=dtype)
+ result_c = lib.batch_matmul(input_a, input_b, transa, transb)
+ s = te.create_schedule(result_c.op)
def get_numpy(a, b, transa, transb):
if transa:
@@ -71,10 +73,10 @@ def verify(target="rocm"):
print("skip because extern function is not available")
return
dev = tvm.rocm(0)
- f = tvm.build(s, [A, B, C], target)
- a = tvm.nd.array(np.random.uniform(size=ashape).astype(A.dtype), dev)
- b = tvm.nd.array(np.random.uniform(size=bshape).astype(B.dtype), dev)
- c = tvm.nd.array(np.zeros((batch, m, n), dtype=C.dtype), dev)
+ f = tvm.build(s, [input_a, input_b, result_c], target)
+ a = tvm.nd.array(np.random.uniform(size=ashape).astype(input_a.dtype), dev)
+ b = tvm.nd.array(np.random.uniform(size=bshape).astype(input_b.dtype), dev)
+ c = tvm.nd.array(np.zeros((batch, m, n), dtype=result_c.dtype), dev)
f(a, b, c)
tvm.testing.assert_allclose(
c.numpy(), get_numpy(a.numpy(), b.numpy(), transa, transb), rtol=1e-5
@@ -85,6 +87,7 @@ def verify(target="rocm"):
@tvm.testing.requires_rocm
def test_batch_matmul():
+ """Tests of matmul operation in batch using roc"""
verify_batch_matmul(128, 64, 512, 512, rocblas, transa=False, transb=False)
verify_batch_matmul(128, 64, 512, 512, rocblas, transa=False, transb=True)
verify_batch_matmul(128, 64, 512, 512, rocblas, transa=True, transb=False)
From 7a04f0e11d09599ed8f30a7ef6ecb671eea1b6db Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Fri, 3 Nov 2023 23:09:24 +0800
Subject: [PATCH 08/19] Update test_random.py
---
tests/python/contrib/test_random.py | 58 +++++++++++++++--------------
1 file changed, 31 insertions(+), 27 deletions(-)
diff --git a/tests/python/contrib/test_random.py b/tests/python/contrib/test_random.py
index ddc06b07110e..f7ad4f3176a8 100644
--- a/tests/python/contrib/test_random.py
+++ b/tests/python/contrib/test_random.py
@@ -14,20 +14,22 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
+"""Configure pytest"""
+import threading
+import numpy as np
import tvm
from tvm import te
-import numpy as np
from tvm.contrib import random
from tvm import rpc
import tvm.testing
-import threading
def test_randint():
+ """Tests randint function"""
m = 10240
n = 10240
- A = random.randint(-127, 128, size=(m, n), dtype="int32")
- s = te.create_schedule(A.op)
+ input_a = random.randint(-127, 128, size=(m, n), dtype="int32")
+ s = te.create_schedule(input_a.op)
def verify(target="llvm"):
if not tvm.testing.device_enabled(target):
@@ -37,22 +39,23 @@ def verify(target="llvm"):
print("skip because extern function is not available")
return
dev = tvm.cpu(0)
- f = tvm.build(s, [A], target)
- a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), dev)
+ f = tvm.build(s, [input_a], target)
+ a = tvm.nd.array(np.zeros((m, n), dtype=input_a.dtype), dev)
f(a)
- na = a.numpy()
- assert abs(np.mean(na)) < 0.3
- assert np.min(na) == -127
- assert np.max(na) == 127
+ _na = a.numpy()
+ assert abs(np.mean(_na)) < 0.3
+ assert np.min(_na) == -127
+ assert np.max(_na) == 127
verify()
def test_uniform():
+ """Tests uniform function"""
m = 10240
n = 10240
- A = random.uniform(0, 1, size=(m, n))
- s = te.create_schedule(A.op)
+ input_a = random.uniform(0, 1, size=(m, n))
+ s = te.create_schedule(input_a.op)
def verify(target="llvm"):
if not tvm.testing.device_enabled(target):
@@ -62,22 +65,23 @@ def verify(target="llvm"):
print("skip because extern function is not available")
return
dev = tvm.cpu(0)
- f = tvm.build(s, [A], target)
+ f = tvm.build(s, [input_a], target)
a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), dev)
f(a)
- na = a.numpy()
- assert abs(np.mean(na) - 0.5) < 1e-1
- assert abs(np.min(na) - 0.0) < 1e-3
- assert abs(np.max(na) - 1.0) < 1e-3
+ op_na = a.numpy()
+ assert abs(np.mean(op_na) - 0.5) < 1e-1
+ assert abs(np.min(op_na) - 0.0) < 1e-3
+ assert abs(np.max(op_na) - 1.0) < 1e-3
verify()
def test_normal():
+ """Tests normal function"""
m = 10240
n = 10240
- A = random.normal(3, 4, size=(m, n))
- s = te.create_schedule(A.op)
+ input_a = random.normal(3, 4, size=(m, n))
+ s = te.create_schedule(input_a.op)
def verify(target="llvm"):
if not tvm.testing.device_enabled(target):
@@ -87,18 +91,20 @@ def verify(target="llvm"):
print("skip because extern function is not available")
return
dev = tvm.cpu(0)
- f = tvm.build(s, [A], target)
- a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), dev)
+ f = tvm.build(s, [input_a], target)
+ a = tvm.nd.array(np.zeros((m, n), dtype=input_a.dtype), dev)
f(a)
- na = a.numpy()
- assert abs(np.mean(na) - 3) < 1e-1
- assert abs(np.std(na) - 4) < 1e-2
+ _na = a.numpy()
+ assert abs(np.mean(_na) - 3) < 1e-1
+ assert abs(np.std(_na) - 4) < 1e-2
verify()
@tvm.testing.uses_gpu
def test_random_fill():
+ """Tests random_fill function"""
+
def test_local(dev, dtype):
if not tvm.get_global_func("tvm.contrib.random.random_fill", True):
print("skip because extern function is not available")
@@ -120,8 +126,6 @@ def test_rpc(dtype):
if not tvm.testing.device_enabled("rpc") or not tvm.runtime.enabled("llvm"):
return
- np_ones = np.ones((512, 512), dtype=dtype)
-
def check_remote(server):
remote = rpc.connect(server.host, server.port)
value = tvm.nd.empty((512, 512), dtype, remote.cpu())
@@ -171,7 +175,7 @@ def test_body():
test_input = tvm.runtime.ndarray.empty((10, 10))
random_fill = tvm.get_global_func("tvm.contrib.random.random_fill_for_measure")
random_fill(test_input)
- except:
+ except: # pylint: disable=bare-except
nonlocal no_exception_happened
no_exception_happened = False
From 001f9bb5fef4d42cdaf820183be3749de2586c5c Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Fri, 3 Nov 2023 23:09:49 +0800
Subject: [PATCH 09/19] Update test_tedd.py
---
tests/python/contrib/test_tedd.py | 75 +++++++++++++++++--------------
1 file changed, 42 insertions(+), 33 deletions(-)
diff --git a/tests/python/contrib/test_tedd.py b/tests/python/contrib/test_tedd.py
index c1af9f682541..75c66f8a218a 100644
--- a/tests/python/contrib/test_tedd.py
+++ b/tests/python/contrib/test_tedd.py
@@ -14,8 +14,8 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
+"""Configure pytest of Tensor Expression Debug Display"""
import re
-
import tvm
from tvm import te
from tvm import topi
@@ -24,27 +24,31 @@
from tvm.relay.backend import Runtime, Executor
-def findany(pattern, str):
- matches = re.findall(pattern, str)
- assert len(matches) > 0, "Pattern not found.\nPattern: " + pattern + "\nString: " + str
+def findany(pattern, _str):
+ matches = re.findall(pattern, _str)
+ assert len(matches) > 0, "Pattern not found.\nPattern: " + pattern + "\nString: " + _str
def checkdependency():
+ # pylint: disable=import-outside-toplevel
import pkg_resources
+ # pylint: disable=E1133
return not {"graphviz", "ipython"} - {pkg.key for pkg in pkg_resources.working_set}
def test_dfg():
- A = te.placeholder((1024, 4096), dtype="float32", name="A")
- B = topi.nn.softmax(A)
+ """Tests dataflow graph"""
+ input_a = te.placeholder((1024, 4096), dtype="float32", name="input_a")
+ input_b = topi.nn.softmax(input_a)
# confirm lower works
- s = te.create_schedule([B.op])
+ s = te.create_schedule([input_b.op])
def verify():
+ # pylint: disable=import-outside-toplevel
from tvm.contrib import tedd
- str = tedd.viz_dataflow_graph(s, False, "", True)
+ _str = tedd.viz_dataflow_graph(s, False, "", True)
# Check all edges are available
findany(r"digraph \"Dataflow Graph\"", str)
findany(r"Stage_0:O_0 -> Tensor_0_0", str)
@@ -64,19 +68,21 @@ def verify():
def test_itervar_relationship_graph():
+ """Tests itervars relationship graph"""
n = te.var("n")
m = te.var("m")
- A = te.placeholder((n, m), name="A")
+ input_a = te.placeholder((n, m), name="input_a")
k = te.reduce_axis((0, m), "k")
- B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
+ input_b = te.compute((n,), lambda i: te.sum(input_a[i, k], axis=k), name="input_b")
- s = te.create_schedule(B.op)
- s[B].split(B.op.reduce_axis[0], factor=16)
+ s = te.create_schedule(input_b.op)
+ s[input_b].split(input_b.op.reduce_axis[0], factor=16)
def verify():
+ # pylint: disable=import-outside-toplevel
from tvm.contrib import tedd
- str = tedd.viz_itervar_relationship_graph(s, False, "", True)
+ _str = tedd.viz_itervar_relationship_graph(s, False, "", True)
findany(r"digraph \"IterVar Relationship Graph\"", str)
findany(r"subgraph cluster_legend", str)
# Check subgraphs for stages
@@ -97,39 +103,41 @@ def verify():
def test_schedule_tree():
+ """Tests schedule tree"""
block_x = te.thread_axis("blockIdx.x")
thread_x = te.thread_axis("threadIdx.x")
n = te.var("n")
m = te.var("m")
- l = te.var("l")
- A = te.placeholder((n, m, l), name="A")
- B = te.compute((n, m, l), lambda bi, bj, bk: A[bi, bj, bk] + 1, name="B")
- r = te.reduce_axis((0, m), "r")
- C = te.compute(
+ op_l = te.var("op_l")
+ input_a = te.placeholder((n, m, op_l), name="input_a")
+ result_b = te.compute((n, m, op_l), lambda bi, bj, bk: input_a[bi, bj, bk] + 1, name="result_b")
+ op_r = te.reduce_axis((0, m), "op_r")
+ result_c = te.compute(
(
n,
m,
),
- lambda ci, cj: te.sum(B[ci, cj, r], axis=r),
- name="C",
+ lambda ci, cj: te.sum(result_b[ci, cj, op_r], axis=op_r),
+ name="result_c",
)
- s = te.create_schedule(C.op)
- s.cache_read(A, "shared", [B])
- s[B].vectorize(B.op.axis[-1])
- s[C].reorder(C.op.reduce_axis[0], C.op.axis[0])
- _, ki = s[C].split(C.op.reduce_axis[0], factor=16)
- Cr = s.rfactor(C, ki)
- s[Cr].compute_at(s[C], s[C].op.axis[-1])
- s[C].bind(s[C].op.axis[0], block_x)
- s[C].bind(s[C].op.axis[1], thread_x)
+ s = te.create_schedule(result_c.op)
+ s.cache_read(input_a, "shared", [result_b])
+ s[result_b].vectorize(result_b.op.axis[-1])
+ s[result_c].reorder(result_c.op.reduce_axis[0], result_c.op.axis[0])
+ _, op_ki = s[result_c].split(result_c.op.reduce_axis[0], factor=16)
+ result_c2 = s.rfactor(result_c, op_ki)
+ s[result_c2].compute_at(s[result_c], s[result_c].op.axis[-1])
+ s[result_c].bind(s[result_c].op.axis[0], block_x)
+ s[result_c].bind(s[result_c].op.axis[1], thread_x)
def verify():
+ # pylint: disable=import-outside-toplevel
from tvm.contrib import tedd
- str = tedd.viz_schedule_tree(s, False, "", True)
+ _str = tedd.viz_schedule_tree(s, False, "", True)
findany(r"digraph \"Schedule Tree\"", str)
findany(r"subgraph cluster_legend", str)
- # Check the A_shared stage, including memory scope, itervars,
+ # Check the input_a_shared stage, including memory scope, itervars,
# and compute
findany(
r"Stage_1.*A\.shared
Scope: shared.+>0.+>"
@@ -153,6 +161,7 @@ def test_tedd_with_schedule_record():
"""Test to build a nn model and check if all schedules could be generated"""
def check_schedule(executor):
+ # pylint: disable=import-outside-toplevel
from tvm.contrib import tedd
error = {}
@@ -167,12 +176,12 @@ def check_schedule(executor):
tedd.viz_dataflow_graph(sch, False, "", True)
tedd.viz_itervar_relationship_graph(sch, False, "", True)
tedd.viz_schedule_tree(sch, False, "", True)
- except:
+ except: # pylint: disable=W0702
if func_name not in error:
error[func_name] = []
error[func_name].append(index)
- assert error == {}, str(error)
+ assert not error, str(error)
if checkdependency():
relay_mod, params = testing.mobilenet.get_workload(batch_size=1, dtype="float32")
From 193c30320f0b9e79d096a46e5f852a7ea815f5ca Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Sat, 4 Nov 2023 11:16:21 +0800
Subject: [PATCH 10/19] Update test_random.py
---
tests/python/contrib/test_random.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/python/contrib/test_random.py b/tests/python/contrib/test_random.py
index f7ad4f3176a8..66f1956f1d1c 100644
--- a/tests/python/contrib/test_random.py
+++ b/tests/python/contrib/test_random.py
@@ -66,7 +66,7 @@ def verify(target="llvm"):
return
dev = tvm.cpu(0)
f = tvm.build(s, [input_a], target)
- a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), dev)
+ a = tvm.nd.array(np.zeros((m, n), dtype=input_a.dtype), dev)
f(a)
op_na = a.numpy()
assert abs(np.mean(op_na) - 0.5) < 1e-1
From ecb967f9bcebe324bb94565bc9560e75a98448a3 Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Mon, 6 Nov 2023 11:44:29 +0800
Subject: [PATCH 11/19] Update test_tedd.py
---
tests/python/contrib/test_tedd.py | 43 ++++++++++++++++---------------
1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/tests/python/contrib/test_tedd.py b/tests/python/contrib/test_tedd.py
index 75c66f8a218a..ccdf14c96837 100644
--- a/tests/python/contrib/test_tedd.py
+++ b/tests/python/contrib/test_tedd.py
@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
"""Configure pytest of Tensor Expression Debug Display"""
+# pylint: disable=invalid-name
import re
import tvm
from tvm import te
@@ -71,12 +72,12 @@ def test_itervar_relationship_graph():
"""Tests itervars relationship graph"""
n = te.var("n")
m = te.var("m")
- input_a = te.placeholder((n, m), name="input_a")
+ A = te.placeholder((n, m), name="A")
k = te.reduce_axis((0, m), "k")
- input_b = te.compute((n,), lambda i: te.sum(input_a[i, k], axis=k), name="input_b")
+ B = te.compute((n,), lambda i: te.sum(A[i, k], axis=k), name="B")
- s = te.create_schedule(input_b.op)
- s[input_b].split(input_b.op.reduce_axis[0], factor=16)
+ s = te.create_schedule(B.op)
+ s[B].split(B.op.reduce_axis[0], factor=16)
def verify():
# pylint: disable=import-outside-toplevel
@@ -108,27 +109,27 @@ def test_schedule_tree():
thread_x = te.thread_axis("threadIdx.x")
n = te.var("n")
m = te.var("m")
- op_l = te.var("op_l")
- input_a = te.placeholder((n, m, op_l), name="input_a")
- result_b = te.compute((n, m, op_l), lambda bi, bj, bk: input_a[bi, bj, bk] + 1, name="result_b")
- op_r = te.reduce_axis((0, m), "op_r")
- result_c = te.compute(
+ l = te.var("l")
+ A = te.placeholder((n, m, l), name="A")
+ B = te.compute((n, m, l), lambda bi, bj, bk: A[bi, bj, bk] + 1, name="B")
+ r = te.reduce_axis((0, m), "r")
+ C = te.compute(
(
n,
m,
),
- lambda ci, cj: te.sum(result_b[ci, cj, op_r], axis=op_r),
- name="result_c",
+ lambda ci, cj: te.sum(B[ci, cj, r], axis=r),
+ name="C",
)
- s = te.create_schedule(result_c.op)
- s.cache_read(input_a, "shared", [result_b])
- s[result_b].vectorize(result_b.op.axis[-1])
- s[result_c].reorder(result_c.op.reduce_axis[0], result_c.op.axis[0])
- _, op_ki = s[result_c].split(result_c.op.reduce_axis[0], factor=16)
- result_c2 = s.rfactor(result_c, op_ki)
- s[result_c2].compute_at(s[result_c], s[result_c].op.axis[-1])
- s[result_c].bind(s[result_c].op.axis[0], block_x)
- s[result_c].bind(s[result_c].op.axis[1], thread_x)
+ s = te.create_schedule(C.op)
+ s.cache_read(A, "shared", [B])
+ s[B].vectorize(B.op.axis[-1])
+ s[C].reorder(C.op.reduce_axis[0], C.op.axis[0])
+ _, ki = s[C].split(C.op.reduce_axis[0], factor=16)
+ Cr = s.rfactor(C, ki)
+ s[Cr].compute_at(s[C], s[C].op.axis[-1])
+ s[C].bind(s[C].op.axis[0], block_x)
+ s[C].bind(s[C].op.axis[1], thread_x)
def verify():
# pylint: disable=import-outside-toplevel
@@ -137,7 +138,7 @@ def verify():
_str = tedd.viz_schedule_tree(s, False, "", True)
findany(r"digraph \"Schedule Tree\"", str)
findany(r"subgraph cluster_legend", str)
- # Check the input_a_shared stage, including memory scope, itervars,
+ # Check the A_shared stage, including memory scope, itervars,
# and compute
findany(
r"Stage_1.*A\.shared
Scope: shared.+>0.+>"
From 3b71df82c7234f9c9ddd71071537715e0278d884 Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Mon, 6 Nov 2023 11:44:59 +0800
Subject: [PATCH 12/19] Update test_sparse.py
---
tests/python/contrib/test_sparse.py | 73 +++++++++++++++--------------
1 file changed, 37 insertions(+), 36 deletions(-)
diff --git a/tests/python/contrib/test_sparse.py b/tests/python/contrib/test_sparse.py
index 0714312c4227..8ebd02cc170c 100644
--- a/tests/python/contrib/test_sparse.py
+++ b/tests/python/contrib/test_sparse.py
@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
"""Configure pytest"""
+# pylint: disable=invalid-name
from collections import namedtuple
import numpy as np
import tvm
@@ -31,17 +32,17 @@ def test_static_tensor():
dev = tvm.device(target, 0)
m = te.size_var("m")
n = te.size_var("n")
- input_a = tvmsp.placeholder(shape=(m, n), name="input_a", dtype=dtype)
- assert input_a.stype == "csr"
+ A = tvmsp.placeholder(shape=(m, n), name="A", dtype=dtype)
+ assert A.stype == "csr"
n = 3
a = np.maximum(np.random.uniform(size=(n, n)).astype(dtype) - 0.6, 0.0)
a = tvmsp.array(a, dev)
- input_a.data = te.placeholder(a.data.shape, dtype, name="input_a_data")
- result_b = tvm.tir.decl_buffer(a.data.shape, dtype, name="input_a_data")
- binds = {input_a.data: result_b}
- result_c = te.compute(input_a.data.shape, lambda i: input_a.data[i] * 2.0, tag="cs_scatter")
- s = te.create_schedule(result_c.op)
- f = tvm.build(s, [input_a.data, result_c], target, binds=binds)
+ A.data = te.placeholder(a.data.shape, dtype, name="A_data")
+ Ab = tvm.tir.decl_buffer(a.data.shape, dtype, name="A_data")
+ binds = {A.data: Ab}
+ C = te.compute(A.data.shape, lambda i: A.data[i] * 2.0, tag="cs_scatter")
+ s = te.create_schedule(C.op)
+ f = tvm.build(s, [A.data, C], target, binds=binds)
c = tvmsp.array(np.zeros((n, n), dtype), dev)
c.data = tvm.nd.empty(a.data.shape, dtype)
c.indices = a.indices
@@ -55,21 +56,21 @@ def test_dynamic_tensor():
dtype = "float32"
target = "llvm"
dev = tvm.device(target, 0)
- n_row, n_col, n = te.size_var("n_row"), te.size_var("n_col"), te.size_var("n")
- input_a = tvmsp.placeholder(shape=(n_row, n_col), nonzeros=n, name="input_a", dtype=dtype)
- assert input_a.stype == "csr"
- result_c = te.compute(input_a.data.shape, lambda i: input_a.data[i] * 2.0, tag="cs_scatter")
- s = te.create_schedule(result_c.op)
- _n_row, _n_col = 3, 5
- a = np.maximum(np.random.uniform(size=(_n_row, _n_col)).astype(dtype) - 0.6, 0.0)
+ nr, nc, n = te.size_var("nr"), te.size_var("nc"), te.size_var("n")
+ A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name="A", dtype=dtype)
+ assert A.stype == "csr"
+ C = te.compute(A.data.shape, lambda i: A.data[i] * 2.0, tag="cs_scatter")
+ s = te.create_schedule(C.op)
+ _nr, _nc = 3, 5
+ a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - 0.6, 0.0)
a = tvmsp.array(a, dev)
assert a.data.dtype == a.dtype
- result_b = namedtuple("CSRBuffer", ["data", "indices", "indptr"])
- result_b.data = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="input_a_data")
- result_b.indices = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="input_a_indices")
- binds = {input_a.data: result_b.data, input_a.indices: result_b.indices}
- f = tvm.build(s, [n_row, input_a.data, result_c], target, binds=binds)
- c = tvmsp.array(np.zeros((_n_row, _n_col), dtype), dev)
+ Ab = namedtuple("CSRBuffer", ["data", "indices", "indptr"])
+ Ab.data = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_data")
+ Ab.indices = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_indices")
+ binds = {A.data: Ab.data, A.indices: Ab.indices}
+ f = tvm.build(s, [nr, A.data, C], target, binds=binds)
+ c = tvmsp.array(np.zeros((_nr, _nc), dtype), dev)
c.data = tvm.nd.empty(a.data.shape, dtype)
c.indices = a.indices
c.indptr = a.indptr
@@ -82,17 +83,17 @@ def test_sparse_array_tuple():
dtype, itype = "float32", "int32"
target = "llvm"
dev = tvm.device(target, 0)
- n_row, n_col, n = te.size_var("n_row"), te.size_var("n_col"), te.size_var("n")
- input_a = tvmsp.placeholder(shape=(n_row, n_col), nonzeros=n, name="input_a", dtype=dtype)
- assert input_a.stype == "csr"
- result_c = te.compute(input_a.data.shape, lambda i: input_a.data[i] * 2.0, tag="cs_scatter")
- s = te.create_schedule(result_c.op)
- _n_row, _n_col = 3, 5
- a = np.maximum(np.random.uniform(size=(_n_row, _n_col)).astype(dtype) - 0.6, 0.0)
+ nr, nc, n = te.size_var("nr"), te.size_var("nc"), te.size_var("n")
+ A = tvmsp.placeholder(shape=(nr, nc), nonzeros=n, name="A", dtype=dtype)
+ assert A.stype == "csr"
+ C = te.compute(A.data.shape, lambda i: A.data[i] * 2.0, tag="cs_scatter")
+ s = te.create_schedule(C.op)
+ _nr, _nc = 3, 5
+ a = np.maximum(np.random.uniform(size=(_nr, _nc)).astype(dtype) - 0.6, 0.0)
# convert to sparse array tuple
source_array = a
- row_idx, col_idx = np.nonzero(source_array)
- data = source_array[row_idx, col_idx]
+ ridx, cidx = np.nonzero(source_array)
+ data = source_array[ridx, cidx]
a_data = _nd.array(data, dev)
indices = np.nonzero(source_array)[1].astype(itype)
a_indices = _nd.array(indices, dev)
@@ -103,12 +104,12 @@ def test_sparse_array_tuple():
# construct tvm sparse array with tuple
a = tvmsp.array(a_init, shape=source_array.shape, device=dev)
assert a.data.dtype == a.dtype
- result_b = namedtuple("CSRBuffer", ["data", "indices", "indptr"])
- result_b.data = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="input_a_data")
- result_b.indices = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="input_a_indices")
- binds = {input_a.data: result_b.data, input_a.indices: result_b.indices}
- f = tvm.build(s, [n_row, input_a.data, result_c], target, binds=binds)
- c = tvmsp.array(np.zeros((_n_row, _n_col), dtype), dev)
+ Ab = namedtuple("CSRBuffer", ["data", "indices", "indptr"])
+ Ab.data = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_data")
+ Ab.indices = tvm.tir.decl_buffer(a.data.shape, a.data.dtype, name="A_indices")
+ binds = {A.data: Ab.data, A.indices: Ab.indices}
+ f = tvm.build(s, [nr, A.data, C], target, binds=binds)
+ c = tvmsp.array(np.zeros((_nr, _nc), dtype), dev)
c.data = tvm.nd.empty(a.data.shape, dtype)
c.indices = a.indices
c.indptr = a.indptr
From b91cc4bd94d69ba67c9c89e3b76e6c087237d1f3 Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Mon, 6 Nov 2023 11:45:21 +0800
Subject: [PATCH 13/19] Update test_sort.py
---
tests/python/contrib/test_sort.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/tests/python/contrib/test_sort.py b/tests/python/contrib/test_sort.py
index 000caac38cc8..c135450c09e1 100644
--- a/tests/python/contrib/test_sort.py
+++ b/tests/python/contrib/test_sort.py
@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
"""Configure pytest"""
+# pylint: disable=invalid-name
import numpy as np
import tvm
import tvm.testing
@@ -25,9 +26,9 @@
def test_sort():
"""Tests sort function"""
n = 2
- lens = 5
+ l = 5
m = 3
- data = te.placeholder((n, lens, m), name="data")
+ data = te.placeholder((n, l, m), name="data")
sort_num = te.placeholder((n, m), name="sort_num", dtype="int32")
axis = 1
is_ascend = False
From 84d9c02d5ec718a775b9d5b602c807961263e2b7 Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Mon, 6 Nov 2023 11:45:37 +0800
Subject: [PATCH 14/19] Update test_rpc_tracker.py
---
tests/python/contrib/test_rpc_tracker.py | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/tests/python/contrib/test_rpc_tracker.py b/tests/python/contrib/test_rpc_tracker.py
index 63ce8be174b4..f6918db4e286 100644
--- a/tests/python/contrib/test_rpc_tracker.py
+++ b/tests/python/contrib/test_rpc_tracker.py
@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
"""Configure pytest"""
+# pylint: disable=invalid-name
import logging
import time
import tvm
@@ -64,8 +65,8 @@ def _put(tclient, value):
def check_timeout(timeout, sleeptime):
def myfunc(remote):
time.sleep(sleeptime)
- test_f1 = remote.get_function("rpc.test2.addone")
- assert test_f1(10) == 11
+ f1 = remote.get_function("rpc.test2.addone")
+ assert f1(10) == 11
try:
tclient.request_and_run("xyz", myfunc, session_timeout=timeout)
@@ -76,19 +77,19 @@ def myfunc(remote):
remote = tclient.request("xyz", priority=0, session_timeout=timeout)
remote2 = tclient.request("xyz", session_timeout=timeout)
time.sleep(sleeptime)
- test_f1 = remote.get_function("rpc.test2.addone")
- assert test_f1(10) == 11
- test_f1 = remote2.get_function("rpc.test2.addone")
- assert test_f1(10) == 11
+ f1 = remote.get_function("rpc.test2.addone")
+ assert f1(10) == 11
+ f1 = remote2.get_function("rpc.test2.addone")
+ assert f1(10) == 11
except tvm.error.TVMError:
pass
remote3 = tclient.request("abc")
- test_f1 = remote3.get_function("rpc.test2.addone")
- assert test_f1(10) == 11
+ f1 = remote3.get_function("rpc.test2.addone")
+ assert f1(10) == 11
remote3 = tclient.request("xyz1")
- test_f1 = remote3.get_function("rpc.test2.addone")
- assert test_f1(10) == 11
+ f1 = remote3.get_function("rpc.test2.addone")
+ assert f1(10) == 11
check_timeout(0.01, 0.1)
check_timeout(2, 0)
From 7a33a7326bc4b198527ea43b972b41e8bf162859 Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Mon, 6 Nov 2023 11:46:23 +0800
Subject: [PATCH 15/19] Update test_rpc_proxy.py
---
tests/python/contrib/test_rpc_proxy.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/tests/python/contrib/test_rpc_proxy.py b/tests/python/contrib/test_rpc_proxy.py
index 740396004ff5..11250a6519af 100644
--- a/tests/python/contrib/test_rpc_proxy.py
+++ b/tests/python/contrib/test_rpc_proxy.py
@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
"""Configure pytest"""
+# pylint: disable=invalid-name
import logging
import time
import multiprocessing
@@ -52,9 +53,9 @@ def check():
server.deamon = True
server.start()
client = rpc.connect(prox.host, prox.port, key="x1")
- test_f1 = client.get_function("testing.echo")
- assert test_f1(10) == 10
- assert test_f1("xyz") == "xyz"
+ f1 = client.get_function("testing.echo")
+ assert f1(10) == 10
+ assert f1("xyz") == "xyz"
check()
except ImportError:
From ef1d71ddbada943716480119f192a039fd6f14ba Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Mon, 6 Nov 2023 11:46:40 +0800
Subject: [PATCH 16/19] Update test_rocblas.py
---
tests/python/contrib/test_rocblas.py | 35 ++++++++++++++--------------
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/tests/python/contrib/test_rocblas.py b/tests/python/contrib/test_rocblas.py
index 4f0b767d5bf7..c5321cd4eaaf 100644
--- a/tests/python/contrib/test_rocblas.py
+++ b/tests/python/contrib/test_rocblas.py
@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
"""Configure pytest"""
+# pylint: disable=invalid-name
import numpy as np
import tvm
import tvm.testing
@@ -27,22 +28,22 @@
def test_matmul():
"""Tests matmul operation using roc"""
n = 1024
- op_l = 128
+ l = 128
m = 235
- input_a = te.placeholder((n, op_l), name="input_a")
- input_b = te.placeholder((op_l, m), name="input_b")
- result_c = rocblas.matmul(input_a, input_b)
- s = te.create_schedule(result_c.op)
+ A = te.placeholder((n, l), name="A")
+ B = te.placeholder((l, m), name="B")
+ C = rocblas.matmul(A, B)
+ s = te.create_schedule(C.op)
def verify(target="rocm"):
if not tvm.get_global_func("tvm.contrib.rocblas.matmul", True):
print("skip because extern function is not available")
return
dev = tvm.rocm(0)
- f = tvm.build(s, [input_a, input_b, result_c], target)
- a = tvm.nd.array(np.random.uniform(size=(n, op_l)).astype(input_a.dtype), dev)
- b = tvm.nd.array(np.random.uniform(size=(op_l, m)).astype(input_b.dtype), dev)
- c = tvm.nd.array(np.zeros((n, m), dtype=result_c.dtype), dev)
+ f = tvm.build(s, [A, B, C], target)
+ a = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A.dtype), dev)
+ b = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), dev)
+ c = tvm.nd.array(np.zeros((n, m), dtype=C.dtype), dev)
f(a, b, c)
tvm.testing.assert_allclose(c.numpy(), np.dot(a.numpy(), b.numpy()), rtol=1e-5)
@@ -53,10 +54,10 @@ def verify_batch_matmul(batch, m, k, n, lib, transa=False, transb=False, dtype="
"""Tests matmul operation in batch using roc"""
ashape = (batch, k, m) if transa else (batch, m, k)
bshape = (batch, n, k) if transb else (batch, k, n)
- input_a = te.placeholder(ashape, name="input_a", dtype=dtype)
- input_b = te.placeholder(bshape, name="input_b", dtype=dtype)
- result_c = lib.batch_matmul(input_a, input_b, transa, transb)
- s = te.create_schedule(result_c.op)
+ A = te.placeholder(ashape, name="A", dtype=dtype)
+ B = te.placeholder(bshape, name="B", dtype=dtype)
+ C = lib.batch_matmul(A, B, transa, transb)
+ s = te.create_schedule(C.op)
def get_numpy(a, b, transa, transb):
if transa:
@@ -73,10 +74,10 @@ def verify(target="rocm"):
print("skip because extern function is not available")
return
dev = tvm.rocm(0)
- f = tvm.build(s, [input_a, input_b, result_c], target)
- a = tvm.nd.array(np.random.uniform(size=ashape).astype(input_a.dtype), dev)
- b = tvm.nd.array(np.random.uniform(size=bshape).astype(input_b.dtype), dev)
- c = tvm.nd.array(np.zeros((batch, m, n), dtype=result_c.dtype), dev)
+ f = tvm.build(s, [A, B, C], target)
+ a = tvm.nd.array(np.random.uniform(size=ashape).astype(A.dtype), dev)
+ b = tvm.nd.array(np.random.uniform(size=bshape).astype(B.dtype), dev)
+ c = tvm.nd.array(np.zeros((batch, m, n), dtype=C.dtype), dev)
f(a, b, c)
tvm.testing.assert_allclose(
c.numpy(), get_numpy(a.numpy(), b.numpy(), transa, transb), rtol=1e-5
From 35cc37941cee648eef94b13d4c69c523e37878cc Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Mon, 6 Nov 2023 11:48:10 +0800
Subject: [PATCH 17/19] Update test_random.py
---
tests/python/contrib/test_random.py | 47 +++++++++++++++--------------
1 file changed, 24 insertions(+), 23 deletions(-)
diff --git a/tests/python/contrib/test_random.py b/tests/python/contrib/test_random.py
index 66f1956f1d1c..6ffd417a0a48 100644
--- a/tests/python/contrib/test_random.py
+++ b/tests/python/contrib/test_random.py
@@ -15,6 +15,7 @@
# specific language governing permissions and limitations
# under the License.
"""Configure pytest"""
+# pylint: disable=invalid-name
import threading
import numpy as np
import tvm
@@ -28,8 +29,8 @@ def test_randint():
"""Tests randint function"""
m = 10240
n = 10240
- input_a = random.randint(-127, 128, size=(m, n), dtype="int32")
- s = te.create_schedule(input_a.op)
+ A = random.randint(-127, 128, size=(m, n), dtype="int32")
+ s = te.create_schedule(A.op)
def verify(target="llvm"):
if not tvm.testing.device_enabled(target):
@@ -39,13 +40,13 @@ def verify(target="llvm"):
print("skip because extern function is not available")
return
dev = tvm.cpu(0)
- f = tvm.build(s, [input_a], target)
- a = tvm.nd.array(np.zeros((m, n), dtype=input_a.dtype), dev)
+ f = tvm.build(s, [A], target)
+ a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), dev)
f(a)
- _na = a.numpy()
- assert abs(np.mean(_na)) < 0.3
- assert np.min(_na) == -127
- assert np.max(_na) == 127
+ na = a.numpy()
+ assert abs(np.mean(na)) < 0.3
+ assert np.min(na) == -127
+ assert np.max(na) == 127
verify()
@@ -54,8 +55,8 @@ def test_uniform():
"""Tests uniform function"""
m = 10240
n = 10240
- input_a = random.uniform(0, 1, size=(m, n))
- s = te.create_schedule(input_a.op)
+ A = random.uniform(0, 1, size=(m, n))
+ s = te.create_schedule(A.op)
def verify(target="llvm"):
if not tvm.testing.device_enabled(target):
@@ -65,13 +66,13 @@ def verify(target="llvm"):
print("skip because extern function is not available")
return
dev = tvm.cpu(0)
- f = tvm.build(s, [input_a], target)
- a = tvm.nd.array(np.zeros((m, n), dtype=input_a.dtype), dev)
+ f = tvm.build(s, [A], target)
+ a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), dev)
f(a)
- op_na = a.numpy()
- assert abs(np.mean(op_na) - 0.5) < 1e-1
- assert abs(np.min(op_na) - 0.0) < 1e-3
- assert abs(np.max(op_na) - 1.0) < 1e-3
+ na = a.numpy()
+ assert abs(np.mean(na) - 0.5) < 1e-1
+ assert abs(np.min(na) - 0.0) < 1e-3
+ assert abs(np.max(na) - 1.0) < 1e-3
verify()
@@ -80,8 +81,8 @@ def test_normal():
"""Tests normal function"""
m = 10240
n = 10240
- input_a = random.normal(3, 4, size=(m, n))
- s = te.create_schedule(input_a.op)
+ A = random.normal(3, 4, size=(m, n))
+ s = te.create_schedule(A.op)
def verify(target="llvm"):
if not tvm.testing.device_enabled(target):
@@ -91,12 +92,12 @@ def verify(target="llvm"):
print("skip because extern function is not available")
return
dev = tvm.cpu(0)
- f = tvm.build(s, [input_a], target)
- a = tvm.nd.array(np.zeros((m, n), dtype=input_a.dtype), dev)
+ f = tvm.build(s, [A], target)
+ a = tvm.nd.array(np.zeros((m, n), dtype=A.dtype), dev)
f(a)
- _na = a.numpy()
- assert abs(np.mean(_na) - 3) < 1e-1
- assert abs(np.std(_na) - 4) < 1e-2
+ na = a.numpy()
+ assert abs(np.mean(na) - 3) < 1e-1
+ assert abs(np.std(na) - 4) < 1e-2
verify()
From 051f819fe505e6c8cdc118db300b2cb9e1d52c60 Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Mon, 6 Nov 2023 14:37:37 +0800
Subject: [PATCH 18/19] Update test_tedd.py
---
tests/python/contrib/test_tedd.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/tests/python/contrib/test_tedd.py b/tests/python/contrib/test_tedd.py
index ccdf14c96837..1ebdc8961574 100644
--- a/tests/python/contrib/test_tedd.py
+++ b/tests/python/contrib/test_tedd.py
@@ -39,9 +39,8 @@ def checkdependency():
def test_dfg():
- """Tests dataflow graph"""
- input_a = te.placeholder((1024, 4096), dtype="float32", name="input_a")
- input_b = topi.nn.softmax(input_a)
+ A = te.placeholder((1024, 4096), dtype="float32", name="A")
+ B = topi.nn.softmax(A)
# confirm lower works
s = te.create_schedule([input_b.op])
From 7fa695999fd33cc500a61aacb89366d478ed1de3 Mon Sep 17 00:00:00 2001
From: Tlopex <68688494+tlopex@users.noreply.github.com>
Date: Mon, 6 Nov 2023 16:03:47 +0800
Subject: [PATCH 19/19] Update test_tedd.py
---
tests/python/contrib/test_tedd.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/tests/python/contrib/test_tedd.py b/tests/python/contrib/test_tedd.py
index 1ebdc8961574..de0a65799c7c 100644
--- a/tests/python/contrib/test_tedd.py
+++ b/tests/python/contrib/test_tedd.py
@@ -39,10 +39,11 @@ def checkdependency():
def test_dfg():
+ """Tests dataflow graph"""
A = te.placeholder((1024, 4096), dtype="float32", name="A")
B = topi.nn.softmax(A)
# confirm lower works
- s = te.create_schedule([input_b.op])
+ s = te.create_schedule([B.op])
def verify():
# pylint: disable=import-outside-toplevel