From fc3e34dfc97efe5a66387956f6300fc20845bc12 Mon Sep 17 00:00:00 2001
From: Mehrdad Hessar
Date: Thu, 12 May 2022 13:41:12 -0700
Subject: [PATCH 1/3] refactor requires_hexagon_toolchain

---
 python/tvm/testing/utils.py                        |  4 ++++
 .../contrib/test_hexagon/benchmark_hexagon.py      | 12 +++---------
 .../test_hexagon/test_2d_physical_buffers.py       | 10 +++++++---
 .../python/contrib/test_hexagon/test_launcher.py   | 16 +++++++---------
 tests/python/contrib/test_hexagon/test_models.py   |  5 ++---
 .../contrib/test_hexagon/test_run_unit_tests.py    |  9 +++++----
 .../contrib/test_hexagon/test_thread_pool.py       | 10 +++++-----
 .../test_hexagon/topi/test_batch_matmul.py         | 10 +++++-----
 .../test_hexagon/topi/test_cache_read_write.py     | 11 +++++------
 .../test_hexagon/topi/test_conv2d_nchw.py          |  4 +---
 .../test_hexagon/topi/test_conv2d_nhwc.py          |  5 +----
 .../test_hexagon/topi/test_conv2d_transpose.py     |  4 +---
 .../contrib/test_hexagon/topi/test_dense.py        |  4 +---
 .../test_hexagon/topi/test_depthwise_conv2d.py     |  3 +--
 .../contrib/test_hexagon/topi/test_pooling.py      | 10 ++++------
 .../contrib/test_hexagon/topi/test_reduce.py       |  4 +---
 .../contrib/test_hexagon/topi/test_softmax.py      |  4 +---
 17 files changed, 54 insertions(+), 71 deletions(-)

diff --git a/python/tvm/testing/utils.py b/python/tvm/testing/utils.py
index b86596feed6b..c038eeda61e5 100644
--- a/python/tvm/testing/utils.py
+++ b/python/tvm/testing/utils.py
@@ -404,6 +404,10 @@ def _get_targets(target_str=None):
         if target_kind == "cuda" and "cudnn" in tvm.target.Target(target).attrs.get("libs", []):
             is_enabled = tvm.support.libinfo()["USE_CUDNN"].lower() in ["on", "true", "1"]
             is_runnable = is_enabled and cudnn.exists()
+        elif target_kind == "hexagon":
+            is_enabled = tvm.runtime.enabled(target_kind)
+            # If Hexagon has compile-time support, we can always fall back
+            is_runnable = is_enabled and "ANDROID_SERIAL_NUMBER" in os.environ
         else:
             is_enabled = tvm.runtime.enabled(target_kind)
             is_runnable = is_enabled and tvm.device(target_kind).exist
diff --git a/tests/python/contrib/test_hexagon/benchmark_hexagon.py b/tests/python/contrib/test_hexagon/benchmark_hexagon.py
index f17530c3efdc..979bd111707b 100644
--- a/tests/python/contrib/test_hexagon/benchmark_hexagon.py
+++ b/tests/python/contrib/test_hexagon/benchmark_hexagon.py
@@ -27,13 +27,7 @@
 
 import tvm.testing
 from tvm import te
-from tvm import relay
-from tvm.relay.backend import Executor, Runtime
-from tvm.contrib import utils, ndk
-from tvm.contrib.hexagon.build import HexagonLauncher
-import tvm.contrib.hexagon as hexagon
-
-from .conftest import requires_hexagon_toolchain
+from tvm.contrib.hexagon.build import HexagonLauncherRPC
 
 
 RPC_SERVER_PORT = 7070
@@ -47,8 +41,8 @@
 # server to bind to the same port until the wait time elapses.
 
 
-@requires_hexagon_toolchain
-def test_elemwise_add(android_serial_number, hexagon_launcher):
+@tvm.testing.requires_hexagon
+def test_elemwise_add(hexagon_launcher: HexagonLauncherRPC):
     """
     Starting with an elementwise-add computation, try various schedules / optimizations to see
     the impact they have on performance.
diff --git a/tests/python/contrib/test_hexagon/test_2d_physical_buffers.py b/tests/python/contrib/test_hexagon/test_2d_physical_buffers.py
index 9de55996b031..78e1eb11ad9f 100644
--- a/tests/python/contrib/test_hexagon/test_2d_physical_buffers.py
+++ b/tests/python/contrib/test_hexagon/test_2d_physical_buffers.py
@@ -19,8 +19,6 @@
 
 import contextlib
 import sys
-import tempfile
-import pathlib
 
 import pytest
 import numpy as np
@@ -272,6 +270,12 @@ def test_lower(self, schedule_args):
 
     @requires_hexagon_toolchain
     def test_build(self, schedule_args, target_host, input_layout, working_layout, output_layout):
+        """Testing build success/failure
+
+        * On Hexagon targets, build must succeed for both 1-d and 2-d memory.
+        * On non-Hexagon targets, build must succeed for 1-d memory.
+        * On non-Hexagon targets, build must fail and report an error for 2-d memory.
+        """
         # contextlib.nullcontext wasn't added until python3.7, and the
         # CI currently runs on python3.6. Therefore, using ExitStack
         # to manage an optional context instead.
@@ -292,7 +296,7 @@ def runtime_module(self, schedule_args, target_host):
 
         return tvm.build(*schedule_args, target=target_host)
 
-    @requires_hexagon_toolchain
+    @tvm.testing.requires_hexagon
     def test_execute(
         self,
         runtime_module,
diff --git a/tests/python/contrib/test_hexagon/test_launcher.py b/tests/python/contrib/test_hexagon/test_launcher.py
index 861ad4f15b48..4de9d5a61293 100644
--- a/tests/python/contrib/test_hexagon/test_launcher.py
+++ b/tests/python/contrib/test_hexagon/test_launcher.py
@@ -24,10 +24,8 @@
 
 from tvm import relay
 from tvm.relay.backend import Executor, Runtime
 
-from .conftest import requires_hexagon_toolchain
-
-@requires_hexagon_toolchain
+@tvm.testing.requires_hexagon
 def test_add(hexagon_session):
     dtype = "int8"
     A = tvm.te.placeholder((2,), dtype=dtype)
@@ -52,7 +50,7 @@
     assert (C_data.numpy() == np.array([6, 7])).all()
 
 
-@requires_hexagon_toolchain
+@tvm.testing.requires_hexagon
 def test_add_vtcm(hexagon_session):
     dtype = "int8"
     A = tvm.te.placeholder((2,), dtype=dtype)
@@ -86,7 +84,7 @@ class TestMatMul:
     N = tvm.testing.parameter(32)
     K = tvm.testing.parameter(32)
 
-    @requires_hexagon_toolchain
+    @tvm.testing.requires_hexagon
     def test_matmul(self, hexagon_session, M, N, K):
         X = te.placeholder((M, K), dtype="float32")
         Y = te.placeholder((K, N), dtype="float32")
@@ -121,7 +119,7 @@ def test_matmul(self, hexagon_session, M, N, K):
         tvm.testing.assert_allclose(zt.numpy(), ztcpu.numpy(), rtol=1e-4)
 
 
-@requires_hexagon_toolchain
+@tvm.testing.requires_hexagon
 def test_graph_executor(hexagon_session):
     dtype = "float32"
     data = relay.var("data", relay.TensorType((1, 64, 64, 3), dtype))
@@ -177,7 +175,7 @@ def test_graph_executor(hexagon_session):
     tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
 
 
-@requires_hexagon_toolchain
+@tvm.testing.requires_hexagon
 def test_graph_executor_multiple_conv2d(hexagon_session):
     dtype = "float32"
     input_shape = (1, 8, 8, 3)
@@ -254,7 +252,7 @@ def test_graph_executor_multiple_conv2d(hexagon_session):
     tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
 
 
-@requires_hexagon_toolchain
+@tvm.testing.requires_hexagon
 def test_aot_executor(hexagon_session, aot_host_target, aot_target):
     dtype = "float32"
     input_shape = (1, 128, 128, 3)
@@ -313,7 +311,7 @@ def test_aot_executor(hexagon_session, aot_host_target, aot_target):
     tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
 
 
-@requires_hexagon_toolchain
+@tvm.testing.requires_hexagon
 def test_aot_executor_multiple_conv2d(hexagon_session, aot_host_target, aot_target):
     dtype = "float32"
     input_shape = (1, 8, 8, 3)
diff --git a/tests/python/contrib/test_hexagon/test_models.py b/tests/python/contrib/test_hexagon/test_models.py
index 0ce66a455e7b..50ee6df3dfe9 100644
--- a/tests/python/contrib/test_hexagon/test_models.py
+++ b/tests/python/contrib/test_hexagon/test_models.py
@@ -25,7 +25,6 @@
 from tvm import relay
 from tvm.relay.backend import Executor, Runtime
 
-from .conftest import requires_hexagon_toolchain
 
 MOBILENET_MODEL = ""
 
@@ -41,7 +40,7 @@ def get_mobilenet():
     return onnx.load(model_path)
 
 
-@requires_hexagon_toolchain
+@tvm.testing.requires_hexagon
 def test_mobilenet(hexagon_session):
     dtype = "float32"
     onnx_model = get_mobilenet()
@@ -88,7 +87,7 @@ def test_mobilenet(hexagon_session):
     tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
 
 
-@requires_hexagon_toolchain
+@tvm.testing.requires_hexagon
 def test_mobilenet_aot(hexagon_session, aot_host_target, aot_target):
     if hexagon_session._launcher._serial_number == "simulator":
         pytest.skip(msg="Skip on simulator due to long runtime.")
diff --git a/tests/python/contrib/test_hexagon/test_run_unit_tests.py b/tests/python/contrib/test_hexagon/test_run_unit_tests.py
index 3a383d30e5f4..010c79b8f554 100644
--- a/tests/python/contrib/test_hexagon/test_run_unit_tests.py
+++ b/tests/python/contrib/test_hexagon/test_run_unit_tests.py
@@ -18,20 +18,21 @@
 import os
 import pytest
 import numpy as np
-from tvm.contrib.hexagon.build import HexagonLauncher
-from .conftest import requires_hexagon_toolchain
+
+import tvm
+from tvm.contrib.hexagon.session import Session
 
 
 # use pytest -sv to observe gtest output
 # use --gtest_args to pass arguments to gtest
 # for example to run all "foo" tests twice and observe gtest output run
 # pytest -sv --gtests_args="--gtest_filter=*foo* --gtest_repeat=2"
-@requires_hexagon_toolchain
+@tvm.testing.requires_hexagon
 @pytest.mark.skipif(
     os.environ.get("HEXAGON_GTEST") == None,
     reason="Test requires environment variable HEXAGON_GTEST set with a path to a Hexagon gtest version normally located at /path/to/hexagon/sdk/utils/googletest/gtest",
 )
-def test_run_unit_tests(hexagon_session, gtest_args):
+def test_run_unit_tests(hexagon_session: Session, gtest_args):
     try:
         func = hexagon_session._rpc.get_function("hexagon.run_unit_tests")
     except:
diff --git a/tests/python/contrib/test_hexagon/test_thread_pool.py b/tests/python/contrib/test_hexagon/test_thread_pool.py
index a05404914607..dc0106a78def 100644
--- a/tests/python/contrib/test_hexagon/test_thread_pool.py
+++ b/tests/python/contrib/test_hexagon/test_thread_pool.py
@@ -20,11 +20,11 @@
 
 import tvm
 import tvm.contrib.hexagon
+from tvm.contrib.hexagon.session import Session
 import tvm.script
 import tvm.testing
 from tvm import te
 
-from .conftest import requires_hexagon_toolchain
 from tvm.script import tir as T
 
 
@@ -66,8 +66,8 @@ def benchmark_func(mod, name, args, hexagon_session):
     return evaluator(a, b, c, n).mean
 
 
-@requires_hexagon_toolchain
-def test_speedup(hexagon_session, capsys):
+@tvm.testing.requires_hexagon
+def test_speedup(hexagon_session: Session, capsys):
     if hexagon_session is None:
         pytest.skip(msg="Skip hardware test, ANDROID_SERIAL_NUMBER is not set.")
 
@@ -84,8 +84,8 @@ def test_speedup(hexagon_session, capsys):
     print("... speedup of {:.2f}".format(serial_mean / parallel_mean), end=" ")
 
 
-@requires_hexagon_toolchain
-def test_elemwise_sum_parallel(hexagon_session):
+@tvm.testing.requires_hexagon
+def test_elemwise_sum_parallel(hexagon_session: Session):
     if hexagon_session is None:
         pytest.skip(msg="Skip hardware test, ANDROID_SERIAL_NUMBER is not set.")
 
diff --git a/tests/python/contrib/test_hexagon/topi/test_batch_matmul.py b/tests/python/contrib/test_hexagon/topi/test_batch_matmul.py
index d73ab46424ae..62846e6c02d4 100644
--- a/tests/python/contrib/test_hexagon/topi/test_batch_matmul.py
+++ b/tests/python/contrib/test_hexagon/topi/test_batch_matmul.py
@@ -24,8 +24,8 @@
 from tvm import te
 import tvm.topi.testing
 from tvm.topi.utils import get_const_tuple
+from tvm.contrib.hexagon.session import Session
 
-from ..conftest import requires_hexagon_toolchain
 
 dtype = tvm.testing.parameter(
     "float32",
@@ -45,8 +45,8 @@ class TestMatMulFloat:
     )
 
     # TODO(mehrdadh): add dynamic testing
-    @requires_hexagon_toolchain
-    def test_batch_matmul(self, hexagon_session, x_batch, y_batch, M, N, K, dtype):
+    @tvm.testing.requires_hexagon
+    def test_batch_matmul(self, hexagon_session: Session, x_batch, y_batch, M, N, K, dtype):
         if dtype == "float16":
             pytest.xfail("float16 is not supported.")
 
@@ -97,8 +97,8 @@ class TestMatMulInt8:
         (5, 1, 16, 16, 32),
     )
 
-    @requires_hexagon_toolchain
-    def test_batch_matmul_int8(self, hexagon_session, x_batch, y_batch, M, N, K):
+    @tvm.testing.requires_hexagon
+    def test_batch_matmul_int8(self, hexagon_session: Session, x_batch, y_batch, M, N, K):
         dtype = "int8"
         out_dtype = "int8"
         assert x_batch == y_batch or x_batch == 1 or y_batch == 1
diff --git a/tests/python/contrib/test_hexagon/topi/test_cache_read_write.py b/tests/python/contrib/test_hexagon/topi/test_cache_read_write.py
index 46e78f668365..f0290ff2fc9d 100644
--- a/tests/python/contrib/test_hexagon/topi/test_cache_read_write.py
+++ b/tests/python/contrib/test_hexagon/topi/test_cache_read_write.py
@@ -20,8 +20,7 @@
 
 import tvm.testing
 from tvm import te
-
-from ..conftest import requires_hexagon_toolchain
+from tvm.contrib.hexagon.session import Session
 
 
 def intrin_mem_copy(shape, dtype, dst_scope, src_scope):
@@ -97,8 +96,8 @@ def verify(hexagon_session, s, x, y, z, size):
     np.testing.assert_equal(zt.numpy(), ref)
 
 
-@requires_hexagon_toolchain
-def test_cache_read_write(hexagon_session):
+@tvm.testing.requires_hexagon
+def test_cache_read_write(hexagon_session: Session):
     size = 128
     outer_shape = (size,)
     factor = 16
@@ -139,8 +138,8 @@ def layout_transform_2d(n):
     return [n // 16, te.AXIS_SEPARATOR, n % 16]
 
 
-@requires_hexagon_toolchain
-def test_cache_read_write_2d(hexagon_session):
+@tvm.testing.requires_hexagon
+def test_cache_read_write_2d(hexagon_session: Session):
     size = 128
     outer_shape = (size,)
     factor = 16
diff --git a/tests/python/contrib/test_hexagon/topi/test_conv2d_nchw.py b/tests/python/contrib/test_hexagon/topi/test_conv2d_nchw.py
index 12417e80af6e..767f02971d76 100644
--- a/tests/python/contrib/test_hexagon/topi/test_conv2d_nchw.py
+++ b/tests/python/contrib/test_hexagon/topi/test_conv2d_nchw.py
@@ -26,8 +26,6 @@
 from tvm.topi.utils import get_const_tuple
 from tvm.topi.nn.utils import get_pad_tuple
 
-from ..conftest import requires_hexagon_toolchain
-
 dtype = tvm.testing.parameter("float32")
 random_seed = tvm.testing.parameter(0)
 
@@ -90,7 +88,7 @@ class BaseConv2DTests:
     dilation = tvm.testing.parameter(1)
     batch = tvm.testing.parameter(1)
 
-    @requires_hexagon_toolchain
+    @tvm.testing.requires_hexagon
     def test_conv2d_nchw(
         self,
         hexagon_session,
diff --git a/tests/python/contrib/test_hexagon/topi/test_conv2d_nhwc.py b/tests/python/contrib/test_hexagon/topi/test_conv2d_nhwc.py
index 60b0b7ea6d39..570f5d14f0f3 100644
--- a/tests/python/contrib/test_hexagon/topi/test_conv2d_nhwc.py
+++ b/tests/python/contrib/test_hexagon/topi/test_conv2d_nhwc.py
@@ -24,9 +24,6 @@
 from tvm import te
 import tvm.topi.testing
 from tvm.topi.utils import get_const_tuple
-from tvm.topi.nn.utils import get_pad_tuple
-
-from ..conftest import requires_hexagon_toolchain
 
 
 dtype = tvm.testing.parameter("float32")
@@ -45,7 +42,7 @@ def ref_data(dtype, batch, in_channel, in_size, num_filter, kernel, stride, padd
 
 
 class BaseConv2DTests:
-    @requires_hexagon_toolchain
+    @tvm.testing.requires_hexagon
     def test_conv2d_nhwc(
         self,
         hexagon_session,
diff --git a/tests/python/contrib/test_hexagon/topi/test_conv2d_transpose.py b/tests/python/contrib/test_hexagon/topi/test_conv2d_transpose.py
index 1dbac67aeb76..c6c632741367 100644
--- a/tests/python/contrib/test_hexagon/topi/test_conv2d_transpose.py
+++ b/tests/python/contrib/test_hexagon/topi/test_conv2d_transpose.py
@@ -21,9 +21,7 @@
 from tvm import te
 from tvm import topi
 import tvm.topi.testing
-from tvm.contrib.pickle_memoize import memoize
 from tvm.topi.utils import get_const_tuple
-from ..conftest import requires_hexagon_toolchain
 
 
 # TODO Should add kernal to tvm.testing.fixture
@@ -67,7 +65,7 @@ def shift_shape(output_padding):
 
 
 class BaseConv2DTransposeTests:
-    @requires_hexagon_toolchain
+    @tvm.testing.requires_hexagon
     def test_conv2d(
         self,
         hexagon_session,
diff --git a/tests/python/contrib/test_hexagon/topi/test_dense.py b/tests/python/contrib/test_hexagon/topi/test_dense.py
index 59a1573a6bd5..09e811b167b3 100644
--- a/tests/python/contrib/test_hexagon/topi/test_dense.py
+++ b/tests/python/contrib/test_hexagon/topi/test_dense.py
@@ -25,8 +25,6 @@
 import tvm.topi.testing
 from tvm.topi.utils import get_const_tuple
 
-from ..conftest import requires_hexagon_toolchain
-
 random_seed = tvm.testing.parameter(0)
 
 use_bias = tvm.testing.parameter(True, False)
@@ -67,7 +65,7 @@ def dense_ref_data(random_seed, batch_size, in_dim, out_dim, use_bias, in_dtype,
     return (a_np, b_np, c_np, d_np)
 
 
-@requires_hexagon_toolchain
+@tvm.testing.requires_hexagon
 def test_dense(
     hexagon_session, batch_size, in_dim, out_dim, use_bias, in_dtype, out_dtype, dense_ref_data
 ):
diff --git a/tests/python/contrib/test_hexagon/topi/test_depthwise_conv2d.py b/tests/python/contrib/test_hexagon/topi/test_depthwise_conv2d.py
index 6343a10f1f77..5b86a90f1c13 100644
--- a/tests/python/contrib/test_hexagon/topi/test_depthwise_conv2d.py
+++ b/tests/python/contrib/test_hexagon/topi/test_depthwise_conv2d.py
@@ -27,7 +27,6 @@
 from tvm import te, topi
 from tvm.topi.utils import get_const_tuple
 from tvm.topi.nn.utils import get_pad_tuple
-from ..conftest import requires_hexagon_toolchain
 
 random_seed = tvm.testing.parameter(0)
 
@@ -154,7 +153,7 @@ class BaseDepthwiseConv2D:
     (e.g. implemented only for llvm).
""" - @requires_hexagon_toolchain + @tvm.testing.requires_hexagon def test_conv2d( self, hexagon_session, diff --git a/tests/python/contrib/test_hexagon/topi/test_pooling.py b/tests/python/contrib/test_hexagon/topi/test_pooling.py index f05611f2f544..e7751a982b26 100644 --- a/tests/python/contrib/test_hexagon/topi/test_pooling.py +++ b/tests/python/contrib/test_hexagon/topi/test_pooling.py @@ -25,8 +25,6 @@ import tvm.topi.testing from tvm.topi.utils import get_const_tuple -from ..conftest import requires_hexagon_toolchain - class TestAdaptivePool: dshape, out_size, pool_type, layout = tvm.testing.parameters( @@ -56,7 +54,7 @@ class TestAdaptivePool: ((1, 16, 32, 32, 32), (2, 4, 4), "max", "NDHWC"), ) - @requires_hexagon_toolchain + @tvm.testing.requires_hexagon def test_adaptive_pool(self, hexagon_session, dshape, out_size, pool_type, layout): dtype = "float32" np_data = np.random.uniform(low=0, high=255, size=dshape).astype(dtype) @@ -232,7 +230,7 @@ class TestPool1D: ([1, 31, 16], [3], [3], [3], [3, 0], "max", True, True, "NWC"), ) - @requires_hexagon_toolchain + @tvm.testing.requires_hexagon def test_pool1d( self, hexagon_session, @@ -309,7 +307,7 @@ class TestPool2D: ([1, 31, 31, 16], [3, 3], [3, 3], [2, 2], [3, 2, 1, 0], "max", True, True, "NHWC"), ) - @requires_hexagon_toolchain + @tvm.testing.requires_hexagon def test_pool2d( self, hexagon_session, @@ -707,7 +705,7 @@ class TestPool3D: ), ) - @requires_hexagon_toolchain + @tvm.testing.requires_hexagon def test_pool3d( self, hexagon_session, diff --git a/tests/python/contrib/test_hexagon/topi/test_reduce.py b/tests/python/contrib/test_hexagon/topi/test_reduce.py index 7978e3854f93..4d712b16fd02 100644 --- a/tests/python/contrib/test_hexagon/topi/test_reduce.py +++ b/tests/python/contrib/test_hexagon/topi/test_reduce.py @@ -24,8 +24,6 @@ from tvm import te import tvm.topi.testing -from ..conftest import requires_hexagon_toolchain - in_shape, axis, keepdims, reduce_type, dtype = tvm.testing.parameters( ((32,), 0, False, "argmax", "float32"), @@ -100,7 +98,7 @@ def ref_data(in_shape, axis, keepdims, reduce_type, dtype): return in_npy, in_npy_map, out_npy -@requires_hexagon_toolchain +@tvm.testing.requires_hexagon def test_reduce_map(hexagon_session, ref_data, in_shape, axis, keepdims, reduce_type, dtype): in_npy, in_npy_map, out_npy = ref_data diff --git a/tests/python/contrib/test_hexagon/topi/test_softmax.py b/tests/python/contrib/test_hexagon/topi/test_softmax.py index 4825d1e52442..d4c7505f78ec 100644 --- a/tests/python/contrib/test_hexagon/topi/test_softmax.py +++ b/tests/python/contrib/test_hexagon/topi/test_softmax.py @@ -25,8 +25,6 @@ import tvm.topi.testing from tvm.topi.utils import get_const_tuple -from ..conftest import requires_hexagon_toolchain - dtype = tvm.testing.parameter( "float16", "float32", @@ -53,7 +51,7 @@ ) -@requires_hexagon_toolchain +@tvm.testing.requires_hexagon def test_softmax(hexagon_session, shape, dtype, softmax_operation): if dtype == "float16": pytest.xfail("float16 is not supported.") From c1806dac927b32299be40a94a337505613a345b9 Mon Sep 17 00:00:00 2001 From: Mehrdad Hessar Date: Fri, 13 May 2022 13:42:21 -0700 Subject: [PATCH 2/3] trigger --- python/tvm/testing/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/tvm/testing/utils.py b/python/tvm/testing/utils.py index c038eeda61e5..8be5cc8ec471 100644 --- a/python/tvm/testing/utils.py +++ b/python/tvm/testing/utils.py @@ -405,7 +405,7 @@ def _get_targets(target_str=None): is_enabled = 
             is_enabled = tvm.support.libinfo()["USE_CUDNN"].lower() in ["on", "true", "1"]
             is_runnable = is_enabled and cudnn.exists()
         elif target_kind == "hexagon":
-            is_enabled = tvm.runtime.enabled(target_kind)
+            is_enabled = tvm.support.libinfo()["USE_HEXAGON"].lower() in ["on", "true", "1"]
             # If Hexagon has compile-time support, we can always fall back
             is_runnable = is_enabled and "ANDROID_SERIAL_NUMBER" in os.environ
         else:

From e57b050f9cdb7b57e9aaeb02c84f9adca7cda0e9 Mon Sep 17 00:00:00 2001
From: Mehrdad Hessar
Date: Mon, 16 May 2022 10:28:07 -0700
Subject: [PATCH 3/3] lint

---
 tests/python/contrib/test_hexagon/test_models.py      | 3 +--
 tests/python/contrib/test_hexagon/test_thread_pool.py | 6 ------
 2 files changed, 1 insertion(+), 8 deletions(-)

diff --git a/tests/python/contrib/test_hexagon/test_models.py b/tests/python/contrib/test_hexagon/test_models.py
index 4a5bfa9b65d5..74f52f20d97c 100644
--- a/tests/python/contrib/test_hexagon/test_models.py
+++ b/tests/python/contrib/test_hexagon/test_models.py
@@ -25,7 +25,6 @@
 from tvm.contrib.hexagon.session import Session
 
 
-
 def get_mobilenet():
     """Download and import mobilenet model with ONNX"""
     import onnx  # pylint: disable=import-outside-toplevel
@@ -84,9 +83,9 @@ def test_mobilenet(hexagon_session: Session):
     tvm.testing.assert_allclose(hexagon_output, expected_output, rtol=1e-4, atol=1e-5)
 
 
-
 enable_usmp = tvm.testing.parameter(False, True)
 
+
 @tvm.testing.requires_hexagon
 def test_mobilenet_aot(hexagon_session: Session, aot_host_target, aot_target, enable_usmp):
     if hexagon_session._launcher._serial_number == "simulator":
diff --git a/tests/python/contrib/test_hexagon/test_thread_pool.py b/tests/python/contrib/test_hexagon/test_thread_pool.py
index 267b0894687a..d95c4120b775 100644
--- a/tests/python/contrib/test_hexagon/test_thread_pool.py
+++ b/tests/python/contrib/test_hexagon/test_thread_pool.py
@@ -68,9 +68,6 @@ def benchmark_func(mod, name, args, hexagon_session):
 @tvm.testing.requires_hexagon
 def test_speedup(hexagon_session: Session, capsys):
-    if hexagon_session is None:
-        pytest.skip(msg="Skip hardware test, ANDROID_SERIAL_NUMBER is not set.")
-
     target_hexagon = tvm.target.hexagon("v68", link_params=True)
     func = tvm.build(
         ElemwiseSumIRModule, target=tvm.target.Target(target_hexagon, host=target_hexagon)
     )
@@ -86,9 +83,6 @@ def test_speedup(hexagon_session: Session, capsys):
 @tvm.testing.requires_hexagon
 def test_elemwise_sum_parallel(hexagon_session: Session):
-    if hexagon_session is None:
-        pytest.skip(msg="Skip hardware test, ANDROID_SERIAL_NUMBER is not set.")
-
     target_hexagon = tvm.target.hexagon("v68", link_params=True)
     func = tvm.build(
         ElemwiseSumIRModule, target=tvm.target.Target(target_hexagon, host=target_hexagon)