From e3578f1f7007f7b2330e4d92df7131640b7a6983 Mon Sep 17 00:00:00 2001 From: Eric Lunderberg Date: Thu, 2 Sep 2021 15:19:14 -0500 Subject: [PATCH 1/3] [UnitTest] Added ids argument to tvm.testing.parameters This matches the usage in `tvm.testing.parameter`, and allows for parameter sets to be referred to by a single name. --- python/tvm/testing/plugin.py | 23 ++++++++++++------- python/tvm/testing/utils.py | 10 +++++++- .../unittest/test_tvm_testing_features.py | 11 ++++++++- 3 files changed, 34 insertions(+), 10 deletions(-) diff --git a/python/tvm/testing/plugin.py b/python/tvm/testing/plugin.py index 10752e75ed1b..91ba12f946b2 100644 --- a/python/tvm/testing/plugin.py +++ b/python/tvm/testing/plugin.py @@ -274,7 +274,7 @@ def _target_to_requirement(target): def _parametrize_correlated_parameters(metafunc): - parametrize_needed = collections.defaultdict(list) + parametrize_needed = {} for name, fixturedefs in metafunc.definition._fixtureinfo.name2fixturedefs.items(): fixturedef = fixturedefs[-1] @@ -283,13 +283,20 @@ def _parametrize_correlated_parameters(metafunc): ): group = fixturedef.func.parametrize_group values = fixturedef.func.parametrize_values - parametrize_needed[group].append((name, values)) + ids = fixturedef.func.parametrize_ids + if group in parametrize_needed: + assert ids == parametrize_needed[group]["ids"] + else: + parametrize_needed[group] = {"ids": ids, "params": []} + parametrize_needed[group]["params"].append((name, values)) for parametrize_group in parametrize_needed.values(): - if len(parametrize_group) == 1: - name, values = parametrize_group[0] - metafunc.parametrize(name, values, indirect=True) + params = parametrize_group["params"] + ids = parametrize_group["ids"] + if len(params) == 1: + name, values = params[0] + metafunc.parametrize(name, values, indirect=True, ids=ids) else: - names = ",".join(name for name, values in parametrize_group) - value_sets = zip(*[values for name, values in parametrize_group]) - metafunc.parametrize(names, value_sets, indirect=True) + names = ",".join(name for name, values in params) + value_sets = zip(*[values for name, values in params]) + metafunc.parametrize(names, value_sets, indirect=True, ids=ids) diff --git a/python/tvm/testing/utils.py b/python/tvm/testing/utils.py index 9a472e64a48c..5901acedd8f4 100644 --- a/python/tvm/testing/utils.py +++ b/python/tvm/testing/utils.py @@ -1029,7 +1029,7 @@ def as_fixture(*_cls, request): _parametrize_group = 0 -def parameters(*value_sets): +def parameters(*value_sets, ids=None): """Convenience function to define pytest parametrized fixtures. Declaring a variable using tvm.testing.parameters will define a @@ -1052,11 +1052,18 @@ def parameters(*value_sets): Parameters ---------- values : List[tuple] + A list of parameter value sets. Each set of values represents a single combination of values to be tested. A unit test that accepts parameters defined will be run once for every set of parameters in the list. + ids : List[str], optional + + A list of names for the parameter sets. If None, pytest will + generate a name from each parameter set. These generated names may + not be readable/useful for composite types such as tuples. 
+ Returns ------- List[function] @@ -1085,6 +1092,7 @@ def fixture_func(*_cls, request): fixture_func.parametrize_group = parametrize_group fixture_func.parametrize_values = param_values + fixture_func.parametrize_ids = ids outputs.append(pytest.fixture(fixture_func)) return outputs diff --git a/tests/python/unittest/test_tvm_testing_features.py b/tests/python/unittest/test_tvm_testing_features.py index f396eeeee5fb..df94adf1bd2b 100644 --- a/tests/python/unittest/test_tvm_testing_features.py +++ b/tests/python/unittest/test_tvm_testing_features.py @@ -98,7 +98,8 @@ class TestJointParameter: joint_usages = 0 joint_param_vals = list(zip(param1_vals, param2_vals)) - joint_param1, joint_param2 = tvm.testing.parameters(*joint_param_vals) + joint_param_ids = ["apple", "pear", "banana"] + joint_param1, joint_param2 = tvm.testing.parameters(*joint_param_vals, ids=joint_param_ids) def test_using_independent(self, param1, param2): type(self).independent_usages += 1 @@ -113,6 +114,14 @@ def test_using_joint(self, joint_param1, joint_param2): def test_joint(self): assert self.joint_usages == len(self.joint_param_vals) + def test_joint_test_id(self, joint_param1, joint_param2, request): + param_string = ( + request.node.name.replace(request.node.originalname, "") + .replace("[", "") + .replace("]", "") + ) + assert param_string in self.joint_param_ids + class TestFixtureCaching: param1_vals = [1, 2, 3] From 816e6af4d982479348527d7478d8df9f9c0ca0ec Mon Sep 17 00:00:00 2001 From: Eric Lunderberg Date: Fri, 3 Sep 2021 16:51:56 -0500 Subject: [PATCH 2/3] [Pytest] Fixed ordering issue of tvm.testing.parametrize_targets and known_failing_targets If an explicit list of targets is given, then the `known_failing_targets` decorator would fail to apply. This commit resolves the issue, and cleans up all target-specific marks to apply in `tvm.testing.plugin._add_target_specific_marks`. --- python/tvm/testing/__init__.py | 1 + python/tvm/testing/plugin.py | 104 ++++++++---------- python/tvm/testing/utils.py | 21 ++++ .../unittest/test_tvm_testing_features.py | 9 +- 4 files changed, 75 insertions(+), 60 deletions(-) diff --git a/python/tvm/testing/__init__.py b/python/tvm/testing/__init__.py index 3b4e743d6f07..75349d8d5a14 100644 --- a/python/tvm/testing/__init__.py +++ b/python/tvm/testing/__init__.py @@ -17,6 +17,7 @@ # pylint: disable=redefined-builtin, wildcard-import """Utility Python functions for TVM testing""" + from .utils import * from ._ffi_api import nop, echo, device_test, run_check_signal, object_use_count diff --git a/python/tvm/testing/plugin.py b/python/tvm/testing/plugin.py index 91ba12f946b2..95875acbd82c 100644 --- a/python/tvm/testing/plugin.py +++ b/python/tvm/testing/plugin.py @@ -31,8 +31,6 @@ """ -import collections - import pytest import _pytest @@ -67,6 +65,7 @@ def pytest_generate_tests(metafunc): """Called once per unit test, modifies/parametrizes it as needed.""" _parametrize_correlated_parameters(metafunc) _auto_parametrize_target(metafunc) + _add_target_specific_marks(metafunc) def pytest_collection_modifyitems(config, items): @@ -100,7 +99,39 @@ def _auto_parametrize_target(metafunc): """ + if "target" in metafunc.fixturenames: + # Check if any explicit parametrizations exist, and apply one + # if they do not. If the function is marked with either + # excluded or known failing targets, use these to determine + # the targets to be used. 
+ parametrized_args = [ + arg.strip() + for mark in metafunc.definition.iter_markers("parametrize") + for arg in mark.args[0].split(",") + ] + if "target" not in parametrized_args: + excluded_targets = getattr(metafunc.function, "tvm_excluded_targets", []) + + # Add a parametrize marker instead of calling + # metafunc.parametrize so that the parametrize rewriting + # can still occur. + mark = pytest.mark.parametrize( + "target", + [ + t["target"] + for t in utils._get_targets() + if t["target_kind"] not in excluded_targets + ], + scope="session", + ) + metafunc.definition.add_marker(mark) + + +def _add_target_specific_marks(metafunc): + """Add any target-specific marks to parametrizations over target""" + def update_parametrize_target_arg( + mark, argnames, argvalues, *args, @@ -131,6 +162,16 @@ def update_parametrize_target_arg( target = param_set[target_i] additional_marks = [] + if mark in metafunc.definition.own_markers: + xfail_targets = getattr(metafunc.function, "tvm_known_failing_targets", []) + target_kind = target.split()[0] if isinstance(target, str) else target.kind.name + if target_kind in xfail_targets: + additional_marks.append( + pytest.mark.xfail( + reason=f'Known failing test for target "{target_kind}"' + ) + ) + new_argvalues.append( pytest.param( *param_set, marks=_target_to_requirement(target) + additional_marks @@ -155,25 +196,7 @@ def update_parametrize_target_arg( # parametrize over targets. This adds the appropriate # @tvm.testing.requires_* markers for each target. for mark in metafunc.definition.iter_markers("parametrize"): - update_parametrize_target_arg(*mark.args, **mark.kwargs) - - # Check if any explicit parametrizations exist, and apply one - # if they do not. If the function is marked with either - # excluded or known failing targets, use these to determine - # the targets to be used. - parametrized_args = [ - arg.strip() - for mark in metafunc.definition.iter_markers("parametrize") - for arg in mark.args[0].split(",") - ] - if "target" not in parametrized_args: - excluded_targets = getattr(metafunc.function, "tvm_excluded_targets", []) - xfail_targets = getattr(metafunc.function, "tvm_known_failing_targets", []) - metafunc.parametrize( - "target", - _pytest_target_params(None, excluded_targets, xfail_targets), - scope="session", - ) + update_parametrize_target_arg(mark, *mark.args, **mark.kwargs) def _count_num_fixture_uses(items): @@ -212,43 +235,6 @@ def _remove_global_fixture_definitions(items): delattr(module, name) -def _pytest_target_params(targets, excluded_targets=None, xfail_targets=None): - # Include unrunnable targets here. They get skipped by the - # pytest.mark.skipif in _target_to_requirement(), showing up as - # skipped tests instead of being hidden entirely. - if targets is None: - if excluded_targets is None: - excluded_targets = set() - - if xfail_targets is None: - xfail_targets = set() - - target_marks = [] - for t in utils._get_targets(): - # Excluded targets aren't included in the params at all. - if t["target_kind"] not in excluded_targets: - - # Known failing targets are included, but are marked - # as expected to fail. 
- extra_marks = [] - if t["target_kind"] in xfail_targets: - extra_marks.append( - pytest.mark.xfail( - reason='Known failing test for target "{}"'.format(t["target_kind"]) - ) - ) - - target_marks.append((t["target"], extra_marks)) - - else: - target_marks = [(target, []) for target in targets] - - return [ - pytest.param(target, marks=_target_to_requirement(target) + extra_marks) - for target, extra_marks in target_marks - ] - - def _target_to_requirement(target): if isinstance(target, str): target = tvm.target.Target(target) @@ -256,6 +242,8 @@ def _target_to_requirement(target): # mapping from target to decorator if target.kind.name == "cuda" and "cudnn" in target.attrs.get("libs", []): return utils.requires_cudnn() + if target.kind.name == "cuda" and "cublas" in target.attrs.get("libs", []): + return utils.requires_cublas() if target.kind.name == "cuda": return utils.requires_cuda() if target.kind.name == "rocm": diff --git a/python/tvm/testing/utils.py b/python/tvm/testing/utils.py index 5901acedd8f4..75c77a09b2bb 100644 --- a/python/tvm/testing/utils.py +++ b/python/tvm/testing/utils.py @@ -594,6 +594,27 @@ def requires_cudnn(*args): return _compose(args, requirements) +def requires_cublas(*args): + """Mark a test as requiring the cuBLAS library. + + This also marks the test as requiring a cuda gpu. + + Parameters + ---------- + f : function + Function to mark + """ + + requirements = [ + pytest.mark.skipif( + tvm.get_global_func("tvm.contrib.cublas.matmul", True) is None, + reason="cuBLAS library not enabled", + ), + *requires_cuda(), + ] + return _compose(args, requirements) + + def requires_nvptx(*args): """Mark a test as requiring the NVPTX compilation on the CUDA runtime diff --git a/tests/python/unittest/test_tvm_testing_features.py b/tests/python/unittest/test_tvm_testing_features.py index df94adf1bd2b..cbcdc4356250 100644 --- a/tests/python/unittest/test_tvm_testing_features.py +++ b/tests/python/unittest/test_tvm_testing_features.py @@ -87,6 +87,11 @@ def test_known_failing_target(self, target): def test_all_targets_ran(self): assert self.run_targets_with_known_failure == self.enabled_targets + @tvm.testing.known_failing_targets("llvm") + @tvm.testing.parametrize_targets("llvm") + def test_known_failing_explicit_list(self, target): + assert target != "llvm" + class TestJointParameter: param1_vals = [1, 2, 3] @@ -211,8 +216,8 @@ def test_num_uses_cached(self): class TestAutomaticMarks: @staticmethod def check_marks(request, target): - parameter = tvm.testing.plugin._pytest_target_params([target])[0] - required_marks = [decorator.mark for decorator in parameter.marks] + decorators = tvm.testing.plugin._target_to_requirement(target) + required_marks = [decorator.mark for decorator in decorators] applied_marks = list(request.node.iter_markers()) for required_mark in required_marks: From 5c938ce3423807cb0268dbc6f99de6b93a12717a Mon Sep 17 00:00:00 2001 From: Eric Lunderberg Date: Wed, 1 Sep 2021 16:10:01 -0500 Subject: [PATCH 3/3] [UnitTest][Vulkan] Runnable relay unit tests on Vulkan This commit allows the relay test suite to be run targeting Vulkan with `TVM_TEST_TARGETS="vulkan -from_device=0" pytest tests/python/relay`. All tests that require a specific environment are skipped if that environment isn't present. All tests that are known to fail when running on Vulkan are marked as expected failure, and will be tracked in https://github.com/apache/tvm/issues/8903.
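For illustration, a hypothetical test (not one from this patch) opting into the same handling would look roughly like the sketch below; `tvm.testing.parametrize_targets` and `tvm.testing.known_failing_targets` are the existing decorators exercised by the earlier patches in this series, and the test name is made up.

    # Hypothetical example: parametrize over explicit targets, and turn the
    # known Vulkan failure into an xfail instead of a hard failure.
    import tvm.testing

    @tvm.testing.known_failing_targets("vulkan")
    @tvm.testing.parametrize_targets("llvm", "cuda", "vulkan")
    def test_example(target, dev):
        ...  # exercise the op under test on (target, dev)

The known failing cases are: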
- Failures during code generation - Type mismatches, boolean vs int8 - tests/python/relay/test_any.py::test_any_reduce - tests/python/relay/test_op_level3.py::test_sparse_reshape - tests/python/relay/test_op_level4.py::test_reduce_functions - tests/python/relay/test_vm.py::test_cond - tests/python/relay/test_vm.py::test_simple_if - Incorrect strategy selection, picks NCHWc implementation for NHWC layout - tests/python/relay/test_op_level2.py::test_conv2d_run - Unresolved CallNode operation - tests/python/relay/test_op_level1.py::test_unary_op[erf/tan/atan] - tests/python/relay/test_op_level3.py::test_scatter_add - tests/python/relay/test_op_level3.py::test_segment_sum - Generates 64-bit calls to GLSL that have only 16-/32-bit support - tests/python/relay/test_op_grad_level1.py::test_log_softmax_grad - tests/python/relay/test_op_grad_level1.py::test_softmax_grad - tests/python/relay/test_op_grad_level1.py::test_unary_op - tests/python/relay/test_op_grad_level10.py::test_cross_entropy_grad - Codegen raises error for variable size - tests/python/relay/test_any.py::test_any_batch_matmul - tests/python/relay/test_any.py::test_any_conv2d_NCHWc - tests/python/relay/test_any.py::test_any_dense - Failures when running - Numeric differences (observed on GTX 1650 with NVIDIA driver) - tests/python/relay/test_op_level3.py::test_take - tests/python/relay/test_op_level5.py::TestCropAndResize - tests/python/relay/test_op_level5.py::TestResize1D - tests/python/relay/test_op_level5.py::TestResize2D --- python/tvm/relay/testing/__init__.py | 11 +- python/tvm/testing/utils.py | 25 +- tests/python/relay/aot/aot_test_utils.py | 6 +- tests/python/relay/test_any.py | 405 ++--- tests/python/relay/test_op_grad_level1.py | 242 +-- tests/python/relay/test_op_grad_level10.py | 128 +- tests/python/relay/test_op_level1.py | 85 +- tests/python/relay/test_op_level2.py | 427 ++++-- tests/python/relay/test_op_level3.py | 1325 ++++++++--------- tests/python/relay/test_op_level4.py | 259 ++-- tests/python/relay/test_op_level5.py | 590 ++++---- .../python/relay/test_pass_alter_op_layout.py | 35 +- tests/python/relay/test_vm.py | 291 ++-- 13 files changed, 2051 insertions(+), 1778 deletions(-) diff --git a/python/tvm/relay/testing/__init__.py b/python/tvm/relay/testing/__init__.py index 8eb07d7b583b..9fc75199bdf5 100644 --- a/python/tvm/relay/testing/__init__.py +++ b/python/tvm/relay/testing/__init__.py @@ -81,6 +81,7 @@ def check_grad( scale=None, mean=0, mode="higher_order", + target_devices=None, ): """Perform numerical gradient checking given a relay function. @@ -117,6 +118,11 @@ def check_grad( mean: float The mean of the inputs. + + target_devices: Optional[List[Tuple[tvm.target.Target, tvm.runtime.Device]]] + A list of targets/devices on which the gradient should be + tested. If not specified, will default to `tvm.testing.enabled_targets()`. + """ fwd_func = run_infer_type(func) @@ -133,7 +139,10 @@ def check_grad( if test_inputs is None: test_inputs = inputs - for target, dev in enabled_targets(): + if target_devices is None: + target_devices = enabled_targets() + + for target, dev in target_devices: # Eval the backward and forward functions # TODO(mbs): Evaluate a pair of functions so can share preparation between them.
bwd_func_compiled = relay.create_executor(device=dev, target=target).evaluate(bwd_func) diff --git a/python/tvm/testing/utils.py b/python/tvm/testing/utils.py index 75c77a09b2bb..62531ff7c194 100644 --- a/python/tvm/testing/utils.py +++ b/python/tvm/testing/utils.py @@ -989,7 +989,7 @@ def wraps(func): return wraps -def parameter(*values, ids=None): +def parameter(*values, ids=None, by_dict=None): """Convenience function to define pytest parametrized fixtures. Declaring a variable using ``tvm.testing.parameter`` will define a @@ -1009,16 +1009,23 @@ def parameter(*values, ids=None): Parameters ---------- - values + values : Any + A list of parameter values. A unit test that accepts this parameter as an argument will be run once for each parameter given. ids : List[str], optional + A list of names for the parameters. If None, pytest will generate a name from the value. These generated names may not be readable/useful for composite types such as tuples. + by_dict : Dict[str, Any] + + A mapping from parameter name to parameter value, to set both the + values and ids. + Returns ------- function @@ -1036,8 +1043,22 @@ def parameter(*values, ids=None): >>> def test_using_size(shape): >>> ... # Test code here + Or + + >>> shape = tvm.testing.parameter(by_dict={'small': (5,10), 'large': (512,1024)}) + >>> def test_using_size(shape): + >>> ... # Test code here + """ + if by_dict is not None: + if values or ids: + raise RuntimeError( + "Use of the by_dict parameter cannot be used alongside positional arguments" + ) + + ids, values = zip(*by_dict.items()) + # Optional cls parameter in case a parameter is defined inside a # class scope. @pytest.fixture(params=values, ids=ids) diff --git a/tests/python/relay/aot/aot_test_utils.py b/tests/python/relay/aot/aot_test_utils.py index baa2397b2303..490257ac66da 100644 --- a/tests/python/relay/aot/aot_test_utils.py +++ b/tests/python/relay/aot/aot_test_utils.py @@ -155,6 +155,10 @@ def parametrize_aot_options(test): skip_i386 = pytest.mark.skipif( platform.machine() == "i686", reason="Reference system unavailable in i386 container" ) + requires_arm_eabi = pytest.mark.skipif( + shutil.which("arm-none-eabi-gcc") is None, reason="ARM embedded toolchain unavailable" + ) + interface_api = ["packed", "c"] use_unpacked_api = [True, False] test_runner = [AOT_DEFAULT_RUNNER, AOT_CORSTONE300_RUNNER] @@ -178,7 +182,7 @@ def parametrize_aot_options(test): # Skip reference system tests if running in i386 container marked_combinations = map( - lambda parameters: pytest.param(*parameters, marks=skip_i386) + lambda parameters: pytest.param(*parameters, marks=[skip_i386, requires_arm_eabi]) if parameters[2] == AOT_CORSTONE300_RUNNER else parameters, valid_combinations, diff --git a/tests/python/relay/test_any.py b/tests/python/relay/test_any.py index fb18d1eca9e1..497177d241f0 100644 --- a/tests/python/relay/test_any.py +++ b/tests/python/relay/test_any.py @@ -372,6 +372,53 @@ def test_any_shape_of(): check_result([data], mod, np.array(3).astype("int64")) +class TestAnyReduce: + config = { + "argmax": (relay.argmax, any_dims(3), None, False, False, (3, 4, 5), ()), + "argmin": (relay.argmin, any_dims(4), 1, False, True, (3, 4, 5, 6), (3, 1, 5, 6)), + "all": (relay.all, any_dims(3), (1, 2), True, False, (3, 4, 5), (4, 5)), + "max": (relay.max, any_dims(4), -1, True, True, (3, 4, 5, 6), (1, 1, 1, 6)), + "min": (relay.min, any_dims(3), (0, 1), False, False, (4, 5, 6), (6,)), + "prod": (relay.prod, any_dims(4), 2, True, True, (3, 4, 5, 6), (1, 1, 5, 1)), + "mean": (relay.mean, 
any_dims(2), 0, False, False, (1, 2), (2,)), + "variance": (relay.variance, any_dims(5), (2, 4), False, False, (3, 4, 5, 6, 7), (3, 4, 6)), + } + + ( + reduce_op, + data_shape, + axis, + exclude, + keepdims, + static_data_shape, + ref_out_shape, + ) = tvm.testing.parameters(*config.values(), ids=config.keys()) + + def test_any_reduce( + self, + target, + dev, + reduce_op, + data_shape, + axis, + exclude, + keepdims, + static_data_shape, + ref_out_shape, + ): + target = tvm.target.Target(target) + if target.kind.name == "vulkan" and reduce_op == relay.all: + pytest.xfail("Known failing test case for vulkan runtime") + + mod = tvm.IRModule() + dtype = "bool" if reduce_op == relay.all else "float32" + data = relay.var("data", shape=data_shape, dtype=dtype) + y = reduce_op(data, axis, keepdims, exclude) + mod["main"] = relay.Function([data], y) + data_np = np.random.uniform(size=static_data_shape).astype(dtype) + check_result([data_np], mod, ref_out_shape, assert_shape=True, targets=[(target, dev)]) + + def verify_any_reduce( reduce_op, data_shape, axis, exclude, keepdims, static_data_shape, ref_out_shape ): @@ -579,66 +626,58 @@ def test_any_conv2d(): ) -def verify_any_conv2d_NCHWc( - data_shape, - kernel_shape, - strides, - padding, - dilation, - data_layout, - kernel_layout, - out_layout, - static_data_shape, - ref_out_shape, -): - mod = tvm.IRModule() - dtype = "float32" - data = relay.var("data", shape=data_shape, dtype=dtype) - kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype) - y = relay.nn.contrib_conv2d_nchwc( - data, - kernel, +class TestAnyConv2dNCHWc: + data_shape = tvm.testing.parameter((relay.Any(), 8, 224, 224, 8)) + kernel_shape = tvm.testing.parameter((8, 8, 3, 3, 8, 8)) + strides = tvm.testing.parameter((1, 1)) + padding = tvm.testing.parameter((1, 1)) + data_layout = tvm.testing.parameter("NCHW8c") + kernel_layout = tvm.testing.parameter("OIHW8i8o") + out_layout = tvm.testing.parameter("NCHW8c") + + dilation, static_data_shape, ref_out_shape = tvm.testing.parameters( + ((1, 1), (1, 8, 224, 224, 8), (1, 8, 224, 224, 8)), + ((2, 2), (2, 8, 224, 224, 8), (2, 8, 222, 222, 8)), + ) + + @tvm.testing.known_failing_targets("cuda", "vulkan") + def test_any_conv2d_NCHWc( + self, + target, + dev, + data_shape, + kernel_shape, strides, padding, dilation, - kernel_size=kernel_shape[2:4], - channels=kernel_shape[0] * kernel_shape[-1], - data_layout=data_layout, - kernel_layout=kernel_layout, - out_layout=out_layout, - ) - mod["main"] = relay.Function([data, kernel], y) - data_np = np.random.uniform(size=static_data_shape).astype(dtype) - kernel_np = np.random.uniform(size=kernel_shape).astype(dtype) - check_result([data_np, kernel_np], mod, ref_out_shape, assert_shape=True) - - -# TODO(@kevinthesun): Support dynamic input height and width. 
-def test_any_conv2d_NCHWc(): - verify_any_conv2d_NCHWc( - (relay.Any(), 8, 224, 224, 8), - (8, 8, 3, 3, 8, 8), - (1, 1), - (1, 1), - (1, 1), - "NCHW8c", - "OIHW8i8o", - "NCHW8c", - (1, 8, 224, 224, 8), - (1, 8, 224, 224, 8), - ) - verify_any_conv2d_NCHWc( - (relay.Any(), 8, 224, 224, 8), - (8, 8, 3, 3, 8, 8), - (1, 1), - (1, 1), - (2, 2), - "NCHW8c", - "OIHW8i8o", - "NCHW8c", - (2, 8, 224, 224, 8), - (2, 8, 222, 222, 8), - ) + data_layout, + kernel_layout, + out_layout, + static_data_shape, + ref_out_shape, + ): + mod = tvm.IRModule() + dtype = "float32" + data = relay.var("data", shape=data_shape, dtype=dtype) + kernel = relay.var("kernel", shape=kernel_shape, dtype=dtype) + y = relay.nn.contrib_conv2d_nchwc( + data, + kernel, + strides, + padding, + dilation, + kernel_size=kernel_shape[2:4], + channels=kernel_shape[0] * kernel_shape[-1], + data_layout=data_layout, + kernel_layout=kernel_layout, + out_layout=out_layout, + ) + mod["main"] = relay.Function([data, kernel], y) + data_np = np.random.uniform(size=static_data_shape).astype(dtype) + kernel_np = np.random.uniform(size=kernel_shape).astype(dtype) + check_result( + [data_np, kernel_np], mod, ref_out_shape, assert_shape=True, targets=[(target, dev)] + ) def verify_any_conv1d_transpose_ncw( @@ -883,136 +922,150 @@ def test_any_batch_flatten(): check_result([data_np], mod, ref_out_shape, assert_shape=True) -def verify_any_dense( - data_shape, - weight_shape, - units, - static_data_shape, - static_weight_shape, - ref_out_shape, - use_cublas=False, -): - mod = tvm.IRModule() - dtype = "float32" - data = relay.var("data", shape=data_shape, dtype=dtype) - weight = relay.var("weight", shape=weight_shape, dtype=dtype) - y = relay.nn.dense(data, weight, units) - mod["main"] = relay.Function([data, weight], y) - data_np = np.random.uniform(size=static_data_shape).astype(dtype) - weight_np = np.random.uniform(size=static_weight_shape).astype(dtype) - - targets = None - if use_cublas and tvm.get_global_func("tvm.contrib.cublas.matmul", True): - targets = [("cuda -libs=cublas", tvm.cuda(0))] - - check_result([data_np, weight_np], mod, ref_out_shape, assert_shape=True, targets=targets) +# TODO(tvm-team) Fix dense schedule +@tvm.testing.known_failing_targets("cuda", "vulkan") +class TestAnyDense: + ( + data_shape, + weight_shape, + units, + static_data_shape, + static_weight_shape, + ref_out_shape, + ) = tvm.testing.parameters( + (any_dims(2), any_dims(2), None, (4, 16), (8, 16), (4, 8)), + (any_dims(2), (50, relay.Any()), 50, (4, 40), (50, 40), (4, 50)), + ) + @tvm.testing.known_failing_targets("cuda", "vulkan") + def test_any_dense( + self, + target, + dev, + data_shape, + weight_shape, + units, + static_data_shape, + static_weight_shape, + ref_out_shape, + ): + mod = tvm.IRModule() + dtype = "float32" + data = relay.var("data", shape=data_shape, dtype=dtype) + weight = relay.var("weight", shape=weight_shape, dtype=dtype) + y = relay.nn.dense(data, weight, units) + mod["main"] = relay.Function([data, weight], y) + data_np = np.random.uniform(size=static_data_shape).astype(dtype) + weight_np = np.random.uniform(size=static_weight_shape).astype(dtype) + + check_result( + [data_np, weight_np], mod, ref_out_shape, assert_shape=True, targets=[(target, dev)] + ) -# TODO(tvm-team) Fix dense schedule -# @tvm.testing.uses_gpu -def test_any_dense(): - verify_any_dense(any_dims(2), any_dims(2), None, (4, 16), (8, 16), (4, 8)) - verify_any_dense(any_dims(2), (50, relay.Any()), 50, (4, 40), (50, 40), (4, 50)) + @tvm.testing.parametrize_targets("cuda 
-libs=cublas") + @tvm.testing.known_failing_targets("cuda", "vulkan") + def test_any_dense_cublas( + self, + target, + dev, + data_shape, + weight_shape, + units, + static_data_shape, + static_weight_shape, + ref_out_shape, + ): + self.test_any_dense( + target, + dev, + data_shape, + weight_shape, + units, + static_data_shape, + static_weight_shape, + ref_out_shape, + ) -@tvm.testing.uses_gpu -def test_any_dense_dynamic_batch(): - verify_any_dense((relay.Any(), 40), (50, 40), 50, (4, 40), (50, 40), (4, 50)) - verify_any_dense((relay.Any(), 40), (50, 40), 50, (4, 40), (50, 40), (4, 50), use_cublas=True) - - -def verify_any_batch_matmul( - x_shape, - y_shape, - out_shape, - x_var_shape, - y_var_shape, - dtype="float32", - trans_x=False, - trans_y=True, -): - x = relay.var("x", relay.TensorType(x_var_shape, dtype)) - y = relay.var("y", relay.TensorType(y_var_shape, dtype)) - z = relay.nn.batch_matmul(x, y, transpose_a=trans_x, transpose_b=trans_y) - - func = relay.Function([x, y], z) - x_np = np.random.uniform(size=x_shape).astype(dtype) - y_np = np.random.uniform(size=y_shape).astype(dtype) - z_np = tvm.topi.testing.batch_matmul(x_np, y_np, trans_x=trans_x, trans_y=trans_y) - - for target, dev in tvm.testing.enabled_targets(): - for kind in ["vm", "debug"]: - mod = tvm.ir.IRModule.from_expr(func) - z = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()( - x_np, y_np - ) - tvm.testing.assert_allclose(z.numpy(), z_np, rtol=1e-5) +class TestAnyBatchMatmul: + dtype = tvm.testing.parameter("float32") + executor_kind = tvm.testing.parameter("vm", "debug") -# TODO(mbrookhart): enable once VM supports heterogenous execution -# @tvm.testing.uses_gpu -def test_any_batch_matmul(): - verify_any_batch_matmul((1, 16, 32), (1, 16, 32), (1, 16, 16), (1, 16, 32), (relay.Any(),) * 3) - verify_any_batch_matmul((5, 16, 32), (5, 16, 32), (5, 16, 16), (5, 16, 32), (relay.Any(),) * 3) - verify_any_batch_matmul((5, 16, 32), (5, 20, 32), (5, 16, 20), (5, 16, 32), (relay.Any(),) * 3) - verify_any_batch_matmul( - (30, 16, 32), (30, 20, 32), (30, 16, 20), (30, 16, 32), (relay.Any(),) * 3 + (x_shape, y_shape) = tvm.testing.parameters( + ((1, 16, 32), (1, 32, 16)), + ((5, 16, 32), (5, 32, 16)), + ((5, 16, 32), (5, 32, 20)), + ((30, 16, 32), (30, 32, 20)), ) - verify_any_batch_matmul( - (1, 16, 32), (1, 16, 32), (1, 16, 16), (relay.Any(), 16, 32), (relay.Any(), 16, 32) - ) - verify_any_batch_matmul( - (5, 16, 32), (5, 16, 32), (5, 16, 16), (relay.Any(), 16, 32), (relay.Any(), 16, 32) - ) - verify_any_batch_matmul( - (5, 16, 32), (5, 20, 32), (5, 16, 20), (relay.Any(), 16, 32), (relay.Any(), 20, 32) - ) - verify_any_batch_matmul( - (30, 16, 32), (30, 20, 32), (30, 16, 20), (relay.Any(), 16, 32), (relay.Any(), 20, 32) - ) + # any_x = tvm.testing.parameter("none", "batch") + # any_y = tvm.testing.parameter("none", "batch", "all") - verify_any_batch_matmul( - (1, 32, 16), (1, 16, 32), (1, 16, 16), (1, 32, 16), (relay.Any(),) * 3, trans_x=True - ) - verify_any_batch_matmul( - (5, 16, 32), (5, 32, 16), (5, 16, 16), (5, 16, 32), (relay.Any(),) * 3, trans_y=False - ) - verify_any_batch_matmul( - (5, 32, 16), - (5, 32, 20), - (5, 16, 20), - (5, 32, 16), - (relay.Any(),) * 3, - trans_x=True, - trans_y=False, - ) - verify_any_batch_matmul( - (1, 32, 16), - (1, 16, 32), - (1, 16, 16), - (relay.Any(), 32, 16), - (relay.Any(), 16, 32), - trans_x=True, - ) - verify_any_batch_matmul( - (5, 16, 32), - (5, 32, 16), - (5, 16, 16), - (relay.Any(), 16, 32), - (relay.Any(), 32, 16), - trans_y=False, - ) - 
verify_any_batch_matmul( - (5, 32, 16), - (5, 32, 20), - (5, 16, 20), - (relay.Any(), 32, 16), - (relay.Any(), 32, 20), - trans_x=True, - trans_y=False, + any_x, any_y = tvm.testing.parameters( + ("none", "batch"), ("none", "all"), ("batch", "none"), ("batch", "batch"), ("batch", "all") ) + transpose_x = tvm.testing.parameter(True, False) + transpose_y = tvm.testing.parameter(True, False) + + @tvm.testing.fixture + def x_var_shape(self, x_shape, any_x): + if any_x == "none": + return x_shape + elif any_x == "batch": + return tuple(relay.Any() if i == 0 else size for i, size in enumerate(x_shape)) + elif any_x == "all": + return tuple(relay.Any() for _ in x_shape) + + @tvm.testing.fixture + def y_var_shape(self, y_shape, any_y): + if any_y == "none": + return y_shape + elif any_y == "batch": + return tuple(relay.Any() if i == 0 else size for i, size in enumerate(y_shape)) + elif any_y == "all": + return tuple(relay.Any() for _ in y_shape) + + @tvm.testing.known_failing_targets("cuda", "vulkan") + def test_any_batch_matmul( + self, + target, + dev, + x_shape, + y_shape, + any_x, + any_y, + x_var_shape, + y_var_shape, + transpose_x, + transpose_y, + executor_kind, + dtype, + ): + if transpose_x: + x_shape = (x_shape[0], x_shape[2], x_shape[1]) + x_var_shape = (x_var_shape[0], x_var_shape[2], x_var_shape[1]) + + if transpose_y: + y_shape = (y_shape[0], y_shape[2], y_shape[1]) + y_var_shape = (y_var_shape[0], y_var_shape[2], y_var_shape[1]) + + x = relay.var("x", relay.TensorType(x_var_shape, dtype)) + y = relay.var("y", relay.TensorType(y_var_shape, dtype)) + z = relay.nn.batch_matmul(x, y, transpose_a=transpose_x, transpose_b=transpose_y) + + func = relay.Function([x, y], z) + x_np = np.random.uniform(size=x_shape).astype(dtype) + y_np = np.random.uniform(size=y_shape).astype(dtype) + z_np = tvm.topi.testing.batch_matmul(x_np, y_np, trans_x=transpose_x, trans_y=transpose_y) + + mod = tvm.ir.IRModule.from_expr(func) + z = relay.create_executor(executor_kind, mod=mod, device=dev, target=target).evaluate()( + x_np, y_np + ) + tvm.testing.assert_allclose(z.numpy(), z_np, rtol=1e-5) + @tvm.testing.uses_gpu def verify_any_pad(data_shape, pad_width, static_data_shape): diff --git a/tests/python/relay/test_op_grad_level1.py b/tests/python/relay/test_op_grad_level1.py index 11099ffe50ee..bab709f2b88d 100644 --- a/tests/python/relay/test_op_grad_level1.py +++ b/tests/python/relay/test_op_grad_level1.py @@ -14,15 +14,17 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. 
+import sys + import numpy as np import pytest import tvm -from tvm import te -from tvm import relay +import tvm.testing + +from tvm import te, relay from tvm.relay.testing import check_grad, run_infer_type from tvm.relay.transform import gradient -import tvm.testing def sigmoid(x): @@ -36,131 +38,179 @@ def relu(x): return x_copy -@tvm.testing.uses_gpu -def test_unary_op(): - def check_single_op(opfunc, ref, dtype): - shape = (10, 4) +class TestUnaryOp: + config = { + "log": (tvm.relay.log, lambda x, g: g * (1 / x)), + "exp": (tvm.relay.exp, lambda x, g: g * np.exp(x)), + "sigmoid": (tvm.relay.sigmoid, lambda x, g: g * sigmoid(x) * (1 - sigmoid(x))), + "tanh": (tvm.relay.tanh, lambda x, g: g * (1 - np.tanh(x) * np.tanh(x))), + "sqrt": (tvm.relay.sqrt, lambda x, g: g * 0.5 * np.power(x, -0.5)), + "abs": (tvm.relay.abs, lambda x, g: np.where(x < 0, -g, g)), + "relu": (relay.nn.relu, lambda x, g: np.where(x < 0, np.zeros_like(x), g)), + "erf": (tvm.relay.erf, lambda x, g: g * (2.0 / (np.pi ** (0.5)) * np.exp(-x * x))), + "cos": (tvm.relay.cos, lambda x, g: g * -1.0 * np.sin(x)), + "sin": (tvm.relay.sin, lambda x, g: g * np.cos(x)), + "tan": (tvm.relay.tan, lambda x, g: g * (1.0 / (np.cos(x) ** 2))), + "atan": (tvm.relay.atan, lambda x, g: g * (1 / (1 + np.power(x, 2.0)))), + "log2": (tvm.relay.log2, lambda x, g: g * (1 / (np.log(2) * x))), + "log10": (tvm.relay.log10, lambda x, g: g * (1 / (np.log(10) * x))), + "cosh": (tvm.relay.cosh, lambda x, g: g * (np.sinh(x))), + "sinh": (tvm.relay.sinh, lambda x, g: g * (np.cosh(x))), + "asin": (tvm.relay.asin, lambda x, g: g * (1.0 / (1.0 - x ** 2) ** (1.0 / 2.0))), + "acos": (tvm.relay.acos, lambda x, g: g * (-1.0 / (1.0 - x ** 2.0) ** (1.0 / 2.0))), + "acosh": (tvm.relay.acosh, lambda x, g: g * (1.0 / (x ** 2 - 1.0) ** (1.0 / 2.0))), + "asinh": (tvm.relay.asinh, lambda x, g: g * (1.0 / (x ** 2 + 1.0) ** (1.0 / 2.0))), + "atanh": (tvm.relay.atanh, lambda x, g: g * (-1.0 / (x ** 2 - 1.0))), + } + + relay_op, ref_func = tvm.testing.parameters(*config.values(), ids=config.keys()) + dtype = tvm.testing.parameter("float32", "float64") + shape = tvm.testing.parameter((10, 4)) + + def test_op(self, target, dev, relay_op, ref_func, shape, dtype): + + target = tvm.target.Target(target) + if target.kind.name == "vulkan": + + known_breaks = { + "float32": [ + tvm.relay.erf, + tvm.relay.tan, + tvm.relay.atan, + tvm.relay.log10, + tvm.relay.cosh, + tvm.relay.sinh, + tvm.relay.asin, + tvm.relay.acos, + tvm.relay.acosh, + tvm.relay.asinh, + tvm.relay.atanh, + ], + "float64": [ + tvm.relay.log, + tvm.relay.exp, + tvm.relay.sigmoid, + tvm.relay.tanh, + tvm.relay.sqrt, + tvm.relay.erf, + tvm.relay.cos, + tvm.relay.sin, + tvm.relay.tan, + tvm.relay.atan, + tvm.relay.log2, + tvm.relay.log10, + tvm.relay.cosh, + tvm.relay.sinh, + tvm.relay.asin, + tvm.relay.acos, + tvm.relay.acosh, + tvm.relay.asinh, + tvm.relay.atanh, + ], + } + + if relay_op in known_breaks[dtype]: + pytest.xfail(f"{dtype} {relay_op.__name__} not yet supported on Vulkan runtime") + tp = relay.TensorType(shape, dtype) x = relay.var("x", tp) g = relay.var("g", tp) - y = opfunc(x) * g - - if ref is not None: - data = np.random.rand(*shape).astype(dtype) - grad_in = np.random.rand(*shape).astype(dtype) - ref_grad = ref(data, grad_in) - fwd_func = relay.Function([x, g], y) - fwd_func = run_infer_type(fwd_func) - bwd_func = run_infer_type(gradient(fwd_func)) - - for target, dev in tvm.testing.enabled_targets(): - op_res, (op_grad, _) = relay.create_executor(device=dev, target=target).evaluate( - bwd_func - 
)(data, grad_in) - np.testing.assert_allclose(op_grad.numpy(), ref_grad, rtol=0.01) - - for opfunc, ref in [ - (tvm.relay.log, lambda x, g: g * (1 / x)), - (tvm.relay.exp, lambda x, g: g * np.exp(x)), - (tvm.relay.sigmoid, lambda x, g: g * sigmoid(x) * (1 - sigmoid(x))), - (tvm.relay.tanh, lambda x, g: g * (1 - np.tanh(x) * np.tanh(x))), - (tvm.relay.sqrt, lambda x, g: g * 0.5 * np.power(x, -0.5)), - (tvm.relay.abs, lambda x, g: np.where(x < 0, -g, g)), - (relay.nn.relu, lambda x, g: np.where(x < 0, np.zeros_like(x), g)), - (tvm.relay.erf, lambda x, g: g * (2.0 / (np.pi ** (0.5)) * np.exp(-x * x))), - (tvm.relay.cos, lambda x, g: g * -1.0 * np.sin(x)), - (tvm.relay.sin, lambda x, g: g * np.cos(x)), - (tvm.relay.tan, lambda x, g: g * (1.0 / (np.cos(x) ** 2))), - (tvm.relay.atan, lambda x, g: g * (1 / (1 + np.power(x, 2.0)))), - (tvm.relay.log2, lambda x, g: g * (1 / (np.log(2) * x))), - (tvm.relay.log10, lambda x, g: g * (1 / (np.log(10) * x))), - (tvm.relay.cosh, lambda x, g: g * (np.sinh(x))), - (tvm.relay.sinh, lambda x, g: g * (np.cosh(x))), - (tvm.relay.asin, lambda x, g: g * (1.0 / (1.0 - x ** 2) ** (1.0 / 2.0))), - (tvm.relay.acos, lambda x, g: g * (-1.0 / (1.0 - x ** 2.0) ** (1.0 / 2.0))), - (tvm.relay.acosh, lambda x, g: g * (1.0 / (x ** 2 - 1.0) ** (1.0 / 2.0))), - (tvm.relay.asinh, lambda x, g: g * (1.0 / (x ** 2 + 1.0) ** (1.0 / 2.0))), - (tvm.relay.atanh, lambda x, g: g * (-1.0 / (x ** 2 - 1.0))), - ]: - for dtype in ("float32", "float64"): - check_single_op(opfunc, ref, dtype) - - -@tvm.testing.uses_gpu -def test_binary_op(): - def inst(vars, sh): - return [vars.get(s, s) for s in sh] - - def check_binary_op(opfunc, ref, dtype): - s = (5, 10, 5) - t = relay.TensorType((5, 10, 5), dtype=dtype) + y = relay_op(x) * g + + fwd_func = relay.Function([x, g], y) + fwd_func = run_infer_type(fwd_func) + bwd_func = run_infer_type(gradient(fwd_func)) + + data_in = np.random.rand(*shape).astype(dtype) + grad_in = np.random.rand(*shape).astype(dtype) + ref_grad_out = ref_func(data_in, grad_in) + + op_res, (op_grad, _) = relay.create_executor(device=dev, target=target).evaluate(bwd_func)( + data_in, grad_in + ) + np.testing.assert_allclose(op_grad.numpy(), ref_grad_out, rtol=0.01) + + +class TestBinaryOp: + config = { + "add": (relay.add, lambda x, y: [np.ones_like(x), np.ones_like(y)]), + "subtract": (relay.subtract, lambda x, y: [np.ones_like(x), -np.ones_like(y)]), + "multiply": (relay.multiply, lambda x, y: [y, x]), + "divide": (relay.divide, lambda x, y: [1 / y, -x / (y ** 2)]), + } + + relay_op, ref_func = tvm.testing.parameters(*config.values(), ids=config.keys()) + dtype = tvm.testing.parameter("float32", "float64") + shape = tvm.testing.parameter((5, 10, 5)) + + def test_binary_op(self, target, dev, relay_op, ref_func, shape, dtype): + t = relay.TensorType(shape, dtype=dtype) x = relay.var("x", t) y = relay.var("y", t) - z = opfunc(x, y) + z = relay_op(x, y) - x_data = np.random.rand(*s).astype(t.dtype) - y_data = np.random.rand(*s).astype(t.dtype) - ref_grad0, ref_grad1 = ref(x_data, y_data) + x_data = np.random.rand(*shape).astype(t.dtype) + y_data = np.random.rand(*shape).astype(t.dtype) + ref_grad0, ref_grad1 = ref_func(x_data, y_data) fwd_func = relay.Function([x, y], z) fwd_func = run_infer_type(fwd_func) bwd_func = run_infer_type(gradient(fwd_func)) - for target, dev in tvm.testing.enabled_targets(): - op_res, (op_grad0, op_grad1) = relay.create_executor( - device=dev, target=target - ).evaluate(bwd_func)(x_data, y_data) - np.testing.assert_allclose(op_grad0.numpy(), 
ref_grad0, rtol=0.01) - np.testing.assert_allclose(op_grad1.numpy(), ref_grad1, rtol=0.01) + op_res, (op_grad0, op_grad1) = relay.create_executor(device=dev, target=target).evaluate( + bwd_func + )(x_data, y_data) + np.testing.assert_allclose(op_grad0.numpy(), ref_grad0, rtol=0.01) + np.testing.assert_allclose(op_grad1.numpy(), ref_grad1, rtol=0.01) - for opfunc, ref in [ - (relay.add, lambda x, y: [np.ones_like(x), np.ones_like(y)]), - (relay.subtract, lambda x, y: [np.ones_like(x), -np.ones_like(y)]), - (relay.multiply, lambda x, y: [y, x]), - (relay.divide, lambda x, y: [1 / y, -x / (y ** 2)]), - ]: - for dtype in ("float32", "float64"): - check_binary_op(opfunc, ref, dtype) +def test_softmax_grad(target, dev): + target = tvm.target.Target(target) + if target.kind.name == "vulkan": + pytest.xfail("Known failure on vulkan") -def test_softmax_grad(): data = relay.var("data", relay.TensorType((1, 16), "float64")) fwd_func = relay.Function([data], relay.nn.softmax(data)) - check_grad(fwd_func, scale=1) + check_grad(fwd_func, scale=1, target_devices=[(target, dev)]) -def test_log_softmax_grad(): +def test_log_softmax_grad(target, dev): + target = tvm.target.Target(target) + if target.kind.name == "vulkan": + pytest.xfail("Known failure on vulkan") + data = relay.var("data", relay.TensorType((2, 16), "float64")) fwd_func = relay.Function([data], relay.nn.log_softmax(data)) - check_grad(fwd_func, scale=1) - + check_grad(fwd_func, scale=1, target_devices=[(target, dev)]) -def verify_bias_add(d_shape, b_shape, axis=1): - data = relay.var("data", relay.TensorType(d_shape, "float32")) - bias = relay.var("bias", relay.TensorType(b_shape, "float32")) - fwd_func = relay.Function([data, bias], relay.nn.bias_add(data, bias, axis=axis)) - check_grad(fwd_func) +class TestBiasAddGrad: + d_shape, b_shape, axis = tvm.testing.parameters( + ((1, 16), (16,), 1), + ((1, 8, 2, 2), (8,), 1), + ((1, 2, 2, 8), (8,), 3), + ((4, 8), (8,), 1), + ) -def test_bias_add_grad(): - verify_bias_add((1, 16), (16,)) - verify_bias_add((1, 8, 2, 2), (8,)) - verify_bias_add((1, 2, 2, 8), (8,), 3) - verify_bias_add((4, 8), (8,)) + def test_bias_add(self, target, dev, d_shape, b_shape, axis): + data = relay.var("data", relay.TensorType(d_shape, "float32")) + bias = relay.var("bias", relay.TensorType(b_shape, "float32")) + fwd_func = relay.Function([data, bias], relay.nn.bias_add(data, bias, axis=axis)) + check_grad(fwd_func, target_devices=[(target, dev)]) -def test_expand_dims_grad(): +def test_expand_dims_grad(target, dev): data = relay.var("data", shape=(2, 3), dtype="float64") fwd_func = relay.Function([data], relay.expand_dims(data, axis=1, num_newaxis=2)) - check_grad(fwd_func) + check_grad(fwd_func, target_devices=[(target, dev)]) -def test_concatenate_grad(): +def test_concatenate_grad(target, dev): x = relay.var("x", shape=(2, 2, 5)) y = relay.var("y", shape=(2, 1, 5)) z = relay.var("z", shape=(2, 4, 5)) fwd_func = relay.Function([x, y, z], relay.concatenate([x, y, z], axis=1)) - check_grad(fwd_func) + check_grad(fwd_func, target_devices=[(target, dev)]) if __name__ == "__main__": - pytest.main([__file__]) + sys.exit(pytest.main(sys.argv)) diff --git a/tests/python/relay/test_op_grad_level10.py b/tests/python/relay/test_op_grad_level10.py index 8d961eb60b18..4c2c9082e044 100644 --- a/tests/python/relay/test_op_grad_level10.py +++ b/tests/python/relay/test_op_grad_level10.py @@ -14,35 +14,52 @@ # KIND, either express or implied. 
See the License for the # specific language governing permissions and limitations # under the License. -import pytest +import sys + import numpy as np +import pytest + +import tvm +import tvm.testing from tvm import relay from tvm.relay.testing import check_grad -def test_cross_entropy_grad(): - for dtype in ("float32", "float64"): - x = relay.var("x", shape=(2, 5), dtype=dtype) - y = relay.var("y", shape=(2, 5), dtype=dtype) - check_grad( - relay.Function([x, y], relay.op.nn.cross_entropy(x, y)), eps=0.01, scale=0.1, mean=1 - ) +index_dtype = tvm.testing.parameter("int32", "int64") +val_dtype = tvm.testing.parameter("float32", "float64") -def test_cross_entropy_with_logits_grad(): - for dtype in ("float32", "float64"): - x = relay.var("x", shape=(2, 5), dtype=dtype) - y = relay.var("y", shape=(2, 5), dtype=dtype) - check_grad( - relay.Function([x, y], relay.op.nn.cross_entropy_with_logits(x, y)), - eps=0.01, - scale=0.1, - mean=1, - ) +def test_cross_entropy_grad(target, dev, val_dtype): + target = tvm.target.Target(target) + if target.kind.name == "vulkan" and val_dtype == "float64": + # GLSL.std.450's Log implementation only takes 16/32-bit floats. + pytest.xfail("Known failing test case for vulkan runtime") + + x = relay.var("x", shape=(2, 5), dtype=val_dtype) + y = relay.var("y", shape=(2, 5), dtype=val_dtype) + check_grad( + relay.Function([x, y], relay.op.nn.cross_entropy(x, y)), + eps=0.01, + scale=0.1, + mean=1, + target_devices=[(target, dev)], + ) + + +def test_cross_entropy_with_logits_grad(target, dev, val_dtype): + x = relay.var("x", shape=(2, 5), dtype=val_dtype) + y = relay.var("y", shape=(2, 5), dtype=val_dtype) + check_grad( + relay.Function([x, y], relay.op.nn.cross_entropy_with_logits(x, y)), + eps=0.01, + scale=0.1, + mean=1, + target_devices=[(target, dev)], + ) -def test_checkpoint(): +def test_checkpoint(target, dev): inputs = [relay.var("x{}".format(i), shape=(1,)) for i in range(4)] output = relay.multiply(relay.add(inputs[0], inputs[1]), relay.add(inputs[2], inputs[3])) check_grad(relay.Function(inputs, relay.annotation.checkpoint(output))) @@ -59,56 +76,59 @@ def test_checkpoint(): ) ) out_single = scope.get() - check_grad(relay.Function(inputs, out_single)) + check_grad(relay.Function(inputs, out_single), target_devices=[(target, dev)]) -def verify_batch_matmul_grad(a_shape, b_shape, transpose_a, transpose_b): - tensor_a = relay.var("tensor_a", relay.TensorType(a_shape, "float32")) - tensor_b = relay.var("tensor_b", relay.TensorType(b_shape, "float32")) - check_grad( - relay.Function( - [tensor_a, tensor_b], - relay.op.nn.batch_matmul( - tensor_a, tensor_b, transpose_a=transpose_a, transpose_b=transpose_b - ), - ) +class TestBatchMatmulGrad: + a_shape, b_shape, transpose_a, transpose_b = tvm.testing.parameters( + ((2, 3, 5), (2, 5, 4), False, False), + ((2, 3, 5), (2, 4, 5), False, True), + ((2, 5, 3), (2, 5, 4), True, False), + ((2, 5, 3), (2, 4, 5), True, True), ) - -def test_batch_matmul_grad(): - verify_batch_matmul_grad((2, 3, 5), (2, 5, 4), False, False) - verify_batch_matmul_grad((2, 3, 5), (2, 4, 5), False, True) - verify_batch_matmul_grad((2, 5, 3), (2, 5, 4), True, False) - verify_batch_matmul_grad((2, 5, 3), (2, 4, 5), True, True) + def test_batch_matmul_grad(self, target, dev, a_shape, b_shape, transpose_a, transpose_b): + tensor_a = relay.var("tensor_a", relay.TensorType(a_shape, "float32")) + tensor_b = relay.var("tensor_b", relay.TensorType(b_shape, "float32")) + check_grad( + relay.Function( + [tensor_a, tensor_b], + relay.op.nn.batch_matmul( + 
tensor_a, tensor_b, transpose_a=transpose_a, transpose_b=transpose_b + ), + ), + target_devices=[(target, dev)], + ) -def test_reverse_reshape_grad(): +def test_reverse_reshape_grad(target, dev): x = relay.var("x", shape=(3, 4, 5), dtype="float64") - check_grad(relay.Function([x], relay.op.reverse_reshape(x, (-1, 0)))) + check_grad( + relay.Function([x], relay.op.reverse_reshape(x, (-1, 0))), + target_devices=[(target, dev)], + ) -def test_one_hot_grad(): +def test_one_hot_grad(target, dev, index_dtype, val_dtype): indices_shape = (3, 4) depth = 5 axis = -1 - for indices_dtype in ["int32", "int64"]: - for val_dtype in ["float32", "float64"]: - inputs = [ - np.random.randint(depth, size=indices_shape, dtype=indices_dtype), - np.array(np.random.randn() * 1e-5).astype(val_dtype), - np.array(np.random.randn() * 1e-5).astype(val_dtype), - ] - test_inputs = inputs[1:] + inputs = [ + np.random.randint(depth, size=indices_shape, dtype=index_dtype), + np.array(np.random.randn() * 1e-5).astype(val_dtype), + np.array(np.random.randn() * 1e-5).astype(val_dtype), + ] + test_inputs = inputs[1:] - indices = relay.var("indices", shape=indices_shape, dtype=indices_dtype) - on_val = relay.var("on_val", shape=tuple(), dtype=val_dtype) - off_val = relay.var("off_val", shape=tuple(), dtype=val_dtype) - y = relay.one_hot(indices, on_val, off_val, depth, axis, val_dtype) - f = relay.Function([indices, on_val, off_val], y) + indices = relay.var("indices", shape=indices_shape, dtype=index_dtype) + on_val = relay.var("on_val", shape=tuple(), dtype=val_dtype) + off_val = relay.var("off_val", shape=tuple(), dtype=val_dtype) + y = relay.one_hot(indices, on_val, off_val, depth, axis, val_dtype) + f = relay.Function([indices, on_val, off_val], y) - check_grad(f, inputs=inputs, test_inputs=test_inputs) + check_grad(f, inputs=inputs, test_inputs=test_inputs, target_devices=[(target, dev)]) if __name__ == "__main__": - pytest.main([__file__]) + sys.exit(pytest.main(sys.argv)) diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py index 97e10eb25a95..eff3919460c2 100644 --- a/tests/python/relay/test_op_level1.py +++ b/tests/python/relay/test_op_level1.py @@ -43,54 +43,67 @@ def rsqrt(x): return one / np.sqrt(x) -@tvm.testing.uses_gpu -def test_unary_op(): - def check_single_op(opfunc, ref, dtype): +class TestUnaryOp: + op_list = { + "log": (tvm.relay.log, np.log), + "exp": (tvm.relay.exp, np.exp), + "erf": (tvm.relay.erf, scipy.special.erf), + "sqrt": (tvm.relay.sqrt, np.sqrt), + "rsqrt": (tvm.relay.rsqrt, rsqrt), + "sigmoid": (tvm.relay.sigmoid, sigmoid), + "tanh": (tvm.relay.tanh, np.tanh), + "relu": (relay.nn.relu, relu), + "cos": (tvm.relay.cos, np.cos), + "sin": (tvm.relay.sin, np.sin), + "tan": (tvm.relay.tan, np.tan), + "atan": (tvm.relay.atan, np.arctan), + } + + dtype = tvm.testing.parameter("float16", "float32") + + relay_op, ref_func = tvm.testing.parameters(*op_list.values(), ids=op_list.keys()) + + def test_unary_op(self, target, dev, relay_op, ref_func, dtype): + target = tvm.target.Target(target) + if ( + dtype == "float16" + and target.kind.name == "cuda" + and not have_fp16(tvm.cuda(0).compute_version) + ): + pytest.xfail("No float16 support on local cuda device") + elif ( + dtype == "float16" + and target.kind.name == "vulkan" + and not target.attrs.get("supports_float16", False) + ): + pytest.xfail("No float16 support on vulkan target") + + if target.kind.name == "vulkan" and relay_op in [ + tvm.relay.erf, + tvm.relay.tan, + tvm.relay.atan, + ]: + pytest.xfail(f"Vulkan
runtime doesn't yet support {relay_op}") + shape = (10, 4) dtype = dtype tp = relay.TensorType(shape) x = relay.var("x", tp, dtype=dtype) - y = opfunc(x) + y = relay_op(x) # test printer assert ("{}(%x)".format(y.op.name)) in y.astext() # test type inference yy = run_infer_type(y) assert yy.checked_type == tp - if ref is not None: + if ref_func is not None: data = np.random.rand(*shape).astype(dtype) - ref_res = ref(data) + ref_res = ref_func(data) func = relay.Function([x], y) - for target, dev in tvm.testing.enabled_targets(): - # use graph by execuor default for testing, as we need - # create function explicitly to avoid constant-folding. - if ( - dtype == "float16" - and target == "cuda" - and not have_fp16(tvm.cuda(0).compute_version) - ): - continue - op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)( - data - ) - np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01) - - for opfunc, ref in [ - (tvm.relay.log, np.log), - (tvm.relay.exp, np.exp), - (tvm.relay.erf, scipy.special.erf), - (tvm.relay.sqrt, np.sqrt), - (tvm.relay.rsqrt, rsqrt), - (tvm.relay.sigmoid, sigmoid), - (tvm.relay.tanh, np.tanh), - (relay.nn.relu, relu), - (tvm.relay.cos, np.cos), - (tvm.relay.sin, np.sin), - (tvm.relay.tan, np.tan), - (tvm.relay.atan, np.arctan), - ]: - for dtype in ["float16", "float32"]: - check_single_op(opfunc, ref, dtype) + # use graph by execuor default for testing, as we need + # create function explicitly to avoid constant-folding. + op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data) + np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01) @tvm.testing.uses_gpu diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py index 87cdc41570d0..0ae88fce5b8c 100644 --- a/tests/python/relay/test_op_level2.py +++ b/tests/python/relay/test_op_level2.py @@ -16,10 +16,15 @@ # under the License. """ Support level2 operator test cases. 
""" +import sys + import numpy as np +import pytest + import tvm import tvm.testing import tvm.topi.testing + from tvm import autotvm, relay, te from tvm.contrib import utils from tvm.relay import transform @@ -191,169 +196,275 @@ def test_conv2d_infer_type(): assert yy.checked_type == relay.TensorType((n, h, w, 16), "int32") -@tvm.testing.uses_gpu -def test_conv2d_run(): - def run_test_conv2d( +class TestConv2D: + config = { + "group1": dict( + dtype="float32", + out_dtype="float32", + scale=1, + dshape=(1, 32, 18, 18), + kshape=(32, 4, 3, 3), + padding=(1, 1), + channels=32, + groups=8, + kernel_size=(3, 3), + dilation=(1, 1), + ), + "group2": dict( + dtype="float32", + out_dtype="float32", + scale=1, + dshape=(1, 32, 18, 18), + kshape=(64, 1, 3, 3), + padding=(1, 1), + channels=64, + groups=32, + kernel_size=(3, 3), + dilation=(1, 1), + ), + "normal": dict( + dtype="float32", + out_dtype="float32", + scale=1, + dshape=(1, 3, 224, 224), + kshape=(10, 3, 3, 3), + padding=(1, 1), + channels=10, + groups=1, + kernel_size=(3, 3), + dilation=(1, 1), + ), + "mixed_precision_int8_int32_case1": dict( + dtype="int8", + out_dtype="int32", + scale=1, + dshape=(1, 3, 224, 224), + kshape=(10, 3, 3, 3), + padding=(1, 1), + channels=10, + groups=1, + kernel_size=(3, 3), + dilation=(1, 1), + ), + "mixed_precision_int8_int32_case2": dict( + dtype="int8", + out_dtype="int32", + scale=1, + dshape=(1, 3, 224, 224), + kshape=(10, 3, 1, 3), + padding=(0, 1), + channels=10, + groups=1, + kernel_size=(1, 3), + dilation=(1, 1), + ), + "dilated": dict( + dtype="float32", + out_dtype="float32", + scale=1, + dshape=(1, 3, 18, 18), + kshape=(10, 3, 3, 3), + padding=(1, 1), + channels=10, + groups=1, + kernel_size=(3, 3), + dilation=(3, 3), + ), + } + + # TODO(Lunderberg): Make a cleaner utility for this type of + # parametrization. It would be much nicer to have the fixture + # name come from the dictionaries themselves, rather than needing + # to be re-packed into tuples. + ( dtype, out_dtype, scale, dshape, kshape, - padding=(1, 1), - fref=None, - groups=1, - dilation=(1, 1), - except_targets=None, - **attrs, + padding, + channels, + groups, + kernel_size, + dilation, + ) = tvm.testing.parameters( + *[ + [ + d[p] + for p in [ + "dtype", + "out_dtype", + "scale", + "dshape", + "kshape", + "padding", + "channels", + "groups", + "kernel_size", + "dilation", + ] + ] + for d in config.values() + ], + ids=config.keys(), + ) + + def test_run( + self, + target, + dev, + dtype, + out_dtype, + scale, + dshape, + kshape, + padding, + groups, + dilation, + channels, + kernel_size, ): - if except_targets is None: - except_targets = [] + target = tvm.target.Target(target) + if target.kind.name == "vulkan" and dtype == "int8": + # The schedule selection incorrectly picks an + # implementation that requires NCHWc packed input. 
+ pytest.xfail("Known failing test for vulkan") x = relay.var("x", shape=dshape, dtype=dtype) w = relay.var("w", shape=kshape, dtype=dtype) - y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs) + y = relay.nn.conv2d( + x, + w, + padding=padding, + dilation=dilation, + groups=groups, + channels=channels, + kernel_size=kernel_size, + ) func = relay.Function([x, w], y) - data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) + kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation) - if fref is None: - ref_res = tvm.topi.testing.conv2d_nchw_python( - data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups - ) - else: - ref_res = fref(data.astype(out_dtype), dkernel.astype(out_dtype)) - for target, dev in tvm.testing.enabled_targets(): - if target in except_targets: - continue - dev = tvm.device(target, 0) - op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)( - data, kernel - ) - tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-4, atol=1e-4) + data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) + ref_res = tvm.topi.testing.conv2d_nchw_python( + data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups + ) - def compile_test_conv2d_arm_cpu( - dtype, out_dtype, scale, dshape, kshape, padding=(1, 1), groups=1, dilation=(1, 1), **attrs - ): - x = relay.var("x", shape=dshape, dtype=dtype) - w = relay.var("w", shape=kshape, dtype=dtype) - y = relay.nn.conv2d(x, w, padding=padding, dilation=dilation, groups=groups, **attrs) - func = relay.Function([x, w], y) - mod = tvm.IRModule() - mod["main"] = func + op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)( + data, kernel + ) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-4, atol=1e-4) - test_schedule = '{"i": ["llvm -device=arm_cpu", "depthwise_conv2d_nchw_spatial_pack.arm_cpu", \ - [["TENSOR", [1, 512, 32, 32], "float32"], \ - ["TENSOR", [512, 1, 3, 3], "float32"], \ - [1, 1], [1, 1], [1, 1], "float32"], {}, \ - ["depthwise_conv2d_nchw_spatial_pack.arm_cpu", [1, 512, 32, 32, "float32"], \ - [512, 1, 3, 3, "float32"], [1, 1], [1, 1], [1, 1], "float32"], \ - {"i": 743640, "t": "", "c": null, \ - "e": [["tile_co", "sp", [32, 16]], ["tile_oh", "sp", [8, 1]], \ - ["tile_ow", "sp", [1, 8]], \ - ["reorder_0", "re", [0, 1, 2, 3, 4, 5, 8, 6, 7]], \ - ["reorder_1", "re", [0, 1, 2, 3, 6, 4, 5]], \ - ["ann_reduce", "an", ["unroll", "none"]], \ - ["ann_spatial", "an", ["unroll", "unroll", "vec"]], \ - ["data_pad_inline", "ot", 4], ["data_vec_inline", "ot", 1], \ - ["conv_inline", "ot", 0]]}], "r": [[0.0002933163], \ - 0, 3.1976189613342285, 1570811630.6058347], "v": 0.1}' - temp = utils.tempdir() - with open(temp.relpath("temp.log"), "w") as log_file: - log_file.write(test_schedule) - with autotvm.apply_history_best(temp.relpath("temp.log")): - with tvm.transform.PassContext(opt_level=3): - print("Compiling...") - graph_json, mod, params = tvm.relay.build(mod, target="llvm -device=arm_cpu") - # depthwise conv2d - dshape = (1, 32, 18, 18) - kshape = (32, 1, 3, 3) - run_test_conv2d( - "float32", - "float32", - 1, +def test_conv2d_run(target, dev): + def run_test_conv2d( + dtype, + out_dtype, + scale, dshape, kshape, padding=(1, 1), + fref=None, + groups=1, + dilation=(1, 1), channels=32, - groups=32, kernel_size=(3, 3), - fref=lambda x, w: tvm.topi.testing.depthwise_conv2d_python_nchw(x, w, (1, 
1), "SAME"), - ) + ): + x = relay.var("x", shape=dshape, dtype=dtype) + w = relay.var("w", shape=kshape, dtype=dtype) + y = relay.nn.conv2d( + x, + w, + padding=padding, + dilation=dilation, + groups=groups, + channels=channels, + kernel_size=kernel_size, + ) + func = relay.Function([x, w], y) + data = np.random.uniform(-scale, scale, size=dshape).astype(dtype) + kernel = np.random.uniform(-scale, scale, size=kshape).astype(dtype) + dkernel = tvm.topi.testing.dilate_python(kernel, (1, 1) + dilation) + ref_res = tvm.topi.testing.conv2d_nchw_python( + data.astype(out_dtype), dkernel.astype(out_dtype), 1, padding, groups=groups + ) - # depthwise conv2d for arm_cpu - dshape = (1, 512, 32, 32) - kshape = (512, 1, 3, 3) - compile_test_conv2d_arm_cpu( - "float32", - "float32", - 1, - dshape, - kshape, - padding=(1, 1), - channels=512, - groups=512, - kernel_size=(3, 3), - ) + op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)( + data, kernel + ) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-4, atol=1e-4) - # CUDA is disabled for 'direct' schedule: - # https://github.com/apache/tvm/pull/3070#issuecomment-486597553 # group conv2d - dshape = (1, 32, 18, 18) - kshape = (32, 4, 3, 3) run_test_conv2d( - "float32", - "float32", - 1, - dshape, - kshape, + dtype="float32", + out_dtype="float32", + scale=1, + dshape=(1, 32, 18, 18), + kshape=(32, 4, 3, 3), padding=(1, 1), channels=32, groups=8, kernel_size=(3, 3), - except_targets=["cuda"], + dilation=(1, 1), ) # also group conv2d - dshape = (1, 32, 18, 18) - kshape = (64, 1, 3, 3) run_test_conv2d( - "float32", - "float32", - 1, - dshape, - kshape, + dtype="float32", + out_dtype="float32", + scale=1, + dshape=(1, 32, 18, 18), + kshape=(64, 1, 3, 3), padding=(1, 1), channels=64, groups=32, kernel_size=(3, 3), - except_targets=["cuda"], + dilation=(1, 1), ) # normal conv2d - dshape = (1, 3, 224, 224) - kshape = (10, 3, 3, 3) run_test_conv2d( - "float32", "float32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3, 3) + dtype="float32", + out_dtype="float32", + scale=1, + dshape=(1, 3, 224, 224), + kshape=(10, 3, 3, 3), + padding=(1, 1), + channels=10, + kernel_size=(3, 3), + dilation=(1, 1), ) # mixed precision run_test_conv2d( - "int8", "int32", 1, dshape, kshape, padding=(1, 1), channels=10, kernel_size=(3, 3) + dtype="int8", + out_dtype="int32", + scale=1, + dshape=(1, 3, 224, 224), + kshape=(10, 3, 3, 3), + padding=(1, 1), + channels=10, + kernel_size=(3, 3), + dilation=(1, 1), ) - kshape = (10, 3, 1, 3) # mixed precision. 
run_test_conv2d( - "int8", "int32", 1, dshape, kshape, padding=(0, 1), channels=10, kernel_size=(1, 3) + dtype="int8", + out_dtype="int32", + scale=1, + dshape=(1, 3, 224, 224), + kshape=(10, 3, 1, 3), + padding=(0, 1), + channels=10, + kernel_size=(1, 3), + dilation=(1, 1), ) # dilated conv2d - dshape = (1, 3, 18, 18) - kshape = (10, 3, 3, 3) run_test_conv2d( - "float32", - "float32", - 1, - dshape, - kshape, + dtype="float32", + out_dtype="float32", + scale=1, + dshape=(1, 3, 18, 18), + kshape=(10, 3, 3, 3), padding=(1, 1), channels=10, kernel_size=(3, 3), @@ -361,6 +472,58 @@ def compile_test_conv2d_arm_cpu( ) +def test_compile_depthwise_conv2d_arm_cpu(): + dtype = "float32" + out_dtype = "float32" + scale = 1 + dshape = (1, 512, 32, 32) + kshape = (512, 1, 3, 3) + padding = (1, 1) + channels = 512 + groups = 512 + kernel_size = (3, 3) + dilation = (1, 1) + + x = relay.var("x", shape=dshape, dtype=dtype) + w = relay.var("w", shape=kshape, dtype=dtype) + y = relay.nn.conv2d( + x, + w, + padding=padding, + dilation=dilation, + groups=groups, + channels=channels, + kernel_size=kernel_size, + ) + func = relay.Function([x, w], y) + mod = tvm.IRModule() + mod["main"] = func + + test_schedule = '{"i": ["llvm -device=arm_cpu", "depthwise_conv2d_nchw_spatial_pack.arm_cpu", \ + [["TENSOR", [1, 512, 32, 32], "float32"], \ + ["TENSOR", [512, 1, 3, 3], "float32"], \ + [1, 1], [1, 1], [1, 1], "float32"], {}, \ + ["depthwise_conv2d_nchw_spatial_pack.arm_cpu", [1, 512, 32, 32, "float32"], \ + [512, 1, 3, 3, "float32"], [1, 1], [1, 1], [1, 1], "float32"], \ + {"i": 743640, "t": "", "c": null, \ + "e": [["tile_co", "sp", [32, 16]], ["tile_oh", "sp", [8, 1]], \ + ["tile_ow", "sp", [1, 8]], \ + ["reorder_0", "re", [0, 1, 2, 3, 4, 5, 8, 6, 7]], \ + ["reorder_1", "re", [0, 1, 2, 3, 6, 4, 5]], \ + ["ann_reduce", "an", ["unroll", "none"]], \ + ["ann_spatial", "an", ["unroll", "unroll", "vec"]], \ + ["data_pad_inline", "ot", 4], ["data_vec_inline", "ot", 1], \ + ["conv_inline", "ot", 0]]}], "r": [[0.0002933163], \ + 0, 3.1976189613342285, 1570811630.6058347], "v": 0.1}' + temp = utils.tempdir() + with open(temp.relpath("temp.log"), "w") as log_file: + log_file.write(test_schedule) + with autotvm.apply_history_best(temp.relpath("temp.log")): + with tvm.transform.PassContext(opt_level=3): + print("Compiling...") + graph_json, mod, params = tvm.relay.build(mod, target="llvm -device=arm_cpu") + + @tvm.testing.uses_gpu def test_conv2d_winograd(): class WinogradFallback(autotvm.FallbackContext): @@ -1851,38 +2014,4 @@ def _test_correlation( if __name__ == "__main__": - test_pool1d() - test_pool2d() - test_pool3d() - test_avg_pool2d_no_count_pad() - test_lrn() - test_l2_normalize() - test_conv1d_infer_type() - test_conv2d_infer_type() - test_conv3d_infer_type() - test_bitpack_infer_type() - test_upsampling_infer_type() - test_upsampling3d_infer_type() - test_flatten_infer_type() - test_pad_infer_type() - test_pad_run() - test_pad_run_dynamic_pad_value() - test_conv3d_transpose_infer_type() - test_conv3d_transpose_ncdhw_run() - test_conv2d_transpose_infer_type() - test_conv2d_transpose_nchw_run() - test_conv2d_transpose_nhwc_run() - test_conv1d_transpose_ncw_run() - test_conv1d_run() - test_conv2d_run() - test_conv2d_winograd() - test_conv3d_run() - test_conv3d_ndhwc_run() - test_conv3d_winograd() - test_bitserial_conv2d_infer_type() - test_batch_flatten() - test_upsampling() - test_upsampling3d() - test_conv2d_int8_intrinsics() - test_depthwise_conv2d_int8() - test_correlation() + sys.exit(pytest.main(sys.argv)) diff 
--git a/tests/python/relay/test_op_level3.py b/tests/python/relay/test_op_level3.py index e0b95fe7fbf7..eaddd33678df 100644 --- a/tests/python/relay/test_op_level3.py +++ b/tests/python/relay/test_op_level3.py @@ -16,21 +16,31 @@ # under the License. """ Support level3 operator test cases. """ +import sys from typing import Callable, Optional import numpy as np import pytest + import tvm import tvm.testing + from tvm import relay, te from tvm.error import TVMError from tvm.relay import create_executor, transform from tvm.relay.testing import check_grad, run_infer_type + from utils import ref_funcs -def test_zeros_ones(): - for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]: +executor_kind = tvm.testing.parameter("graph", "debug") + + +class TestZerosOnes: + config = {"zeros": (relay.zeros, np.zeros), "ones": (relay.ones, np.ones)} + op, ref = tvm.testing.parameters(*config.values(), ids=config.keys()) + + def test_zeros_ones(self, op, ref): y = op(shape=(124, 50), dtype="float64") yy = run_infer_type(y) assert yy.checked_type == relay.TensorType((124, 50), "float64") @@ -38,19 +48,22 @@ def test_zeros_ones(): np.testing.assert_allclose(intrp_res, ref((124, 50), "float64")) -def test_unary_identity(): - for op, ref in [ - (relay.zeros_like, np.zeros_like), - (relay.ones_like, np.ones_like), - (relay.ceil, np.ceil), - (relay.floor, np.floor), - (relay.trunc, np.trunc), - (relay.round, np.round), - (relay.abs, np.abs), - (relay.copy, None), # np.copy - (relay.negative, np.negative), - (relay.sign, np.sign), - ]: +class TestUnaryIdentity: + config = { + "zeros_like": (relay.zeros_like, np.zeros_like), + "ones_like": (relay.ones_like, np.ones_like), + "ceil": (relay.ceil, np.ceil), + "floor": (relay.floor, np.floor), + "trunc": (relay.trunc, np.trunc), + "round": (relay.round, np.round), + "abs": (relay.abs, np.abs), + "copy": (relay.copy, None), # np.copy + "negative": (relay.negative, np.negative), + "sign": (relay.sign, np.sign), + } + op, ref = tvm.testing.parameters(*config.values(), ids=config.keys()) + + def test_unary_identity(self, op, ref): shape = (8, 9, 4) x = relay.var("x", relay.TensorType(shape, "float32")) y = op(x) @@ -169,8 +182,14 @@ def reference_tanh(x): np.testing.assert_allclose(op_res.numpy(), reference_tanh(data), atol=4e-5, rtol=1e-9) -def test_squeeze(): - def verify_squeeze(shape, dtype, axis): +class TestSqueeze: + shape, dtype, axis = tvm.testing.parameters( + ((1, 3, 2, 5), "float32", None), + ((1, 3, 1), "float32", [0]), + ((1, 2, 1, 2, 1), "float32", [0, 2]), + ) + + def test_squeeze(self, shape, dtype, axis): x = relay.var("x", relay.TensorType(shape, dtype)) squeeze = relay.squeeze(x, axis=axis) @@ -181,10 +200,6 @@ def verify_squeeze(shape, dtype, axis): ref_res = np.squeeze(data, axis=np_axis) np.testing.assert_allclose(op_res.numpy(), ref_res, rtol=0.01) - verify_squeeze((1, 3, 2, 5), "float32", None) - verify_squeeze((1, 3, 1), "float32", [0]) - verify_squeeze((1, 2, 1, 2, 1), "float32", [0, 2]) - def test_transpose_infer_type(): n, t, d = te.size_var("n"), te.size_var("t"), 100 @@ -200,24 +215,19 @@ def test_transpose_infer_type(): assert yy.checked_type == relay.TensorType((100, t, n), "float32") -@tvm.testing.uses_gpu -def test_transpose(): - def verify_transpose(dshape, axes): - x = relay.var("x", relay.TensorType(dshape, "float32")) - z = relay.transpose(x, axes=axes) +def test_transpose(target, dev, executor_kind): + dshape = (2, 3, 4) + axes = (0, 2, 1) - func = relay.Function([x], z) - x_data = np.random.uniform(low=-1, high=1, 
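For reference, a minimal sketch of the dict-keyed parameter pattern that TestZerosOnes and TestUnaryIdentity adopt above: the dict values feed tvm.testing.parameters and the dict keys become the pytest ids (e.g. test_infer_type[exp]). The class name and op table below are illustrative only, not part of this patch.

    import numpy as np

    import tvm.testing
    from tvm import relay
    from tvm.relay.testing import run_infer_type


    class TestUnaryExample:
        # Hypothetical op table; dict keys become the pytest ids.
        config = {
            "exp": (relay.exp, np.exp),
            "negative": (relay.negative, np.negative),
        }
        relay_op, numpy_ref = tvm.testing.parameters(*config.values(), ids=config.keys())

        def test_infer_type(self, relay_op, numpy_ref):
            x = relay.var("x", relay.TensorType((4,), "float32"))
            yy = run_infer_type(relay_op(x))
            assert yy.checked_type == relay.TensorType((4,), "float32")
            assert numpy_ref(np.zeros((4,), "float32")).shape == (4,)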
size=dshape).astype("float32") - ref_res = np.transpose(x_data, axes=axes) + x = relay.var("x", relay.TensorType(dshape, "float32")) + z = relay.transpose(x, axes=axes) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - x_data - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + func = relay.Function([x], z) + x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32") + ref_res = np.transpose(x_data, axes=axes) - verify_transpose((2, 3, 4), (0, 2, 1)) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)(x_data) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) def test_squeeze_infer_type(): @@ -253,9 +263,26 @@ def test_reshape_infer_type(): assert yy.checked_type == relay.TensorType((n, t, 2000), "float32") -@tvm.testing.uses_gpu -def test_reshape(): - def verify_reshape(shape, newshape, oshape): +class TestReshape: + shape, newshape, oshape = tvm.testing.parameters( + ((2, 3, 4), (8, 3), (8, 3)), + ((4, 7), (2, 7, 2), (2, 7, 2)), + ((2, 3, 4), (4, 0, 2), (4, 3, 2)), + ((2, 3, 4), (2, 0, 0), (2, 3, 4)), + ((2, 3, 4), (0, -1), (2, 12)), + ((2, 3, 4), (-1, 0), (8, 3)), + ((2, 3, 4), (2, -2), (2, 3, 4)), + ((2, 3, 4), (-2, 1, 1), (2, 3, 4, 1, 1)), + ((2, 3, 4), (-3, 4), (6, 4)), + ((2, 3, 4, 5), (-3, -3), (6, 20)), + ((2, 3, 4), (0, -3), (2, 12)), + ((2, 3, 4), (-3, -2), (6, 4)), + ((2, 3, 4), (-4, 1, 2, -2), (1, 2, 3, 4)), + ((2, 3, 4), (2, -4, -1, 3, -2), (2, 1, 3, 4)), + ((1,), (), ()), + ) + + def test_reshape(self, target, dev, executor_kind, shape, newshape, oshape): x = relay.var("x", relay.TensorType(shape, "float32")) z = relay.reshape(x, newshape=newshape) zz = run_infer_type(z) @@ -266,28 +293,10 @@ def verify_reshape(shape, newshape, oshape): check_grad(func) x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32") ref_res = np.reshape(x_data, oshape) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - x_data - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) - - verify_reshape((2, 3, 4), (8, 3), (8, 3)) - verify_reshape((4, 7), (2, 7, 2), (2, 7, 2)) - verify_reshape((2, 3, 4), (4, 0, 2), (4, 3, 2)) - verify_reshape((2, 3, 4), (2, 0, 0), (2, 3, 4)) - verify_reshape((2, 3, 4), (0, -1), (2, 12)) - verify_reshape((2, 3, 4), (-1, 0), (8, 3)) - verify_reshape((2, 3, 4), (2, -2), (2, 3, 4)) - verify_reshape((2, 3, 4), (-2, 1, 1), (2, 3, 4, 1, 1)) - verify_reshape((2, 3, 4), (-3, 4), (6, 4)) - verify_reshape((2, 3, 4, 5), (-3, -3), (6, 20)) - verify_reshape((2, 3, 4), (0, -3), (2, 12)) - verify_reshape((2, 3, 4), (-3, -2), (6, 4)) - verify_reshape((2, 3, 4), (-4, 1, 2, -2), (1, 2, 3, 4)) - verify_reshape((2, 3, 4), (2, -4, -1, 3, -2), (2, 1, 3, 4)) - verify_reshape((1,), (), ()) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + x_data + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) def test_reshape_fail(): @@ -340,9 +349,16 @@ def test_reshape_like_infer_type(): assert w.checked_type == relay.TensorType((5, 6, 4), "float32") -@tvm.testing.uses_gpu -def test_reshape_like(): - def verify_reshape_like(shape, oshape, shape_like=None, reshape_like_kwargs={}): +class TestReshapeLike: + shape, oshape, shape_like, reshape_like_kwargs = tvm.testing.parameters( + ((2, 3, 4), (1, 
8, 3), None, {}), + ((4, 7), (2, 7, 2), None, {}), + ((1, 2, 3, 4), (1, 6, 4), (1, 6, 5), dict(lhs_begin=1, lhs_end=3, rhs_begin=1, rhs_end=2)), + ) + + def test_reshape_like( + self, target, dev, executor_kind, shape, oshape, shape_like=None, reshape_like_kwargs={} + ): if shape_like is None: shape_like = oshape x_data = np.random.uniform(low=-1, high=1, size=shape).astype("float32") @@ -357,41 +373,56 @@ def verify_reshape_like(shape, oshape, shape_like=None, reshape_like_kwargs={}): func = relay.Function([x, y], z) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - x_data, y_data - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + x_data, y_data + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) - verify_reshape_like((2, 3, 4), (1, 8, 3)) - verify_reshape_like((4, 7), (2, 7, 2)) - verify_reshape_like( - (1, 2, 3, 4), (1, 6, 4), (1, 6, 5), dict(lhs_begin=1, lhs_end=3, rhs_begin=1, rhs_end=2) - ) +class TestTakeInferType: + d1, d2, d3 = te.var("d1"), te.var("d2"), te.var("d3") + d4, d5, d6 = te.var("d4"), te.var("d5"), te.var("d6") + dshape, indices_shape, oshape, axis = tvm.testing.parameters( + ((d1,), (1,), (1,), 0), + ((4,), (d1, d2), (d1, d2), None), + ((3, 3, 3), (1, d2), (1, d2), None), + ((d1, d2), (d3, d4, d5), (d3, d4, d5, d2), 0), + ((d1, d2), (d3, d4, d5), (d1, d3, d4, d5), 1), + ((d1, d2, d3, d4), (d5, d6), (d1, d2, d5, d6, d4), -2), + ) -def test_take_infer_type(): - def verify_take(dshape, indices_shape, oshape, axis=None): + def test_take(self, dshape, indices_shape, oshape, axis): x = relay.var("x", relay.TensorType(dshape, "float32")) indices = relay.var("indices", relay.TensorType(indices_shape, "int32")) y = relay.take(x, indices, axis=axis) yy = run_infer_type(y) assert yy.checked_type == relay.TensorType(oshape, "float32") - d1, d2, d3 = te.var("d1"), te.var("d2"), te.var("d3") - d4, d5, d6 = te.var("d4"), te.var("d5"), te.var("d6") - verify_take((d1,), (1,), (1,), 0) - verify_take((4,), (d1, d2), (d1, d2)) - verify_take((3, 3, 3), (1, d2), (1, d2)) - verify_take((d1, d2), (d3, d4, d5), (d3, d4, d5, d2), 0) - verify_take((d1, d2), (d3, d4, d5), (d1, d3, d4, d5), 1) - verify_take((d1, d2, d3, d4), (d5, d6), (d1, d2, d5, d6, d4), -2) +class TestTake: + src_shape, indices_src, axis, mode = tvm.testing.parameters( + ((4,), [1], None, "clip"), + ((4,), [[0, 1, 2, 3]], None, "clip"), + ((3, 3, 3), [[11, 25]], None, "clip"), + ((4,), [[0, 1], [2, 3]], None, "clip"), + ((4,), [1], 0, "clip"), + ((2, 2), [[[1, 0], [0, 1]]], 0, "clip"), + ((2, 2), [[[1, 0], [0, 1]]], 1, "clip"), + ((4, 3, 5, 6), [[2, 1, 0, 0]], -2, "clip"), + ((3, 4), [-5, 20], None, "clip"), + ((3, 4), [-5, 20], None, "wrap"), + ((3, 4), [-1, 2], 0, "clip"), + ((3, 4), [-1, 2], 0, "wrap"), + ((3, 4), [-1, 2], 1, "clip"), + ((3, 4), [-1, 2], 1, "wrap"), + ((3, 3, 3), [[11, 25]], None, "fast"), + ((3, 4), [0, 2], 0, "fast"), + ((3, 4), [0, 2], 1, "fast"), + ) -@tvm.testing.uses_gpu -def test_take(): - def verify_take(src_shape, indices_src, axis=None, mode="clip"): + # Incorrect numeric output in some cases on vulkan + @tvm.testing.known_failing_targets("vulkan") + def test_take(self, target, dev, executor_kind, src_shape, indices_src, axis, mode): src_dtype = "float32" indices_dtype = "int32" indices_src = np.array(indices_src, dtype=indices_dtype) @@ -404,134 
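For reference, a sketch of how the target/dev fixtures combine with tvm.testing.known_failing_targets on a stand-alone test, mirroring TestTake.test_take above. The operator, shapes, and tolerance are illustrative; the decorator is expected to mark the vulkan parametrization as an expected failure rather than a hard failure.

    import numpy as np

    import tvm
    import tvm.testing
    from tvm import relay


    # Hypothetical stand-alone test: the plugin parametrizes target/dev, and the
    # decorator turns the vulkan case into an xfail.
    @tvm.testing.known_failing_targets("vulkan")
    def test_negative_matches_numpy(target, dev):
        x = relay.var("x", shape=(8,), dtype="float32")
        func = relay.Function([x], relay.negative(x))
        data = np.random.uniform(-1, 1, size=(8,)).astype("float32")
        out = relay.create_executor("graph", device=dev, target=target).evaluate(func)(data)
        tvm.testing.assert_allclose(out.numpy(), -data, rtol=1e-5)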
+435,117 @@ def verify_take(src_shape, indices_src, axis=None, mode="clip"): np_mode = "raise" if mode == "fast" else mode ref_res = np.take(x_data, indices=indices_src, axis=axis, mode=np_mode) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - x_data, indices_src - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) - - verify_take((4,), [1]) - verify_take((4,), [[0, 1, 2, 3]]) - verify_take((3, 3, 3), [[11, 25]]) - verify_take((4,), [[0, 1], [2, 3]]) - verify_take((4,), [1], 0) - verify_take((2, 2), [[[1, 0], [0, 1]]], 0) - verify_take((2, 2), [[[1, 0], [0, 1]]], 1) - verify_take((4, 3, 5, 6), [[2, 1, 0, 0]], -2) - verify_take((3, 4), [-5, 20]) - verify_take((3, 4), [-5, 20], mode="wrap") - verify_take((3, 4), [-1, 2], axis=0) - verify_take((3, 4), [-1, 2], axis=0, mode="wrap") - verify_take((3, 4), [-1, 2], axis=1) - verify_take((3, 4), [-1, 2], axis=1, mode="wrap") - verify_take((3, 3, 3), [[11, 25]], mode="fast") - verify_take((3, 4), [0, 2], axis=0, mode="fast") - verify_take((3, 4), [0, 2], axis=1, mode="fast") - - -def test_split_infer_type(): - def verify_split(dshape, indices_or_sections, ret_type, axis=None): - x = relay.var("x", relay.ty.TensorType(dshape, "float32")) - y = relay.split(x, indices_or_sections, axis=axis) - yy = run_infer_type(y.astuple()) - assert yy.checked_type == ret_type + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + x_data, indices_src + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + +class TestSplitInferType: idxd = tvm.tir.indexdiv d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4") axis = te.var("axis") - verify_split( - (5, 5, 2, 2), - 5, - relay.ty.TupleType( - tvm.runtime.convert( - [ - relay.ty.TensorType((5, 1, 2, 2), "float32"), - relay.ty.TensorType((5, 1, 2, 2), "float32"), - relay.ty.TensorType((5, 1, 2, 2), "float32"), - relay.ty.TensorType((5, 1, 2, 2), "float32"), - relay.ty.TensorType((5, 1, 2, 2), "float32"), - ] - ) + + dshape, indices_or_sections, ret_type, axis = tvm.testing.parameters( + ( + (5, 5, 2, 2), + 5, + relay.ty.TupleType( + tvm.runtime.convert( + [ + relay.ty.TensorType((5, 1, 2, 2), "float32"), + relay.ty.TensorType((5, 1, 2, 2), "float32"), + relay.ty.TensorType((5, 1, 2, 2), "float32"), + relay.ty.TensorType((5, 1, 2, 2), "float32"), + relay.ty.TensorType((5, 1, 2, 2), "float32"), + ] + ) + ), + 1, ), - axis=1, - ) - verify_split( - (5, 5, 2, 2), - 5, - relay.ty.TupleType( - tvm.runtime.convert( - [ - relay.ty.TensorType((1, 5, 2, 2), "float32"), - relay.ty.TensorType((1, 5, 2, 2), "float32"), - relay.ty.TensorType((1, 5, 2, 2), "float32"), - relay.ty.TensorType((1, 5, 2, 2), "float32"), - relay.ty.TensorType((1, 5, 2, 2), "float32"), - ] - ) + ( + (5, 5, 2, 2), + 5, + relay.ty.TupleType( + tvm.runtime.convert( + [ + relay.ty.TensorType((1, 5, 2, 2), "float32"), + relay.ty.TensorType((1, 5, 2, 2), "float32"), + relay.ty.TensorType((1, 5, 2, 2), "float32"), + relay.ty.TensorType((1, 5, 2, 2), "float32"), + relay.ty.TensorType((1, 5, 2, 2), "float32"), + ] + ) + ), + 0, ), - axis=0, - ) - verify_split( - (d1, d2, d3, d4), - 4, - relay.ty.TupleType( - tvm.runtime.convert( - [ - relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"), - relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"), - relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"), - relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), 
"float32"), - ] - ) + ( + (d1, d2, d3, d4), + 4, + relay.ty.TupleType( + tvm.runtime.convert( + [ + relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"), + relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"), + relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"), + relay.ty.TensorType((d1, d2, idxd(d3, 4), d4), "float32"), + ] + ) + ), + 2, ), - axis=2, - ) - verify_split( - (d1, d2, d3, d4), - 2, - relay.ty.TupleType( - tvm.runtime.convert( - [ - relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"), - relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"), - ] - ) + ( + (d1, d2, d3, d4), + 2, + relay.ty.TupleType( + tvm.runtime.convert( + [ + relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"), + relay.ty.TensorType((idxd(d1, 2), d2, d3, d4), "float32"), + ] + ) + ), + 0, ), - axis=0, - ) - verify_split( - (d1, d2, d3, d4), - (2, 4, 7), - relay.ty.TupleType( - tvm.runtime.convert( - [ - relay.ty.TensorType((d1, 2, d3, d4), "float32"), - relay.ty.TensorType((d1, 2, d3, d4), "float32"), - relay.ty.TensorType((d1, 3, d3, d4), "float32"), - relay.ty.TensorType((d1, (d2 - 7), d3, d4), "float32"), - ] - ) + ( + (d1, d2, d3, d4), + (2, 4, 7), + relay.ty.TupleType( + tvm.runtime.convert( + [ + relay.ty.TensorType((d1, 2, d3, d4), "float32"), + relay.ty.TensorType((d1, 2, d3, d4), "float32"), + relay.ty.TensorType((d1, 3, d3, d4), "float32"), + relay.ty.TensorType((d1, (d2 - 7), d3, d4), "float32"), + ] + ) + ), + 1, ), - axis=1, - ) - verify_split( - (d1, d2, d3, d4), - tuple(np.array([2, 4, 7]).astype(np.int64)), - relay.ty.TupleType( - tvm.runtime.convert( - [ - relay.ty.TensorType((d1, 2, d3, d4), "float32"), - relay.ty.TensorType((d1, 2, d3, d4), "float32"), - relay.ty.TensorType((d1, 3, d3, d4), "float32"), - relay.ty.TensorType((d1, (d2 - 7), d3, d4), "float32"), - ] - ) + ( + (d1, d2, d3, d4), + tuple(np.array([2, 4, 7]).astype(np.int64)), + relay.ty.TupleType( + tvm.runtime.convert( + [ + relay.ty.TensorType((d1, 2, d3, d4), "float32"), + relay.ty.TensorType((d1, 2, d3, d4), "float32"), + relay.ty.TensorType((d1, 3, d3, d4), "float32"), + relay.ty.TensorType((d1, (d2 - 7), d3, d4), "float32"), + ] + ) + ), + 1, ), - axis=1, ) + def test_split(self, dshape, indices_or_sections, ret_type, axis): + x = relay.var("x", relay.ty.TensorType(dshape, "float32")) + y = relay.split(x, indices_or_sections, axis=axis) + yy = run_infer_type(y.astuple()) + assert yy.checked_type == ret_type + def test_full_infer_type(): # default settings: match input dtype @@ -548,23 +562,36 @@ def test_full_infer_type(): assert yy.checked_type == relay.TensorType((1, 2), "int8") -@tvm.testing.uses_gpu -def test_full(): - def verify_full(fill_value, src_shape, dtype): +class TestFull: + fill_value, arr_shape, dtype = tvm.testing.parameters( + (4, (1, 3, 4, 4), "int32"), + (4, (1, 3, 4, 4), "int64"), + (4.0, (1, 4), "float32"), + ) + + def test_full(self, target, dev, executor_kind, fill_value, arr_shape, dtype): x = relay.var("x", relay.scalar_type(dtype)) - z = relay.full(x, src_shape, dtype) + z = relay.full(x, arr_shape, dtype) func = relay.Function([x], z) - ref_res = np.full(src_shape, fill_value) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - np.array(fill_value, dtype) - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + ref_res = np.full(arr_shape, fill_value, dtype=dtype) + op_res = relay.create_executor(executor_kind, device=dev, 
target=target).evaluate(func)( + np.array(fill_value, dtype) + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) - verify_full(4, (1, 3, 4, 4), "int32") - # verify_full(4, (1, 3, 4, 4), "int64") # This does not pass, python int32 is not upcast to int64, not sure how to fix it. - verify_full(4.0, (1, 4), "float32") + def test_full_like(self, target, dev, executor_kind, arr_shape, fill_value, dtype): + x_data = np.random.uniform(low=-1, high=1, size=arr_shape).astype(dtype) + x = relay.var("x", relay.TensorType(arr_shape, dtype)) + y = relay.var("y", relay.scalar_type(dtype)) + z = relay.full_like(x, y) + + func = relay.Function([x, y], z) + ref_res = np.full_like(x_data, fill_value) + + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + x_data, np.array(fill_value, dtype) + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) def test_full_like_infer_type(): @@ -584,30 +611,7 @@ def test_full_like_infer_type(): assert yy.checked_type == relay.TensorType((n, c, h, w), "float32") -@tvm.testing.uses_gpu -def test_full_like(): - def verify_full_like(base, fill_value, dtype): - x_data = np.random.uniform(low=-1, high=1, size=base).astype(dtype) - x = relay.var("x", relay.TensorType(base, dtype)) - y = relay.var("y", relay.scalar_type(dtype)) - z = relay.full_like(x, y) - - func = relay.Function([x, y], z) - ref_res = np.full_like(x_data, fill_value) - - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - x_data, np.array(fill_value, dtype) - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) - - verify_full_like((1, 3, 4, 4), 4, "int32") - verify_full_like((1, 1), 44.0, "float32") - - -@tvm.testing.uses_gpu -def test_infer_type_leaky_relu(): +def test_infer_type_leaky_relu(target, dev): n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w") x = relay.var("x", relay.TensorType((n, c, h, w), "float32")) y = relay.nn.leaky_relu(x, alpha=0.1) @@ -626,42 +630,55 @@ def test_infer_type_leaky_relu(): x_data = np.random.uniform(low=-1, high=1, size=shape).astype(dtype) ref_res = np.where(x_data > 0, x_data, x_data * 0.1) - for target, dev in tvm.testing.enabled_targets(): - op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5) - op_res2 = relay.create_executor("debug", device=dev, target=target).evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5) + op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(x_data) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5) + op_res2 = relay.create_executor("debug", device=dev, target=target).evaluate(func)(x_data) + tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5) -def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"): - x = relay.var("data", relay.TensorType(data, dtype)) - if alpha: - y = relay.var("alpha", relay.TensorType(alpha, dtype)) - else: - y = relay.var("alpha", relay.IncompleteType()) - z = relay.nn.prelu(x, y, axis=axis) - zz = run_infer_type(z) - if axis != 1: - assert "axis" in z.astext() - assert zz.checked_type == relay.ty.TensorType(output, dtype) - if not alpha: - axis = axis if axis else 1 - alpha_shape = (data[axis],) - assert zz.args[1].checked_type == relay.TensorType(alpha_shape, "float32") - - 
if all(isinstance(v, tvm.tir.Var) == 1 for v in data) or not alpha: - return - - func = relay.Function([x, y], z) - x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype) - a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype) - - if axis == 1: - ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data >= 0) * x_data - else: - ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data >= 0) * x_data - - for target, dev in tvm.testing.enabled_targets(): +class TestInferTypePrelu: + dtype = tvm.testing.parameter("float32") + + n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w") + data, alpha, axis, output = tvm.testing.parameters( + ((n, c, h, w), (c,), 1, (n, c, h, w)), + ((n, h, w, c), (c,), 3, (n, h, w, c)), + ((n, c, h, w), None, 1, (n, c, h, w)), + ((n, h, w, c), None, 3, (n, h, w, c)), + ((1, 3, 2, 2), (3,), 1, (1, 3, 2, 2)), + ((1, 2, 2, 3), (3,), 3, (1, 2, 2, 3)), + ((1, 3, 2, 2), None, 1, (1, 3, 2, 2)), + ((1, 2, 2, 3), None, 3, (1, 2, 2, 3)), + ) + + def test_infer_type_prelu(self, target, dev, data, alpha, axis, output, dtype): + x = relay.var("data", relay.TensorType(data, dtype)) + if alpha: + y = relay.var("alpha", relay.TensorType(alpha, dtype)) + else: + y = relay.var("alpha", relay.IncompleteType()) + z = relay.nn.prelu(x, y, axis=axis) + zz = run_infer_type(z) + if axis != 1: + assert "axis" in z.astext() + assert zz.checked_type == relay.ty.TensorType(output, dtype) + if not alpha: + axis = axis if axis else 1 + alpha_shape = (data[axis],) + assert zz.args[1].checked_type == relay.TensorType(alpha_shape, "float32") + + if all(isinstance(v, tvm.tir.Var) == 1 for v in data) or not alpha: + return + + func = relay.Function([x, y], z) + x_data = np.random.uniform(low=-1, high=1, size=data).astype(dtype) + a_data = np.random.uniform(low=-1, high=1, size=alpha).astype(dtype) + + if axis == 1: + ref_res = (x_data < 0) * (x_data * a_data.reshape(3, 1, 1)) + (x_data >= 0) * x_data + else: + ref_res = (x_data < 0) * (x_data * a_data.reshape(1, 1, 3)) + (x_data >= 0) * x_data + op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)( x_data, a_data ) @@ -672,23 +689,24 @@ def verify_infer_type_prelu(data, alpha, axis, output, dtype="float32"): tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5) -@tvm.testing.uses_gpu -def test_infer_type_prelu(): - n, c, h, w = te.size_var("n"), te.size_var("c"), te.size_var("h"), te.size_var("w") - verify_infer_type_prelu((n, c, h, w), (c,), 1, (n, c, h, w)) - verify_infer_type_prelu((n, h, w, c), (c,), 3, (n, h, w, c)) - verify_infer_type_prelu((n, c, h, w), None, 1, (n, c, h, w)) - verify_infer_type_prelu((n, h, w, c), None, 3, (n, h, w, c)) - verify_infer_type_prelu((1, 3, 2, 2), (3,), 1, (1, 3, 2, 2)) - verify_infer_type_prelu((1, 2, 2, 3), (3,), 3, (1, 2, 2, 3)) - verify_infer_type_prelu((1, 3, 2, 2), None, 1, (1, 3, 2, 2)) - verify_infer_type_prelu((1, 2, 2, 3), None, 3, (1, 2, 2, 3)) - - -@tvm.testing.uses_gpu -def test_arange(): - def verify_arange(start, stop, step): - dtype = "float32" +class TestArange: + dtype = tvm.testing.parameter("float32") + + start, stop, step = tvm.testing.parameters( + (None, 20, None), + (None, 20, 2), + (1, 20, None), + (1, 20, 2), + # arange doesnt' support floating point right now, see type relation + # (1, 20, 1.5), + (1, 20.5, None), + (1, 20, 3), + (20, 1, -1), + # arange doesnt' support floating point right now, see type relation + # (20, 1, -1.5), + ) + + def test_arange(self, 
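For reference, a compact sketch of the two declaration styles used throughout these conversions: tvm.testing.parameter sweeps a single fixture independently, while tvm.testing.parameters advances several fixtures in lockstep, as in TestInferTypePrelu and TestArange above. The names and values below are illustrative.

    import tvm.testing

    # Hypothetical module-level declarations.
    executor_kind = tvm.testing.parameter("graph", "debug")  # independent sweep
    shape, axis = tvm.testing.parameters(  # correlated pairs, advanced together
        ((1, 3, 2, 5), None),
        ((1, 3, 1), 0),
    )


    def test_combinations(executor_kind, shape, axis):
        # 2 executor kinds x 2 (shape, axis) pairs = 4 test cases.
        assert executor_kind in ("graph", "debug")
        assert (shape, axis) in {((1, 3, 2, 5), None), ((1, 3, 1), 0)}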
target, dev, executor_kind, start, stop, step, dtype): if start is None and step is None: x = relay.arange(relay.const(stop, dtype=dtype)) ref_res = np.arange(stop).astype(dtype) @@ -707,27 +725,21 @@ def verify_arange(start, stop, step): ref_res = np.arange(start, stop, step).astype(dtype) func = relay.Function([], x) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)() - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) - - verify_arange(None, 20, None) - verify_arange(None, 20, 2) - verify_arange(1, 20, None) - verify_arange(1, 20, 2) - # arange doesnt' support floating point right now, see type relation - # verify_arange(1, 20, 1.5) - verify_arange(1, 20.5, None) - verify_arange(1, 20, 3) - verify_arange(20, 1, -1) - # arange doesnt' support floating point right now, see type relation - # verify_arange(20, 1, -1.5) - - -@tvm.testing.uses_gpu -def test_meshgrid(): - def verify_meshgrid(lengths, indexing="ij"): + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)() + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + + +class TestMeshgrid: + lengths, indexing = tvm.testing.parameters( + ([3, 5], "ij"), + ([4, 2], "xy"), + ([3, 5, 2], "ij"), + ([3, 1, 5], "xy"), + # Length 0 signifies scalar. + ([3, 5, 0], "ij"), + ) + + def test_meshgrid(self, target, dev, executor_kind, lengths, indexing="ij"): input_vars = [] input_data = [] for i, length in enumerate(lengths): @@ -745,26 +757,22 @@ def verify_meshgrid(lengths, indexing="ij"): # Get ref ref_res = np.meshgrid(*input_data, indexing=indexing) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - *input_data - ) - assert len(op_res) == len(ref_res) - for i in range(len(op_res)): - tvm.testing.assert_allclose(op_res[i].numpy(), ref_res[i], rtol=1e-5) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + *input_data + ) + assert len(op_res) == len(ref_res) + for i in range(len(op_res)): + tvm.testing.assert_allclose(op_res[i].numpy(), ref_res[i], rtol=1e-5) - verify_meshgrid([3, 5]) - verify_meshgrid([4, 2], indexing="xy") - verify_meshgrid([3, 5, 2]) - verify_meshgrid([3, 1, 5], indexing="xy") - # Length 0 signifies scalar. 
- verify_meshgrid([3, 5, 0]) +class TestTile: + dshape, reps = tvm.testing.parameters( + ((2, 3, 4), (3, 2, 1)), + ((2, 3, 4), (1, 2)), + ((2, 3), (3, 2, 1)), + ) -@tvm.testing.uses_gpu -def test_tile(): - def verify_tile(dshape, reps): + def test_tile(self, target, dev, executor_kind, dshape, reps): x = relay.var("x", relay.TensorType(dshape, "float32")) z = relay.tile(x, reps=reps) @@ -772,95 +780,91 @@ def verify_tile(dshape, reps): x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32") ref_res = np.tile(x_data, reps=reps) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - x_data - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + x_data + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) - verify_tile((2, 3, 4), (3, 2, 1)) - verify_tile((2, 3, 4), (1, 2)) - verify_tile((2, 3), (3, 2, 1)) +class TestRepeat: + dshape, repeats, axis = tvm.testing.parameters( + ((3,), 2, 0), + ((3, 10), 2, -1), + ((3, 2, 4), 3, 1), + ) -@tvm.testing.uses_gpu -def test_repeat(): - def verify_repeat(dshape, repeats, axis): + def test_repeat(self, target, dev, executor_kind, dshape, repeats, axis): x = relay.Var("x", relay.TensorType(dshape, "float32")) func = relay.Function([x], relay.repeat(x, repeats, axis)) data = np.random.uniform(size=dshape).astype("float32") ref_res = np.repeat(data, repeats, axis) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)(data) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + data + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + - verify_repeat((3,), 2, 0) - verify_repeat((3, 10), 2, -1) - verify_repeat((3, 2, 4), 3, 1) +class TestStack: + dshapes, axis = tvm.testing.parameters( + ([(2,), (2,), (2,)], -1), + ([(2,), (2,), (2,)], 0), + ([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1), + ([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1), + ([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], 4), + ) + + expr_type = tvm.testing.parameter("tuple", "list", "tuple_expr") + + @tvm.testing.fixture + def ref_data(self, dshapes, axis): + np_in = [np.random.normal(size=shape).astype("float32") for shape in dshapes] + np_out = np.stack(np_in, axis=axis) + return np_in, np_out + + @tvm.testing.fixture + def input_expr(self, dshapes, axis, expr_type, ref_data): + input_vars = [relay.var("input", relay.TensorType(shape, "float32")) for shape in dshapes] + if expr_type == "tuple": + input_expr = relay.Tuple(input_vars) -@tvm.testing.uses_gpu -def test_stack(): - def produce_input_tuple(dshapes): - y = [relay.var("input", relay.TensorType(shape, "float32")) for shape in dshapes] - return relay.Tuple(y) + elif expr_type == "list": + input_expr = input_vars - def ref_stack(inputs, axis): - return np.stack(inputs, axis=axis) + elif expr_type == "tuple_expr": + # expression that evaluates to a tuple + # but is not a tuple literal + np_in, np_out = ref_data + x = relay.Var("x") + input_expr = relay.Let(x, relay.Tuple([relay.const(inp) for inp in np_in]), x) - def verify_stack(input_expr, relay_args, ref_res, axis): + else: + raise ValueError(f"Unknown expr_type '{expr_type}'") + + 
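For reference, a sketch of deriving per-test data from parameters with @tvm.testing.fixture, as TestStack does above for ref_data and input_expr. The class name, shapes, and computation below are illustrative.

    import numpy as np

    import tvm.testing


    class TestDerivedFixtureSketch:
        # Hypothetical parameter; TestStack derives ref_data/input_expr the same way.
        dshape = tvm.testing.parameter((2, 3), (4,))

        @tvm.testing.fixture
        def ref_data(self, dshape):
            np_in = np.random.normal(size=dshape).astype("float32")
            return np_in, np_in.sum()

        def test_sum(self, ref_data):
            np_in, np_out = ref_data
            np.testing.assert_allclose(np_in.sum(), np_out, rtol=1e-6)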
return input_expr + + def test_stack(self, target, dev, executor_kind, input_expr, ref_data, axis): z = relay.stack(input_expr, axis=axis) inp_vars = relay.analysis.free_vars(z) func = relay.Function(inp_vars, z) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - *relay_args - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) - - def verify_tup_lit_stack(dshapes, axis): - input_tuple = produce_input_tuple(dshapes) - input_data = [np.random.normal(size=shape).astype("float32") for shape in dshapes] - ref_res = ref_stack(input_data, axis) - verify_stack(input_tuple, input_data, ref_res, axis) - - def verify_list_lit_stack(dshapes, axis): - input_list = produce_input_tuple(dshapes).fields - input_data = [np.random.normal(size=shape).astype("float32") for shape in dshapes] - ref_res = ref_stack(input_data, axis) - verify_stack(input_list, input_data, ref_res, axis) - - def verify_tup_expr_stack(dshapes, axis): - input_data = [np.random.normal(size=shape).astype("float32") for shape in dshapes] - ref_res = ref_stack(input_data, axis) - - # expression that evaluates to a tuple - # but is not a tuple literal - x = relay.Var("x") - input_expr = relay.Let(x, relay.Tuple([relay.const(inp) for inp in input_data]), x) - verify_stack(input_expr, [], ref_res, axis) - - dshape_axis_combos = [ - ([(2,), (2,), (2,)], -1), - ([(2,), (2,), (2,)], 0), - ([(2, 2, 4), (2, 2, 4), (2, 2, 4)], 1), - ([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], -1), - ([(2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4), (2, 2, 3, 4)], 4), - ] + np_in, np_out = ref_data + relay_args = np_in if inp_vars else [] - for dshapes, axis in dshape_axis_combos: - verify_tup_lit_stack(dshapes, axis) - verify_list_lit_stack(dshapes, axis) - verify_tup_expr_stack(dshapes, axis) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + *relay_args + ) + tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=1e-5) -@tvm.testing.uses_gpu -def test_reverse(): - def verify_reverse(dshape, axis): +class TestReverse: + dshape, axis = tvm.testing.parameters( + ((2, 3, 4), 1), + ((4, 7), 0), + ((2, 3, 4), -1), + ) + + def test_reverse(self, target, dev, executor_kind, dshape, axis): x = relay.var("x", relay.TensorType(dshape, "float32")) z = relay.reverse(x, axis=axis) zz = run_infer_type(z) @@ -868,20 +872,13 @@ def verify_reverse(dshape, axis): func = relay.Function([x], z) x_data = np.random.uniform(low=-1, high=1, size=dshape).astype("float32") ref_res = np.flip(x_data, axis) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - x_data - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) - - verify_reverse((2, 3, 4), 1) - verify_reverse((4, 7), 0) - verify_reverse((2, 3, 4), -1) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + x_data + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) -@tvm.testing.uses_gpu -def test_reverse_sequence(): +def test_reverse_sequence(target, dev, executor_kind): def verify_reverse_sequence(x_data, seq_lengths, batch_axis, seq_axis, ref_res): seq_lengths_data = np.array(seq_lengths).astype("int32") x = relay.var("x", relay.TensorType(x_data.shape, str(x_data.dtype))) @@ -890,12 +887,10 @@ def verify_reverse_sequence(x_data, seq_lengths, batch_axis, 
seq_axis, ref_res): assert zz.checked_type == x.type_annotation func = relay.Function([x], z) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - x_data - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + x_data + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) indata = np.array(np.arange(0, 16)).reshape([4, 4]).astype("int32") result = [[0, 5, 10, 15], [4, 1, 6, 11], [8, 9, 2, 7], [12, 13, 14, 3]] @@ -958,19 +953,19 @@ def verify_reverse_sequence(x_data, seq_lengths, batch_axis, seq_axis, ref_res): ) -@tvm.testing.uses_gpu -def test_scatter(): - def ref_scatter(data, indices, updates, axis=0): - idx = np.indices(indices.shape).reshape(indices.ndim, -1) +def ref_scatter(data, indices, updates, axis=0): + idx = np.indices(indices.shape).reshape(indices.ndim, -1) - updated_idx = np.copy(idx) - indices = indices.reshape(-1) - for i in range(len(indices)): - updated_idx[axis, i] = indices[i] - scattered = np.copy(data) - scattered[tuple(updated_idx)] = updates[tuple(idx)] - return scattered + updated_idx = np.copy(idx) + indices = indices.reshape(-1) + for i in range(len(indices)): + updated_idx[axis, i] = indices[i] + scattered = np.copy(data) + scattered[tuple(updated_idx)] = updates[tuple(idx)] + return scattered + +def test_scatter(target, dev, executor_kind): def verify_scatter(dshape, ishape, axis=0): d = relay.var("d", relay.TensorType(dshape, "float32")) i = relay.var("i", relay.TensorType(ishape, "int64")) @@ -985,14 +980,45 @@ def verify_scatter(dshape, ishape, axis=0): ref_res = ref_scatter(data_np, indices_np, updates_np, axis) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - data_np, indices_np, updates_np - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + data_np, indices_np, updates_np + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) - def verify_dynamic_scatter(dshape, ishape, axis=0): + verify_scatter((10,), (10,), 0) + verify_scatter((10, 5), (10, 5), -2) + verify_scatter((10, 5), (10, 5), -1) + verify_scatter((10, 5), (3, 5), 0) + verify_scatter((12, 4), (7, 2), 1) + verify_scatter((2, 3, 4), (1, 3, 4), 0) + verify_scatter((2, 3, 4), (2, 1, 4), 1) + verify_scatter((2, 3, 4), (2, 3, 1), 2) + verify_scatter((4, 2, 1), (1, 1, 1), 0) + verify_scatter((2, 3, 4, 5), (1, 3, 4, 5), 0) + verify_scatter((6, 3, 4, 5), (2, 3, 4, 5), 1) + verify_scatter((2, 3, 8, 5), (2, 3, 1, 1), 2) + verify_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3) + + +class TestDynamicScatter: + dshape, ishape, axis = tvm.testing.parameters( + ((10,), (10,), 0), + ((10, 5), (10, 5), -2), + ((10, 5), (10, 5), -1), + ((10, 5), (3, 5), 0), + ((12, 4), (7, 2), 1), + ((2, 3, 4), (1, 3, 4), 0), + ((2, 3, 4), (2, 1, 4), 1), + ((2, 3, 4), (2, 3, 1), 2), + ((4, 2, 1), (1, 1, 1), 0), + ((2, 3, 4, 5), (1, 3, 4, 5), 0), + ((6, 3, 4, 5), (2, 3, 4, 5), 1), + ((2, 3, 8, 5), (2, 3, 1, 1), 2), + ((16, 16, 4, 5), (16, 16, 4, 5), 3), + ) + + @pytest.mark.parametrize("executor_kind", ["vm", "debug"]) + def test_dynamic_scatter(self, target, dev, executor_kind, dshape, ishape, axis): d = relay.var("d", relay.TensorType([relay.Any() for i in 
range(len(dshape))], "float32")) i = relay.var("i", relay.TensorType([relay.Any() for i in range(len(ishape))], "int64")) u = relay.var("u", relay.TensorType([relay.Any() for i in range(len(ishape))], "float32")) @@ -1006,47 +1032,15 @@ def verify_dynamic_scatter(dshape, ishape, axis=0): ref_res = ref_scatter(data_np, indices_np, updates_np, axis) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["vm", "debug"]: - mod = tvm.ir.IRModule.from_expr(func) - op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()( - data_np, indices_np, updates_np - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + mod = tvm.ir.IRModule.from_expr(func) + op_res = relay.create_executor( + executor_kind, mod=mod, device=dev, target=target + ).evaluate()(data_np, indices_np, updates_np) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) - verify_scatter((10,), (10,), 0) - verify_scatter((10, 5), (10, 5), -2) - verify_scatter((10, 5), (10, 5), -1) - verify_scatter((10, 5), (3, 5), 0) - verify_scatter((12, 4), (7, 2), 1) - verify_scatter((2, 3, 4), (1, 3, 4), 0) - verify_scatter((2, 3, 4), (2, 1, 4), 1) - verify_scatter((2, 3, 4), (2, 3, 1), 2) - verify_scatter((4, 2, 1), (1, 1, 1), 0) - verify_scatter((2, 3, 4, 5), (1, 3, 4, 5), 0) - verify_scatter((6, 3, 4, 5), (2, 3, 4, 5), 1) - verify_scatter((2, 3, 8, 5), (2, 3, 1, 1), 2) - verify_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3) - verify_dynamic_scatter((10,), (10,), 0) - verify_dynamic_scatter((10, 5), (10, 5), -2) - verify_dynamic_scatter((10, 5), (10, 5), -1) - verify_dynamic_scatter((10, 5), (3, 5), 0) - verify_dynamic_scatter((12, 4), (7, 2), 1) - verify_dynamic_scatter((2, 3, 4), (1, 3, 4), 0) - verify_dynamic_scatter((2, 3, 4), (2, 1, 4), 1) - verify_dynamic_scatter((2, 3, 4), (2, 3, 1), 2) - verify_dynamic_scatter((4, 2, 1), (1, 1, 1), 0) - verify_dynamic_scatter((2, 3, 4, 5), (1, 3, 4, 5), 0) - verify_dynamic_scatter((6, 3, 4, 5), (2, 3, 4, 5), 1) - verify_dynamic_scatter((2, 3, 8, 5), (2, 3, 1, 1), 2) - verify_dynamic_scatter((16, 16, 4, 5), (16, 16, 4, 5), 3) - - -@tvm.testing.uses_gpu -@pytest.mark.parametrize( - "dshape, ishape, axis, dtype", - [ +class TestScatterAdd: + dshape, ishape, axis, dtype = tvm.testing.parameters( ((10,), (10,), 0, "int32"), ((1000,), (1000,), 0, "int32"), ((10, 5), (10, 5), -2, "float32"), @@ -1060,18 +1054,25 @@ def verify_dynamic_scatter(dshape, ishape, axis=0): ((6, 3, 4, 5), (2, 3, 4, 5), 1, "float32"), ((2, 3, 8, 5), (2, 3, 1, 1), 2, "float32"), ((16, 16, 4, 5), (16, 16, 4, 5), 3, "float32"), - ], -) -def test_scatter_add(dshape, ishape, axis, dtype): - def ref_scatter_add(data, indices, updates, axis=0): - output = np.copy(data) - for index in np.ndindex(*indices.shape): - new_index = list(index) - new_index[axis] = indices[index] - output[tuple(new_index)] += updates[index] - return output + ) - def verify_scatter_add(dshape, ishape, axis=0, dtype="float32"): + @tvm.testing.fixture(cache_return_value=True) + def ref_data(self, dshape, ishape, axis, dtype): + data_np = np.random.uniform(size=dshape).astype(dtype) + updates_np = np.random.uniform(size=ishape).astype(dtype) + indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype("int64") + + out_np = np.copy(data_np) + for index in np.ndindex(*indices_np.shape): + new_index = list(index) + new_index[axis] = indices_np[index] + out_np[tuple(new_index)] += updates_np[index] + return data_np, updates_np, indices_np, out_np + + # Optimization can produce tir.atomic_add, not 
currently supported + # on vulkan runtime. + @tvm.testing.known_failing_targets("vulkan") + def test_scatter_add(self, target, dev, ref_data, dshape, ishape, axis, dtype): d = relay.var("d", relay.TensorType(shape=[relay.Any() for _ in dshape], dtype=dtype)) i = relay.var("i", relay.TensorType(shape=[relay.Any() for _ in ishape], dtype="int64")) u = relay.var("u", relay.TensorType(shape=[relay.Any() for _ in ishape], dtype=dtype)) @@ -1079,22 +1080,11 @@ def verify_scatter_add(dshape, ishape, axis=0, dtype="float32"): func = relay.Function([d, i, u], z) - data_np = np.random.uniform(size=dshape).astype(dtype) - updates_np = np.random.uniform(size=ishape).astype(dtype) - indices_np = np.random.randint(-dshape[axis], dshape[axis] - 1, ishape).astype("int64") + data_np, updates_np, indices_np, out_np = ref_data - ref_res = ref_scatter_add(data_np, indices_np, updates_np, axis) + verify_func(target, dev, func, [data_np, indices_np, updates_np], out_np) - verify_func( - func, - [data_np, indices_np, updates_np], - ref_res, - ) - - verify_scatter_add(dshape, ishape, axis, dtype) - -@tvm.testing.uses_gpu @pytest.mark.parametrize( "data, axis, indices, ref_res", [ @@ -1250,7 +1240,7 @@ def verify_scatter_add(dshape, ishape, axis=0, dtype="float32"): ), ], ) -def test_gather(data, axis, indices, ref_res): +def test_gather(target, dev, executor_kind, data, axis, indices, ref_res): def verify_gather(data, axis, indices, ref_res): data = np.asarray(data, dtype="float32") indices = np.asarray(indices, dtype="int32") @@ -1261,18 +1251,15 @@ def verify_gather(data, axis, indices, ref_res): func = relay.Function([d, i], z) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - data, indices - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + data, indices + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_gather(data, axis, indices, ref_res) -@tvm.testing.uses_gpu -def test_gather_nd(): +def test_gather_nd(target, dev, executor_kind): def verify_gather_nd(xshape, yshape, y_data, batch_dims=0): x = relay.var("x", relay.TensorType(xshape, "float32")) y = relay.var("y", relay.TensorType(yshape, "int32")) @@ -1289,12 +1276,10 @@ def verify_gather_nd(xshape, yshape, y_data, batch_dims=0): ref_res = ref_funcs.gather_nd(x_data, y_data, batch_dims) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - x_data, y_data - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + x_data, y_data + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) verify_gather_nd((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]]) verify_gather_nd((2, 2, 2), (2, 2), [[0, 1], [1, 0]]) @@ -1353,8 +1338,7 @@ def test_isinf(): _verify_infiniteness_ops(relay.isinf, np.isinf) -@tvm.testing.uses_gpu -def test_unravel_index(): +def test_unravel_index(target, dev, executor_kind): def verify_unravel_index(indices, shape, dtype): x_data = np.array(indices).astype(dtype) y_data = np.array(shape).astype(dtype) @@ -1372,12 +1356,10 @@ def verify_unravel_index(indices, shape, dtype): func = relay.Function([x, y], z) ref_res = np.unravel_index(x_data, y_data) - for target, dev in 
tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - x_data, y_data - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + x_data, y_data + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) for dtype in ["int64", "int32"]: verify_unravel_index([0, 1, 2, 3], [2, 2], dtype) @@ -1392,8 +1374,7 @@ def verify_unravel_index(indices, shape, dtype): # verify_unravel_index([0, 1, 2, 5], [2, 2], dtype) -@tvm.testing.uses_gpu -def test_sparse_to_dense(): +def test_sparse_to_dense(target, dev, executor_kind): def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_shape, xpected): sparse_indices_data = np.array(sparse_indices) sparse_values_data = np.array(sparse_values) @@ -1419,14 +1400,12 @@ def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_ assert zz.checked_type == relay.ty.TensorType(output_shape, str(sparse_values_data.dtype)) func = relay.Function(args, d) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - f = relay.create_executor(kind, device=dev, target=target).evaluate(func) - if default_value is None: - op_res = f(sparse_indices_data, sparse_values_data) - else: - op_res = f(sparse_indices_data, sparse_values_data, default_value_data) - tvm.testing.assert_allclose(op_res.numpy(), xpected, rtol=1e-5) + f = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func) + if default_value is None: + op_res = f(sparse_indices_data, sparse_values_data) + else: + op_res = f(sparse_indices_data, sparse_values_data, default_value_data) + tvm.testing.assert_allclose(op_res.numpy(), xpected, rtol=1e-5) verify_sparse_to_dense(1, 3, 0, [5], [0, 3, 0, 0, 0]) # scalar verify_sparse_to_dense([0, 1, 4], [3, 3, 3], 0, [5], [3, 3, 0, 0, 3]) # vector @@ -1454,10 +1433,9 @@ def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_ # verify_sparse_to_dense([[[[0, 1, 4], [0, 2, 4]]]], [[[[3.1, 3.1, 3.1]]]], 3.5, [5], [3.1, 3.1, 3.5, 3.5, 3.1]) -@tvm.testing.uses_gpu -@pytest.mark.parametrize( - "sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np", - [ +class TestSparseReshape: + + sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np = tvm.testing.parameters( ( np.array([[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]], dtype=np.int32), np.array([7, 5, 6, 3, 9], dtype=np.int32), @@ -1542,46 +1520,48 @@ def verify_sparse_to_dense(sparse_indices, sparse_values, default_value, output_ np.array([3, 6], dtype=np.int32), np.array([-1, 2], dtype=np.int32), ), - ], -) -@pytest.mark.parametrize("use_dyn", [True, False]) -def test_sparse_reshape(sparse_indices_np, sparse_values_np, prev_shape_np, new_shape_np, use_dyn): - def ref_sparse_reshape( - sparse_indices: np.ndarray, - prev_shape: np.ndarray, - new_shape: np.ndarray, + ) + + use_dyn = tvm.testing.parameter(True, False, ids=["dyn", "static"]) + + @tvm.testing.fixture(cache_return_value=True) + def ref_res( + self, + sparse_indices_np: np.ndarray, + prev_shape_np: np.ndarray, + new_shape_np: np.ndarray, ): """ This function calculates the expected output of sparseshape operator given the inputs. 
""" new_sparse_indices = np.ones( - (sparse_indices.shape[0], new_shape.shape[0]), dtype=sparse_indices.dtype + (sparse_indices_np.shape[0], new_shape_np.shape[0]), dtype=sparse_indices_np.dtype ) - multipliers = np.ones(prev_shape.shape[0]) - dividers = np.ones(new_shape.shape[0]) - total_ele = np.prod(prev_shape) + multipliers = np.ones(prev_shape_np.shape[0]) + dividers = np.ones(new_shape_np.shape[0]) + total_ele = np.prod(prev_shape_np) division_total_ele = 1 - for i in range(new_shape.shape[0]): - if new_shape[i] == -1: + for i in range(new_shape_np.shape[0]): + if new_shape_np[i] == -1: continue - division_total_ele *= new_shape[i] - for i in range(prev_shape.shape[0] - 2, -1, -1): - multipliers[i] = prev_shape[i + 1] * multipliers[i + 1] + division_total_ele *= new_shape_np[i] + for i in range(prev_shape_np.shape[0] - 2, -1, -1): + multipliers[i] = prev_shape_np[i + 1] * multipliers[i + 1] - for i in range(len(new_shape)): - if new_shape[i] == -1: - new_shape[i] = total_ele // division_total_ele + for i in range(len(new_shape_np)): + if new_shape_np[i] == -1: + new_shape_np[i] = total_ele // division_total_ele - if np.array_equal(prev_shape, new_shape): - return sparse_indices, prev_shape + if np.array_equal(prev_shape_np, new_shape_np): + return sparse_indices_np, prev_shape_np - for i in range(new_shape.shape[0] - 2, -1, -1): - dividers[i] = new_shape[i + 1] * dividers[i + 1] + for i in range(new_shape_np.shape[0] - 2, -1, -1): + dividers[i] = new_shape_np[i + 1] * dividers[i + 1] - for row_num, sparse_row in enumerate(sparse_indices): + for row_num, sparse_row in enumerate(sparse_indices_np): flat_idx = 0 - if len(sparse_indices.shape) != 1: + if len(sparse_indices_np.shape) != 1: for i, ele in enumerate(sparse_row): flat_idx += sparse_row[i] * multipliers[i] else: @@ -1593,17 +1573,20 @@ def ref_sparse_reshape( else: new_sparse_indices[row_num] = flat_idx - return new_sparse_indices, new_shape + return new_sparse_indices, new_shape_np - def verify_sparse_reshape( - sparse_indices_np: np.ndarray, - sparse_values_np: np.ndarray, - prev_shape_np: np.ndarray, - new_shape_np: np.ndarray, + @tvm.testing.known_failing_targets("vulkan") + def test_sparse_reshape( + self, + target, + dev, + ref_res, + sparse_indices_np, + sparse_values_np, + prev_shape_np, + new_shape_np, + use_dyn, ): - """ - This function verifies the relay output of sparse_reshape with its expected output. 
- """ if use_dyn: sparse_indices = relay.var( "sparse_indices", @@ -1635,7 +1618,6 @@ def verify_sparse_reshape( func = relay.Function([sparse_indices, prev_shape, new_shape], z) - ref_res = ref_sparse_reshape(sparse_indices_np, prev_shape_np, new_shape_np) outputs = run_infer_type(z) new_sparse_indices_infer_type, new_shape_infer_type = ( outputs.checked_type.fields[0].dtype, @@ -1645,23 +1627,16 @@ def verify_sparse_reshape( assert new_sparse_indices_infer_type == sparse_indices_np.dtype assert new_shape_infer_type == new_shape_np.dtype verify_func( + target, + dev, func, [sparse_indices_np, prev_shape_np, new_shape_np], ref_res, ) - verify_sparse_reshape( - sparse_indices_np, - sparse_values_np, - prev_shape_np, - new_shape_np, - ) - -@tvm.testing.uses_gpu -@pytest.mark.parametrize( - "data_np, segment_ids_np, num_segments", - [ +class TestSegmentSum: + data_np, segment_ids_np, num_segments = tvm.testing.parameters( ( np.array([5, 1, 7, 2, 3, 4], dtype=np.float32), np.array([0, 0, 1, 1, 0, 1], dtype=np.int32), @@ -1697,28 +1672,40 @@ def verify_sparse_reshape( np.array([0, 0, 1, 5, 5], dtype=np.int32), 100, ), - ], -) -@pytest.mark.parametrize("use_dyn", [True, False]) -def test_segment_sum(data_np, segment_ids_np, num_segments, use_dyn): - def ref_segment_sum( - data: np.ndarray, - segment_ids: np.ndarray, - num_segments: Optional[int] = None, + ) + + use_dyn = tvm.testing.parameter(True, False, ids=["dyn", "static"]) + + @tvm.testing.fixture(cache_return_value=True) + def ref_res( + self, + data_np: np.ndarray, + segment_ids_np: np.ndarray, + num_segments: Optional[int], ): """ This function calculates the expected output of segment_sum operator given the inputs. """ if not num_segments: - num_segments = np.unique(segment_ids).shape[0] + num_segments = np.unique(segment_ids_np).shape[0] - result = np.zeros((num_segments,) + data.shape[1:], data.dtype) - for i, index in enumerate(segment_ids): - result[index] += data[i] + result = np.zeros((num_segments,) + data_np.shape[1:], data_np.dtype) + for i, index in enumerate(segment_ids_np): + result[index] += data_np[i] return result - def verify_segment_sum( - data_np: np.ndarray, segment_ids_np: np.ndarray, num_segments: Optional[int] + # Optimization can produce tir.atomic_add, not currently supported + # on vulkan runtime. + @tvm.testing.known_failing_targets("vulkan") + def test_segment_sum( + self, + target, + dev, + ref_res: np.ndarray, + data_np: np.ndarray, + segment_ids_np: np.ndarray, + num_segments: Optional[int], + use_dyn: bool, ): """ This function verifies the relay output of segment_sum with its expected output. 
@@ -1745,40 +1732,35 @@ def verify_segment_sum( z = relay.op.segment_sum(data, segment_ids, num_segments) func = relay.Function([data, segment_ids], z) - ref_res = ref_segment_sum(data_np, segment_ids_np, num_segments=num_segments) segment_sum_result = run_infer_type(z) assert segment_sum_result.checked_type.dtype == data_np.dtype verify_func( + target, + dev, func, [data_np, segment_ids_np], ref_res, ) - verify_segment_sum(data_np, segment_ids_np, num_segments) - -def verify_func(func, data, ref_res, target_device=tvm.testing.enabled_targets()): +def verify_func(target, dev, func, data, ref_res): assert isinstance(data, list) - for target, dev in target_device: - for kind in ["vm"]: - mod = tvm.ir.IRModule.from_expr(func) - op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()( - *data - ) - if isinstance(op_res, tvm.runtime.container.ADT): - assert len(op_res) == len( - ref_res - ), "Outputs from TVM and Python implementation must be equal " - - for op_result, ref_result in zip(op_res, ref_res): - tvm.testing.assert_allclose(op_result.numpy(), ref_result, rtol=1e-5) - else: - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) - relay.backend.compile_engine.get().clear() + for kind in ["vm"]: + mod = tvm.ir.IRModule.from_expr(func) + op_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()(*data) + if isinstance(op_res, tvm.runtime.container.ADT): + assert len(op_res) == len( + ref_res + ), "Outputs from TVM and Python implementation must be equal " + + for op_result, ref_result in zip(op_res, ref_res): + tvm.testing.assert_allclose(op_result.numpy(), ref_result, rtol=1e-5) + else: + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-5) + relay.backend.compile_engine.get().clear() -@tvm.testing.uses_gpu -def test_adv_index(): +def test_adv_index(target, dev, executor_kind): def verify_adv_index(data_shape, index_shapes): dtype = "float32" inputs = [relay.var("data", relay.TensorType(data_shape, dtype))] @@ -1793,12 +1775,10 @@ def verify_adv_index(data_shape, index_shapes): out = relay.op.adv_index(inputs) func = relay.Function(inputs, out) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - *np_args - ) - tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=1e-5) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + *np_args + ) + tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=1e-5) verify_adv_index((10, 5), [(3, 4), (3, 1)]) verify_adv_index( @@ -1815,7 +1795,12 @@ def verify_adv_index(data_shape, index_shapes): def run_binop_tests( - target, dev, binop_type: str, gt_func: Callable[..., np.array], identity_value: int + target, + dev, + executor_kind, + binop_type: str, + gt_func: Callable[..., np.array], + identity_value: int, ): def assert_relay_scanop( data_np: np.array, @@ -1833,9 +1818,10 @@ def assert_relay_scanop( out = scanops_supported[binop_type](inp, axis, out_dtype, exclusive=exclusive) func = relay.Function([inp], out) - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)(data_np) - tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=rtol, atol=atol) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + data_np + ) + tvm.testing.assert_allclose(op_res.numpy(), np_out, rtol=rtol, atol=atol) data = np.array([2, 3, 0]) 
assert_relay_scanop(data, gt_func(data)) @@ -1873,17 +1859,21 @@ def assert_relay_scanop( @tvm.testing.parametrize_targets -def test_cumsum(target, dev): - run_binop_tests(target, dev, binop_type="cumsum", gt_func=np.cumsum, identity_value=0) +def test_cumsum(target, dev, executor_kind): + run_binop_tests( + target, dev, executor_kind, binop_type="cumsum", gt_func=np.cumsum, identity_value=0 + ) @tvm.testing.parametrize_targets -def test_cumprod(target, dev): - run_binop_tests(target, dev, binop_type="cumprod", gt_func=np.cumprod, identity_value=1) +def test_cumprod(target, dev, executor_kind): + run_binop_tests( + target, dev, executor_kind, binop_type="cumprod", gt_func=np.cumprod, identity_value=1 + ) @tvm.testing.parametrize_targets -def test_scatter_nd(target, dev): +def test_scatter_nd(target, dev, executor_kind): def verify_scatter_nd( data_np, indices_np, updates_np, ref_res, mode="add", rtol=1e-5, atol=1e-5 ): @@ -1894,11 +1884,10 @@ def verify_scatter_nd( out = relay.op.scatter_nd(data, indices, updates, mode) func = relay.Function([data, indices, updates], out) - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - data_np, indices_np, updates_np - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + data_np, indices_np, updates_np + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol) def verify_scatter_nd_with_stack( data_np, indices_np, updates_np, ref_res, mode="add", rtol=1e-5, atol=1e-5 @@ -1921,9 +1910,10 @@ def verify_scatter_nd_with_stack( fargs = [data_np, updates_np] for a in indices_np: fargs.append(a) - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)(*fargs) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + *fargs + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=rtol, atol=atol) data = np.zeros((2, 2)).astype("int64") indices = np.array([[1, 1, 0], [0, 1, 0]]) @@ -1968,7 +1958,7 @@ def verify_scatter_nd_with_stack( verify_scatter_nd_with_stack(data, indices, updates, out, mode) -def test_unique(): +def test_unique(target, dev): def calc_numpy_unique(data, is_sorted=False): uniq, index, inverse, counts = np.unique( data, return_index=True, return_inverse=True, return_counts=True @@ -2004,32 +1994,27 @@ def verify_unique(n, dtype, is_dyn=False, is_sorted=False, return_counts=False): else: backends = ["graph", "debug"] - for target, dev in tvm.testing.enabled_targets(): - for kind in backends: - mod = tvm.ir.IRModule.from_expr(func) - tvm_res = relay.create_executor( - kind, mod=mod, device=dev, target=target - ).evaluate()( - x_data - ) # unique, indices, inverse_indices, num_unique, (counts) - np_res = calc_numpy_unique( - x_data, is_sorted - ) # unique, indices, inverse_indices, num_unique, counts - num_unique = np_res[3][0] - - # num_unique - assert num_unique == tvm_res[3].numpy()[0] - # unique - tvm.testing.assert_allclose(tvm_res[0].numpy()[:num_unique], np_res[0], rtol=1e-5) - # indices - tvm.testing.assert_allclose(tvm_res[1].numpy()[:num_unique], np_res[1], rtol=1e-5) - # inverse_indices - tvm.testing.assert_allclose(tvm_res[2].numpy(), np_res[2], rtol=1e-5) - # counts - if return_counts: - tvm.testing.assert_allclose( - tvm_res[4].numpy()[:num_unique], 
np_res[4], rtol=1e-5 - ) + for kind in backends: + mod = tvm.ir.IRModule.from_expr(func) + tvm_res = relay.create_executor(kind, mod=mod, device=dev, target=target).evaluate()( + x_data + ) # unique, indices, inverse_indices, num_unique, (counts) + np_res = calc_numpy_unique( + x_data, is_sorted + ) # unique, indices, inverse_indices, num_unique, counts + num_unique = np_res[3][0] + + # num_unique + assert num_unique == tvm_res[3].numpy()[0] + # unique + tvm.testing.assert_allclose(tvm_res[0].numpy()[:num_unique], np_res[0], rtol=1e-5) + # indices + tvm.testing.assert_allclose(tvm_res[1].numpy()[:num_unique], np_res[1], rtol=1e-5) + # inverse_indices + tvm.testing.assert_allclose(tvm_res[2].numpy(), np_res[2], rtol=1e-5) + # counts + if return_counts: + tvm.testing.assert_allclose(tvm_res[4].numpy()[:num_unique], np_res[4], rtol=1e-5) for dtype in ["int32", "int64"]: for i in range(8): @@ -2038,4 +2023,4 @@ def verify_unique(n, dtype, is_dyn=False, is_sorted=False, return_counts=False): if __name__ == "__main__": - pytest.main([__file__]) + sys.exit(pytest.main(sys.argv)) diff --git a/tests/python/relay/test_op_level4.py b/tests/python/relay/test_op_level4.py index 6415976bfd59..7b8e922fb721 100644 --- a/tests/python/relay/test_op_level4.py +++ b/tests/python/relay/test_op_level4.py @@ -14,15 +14,22 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +import sys + import numpy as np import numpy.random +import pytest + import tvm import tvm.testing import tvm.topi.testing + from tvm import relay, te from tvm.relay import transform from tvm.relay.testing import run_infer_type +executor_kind = tvm.testing.parameter("graph", "debug") + @tvm.testing.uses_gpu def test_binary_op(): @@ -223,123 +230,146 @@ def verify(x_np, y_np, cond_np): verify(x_np.astype(dtype), y_np.astype(dtype), cond_np) -def verify_reduce(funcs, data, axis, keepdims, exclude, output, dtype="float32"): - test_func = funcs[0] - ref_func = funcs[1] - dtype = "bool" if ref_func in [np.all, np.any] else dtype - - x = relay.var("x", relay.TensorType(data, dtype)) - if test_func == relay.logsumexp: - z = test_func(x, axis, keepdims) - else: - z = test_func(x, axis, keepdims, exclude) - zz = run_infer_type(z) - if axis: - assert "axis=" in z.astext() - if keepdims: - assert "keepdims=" in z.astext() - if exclude: - assert "exclude=" in z.astext() - out_type = "int32" if test_func in [relay.argmin, relay.argmax] else dtype - assert zz.checked_type == relay.ty.TensorType(output, out_type) - - if all(isinstance(v, tvm.tir.Var) == 1 for v in data): - return - - func = relay.Function([x], z) - x_data = ( - np.random.choice([True, False], size=data) - if ref_func in [np.all] - else np.random.uniform(size=data).astype(dtype) +def _with_keepdims(func): + def _wrapper(data, axis=None, keepdims=False): + if not keepdims: + return func(data, axis=axis) + else: + if axis is not None: + axis = axis if isinstance(axis, int) else axis[0] + out_shape = list(data.shape) + out_shape[axis] = 1 + else: + out_shape = [1 for _ in range(len(data.shape))] + return func(data, axis=axis).reshape(out_shape) + + return _wrapper + + +def _np_log_sum_exp(x, axis, keepdims=False): + max_x = np.max(x, axis=axis, keepdims=True) + x = np.log(np.sum(np.exp(x - max_x), axis=axis, keepdims=True)) + x = x + max_x + if not keepdims: + x = np.squeeze(x, axis=axis) + return x + + +def _unbiased_relay_wrapper(f): + def _unbiased_func(x, axis=None, keepdims=False, exclude=False): + 
return f(x, axis=axis, keepdims=keepdims, exclude=exclude, unbiased=True) + + return _unbiased_func + + +def _unbiased_np_wrapper(f): + def _unbiased_func(a, axis=None, dtype=None, keepdims=None): + return f(a, axis=axis, dtype=dtype, ddof=1, keepdims=keepdims) + + return _unbiased_func + + +class TestReduceFunctions: + funcs = { + "sum": (relay.sum, np.sum), + "max": (relay.max, np.max), + "min": (relay.min, np.min), + "mean": (relay.mean, np.mean), + "var": (relay.variance, np.var), + "unbiased_var": (_unbiased_relay_wrapper(relay.variance), _unbiased_np_wrapper(np.var)), + "std": (relay.std, np.std), + "unbiased_std": (_unbiased_relay_wrapper(relay.std), _unbiased_np_wrapper(np.std)), + "prod": (relay.prod, np.prod), + "all": (relay.all, np.all), + "any": (relay.any, np.any), + "logsumexp": (relay.logsumexp, _np_log_sum_exp), + "argmin": (relay.argmin, _with_keepdims(np.argmin)), + "argmax": (relay.argmax, _with_keepdims(np.argmax)), + } + relay_func, ref_func = tvm.testing.parameters( + *funcs.values(), + ids=list(funcs), ) - if ref_func in [np.sum]: - ref_res = ref_func(x_data + 0, axis=axis, dtype=dtype, keepdims=keepdims) - elif ref_func in [np.max, np.min, np.mean, np.prod]: - ref_res = ref_func(x_data + 0, axis=axis, keepdims=keepdims) - else: # argmin/argmax - if axis and not isinstance(axis, int) and len(axis) > 1: - return - ref_res = ref_func(x_data + 0, axis=axis, keepdims=keepdims) - - for target, dev in tvm.testing.enabled_targets(): - op_res1 = relay.create_executor("graph", device=dev, target=target).evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5) - op_res2 = relay.create_executor("debug", device=dev, target=target).evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res2.numpy(), ref_res, rtol=1e-5) - + d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4") -@tvm.testing.uses_gpu -def test_reduce_functions(): - def _with_keepdims(func): - def _wrapper(data, axis=None, keepdims=False): - if not keepdims: - return func(data, axis=axis) - else: - if axis is not None: - axis = axis if isinstance(axis, int) else axis[0] - out_shape = list(data.shape) - out_shape[axis] = 1 - else: - out_shape = [1 for _ in range(len(data.shape))] - return func(data, axis=axis).reshape(out_shape) - - return _wrapper - - def _np_log_sum_exp(x, axis, keepdims=False): - max_x = np.max(x, axis=axis, keepdims=True) - x = np.log(np.sum(np.exp(x - max_x), axis=axis, keepdims=True)) - x = x + max_x - if not keepdims: - x = np.squeeze(x, axis=axis) - return x + data, axis, keepdims, exclude, output = tvm.testing.parameters( + ((d1, d2, d3, d4), None, False, False, ()), + ((d1, d2, d3, d4), 2, True, False, (d1, d2, 1, d4)), + ((d1, d2, d3, d4), 0, True, False, (1, d2, d3, d4)), + ((d1, d2, d3), 1, True, False, (d1, 1, d3)), + ((d1, d2, d3), 0, True, False, (1, d2, d3)), + ((d1, d2, d3), None, True, False, (1, 1, 1)), + ((d1, d2, d3), (0, 1), True, False, (1, 1, d3)), + ((2, 3, 4), 1, True, False, (2, 1, 4)), + ((2, 3, 4), (1,), True, False, (2, 1, 4)), + ((2, 3, 4), -1, True, False, (2, 3, 1)), + ((2, 3, 4), (0, 1, 2), False, False, ()), + ((4, 4, 3), None, False, False, ()), + ((4, 4, 3), (0, 2), False, False, (4,)), + ((128, 24, 128), (0, 1), False, False, (128,)), + ((128, 24, 128), (0, 2), False, False, (24,)), + ((128, 24, 128), (0, 1), True, False, (1, 1, 128)), + ((128, 24, 128), (0, 2), True, False, (1, 24, 1)), + ) - def _unbiased_relay_wrapper(f): - def _unbiased_func(x, axis=None, keepdims=False, exclude=False): - return f(x, 
axis=axis, keepdims=keepdims, exclude=exclude, unbiased=True) + def test_reduce( + self, + target, + dev, + relay_func, + ref_func, + executor_kind, + data, + axis, + keepdims, + exclude, + output, + ): + dtype = "bool" if ref_func in [np.all, np.any] else "float32" + out_type = "int32" if relay_func in [relay.argmin, relay.argmax] else dtype - return _unbiased_func + target = tvm.target.Target(target) + if target.kind.name == "vulkan" and dtype == "bool": + pytest.xfail("Known failing test on vulkan runtime") - def _unbiased_np_wrapper(f): - def _unbiased_func(a, axis=None, dtype=None, keepdims=None): - return f(a, axis=axis, dtype=dtype, ddof=1, keepdims=keepdims) + x = relay.var("x", relay.TensorType(data, dtype)) + if relay_func == relay.logsumexp: + z = relay_func(x, axis, keepdims) + else: + z = relay_func(x, axis, keepdims, exclude) + zz = run_infer_type(z) + if axis: + assert "axis=" in z.astext() + if keepdims: + assert "keepdims=" in z.astext() + if exclude: + assert "exclude=" in z.astext() + assert zz.checked_type == relay.ty.TensorType(output, out_type) + + if all(isinstance(v, tvm.tir.Var) == 1 for v in data): + return - return _unbiased_func + func = relay.Function([x], z) + x_data = ( + np.random.choice([True, False], size=data) + if ref_func in [np.all] + else np.random.uniform(size=data).astype(dtype) + ) - d1, d2, d3, d4 = te.var("d1"), te.var("d2"), te.var("d3"), te.var("d4") - for func in [ - [relay.sum, np.sum], - [relay.max, np.max], - [relay.min, np.min], - [relay.mean, np.mean], - [relay.variance, np.var], - [_unbiased_relay_wrapper(relay.variance), _unbiased_np_wrapper(np.var)], - [relay.std, np.std], - [_unbiased_relay_wrapper(relay.std), _unbiased_np_wrapper(np.std)], - [relay.prod, np.prod], - [relay.all, np.all], - [relay.any, np.any], - [relay.logsumexp, _np_log_sum_exp], - [relay.argmin, _with_keepdims(np.argmin)], - [relay.argmax, _with_keepdims(np.argmax)], - ]: - verify_reduce(func, (d1, d2, d3, d4), None, False, False, ()) - verify_reduce(func, (d1, d2, d3, d4), 2, True, False, (d1, d2, 1, d4)) - verify_reduce(func, (d1, d2, d3, d4), 0, True, False, (1, d2, d3, d4)) - verify_reduce(func, (d1, d2, d3), 1, True, False, (d1, 1, d3)) - verify_reduce(func, (d1, d2, d3), 0, True, False, (1, d2, d3)) - verify_reduce(func, (d1, d2, d3), None, True, False, (1, 1, 1)) - verify_reduce(func, (d1, d2, d3), (0, 1), True, False, (1, 1, d3)) - verify_reduce(func, (2, 3, 4), 1, True, False, (2, 1, 4)) - verify_reduce(func, (2, 3, 4), (1,), True, False, (2, 1, 4)) - verify_reduce(func, (2, 3, 4), -1, True, False, (2, 3, 1)) - verify_reduce(func, (2, 3, 4), (0, 1, 2), False, False, ()) - verify_reduce(func, (4, 4, 3), None, False, False, ()) - verify_reduce(func, (4, 4, 3), (0, 2), False, False, (4,)) - verify_reduce(func, (128, 24, 128), (0, 1), False, False, (128,)) - verify_reduce(func, (128, 24, 128), (0, 2), False, False, (24,)) - verify_reduce(func, (128, 24, 128), (0, 1), True, False, (1, 1, 128)) - verify_reduce(func, (128, 24, 128), (0, 2), True, False, (1, 24, 1)) + if ref_func in [np.sum]: + ref_res = ref_func(x_data + 0, axis=axis, dtype=dtype, keepdims=keepdims) + elif ref_func in [np.max, np.min, np.mean, np.prod]: + ref_res = ref_func(x_data + 0, axis=axis, keepdims=keepdims) + else: # argmin/argmax + if axis and not isinstance(axis, int) and len(axis) > 1: + return + ref_res = ref_func(x_data + 0, axis=axis, keepdims=keepdims) + + op_res1 = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + x_data + ) + 
tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5) @tvm.testing.uses_gpu @@ -611,13 +641,4 @@ def verify(dshape, begin, end, strides, vshape, test_ref=True): if __name__ == "__main__": - test_strided_slice() - test_dyn_strided_slice() - # test_strided_set() - # test_binary_op() - # test_cmp_type() - # test_binary_int_broadcast_1() - # test_binary_int_broadcast_2() - # test_where() - # test_reduce_functions() - # test_mean_var_std() + sys.exit(pytest.main(sys.argv)) diff --git a/tests/python/relay/test_op_level5.py b/tests/python/relay/test_op_level5.py index c08b538d22e6..3414fd453646 100644 --- a/tests/python/relay/test_op_level5.py +++ b/tests/python/relay/test_op_level5.py @@ -17,14 +17,19 @@ """ Support level5 operator test cases. """ import math +import sys import numpy as np +import pytest + import tvm import tvm.testing import tvm.topi.testing from tvm import relay, te from tvm.relay.testing import run_infer_type +executor_kind = tvm.testing.parameter("graph", "debug") + def test_resize1d_infer_type(): n, c, w = te.size_var("n"), te.size_var("c"), te.size_var("w") @@ -41,9 +46,31 @@ def test_resize1d_infer_type(): assert zz.checked_type == relay.TensorType((n, c, 200), "int8") -@tvm.testing.uses_gpu -def test_resize1d(): - def verify_resize(dshape, scale, method, layout, coord_trans): +class TestResize1D: + interpolate_method = tvm.testing.parameter("nearest_neighbor", "linear", "cubic") + coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel") + + layout = tvm.testing.parameter("NWC", "NCW") + dshape, scale = tvm.testing.parameters( + ((1, 4, 4), 2), + ((2, 8, 17), 3), + ((2, 8, 17), 3), + ((3, 4, 5), 5), + ) + + def test_resize( + self, target, dev, executor_kind, dshape, scale, interpolate_method, layout, coord_trans + ): + target_kind = tvm.target.Target(target).kind.name + if ( + target_kind == "vulkan" + and dshape == (3, 4, 5) + and scale == 5 + and interpolate_method == "nearest_neighbor" + and coord_trans == "align_corners" + ): + pytest.xfail("Known failing case for these parameters") + if layout == "NWC": size = (dshape[1] * scale,) else: @@ -51,29 +78,21 @@ def verify_resize(dshape, scale, method, layout, coord_trans): x_data = np.random.uniform(size=dshape).astype("float32") - ref_res = tvm.topi.testing.resize1d_python(x_data, (scale,), layout, method, coord_trans) + ref_res = tvm.topi.testing.resize1d_python( + x_data, (scale,), layout, interpolate_method, coord_trans + ) x = relay.var("x", relay.TensorType(dshape, "float32")) z = relay.image.resize1d( - x, size, layout, method, coordinate_transformation_mode=coord_trans + x, size, layout, interpolate_method, coordinate_transformation_mode=coord_trans ) assert "size=" in z.astext() zz = run_infer_type(z) assert zz.checked_type == relay.TensorType(ref_res.shape, "float32") func = relay.Function([x], z) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - x_data - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-4) - - for method in ["nearest_neighbor", "linear", "cubic"]: - for coord_trans in ["asymmetric", "align_corners", "half_pixel"]: - for layout in ["NWC", "NCW"]: - verify_resize((1, 4, 4), 2, method, layout, coord_trans) - verify_resize((2, 8, 17), 3, method, layout, coord_trans) - verify_resize((2, 8, 17), 3, method, layout, coord_trans) - verify_resize((3, 4, 5), 5, method, layout, coord_trans) + op_res = 
relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + x_data + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-4) def test_resize2d_infer_type(): @@ -91,9 +110,32 @@ def test_resize2d_infer_type(): assert zz.checked_type == relay.TensorType((n, c, 100, 200), "int8") -@tvm.testing.uses_gpu -def test_resize2d(): - def verify_resize(dshape, scale, method, layout, coord_trans): +class TestResize2D: + interpolate_method = tvm.testing.parameter("nearest_neighbor", "linear", "cubic") + coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel") + + layout = tvm.testing.parameter("NHWC", "NCHW") + + dshape, scale = tvm.testing.parameters( + ((1, 4, 4, 4), 2), + ((2, 8, 17, 20), 3), + ((2, 8, 17, 20), 3), + ((3, 4, 5, 6), 5), + ) + + def test_resize( + self, target, dev, executor_kind, dshape, scale, interpolate_method, layout, coord_trans + ): + target_kind = tvm.target.Target(target).kind.name + if ( + target_kind == "vulkan" + and dshape == (3, 4, 5, 6) + and scale == 5 + and interpolate_method == "nearest_neighbor" + and coord_trans == "align_corners" + ): + pytest.xfail("Known failing case for these parameters") + if layout == "NHWC": size = (dshape[1] * scale, dshape[2] * scale) else: @@ -102,30 +144,20 @@ def verify_resize(dshape, scale, method, layout, coord_trans): x_data = np.random.uniform(size=dshape).astype("float32") ref_res = tvm.topi.testing.resize2d_python( - x_data, (scale, scale), layout, method, coord_trans + x_data, (scale, scale), layout, interpolate_method, coord_trans ) x = relay.var("x", relay.TensorType(dshape, "float32")) z = relay.image.resize2d( - x, size, layout, method, coordinate_transformation_mode=coord_trans + x, size, layout, interpolate_method, coordinate_transformation_mode=coord_trans ) assert "size=" in z.astext() zz = run_infer_type(z) assert zz.checked_type == relay.TensorType(ref_res.shape, "float32") func = relay.Function([x], z) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - x_data - ) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-4) - - for method in ["nearest_neighbor", "linear", "cubic"]: - for coord_trans in ["asymmetric", "align_corners", "half_pixel"]: - for layout in ["NHWC", "NCHW"]: - verify_resize((1, 4, 4, 4), 2, method, layout, coord_trans) - verify_resize((2, 8, 17, 20), 3, method, layout, coord_trans) - verify_resize((2, 8, 17, 20), 3, method, layout, coord_trans) - verify_resize((3, 4, 5, 6), 5, method, layout, coord_trans) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + x_data + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-4) def test_resize3d_infer_type(): @@ -149,9 +181,19 @@ def test_resize3d_infer_type(): assert zz.checked_type == relay.TensorType((n, c, 10, 10, 20), "int8") -@tvm.testing.parametrize_targets -def test_resize3d(target, dev): - def verify_resize(dshape, scale, method, layout): +class TestResize3D: + interpolate_method = tvm.testing.parameter("nearest_neighbor", "linear", "cubic") + coord_trans = tvm.testing.parameter("asymmetric", "align_corners", "half_pixel") + + layout = tvm.testing.parameter("NDHWC", "NCDHW") + + dshape, scale = tvm.testing.parameters( + ((1, 4, 4, 4, 4), 2), + ) + + def test_resize( + self, target, dev, executor_kind, dshape, scale, interpolate_method, layout, coord_trans + ): if layout == "NDHWC": 
size = (dshape[1] * scale, dshape[2] * scale, dshape[3] * scale) else: @@ -159,35 +201,59 @@ def verify_resize(dshape, scale, method, layout): x_data = np.random.uniform(size=dshape).astype("float32") ref_res = tvm.topi.testing.resize3d_python( - x_data, (scale, scale, scale), layout, method, "align_corners" + x_data, (scale, scale, scale), layout, interpolate_method, coord_trans ) x = relay.var("x", relay.TensorType(dshape, "float32")) - z = relay.image.resize3d(x, size, layout, method, "align_corners") + z = relay.image.resize3d(x, size, layout, interpolate_method, coord_trans) assert "size=" in z.astext() zz = run_infer_type(z) assert zz.checked_type == relay.TensorType(ref_res.shape, "float32") func = relay.Function([x], z) - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)(x_data) - tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6) + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + x_data + ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-4, atol=1e-6) - for method in ["nearest_neighbor", "linear", "cubic"]: - for coord_trans in ["asymmetric", "align_corners", "half_pixel"]: - for layout in ["NDHWC", "NCDHW"]: - verify_resize((1, 4, 4, 4, 4), 2, method, layout) +class TestCropAndResize: + interpolate_method = tvm.testing.parameter("bilinear", "nearest_neighbor") + layout = tvm.testing.parameter("NHWC", "NCHW") -@tvm.testing.uses_gpu -def test_crop_and_resize(): - def verify_crop_and_resize( - img_shape, boxes, box_indices, crop_size, layout, method, extrapolation_value=0.0 - ): + def test_crop_and_resize(self, target, dev, executor_kind, layout, interpolate_method): + target_kind = tvm.target.Target(target).kind.name + if ( + target_kind == "vulkan" + and layout == "NHWC" + and interpolate_method == "nearest_neighbor" + ): + pytest.xfail("Known failing case for these parameters") + + extrapolation_value = 0.0 + + if layout == "NHWC": + img_shape = (10, 224, 224, 3) + boxes = np.array([[0.1, 0.2, 0.8, 0.7], [0.2, 0, 1, 0.6]]).astype("float32") + box_indices = np.array([1, 0]).astype("int32") + crop_size = np.array([20, 30]).astype("int32") + elif layout == "NCHW": + img_shape = (5, 3, 255, 255) + boxes = np.array([[0, 0, 1, 1], [0.2, 0.1, 1, 0.9]]).astype("float32") + box_indices = np.array([0, 1]).astype("int32") + crop_size = np.array([30, 30]).astype("int32") + else: + raise ValueError(f"Unknown layout: {layout}") image_data = np.random.uniform(size=img_shape).astype("float32") ref_res = tvm.topi.testing.crop_and_resize_python( - image_data, boxes, box_indices, crop_size, layout, method, extrapolation_value + image_data, + boxes, + box_indices, + crop_size, + layout, + interpolate_method, + extrapolation_value, ) img = relay.var("img", relay.TensorType(img_shape, "float32")) @@ -195,33 +261,16 @@ def verify_crop_and_resize( bx_idx = relay.var("bx_idx", relay.TensorType(box_indices.shape, "int32")) z = relay.image.crop_and_resize( - img, bx, bx_idx, list(crop_size), layout, method, extrapolation_value + img, bx, bx_idx, list(crop_size), layout, interpolate_method, extrapolation_value ) zz = run_infer_type(z) assert zz.checked_type == relay.TensorType(ref_res.shape, "float32") func = relay.Function([img, bx, bx_idx], z) - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug"]: - op_res = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - image_data, boxes, box_indices - ) - 
tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-04) - - boxes_nhwc = np.array([[0.1, 0.2, 0.8, 0.7], [0.2, 0, 1, 0.6]]).astype("float32") - indices_nhwc = np.array([1, 0]).astype("int32") - size_nhwc = np.array([20, 30]).astype("int32") - boxes_nchw = np.array([[0, 0, 1, 1], [0.2, 0.1, 1, 0.9]]).astype("float32") - indices_nchw = np.array([0, 1]).astype("int32") - size_nchw = np.array([30, 30]).astype("int32") - - for method in ["bilinear", "nearest_neighbor"]: - verify_crop_and_resize( - (10, 224, 224, 3), boxes_nhwc, indices_nhwc, size_nhwc, "NHWC", method - ) - verify_crop_and_resize( - (5, 3, 255, 255), boxes_nchw, indices_nchw, size_nchw, "NCHW", method, 0.1 + op_res = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + image_data, boxes, box_indices ) + tvm.testing.assert_allclose(op_res.numpy(), ref_res, rtol=1e-3, atol=1e-04) @tvm.testing.uses_gpu @@ -957,90 +1006,74 @@ def verify_yolo_reorg(shape, stride): verify_yolo_reorg((1, 4, 6, 6), 2) -@tvm.testing.uses_gpu -def test_deformable_conv2d(): - def test_infer_type(batch, in_channel, size, out_channel, deformable_groups, groups, layout): - kernel_size = (3, 3) +class TestDeformableConv2D: + batch, in_channel, size, out_channel, deformable_groups = tvm.testing.parameters( + (1, 4, 16, 4, 4), + (2, 4, 16, 4, 1), + ) + kernel_size = tvm.testing.parameter((3, 3)) + groups = tvm.testing.parameter(1, 2) + layout = tvm.testing.parameter("NCHW", "NHWC") + dtype = tvm.testing.parameter("float32") + + @tvm.testing.fixture + def data_shape(self, layout, batch, in_channel, size): if layout == "NCHW": - kernel_layout = "OIHW" - data_shape = (batch, in_channel, size, size) - weight_shape = (out_channel, in_channel // groups, kernel_size[0], kernel_size[1]) - out_shape = (batch, out_channel, size, size) - offset_shape = ( - batch, - 2 * kernel_size[0] * kernel_size[1] * deformable_groups, - out_shape[2], - out_shape[3], - ) - else: - kernel_layout = "HWIO" - data_shape = (batch, size, size, in_channel) - weight_shape = (kernel_size[0], kernel_size[1], in_channel // groups, out_channel) - out_shape = (batch, size, size, out_channel) - offset_shape = ( - batch, - out_shape[1], - out_shape[2], - 2 * kernel_size[0] * kernel_size[1] * deformable_groups, - ) + return (batch, in_channel, size, size) + elif layout == "NHWC": + return (batch, size, size, in_channel) - data = relay.var("data", shape=data_shape) - offset = relay.var("offset") - kernel = relay.var("kernel") - y = relay.nn.deformable_conv2d( - data, - offset, - kernel, - strides=(1, 1), - padding=(1, 1), - dilation=(1, 1), - data_layout=layout, - kernel_layout=kernel_layout, - kernel_size=kernel_size, - deformable_groups=deformable_groups, - groups=groups, - channels=out_channel, - ) - yy = run_infer_type(y) - assert yy.checked_type == relay.TensorType(out_shape), yy.checked_type - assert yy.args[1].checked_type == relay.TensorType(offset_shape), yy.args[1].checked_type - assert yy.args[2].checked_type == relay.TensorType(weight_shape), yy.args[2].checked_type + @tvm.testing.fixture + def kernel_shape(self, layout, in_channel, out_channel, groups, kernel_size): + if layout == "NCHW": + return (out_channel, in_channel // groups, kernel_size[0], kernel_size[1]) + elif layout == "NHWC": + return (kernel_size[0], kernel_size[1], in_channel // groups, out_channel) - test_infer_type(1, 4, 16, 4, 4, 1, "NCHW") - test_infer_type(2, 4, 16, 4, 1, 2, "NCHW") - test_infer_type(1, 4, 16, 4, 4, 1, "NHWC") - test_infer_type(2, 4, 16, 4, 1, 2, "NHWC") + 
@tvm.testing.fixture + def out_shape(self, layout, batch, out_channel, size): + if layout == "NCHW": + return (batch, out_channel, size, size) + elif layout == "NHWC": + return (batch, size, size, out_channel) - def test_run(batch, in_channel, size, out_channel, deformable_groups, groups, layout): - kernel_size = (3, 3) + @tvm.testing.fixture + def offset_shape(self, layout, batch, kernel_size, deformable_groups, out_shape): if layout == "NCHW": - kernel_layout = "OIHW" - data_shape = (batch, in_channel, size, size) - kernel_shape = (out_channel, in_channel // groups, kernel_size[0], kernel_size[1]) - out_shape = (batch, out_channel, size, size) - offset_shape = ( + return ( batch, 2 * kernel_size[0] * kernel_size[1] * deformable_groups, out_shape[2], out_shape[3], ) - else: - kernel_layout = "HWIO" - data_shape = (batch, size, size, in_channel) - kernel_shape = (kernel_size[0], kernel_size[1], in_channel // groups, out_channel) - out_shape = (batch, size, size, out_channel) - offset_shape = ( + elif layout == "NHWC": + return ( batch, out_shape[1], out_shape[2], 2 * kernel_size[0] * kernel_size[1] * deformable_groups, ) - dtype = "float32" + @tvm.testing.fixture + def kernel_layout(self, layout): + return {"NCHW": "OIHW", "NHWC": "HWIO"}[layout] + + @tvm.testing.fixture + def relay_setup( + self, + dtype, + data_shape, + layout, + kernel_layout, + kernel_size, + deformable_groups, + groups, + out_channel, + ): data = relay.var("data", shape=data_shape, dtype=dtype) - offset = relay.var("offset") - kernel = relay.var("kernel") - y = relay.nn.deformable_conv2d( + offset = relay.var("offset", dtype=dtype) + kernel = relay.var("kernel", dtype=dtype) + expr = relay.nn.deformable_conv2d( data, offset, kernel, @@ -1054,7 +1087,37 @@ def test_run(batch, in_channel, size, out_channel, deformable_groups, groups, la groups=groups, channels=out_channel, ) - func = relay.Function([data, offset, kernel], y) + func = relay.Function([data, offset, kernel], expr) + return expr, func + + def test_infer_type(self, relay_setup, out_shape, offset_shape, kernel_shape): + expr, func = relay_setup + yy = run_infer_type(expr) + assert yy.checked_type == relay.TensorType(out_shape), yy.checked_type + assert yy.args[1].checked_type == relay.TensorType(offset_shape), yy.args[1].checked_type + assert yy.args[2].checked_type == relay.TensorType(kernel_shape), yy.args[2].checked_type + + # The reference python implementation only supports groups==1. 
+ @pytest.mark.parametrize("groups", [1]) + def test_run( + self, + target, + dev, + dtype, + executor_kind, + data_shape, + offset_shape, + kernel_shape, + relay_setup, + deformable_groups, + groups, + layout, + ): + target = tvm.target.Target(target) + if layout == "NHWC" and target.kind.name != "llvm": + pytest.xfail("Can only run NHWC layout on llvm") + + expr, func = relay_setup data = np.random.uniform(size=data_shape).astype(dtype) offset = np.random.uniform(size=offset_shape).astype(dtype) kernel = np.random.uniform(size=kernel_shape).astype(dtype) @@ -1080,19 +1143,11 @@ def test_run(batch, in_channel, size, out_channel, deformable_groups, groups, la deformable_groups=deformable_groups, groups=groups, ) - for target, dev in tvm.testing.enabled_targets(): - if target == "cuda" and layout == "NHWC": - continue # Cannot run NHWC layout on cuda target, only on llvm - for kind in ["graph", "debug"]: - op_res1 = relay.create_executor(kind, device=dev, target=target).evaluate(func)( - data, offset, kernel - ) - tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) - test_run(1, 4, 16, 4, 1, 1, "NCHW") - test_run(1, 4, 16, 4, 1, 1, "NHWC") - test_run(2, 4, 16, 4, 4, 1, "NCHW") - test_run(2, 4, 16, 4, 4, 1, "NHWC") + op_res1 = relay.create_executor(executor_kind, device=dev, target=target).evaluate(func)( + data, offset, kernel + ) + tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) @tvm.testing.uses_gpu @@ -1202,119 +1257,111 @@ def test_dilation2d_infer_type(): assert yy.checked_type == relay.TensorType((n, 10, 217, 217), "float32") -@tvm.testing.uses_gpu -def test_dilation2d_run(): - def run_test_dilation2d( - indata, - kernel, - out, - dtype="float32", - strides=[1, 1], - padding=[0, 0], - dilations=[1, 1], - except_targets=["cuda"], - **attrs, - ): - - dshape = indata.shape - kshape = kernel.shape - - if except_targets is None: - except_targets = [] - - x = relay.var("x", shape=dshape, dtype=dtype) - w = relay.var("w", shape=kshape, dtype=dtype) - y = relay.image.dilation2d( - x, w, strides=strides, dilations=dilations, padding=padding, **attrs - ) - func = relay.Function([x, w], y) +class TestDilation2DRun: + data_layout, kernel_layout = tvm.testing.parameters(("NCHW", "IHW"), ("NHWC", "HWI")) + dtype = tvm.testing.parameter("float32") + + config = tvm.testing.parameter( + dict( + image=[[[[0.1], [0.2]], [[0.3], [0.4]]]], + kernel=[[[0.4], [0.3]], [[0.1], [0.0]]], + out=[[[[0.5]]]], + ), + dict( + image=[[[[0.1], [0.2]], [[0.3], [0.4]]]], + kernel=[[[0.4], [0.3]], [[0.1], [0.0]]], + out=[[[[0.5], [0.6]], [[0.7], [0.8]]]], + padding=[0, 0, 1, 1], + ), + dict( + image=[[[[0.1, 0.2, 0.0], [0.2, 0.3, 0.1]], [[0.3, 0.4, 0.2], [0.4, 0.5, 0.3]]]], + kernel=[[[0.4, 0.5, 0.3], [0.3, 0.4, 0.2]], [[0.1, 0.2, 0.0], [0.0, 0.1, -0.1]]], + out=[[[[0.5, 0.7, 0.3], [0.6, 0.8, 0.4]], [[0.7, 0.9, 0.5], [0.8, 1.0, 0.6]]]], + padding=[0, 0, 1, 1], + ), + dict( + image=[[[[0.1], [0.2]], [[0.3], [0.4]]], [[[0.2], [0.3]], [[0.4], [0.5]]]], + kernel=[[[0.4], [0.3]], [[0.1], [0.0]]], + out=[[[[0.5], [0.6]], [[0.7], [0.8]]], [[[0.6], [0.7]], [[0.8], [0.9]]]], + padding=[0, 0, 1, 1], + ), + dict( + image=[[[[0.1], [0.2]], [[0.3], [0.4]]]], + kernel=[[[0.4], [0.3]]], + out=[[[[0.5]], [[0.7]]]], + ), + dict( + image=[[[[0.1], [0.2], [0.3]], [[0.4], [0.5], [0.6]], [[0.7], [0.8], [0.9]]]], + kernel=[[[0.4], [0.3]], [[0.1], [0.2]]], + out=[[[[0.7], [0.8], [0.6]], [[1.0], [1.1], [0.9]], [[0.8], [0.9], [0.9]]]], + padding=[1, 1], + dilations=[2, 2], + ), + dict( + 
image=[ + [ + [[0.1], [0.2], [0.3], [0.4]], + [[0.5], [0.6], [0.7], [0.8]], + [[0.9], [1.0], [1.1], [1.2]], + ] + ], + kernel=[[[0.4], [0.3]], [[0.1], [0.2]]], + out=[[[[0.8], [1.0]], [[1.2], [1.4]]]], + strides=[1, 2], + ), + ) - for target, dev in tvm.testing.enabled_targets(): - if target in except_targets: - continue - op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)( - indata, kernel - ) - tvm.testing.assert_allclose(op_res.numpy(), out, rtol=1e-5, atol=1e-5) + @tvm.testing.fixture + def test_case(self, config, data_layout, dtype): + indata = np.array(config["image"], dtype=dtype) + kernel = np.array(config["kernel"], dtype=dtype) + out = np.array(config["out"], dtype=dtype) - def _convert_data(indata, kernel, out, layout=None): - indata = np.asarray(indata) - kernel = np.asarray(kernel) - out = np.asarray(out) - if layout == "NCHW": + if data_layout == "NHWC": + pass + elif data_layout == "NCHW": indata = indata.transpose([0, 3, 1, 2]) kernel = kernel.transpose([2, 0, 1]) out = out.transpose([0, 3, 1, 2]) - return indata, kernel, out + else: + raise ValueError(f"Unsupported layout '{data_layout}'") - image = [[[[0.1], [0.2]], [[0.3], [0.4]]]] - kernel = [[[0.4], [0.3]], [[0.1], [0.0]]] - out = [[[[0.5]]]] - run_test_dilation2d(*_convert_data(image, kernel, out, layout="NCHW")) - run_test_dilation2d(*_convert_data(image, kernel, out), data_layout="NHWC", kernel_layout="HWI") - - image = [[[[0.1], [0.2]], [[0.3], [0.4]]]] - kernel = [[[0.4], [0.3]], [[0.1], [0.0]]] - out = [[[[0.5], [0.6]], [[0.7], [0.8]]]] - run_test_dilation2d(*_convert_data(image, kernel, out, layout="NCHW"), padding=[0, 0, 1, 1]) - run_test_dilation2d( - *_convert_data(image, kernel, out), - padding=[0, 0, 1, 1], - data_layout="NHWC", - kernel_layout="HWI", - ) + return indata, kernel, out - image = [[[[0.1, 0.2, 0.0], [0.2, 0.3, 0.1]], [[0.3, 0.4, 0.2], [0.4, 0.5, 0.3]]]] - kernel = [[[0.4, 0.5, 0.3], [0.3, 0.4, 0.2]], [[0.1, 0.2, 0.0], [0.0, 0.1, -0.1]]] - out = [[[[0.5, 0.7, 0.3], [0.6, 0.8, 0.4]], [[0.7, 0.9, 0.5], [0.8, 1.0, 0.6]]]] - run_test_dilation2d(*_convert_data(image, kernel, out, layout="NCHW"), padding=[0, 0, 1, 1]) - run_test_dilation2d( - *_convert_data(image, kernel, out), - padding=[0, 0, 1, 1], - data_layout="NHWC", - kernel_layout="HWI", - ) + @tvm.testing.parametrize_targets("llvm") + def test_dilation2d( + self, + target, + dev, + test_case, + dtype, + config, + data_layout, + kernel_layout, + ): + strides = config.get("strides", [1, 1]) + padding = config.get("padding", [0, 0]) + dilations = config.get("dilations", [1, 1]) - image = [[[[0.1], [0.2]], [[0.3], [0.4]]], [[[0.2], [0.3]], [[0.4], [0.5]]]] - kernel = [[[0.4], [0.3]], [[0.1], [0.0]]] - out = [[[[0.5], [0.6]], [[0.7], [0.8]]], [[[0.6], [0.7]], [[0.8], [0.9]]]] - run_test_dilation2d(*_convert_data(image, kernel, out, layout="NCHW"), padding=[0, 0, 1, 1]) - run_test_dilation2d( - *_convert_data(image, kernel, out), - padding=[0, 0, 1, 1], - data_layout="NHWC", - kernel_layout="HWI", - ) + indata, kernel, out = test_case - image = [[[[0.1], [0.2]], [[0.3], [0.4]]]] - kernel = [[[0.4], [0.3]]] - out = [[[[0.5]], [[0.7]]]] - run_test_dilation2d(*_convert_data(image, kernel, out, layout="NCHW")) - run_test_dilation2d(*_convert_data(image, kernel, out), data_layout="NHWC", kernel_layout="HWI") - - image = [[[[0.1], [0.2], [0.3]], [[0.4], [0.5], [0.6]], [[0.7], [0.8], [0.9]]]] - kernel = [[[0.4], [0.3]], [[0.1], [0.2]]] - out = [[[[0.7], [0.8], [0.6]], [[1.0], [1.1], [0.9]], [[0.8], [0.9], [0.9]]]] - 
run_test_dilation2d( - *_convert_data(image, kernel, out, layout="NCHW"), padding=[1, 1], dilations=[2, 2] - ) - run_test_dilation2d( - *_convert_data(image, kernel, out), - padding=[1, 1], - dilations=[2, 2], - data_layout="NHWC", - kernel_layout="HWI", - ) + x = relay.var("x", shape=indata.shape, dtype=dtype) + w = relay.var("w", shape=kernel.shape, dtype=dtype) + y = relay.image.dilation2d( + x, + w, + strides=strides, + dilations=dilations, + padding=padding, + data_layout=data_layout, + kernel_layout=kernel_layout, + ) + func = relay.Function([x, w], y) - image = [ - [[[0.1], [0.2], [0.3], [0.4]], [[0.5], [0.6], [0.7], [0.8]], [[0.9], [1.0], [1.1], [1.2]]] - ] - kernel = [[[0.4], [0.3]], [[0.1], [0.2]]] - out = [[[[0.8], [1.0]], [[1.2], [1.4]]]] - run_test_dilation2d(*_convert_data(image, kernel, out, layout="NCHW"), strides=[1, 2]) - run_test_dilation2d( - *_convert_data(image, kernel, out), strides=[1, 2], data_layout="NHWC", kernel_layout="HWI" - ) + op_res = relay.create_executor("graph", device=dev, target=target).evaluate(func)( + indata, kernel + ) + tvm.testing.assert_allclose(op_res.numpy(), out, rtol=1e-5, atol=1e-5) @tvm.testing.uses_gpu @@ -1523,25 +1570,4 @@ def verify_all_class_non_max_suppression( if __name__ == "__main__": - test_resize_infer_type() - test_resize() - test_resize3d_infer_type() - test_crop_and_resize() - test_multibox_prior() - test_multibox_transform_loc() - test_get_valid_counts() - test_roi_align() - test_roi_pool() - test_proposal() - test_yolo_reorg_infer_shape() - test_yolo_reorg() - test_non_max_suppression() - test_deformable_conv2d() - test_depth_to_space() - test_space_to_depth() - test_dilation2d_infer_type() - test_dilation2d_run() - test_affine_grid() - test_grid_sample() - test_space_to_batch_nd() - test_all_class_non_max_suppression() + sys.exit(pytest.main(sys.argv)) diff --git a/tests/python/relay/test_pass_alter_op_layout.py b/tests/python/relay/test_pass_alter_op_layout.py index b5702a1542a9..40a4cb55bf86 100644 --- a/tests/python/relay/test_pass_alter_op_layout.py +++ b/tests/python/relay/test_pass_alter_op_layout.py @@ -712,7 +712,8 @@ def expected(): assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a) -def test_alter_layout_strided_slice(): +@tvm.testing.parametrize_targets("llvm") +def test_alter_layout_strided_slice(target, dev): """Test rewriting strided_slice during alter_iop_layout""" def before(): @@ -756,24 +757,20 @@ def expected(): mod_before = transform.InferType()(mod_before) mod_new = transform.InferType()(mod_new) with relay.build_config(opt_level=3): - for target, dev in tvm.testing.enabled_targets(): - for kind in ["graph", "debug", "vm"]: - np_data = np.random.uniform(size=(1, 32, 28, 28)).astype("float32") - np_weight = np.random.uniform(size=(32, 32, 3, 3)).astype("float32") - f_before = relay.create_executor( - kind, mod=mod_before, device=dev, target=target - ).evaluate() - result_before = f_before(np_data, np_weight) - f_new = relay.create_executor( - kind, mod=mod_new, device=dev, target=target - ).evaluate() - result_new = f_new(np_data, np_weight) - tvm.testing.assert_allclose( - result_before.numpy(), result_new.numpy(), rtol=1e-5, atol=1e-5 - ) - - -@tvm.testing.uses_gpu + for kind in ["graph", "debug", "vm"]: + np_data = np.random.uniform(size=(1, 32, 28, 28)).astype("float32") + np_weight = np.random.uniform(size=(32, 32, 3, 3)).astype("float32") + f_before = relay.create_executor( + kind, mod=mod_before, device=dev, target=target + ).evaluate() + result_before = f_before(np_data, np_weight) + 
f_new = relay.create_executor(kind, mod=mod_new, device=dev, target=target).evaluate() + result_new = f_new(np_data, np_weight) + tvm.testing.assert_allclose( + result_before.numpy(), result_new.numpy(), rtol=1e-5, atol=1e-5 + ) + + def test_alter_layout_strided_slice_axes_nhwc(): """Test rewriting strided_slice with axes during alter_iop_layout""" diff --git a/tests/python/relay/test_vm.py b/tests/python/relay/test_vm.py index 4c5b98514724..725d2765477f 100644 --- a/tests/python/relay/test_vm.py +++ b/tests/python/relay/test_vm.py @@ -34,7 +34,7 @@ from tvm.relay.testing import mlp -def check_result(args, expected_result, mod=None): +def check_result(target, dev, args, expected_result, mod=None): """ Check that evaluating `expr` applied to the arguments produces `result` on Relay VM. @@ -47,11 +47,8 @@ def check_result(args, expected_result, mod=None): expected_result: The expected result of running the expression. """ - for target, dev in tvm.testing.enabled_targets(): - rts_result = relay.create_executor("vm", device=dev, target=target, mod=mod).evaluate()( - *args - ) - tvm.testing.assert_allclose(expected_result, rts_result.numpy()) + rts_result = relay.create_executor("vm", device=dev, target=target, mod=mod).evaluate()(*args) + tvm.testing.assert_allclose(expected_result, rts_result.numpy()) def veval(f, *args, device=tvm.cpu(), target="llvm"): @@ -78,8 +75,7 @@ def vmobj_to_list(o): raise RuntimeError("Unknown object type: %s" % type(o)) -@tvm.testing.uses_gpu -def test_split(): +def test_split(target, dev): x = relay.var("x", shape=(12,)) y = relay.split(x, 3, axis=0).astuple() f = relay.Function([x], y) @@ -88,14 +84,12 @@ def test_split(): 12, ).astype("float32") ref_res = np.split(x_data, 3, axis=0) - for tgt, dev in tvm.testing.enabled_targets(): - res = veval(f, x_data, device=dev, target=tgt) - for i in range(3): - tvm.testing.assert_allclose(res[i].numpy(), ref_res[i]) + res = veval(f, x_data, device=dev, target=target) + for i in range(3): + tvm.testing.assert_allclose(res[i].numpy(), ref_res[i]) -@tvm.testing.uses_gpu -def test_split_no_fuse(): +def test_split_no_fuse(target, dev): x = relay.var("x", shape=(12,)) y = relay.split(x, 3, axis=0).astuple() z = relay.concatenate([relay.TupleGetItem(y, 0)], axis=0) @@ -104,29 +98,27 @@ def test_split_no_fuse(): x_data = np.random.rand( 12, ).astype("float32") - for tgt, dev in tvm.testing.enabled_targets(): - res = veval(f, x_data, device=dev, target=tgt) - tvm.testing.assert_allclose(res.numpy(), np.split(x_data, 3, axis=0)[0]) + res = veval(f, x_data, device=dev, target=target) + tvm.testing.assert_allclose(res.numpy(), np.split(x_data, 3, axis=0)[0]) -@tvm.testing.uses_gpu -def test_id(): + +def test_id(target, dev): x = relay.var("x", shape=(10, 10), dtype="float64") f = relay.Function([x], x) x_data = np.random.rand(10, 10).astype("float64") mod = tvm.IRModule() mod["main"] = f - check_result([x_data], x_data, mod=mod) + check_result(target, dev, [x_data], x_data, mod=mod) -@tvm.testing.uses_gpu -def test_op(): +def test_op(target, dev): x = relay.var("x", shape=(10, 10)) f = relay.Function([x], x + x) x_data = np.random.rand(10, 10).astype("float32") mod = tvm.IRModule() mod["main"] = f - check_result([x_data], 2 * x_data, mod=mod) + check_result(target, dev, [x_data], 2 * x_data, mod=mod) def any(x): @@ -134,8 +126,8 @@ def any(x): return relay.op.min(x, axis=[0, 1]) -@tvm.testing.uses_gpu -def test_cond(): +@tvm.testing.known_failing_targets("vulkan") +def test_cond(target, dev): x = relay.var("x", shape=(10, 10)) y = 
relay.var("y", shape=(10, 10)) # f = relay.Function([x, y], relay.op.equal(x, y)) @@ -146,14 +138,14 @@ def test_cond(): mod = tvm.IRModule() mod["main"] = f # same - check_result([x_data, x_data], True, mod=mod) + check_result(target, dev, [x_data, x_data], True, mod=mod) # diff - check_result([x_data, y_data], False, mod=mod) + check_result(target, dev, [x_data, y_data], False, mod=mod) -@tvm.testing.uses_gpu -def test_simple_if(): +@tvm.testing.known_failing_targets("vulkan") +def test_simple_if(target, dev): x = relay.var("x", shape=(10, 10)) y = relay.var("y", shape=(10, 10)) f = relay.Function([x, y], relay.If(any(relay.op.equal(x, y)), x, y)) @@ -163,14 +155,14 @@ def test_simple_if(): mod = tvm.IRModule() mod["main"] = f # same - check_result([x_data, x_data], x_data, mod=mod) + check_result(target, dev, [x_data, x_data], x_data, mod=mod) # diff - check_result([x_data, y_data], y_data, mod=mod) + check_result(target, dev, [x_data, y_data], y_data, mod=mod) -@tvm.testing.uses_gpu -def test_multiple_ifs(): +@tvm.testing.parametrize_targets("llvm") +def test_multiple_ifs(target, dev): mod = tvm.IRModule({}) b = relay.var("b") v0 = relay.var("v0") @@ -184,14 +176,12 @@ def test_multiple_ifs(): out = relay.Let(v0, relay.Tuple([relay.const(0)]), out) fn = relay.Function([b], out) mod["main"] = fn - dev = tvm.runtime.device("llvm", 0) func = relay.create_executor(device=dev, mod=mod, kind="vm").evaluate() res = vmobj_to_list(func(False)) assert res == [1, 0] -@tvm.testing.uses_gpu -def test_unused_function(): +def test_unused_function(target, dev): cond = relay.const(True) mod = tvm.IRModule() then_name = relay.GlobalVar("times_2") @@ -212,11 +202,10 @@ def test_unused_function(): x_data = np.random.rand(2, 2).astype("float32") y_data = x_data * 2 - check_result([x_data], y_data, mod=mod) + check_result(target, dev, [x_data], y_data, mod=mod) -@tvm.testing.uses_gpu -def test_simple_call(): +def test_simple_call(target, dev): mod = tvm.IRModule({}) sum_up = relay.GlobalVar("sum_up") i = relay.var("i", shape=[], dtype="int32") @@ -227,11 +216,10 @@ def test_simple_call(): i_data = np.array(0, dtype="int32") iarg = relay.var("iarg", shape=[], dtype="int32") mod["main"] = relay.Function([iarg], sum_up(iarg)) - check_result([i_data], i_data, mod=mod) + check_result(target, dev, [i_data], i_data, mod=mod) -@tvm.testing.uses_gpu -def test_count_loop(): +def test_count_loop(target, dev): mod = tvm.IRModule({}) sum_up = relay.GlobalVar("sum_up") i = relay.var("i", shape=[], dtype="int32") @@ -247,14 +235,12 @@ def test_count_loop(): i_data = np.array(0, dtype="int32") iarg = relay.var("i", shape=[], dtype="int32") mod["main"] = relay.Function([iarg], sum_up(iarg)) - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, i_data, device=dev, target=tgt) - tvm.testing.assert_allclose(result.numpy(), i_data) - check_result([i_data], i_data, mod=mod) + result = veval(mod, i_data, device=dev, target=target) + tvm.testing.assert_allclose(result.numpy(), i_data) + check_result(target, dev, [i_data], i_data, mod=mod) -@tvm.testing.uses_gpu -def test_sum_loop(): +def test_sum_loop(target, dev): mod = tvm.IRModule({}) sum_up = relay.GlobalVar("sum_up") i = relay.var("i", shape=[], dtype="int32") @@ -275,11 +261,10 @@ def test_sum_loop(): iarg = relay.var("i", shape=[], dtype="int32") aarg = relay.var("accum", shape=[], dtype="int32") mod["main"] = relay.Function([iarg, aarg], sum_up(iarg, aarg)) - check_result([i_data, accum_data], sum(range(1, loop_bound + 1)), mod=mod) + check_result(target, 
dev, [i_data, accum_data], sum(range(1, loop_bound + 1)), mod=mod) -@tvm.testing.uses_gpu -def test_tuple_fst(): +def test_tuple_fst(target, dev): ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))]) tup = relay.var("tup", type_annotation=ttype) f = relay.Function([tup], relay.TupleGetItem(tup, 0)) @@ -287,11 +272,10 @@ def test_tuple_fst(): j_data = np.random.rand(10).astype("float32") mod = tvm.IRModule() mod["main"] = f - check_result([(i_data, j_data)], i_data, mod=mod) + check_result(target, dev, [(i_data, j_data)], i_data, mod=mod) -@tvm.testing.uses_gpu -def test_tuple_second(): +def test_tuple_second(target, dev): ttype = relay.TupleType([relay.TensorType((1,)), relay.TensorType((10,))]) tup = relay.var("tup", type_annotation=ttype) f = relay.Function([tup], relay.TupleGetItem(tup, 1)) @@ -299,11 +283,10 @@ def test_tuple_second(): j_data = np.random.rand(10).astype("float32") mod = tvm.IRModule() mod["main"] = f - check_result([(i_data, j_data)], j_data, mod=mod) + check_result(target, dev, [(i_data, j_data)], j_data, mod=mod) -@tvm.testing.uses_gpu -def test_list_constructor(): +def test_list_constructor(target, dev): mod = tvm.IRModule() p = Prelude(mod) @@ -316,17 +299,15 @@ def test_list_constructor(): mod["main"] = f - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, device=dev, target=tgt) - assert len(result) == 2 - assert len(result[1]) == 2 + result = veval(mod, device=dev, target=target) + assert len(result) == 2 + assert len(result[1]) == 2 - obj = vmobj_to_list(result) - tvm.testing.assert_allclose(obj, np.array([3, 2, 1])) + obj = vmobj_to_list(result) + tvm.testing.assert_allclose(obj, np.array([3, 2, 1])) -@tvm.testing.uses_gpu -def test_let_tensor(): +def test_let_tensor(target, dev): sb = relay.ScopeBuilder() shape = (1,) x = relay.var("x", shape=shape, dtype="float32") @@ -342,11 +323,10 @@ def test_let_tensor(): x_data = np.random.rand(*shape).astype("float32") mod = tvm.IRModule() mod["main"] = f - check_result([x_data], x_data + 42.0, mod=mod) + check_result(target, dev, [x_data], x_data + 42.0, mod=mod) -@tvm.testing.uses_gpu -def test_let_scalar(): +def test_let_scalar(target, dev): sb = relay.ScopeBuilder() x = relay.var("x", "float32") @@ -360,11 +340,10 @@ def test_let_scalar(): x_data = np.array(np.random.rand()).astype("float32") mod = tvm.IRModule() mod["main"] = f - check_result([x_data], x_data + 42.0, mod=mod) + check_result(target, dev, [x_data], x_data + 42.0, mod=mod) -@tvm.testing.uses_gpu -def test_compose(): +def test_compose(target, dev): mod = tvm.IRModule() p = Prelude(mod) @@ -394,13 +373,11 @@ def test_compose(): mod["main"] = f x_data = np.array(np.random.rand()).astype("float32") - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, [x_data], device=dev, target=tgt) - tvm.testing.assert_allclose(result.numpy(), x_data + 2.0) + result = veval(mod, [x_data], device=dev, target=target) + tvm.testing.assert_allclose(result.numpy(), x_data + 2.0) -@tvm.testing.uses_gpu -def test_list_hd(): +def test_list_hd(target, dev): mod = tvm.IRModule() p = Prelude(mod) @@ -415,13 +392,11 @@ def test_list_hd(): mod["main"] = f - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(result.numpy(), 3) + result = veval(mod, device=dev, target=target) + tvm.testing.assert_allclose(result.numpy(), 3) -@pytest.mark.xfail -def test_list_tl_empty_list(): +def test_list_tl_empty_list(target, dev): mod = tvm.IRModule() p = Prelude(mod) 
@@ -432,12 +407,11 @@ def test_list_tl_empty_list(): mod["main"] = f - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, device=dev, target=tgt) + with pytest.raises(tvm.error.TVMError): + result = veval(mod, device=dev, target=target) -@tvm.testing.uses_gpu -def test_list_tl(): +def test_list_tl(target, dev): mod = tvm.IRModule() p = Prelude(mod) @@ -452,13 +426,11 @@ def test_list_tl(): mod["main"] = f - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(vmobj_to_list(result), np.array([2, 1])) + result = veval(mod, device=dev, target=target) + tvm.testing.assert_allclose(vmobj_to_list(result), np.array([2, 1])) -@tvm.testing.uses_gpu -def test_list_nth(): +def test_list_nth(target, dev): expected = list(range(10)) for i in range(len(expected)): @@ -474,13 +446,11 @@ def test_list_nth(): f = relay.Function([], nth(l, relay.const(i))) mod["main"] = f - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(result.numpy(), expected[i]) + result = veval(mod, device=dev, target=target) + tvm.testing.assert_allclose(result.numpy(), expected[i]) -@tvm.testing.uses_gpu -def test_list_update(): +def test_list_update(target, dev): expected = list(range(10)) mod = tvm.IRModule() @@ -500,13 +470,11 @@ def test_list_update(): f = relay.Function([], l) mod["main"] = f - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(vmobj_to_list(result), np.array(expected)) + result = veval(mod, device=dev, target=target) + tvm.testing.assert_allclose(vmobj_to_list(result), np.array(expected)) -@tvm.testing.uses_gpu -def test_list_length(): +def test_list_length(target, dev): expected = list(range(10)) mod = tvm.IRModule() @@ -524,13 +492,11 @@ def test_list_length(): f = relay.Function([], l) mod["main"] = f - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(result.numpy(), 10) + result = veval(mod, device=dev, target=target) + tvm.testing.assert_allclose(result.numpy(), 10) -@tvm.testing.uses_gpu -def test_list_map(): +def test_list_map(target, dev): mod = tvm.IRModule() p = Prelude(mod) @@ -544,13 +510,11 @@ def test_list_map(): f = relay.Function([], map(add_one_func, l)) mod["main"] = f - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 2])) + result = veval(mod, device=dev, target=target) + tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 2])) -@tvm.testing.uses_gpu -def test_list_foldl(): +def test_list_foldl(target, dev): mod = tvm.IRModule() p = Prelude(mod) @@ -564,13 +528,11 @@ def test_list_foldl(): l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil()))) f = relay.Function([], foldl(rev_dup_func, nil(), l)) mod["main"] = f - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 3, 2, 2, 1, 1])) + result = veval(mod, device=dev, target=target) + tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 3, 2, 2, 1, 1])) -@tvm.testing.uses_gpu -def test_list_foldr(): +def test_list_foldr(target, dev): mod = tvm.IRModule() p = Prelude(mod) @@ -584,13 +546,11 @@ def test_list_foldr(): l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), 
nil()))) f = relay.Function([], foldr(identity_func, nil(), l)) mod["main"] = f - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(vmobj_to_list(result), np.array([1, 2, 3])) + result = veval(mod, device=dev, target=target) + tvm.testing.assert_allclose(vmobj_to_list(result), np.array([1, 2, 3])) -@tvm.testing.uses_gpu -def test_list_sum(): +def test_list_sum(target, dev): mod = tvm.IRModule() p = Prelude(mod) @@ -600,13 +560,11 @@ def test_list_sum(): l = cons(relay.const(1), cons(relay.const(2), cons(relay.const(3), nil()))) f = relay.Function([], sum(l)) mod["main"] = f - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(result.numpy(), 6) + result = veval(mod, device=dev, target=target) + tvm.testing.assert_allclose(result.numpy(), 6) -@tvm.testing.uses_gpu -def test_list_filter(): +def test_list_filter(target, dev): mod = tvm.IRModule() p = Prelude(mod) @@ -623,26 +581,22 @@ def test_list_filter(): ) f = relay.Function([], filter(greater_than_one, l)) mod["main"] = f - for tgt, dev in tvm.testing.enabled_targets(): - result = veval(mod, device=dev, target=tgt) - tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 5])) + result = veval(mod, device=dev, target=target) + tvm.testing.assert_allclose(vmobj_to_list(result), np.array([3, 5])) -@tvm.testing.uses_gpu -def test_closure(): +def test_closure(target, dev): x = relay.var("x", shape=()) y = relay.var("y", shape=()) f = relay.Function([x], x + y) ff = relay.Function([y], f) clo = ff(relay.const(1.0)) main = clo(relay.const(2.0)) - for tgt, dev in tvm.testing.enabled_targets(): - res = veval(main, device=dev, target=tgt) - tvm.testing.assert_allclose(res.numpy(), 3.0) + res = veval(main, device=dev, target=target) + tvm.testing.assert_allclose(res.numpy(), 3.0) -@tvm.testing.uses_gpu -def test_add_op_scalar(): +def test_add_op_scalar(target, dev): """ test_add_op_scalar: fn (x, y) { @@ -660,11 +614,10 @@ def test_add_op_scalar(): ] for (x_data, y_data) in x_y_data: mod["main"] = func - check_result([x_data, y_data], x_data + y_data, mod=mod) + check_result(target, dev, [x_data, y_data], x_data + y_data, mod=mod) -@tvm.testing.uses_gpu -def test_add_op_scalar_int(): +def test_add_op_scalar_int(target, dev): """ test_add_op_scalar_int: fn (x, y) { @@ -682,11 +635,10 @@ def test_add_op_scalar_int(): ] for (x_data, y_data) in x_y_data: mod["main"] = func - check_result([x_data, y_data], x_data + y_data, mod=mod) + check_result(target, dev, [x_data, y_data], x_data + y_data, mod=mod) -@tvm.testing.uses_gpu -def test_add_op_tensor(): +def test_add_op_tensor(target, dev): """ test_add_op_tensor: fn (x, y) { @@ -700,11 +652,10 @@ def test_add_op_tensor(): x_data = np.random.rand(10, 5).astype("float32") y_data = np.random.rand(10, 5).astype("float32") mod["main"] = func - check_result([x_data, y_data], x_data + y_data, mod=mod) + check_result(target, dev, [x_data, y_data], x_data + y_data, mod=mod) -@tvm.testing.uses_gpu -def test_add_op_broadcast(): +def test_add_op_broadcast(target, dev): """ test_add_op_broadcast: fn (x, y) { @@ -718,7 +669,7 @@ def test_add_op_broadcast(): x_data = np.random.rand(10, 5).astype("float32") y_data = np.random.rand(1, 5).astype("float32") mod["main"] = func - check_result([x_data, y_data], x_data + y_data, mod=mod) + check_result(target, dev, [x_data, y_data], x_data + y_data, mod=mod) def test_vm_optimize_dynamic(): @@ -742,8 +693,7 @@ def 
test_vm_optimize(): assert len(free_vars) == 1 -@tvm.testing.uses_gpu -def test_loop_free_var(): +def test_loop_free_var(target, dev): x = relay.var("x", shape=(), dtype="int32") i = relay.var("i", shape=(), dtype="int32") s = relay.var("s", shape=(), dtype="int32") @@ -765,11 +715,10 @@ def body_with_free_var(i, acc): ret = relay.TupleGetItem(tup, 1) mod = tvm.IRModule() mod["main"] = relay.Function(relay.analysis.free_vars(ret), ret) - check_result(args, expected, mod=mod) + check_result(target, dev, args, expected, mod=mod) -@tvm.testing.uses_gpu -def test_vm_reshape_tensor(): +def test_vm_reshape_tensor(target, dev): x_np = np.random.uniform(size=(8, 16)).astype("float32") x = relay.var("x", shape=(8, 16), dtype="float32") y = relay.reshape(x, [-1, 4, 8]) @@ -778,7 +727,7 @@ def test_vm_reshape_tensor(): with tvm.transform.PassContext(opt_level=3): exec = relay.vm.compile(mod, "llvm") assert "reshape_tensor" in exec.bytecode - check_result([x_np], x_np.reshape([4, 4, 8]), mod) + check_result(target, dev, [x_np], x_np.reshape([4, 4, 8]), mod) x = relay.var("x", shape=(8, 16), dtype="float32") y = relay.reshape(x, [16, -1]) @@ -788,7 +737,7 @@ def test_vm_reshape_tensor(): with tvm.transform.PassContext(opt_level=3): exec = relay.vm.compile(mod, "llvm") assert exec.bytecode.count("reshape_tensor") == 1 - check_result([x_np], x_np.reshape([4, 4, 8]), mod) + check_result(target, dev, [x_np], x_np.reshape([4, 4, 8]), mod) # reshape with symbolic/any shape for n in [tvm.tir.Any(), tvm.te.size_var("n")]: @@ -800,7 +749,7 @@ def test_vm_reshape_tensor(): with tvm.transform.PassContext(opt_level=3): exec = relay.vm.compile(mod, "llvm") assert exec.bytecode.count("reshape_tensor") == 1 - check_result([x_np], x_np.reshape([32, 2, 2]), mod) + check_result(target, dev, [x_np], x_np.reshape([32, 2, 2]), mod) # dyn.reshape x = relay.var("x", shape=(8, 16), dtype="float32") @@ -814,10 +763,10 @@ def test_vm_reshape_tensor(): assert exec.bytecode.count("reshape_tensor") == 2 assert "reshape_tensor" in exec.bytecode y_np = np.array([8, 2, 8]).astype("int32") - check_result([x_np, y_np], x_np.reshape([8, 2, 8]), mod) + check_result(target, dev, [x_np, y_np], x_np.reshape([8, 2, 8]), mod) -def test_vm_reshape_tuple(x_shape=(1, 4, 2), y_shape=(1, 2, 10)): +def test_vm_reshape_tuple(target, dev, x_shape=(1, 4, 2), y_shape=(1, 2, 10)): tup = relay.var( "tup", type_annotation=relay.TupleType([relay.TensorType(x_shape), relay.TensorType(y_shape)]), @@ -828,9 +777,8 @@ def test_vm_reshape_tuple(x_shape=(1, 4, 2), y_shape=(1, 2, 10)): x_data = np.random.uniform(size=x_shape).astype("float32") y_data = np.random.uniform(size=y_shape).astype("float32") - for tgt, dev in tvm.testing.enabled_targets(): - res = veval(f, (x_data, y_data), device=dev, target=tgt) - tvm.testing.assert_allclose(res.numpy(), np.reshape(x_data, (1, -1))) + res = veval(f, (x_data, y_data), device=dev, target=target) + tvm.testing.assert_allclose(res.numpy(), np.reshape(x_data, (1, -1))) def test_constant_shape_with_external_codegen(): @@ -921,9 +869,8 @@ def test_get_output_single(): np.testing.assert_allclose(outputs[0].numpy(), inp + inp) -def test_get_output_multiple(): - target = tvm.target.Target("llvm") - +@tvm.testing.parametrize_targets("llvm") +def test_get_output_multiple(target, dev): # Build a IRModule. x = relay.var("x", shape=(10,)) f = relay.Function([x], relay.Tuple([x + x, x])) @@ -931,7 +878,7 @@ def test_get_output_multiple(): # Compile to VMExecutable. 
vm_exec = vm.compile(mod, target=target) - vm_factory = runtime.vm.VirtualMachine(vm_exec, tvm.cpu()) + vm_factory = runtime.vm.VirtualMachine(vm_exec, dev) inp = np.ones(10, dtype="float32") vm_factory.invoke_stateful("main", inp) outputs = vm_factory.get_outputs() @@ -940,9 +887,8 @@ def test_get_output_multiple(): np.testing.assert_allclose(outputs[1].numpy(), inp) -def test_get_input_index(): - target = tvm.target.Target("llvm") - +@tvm.testing.parametrize_targets("llvm") +def test_get_input_index(target, dev): # Build a IRModule. data_0, data_1 = ["d1", "d2"] x, y = [relay.var(c, shape=(10,)) for c in [data_0, data_1]] @@ -951,16 +897,16 @@ def test_get_input_index(): # Compile to VMExecutable. vm_exec = vm.compile(mod, target=target) - vm_factory = runtime.vm.VirtualMachine(vm_exec, tvm.cpu()) + vm_factory = runtime.vm.VirtualMachine(vm_exec, dev) assert vm_factory.get_input_index(data_1) == 1 assert vm_factory.get_input_index(data_0) == 0 assert vm_factory.get_input_index("invalid") == -1 -@tvm.testing.requires_llvm -def test_benchmark(): +@tvm.testing.parametrize_targets("llvm") +def test_benchmark(target, dev): mod, params = mlp.get_workload(1) - lib = vm.compile(mod, target="llvm", params=params) + lib = vm.compile(mod, target=target, params=params) exe = runtime.vm.VirtualMachine(lib, tvm.cpu()) data = tvm.nd.array(np.random.rand(1, 1, 28, 28).astype("float32")) result = exe.benchmark(tvm.cpu(), data, func_name="main", repeat=2, number=1) @@ -973,7 +919,7 @@ def test_benchmark(): "time_evaluator", return_value=lambda x: tvm.runtime.module.BenchmarkResult([1, 2, 2, 5]), ) as method: - result = exe.benchmark(tvm.cpu(), data, func_name="main", repeat=2, number=1) + result = exe.benchmark(dev, data, func_name="main", repeat=2, number=1) assert result.mean == 2.5 assert result.median == 2.0 assert result.max == 5 @@ -981,8 +927,7 @@ def test_benchmark(): assert result.std == 1.5 -@tvm.testing.parametrize_targets("cuda", "llvm") -def test_benchmark_end_to_end(dev, target): +def test_benchmark_end_to_end(target, dev): mod, params = mlp.get_workload(1) lib = vm.compile(mod, target=target, params=params) exe = runtime.vm.VirtualMachine(lib, dev) @@ -1014,4 +959,4 @@ def test_benchmark_end_to_end_rpc(): if __name__ == "__main__": - pytest.main([__file__]) + sys.exit(pytest.main(sys.argv))
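
For reviewers unfamiliar with the fixture-based style this patch converts to, the sketch below shows the general before/after shape. It is an illustrative example only, not part of the patch: the test bodies and names (test_identity_*) are hypothetical, and it assumes TVM's pytest plugin (tvm.testing) is active so that a bare (target, dev) signature is auto-parametrized over the enabled targets, exactly as relied on by the converted tests above.

    # Illustrative sketch (not part of this patch) of the conversion pattern used above.
    import sys

    import numpy as np
    import pytest

    import tvm
    import tvm.testing
    from tvm import relay


    # Old style: decorate with uses_gpu and loop over enabled targets inside the body.
    @tvm.testing.uses_gpu
    def test_identity_old_style():
        x = relay.var("x", shape=(4,), dtype="float32")
        func = relay.Function([x], x)
        x_np = np.random.rand(4).astype("float32")
        for tgt, dev in tvm.testing.enabled_targets():
            out = relay.create_executor("vm", device=dev, target=tgt).evaluate(func)(x_np)
            tvm.testing.assert_allclose(out.numpy(), x_np)


    # New style: accept target/dev as fixtures; the plugin parametrizes the test so each
    # target appears as a separate test case and can be skipped or xfailed individually.
    def test_identity_new_style(target, dev):
        x = relay.var("x", shape=(4,), dtype="float32")
        func = relay.Function([x], x)
        x_np = np.random.rand(4).astype("float32")
        out = relay.create_executor("vm", device=dev, target=target).evaluate(func)(x_np)
        tvm.testing.assert_allclose(out.numpy(), x_np)


    # Restricting to an explicit target list, as done for the VM runtime tests above.
    @tvm.testing.parametrize_targets("llvm")
    def test_identity_llvm_only(target, dev):
        assert target == "llvm"


    if __name__ == "__main__":
        sys.exit(pytest.main(sys.argv))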