Merged
11 changes: 10 additions & 1 deletion python/tvm/relay/testing/__init__.py
@@ -81,6 +81,7 @@ def check_grad(
scale=None,
mean=0,
mode="higher_order",
target_devices=None,
):
"""Perform numerical gradient checking given a relay function.

@@ -117,6 +118,11 @@ def check_grad(

mean: float
The mean of the inputs.

target_devices: Optional[List[Tuple[tvm.target.Target, tvm.runtime.Device]]]
A list of target/device pairs on which the gradient should be
tested. If not specified, defaults to `tvm.testing.enabled_targets()`.

"""

fwd_func = run_infer_type(func)
@@ -133,7 +139,10 @@ def check_grad(
if test_inputs is None:
test_inputs = inputs

for target, dev in enabled_targets():
if target_devices is None:
target_devices = enabled_targets()

for target, dev in target_devices:
# Eval the backward and forward functions
# TODO(mbs): Evaluate a pair of functions so can share preparation between them.
bwd_func_compiled = relay.create_executor(device=dev, target=target).evaluate(bwd_func)
1 change: 1 addition & 0 deletions python/tvm/testing/__init__.py
@@ -17,6 +17,7 @@

# pylint: disable=redefined-builtin, wildcard-import
"""Utility Python functions for TVM testing"""

from .utils import *

from ._ffi_api import nop, echo, device_test, run_check_signal, object_use_count
127 changes: 61 additions & 66 deletions python/tvm/testing/plugin.py
@@ -31,8 +31,6 @@

"""

import collections

import pytest
import _pytest

@@ -67,6 +65,7 @@ def pytest_generate_tests(metafunc):
"""Called once per unit test, modifies/parametrizes it as needed."""
_parametrize_correlated_parameters(metafunc)
_auto_parametrize_target(metafunc)
_add_target_specific_marks(metafunc)


def pytest_collection_modifyitems(config, items):
@@ -100,7 +99,39 @@ def _auto_parametrize_target(metafunc):

"""

if "target" in metafunc.fixturenames:
# Check whether an explicit parametrization over "target" exists,
# and apply one if it does not. If the function is marked with
# excluded targets, use these to restrict the targets to
# parametrize over; known-failing targets are handled separately
# in _add_target_specific_marks.
parametrized_args = [
arg.strip()
for mark in metafunc.definition.iter_markers("parametrize")
for arg in mark.args[0].split(",")
]
if "target" not in parametrized_args:
excluded_targets = getattr(metafunc.function, "tvm_excluded_targets", [])

# Add a parametrize marker instead of calling
# metafunc.parametrize so that the parametrize rewriting
# can still occur.
mark = pytest.mark.parametrize(
"target",
[
t["target"]
for t in utils._get_targets()
if t["target_kind"] not in excluded_targets
],
scope="session",
)
metafunc.definition.add_marker(mark)


def _add_target_specific_marks(metafunc):
"""Add any target-specific marks to parametrizations over target"""

def update_parametrize_target_arg(
mark,
argnames,
argvalues,
*args,
@@ -131,6 +162,16 @@ def update_parametrize_target_arg(
target = param_set[target_i]
additional_marks = []

if mark in metafunc.definition.own_markers:
xfail_targets = getattr(metafunc.function, "tvm_known_failing_targets", [])
target_kind = target.split()[0] if isinstance(target, str) else target.kind.name
if target_kind in xfail_targets:
additional_marks.append(
pytest.mark.xfail(
reason=f'Known failing test for target "{target_kind}"'
)
)

new_argvalues.append(
pytest.param(
*param_set, marks=_target_to_requirement(target) + additional_marks
@@ -155,25 +196,7 @@ def update_parametrize_target_arg
# parametrize over targets. This adds the appropriate
# @tvm.testing.requires_* markers for each target.
for mark in metafunc.definition.iter_markers("parametrize"):
update_parametrize_target_arg(*mark.args, **mark.kwargs)

# Check if any explicit parametrizations exist, and apply one
# if they do not. If the function is marked with either
# excluded or known failing targets, use these to determine
# the targets to be used.
parametrized_args = [
arg.strip()
for mark in metafunc.definition.iter_markers("parametrize")
for arg in mark.args[0].split(",")
]
if "target" not in parametrized_args:
excluded_targets = getattr(metafunc.function, "tvm_excluded_targets", [])
xfail_targets = getattr(metafunc.function, "tvm_known_failing_targets", [])
metafunc.parametrize(
"target",
_pytest_target_params(None, excluded_targets, xfail_targets),
scope="session",
)
update_parametrize_target_arg(mark, *mark.args, **mark.kwargs)


def _count_num_fixture_uses(items):
@@ -212,50 +235,15 @@ def _remove_global_fixture_definitions(items):
delattr(module, name)


def _pytest_target_params(targets, excluded_targets=None, xfail_targets=None):
# Include unrunnable targets here. They get skipped by the
# pytest.mark.skipif in _target_to_requirement(), showing up as
# skipped tests instead of being hidden entirely.
if targets is None:
if excluded_targets is None:
excluded_targets = set()

if xfail_targets is None:
xfail_targets = set()

target_marks = []
for t in utils._get_targets():
# Excluded targets aren't included in the params at all.
if t["target_kind"] not in excluded_targets:

# Known failing targets are included, but are marked
# as expected to fail.
extra_marks = []
if t["target_kind"] in xfail_targets:
extra_marks.append(
pytest.mark.xfail(
reason='Known failing test for target "{}"'.format(t["target_kind"])
)
)

target_marks.append((t["target"], extra_marks))

else:
target_marks = [(target, []) for target in targets]

return [
pytest.param(target, marks=_target_to_requirement(target) + extra_marks)
for target, extra_marks in target_marks
]


def _target_to_requirement(target):
if isinstance(target, str):
target = tvm.target.Target(target)

# mapping from target to decorator
if target.kind.name == "cuda" and "cudnn" in target.attrs.get("libs", []):
return utils.requires_cudnn()
if target.kind.name == "cuda" and "cublas" in target.attrs.get("libs", []):
return utils.requires_cublas()
if target.kind.name == "cuda":
return utils.requires_cuda()
if target.kind.name == "rocm":
@@ -274,7 +262,7 @@ def _target_to_requirement(target):


def _parametrize_correlated_parameters(metafunc):
parametrize_needed = collections.defaultdict(list)
parametrize_needed = {}

for name, fixturedefs in metafunc.definition._fixtureinfo.name2fixturedefs.items():
fixturedef = fixturedefs[-1]
@@ -283,13 +271,20 @@
):
group = fixturedef.func.parametrize_group
values = fixturedef.func.parametrize_values
parametrize_needed[group].append((name, values))
ids = fixturedef.func.parametrize_ids
if group in parametrize_needed:
assert ids == parametrize_needed[group]["ids"]
else:
parametrize_needed[group] = {"ids": ids, "params": []}
parametrize_needed[group]["params"].append((name, values))

for parametrize_group in parametrize_needed.values():
if len(parametrize_group) == 1:
name, values = parametrize_group[0]
metafunc.parametrize(name, values, indirect=True)
params = parametrize_group["params"]
ids = parametrize_group["ids"]
if len(params) == 1:
name, values = params[0]
metafunc.parametrize(name, values, indirect=True, ids=ids)
else:
names = ",".join(name for name, values in parametrize_group)
value_sets = zip(*[values for name, values in parametrize_group])
metafunc.parametrize(names, value_sets, indirect=True)
names = ",".join(name for name, values in params)
value_sets = zip(*[values for name, values in params])
metafunc.parametrize(names, value_sets, indirect=True, ids=ids)
56 changes: 53 additions & 3 deletions python/tvm/testing/utils.py
@@ -594,6 +594,27 @@ def requires_cudnn(*args):
return _compose(args, requirements)


def requires_cublas(*args):
"""Mark a test as requiring the cuBLAS library.

This also marks the test as requiring a cuda gpu.

Parameters
----------
f : function
Function to mark
"""

requirements = [
pytest.mark.skipif(
tvm.get_global_func("tvm.contrib.cublas.matmul", True) is None,
reason="cuBLAS library not enabled",
),
*requires_cuda(),
]
return _compose(args, requirements)


def requires_nvptx(*args):
"""Mark a test as requiring the NVPTX compilation on the CUDA runtime

@@ -968,7 +989,7 @@ def wraps(func):
return wraps


def parameter(*values, ids=None):
def parameter(*values, ids=None, by_dict=None):
"""Convenience function to define pytest parametrized fixtures.

Declaring a variable using ``tvm.testing.parameter`` will define a
@@ -988,16 +1009,23 @@ def parameter(*values, ids=None):

Parameters
----------
values
values : Any

A list of parameter values. A unit test that accepts this
parameter as an argument will be run once for each parameter
given.

ids : List[str], optional

A list of names for the parameters. If None, pytest will
generate a name from the value. These generated names may not
be readable/useful for composite types such as tuples.

by_dict : Dict[str, Any]

A mapping from parameter name to parameter value, to set both the
values and ids.

Returns
-------
function
@@ -1015,8 +1043,22 @@
>>> def test_using_size(shape):
>>> ... # Test code here

Or

>>> shape = tvm.testing.parameter(by_dict={'small': (5,10), 'large': (512,1024)})
>>> def test_using_size(shape):
>>> ... # Test code here

"""

if by_dict is not None:
if values or ids:
raise RuntimeError(
"Use of the by_dict parameter cannot be used alongside positional arguments"
)

ids, values = zip(*by_dict.items())

# Optional cls parameter in case a parameter is defined inside a
# class scope.
@pytest.fixture(params=values, ids=ids)
@@ -1029,7 +1071,7 @@ def as_fixture(*_cls, request):
_parametrize_group = 0


def parameters(*value_sets):
def parameters(*value_sets, ids=None):
"""Convenience function to define pytest parametrized fixtures.

Declaring a variable using tvm.testing.parameters will define a
@@ -1052,11 +1094,18 @@ def parameters(*value_sets):
Parameters
----------
value_sets : List[tuple]

A list of parameter value sets. Each set of values represents
a single combination of values to be tested. A unit test that
accepts the defined parameters as arguments will be run once for
every set of parameters in the list.

ids : List[str], optional

A list of names for the parameter sets. If None, pytest will
generate a name from each parameter set. These generated names may
not be readable/useful for composite types such as tuples.

Returns
-------
List[function]
@@ -1085,6 +1134,7 @@ def fixture_func(*_cls, request):

fixture_func.parametrize_group = parametrize_group
fixture_func.parametrize_values = param_values
fixture_func.parametrize_ids = ids
outputs.append(pytest.fixture(fixture_func))

return outputs
6 changes: 5 additions & 1 deletion tests/python/relay/aot/aot_test_utils.py
@@ -155,6 +155,10 @@ def parametrize_aot_options(test):
skip_i386 = pytest.mark.skipif(
platform.machine() == "i686", reason="Reference system unavailable in i386 container"
)
requires_arm_eabi = pytest.mark.skipif(
shutil.which("arm-none-eabi-gcc") is None, reason="ARM embedded toolchain unavailable"
)

interface_api = ["packed", "c"]
use_unpacked_api = [True, False]
test_runner = [AOT_DEFAULT_RUNNER, AOT_CORSTONE300_RUNNER]
@@ -178,7 +182,7 @@

# Skip reference system tests if running in i386 container
marked_combinations = map(
lambda parameters: pytest.param(*parameters, marks=skip_i386)
lambda parameters: pytest.param(*parameters, marks=[skip_i386, requires_arm_eabi])
if parameters[2] == AOT_CORSTONE300_RUNNER
else parameters,
valid_combinations,