Makefile: 2 changes (1 addition & 1 deletion)
@@ -615,7 +615,7 @@ lib/libtvm_runtime.so:
     [ -e $(LLVM_PATH)/bin/llvm-config ] || sh $(ROOTDIR)/contrib/tvmop/prepare_tvm.sh; \
     cd $(TVM_PATH)/build; \
     cmake -DUSE_LLVM="$(LLVM_PATH)/bin/llvm-config" \
-        -DUSE_SORT=OFF -DUSE_CUDA=$(TVM_USE_CUDA) -DUSE_CUDNN=OFF -DUSE_OPENMP=ON ..; \
+        -DUSE_SORT=OFF -DUSE_CUDA=$(TVM_USE_CUDA) -DUSE_CUDNN=OFF -DUSE_OPENMP=gnu ..; \
     $(MAKE) VERBOSE=1; \
     mkdir -p $(ROOTDIR)/lib; \
     cp $(TVM_PATH)/build/libtvm_runtime.so $(ROOTDIR)/lib/libtvm_runtime.so; \
ci/docker/runtime_functions.sh: 2 changes (1 addition & 1 deletion)
@@ -22,7 +22,7 @@

 set -ex

-NOSE_COVERAGE_ARGUMENTS="--with-coverage --cover-inclusive --cover-xml --cover-branches --cover-package=mxnet"
+NOSE_COVERAGE_ARGUMENTS="--with-coverage --cover-inclusive --cover-xml --cover-branches --cover-package=mxnet --nocapture"
 NOSE_TIMER_ARGUMENTS="--with-timer --timer-ok 1 --timer-warning 15 --timer-filter warning,error"
 CI_CUDA_COMPUTE_CAPABILITIES="-gencode=arch=compute_52,code=sm_52 -gencode=arch=compute_70,code=sm_70"
 CI_CMAKE_CUDA_ARCH="5.2 7.0"
cmake/BuildTVM.cmake: 2 changes (1 addition & 1 deletion)
@@ -138,7 +138,7 @@ set(USE_VTA_TSIM OFF)
 set(USE_RELAY_DEBUG OFF)

 # Use OPENMP thread pool to be compatible with MXNet
-set(USE_OPENMP ON)
+set(USE_OPENMP gnu)

 # Disable USE_MKLDNN for TVM
 set(USE_MKLDNN OFF)
python/mxnet/test_utils.py: 45 changes (5 additions & 40 deletions)
@@ -52,7 +52,6 @@
 from .symbol.numpy import _Symbol as np_symbol
 from .util import use_np # pylint: disable=unused-import
 from .runtime import Features
-from .numpy_extension import get_cuda_compute_capability


 def default_context():
@@ -2380,45 +2379,6 @@ def is_cd_run():
 _features = Features()


-def has_tvm_ops():
-    """Returns True if MXNet is compiled with TVM generated operators. If current ctx
-    is GPU, it only returns True for CUDA compute capability > 52 where FP16 is supported.
-    """
-    built_with_tvm_op = _features.is_enabled("TVM_OP")
-    ctx = current_context()
-    if ctx.device_type == 'gpu':
-        try:
-            cc = get_cuda_compute_capability(ctx)
-        except: # pylint: disable=bare-except
-            print('Failed to get CUDA compute capability for context {}. The operators '
-                  'built with USE_TVM_OP=1 will not be run in unit tests.'.format(ctx))
-            return False
-        print('Cuda arch compute capability: sm_{}'.format(str(cc)))
-        return built_with_tvm_op and cc >= 53
-    return built_with_tvm_op
-
-
-def is_op_runnable():
-    """Returns True for all CPU tests. Returns True for GPU tests that are either of the following.
-    1. Built with USE_TVM_OP=0.
-    2. Built with USE_TVM_OP=1, but with compute capability >= 53.
-    """
-    ctx = current_context()
-    if ctx.device_type == 'gpu':
-        if not _features.is_enabled("TVM_OP"):
-            return True
-        else:
-            try:
-                cc = get_cuda_compute_capability(ctx)
-            except: # pylint: disable=bare-except
-                print('Failed to get CUDA compute capability for context {}. The operators '
-                      'built with USE_TVM_OP=1 will not be run in unit tests.'.format(ctx))
-                return False
-            print('Cuda arch compute capability: sm_{}'.format(str(cc)))
-            return cc >= 53
-    return True
-
-
 @use_np
 def check_gluon_hybridize_consistency(net_builder, data_l, numpy_func=None, test_grad=True,
                                       rtol=1E-4, atol=1E-4):
@@ -2541,3 +2501,8 @@ def new_sym_matrix_with_real_eigvals_nd(shape):
     """Generate sym matrices with real eigenvalues."""
     n = int(np.prod(shape[:-2])) if len(shape) > 2 else 1
     return np.array([new_sym_matrix_with_real_eigvals_2d(shape[-1]) for i in range(n)]).reshape(shape)
+
+
+def use_tvm_op():
+    """Returns True if MXNet is built with USE_TVM_OP=1."""
+    return _features.is_enabled("TVM_OP")
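
A minimal usage sketch of the new helper (the test name and decorator placement are hypothetical, not part of this diff): a test can gate TVM-operator coverage on the build feature either with an inline `if use_tvm_op():` check, as tests/python/unittest/test_numpy_op.py does below, or with a skip decorator.

import unittest
from mxnet.test_utils import use_tvm_op  # helper added in this change

# Skip the test entirely when MXNet was not built with USE_TVM_OP=1.
@unittest.skipUnless(use_tvm_op(), "MXNet was not built with USE_TVM_OP=1")
def test_some_tvm_generated_op():
    pass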
tests/python/unittest/test_numpy_interoperability.py: 29 changes (11 additions & 18 deletions)
@@ -27,7 +27,6 @@
 from mxnet import np
 from mxnet.test_utils import assert_almost_equal
 from mxnet.test_utils import use_np
-from mxnet.test_utils import is_op_runnable
 from common import assertRaises, with_seed
 from mxnet.numpy_dispatch_protocol import with_array_function_protocol, with_array_ufunc_protocol
 from mxnet.numpy_dispatch_protocol import _NUMPY_ARRAY_FUNCTION_LIST, _NUMPY_ARRAY_UFUNC_LIST
@@ -36,13 +35,8 @@
 _INT_DTYPES = [np.int8, np.int32, np.int64, np.uint8]
 _FLOAT_DTYPES = [np.float16, np.float32, np.float64]
 _DTYPES = _INT_DTYPES + _FLOAT_DTYPES
-_TVM_OPS = [
-    'equal',
-    'not_equal',
-    'less',
-    'less_equal',
-    'greater',
-    'greater_equal'
+_SKIP_ASSERT_EQUAL_LIST = [
+    'empty_like',
 ]


@@ -1944,31 +1938,30 @@ def _check_interoperability_helper(op_name, *args, **kwargs):
         onp_op = getattr(getattr(_np, strs[0]), strs[1])
     else:
         assert False
-    if not is_op_runnable():
-        return
     out = onp_op(*args, **kwargs)
     expected_out = _get_numpy_op_output(onp_op, *args, **kwargs)
     if isinstance(out, (tuple, list)):
         assert type(out) == type(expected_out)
         for arr, expected_arr in zip(out, expected_out):
             if isinstance(arr, np.ndarray):
-                assert_almost_equal(arr.asnumpy(), expected_arr, rtol=1e-3, atol=1e-4, use_broadcast=False, equal_nan=True)
+                if op_name in _SKIP_ASSERT_EQUAL_LIST:
+                    assert arr.shape == expected_arr.shape
+                else:
+                    assert_almost_equal(arr.asnumpy(), expected_arr, rtol=1e-3, atol=1e-4, use_broadcast=False, equal_nan=True)
             else:
                 _np.testing.assert_equal(arr, expected_arr)
     elif isinstance(out, np.ndarray):
-        assert_almost_equal(out.asnumpy(), expected_out, rtol=1e-3, atol=1e-4, use_broadcast=False, equal_nan=True)
+        if op_name in _SKIP_ASSERT_EQUAL_LIST:
+            assert out.shape == expected_out.shape
+        else:
+            assert_almost_equal(out.asnumpy(), expected_out, rtol=1e-3, atol=1e-4, use_broadcast=False, equal_nan=True)
     else:
         _np.testing.assert_almost_equal(out, expected_out)


 def check_interoperability(op_list):
     for name in op_list:
-        if name in _TVM_OPS and not is_op_runnable():
-            continue
-        if name in ['shares_memory', 'may_share_memory', 'empty_like']: # skip list
-            continue
-        if name in ['full_like', 'zeros_like', 'ones_like'] and \
-                StrictVersion(platform.python_version()) < StrictVersion('3.0.0'):
+        if name in ['shares_memory', 'may_share_memory']: # skip list
             continue
         print('Dispatch test:', name)
         workloads = OpArgMngr.get_workloads(name)
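
The new _SKIP_ASSERT_EQUAL_LIST branch above compares only shapes because an op such as empty_like returns uninitialized memory, so element-wise comparison against the NumPy reference is meaningless. A small standalone illustration using official NumPy only (not taken from the test suite):

import numpy as _np

# empty_like output values are arbitrary; only the shape is well-defined.
a = _np.empty_like(_np.ones((2, 3)))
b = _np.empty_like(_np.ones((2, 3)))
assert a.shape == b.shape == (2, 3)
# _np.testing.assert_allclose(a, b)  # would be flaky: the values are uninitialized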
tests/python/unittest/test_numpy_ndarray.py: 21 changes (7 additions & 14 deletions)
@@ -27,7 +27,7 @@
 from mxnet.gluon import HybridBlock
 from mxnet.test_utils import same, assert_almost_equal, rand_shape_nd, rand_ndarray, retry, use_np
 from common import with_seed, TemporaryDirectory
-from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf, assert_exception, is_op_runnable, collapse_sum_like
+from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf, assert_exception, collapse_sum_like
 from mxnet.ndarray.ndarray import py_slice
 from mxnet.base import integer_types

@@ -267,19 +267,14 @@ def test_np_ndarray_binary_element_wise_ops():
         '/': _np.divide,
         'mod': _np.mod,
         'pow': _np.power,

+        '==': _np.equal,
+        '!=': _np.not_equal,
+        '>': _np.greater,
+        '>=': _np.greater_equal,
+        '<': _np.less,
+        '<=': _np.less_equal,
     }

-    if is_op_runnable():
-        np_op_map.update({
-            '==': _np.equal,
-            '!=': _np.not_equal,
-            '>': _np.greater,
-            '>=': _np.greater_equal,
-            '<': _np.less,
-            '<=': _np.less_equal
-        })

     def _get_grad_func(op, scalar=None, reverse=False):
         if op == '+':
             if scalar is None:
@@ -1122,8 +1117,6 @@ def test_np_multinomial():


 @with_seed()
-@unittest.skipUnless(is_op_runnable(), "Comparison ops can only run on either CPU instances, or GPU instances with"
-                                       " compute capability >= 53 if MXNet is built with USE_TVM_OP=ON")
 @use_np
 def test_np_ndarray_boolean_indexing():
     def test_single_bool_index():
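
With the comparison entries folded directly into np_op_map, the ==, !=, <, <=, >, >= cases are exercised unconditionally. A hedged sketch of how such a map is typically consumed (an assumption about the surrounding test, shown with plain NumPy for illustration): each token maps to the official-NumPy reference that the MXNet result is checked against.

import numpy as _np

# Hypothetical driver snippet, not copied from the test file.
np_op_map = {'==': _np.equal, '!=': _np.not_equal, '<': _np.less}
lhs = _np.array([1.0, 2.0, 3.0])
rhs = _np.array([2.0, 2.0, 2.0])
expected = np_op_map['<'](lhs, rhs)  # reference result from official NumPy
assert expected.tolist() == [True, False, False]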
tests/python/unittest/test_numpy_op.py: 8 changes (4 additions & 4 deletions)
@@ -38,7 +38,7 @@
 import random
 from mxnet.test_utils import verify_generator, gen_buckets_probs_with_ppf
 from mxnet.numpy_op_signature import _get_builtin_op
-from mxnet.test_utils import is_op_runnable, has_tvm_ops
+from mxnet.test_utils import use_tvm_op
 from mxnet.operator import get_all_registered_operators


@@ -465,7 +465,7 @@ def is_int(dtype):
                 expected_ret = _np.sum(x.asnumpy(), axis=axis, dtype=acc_type[itype], keepdims=keepdims)
                 expected_ret = expected_ret.astype(dtype)
                 if itype == 'bool':
-                    if is_op_runnable() and (not is_windows): # special handling of boolean ndarray
+                    if not is_windows: # special handling of boolean ndarray
                         y = test_sum(x)
                         assert y.dtype == expected_ret.dtype
                         assert_almost_equal(y.asnumpy(), expected_ret, rtol=1e-4, atol=1e-5,
@@ -870,7 +870,7 @@ def is_int(dtype):
                 expected_ret = _np.mean(x.asnumpy(), axis=axis, dtype=dtype, keepdims=keepdims)

                 if itype == 'bool':
-                    if is_op_runnable() and (not is_windows) and dtype not in ['float16', 'int8']: # special handling of boolean ndarray
+                    if (not is_windows) and dtype not in ['float16', 'int8']: # special handling of boolean ndarray
                         y = test_mean(x)
                         assert y.shape == expected_ret.shape
                         assert_almost_equal(y.asnumpy(), expected_ret, rtol=1e-3 if dtype == 'float16' else 1e-3,
@@ -1903,7 +1903,7 @@ def hybrid_forward(self, F, a, *args, **kwargs):
         'arccosh' : (lambda x: 1./(x**2 - 1.)**(1./2.), 2.0, 5.0),
         'arctanh' : (lambda x: -1./(x**2 - 1.), -0.99, 0.99)
     }
-    if has_tvm_ops():
+    if use_tvm_op():
         funcs['rad2deg'] = (lambda x: 180. / _np.pi * _np.ones(x.shape), -1.0, 1.0)
         funcs['deg2rad'] = (lambda x: _np.pi / 180. * _np.ones(x.shape), -1.0, 1.0)
     ndim = random.choice([2, 3, 4])