3 changes: 2 additions & 1 deletion ci/docker/Dockerfile.build.ubuntu
@@ -105,7 +105,8 @@ RUN cd /usr/local/src && \

# Python & cmake
COPY install/requirements /work/
RUN python3 -m pip install cmake==3.16.6 && \
RUN python3 -m pip install --upgrade pip && \
python3 -m pip install cmake==3.16.6 && \
python3 -m pip install -r /work/requirements

ARG USER_ID=0
3 changes: 2 additions & 1 deletion ci/docker/install/requirements
@@ -25,7 +25,8 @@ graphviz<0.9.0,>=0.8.1
contextvars;python_version<"3.7"

# Optional dependencies
onnx==1.5.0
onnx==1.7.0
onnxruntime==1.4.0
# protobuf version frozen due to ps-lite
protobuf==3.5.2
scipy==1.4.1
4 changes: 3 additions & 1 deletion ci/docker/runtime_functions.sh
@@ -898,11 +898,13 @@ unittest_centos7_gpu() {
integrationtest_ubuntu_cpu_onnx() {
set -ex
export PYTHONPATH=./python/
export DMLC_LOG_STACK_TRACE_DEPTH=10
export MXNET_SUBGRAPH_VERBOSE=0
export DMLC_LOG_STACK_TRACE_DEPTH=10
python3 tests/python/unittest/onnx/backend_test.py
OMP_NUM_THREADS=$(expr $(nproc) / 4) pytest -n 4 tests/python/unittest/onnx/mxnet_export_test.py
OMP_NUM_THREADS=$(expr $(nproc) / 4) pytest -n 4 tests/python/unittest/onnx/test_models.py
OMP_NUM_THREADS=$(expr $(nproc) / 4) pytest -n 4 tests/python/unittest/onnx/test_node.py
OMP_NUM_THREADS=$(expr $(nproc) / 4) pytest -n 4 tests/python-pytest/onnx/test_onnxruntime.py
}
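
For context: onnx 1.7.0 (added in the requirements change above) implements opset 12, which the converter changes below target, and onnxruntime 1.4.0 lets CI run the exported graphs. A minimal smoke test along the lines of the new test_onnxruntime.py suite might look like the following sketch; the model path and input shape are placeholders, not taken from this PR:

    import numpy as np
    import onnxruntime

    # Hypothetical check: load an exported model and run one forward pass.
    sess = onnxruntime.InferenceSession("model.onnx")  # placeholder path
    input_name = sess.get_inputs()[0].name
    data = np.random.rand(1, 3, 224, 224).astype(np.float32)
    outputs = sess.run(None, {input_name: data})  # None -> return all outputs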

integrationtest_ubuntu_cpu_dist_kvstore() {
217 changes: 156 additions & 61 deletions python/mxnet/contrib/onnx/mx2onnx/_op_translations.py
@@ -191,7 +191,7 @@ def convert_weights_and_inputs(node, **kwargs):
data_type=data_type,
dims=dims,
vals=np_arr.flatten().tolist(),
raw=False,
raw=False
)
)

@@ -478,36 +478,73 @@ def convert_pad(node, **kwargs):
"""Map MXNet's pad operator attributes to onnx's Pad operator
and return the created node.
"""
opset_version = kwargs["opset_version"]
name, input_nodes, attrs = get_inputs(node, kwargs)

mxnet_pad_width = convert_string_to_list(attrs.get("pad_width"))
onnx_pad_width = transform_padding(mxnet_pad_width)

pad_mode = attrs.get("mode")
pad_value = np.float32(attrs.get("constant_value", 0.0))

if pad_mode == "constant":
pad_value = float(attrs.get("constant_value")) \
if "constant_value" in attrs else 0.0
node = onnx.helper.make_node(
'Pad',
inputs=input_nodes,
outputs=[name],
mode='constant',
value=pad_value,
pads=onnx_pad_width,
name=name
)
if opset_version >= 11:
# starting with opset 11, pads and constant_value are inputs instead of attributes
from onnx.helper import make_tensor, make_tensor_value_info
initializer = kwargs["initializer"]
pads_input_name = name + "_pads"
pads_input_type = onnx.TensorProto.INT64
pads_input_shape = np.shape(np.array(onnx_pad_width))
pads_value_node = make_tensor_value_info(pads_input_name, pads_input_type, pads_input_shape)
pads_tensor_node = make_tensor(pads_input_name, pads_input_type, pads_input_shape, onnx_pad_width)
initializer.append(pads_tensor_node)
input_nodes.append(pads_input_name)

if pad_mode == "constant":
const_input_name = name + "_constant"
const_input_type = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[pad_value.dtype]
const_value_node = make_tensor_value_info(const_input_name, const_input_type, ())
const_tensor_node = make_tensor(const_input_name, const_input_type, (), [pad_value])
initializer.append(const_tensor_node)
input_nodes.append(const_input_name)
pad_node = onnx.helper.make_node(
"Pad",
input_nodes,
[name],
mode=pad_mode,
name=name
)
return [pads_value_node, const_value_node, pad_node]
else:
pad_node = onnx.helper.make_node(
"Pad",
input_nodes,
[name],
mode=pad_mode,
name=name
)
return [pads_value_node, pad_node]
else:
node = onnx.helper.make_node(
'Pad',
inputs=input_nodes,
outputs=[name],
mode=pad_mode,
pads=onnx_pad_width,
name=name
)

return [node]
if pad_mode == "constant":
node = onnx.helper.make_node(
'Pad',
inputs=input_nodes,
outputs=[name],
mode='constant',
value=pad_value,
pads=onnx_pad_width,
name=name
)
return [node]
else:
node = onnx.helper.make_node(
'Pad',
inputs=input_nodes,
outputs=[name],
mode=pad_mode,
pads=onnx_pad_width,
name=name
)
return [node]
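
For reference, here is a standalone sketch (not part of the diff) of the two Pad signatures the branches above emit; node, tensor, and value names are illustrative:

    import onnx
    from onnx import helper, TensorProto

    pads = [0, 0, 1, 1, 0, 0, 1, 1]  # illustrative NCHW padding: 1 on H and W

    # opset < 11: pads and the fill value are node attributes
    pad_v2 = helper.make_node("Pad", ["x"], ["y"],
                              mode="constant", pads=pads, value=0.0)

    # opset >= 11: pads (and constant_value) are inputs, typically initializers
    pads_init = helper.make_tensor("pads", TensorProto.INT64, [len(pads)], pads)
    value_init = helper.make_tensor("value", TensorProto.FLOAT, [], [0.0])
    pad_v11 = helper.make_node("Pad", ["x", "pads", "value"], ["y"],
                               mode="constant")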

def create_helper_tensor_node(input_vals, output_name, kwargs):
"""create extra tensor node from numpy values"""
@@ -766,6 +803,7 @@ def convert_pooling(node, **kwargs):
MaxPool/AveragePool/GlobalMaxPool/GlobalAveragePool operators
based on the input node's attributes and return the created node.
"""
opset_version = kwargs["opset_version"]
name, input_nodes, attrs = get_inputs(node, kwargs)

kernel = eval(attrs["kernel"])
@@ -777,12 +815,12 @@
pooling_convention = attrs.get('pooling_convention', 'valid')
ceil_mode = False
if pooling_convention == 'full':
if onnx.__version__ < "1.5.0":
if opset_version < 10:
pooling_warning = "Pooling: ONNX lower than 1.5.0 doesn't support pooling_convention. " \
"This might lead to shape or accuracy issues. " \
"https://github.com/onnx/onnx/issues/549"
logging.warning(pooling_warning)
ceil_mode = True
logging.warning(pooling_warning)

pad_dims = list(parse_helper(attrs, "pad", [0, 0]))
pad_dims = pad_dims + pad_dims
@@ -822,7 +860,7 @@
name=name
)
else:
if onnx.__version__ >= "1.5.0":
if opset_version >= 10:
node = onnx.helper.make_node(
pool_types[pool_type],
input_nodes, # input
@@ -1353,17 +1391,35 @@ def convert_dropout(node, **kwargs):
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
opset_version = kwargs["opset_version"]

probability = float(attrs.get("p", 0.5))

dropout_node = onnx.helper.make_node(
"Dropout",
input_nodes,
[name],
ratio=probability,
name=name
)
return [dropout_node]
if opset_version >= 12:
# opset >= 12 requires the ratio to be an input
initializer = kwargs["initializer"]
ratio_input_name = name + "_ratio"
value_node = onnx.helper.make_tensor_value_info(ratio_input_name,
onnx.TensorProto.FLOAT, ())
tensor_node = onnx.helper.make_tensor(ratio_input_name, onnx.TensorProto.FLOAT,
(), [probability])
initializer.append(tensor_node)
dropout_node = onnx.helper.make_node(
"Dropout",
[input_nodes[0], ratio_input_name],
[name],
name=name
)
return [value_node, dropout_node]
else:
dropout_node = onnx.helper.make_node(
"Dropout",
input_nodes,
[name],
ratio=probability,
name=name
)
return [dropout_node]
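
The Dropout change follows the same attribute-to-input pattern; a standalone sketch (imports as in the Pad sketch above, names illustrative):

    from onnx import helper, TensorProto

    # opset < 12: ratio is an attribute
    drop_v7 = helper.make_node("Dropout", ["x"], ["y"], ratio=0.5)

    # opset >= 12: ratio is an optional second input (opset 12 also accepts an
    # optional training_mode input, which the converter leaves at its default)
    ratio_init = helper.make_tensor("ratio", TensorProto.FLOAT, [], [0.5])
    drop_v12 = helper.make_node("Dropout", ["x", "ratio"], ["y"])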


@mx_op.register("Flatten")
@@ -1379,19 +1435,46 @@ def convert_clip(node, **kwargs):
and return the created node.
"""
name, input_nodes, attrs = get_inputs(node, kwargs)
opset_version = kwargs["opset_version"]

a_min = np.float(attrs.get('a_min', -np.inf))
a_max = np.float(attrs.get('a_max', np.inf))
a_min = float(attrs.get('a_min', -np.inf))
a_max = float(attrs.get('a_max', np.inf))

clip_node = onnx.helper.make_node(
"Clip",
input_nodes,
[name],
name=name,
min=a_min,
max=a_max
)
return [clip_node]
if opset_version >= 11:
# opset >= 11 requires min/max to be inputs
initializer = kwargs["initializer"]
min_input_name = name + "_min"
max_input_name = name + "_max"
min_value_node = onnx.helper.make_tensor_value_info(min_input_name,
onnx.TensorProto.FLOAT, ())
max_value_node = onnx.helper.make_tensor_value_info(max_input_name,
onnx.TensorProto.FLOAT, ())
min_tensor_node = onnx.helper.make_tensor(min_input_name, onnx.TensorProto.FLOAT,
(), [a_min])
max_tensor_node = onnx.helper.make_tensor(max_input_name, onnx.TensorProto.FLOAT,
(), [a_max])
initializer.append(min_tensor_node)
initializer.append(max_tensor_node)
input_nodes.append(min_input_name)
input_nodes.append(max_input_name)
clip_node = onnx.helper.make_node(
"Clip",
input_nodes,
[name],
name=name
)
return [min_value_node, max_value_node, clip_node]

else:
clip_node = onnx.helper.make_node(
"Clip",
input_nodes,
[name],
name=name,
min=a_min,
max=a_max
)
return [clip_node]
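
Likewise for Clip, sketched standalone with illustrative names:

    from onnx import helper, TensorProto

    # opset < 11: min/max are attributes
    clip_v6 = helper.make_node("Clip", ["x"], ["y"], min=0.0, max=6.0)

    # opset >= 11: min/max are optional inputs
    min_init = helper.make_tensor("min", TensorProto.FLOAT, [], [0.0])
    max_init = helper.make_tensor("max", TensorProto.FLOAT, [], [6.0])
    clip_v11 = helper.make_node("Clip", ["x", "min", "max"], ["y"])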


def scalar_op_helper(node, op_name, **kwargs):
@@ -2496,22 +2579,34 @@ def convert_topk(node, **kwargs):
else:
raise NotImplementedError("ONNX expects both value and indices as output")

export_nodes = []

k = np.asarray([k], dtype=np.int)
k_node = create_helper_tensor_node(k, name + '__k', kwargs)
export_nodes.extend(k_node)
k_node = k_node[-1].name

input_node = input_nodes[0]
topk_node = onnx.helper.make_node(
"TopK",
[input_node, k_node],
outputs,
axis=axis,
name=name
)
export_nodes.extend([topk_node])
opset_version = kwargs['opset_version']
if opset_version >= 10:
from onnx.helper import make_tensor, make_tensor_value_info
initializer = kwargs["initializer"]
k_input_name = name + "_k"
k_input_type = onnx.TensorProto.INT64
k_value_node = make_tensor_value_info(k_input_name, k_input_type, ())
k_tensor_node = make_tensor(k_input_name, k_input_type, (), (k, ))
initializer.append(k_tensor_node)
input_nodes.append(k_input_name)

topk_node = onnx.helper.make_node(
"TopK",
input_nodes,
outputs,
axis=axis,
name=name
)
return [k_value_node, topk_node]
else:
topk_node = onnx.helper.make_node(
"TopK",
input_nodes,
outputs,
axis=axis,
k=k,
name=name
)

return [topk_node]
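
And for TopK; note the ONNX spec expects k as a 1-D int64 tensor of shape [1] from opset 10 on, which is the shape the sketch below uses (names are illustrative):

    from onnx import helper, TensorProto

    # opset 1: k is an attribute
    topk_v1 = helper.make_node("TopK", ["x"], ["values", "indices"],
                               axis=-1, k=5)

    # opset >= 10: k is a 1-D int64 input tensor
    k_init = helper.make_tensor("k", TensorProto.INT64, [1], [5])
    topk_v10 = helper.make_node("TopK", ["x", "k"], ["values", "indices"],
                                axis=-1)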

10 changes: 7 additions & 3 deletions python/mxnet/contrib/onnx/mx2onnx/export_model.py
@@ -29,7 +29,7 @@


def export_model(sym, params, input_shape, input_type=np.float32,
onnx_file_path='model.onnx', verbose=False):
onnx_file_path='model.onnx', verbose=False, opset_version=None):
"""Exports the MXNet model file, passed as a parameter, into ONNX model.
Accepts both symbol,parameter objects as well as json and params filepaths as input.
Operator support and coverage -
@@ -63,11 +63,15 @@ def export_model(sym, params, input_shape, input_type=np.float32,

try:
from onnx import helper, mapping
from onnx.defs import onnx_opset_version
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. "
+ "Instructions to install - https://github.com/onnx/onnx")

converter = MXNetGraph()
if opset_version is None:
# default is to use latest opset version the onnx package supports
opset_version = onnx_opset_version()

data_format = np.dtype(input_type)
# if input parameters are strings(file paths), load files and create symbol parameter objects
@@ -76,11 +80,11 @@
sym_obj, params_obj = load_module(sym, params)
onnx_graph = converter.create_onnx_graph_proto(sym_obj, params_obj, input_shape,
mapping.NP_TYPE_TO_TENSOR_TYPE[data_format],
verbose=verbose)
verbose=verbose, opset_version=opset_version)
elif isinstance(sym, symbol.Symbol) and isinstance(params, dict):
onnx_graph = converter.create_onnx_graph_proto(sym, params, input_shape,
mapping.NP_TYPE_TO_TENSOR_TYPE[data_format],
verbose=verbose)
verbose=verbose, opset_version=opset_version)
else:
raise ValueError("Input sym and params should either be files or objects")
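
A usage sketch for the new parameter (model file names and the input shape are placeholders); leaving opset_version unset now defaults to the latest opset the installed onnx package supports:

    import numpy as np
    from mxnet.contrib import onnx as onnx_mxnet

    # Placeholder model files; opset_version=12 pins the target opset explicitly
    onnx_file = onnx_mxnet.export_model(
        "resnet-symbol.json", "resnet-0000.params",
        [(1, 3, 224, 224)], np.float32,
        onnx_file_path="resnet.onnx", opset_version=12)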
