57 changes: 45 additions & 12 deletions python/tvm/relay/frontend/tflite.py
@@ -34,6 +34,7 @@
from .common import ExprTable
from .common import infer_shape as _infer_shape
from .common import lstm_cell, to_int_list, shape_of, try_infer_value
from .common import set_span
from .tflite_flexbuffer import FlexBufferDecoder

__all__ = ["from_tflite"]
@@ -275,6 +276,11 @@ def convert_op_to_relay(self):
if ret is None:
continue

output_names = ", ".join(
[get_tensor_name(self.subgraph, tensor.tensor_idx) for tensor in output_tensors]
)
ret = set_span(ret, f"{output_names}")

if len(output_tensors) == 1:
tensor_idx = output_tensors[0].tensor_idx
self.exp_tab.set_expr(get_tensor_name(self.subgraph, tensor_idx), ret)
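
For reference, a minimal sketch (not part of this diff) of what the set_span call above does, assuming span filling is enabled (the default): it tags the converted Relay expression, and any untagged children, with a span carrying the TFLite output tensor name. The name "relu_out" below is made up for illustration:

from tvm import relay
from tvm.relay.frontend.common import set_span

x = relay.var("x", shape=(1, 4), dtype="float32")
y = set_span(relay.nn.relu(x), "relu_out")  # "relu_out" is a hypothetical tensor name
# The span records which source tensor the op was converted from.
print(y.span.source_name.name)  # -> "relu_out"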
@@ -1553,7 +1559,9 @@ def convert_gather(self, op):
else:
indices_val = self.get_tensor_value(indices)
indices_expr = self.exp_tab.new_const(
indices_val, dtype=self.get_tensor_type_str(indices_type)
indices_val,
dtype=self.get_tensor_type_str(indices_type),
source_name=indices.tensor.Name(),
)
indices_shape = list(indices_val.shape)
indices_len = len(indices_shape)
@@ -1954,7 +1962,9 @@ def convert_fully_connected(self, op):
weight_expr = self.get_expr(weight_tensor.tensor_idx)
else:
weight_value = self.get_tensor_value(weight_tensor)
weight_expr = self.exp_tab.new_const(weight_value, dtype=weight_tensor_type_str)
weight_expr = self.exp_tab.new_const(
weight_value, dtype=weight_tensor_type_str, source_name=weight_tensor.tensor.Name()
)
weight_shape = _infer_shape(weight_expr)

if input_tensor.qnn_params:
@@ -1983,7 +1993,9 @@ def convert_fully_connected(self, op):
bias_expr = self.get_expr(bias_tensor.tensor_idx)
else:
bias_expr = self.exp_tab.new_const(
self.get_tensor_value(bias_tensor), dtype=bias_tensor_type_str
self.get_tensor_value(bias_tensor),
dtype=bias_tensor_type_str,
source_name=bias_tensor.tensor.Name(),
)
out = _op.nn.bias_add(out, bias_expr)
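
The same pattern repeats for every constant in this file: the original TFLite tensor name is passed to ExprTable.new_const via the new source_name argument, so the created relay.const carries a span as well. A hedged sketch of the equivalent effect done by hand, where "filter_weight" is a made-up name:

import numpy as np
from tvm import relay
from tvm.relay.frontend.common import set_span

weight = relay.const(np.ones((2, 2), dtype="float32"))
weight = set_span(weight, "filter_weight")  # what new_const presumably does with source_name
assert weight.span.source_name.name == "filter_weight"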

@@ -2195,7 +2207,9 @@ def convert_conv(self, op, conv_type):
else:
weight_value = weight_value.transpose((1, 2, 3, 0))

weight_expr = self.exp_tab.new_const(weight_value, dtype=weight_tensor_type_str)
weight_expr = self.exp_tab.new_const(
weight_value, dtype=weight_tensor_type_str, source_name=weight_tensor.tensor.Name()
)

if padding == Padding.VALID:
pass
@@ -2236,7 +2250,9 @@ def convert_conv(self, op, conv_type):
bias_expr = self.get_expr(bias_tensor.tensor_idx)
else:
bias_expr = self.exp_tab.new_const(
self.get_tensor_value(bias_tensor), dtype=bias_tensor_type_str
self.get_tensor_value(bias_tensor),
dtype=bias_tensor_type_str,
source_name=bias_tensor.tensor.Name(),
)
channel_axis = 3
out = _op.nn.bias_add(out, bias_expr, axis=channel_axis)
@@ -3043,7 +3059,9 @@ def convert_prelu(self, op):
alpha_tensor_type = alpha_tensor.tensor.Type()
alpha_tensor_type_str = self.get_tensor_type_str(alpha_tensor_type)
alpha_expr = self.exp_tab.new_const(
self.get_tensor_value(alpha_tensor), dtype=alpha_tensor_type_str
self.get_tensor_value(alpha_tensor),
dtype=alpha_tensor_type_str,
source_name=alpha_tensor.tensor.Name(),
)
in_expr = self.get_expr(input_tensor.tensor_idx)
data_shape = to_int_list(self.get_tensor_shape(input_tensor))
@@ -3119,7 +3137,9 @@ def convert_transpose_conv(self, op):
# Relay weights layout should be different from kernel_layout - it should be IOHW
weight_value_iohw = np.transpose(weight_value_ohwi, (3, 0, 1, 2))
weight_expr_iohw = self.exp_tab.new_const(
weight_value_iohw, dtype=weight_tensor_type_str
weight_value_iohw,
dtype=weight_tensor_type_str,
source_name=weights_tensor.tensor.Name(),
)

# Output shape value
@@ -3181,7 +3201,9 @@ def convert_transpose_conv(self, op):
bias_expr = self.get_expr(bias_tensor.tensor_idx)
else:
bias_expr = self.exp_tab.new_const(
self.get_tensor_value(bias_tensor), dtype=bias_tensor_type_str
self.get_tensor_value(bias_tensor),
dtype=bias_tensor_type_str,
source_name=bias_tensor.tensor.Name(),
)
channel_axis = 3
out = _op.nn.bias_add(out, bias_expr, axis=channel_axis)
@@ -3258,7 +3280,9 @@ def convert_dequantize(self, op):
if input_tensor.tensor.Type() == TensorType.FLOAT16:
dtype = self.get_tensor_type_str(input_tensor.tensor.Type())
input_value = self.get_tensor_value(input_tensor)
in_expr = self.exp_tab.new_const(input_value, dtype=dtype)
in_expr = self.exp_tab.new_const(
input_value, dtype=dtype, source_name=input_tensor.tensor.Name()
)
out = relay.cast(in_expr, dtype="float32")
return out

@@ -3292,7 +3316,9 @@ def convert_detection_postprocess(self, op):
anchor_values = self.get_tensor_value(inputs[2])
anchor_boxes = len(anchor_values)
anchor_type = self.get_tensor_type_str(inputs[2].tensor.Type())
anchor_expr = self.exp_tab.new_const(anchor_values, dtype=anchor_type)
anchor_expr = self.exp_tab.new_const(
anchor_values, dtype=anchor_type, source_name=inputs[2].tensor.Name()
)

if inputs[0].qnn_params:
loc_prob = _qnn.op.dequantize(
@@ -3685,7 +3711,11 @@ def get_tensor_expr(self, tensor, is_sparse=False):
expr = self.get_expr(tensor.tensor_idx)
else:
type_str = self.get_tensor_type_str(tensor.tensor.Type())
expr = self.exp_tab.new_const(self.get_tensor_value(tensor, is_sparse), dtype=type_str)
expr = self.exp_tab.new_const(
self.get_tensor_value(tensor, is_sparse),
dtype=type_str,
source_name=tensor.tensor.Name(),
)
return expr

def get_tensor_shape(self, tensor_wrapper):
@@ -4022,7 +4052,10 @@ def from_tflite(model, shape_dict=None, dtype_dict=None, op_converter=OperatorCo
model_input_name = get_tensor_name(subgraph, model_input)
shape = _shape_dict[model_input_name] if model_input_name in _shape_dict else None
dtype = _dtype_dict[model_input_name] if model_input_name in _dtype_dict else "float32"
exp_tab.set_expr(model_input_name, _expr.var(model_input_name, shape=shape, dtype=dtype))
input_var = set_span(
_expr.var(model_input_name, shape=shape, dtype=dtype), model_input_name
)
exp_tab.set_expr(model_input_name, input_var)

# op code in model
op_converter = op_converter(model, subgraph, exp_tab)
173 changes: 169 additions & 4 deletions tests/python/frontend/tflite/test_forward.py
@@ -35,9 +35,10 @@
import tvm
import tvm.relay.testing.tf as tf_testing
from tvm.contrib.download import download_testdata
from tvm import relay
from tvm import relay, ir
from tvm.contrib import graph_executor
from tflite.BuiltinOperator import BuiltinOperator
from relay.utils.tag_span import _set_span, _create_span, _verify_structural_equal_with_span


try:
@@ -213,9 +214,15 @@ def run_tvm_graph(
shape_dict[node] = input_data[i].shape
dtype_dict[node] = input_data[i].dtype.name

mod, params = relay.frontend.from_tflite(
tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict, op_converter=op_converter
)
with tvm.testing.disable_span_filling():
mod, params = relay.frontend.from_tflite(
tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict, op_converter=op_converter
)
with tvm.testing.enable_span_filling():
mod_with_span, _ = relay.frontend.from_tflite(
tflite_model, shape_dict=shape_dict, dtype_dict=dtype_dict, op_converter=op_converter
)
assert tvm.ir.structural_equal(mod["main"], mod_with_span["main"])

if mode in ["debug", "vm"]:
inputs = []
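
A note on why the assertion above holds: tvm.ir.structural_equal ignores span metadata, so the span-filled module compares equal to the plain one as long as tagging did not change the graph itself. A minimal illustration, assuming set_span from tvm.relay.frontend.common:

import tvm
from tvm import relay
from tvm.relay.frontend.common import set_span

x = relay.var("x", shape=(1,), dtype="float32")
plain = relay.nn.relu(x)
tagged = set_span(relay.nn.relu(x), "relu_out")  # hypothetical source name
# Spans are metadata only; structural comparison still succeeds.
assert tvm.ir.structural_equal(plain, tagged)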
@@ -5139,6 +5146,161 @@ def test_forward_nms_v5():
_test_nms_v5((1000, 4), (1000,), 0.7, 0.3, 50)


#######################################################################
# Test structural_equal and span of a model
# -----------------------------------------
def test_structure_and_span():
"""Test Structure and span of frequently-used models"""

def _verify(res_fptr, golden_fptr):
with tvm.testing.enable_span_filling():
with_span = res_fptr()
with tvm.testing.disable_span_filling():
without_span = res_fptr()
assert tvm.ir.structural_equal(with_span, without_span)
_verify_structural_equal_with_span(with_span, golden_fptr())

def _tf_to_tflite(
input_tensors, output_tensors, init_global_variables=False, experimental_new_converter=False
):
with tf.Session() as sess:
if init_global_variables:
sess.run(variables.global_variables_initializer())
converter = tf.lite.TFLiteConverter.from_session(sess, input_tensors, output_tensors)
converter.experimental_new_converter = experimental_new_converter

tflite_model_buffer = converter.convert()

try:
import tflite.Model

tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buffer, 0)
except AttributeError:
import tflite

tflite_model = tflite.Model.GetRootAsModel(tflite_model_buffer, 0)
except ImportError:
raise ImportError("The tflite package must be installed")
return tflite_model

def _test_conv2d_bias_add_span():
def _res():
in_shape = (1, 5, 5, 1)
kernel_shape = (2, 2, 1, 2)
kernel_in = np.ones(kernel_shape)

with tf.Graph().as_default():
x = array_ops.placeholder(shape=in_shape, dtype="float32", name="input")
kernel = tf.constant(kernel_in, dtype=tf.float32, name="filter_weight")
tf_model = tf.nn.conv2d(
x, kernel, strides=[1, 1, 1, 1], padding="VALID", name="conv2d"
)
tflite_model = _tf_to_tflite([x], [tf_model])

mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": in_shape},
dtype_dict={"input": "float32"},
op_converter=relay.frontend.tflite.OperatorConverter,
)
return mod["main"]

def _golden():
in_input = relay.var(
"input", relay.TensorType([1, 5, 5, 1]), span=_create_span("input")
)
weight = relay.var(
"_param_1", relay.TensorType([2, 2, 1, 2]), span=_create_span("filter_weight")
)
bias = relay.var("_param_2", relay.TensorType([2]), span=_create_span("conv2d_bias"))
conv2d = _set_span(
relay.nn.conv2d(
in_input,
weight,
channels=2,
kernel_size=[2, 2],
data_layout="NHWC",
kernel_layout="HWIO",
),
"conv2d",
)
bias_add = _set_span(relay.nn.bias_add(conv2d, bias, axis=3), "conv2d")
attrs = ir.make_node("DictAttrs", **{"output_tensor_names": ["conv2d"]})
func = relay.Function([in_input, weight, bias], bias_add, attrs=attrs)
mod = ir.IRModule.from_expr(func)
return mod["main"]

_verify(_res, _golden)

def _test_fully_connected_bias_add_span():
def _res():
in_shape = (1, 10)
kernel_shape = (10, 10)
kernel_in = np.ones(kernel_shape)

with tf.Graph().as_default():
x = array_ops.placeholder(shape=in_shape, dtype="float32", name="input")
weight = tf.constant(kernel_in, dtype=tf.float32, name="filter_weight")
tf_model = math_ops.mat_mul(x, weight, name="dense")
tflite_model = _tf_to_tflite([x], [tf_model])

mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": in_shape},
dtype_dict={"input": "float32"},
op_converter=relay.frontend.tflite.OperatorConverter,
)
return mod["main"]

def _golden():
in_input = relay.var("input", relay.TensorType([1, 10]), span=_create_span("input"))
weight = relay.var(
"_param_1", relay.TensorType([10, 10]), span=_create_span("filter_weight/transpose")
)
bias = relay.var("_param_2", relay.TensorType([10]), span=_create_span("dense_bias"))
reshape = _set_span(relay.reshape(in_input, [-1, 10]), "dense")
dense = _set_span(relay.nn.dense(reshape, weight, units=10), "dense")
bias_add = _set_span(relay.nn.bias_add(dense, bias), "dense")
attrs = ir.make_node("DictAttrs", **{"output_tensor_names": ["dense"]})
func = relay.Function([in_input, weight, bias], bias_add, attrs=attrs)
mod = ir.IRModule.from_expr(func)
return mod["main"]

_verify(_res, _golden)

def _test_reshape_span():
def _res():
in_shape = (1, 10)
output_shape = (2, 5)

with tf.Graph().as_default():
x = array_ops.placeholder(shape=in_shape, dtype="float32", name="input")
tf_model = array_ops.reshape(x, output_shape, "reshape")
tflite_model = _tf_to_tflite([x], [tf_model])

mod, _ = relay.frontend.from_tflite(
tflite_model,
shape_dict={"input": in_shape},
dtype_dict={"input": "float32"},
op_converter=relay.frontend.tflite.OperatorConverter,
)
return mod["main"]

def _golden():
in_input = relay.var("input", relay.TensorType([1, 10]), span=_create_span("input"))
reshape = _set_span(relay.reshape(in_input, [2, 5]), "reshape")
attrs = ir.make_node("DictAttrs", **{"output_tensor_names": ["reshape"]})
func = relay.Function([in_input], reshape, attrs=attrs)
mod = ir.IRModule.from_expr(func)
return mod["main"]

_verify(_res, _golden)

_test_conv2d_bias_add_span()
_test_fully_connected_bias_add_span()
_test_reshape_span()


#######################################################################
# Main
# ----
@@ -5239,6 +5401,9 @@ def test_forward_nms_v5():
# Overwrite Converter
test_custom_op_converter()

# test structural_equal and span information
test_structure_and_span()

# End to End
test_forward_mobilenet_v1()
test_forward_mobilenet_v2()