From 8dacecf933614db90033e5b665fee47b235afb9c Mon Sep 17 00:00:00 2001
From: Luke Hutton
Date: Thu, 10 Sep 2020 13:09:40 +0100
Subject: [PATCH] [BUG][ConvertLayout] Fix qnn.conv2d layout conversion "too
 many values to unpack"

This patch follows a previous bugfix in #6419. That fix overlooked that,
for qnn.conv2d, tinfos also contains the qnn parameters (scales and zero
points), so unpacking it into exactly two values raises "too many values
to unpack". We therefore extract data_info and weight_info by index
instead.

Change-Id: Ib0ad01f427543371380d0bb604a77b5e0ec1103d
---
 python/tvm/relay/qnn/op/layout_conversions.py |  3 +-
 .../relay/test_pass_convert_op_layout.py      | 46 +++++++++++++++++++
 2 files changed, 48 insertions(+), 1 deletion(-)

diff --git a/python/tvm/relay/qnn/op/layout_conversions.py b/python/tvm/relay/qnn/op/layout_conversions.py
index 4105172f3c22..a7c90daf36a4 100644
--- a/python/tvm/relay/qnn/op/layout_conversions.py
+++ b/python/tvm/relay/qnn/op/layout_conversions.py
@@ -63,7 +63,8 @@ def convert_qnn_conv2d(attrs, inputs, tinfos, desired_layouts):
         return relay.qnn.op.conv2d(*inputs, **new_attrs)
     if desired_data_layout == "NHWC":
         # Check for depthwise convolution.
-        data_info, weight_info = tinfos
+        data_info = tinfos[0]
+        weight_info = tinfos[1]
         if is_depthwise_conv2d(
             data_info.shape,
             attrs["data_layout"],
diff --git a/tests/python/relay/test_pass_convert_op_layout.py b/tests/python/relay/test_pass_convert_op_layout.py
index e4771a0021cf..d2a13298d6ea 100644
--- a/tests/python/relay/test_pass_convert_op_layout.py
+++ b/tests/python/relay/test_pass_convert_op_layout.py
@@ -749,6 +749,51 @@ def expected():
     assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
 
 
+def test_qnn_conv_nhwc_convert_layout():
+    def before():
+        x = relay.var("x", shape=(1, 64, 56, 56), dtype='int8')
+        weight = relay.var('weight', shape=(64, 64, 3, 3), dtype='int8')
+        y = relay.qnn.op.conv2d(x, weight,
+                                relay.const(1, 'int32'),
+                                relay.const(1, 'int32'),
+                                relay.const(1, 'float32'),
+                                relay.const(1, 'float32'),
+                                channels=64,
+                                kernel_size=(3, 3),
+                                padding=(1, 1),
+                                data_layout='NCHW',
+                                kernel_layout='OIHW')
+        y = relay.nn.relu(y)
+        y = relay.Function([x, weight], y)
+        return y
+
+    def expected():
+        x = relay.var("x", shape=(1, 64, 56, 56), dtype='int8')
+        weight = relay.var('weight', shape=(64, 64, 3, 3), dtype='int8')
+        x = relay.layout_transform(x, 'NCHW', 'NHWC')
+        weight = relay.layout_transform(weight, 'OIHW', 'HWIO')
+        y = relay.qnn.op.conv2d(x, weight,
+                                relay.const(1, 'int32'),
+                                relay.const(1, 'int32'),
+                                relay.const(1, 'float32'),
+                                relay.const(1, 'float32'),
+                                channels=64,
+                                kernel_size=(3, 3),
+                                padding=(1, 1),
+                                data_layout="NHWC",
+                                kernel_layout="HWIO")
+        y = relay.nn.relu(y)
+        y = relay.layout_transform(y, 'NHWC', 'NCHW')
+        y = relay.Function(relay.analysis.free_vars(y), y)
+        return y
+
+    a = before()
+    a = run_opt_pass(a, transform.ConvertLayout({'qnn.conv2d': ['NHWC', 'default']}))
+    b = run_opt_pass(expected(), transform.InferType())
+
+    assert tvm.ir.structural_equal(a, b), "Actual = \n" + str(a)
+
+
 def test_conv_convert_kernel_layout():
     """ Check that convolution kernel layout is correctly transformed. """
 
@@ -951,6 +996,7 @@ def expected():
     test_qnn_conv_requantize_convert_layout()
     test_qnn_conv_concat_convert_layout()
     test_qnn_conv_add_convert_layout()
+    test_qnn_conv_nhwc_convert_layout()
     test_conv_convert_kernel_layout()
     test_conv_transpose_convert_layout()
     test_default_keyword()
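
For context, a minimal standalone sketch of the failure this patch fixes,
assuming tinfos for qnn.conv2d carries type info for all six inputs (data,
weight, input_zero_point, kernel_zero_point, input_scale, kernel_scale);
the string entries below are illustrative stand-ins, not TVM objects:

    # Illustrative stand-in for the tinfos list that ConvertLayout passes
    # to convert_qnn_conv2d; only the first two entries are the tensor
    # type infos the depthwise check needs.
    tinfos = ["data_info", "weight_info",
              "input_zp", "kernel_zp", "input_scale", "kernel_scale"]

    try:
        data_info, weight_info = tinfos  # old code: expects exactly 2 items
    except ValueError as err:
        print(err)  # prints: too many values to unpack (expected 2)

    # The fix: index the entries we need and ignore the qnn parameters.
    data_info = tinfos[0]
    weight_info = tinfos[1]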