From 62bf4a1d5f12560877d1039a38ab9af165931167 Mon Sep 17 00:00:00 2001 From: jonghewk Date: Mon, 20 Nov 2023 18:41:20 +0900 Subject: [PATCH 1/5] fix conv3d depthwise weight shape --- src/relay/op/nn/convolution.cc | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/src/relay/op/nn/convolution.cc b/src/relay/op/nn/convolution.cc index 13c7f74c7ecd..a63911fad974 100644 --- a/src/relay/op/nn/convolution.cc +++ b/src/relay/op/nn/convolution.cc @@ -460,8 +460,32 @@ bool Conv3DRel(const Array& types, int num_inputs, const Attrs& attrs, if (param->kernel_size.defined() && param->channels.defined()) { ICHECK_EQ(param->kernel_size.size(), 3); ICHECK_EQ(param->dilation.size(), 3); - Array wshape({param->channels, indexdiv(dshape_ncdhw[1], param->groups), - param->kernel_size[0], param->kernel_size[1], param->kernel_size[2]}); + + bool is_depthwise = false; + if (param->groups > 1) { + if (!(weight && weight->shape.defined())) { + reporter->GetDiagCtx().Emit( + Diagnostic::Error(reporter->GetSpan()) + << "Weight shape must be specified when groups is greater than 1."); + return false; + } + + Array wshape_oidhw = trans_kernel_layout.ForwardShape(weight->shape); + if (tvm::tir::ExprDeepEqual()(param->groups, dshape_ncdhw[1]) && + tvm::tir::ExprDeepEqual()(param->groups, wshape_oidhw[0])) { + is_depthwise = true; + } + } + + Array wshape; + if (is_depthwise) { + auto channel_multiplier = indexdiv(param->channels, dshape_ncdhw[1]); + wshape = {dshape_ncdhw[1], channel_multiplier, param->kernel_size[0], param->kernel_size[1], param->kernel_size[2]}; + } else { + wshape = {param->channels, indexdiv(dshape_ncdhw[1], param->groups), + param->kernel_size[0], param->kernel_size[1], param->kernel_size[2]}; + } + wshape = trans_kernel_layout.BackwardShape(wshape); channels = param->channels; dilated_ksize_z = 1 + (param->kernel_size[0] - 1) * param->dilation[0]; From a16f4a05919804d386ab3461a8acaea0d5d4e693 Mon Sep 17 00:00:00 2001 From: 
jonghewk Date: Tue, 21 Nov 2023 10:45:37 +0900 Subject: [PATCH 2/5] apply cpplint --- src/relay/op/nn/convolution.cc | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/relay/op/nn/convolution.cc b/src/relay/op/nn/convolution.cc index a63911fad974..67f3231c180a 100644 --- a/src/relay/op/nn/convolution.cc +++ b/src/relay/op/nn/convolution.cc @@ -480,10 +480,11 @@ bool Conv3DRel(const Array& types, int num_inputs, const Attrs& attrs, Array wshape; if (is_depthwise) { auto channel_multiplier = indexdiv(param->channels, dshape_ncdhw[1]); - wshape = {dshape_ncdhw[1], channel_multiplier, param->kernel_size[0], param->kernel_size[1], param->kernel_size[2]}; + wshape = {dshape_ncdhw[1], channel_multiplier, + param->kernel_size[0], param->kernel_size[1], param->kernel_size[2]}; } else { wshape = {param->channels, indexdiv(dshape_ncdhw[1], param->groups), - param->kernel_size[0], param->kernel_size[1], param->kernel_size[2]}; + param->kernel_size[0], param->kernel_size[1], param->kernel_size[2]}; } wshape = trans_kernel_layout.BackwardShape(wshape); From 1bc33ba1fe95d25eade057a6423f64707cc66265 Mon Sep 17 00:00:00 2001 From: jonghewk Date: Tue, 21 Nov 2023 13:36:39 +0900 Subject: [PATCH 3/5] apply clang-format --- src/relay/op/nn/convolution.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/relay/op/nn/convolution.cc b/src/relay/op/nn/convolution.cc index 67f3231c180a..75e28c8d078e 100644 --- a/src/relay/op/nn/convolution.cc +++ b/src/relay/op/nn/convolution.cc @@ -480,11 +480,11 @@ bool Conv3DRel(const Array& types, int num_inputs, const Attrs& attrs, Array wshape; if (is_depthwise) { auto channel_multiplier = indexdiv(param->channels, dshape_ncdhw[1]); - wshape = {dshape_ncdhw[1], channel_multiplier, - param->kernel_size[0], param->kernel_size[1], param->kernel_size[2]}; + wshape = {dshape_ncdhw[1], channel_multiplier, param->kernel_size[0], param->kernel_size[1], + param->kernel_size[2]}; } else { - wshape = 
{param->channels, indexdiv(dshape_ncdhw[1], param->groups), - param->kernel_size[0], param->kernel_size[1], param->kernel_size[2]}; + wshape = {param->channels, indexdiv(dshape_ncdhw[1], param->groups), param->kernel_size[0], + param->kernel_size[1], param->kernel_size[2]}; } wshape = trans_kernel_layout.BackwardShape(wshape); From a7ec46c218299936c491e127ae507b63fc3d0d12 Mon Sep 17 00:00:00 2001 From: jonghewk Date: Wed, 22 Nov 2023 17:07:45 +0900 Subject: [PATCH 4/5] add test case for conv3d depthwise --- tests/python/relay/test_op_level2.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/tests/python/relay/test_op_level2.py b/tests/python/relay/test_op_level2.py index cb785021783d..df06d714531a 100644 --- a/tests/python/relay/test_op_level2.py +++ b/tests/python/relay/test_op_level2.py @@ -823,6 +823,33 @@ def test_conv3d_transpose_ncdhw_run(): tvm.testing.assert_allclose(op_res1.numpy(), ref_res, rtol=1e-5, atol=1e-5) +def test_compile_depthwise_conv3d(): + dshape = [1, 16, 10, 10, 10] + wshape = [16, 2, 1, 1, 1] + params = {} + data = relay.var("data", shape=dshape, dtype="float32") + kernel = relay.const(tvm.nd.array(np.ones(shape=wshape).astype(dtype="float32"))) + mod = tvm.IRModule() + res = relay.nn.conv3d( + data, + kernel, + kernel_size=[1, 1, 1], + padding=[0] * 3, + channels=32, + groups=16, + data_layout="NCDHW", + kernel_layout="OIDHW", + ) + func = relay.Function([data], res) + mod = tvm.IRModule.from_expr(func) + + target = "llvm" + _ = relay.build( + mod, + tvm.target.Target(target, host=target) + ) + + @tvm.testing.uses_gpu def test_conv2d_transpose_infer_type(): # symbolic in batch dimension From 3337c60d1ae1b20b59d351b235615620bf95d400 Mon Sep 17 00:00:00 2001 From: jonghewk Date: Wed, 22 Nov 2023 22:04:59 +0900 Subject: [PATCH 5/5] apply lint --- tests/python/relay/test_op_level2.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/tests/python/relay/test_op_level2.py
b/tests/python/relay/test_op_level2.py index df06d714531a..399f8556e09e 100644 --- a/tests/python/relay/test_op_level2.py +++ b/tests/python/relay/test_op_level2.py @@ -844,11 +844,8 @@ def test_compile_depthwise_conv3d(): mod = tvm.IRModule.from_expr(func) target = "llvm" - _ = relay.build( - mod, - tvm.target.Target(target, host=target) - ) - + _ = relay.build(mod, tvm.target.Target(target, host=target)) + @tvm.testing.uses_gpu def test_conv2d_transpose_infer_type():