
Commit bdcf137

Authored by bartekkuncer and bgawrych
Make convolution operator fully work with oneDNN v2.4+ (#20847)
* Restore full functionality to convolution
* Update src/operator/nn/dnnl/dnnl_convolution.cc

Co-authored-by: bgawrych <bartlomiej.gawrych@intel.com>
1 parent e9840b8 commit bdcf137

2 files changed (+13, -8 lines)

src/operator/nn/dnnl/dnnl_convolution.cc

Lines changed: 12 additions & 4 deletions
@@ -118,10 +118,18 @@ std::shared_ptr<dnnl::convolution_forward::primitive_desc> GetConvFwdImpl(
   // suboptimal kernel for computation that has the expected memory size requirements
   auto conv_pd =
       std::make_shared<dnnl::convolution_forward::primitive_desc>(desc, attr, engine);
-  while (conv_pd->dst_desc().get_size() != GetArraySize(output) ||
-         conv_pd->src_desc().get_size() != GetArraySize(data) ||
-         (!param.dnnl_param.quantized &&
-          conv_pd->weights_desc().get_size() != GetArraySize(weights))) {
+  while (
+      conv_pd->dst_desc().get_size() != GetArraySize(output) ||
+      conv_pd->src_desc().get_size() != GetArraySize(data) ||
+      (!param.dnnl_param.quantized &&
+       conv_pd->weights_desc().get_size() != GetArraySize(weights)) ||
+      // With the upgrade of oneDNN to version 2.4+
+      // tests/python/dnnl/subgraphs/test_conv_subgraph.py::test_pos_conv_add[True-data_shape1]
+      // started failing. Switching away from primitive with weight dnnl::format_tag
+      // ABcd4b16a4b in order to temporarily fix the issue until full fix arrives.
+      // Tracking issue: https://github.com/apache/incubator-mxnet/issues/20826.
+      (param.dnnl_param.quantized && conv_pd->weights_desc().dims()[1] < 4 &&
+       conv_pd->weights_desc().data.padded_dims[1] == 16)) {
     // next_impl() will visit desc and engine, please make sure they are still alive here.
     CHECK(conv_pd->next_impl()) << "No convolution implementation for this request.";
   }
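For context on the loop being modified: dnnl::primitive_desc exposes next_impl(), which steps through oneDNN's ranked list of candidate implementations for the same operation descriptor, so the while condition above simply keeps advancing until a candidate passes every check. The following is a minimal standalone sketch of that iteration pattern against the oneDNN v2.x API; it is not part of the commit, and the shapes and convolution settings are illustrative assumptions, not values taken from MXNet.

// Sketch only: iterate over oneDNN's candidate convolution implementations.
// Shapes and settings are hypothetical; requires oneDNN v2.x headers/library.
#include <dnnl.hpp>
#include <iostream>

int main() {
  using namespace dnnl;
  engine eng(engine::kind::cpu, 0);

  // format_tag::any lets oneDNN pick the memory layout for each candidate kernel.
  memory::desc src_md({1, 3, 224, 224}, memory::data_type::f32, memory::format_tag::any);
  memory::desc wei_md({64, 3, 3, 3}, memory::data_type::f32, memory::format_tag::any);
  memory::desc dst_md({1, 64, 222, 222}, memory::data_type::f32, memory::format_tag::any);

  convolution_forward::desc desc(prop_kind::forward_inference,
                                 algorithm::convolution_direct,
                                 src_md, wei_md, dst_md,
                                 /*strides=*/{1, 1},
                                 /*padding_l=*/{0, 0},
                                 /*padding_r=*/{0, 0});
  primitive_attr attr;
  convolution_forward::primitive_desc conv_pd(desc, attr, eng);

  // next_impl() advances to the next (typically slower) implementation for the
  // same descriptor and returns false once the list is exhausted.
  do {
    std::cout << "impl: " << conv_pd.impl_info_str()
              << ", weights bytes: " << conv_pd.weights_desc().get_size() << '\n';
  } while (conv_pd.next_impl());
  return 0;
}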

tests/python/dnnl/subgraphs/subgraph_common.py

Lines changed: 1 addition & 4 deletions
@@ -42,10 +42,7 @@
   }
 }
 
-DATA_SHAPE=[(64, 4, 10, 10), (4, 4, 24, 24), (1, 16, 32, 32)]
-# Second shape has been temporairly changed from (4, 3, 24, 24) to (4, 4, 24, 24) due to
-# a bug regarding conv+sum fuse with the amount of input channels < 4. It will be reverted
-# as soon as the problem is fixed. Issue: https://github.com/apache/incubator-mxnet/issues/20826.
+DATA_SHAPE=[(64, 4, 10, 10), (4, 3, 24, 24), (1, 16, 32, 32)]
 
 # Helpers
 class RELU6(nn.HybridBlock):
