This repository was archived by the owner on Nov 17, 2023. It is now read-only.
Merged
16 changes: 12 additions & 4 deletions src/operator/nn/dnnl/dnnl_convolution.cc
@@ -116,10 +116,18 @@ std::shared_ptr<dnnl::convolution_forward::primitive_desc> GetConvFwdImpl(
   // suboptimal kernel for computation that has the expected memory size requirements
   auto conv_pd =
       std::make_shared<dnnl::convolution_forward::primitive_desc>(desc, attr, engine);
-  while (conv_pd->dst_desc().get_size() != GetArraySize(output) ||
-         conv_pd->src_desc().get_size() != GetArraySize(data) ||
-         (!param.dnnl_param.quantized &&
-          conv_pd->weights_desc().get_size() != GetArraySize(weights))) {
+  while (
+      conv_pd->dst_desc().get_size() != GetArraySize(output) ||
+      conv_pd->src_desc().get_size() != GetArraySize(data) ||
+      (!param.dnnl_param.quantized &&
+       conv_pd->weights_desc().get_size() != GetArraySize(weights)) ||
+      // With the upgrade of oneDNN to version 2.4+,
+      // tests/python/dnnl/subgraphs/test_conv_subgraph.py::test_pos_conv_add[True-data_shape1]
+      // started failing. Switching away from the primitive with weight dnnl::format_tag
+      // ABcd4b16a4b in order to temporarily fix the issue until a full fix arrives.
+      // Tracking issue: https://github.com/apache/incubator-mxnet/issues/20826.
+      (param.dnnl_param.quantized && conv_pd->weights_desc().dims()[1] < 4 &&
+       conv_pd->weights_desc().data.padded_dims[1] == 16)) {
     // next_impl() will visit desc and engine, please make sure they are still alive here.
     CHECK(conv_pd->next_impl()) << "No convolution implementation for this request.";
   }
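
For readers unfamiliar with the pattern, the loop above relies on oneDNN's primitive-descriptor iterator: a primitive descriptor created with format_tag::any starts at the most preferred implementation, and next_impl() advances it to the next available one. Below is a minimal, self-contained sketch of that pattern, assuming oneDNN 2.x; the f32 data type and the shapes (picked to mirror the failing (4, 3, 24, 24) test input) are illustrative and not code from this PR.

// Minimal sketch of the next_impl() iteration pattern used in the patch above.
// Hypothetical shapes and f32 data type; not code from this PR.
#include "dnnl.hpp"

int main() {
  using namespace dnnl;
  engine eng(engine::kind::cpu, 0);

  // format_tag::any lets each implementation choose its preferred layout.
  memory::desc src_md({4, 3, 24, 24}, memory::data_type::f32, memory::format_tag::any);
  memory::desc wei_md({16, 3, 3, 3}, memory::data_type::f32, memory::format_tag::any);
  memory::desc dst_md({4, 16, 24, 24}, memory::data_type::f32, memory::format_tag::any);

  convolution_forward::desc desc(prop_kind::forward_inference,
                                 algorithm::convolution_direct,
                                 src_md, wei_md, dst_md,
                                 /*strides=*/{1, 1},
                                 /*padding_l=*/{1, 1},
                                 /*padding_r=*/{1, 1});
  convolution_forward::primitive_desc pd(desc, eng);

  // Walk the list of available implementations, skipping any whose weights
  // layout pads the 3 input channels up to 16 -- the same predicate shape as
  // the quantized-case guard in the patch.
  while (pd.weights_desc().dims()[1] < 4 &&
         pd.weights_desc().data.padded_dims[1] == 16) {
    if (!pd.next_impl()) break;  // last implementation reached
  }
  return 0;
}
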
5 changes: 1 addition & 4 deletions tests/python/dnnl/subgraphs/subgraph_common.py
@@ -42,10 +42,7 @@
   }
 }
 
-DATA_SHAPE=[(64, 4, 10, 10), (4, 4, 24, 24), (1, 16, 32, 32)]
-# Second shape has been temporarily changed from (4, 3, 24, 24) to (4, 4, 24, 24) due to
-# a bug regarding conv+sum fuse with the amount of input channels < 4. It will be reverted
-# as soon as the problem is fixed. Issue: https://github.com/apache/incubator-mxnet/issues/20826.
+DATA_SHAPE=[(64, 4, 10, 10), (4, 3, 24, 24), (1, 16, 32, 32)]
 
 # Helpers
 class RELU6(nn.HybridBlock):
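
The reverted (4, 3, 24, 24) shape works again because the C++ guard above now skips implementations whose weights layout pads the channel dimension. In the blocked int8 weights layout ABcd4b16a4b (OIHW mapped to ABcd), the input-channel dimension is rounded up to a multiple of 16, so 3 input channels report padded_dims[1] == 16 while dims()[1] stays 3. A minimal sketch of that padding, assuming oneDNN 2.x and a hypothetical 16x3x3x3 OIHW weights shape:

// Minimal sketch (oneDNN 2.x; hypothetical 16x3x3x3 OIHW weights) showing the
// dimension padding that the quantized-case guard above detects.
#include <iostream>
#include "dnnl.hpp"

int main() {
  using namespace dnnl;
  // Request the blocked layout explicitly; with ABcd4b16a4b both the output-
  // and input-channel dimensions are padded to multiples of 16.
  memory::desc wei_md({16, 3, 3, 3}, memory::data_type::s8,
                      memory::format_tag::ABcd4b16a4b);
  std::cout << "dims[1]        = " << wei_md.dims()[1] << "\n";            // 3
  std::cout << "padded_dims[1] = " << wei_md.data.padded_dims[1] << "\n";  // 16
  return 0;
}
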