Merged
Commits
37 commits
0d9b9b7  update mkldnn and fix conv/deconv (TaoLv, Oct 22, 2018)
6ce7f19  Merge remote-tracking branch 'origin/master' into update-mkldnn (TaoLv, Oct 23, 2018)
501cccb  fix (TaoLv, Oct 24, 2018)
7c09f38  fix indent (TaoLv, Oct 24, 2018)
41da42f  fix cmake (TaoLv, Oct 24, 2018)
2e3c22f  fix cmake (TaoLv, Oct 24, 2018)
686445a  Merge remote-tracking branch 'origin/master' into update-mkldnn (TaoLv, Oct 25, 2018)
db3f674  fix cpp test for mkldnn (TaoLv, Oct 25, 2018)
e85139f  fix typo (TaoLv, Oct 25, 2018)
e15141c  Merge remote-tracking branch 'origin/master' into update-mkldnn (TaoLv, Oct 26, 2018)
f905404  fix conflicts after merge (TaoLv, Oct 26, 2018)
1fe9f88  debug: remove 5d test (TaoLv, Oct 26, 2018)
5412d64  debug: remove 4d test (TaoLv, Oct 26, 2018)
af2f80a  add comments (TaoLv, Oct 27, 2018)
32551b3  debug: remove 2d test (TaoLv, Oct 29, 2018)
9ff3687  update mklml in ci (Oct 29, 2018)
328a22a  fix mklml (Oct 29, 2018)
7c08742  Revert "fix mklml" (TaoLv, Oct 30, 2018)
026910f  Revert "update mklml in ci" (TaoLv, Oct 30, 2018)
c90a48d  Revert "debug: remove 2d test" (TaoLv, Oct 30, 2018)
904a114  Revert "debug: remove 4d test" (TaoLv, Oct 30, 2018)
030d24a  Revert "debug: remove 5d test" (TaoLv, Oct 30, 2018)
153b068  debug illegal core dump (Oct 30, 2018)
39321d5  debug illegal core dump (Oct 30, 2018)
0062cbe  Revert "debug illegal core dump" (TaoLv, Oct 30, 2018)
cb6f3ab  Revert "debug illegal core dump" (TaoLv, Oct 30, 2018)
4ffea76  change cmake (TaoLv, Oct 30, 2018)
c8b8d6d  Merge remote-tracking branch 'origin/master' into update-mkldnn (TaoLv, Oct 30, 2018)
266e31e  Merge remote-tracking branch 'origin/master' into update-mkldnn (TaoLv, Nov 2, 2018)
bf14044  pin mkldnn version to 0.17rc (TaoLv, Nov 2, 2018)
aeb26f1  change format number (TaoLv, Nov 2, 2018)
f48b155  remove include directories in cmake (TaoLv, Nov 2, 2018)
09a89fc  fix cpp test (TaoLv, Nov 7, 2018)
3ee4133  Merge remote-tracking branch 'origin/master' into update-mkldnn (TaoLv, Nov 7, 2018)
0376aa0  address cpplint complaint (TaoLv, Nov 7, 2018)
e1fb6b7  remove comment code (TaoLv, Nov 7, 2018)
a728583  update mkldnn head (TaoLv, Nov 7, 2018)
2 changes: 1 addition & 1 deletion 3rdparty/mkldnn
6 changes: 4 additions & 2 deletions CMakeLists.txt
@@ -229,8 +229,10 @@ if(USE_MKLDNN)
   if(NOT MSVC)
     set(ARCH_OPT_FLAGS "-mtune=generic")
   endif()
-  set(WITH_TEST OFF)
-  set(WITH_EXAMPLE OFF)
+  set(WITH_TEST OFF CACHE INTERNAL "" FORCE)
+  set(WITH_EXAMPLE OFF CACHE INTERNAL "" FORCE)
+  set(ARCH_OPT_FLAGS "" CACHE INTERNAL "" FORCE)
+
   add_subdirectory(3rdparty/mkldnn)
 
   include_directories(3rdparty/mkldnn/include)
12 changes: 12 additions & 0 deletions src/operator/nn/mkldnn/mkldnn_base-inl.h
@@ -208,6 +208,18 @@ static inline int get_mxnet_type(mkldnn_data_type_t dtype) {
   }
 }
 
+static inline size_t GetMemDescSize(const mkldnn::memory::desc &md) {
+  if (md.data.ndims == 0) return 0;
+
+  size_t ret = 1;
+  for (int i = 0; i < md.data.ndims; i++) {
+    ret *= md.data.dims[i];
+  }
+
+  ret *= mshadow::mshadow_sizeof(get_mxnet_type(md.data.data_type));
+  return ret;
+}
+
 inline static mkldnn::memory::desc GetMemDesc(const NDArray &arr, int ndim) {
   mkldnn::memory::dims dims(ndim);
   for (size_t i = 0; i < dims.size(); i++) dims[i] = arr.shape()[i];
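To make the helper's role concrete, here is a small sketch (not part of the PR; the dims, the blocked format, and the engine parameter are illustrative assumptions) contrasting the logical size GetMemDescSize computes with the padded size MKL-DNN can report for a blocked layout:

#include <cassert>
#include <mkldnn.hpp>

// Hedged sketch: GetMemDescSize multiplies the logical dims by the element
// size, so a 4D fp32 descriptor {1, 3, 224, 224} yields
// 1 * 3 * 224 * 224 * 4 = 602112 bytes regardless of layout.
void IllustrateMemDescSize(const mkldnn::engine &engine) {
  mkldnn::memory::desc plain({1, 3, 224, 224},
                             mkldnn::memory::data_type::f32,
                             mkldnn::memory::format::nchw);
  size_t logical = GetMemDescSize(plain);  // 602112
  // A padded (blocked) format rounds channels up to the block size
  // (3 -> 16 for nChw16c), so the buffer MKL-DNN requires is larger.
  mkldnn::memory::desc blocked({1, 3, 224, 224},
                               mkldnn::memory::data_type::f32,
                               mkldnn::memory::format::nChw16c);
  size_t padded = mkldnn::memory::primitive_desc(blocked, engine).get_size();
  assert(padded > logical);  // the mismatch the new size checks detect
}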
3 changes: 3 additions & 0 deletions src/operator/nn/mkldnn/mkldnn_base.cc
@@ -311,9 +311,12 @@ mkldnn_memory_format_t GetDefaultFormat(const mkldnn::memory::desc &desc) {
       case mkldnn_oihw:
       case mkldnn_ihwo:
       case mkldnn_hwio:
+      case mkldnn_oIhw8i:
+      case mkldnn_oIhw16i:
       case mkldnn_OIhw8i8o:
       case mkldnn_OIhw16i16o:
       case mkldnn_OIhw4i16o4i:
+      case mkldnn_OIhw4i16o4i_s8s8:
       case mkldnn_OIhw8i16o2i:
       case mkldnn_OIhw8o16i2o:
       case mkldnn_OIhw8o8i:
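The new cases above are blocked weight layouts recognized by this MKL-DNN version (the _s8s8 variant serves int8 weights). For readers unfamiliar with blocked formats, a self-contained sketch of the size arithmetic involved; the tensor shape and block size are illustrative assumptions, not values from the PR:

#include <cstdio>

int main() {
  // Hypothetical logical OIhw weight tensor {32, 3, 3, 3} in fp32.
  const int O = 32, I = 3, H = 3, W = 3, elem_bytes = 4;
  // In a 16i-blocked layout such as OIhw16i16o, the input-channel dim is
  // padded up to a multiple of the block size before the data is stored.
  const int block = 16;
  const int I_padded = ((I + block - 1) / block) * block;  // 3 -> 16
  std::printf("logical size: %d bytes\n", O * I * H * W * elem_bytes);        // 3456
  std::printf("padded size:  %d bytes\n", O * I_padded * H * W * elem_bytes); // 18432
  return 0;
}

This gap between logical and padded sizes is exactly what the convolution and deconvolution changes below guard against.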
97 changes: 87 additions & 10 deletions src/operator/nn/mkldnn/mkldnn_convolution.cc
@@ -85,16 +85,33 @@ mkldnn::convolution_forward::primitive_desc GetConvFwdImpl(
     attr.set_int_output_round_mode(round_nearest);
   }
 
+  // MKL-DNN introduced padded formats since 0.15 which require more memory
+  // for computation compared with the actual tensor size. Currently, MKL-DNN
+  // operators still reuse memory from MXNet's memory planning, and that memory
+  // may be smaller than what the MKL-DNN kernel requires. So here we need to
+  // select a suboptimal kernel for computation according to the tensor sizes.
   if (param.conv_param.dilate.ndim() == 0 && bias == nullptr) {
     mkldnn::convolution_forward::desc desc(prop, mkldnn::algorithm::convolution_direct,
         data_md, weight_md, out_md, strides, padding, padding, mkldnn::padding_kind::zero);
-    return mkldnn::convolution_forward::primitive_desc(desc, attr, engine);
+    auto conv_pd = mkldnn::convolution_forward::primitive_desc(desc, attr, engine);
+    while (conv_pd.dst_primitive_desc().get_size() != GetArraySize(output) ||
+           conv_pd.src_primitive_desc().get_size() != GetArraySize(data) ||
+           conv_pd.weights_primitive_desc().get_size() != GetArraySize(weights)) {
+      CHECK(conv_pd.next_impl()) << "No implementation";
+    }
+    return conv_pd;
Contributor

please comment on what the change does.

Contributor

Also curious about this. Is this an adaptation to an API change in MKLDNN? Does it return a list of results now rather than a single value?

Member Author

Added comments. Yes, MKL-DNN introduced a new API to select other implementations. conv_pd has the same data type as before, not a list; next_impl is just a member function of it that changes the internal state of the object.
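To make that concrete, a minimal sketch of the selection loop this PR repeats in each branch (a sketch under assumptions, not the PR's exact code: the function name and the size parameters are placeholders, CHECK is dmlc's logging macro, and next_impl() requires MKL-DNN 0.17 or newer):

#include <dmlc/logging.h>
#include <mkldnn.hpp>

// primitive_desc::next_impl() advances the same object to the next
// implementation in MKL-DNN's priority list and returns false once the
// list is exhausted.
mkldnn::convolution_forward::primitive_desc SelectBySize(
    const mkldnn::convolution_forward::desc &desc,
    const mkldnn::primitive_attr &attr, const mkldnn::engine &engine,
    size_t src_bytes, size_t weights_bytes, size_t dst_bytes) {
  auto pd = mkldnn::convolution_forward::primitive_desc(desc, attr, engine);
  // Walk down from the fastest kernel until one matches the buffers that
  // MXNet's memory planning has already allocated.
  while (pd.src_primitive_desc().get_size() != src_bytes ||
         pd.weights_primitive_desc().get_size() != weights_bytes ||
         pd.dst_primitive_desc().get_size() != dst_bytes) {
    CHECK(pd.next_impl()) << "No implementation";
  }
  return pd;
}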

   } else if (param.conv_param.dilate.ndim() == 0) {
     auto bias_md = GetMemDesc(*bias);
     mkldnn::convolution_forward::desc desc(prop, mkldnn::algorithm::convolution_direct,
         data_md, weight_md, bias_md, out_md, strides, padding, padding,
         mkldnn::padding_kind::zero);
-    return mkldnn::convolution_forward::primitive_desc(desc, attr, engine);
+    auto conv_pd = mkldnn::convolution_forward::primitive_desc(desc, attr, engine);
+    while (conv_pd.dst_primitive_desc().get_size() != GetArraySize(output) ||
+           conv_pd.src_primitive_desc().get_size() != GetArraySize(data) ||
+           conv_pd.weights_primitive_desc().get_size() != GetArraySize(weights)) {
+      CHECK(conv_pd.next_impl()) << "No implementation";
+    }
+    return conv_pd;
   } else {
     mkldnn::memory::dims dilates{0, 0};
     dilates[0] = param.conv_param.dilate[0] - 1;
@@ -103,14 +120,26 @@
     mkldnn::convolution_forward::desc desc(prop, mkldnn::algorithm::convolution_direct,
         data_md, weight_md, out_md, strides, dilates, padding, padding,
         mkldnn::padding_kind::zero);
-    return mkldnn::convolution_forward::primitive_desc(desc, attr, engine);
+    auto conv_pd = mkldnn::convolution_forward::primitive_desc(desc, attr, engine);
+    while (conv_pd.dst_primitive_desc().get_size() != GetArraySize(output) ||
+           conv_pd.src_primitive_desc().get_size() != GetArraySize(data) ||
+           conv_pd.weights_primitive_desc().get_size() != GetArraySize(weights)) {
+      CHECK(conv_pd.next_impl()) << "No implementation";
+    }
+    return conv_pd;
   } else {
     auto bias_md = GetMemDesc(*bias);
     mkldnn::convolution_forward::desc desc(prop, mkldnn::algorithm::convolution_direct,
                                            data_md, weight_md, bias_md, out_md, strides,
                                            dilates, padding, padding,
                                            mkldnn::padding_kind::zero);
-    return mkldnn::convolution_forward::primitive_desc(desc, attr, engine);
+    auto conv_pd = mkldnn::convolution_forward::primitive_desc(desc, attr, engine);
+    while (conv_pd.dst_primitive_desc().get_size() != GetArraySize(output) ||
+           conv_pd.src_primitive_desc().get_size() != GetArraySize(data) ||
+           conv_pd.weights_primitive_desc().get_size() != GetArraySize(weights)) {
+      CHECK(conv_pd.next_impl()) << "No implementation";
+    }
+    return conv_pd;
   }
   }
 }
@@ -131,18 +160,36 @@ static mkldnn::convolution_backward_data::primitive_desc GetConvBwdData(
   mkldnn::memory::dims padding{0, 0};
   padding[0] = param.pad[0];
   padding[1] = param.pad[1];
+
+  // MKL-DNN introduced padded formats since 0.15 which require more memory
+  // for computation compared with the actual tensor size. Currently, MKL-DNN
+  // operators still reuse memory from MXNet's memory planning, and that memory
+  // may be smaller than what the MKL-DNN kernel requires. So here we need to
+  // select a suboptimal kernel for computation according to the tensor sizes.
   if (param.dilate.ndim() == 0) {
     mkldnn::convolution_backward_data::desc desc(mkldnn::algorithm::convolution_direct,
         data_md, weight_md, out_md, strides, padding, padding, mkldnn::padding_kind::zero);
-    return mkldnn::convolution_backward_data::primitive_desc(desc, engine, fwd_pd);
+    auto conv_pd = mkldnn::convolution_backward_data::primitive_desc(desc, engine, fwd_pd);
+    while (conv_pd.diff_dst_primitive_desc().get_size() != GetArraySize(output) ||
+           conv_pd.diff_src_primitive_desc().get_size() != GetArraySize(data) ||
+           conv_pd.weights_primitive_desc().get_size() != GetArraySize(weights)) {
+      CHECK(conv_pd.next_impl()) << "No implementation";
+    }
+    return conv_pd;
   } else {
     mkldnn::memory::dims dilates{0, 0};
     dilates[0] = param.dilate[0] - 1;
     dilates[1] = param.dilate[1] - 1;
     mkldnn::convolution_backward_data::desc desc(mkldnn::algorithm::convolution_direct,
         data_md, weight_md, out_md, strides, dilates, padding, padding,
         mkldnn::padding_kind::zero);
-    return mkldnn::convolution_backward_data::primitive_desc(desc, engine, fwd_pd);
+    auto conv_pd = mkldnn::convolution_backward_data::primitive_desc(desc, engine, fwd_pd);
+    while (conv_pd.diff_dst_primitive_desc().get_size() != GetArraySize(output) ||
+           conv_pd.diff_src_primitive_desc().get_size() != GetArraySize(data) ||
+           conv_pd.weights_primitive_desc().get_size() != GetArraySize(weights)) {
+      CHECK(conv_pd.next_impl()) << "No implementation";
+    }
+    return conv_pd;
   }
 }
 
@@ -163,16 +210,34 @@ static mkldnn::convolution_backward_weights::primitive_desc GetConvBwdWeights(
   mkldnn::memory::dims padding{0, 0};
   padding[0] = param.pad[0];
   padding[1] = param.pad[1];
 
+  // MKL-DNN introduced padded formats since 0.15 which require more memory
+  // for computation compared with the actual tensor size. Currently, MKL-DNN
+  // operators still reuse memory from MXNet's memory planning, and that memory
+  // may be smaller than what the MKL-DNN kernel requires. So here we need to
+  // select a suboptimal kernel for computation according to the tensor sizes.
   if (param.dilate.ndim() == 0 && bias == nullptr) {
     mkldnn::convolution_backward_weights::desc desc(mkldnn::algorithm::convolution_direct,
         data_md, weight_md, out_md, strides, padding, padding, mkldnn::padding_kind::zero);
-    return mkldnn::convolution_backward_weights::primitive_desc(desc, engine, fwd_pd);
+    auto conv_pd = mkldnn::convolution_backward_weights::primitive_desc(desc, engine, fwd_pd);
+    while (conv_pd.diff_dst_primitive_desc().get_size() != GetArraySize(output) ||
+           conv_pd.src_primitive_desc().get_size() != GetArraySize(data) ||
+           conv_pd.diff_weights_primitive_desc().get_size() != GetArraySize(weights)) {
+      CHECK(conv_pd.next_impl()) << "No implementation";
+    }
+    return conv_pd;
   } else if (param.dilate.ndim() == 0) {
     auto bias_md = GetMemDesc(*bias);
     mkldnn::convolution_backward_weights::desc desc(mkldnn::algorithm::convolution_direct,
         data_md, weight_md, bias_md, out_md, strides, padding, padding,
         mkldnn::padding_kind::zero);
-    return mkldnn::convolution_backward_weights::primitive_desc(desc, engine, fwd_pd);
+    auto conv_pd = mkldnn::convolution_backward_weights::primitive_desc(desc, engine, fwd_pd);
+    while (conv_pd.diff_dst_primitive_desc().get_size() != GetArraySize(output) ||
+           conv_pd.src_primitive_desc().get_size() != GetArraySize(data) ||
+           conv_pd.diff_weights_primitive_desc().get_size() != GetArraySize(weights)) {
+      CHECK(conv_pd.next_impl()) << "No implementation";
+    }
+    return conv_pd;
   } else {
     mkldnn::memory::dims dilates{0, 0};
     dilates[0] = param.dilate[0] - 1;
@@ -181,14 +246,26 @@
     mkldnn::convolution_backward_weights::desc desc(mkldnn::algorithm::convolution_direct,
         data_md, weight_md, out_md, strides, dilates, padding, padding,
         mkldnn::padding_kind::zero);
-    return mkldnn::convolution_backward_weights::primitive_desc(desc, engine, fwd_pd);
+    auto conv_pd = mkldnn::convolution_backward_weights::primitive_desc(desc, engine, fwd_pd);
+    while (conv_pd.diff_dst_primitive_desc().get_size() != GetArraySize(output) ||
+           conv_pd.src_primitive_desc().get_size() != GetArraySize(data) ||
+           conv_pd.diff_weights_primitive_desc().get_size() != GetArraySize(weights)) {
+      CHECK(conv_pd.next_impl()) << "No implementation";
+    }
+    return conv_pd;
   } else {
     auto bias_md = GetMemDesc(*bias);
     mkldnn::convolution_backward_weights::desc desc(mkldnn::algorithm::convolution_direct,
                                                     data_md, weight_md, bias_md, out_md,
                                                     strides, dilates, padding, padding,
                                                     mkldnn::padding_kind::zero);
-    return mkldnn::convolution_backward_weights::primitive_desc(desc, engine, fwd_pd);
+    auto conv_pd = mkldnn::convolution_backward_weights::primitive_desc(desc, engine, fwd_pd);
+    while (conv_pd.diff_dst_primitive_desc().get_size() != GetArraySize(output) ||
+           conv_pd.src_primitive_desc().get_size() != GetArraySize(data) ||
+           conv_pd.diff_weights_primitive_desc().get_size() != GetArraySize(weights)) {
+      CHECK(conv_pd.next_impl()) << "No implementation";
+    }
+    return conv_pd;
   }
   }
 }
56 changes: 51 additions & 5 deletions src/operator/nn/mkldnn/mkldnn_deconvolution.cc
@@ -52,17 +52,34 @@ static mkldnn::convolution_forward::primitive_desc GetDeconvBwd_(
     bool has_bias, const mkldnn::memory::desc &out_md,
     const mkldnn::engine &engine, const mkldnn::memory::dims &strides,
     const mkldnn::memory::dims &padding, const mkldnn::memory::dims &dilates) {
+  // MKL-DNN introduced padded formats since 0.15 which require more memory
+  // for computation compared with the actual tensor size. Currently, MKL-DNN
+  // operators still reuse memory from MXNet's memory planning, and that memory
+  // may be smaller than what the MKL-DNN kernel requires. So here we need to
+  // select a suboptimal kernel for computation according to the tensor sizes.
   if (!has_bias) {
     mkldnn::convolution_forward::desc desc(mkldnn::prop_kind::forward_training,
         mkldnn::algorithm::convolution_direct, out_md, weights_md, data_md, strides,
         dilates, padding, padding, mkldnn::padding_kind::zero);
-    return mkldnn::convolution_forward::primitive_desc(desc, engine);
+    auto deconv_pd = mkldnn::convolution_forward::primitive_desc(desc, engine);
+    while (deconv_pd.dst_primitive_desc().get_size() != GetMemDescSize(data_md) ||
+           deconv_pd.src_primitive_desc().get_size() != GetMemDescSize(out_md) ||
+           deconv_pd.weights_primitive_desc().get_size() != GetMemDescSize(weights_md)) {
+      CHECK(deconv_pd.next_impl()) << "No implementation";
+    }
+    return deconv_pd;
   } else {
     auto bias_md = GetBiasDesc(data_md);
     mkldnn::convolution_forward::desc desc(mkldnn::prop_kind::forward_training,
         mkldnn::algorithm::convolution_direct, out_md, weights_md, bias_md,
         data_md, strides, dilates, padding, padding, mkldnn::padding_kind::zero);
-    return mkldnn::convolution_forward::primitive_desc(desc, engine);
+    auto deconv_pd = mkldnn::convolution_forward::primitive_desc(desc, engine);
+    while (deconv_pd.dst_primitive_desc().get_size() != GetMemDescSize(data_md) ||
+           deconv_pd.src_primitive_desc().get_size() != GetMemDescSize(out_md) ||
+           deconv_pd.weights_primitive_desc().get_size() != GetMemDescSize(weights_md)) {
+      CHECK(deconv_pd.next_impl()) << "No implementation";
+    }
+    return deconv_pd;
   }
 }
 
@@ -90,7 +107,18 @@ static mkldnn::convolution_backward_data::primitive_desc GetDeconvFwdImpl(
   mkldnn::convolution_backward_data::desc desc(mkldnn::algorithm::convolution_direct,
       out_md, weight_md, data_md, strides, dilate, padding, padding,
       mkldnn::padding_kind::zero);
-  return mkldnn::convolution_backward_data::primitive_desc(desc, engine, bwd_pd);
+  auto deconv_pd = mkldnn::convolution_backward_data::primitive_desc(desc, engine, bwd_pd);
+  // MKL-DNN introduced padded formats since 0.15 which require more memory
+  // for computation compared with the actual tensor size. Currently, MKL-DNN
+  // operators still reuse memory from MXNet's memory planning, and that memory
+  // may be smaller than what the MKL-DNN kernel requires. So here we need to
+  // select a suboptimal kernel for computation according to the tensor sizes.
+  while (deconv_pd.diff_dst_primitive_desc().get_size() != GetMemDescSize(data_md) ||
+         deconv_pd.diff_src_primitive_desc().get_size() != GetMemDescSize(out_md) ||
+         deconv_pd.weights_primitive_desc().get_size() != GetMemDescSize(weight_md)) {
+    CHECK(deconv_pd.next_impl()) << "No implementation";
+  }
+  return deconv_pd;
 }
 
 static mkldnn::convolution_forward::primitive_desc GetDeconvBwdDataImpl(
@@ -137,16 +165,34 @@ GetDeconvBwdWeightsImpl(
   mkldnn::memory::dims dilate{0, 0};
   dilate[0] = param.dilate[0] - 1;
   dilate[1] = param.dilate[1] - 1;
 
+  // MKL-DNN introduced padded formats since 0.15 which require more memory
+  // for computation compared with the actual tensor size. Currently, MKL-DNN
+  // operators still reuse memory from MXNet's memory planning, and that memory
+  // may be smaller than what the MKL-DNN kernel requires. So here we need to
+  // select a suboptimal kernel for computation according to the tensor sizes.
   if (!has_bias) {
     mkldnn::convolution_backward_weights::desc desc(mkldnn::algorithm::convolution_direct,
         out_md, weight_md, data_md, strides, dilate, padding, padding, mkldnn::padding_kind::zero);
-    return mkldnn::convolution_backward_weights::primitive_desc(desc, engine, fwd_pd);
+    auto deconv_pd = mkldnn::convolution_backward_weights::primitive_desc(desc, engine, fwd_pd);
+    while (deconv_pd.diff_dst_primitive_desc().get_size() != GetMemDescSize(data_md) ||
+           deconv_pd.src_primitive_desc().get_size() != GetMemDescSize(out_md) ||
+           deconv_pd.diff_weights_primitive_desc().get_size() != GetMemDescSize(weight_md)) {
+      CHECK(deconv_pd.next_impl()) << "No implementation";
+    }
+    return deconv_pd;
   } else {
     auto bias_md = GetBiasDesc(data_md);
     mkldnn::convolution_backward_weights::desc desc(mkldnn::algorithm::convolution_direct,
         out_md, weight_md, bias_md, data_md, strides, dilate, padding, padding,
         mkldnn::padding_kind::zero);
-    return mkldnn::convolution_backward_weights::primitive_desc(desc, engine, fwd_pd);
+    auto deconv_pd = mkldnn::convolution_backward_weights::primitive_desc(desc, engine, fwd_pd);
+    while (deconv_pd.diff_dst_primitive_desc().get_size() != GetMemDescSize(data_md) ||
+           deconv_pd.src_primitive_desc().get_size() != GetMemDescSize(out_md) ||
+           deconv_pd.diff_weights_primitive_desc().get_size() != GetMemDescSize(weight_md)) {
+      CHECK(deconv_pd.next_impl()) << "No implementation";
+    }
+    return deconv_pd;
  }
 }
 
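A closing note on this file: deconvolution forward is realized as convolution backward-data (GetDeconvFwdImpl returns a convolution_backward_data primitive_desc), which is why the size checks compare diff_dst against data_md and diff_src against out_md. A self-contained sketch of the shape arithmetic behind that equivalence; the stride, kernel, and pad values are illustrative:

#include <cstdio>

// A convolution with stride s, kernel k, pad p maps H -> (H + 2p - k)/s + 1;
// its backward-data pass inverts that map, which matches the deconvolution
// forward shape rule H -> s*(H - 1) + k - 2p.
int main() {
  const int s = 2, k = 4, p = 1;
  const int h_in = 7;
  const int h_deconv_out = s * (h_in - 1) + k - 2 * p;        // 14
  const int h_conv_fwd = (h_deconv_out + 2 * p - k) / s + 1;  // back to 7
  std::printf("deconv fwd: %d -> %d; conv fwd inverts: %d -> %d\n",
              h_in, h_deconv_out, h_deconv_out, h_conv_fwd);
  return 0;
}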
18 changes: 14 additions & 4 deletions tests/cpp/include/test_mkldnn.h
@@ -116,10 +116,15 @@ inline static std::vector<mkldnn::memory::format> GetMKLDNNFormat(size_t num_dim
                                            data_md, weight_md, out_md, strides,
                                            padding, padding, mkldnn::padding_kind::zero);
     mkldnn::convolution_forward::primitive_desc pd(desc, CpuEngine::Get()->get_engine());
-    std::vector<mkldnn::memory::format> ret(2);
+    while (pd.dst_primitive_desc().get_size() != GetMemDescSize(out_md) ||
+           pd.src_primitive_desc().get_size() != GetMemDescSize(data_md) ||
+           pd.weights_primitive_desc().get_size() != GetMemDescSize(weight_md)) {
+      CHECK(pd.next_impl()) << "No implementation";
+    }
+
+    std::vector<mkldnn::memory::format> ret(1);
     ret[0] = static_cast<mkldnn::memory::format>(pd.dst_primitive_desc().desc().data.format);
-    ret[1] = static_cast<mkldnn::memory::format>(pd.weights_primitive_desc().desc().data.format);
-    printf("format: %d, %d\n", ret[0], ret[1]);
+    printf("format: %d \n", ret[0]);
     return ret;
   } else if (num_dims == 5) {
     mkldnn::memory::dims data_dims{1, 32, 112, 112};
@@ -139,6 +144,12 @@
                                            data_md, weight_md, out_md, strides,
                                            padding, padding, mkldnn::padding_kind::zero);
     mkldnn::convolution_forward::primitive_desc pd(desc, CpuEngine::Get()->get_engine());
+    while (pd.dst_primitive_desc().get_size() != GetMemDescSize(out_md) ||
+           pd.src_primitive_desc().get_size() != GetMemDescSize(data_md) ||
+           pd.weights_primitive_desc().get_size() != GetMemDescSize(weight_md)) {
+      CHECK(pd.next_impl()) << "No implementation";
+    }
+
     std::vector<mkldnn::memory::format> ret(1);
     ret[0] = static_cast<mkldnn::memory::format>(pd.weights_primitive_desc().desc().data.format);
     printf("format: %d\n", ret[0]);
@@ -188,7 +199,6 @@ inline static TestArrayShapes GetTestArrayShapes() {
 
     std::vector<mkldnn::memory::format> formats = GetMKLDNNFormat(4, dtype);
     pds.push_back(GetMemPD(s1, dtype, formats[0]));
-    pds.push_back(GetMemPD(s2, dtype, formats[1]));
   }
   {
     // 5D