Skip to content
This repository was archived by the owner on Nov 17, 2023. It is now read-only.

Commit 3143aab

Browse files
bgawrych (Bart Gawrych)
authored
[v1.7.x] ElementWiseSum fix for oneDNN (#18777)
* Fix ElementwiseSum for DNNL
* Fix sanity and replace push_back with emplace_back
* Change order of the data format conditions
* Add NOLINT to avoid readability error
* Add test for oneDNN ElemwiseSum

Co-authored-by: Bart Gawrych <gawrych.bartlomiej@intel.com>
1 parent d95de55 commit 3143aab

File tree

2 files changed

+31
-8
lines changed

2 files changed

+31
-8
lines changed

src/operator/tensor/elemwise_sum.cc

Lines changed: 8 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -113,7 +113,14 @@ void ElementWiseSumComputeExCPU(const nnvm::NodeAttrs& attrs,
113113
CHECK_EQ(outputs.size(), 1U);
114114
CHECK_EQ(req.size(), 1U);
115115
if (req[0] == kNullOp) return;
116-
if (common::ContainsOnlyStorage(inputs, kRowSparseStorage) ||
116+
#if MXNET_USE_MKLDNN == 1
117+
if (IsMKLDNNData(inputs)) {
118+
MKLDNNRun(MKLDNNSumForward, attrs, ctx, inputs, req, outputs);
119+
} else if (common::ContainsOnlyStorage(inputs, kDefaultStorage)) {
120+
FallBackCompute(ElementWiseSumCompute<cpu>, attrs, ctx, inputs, req, outputs);
121+
}
122+
#endif
123+
else if (common::ContainsOnlyStorage(inputs, kRowSparseStorage) || // NOLINT(*)
117124
(inputs.size() == 3U && inputs[0].storage_type() == kDefaultStorage &&
118125
inputs[1].storage_type() == kCSRStorage && inputs[2].storage_type() == kDefaultStorage) ||
119126
(inputs.size() > 4U && common::ContainsStorageType(inputs, kDefaultStorage) &&
@@ -123,12 +130,6 @@ void ElementWiseSumComputeExCPU(const nnvm::NodeAttrs& attrs,
123130
ResourceRequest(ResourceRequest::kTempSpace));
124131
NDArray out_nd = outputs[0];
125132
mxnet::ndarray::ElementwiseSum<cpu>(s, rsc, inputs, &out_nd);
126-
#if MXNET_USE_MKLDNN == 1
127-
} else if (IsMKLDNNData(inputs)) {
128-
MKLDNNRun(MKLDNNSumForward, attrs, ctx, inputs, req, outputs);
129-
} else if (common::ContainsOnlyStorage(inputs, kDefaultStorage)) {
130-
FallBackCompute(ElementWiseSumCompute<cpu>, attrs, ctx, inputs, req, outputs);
131-
#endif
132133
} else {
133134
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
134135
}

tests/python/mkl/test_mkldnn.py

Lines changed: 23 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -254,6 +254,28 @@ def test_flatten_slice_after_conv():
254254
print(p[0])
255255

256256

257+
def test_mkldnn_sum_with_mkldnn_layout():
258+
259+
x_shape = (32, 3, 224, 224)
260+
x_npy = np.ones(x_shape)
261+
w_shape = (32, 3, 3, 3)
262+
w_npy = np.ones(w_shape)
263+
264+
x = mx.sym.Variable("x")
265+
w = mx.sym.Variable("w")
266+
z = mx.symbol.Convolution(data=x, weight=w, num_filter=32, kernel=(3, 3))
267+
num_inputs = [2, 3, 4, 5]
268+
for i in num_inputs:
269+
inputs = []
270+
for n in range(i):
271+
inputs.append(z)
272+
y = mx.sym.add_n(*inputs) # (only MKLDNN data input)
273+
exe = y.simple_bind(ctx=mx.cpu(), x=x_shape, w=w_shape)
274+
out = exe.forward(is_train=False, x=x_npy, w=np.ones(w_shape))[0]
275+
#conv with kernel (3,3) on ones should give result=27
276+
single_cov = 27.0
277+
assert_almost_equal(out[0].asnumpy()[0, 0, 0], single_cov*i)
278+
257279
def test_mkldnn_sum_inplace_with_cpu_layout():
258280

259281
x_shape = (32, 3, 224, 224)
@@ -263,7 +285,7 @@ def test_mkldnn_sum_inplace_with_cpu_layout():
263285
x = mx.sym.Variable("x")
264286
y = mx.sym.Variable("y")
265287
z = mx.symbol.Convolution(data=x, num_filter=32, kernel=(3, 3))
266-
z = mx.sym.add_n(z, y)
288+
z = mx.sym.add_n(z, y) # (MKLDNN data, cpu data)
267289
exe = z.simple_bind(ctx=mx.cpu(), x=x_shape, y=y_shape)
268290
out = exe.forward(is_train=False, x=x_npy, y=y_npy)[0]
269291
assert_almost_equal(out[0].asnumpy()[0, 0, 0], 1.0)

0 commit comments

Comments (0)