From 0c8f5a6ce481b5d8f7d241487978eafc6872d8b1 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Tue, 31 Aug 2021 11:53:11 +0200
Subject: [PATCH 01/27] numpy transpose onednn usage

---
 src/operator/nn/mkldnn/mkldnn_base-inl.h   |  3 +-
 src/operator/nn/mkldnn/mkldnn_ops-inl.h    |  6 +++
 src/operator/nn/mkldnn/mkldnn_transpose.cc | 48 +++++++++++++++++++---
 src/operator/numpy/np_matrix_op-inl.h      | 18 ++++++++
 src/operator/numpy/np_matrix_op.cc         | 43 ++++++++++++++++++-
 src/operator/tensor/matrix_op.cc           |  3 +-
 6 files changed, 111 insertions(+), 10 deletions(-)

diff --git a/src/operator/nn/mkldnn/mkldnn_base-inl.h b/src/operator/nn/mkldnn/mkldnn_base-inl.h
index cf7c9b1f5d62..147b3351da54 100644
--- a/src/operator/nn/mkldnn/mkldnn_base-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_base-inl.h
@@ -182,6 +182,7 @@ struct DeconvolutionParam;
 struct SoftmaxParam;
 struct SoftmaxOutputParam;
 struct TransposeParam;
+struct NumpyTransposeParam;
 struct ReshapeParam;
 struct LayerNormParam;
 bool SupportMKLDNNAct(const ActivationParam& param);
@@ -196,7 +197,7 @@ bool SupportMKLDNNLogSoftmax(const SoftmaxParam& param,
                              const NDArray& input,
                              const NDArray& output);
 bool SupportMKLDNNSoftmaxOutput(const SoftmaxOutputParam& param);
-bool SupportMKLDNNTranspose(const TransposeParam& param, const NDArray& data);
+bool SupportMKLDNNTranspose(const NDArray& data);
 bool SupportMKLDNNBatchDot(const std::vector<NDArray>& inputs, const NDArray& output);
 bool SupportMKLDNNLayerNorm(const LayerNormParam& param, const std::vector<NDArray>& inputs);
 bool SupportMKLDNNReshape(const NDArray& input, const NDArray& output);
diff --git a/src/operator/nn/mkldnn/mkldnn_ops-inl.h b/src/operator/nn/mkldnn/mkldnn_ops-inl.h
index d9d84e68050f..90753317aa15 100644
--- a/src/operator/nn/mkldnn/mkldnn_ops-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_ops-inl.h
@@ -185,6 +185,12 @@ void MKLDNNTransposeForward(const nnvm::NodeAttrs& attrs,
                             const OpReqType& req,
                             const NDArray& output);
 
+void MKLDNNNumpyTransposeForward(const nnvm::NodeAttrs& attrs,
+                                 const OpContext& ctx,
+                                 const NDArray& data,
+                                 const OpReqType& req,
+                                 const NDArray& output);
+
 void MKLDNNReshapeForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const NDArray& input,
diff --git a/src/operator/nn/mkldnn/mkldnn_transpose.cc b/src/operator/nn/mkldnn/mkldnn_transpose.cc
index 27dc1adbcf0d..fe77b9853c9c 100644
--- a/src/operator/nn/mkldnn/mkldnn_transpose.cc
+++ b/src/operator/nn/mkldnn/mkldnn_transpose.cc
@@ -28,11 +28,12 @@
 #include <mkldnn.hpp>
 
 #include "../../tensor/matrix_op-inl.h"
+#include "../../numpy/np_matrix_op-inl.h"
 
 namespace mxnet {
 namespace op {
 
-bool SupportMKLDNNTranspose(const TransposeParam& param, const NDArray& data) {
+bool SupportMKLDNNTranspose(const NDArray& data) {
   auto data_ndim = data.shape().ndim();
 
   if (data_ndim > 4 || data_ndim == 0 || data.shape().Size() == 0 ||
@@ -42,7 +43,7 @@ bool SupportMKLDNNTranspose(const TransposeParam& param, const NDArray& data) {
   return true;
 }
 
-typedef ParamOpSign<TransposeParam> MKLDNNTransposeSignature;
+typedef ParamOpSign<NumpyTransposeParam> MKLDNNTransposeSignature;
 
 class MKLDNNTransposeForward {
  public:
  std::shared_ptr<mkldnn::memory> data_;
  std::shared_ptr<mkldnn::memory> out_;
  std::shared_ptr<mkldnn::memory::desc> dst_md_;
  std::shared_ptr<mkldnn::reorder> transpose_;

  public:
-  MKLDNNTransposeForward(const TransposeParam& param, const NDArray& data) {
+  MKLDNNTransposeForward(const NumpyTransposeParam& param, const NDArray& data) {
     auto shape     = data.shape();
     auto data_ndim = shape.ndim();
     auto axes_ndim = param.axes.ndim();
     auto axes      = mxnet::TShape(data_ndim, -1);
-    if (axes_ndim == 0) {
+    if (axes_ndim == -1) {
       for (int i = 0; i < data_ndim; i++) {
         axes[i] = data_ndim - i - 1;
       }
     } else {
       axes = param.axes;
     }
@@ -114,7 +115,7 @@ class MKLDNNTransposeForward {
   }
 };
 
-static MKLDNNTransposeForward& GetTransposeForward(const TransposeParam& param,
+static MKLDNNTransposeForward& GetTransposeForward(const NumpyTransposeParam& param,
                                                    const NDArray& data) {
 #if DMLC_CXX11_THREAD_LOCAL
   static thread_local std::unordered_map<MKLDNNTransposeSignature, MKLDNNTransposeForward, OpHash>
      fwds;
 #else
   static MX_THREAD_LOCAL
      std::unordered_map<MKLDNNTransposeSignature, MKLDNNTransposeForward, OpHash>
          fwds;
 #endif
@@ -135,17 +136,52 @@ static MKLDNNTransposeForward& GetTransposeForward(const TransposeParam& param,
   return it->second;
 }
 
+NumpyTransposeParam ProcessTransposeParam(const TransposeParam &param) {
+  NumpyTransposeParam p;
+  if (param.axes.ndim() == 0)
+  {
+    p.axes = mxnet::TShape(-1,0);
+  }
+  else
+  {
+    p.axes = param.axes;
+  }
+  return p;
+}
+
+NumpyTransposeParam ProcessNumpyTransposeParam(const NumpyTransposeParam &param) {
+  NumpyTransposeParam p;
+  p.axes = common::CanonicalizeAxes(param.axes);
+  return p;
+}
+
 void MKLDNNTransposeForward(const nnvm::NodeAttrs& attrs,
                             const OpContext& ctx,
                             const NDArray& data,
                             const OpReqType& req,
                             const NDArray& output) {
   const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
+  const NumpyTransposeParam processed_param = ProcessTransposeParam(param);
 
-  auto fwd = GetTransposeForward(param, data);
+  auto fwd = GetTransposeForward(processed_param, data);
+  fwd.SetNewMem(data, output);
+  fwd.Execute();
+}
+
+
+void MKLDNNNumpyTransposeForward(const nnvm::NodeAttrs& attrs,
+                                 const OpContext& ctx,
+                                 const NDArray& data,
+                                 const OpReqType& req,
+                                 const NDArray& output) {
+  const NumpyTransposeParam& param = nnvm::get<NumpyTransposeParam>(attrs.parsed);
+  const NumpyTransposeParam processed_param = ProcessNumpyTransposeParam(param);
+
+  auto fwd = GetTransposeForward(processed_param, data);
   fwd.SetNewMem(data, output);
   fwd.Execute();
 }
 }  // namespace op
 }  // namespace mxnet
+
 #endif
diff --git a/src/operator/numpy/np_matrix_op-inl.h b/src/operator/numpy/np_matrix_op-inl.h
index 603355ad775d..3c825ab6a3b7 100644
--- a/src/operator/numpy/np_matrix_op-inl.h
+++ b/src/operator/numpy/np_matrix_op-inl.h
@@ -49,6 +49,11 @@ struct NumpyTransposeParam : public dmlc::Parameter<NumpyTransposeParam> {
               "By default, reverse the dimensions, otherwise permute "
               "the axes according to the values given.");
   }
+
+  bool operator==(const NumpyTransposeParam &other) const {
+    return this->axes == other.axes;
+  }
+
   void SetAttrDict(std::unordered_map<std::string, std::string>* dict) {
     std::ostringstream axes_s;
     axes_s << axes;
@@ -1865,7 +1870,20 @@ void NumpyDiagIndicesFromForward(const nnvm::NodeAttrs& attrs,
   });
 }
 
+
+
 }  // namespace op
 }  // namespace mxnet
 
+namespace std {
+template<>
+struct hash<mxnet::op::NumpyTransposeParam> {
+  size_t operator()(const mxnet::op::NumpyTransposeParam& val) {
+    size_t ret = 0;
+    ret = dmlc::HashCombine(ret, val.axes);
+    return ret;
+  }
+};
+}  // namespace std
+
 #endif  // MXNET_OPERATOR_NUMPY_NP_MATRIX_OP_INL_H_
diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc
index dd7230e70970..b540031ec6bf 100644
--- a/src/operator/numpy/np_matrix_op.cc
+++ b/src/operator/numpy/np_matrix_op.cc
@@ -26,7 +26,11 @@
 #include <vector>
 #include "./np_matrix_op-inl.h"
 #include "../nn/concat-inl.h"
-
+#if MXNET_USE_ONEDNN == 1
+#include "../nn/mkldnn/mkldnn_ops-inl.h"
+#include "../nn/mkldnn/mkldnn_base-inl.h"
+#include "../nn/mkldnn/mkldnn_slice-inl.h"
+#endif
 namespace mxnet {
 namespace op {
 
@@ -100,6 +104,38 @@ bool NumpyTransposeShape(const nnvm::NodeAttrs& attrs,
   SHAPE_ASSIGN_CHECK(*out_attrs, 0, ret);
   return shape_is_known(*in_attrs) && shape_is_known(*out_attrs);
 }
+#if MXNET_USE_ONEDNN == 1
+
+static void NumpyTransposeComputeExCPU(const nnvm::NodeAttrs& attrs,
+                                   const OpContext& ctx,
+                                   const std::vector<NDArray>& inputs,
+                                   const std::vector<OpReqType>& req,
+                                   const std::vector<NDArray>& outputs) {
+  if (req[0] == kNullOp) {
+    return;
+  }
+  CHECK(req[0] == kWriteTo || req[0] == kAddTo) <<
+      "Transpose only supports kNullOp, kWriteTo and kAddTo";
+  CHECK_EQ(inputs.size(), 1U);
+  CHECK_EQ(outputs.size(), 1U);
+
+  if (SupportMKLDNNTranspose(inputs[0]) && req[0] == kWriteTo) {
+    MKLDNNRun(MKLDNNNumpyTransposeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
+    return;
+  }
+  FallBackCompute(NumpyTranspose<cpu>, attrs, ctx, inputs, req, outputs);
+}
+
+inline static bool NumpyTransposeStorageType(const nnvm::NodeAttrs& attrs,
+                                         const int dev_mask,
+                                         DispatchMode* dispatch_mode,
+                                         std::vector<int>* in_attrs,
+                                         std::vector<int>* out_attrs) {
+  CHECK_EQ(in_attrs->size(), 1U);
+  CHECK_EQ(out_attrs->size(), 1U);
+  return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, out_attrs);
+}
+#endif
 
 NNVM_REGISTER_OP(_npi_transpose)
     .set_num_inputs(1)
@@ -134,6 +170,11 @@ NNVM_REGISTER_OP(_npi_transpose)
                    [](const NodeAttrs& attrs) {
                      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
                    })
+#if MXNET_USE_ONEDNN == 1
+    .set_attr<bool>("TIsMKLDNN", true)
+    .set_attr<FComputeEx>("FComputeEx<cpu>", NumpyTransposeComputeExCPU)
+    .set_attr<FInferStorageType>("FInferStorageType", NumpyTransposeStorageType)
+#endif
     .set_attr<nnvm::FListInputNames>("FListInputNames",
                                      [](const NodeAttrs& attrs) {
                                        return std::vector<std::string>{"a"};
diff --git a/src/operator/tensor/matrix_op.cc b/src/operator/tensor/matrix_op.cc
index fe9b62767dcf..68dc68c79f2c 100644
--- a/src/operator/tensor/matrix_op.cc
+++ b/src/operator/tensor/matrix_op.cc
@@ -309,13 +309,12 @@ static void TransposeComputeExCPU(const nnvm::NodeAttrs& attrs,
   if (req[0] == kNullOp) {
     return;
   }
-  const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
   CHECK(req[0] == kWriteTo || req[0] == kAddTo)
       << "Transpose only supports kNullOp, kWriteTo and kAddTo";
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
 
-  if (SupportMKLDNNTranspose(param, inputs[0]) && req[0] == kWriteTo) {
+  if (SupportMKLDNNTranspose(inputs[0]) && req[0] == kWriteTo) {
     MKLDNNRun(MKLDNNTransposeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
     return;
   }

From ef31c8f008cf59edb748142fd53cc7f5c7f118bd Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Tue, 31 Aug 2021 12:04:57 +0200
Subject: [PATCH 02/27] remove unnecessary whitespace

---
 src/operator/numpy/np_matrix_op-inl.h | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/operator/numpy/np_matrix_op-inl.h b/src/operator/numpy/np_matrix_op-inl.h
index 3c825ab6a3b7..113ee446302b 100644
--- a/src/operator/numpy/np_matrix_op-inl.h
+++ b/src/operator/numpy/np_matrix_op-inl.h
@@ -1870,8 +1870,6 @@ void NumpyDiagIndicesFromForward(const nnvm::NodeAttrs& attrs,
   });
 }
 
-
-
 }  // namespace op
 }  // namespace mxnet
 
From 1e0daafee2e6da8e49804b10532e1bc0d2a9a79e Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Tue, 31 Aug 2021 12:53:18 +0200
Subject: [PATCH 03/27] remove unnecessary whitespace

---
 src/operator/nn/mkldnn/mkldnn_transpose.cc | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/operator/nn/mkldnn/mkldnn_transpose.cc b/src/operator/nn/mkldnn/mkldnn_transpose.cc
index fe77b9853c9c..21d89e44faa8 100644
--- a/src/operator/nn/mkldnn/mkldnn_transpose.cc
+++ b/src/operator/nn/mkldnn/mkldnn_transpose.cc
@@ -168,7 +168,6 @@ void MKLDNNTransposeForward(const nnvm::NodeAttrs& attrs,
   fwd.Execute();
 }
 
-
 void MKLDNNNumpyTransposeForward(const nnvm::NodeAttrs& attrs,
                                  const OpContext& ctx,
                                  const NDArray& data,
@@ -183,5 +182,4 @@ void MKLDNNNumpyTransposeForward(const nnvm::NodeAttrs& attrs,
 }
 }  // namespace op
 }  // namespace mxnet
-
 #endif
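A note on the approach in PATCH 01: oneDNN has no dedicated transpose primitive, so the operator is lowered to a plain reorder whose destination descriptor keeps the source's logical dimensions but carries strides derived from the axes permutation. Below is a minimal standalone sketch of that stride arithmetic (plain C++, no oneDNN dependency; the function and variable names are illustrative, mirroring the loop in MKLDNNTransposeForward):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Output axis i of the transpose takes input axis axes[i]. The strides
    // below describe, for each *input* axis, how far apart its elements land
    // in the contiguous transposed output -- the same arithmetic the patch
    // uses to build the destination memory descriptor for the reorder.
    std::vector<int64_t> TransposedStrides(const std::vector<int64_t>& shape,
                                           const std::vector<int64_t>& axes) {
      const int ndim = static_cast<int>(shape.size());
      std::vector<int64_t> strides(ndim);
      int64_t total_stride = 1;
      for (int i = ndim - 1; i >= 0; --i) {
        strides[axes[i]] = total_stride;
        total_stride *= shape[axes[i]];
      }
      return strides;
    }

    int main() {
      // A (2, 3, 4) tensor transposed with axes (2, 0, 1): the output shape is
      // (4, 2, 3), so the input axes 0, 1, 2 get strides 3, 1, 6 respectively.
      for (int64_t s : TransposedStrides({2, 3, 4}, {2, 0, 1}))
        std::cout << s << ' ';  // prints: 3 1 6
      std::cout << '\n';
    }

Because the source and destination of a reorder must agree on logical dims, the permutation lives entirely in the strides; executing the reorder then physically shuffles the data into the transposed contiguous layout.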
From f36271c372edf25288a37d94ca14dd321b419b4b Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Tue, 31 Aug 2021 13:16:43 +0200
Subject: [PATCH 04/27] remove unnecessary param

---
 src/operator/nn/mkldnn/mkldnn_base-inl.h | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/operator/nn/mkldnn/mkldnn_base-inl.h b/src/operator/nn/mkldnn/mkldnn_base-inl.h
index 147b3351da54..b245f718dfc3 100644
--- a/src/operator/nn/mkldnn/mkldnn_base-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_base-inl.h
@@ -181,7 +181,6 @@ struct ConvolutionParam;
 struct DeconvolutionParam;
 struct SoftmaxParam;
 struct SoftmaxOutputParam;
-struct TransposeParam;
 struct NumpyTransposeParam;
 struct ReshapeParam;
 struct LayerNormParam;

From c51a3ee8e945808db883fb0269330647ff43d7c5 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Mon, 13 Sep 2021 10:05:08 +0200
Subject: [PATCH 05/27] formatting changes, cleanup

---
 src/operator/nn/mkldnn/mkldnn_base-inl.h   |  1 -
 src/operator/nn/mkldnn/mkldnn_ops-inl.h    | 18 ++---
 src/operator/nn/mkldnn/mkldnn_transpose.cc | 88 +++++++++++-----------
 src/operator/tensor/matrix_op.cc           |  4 +-
 4 files changed, 54 insertions(+), 57 deletions(-)

diff --git a/src/operator/nn/mkldnn/mkldnn_base-inl.h b/src/operator/nn/mkldnn/mkldnn_base-inl.h
index b245f718dfc3..8db1f22266eb 100644
--- a/src/operator/nn/mkldnn/mkldnn_base-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_base-inl.h
@@ -181,7 +181,6 @@ struct ConvolutionParam;
 struct DeconvolutionParam;
 struct SoftmaxParam;
 struct SoftmaxOutputParam;
-struct NumpyTransposeParam;
 struct ReshapeParam;
 struct LayerNormParam;
 bool SupportMKLDNNAct(const ActivationParam& param);
diff --git a/src/operator/nn/mkldnn/mkldnn_ops-inl.h b/src/operator/nn/mkldnn/mkldnn_ops-inl.h
index 90753317aa15..2e8a1573ebf9 100644
--- a/src/operator/nn/mkldnn/mkldnn_ops-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_ops-inl.h
@@ -179,17 +179,17 @@ void MKLDNNLayerNormBackward(const nnvm::NodeAttrs& attrs,
 
 void MKLDNNSum(const mkldnn::memory& arr1, const mkldnn::memory& arr2, const mkldnn::memory& out);
 
-void MKLDNNTransposeForward(const nnvm::NodeAttrs& attrs,
-                            const OpContext& ctx,
-                            const NDArray& data,
-                            const OpReqType& req,
-                            const NDArray& output);
+void MKLDNNNDArrayTransposeForward(const nnvm::NodeAttrs& attrs,
+                                   const OpContext& ctx,
+                                   const NDArray& data,
+                                   const OpReqType& req,
+                                   const NDArray& output);
 
 void MKLDNNNumpyTransposeForward(const nnvm::NodeAttrs& attrs,
-                                const OpContext& ctx,
-                                const NDArray& data,
-                                const OpReqType& req,
-                                const NDArray& output);
+                                 const OpContext& ctx,
+                                 const NDArray& data,
+                                 const OpReqType& req,
+                                 const NDArray& output);
 
 void MKLDNNReshapeForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const NDArray& input,
diff --git a/src/operator/nn/mkldnn/mkldnn_transpose.cc b/src/operator/nn/mkldnn/mkldnn_transpose.cc
index 21d89e44faa8..898aacc55213 100644
--- a/src/operator/nn/mkldnn/mkldnn_transpose.cc
+++ b/src/operator/nn/mkldnn/mkldnn_transpose.cc
@@ -25,10 +25,8 @@
 
 #if MXNET_USE_ONEDNN == 1
 
-#include <mkldnn.hpp>
-
-#include "../../tensor/matrix_op-inl.h"
 #include "../../numpy/np_matrix_op-inl.h"
+#include "../../tensor/matrix_op-inl.h"
 
 namespace mxnet {
 namespace op {
 
 bool SupportMKLDNNTranspose(const NDArray& data) {
   auto data_ndim = data.shape().ndim();
 
   if (data_ndim > 4 || data_ndim == 0 || data.shape().Size() == 0 ||
@@ -45,7 +43,7 @@ bool SupportMKLDNNTranspose(const NDArray& data) {
 
 typedef ParamOpSign<NumpyTransposeParam> MKLDNNTransposeSignature;
 
-class MKLDNNTransposeForward {
+class MKLDNNTransposeFwd {
  public:
   std::shared_ptr<mkldnn::memory> data_;
   std::shared_ptr<mkldnn::memory> out_;
@@ -53,7 +51,7 @@ class MKLDNNTransposeForward {
   std::shared_ptr<mkldnn::reorder> transpose_;
 
  public:
-  MKLDNNTransposeForward(const NumpyTransposeParam& param, const NDArray& data) {
+  MKLDNNTransposeFwd(const NumpyTransposeParam& param, const NDArray& data) {
     auto shape     = data.shape();
     auto data_ndim = shape.ndim();
     auto axes_ndim = param.axes.ndim();
     auto axes      = mxnet::TShape(data_ndim, -1);
@@ -114,7 +115,7 @@ class MKLDNNTransposeForward {
   }
 };
 
-static MKLDNNTransposeForward& GetTransposeForward(const NumpyTransposeParam& param,
-                                                   const NDArray& data) {
+static MKLDNNTransposeFwd& GetTransposeForward(const NumpyTransposeParam& param,
+                                               const NDArray& data) {
 #if DMLC_CXX11_THREAD_LOCAL
-  static thread_local std::unordered_map<MKLDNNTransposeSignature, MKLDNNTransposeForward, OpHash>
-      fwds;
+  static thread_local std::unordered_map<MKLDNNTransposeSignature, MKLDNNTransposeFwd, OpHash> fwds;
 #else
-  static MX_THREAD_LOCAL
-      std::unordered_map<MKLDNNTransposeSignature, MKLDNNTransposeForward, OpHash>
-          fwds;
+  static MX_THREAD_LOCAL std::unordered_map<MKLDNNTransposeSignature, MKLDNNTransposeFwd, OpHash>
+      fwds;
 #endif
   MKLDNNTransposeSignature key(param);
   key.AddSign(data);
   auto it = fwds.find(key);
   if (it == fwds.end()) {
-    MKLDNNTransposeForward fwd(param, data);
+    MKLDNNTransposeFwd fwd(param, data);
     it = AddToCache(&fwds, key, fwd);
   }
   return it->second;
 }
 
-NumpyTransposeParam ProcessTransposeParam(const TransposeParam &param) {
-  NumpyTransposeParam p;
-  if (param.axes.ndim() == 0)
-  {
-    p.axes = mxnet::TShape(-1,0);
-  }
-  else
-  {
-    p.axes = param.axes;
+NumpyTransposeParam ProcessNDArrayTransposeParam(const nnvm::NodeAttrs& attrs) {
+  const TransposeParam& param_in = nnvm::get<TransposeParam>(attrs.parsed);
+  NumpyTransposeParam param_out;
+  if (param_in.axes.ndim() == 0) {
+    param_out.axes = mxnet::TShape(-1, 0);
+  } else {
+    param_out.axes = param_in.axes;
   }
-  return p;
+  return param_out;
 }
 
-NumpyTransposeParam ProcessNumpyTransposeParam(const NumpyTransposeParam &param) {
-  NumpyTransposeParam p;
-  p.axes = common::CanonicalizeAxes(param.axes);
-  return p;
+NumpyTransposeParam ProcessNumpyTransposeParam(const nnvm::NodeAttrs& attrs) {
+  const NumpyTransposeParam& param_in = nnvm::get<NumpyTransposeParam>(attrs.parsed);
+  NumpyTransposeParam param_out;
+  param_out.axes = common::CanonicalizeAxes(param_in.axes);
+  return param_out;
 }
 
-void MKLDNNTransposeForward(const nnvm::NodeAttrs& attrs,
-                            const OpContext& ctx,
-                            const NDArray& data,
-                            const OpReqType& req,
-                            const NDArray& output) {
-  const TransposeParam& param = nnvm::get<TransposeParam>(attrs.parsed);
-  const NumpyTransposeParam processed_param = ProcessTransposeParam(param);
-
-  auto fwd = GetTransposeForward(processed_param, data);
+void MKLDNNTransposeForwardExec(const NumpyTransposeParam& param,
+                                const NDArray& data,
+                                const NDArray& output) {
+  auto fwd = GetTransposeForward(param, data);
   fwd.SetNewMem(data, output);
   fwd.Execute();
 }
 
+void MKLDNNNDArrayTransposeForward(const nnvm::NodeAttrs& attrs,
+                                   const OpContext& ctx,
+                                   const NDArray& data,
+                                   const OpReqType& req,
+                                   const NDArray& output) {
+  const NumpyTransposeParam processed_param = ProcessNDArrayTransposeParam(attrs);
+  MKLDNNTransposeForwardExec(processed_param, data, output);
+}
+
 void MKLDNNNumpyTransposeForward(const nnvm::NodeAttrs& attrs,
                                  const OpContext& ctx,
                                  const NDArray& data,
                                  const OpReqType& req,
                                  const NDArray& output) {
-  const NumpyTransposeParam& param = nnvm::get<NumpyTransposeParam>(attrs.parsed);
-  const NumpyTransposeParam processed_param = ProcessNumpyTransposeParam(param);
-
-  auto fwd = GetTransposeForward(processed_param, data);
-  fwd.SetNewMem(data, output);
-  fwd.Execute();
+  const NumpyTransposeParam processed_param = ProcessNumpyTransposeParam(attrs);
+  MKLDNNTransposeForwardExec(processed_param, data, output);
 }
+
 }  // namespace op
 }  // namespace mxnet
 #endif
diff --git a/src/operator/tensor/matrix_op.cc b/src/operator/tensor/matrix_op.cc
index 68dc68c79f2c..8454909a06ee 100644
--- a/src/operator/tensor/matrix_op.cc
+++ b/src/operator/tensor/matrix_op.cc
@@ -24,6 +24,7 @@
 // this will be invoked by gcc and compile CPU version
 #include "./matrix_op-inl.h"
 #include "./elemwise_unary_op.h"
+#include "../numpy/np_matrix_op-inl.h"
 #if MXNET_USE_ONEDNN == 1
 #include "../nn/mkldnn/mkldnn_base-inl.h"
 #include "../nn/mkldnn/mkldnn_ops-inl.h"
@@ -306,6 +307,7 @@ static void TransposeComputeExCPU(const nnvm::NodeAttrs& attrs,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
+  NumpyTransposeParam p;
   if (req[0] == kNullOp) {
     return;
   }
@@ -315,7 +317,7 @@ static void TransposeComputeExCPU(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(outputs.size(), 1U);
 
   if (SupportMKLDNNTranspose(inputs[0]) && req[0] == kWriteTo) {
-    MKLDNNRun(MKLDNNTransposeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
+    MKLDNNRun(MKLDNNNDArrayTransposeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
     return;
   }

From 0fab65a7316178d961f07356e98c405959823b3c Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Mon, 13 Sep 2021 10:15:12 +0200
Subject: [PATCH 06/27] remove unnecessary lines

---
 src/operator/tensor/matrix_op.cc | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/operator/tensor/matrix_op.cc b/src/operator/tensor/matrix_op.cc
index 8454909a06ee..dce3e5aefd33 100644
--- a/src/operator/tensor/matrix_op.cc
+++ b/src/operator/tensor/matrix_op.cc
@@ -24,7 +24,6 @@
 // this will be invoked by gcc and compile CPU version
 #include "./matrix_op-inl.h"
 #include "./elemwise_unary_op.h"
-#include "../numpy/np_matrix_op-inl.h"
 #if MXNET_USE_ONEDNN == 1
 #include "../nn/mkldnn/mkldnn_base-inl.h"
 #include "../nn/mkldnn/mkldnn_ops-inl.h"
@@ -307,7 +306,6 @@ static void TransposeComputeExCPU(const nnvm::NodeAttrs& attrs,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
-  NumpyTransposeParam p;
   if (req[0] == kNullOp) {
     return;
   }
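Worth noting before the next patch: PATCH 01 gave NumpyTransposeParam an operator== and a std::hash specialization precisely so the parameter can form part of the primitive-cache key (via ParamOpSign). Below is a self-contained sketch of that key pattern; FakeParam and FakeFwd are illustrative stand-ins, not MXNet types:

    #include <cstddef>
    #include <functional>
    #include <string>
    #include <unordered_map>

    struct FakeFwd { std::string desc; };  // stands in for the cached primitive

    struct FakeParam {
      int axis0, axis1;
      bool operator==(const FakeParam& other) const {
        return axis0 == other.axis0 && axis1 == other.axis1;
      }
    };

    // Same role as std::hash<mxnet::op::NumpyTransposeParam> in the patch:
    // fold every field into one value so the param can key an unordered_map.
    namespace std {
    template <>
    struct hash<FakeParam> {
      size_t operator()(const FakeParam& p) const {
        size_t ret = hash<int>()(p.axis0);
        ret ^= hash<int>()(p.axis1) + 0x9e3779b9 + (ret << 6) + (ret >> 2);
        return ret;
      }
    };
    }  // namespace std

    int main() {
      std::unordered_map<FakeParam, FakeFwd> cache;
      cache[{0, 1}] = {"transpose(0,1)"};
      return cache.count({0, 1}) == 1 ? 0 : 1;  // equal params -> cache hit
    }

dmlc::HashCombine in the real code plays the role of the manual hash-mixing line above.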
From 7f53e9e92e6489aa684ca120b55e2ec91d0fcf86 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Thu, 16 Sep 2021 15:07:59 +0200
Subject: [PATCH 07/27] template convert param

---
 src/operator/nn/mkldnn/mkldnn_ops-inl.h       |   7 +
 src/operator/nn/mkldnn/mkldnn_transpose-inl.h |  77 +++++++++
 src/operator/nn/mkldnn/mkldnn_transpose.cc    | 161 +++++++-----------
 src/operator/numpy/np_matrix_op.cc            |   4 +-
 src/operator/tensor/matrix_op.cc              |   3 +-
 5 files changed, 151 insertions(+), 101 deletions(-)
 create mode 100644 src/operator/nn/mkldnn/mkldnn_transpose-inl.h

diff --git a/src/operator/nn/mkldnn/mkldnn_ops-inl.h b/src/operator/nn/mkldnn/mkldnn_ops-inl.h
index 2e8a1573ebf9..ba96a2355c1b 100644
--- a/src/operator/nn/mkldnn/mkldnn_ops-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_ops-inl.h
@@ -191,6 +191,13 @@ void MKLDNNNumpyTransposeForward(const nnvm::NodeAttrs& attrs,
                                  const OpReqType& req,
                                  const NDArray& output);
 
+template <typename ParamType>
+void MKLDNNTransposeForward(const nnvm::NodeAttrs& attrs,
+                            const OpContext& ctx,
+                            const NDArray& data,
+                            const OpReqType& req,
+                            const NDArray& output);
+
 void MKLDNNReshapeForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const NDArray& input,
diff --git a/src/operator/nn/mkldnn/mkldnn_transpose-inl.h b/src/operator/nn/mkldnn/mkldnn_transpose-inl.h
new file mode 100644
index 000000000000..15de69d89ab0
--- /dev/null
+++ b/src/operator/nn/mkldnn/mkldnn_transpose-inl.h
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file mkldnn_transpose-inl.h
+ * \brief
+ * \author Rafal Litka
+ */
+
+#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_TRANSPOSE_INL_H_
+#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_TRANSPOSE_INL_H_
+#if MXNET_USE_ONEDNN == 1
+#include "./mkldnn_base-inl.h"
+#include "./mkldnn_ops-inl.h"
+
+#include "../../numpy/np_matrix_op-inl.h"
+#include "../../operator_common.h"
+#include "../../tensor/matrix_op-inl.h"
+
+namespace mxnet {
+namespace op {
+bool SupportMKLDNNTranspose(const NDArray& data);
+class MKLDNNTransposeFwd {
+ public:
+  std::shared_ptr<mkldnn::memory> data_;
+  std::shared_ptr<mkldnn::memory> out_;
+  std::shared_ptr<mkldnn::memory::desc> dst_md_;
+  std::shared_ptr<mkldnn::reorder> transpose_;
+  MKLDNNTransposeFwd(const NumpyTransposeParam& param, const NDArray& data);
+  void SetNewMem(const NDArray& data, const NDArray& output);
+  const mkldnn::reorder& GetFwd() const;
+  void Execute() const;
+};
+
+MKLDNNTransposeFwd& GetTransposeForward(const NumpyTransposeParam& param, const NDArray& data);
+
+template <typename ParamType>
+NumpyTransposeParam ProcessTransposeParam(const nnvm::NodeAttrs& attrs);
+
+template <>
+NumpyTransposeParam ProcessTransposeParam<NumpyTransposeParam>(const nnvm::NodeAttrs& attrs);
+
+template <>
+NumpyTransposeParam ProcessTransposeParam<TransposeParam>(const nnvm::NodeAttrs& attrs);
+
+template <typename ParamType>
+void MKLDNNTransposeForward(const nnvm::NodeAttrs& attrs,
+                            const OpContext& ctx,
+                            const NDArray& data,
+                            const OpReqType& req,
+                            const NDArray& output) {
+  const NumpyTransposeParam param = ProcessTransposeParam<ParamType>(attrs);
+  auto fwd = GetTransposeForward(param, data);
+  fwd.SetNewMem(data, output);
+  fwd.Execute();
+}
+
+}  // namespace op
+}  // namespace mxnet
+#endif
+#endif
\ No newline at end of file
diff --git a/src/operator/nn/mkldnn/mkldnn_transpose.cc b/src/operator/nn/mkldnn/mkldnn_transpose.cc
index 898aacc55213..587909d53231 100644
--- a/src/operator/nn/mkldnn/mkldnn_transpose.cc
+++ b/src/operator/nn/mkldnn/mkldnn_transpose.cc
@@ -25,8 +25,7 @@
 
 #if MXNET_USE_ONEDNN == 1
 
-#include "../../numpy/np_matrix_op-inl.h"
-#include "../../tensor/matrix_op-inl.h"
+#include "./mkldnn_transpose-inl.h"
 
 namespace mxnet {
 namespace op {
@@ -43,78 +42,68 @@ bool SupportMKLDNNTranspose(const NDArray& data) {
 
 typedef ParamOpSign<NumpyTransposeParam> MKLDNNTransposeSignature;
 
-class MKLDNNTransposeFwd {
- public:
-  std::shared_ptr<mkldnn::memory> data_;
-  std::shared_ptr<mkldnn::memory> out_;
-  std::shared_ptr<mkldnn::memory::desc> dst_md_;
-  std::shared_ptr<mkldnn::reorder> transpose_;
-
- public:
-  MKLDNNTransposeFwd(const NumpyTransposeParam& param, const NDArray& data) {
-    auto shape     = data.shape();
-    auto data_ndim = shape.ndim();
-    auto axes_ndim = param.axes.ndim();
-    auto axes      = mxnet::TShape(data_ndim, -1);
-    if (axes_ndim == -1) {
-      for (int i = 0; i < data_ndim; i++) {
-        axes[i] = data_ndim - i - 1;
-      }
-    } else {
-      axes = param.axes;
-    }
+MKLDNNTransposeFwd::MKLDNNTransposeFwd(const NumpyTransposeParam& param, const NDArray& data) {
+  auto shape     = data.shape();
+  auto data_ndim = shape.ndim();
+  auto axes_ndim = param.axes.ndim();
+  auto axes      = mxnet::TShape(data_ndim, -1);
+  if (!ndim_is_known(axes_ndim)) {
+    for (int i = 0; i < data_ndim; i++) {
+      axes[i] = data_ndim - i - 1;
+    }
+  } else {
+    axes = param.axes;
+  }
 
-    auto engine = CpuEngine::Get()->get_engine();
-    auto in_mem = data.GetMKLDNNData();
-    auto src_md = in_mem->get_desc();
-    data_       = std::make_shared<mkldnn::memory>(src_md, engine, nullptr);
-
-    mkldnn_dims_t strides;
-    mkldnn_dims_t sh;
-    dim_t total_stride = 1;
-    for (int i = data_ndim - 1; i >= 0; i--) {
-      sh[i]            = shape[i];
-      strides[axes[i]] = total_stride;
-      total_stride *= shape[axes[i]];
-    }
+  auto engine = CpuEngine::Get()->get_engine();
+  auto in_mem = data.GetMKLDNNData();
+  auto src_md = in_mem->get_desc();
+  data_       = std::make_shared<mkldnn::memory>(src_md, engine, nullptr);
+
+  mkldnn_dims_t strides;
+  mkldnn_dims_t sh;
+  dim_t total_stride = 1;
+  for (int i = data_ndim - 1; i >= 0; i--) {
+    sh[i]            = shape[i];
+    strides[axes[i]] = total_stride;
+    total_stride *= shape[axes[i]];
+  }
 
-    mkldnn_memory_desc_t dst_fmt;
-    mkldnn_memory_desc_init_by_strides(&dst_fmt, data_ndim, sh, mkldnn_f32, strides);
+  mkldnn_memory_desc_t dst_fmt;
+  mkldnn_memory_desc_init_by_strides(&dst_fmt, data_ndim, sh, mkldnn_f32, strides);
 
-    dst_md_ = std::make_shared<mkldnn::memory::desc>(dst_fmt);
-    out_    = std::make_shared<mkldnn::memory>(*dst_md_, engine, nullptr);
+  dst_md_ = std::make_shared<mkldnn::memory::desc>(dst_fmt);
+  out_    = std::make_shared<mkldnn::memory>(*dst_md_, engine, nullptr);
 
-    transpose_ = std::make_shared<mkldnn::reorder>(*data_, *out_);
-  }
-
-  void SetNewMem(const NDArray& data, const NDArray& output) {
-    if (data.IsMKLDNNData()) {
-      this->data_->set_data_handle(data.GetMKLDNNData()->get_data_handle());
-    } else {
-      MSHADOW_TYPE_SWITCH(
-          data.dtype(), DTYPE, { this->data_->set_data_handle(data.data().dptr<DTYPE>()); });
-    }
+  transpose_ = std::make_shared<mkldnn::reorder>(*data_, *out_);
+}
 
-    CHECK(!output.IsMKLDNNData());
+void MKLDNNTransposeFwd::SetNewMem(const NDArray& data, const NDArray& output) {
+  if (data.IsMKLDNNData()) {
+    this->data_->set_data_handle(data.GetMKLDNNData()->get_data_handle());
+  } else {
     MSHADOW_TYPE_SWITCH(
-        output.dtype(), DTYPE, { this->out_->set_data_handle(output.data().dptr<DTYPE>()); });
+        data.dtype(), DTYPE, { this->data_->set_data_handle(data.data().dptr<DTYPE>()); });
   }
 
-  const mkldnn::reorder& GetFwd() const {
-    return *transpose_;
-  }
+  CHECK(!output.IsMKLDNNData());
+  MSHADOW_TYPE_SWITCH(
+      output.dtype(), DTYPE, { this->out_->set_data_handle(output.data().dptr<DTYPE>()); });
+}
 
-  void Execute() const {
-    auto stream = MKLDNNStream::Get();
-    mkldnn_args_map_t net_args;
-    net_args.insert({{MKLDNN_ARG_FROM, *(data_)}, {MKLDNN_ARG_TO, *(out_)}});
-    stream->RegisterPrimArgs(*transpose_, net_args);
-    stream->Submit();
-  }
-};
+const mkldnn::reorder& MKLDNNTransposeFwd::GetFwd() const {
+  return *transpose_;
+}
 
-static MKLDNNTransposeFwd& GetTransposeForward(const NumpyTransposeParam& param,
-                                               const NDArray& data) {
+void MKLDNNTransposeFwd::Execute() const {
+  auto stream = MKLDNNStream::Get();
+  mkldnn_args_map_t net_args;
+  net_args.insert({{MKLDNN_ARG_FROM, *(data_)}, {MKLDNN_ARG_TO, *(out_)}});
+  stream->RegisterPrimArgs(*transpose_, net_args);
+  stream->Submit();
+}
+
+MKLDNNTransposeFwd& GetTransposeForward(const NumpyTransposeParam& param, const NDArray& data) {
 #if DMLC_CXX11_THREAD_LOCAL
   static thread_local std::unordered_map<MKLDNNTransposeSignature, MKLDNNTransposeFwd, OpHash> fwds;
 #else
@@ -132,7 +121,16 @@ static MKLDNNTransposeFwd& GetTransposeForward(const NumpyTransposeParam& param,
   return it->second;
 }
 
-NumpyTransposeParam ProcessNDArrayTransposeParam(const nnvm::NodeAttrs& attrs) {
+template <>
+NumpyTransposeParam ProcessTransposeParam<NumpyTransposeParam>(const nnvm::NodeAttrs& attrs) {
+  const NumpyTransposeParam& param_in = nnvm::get<NumpyTransposeParam>(attrs.parsed);
+  NumpyTransposeParam param_out;
+  param_out.axes = common::CanonicalizeAxes(param_in.axes);
+  return param_out;
+};
+
+template <>
+NumpyTransposeParam ProcessTransposeParam<TransposeParam>(const nnvm::NodeAttrs& attrs) {
   const TransposeParam& param_in = nnvm::get<TransposeParam>(attrs.parsed);
   NumpyTransposeParam param_out;
   if (param_in.axes.ndim() == 0) {
@@ -141,40 +139,7 @@ NumpyTransposeParam ProcessNDArrayTransposeParam(const nnvm::NodeAttrs& attrs) {
     param_out.axes = param_in.axes;
   }
   return param_out;
-}
-
-NumpyTransposeParam ProcessNumpyTransposeParam(const nnvm::NodeAttrs& attrs) {
-  const NumpyTransposeParam& param_in = nnvm::get<NumpyTransposeParam>(attrs.parsed);
-  NumpyTransposeParam param_out;
-  param_out.axes = common::CanonicalizeAxes(param_in.axes);
-  return param_out;
-}
-
-void MKLDNNTransposeForwardExec(const NumpyTransposeParam& param,
-                                const NDArray& data,
-                                const NDArray& output) {
-  auto fwd = GetTransposeForward(param, data);
-  fwd.SetNewMem(data, output);
-  fwd.Execute();
-}
-
-void MKLDNNNDArrayTransposeForward(const nnvm::NodeAttrs& attrs,
-                                   const OpContext& ctx,
-                                   const NDArray& data,
-                                   const OpReqType& req,
-                                   const NDArray& output) {
-  const NumpyTransposeParam processed_param = ProcessNDArrayTransposeParam(attrs);
-  MKLDNNTransposeForwardExec(processed_param, data, output);
-}
-
-void MKLDNNNumpyTransposeForward(const nnvm::NodeAttrs& attrs,
-                                 const OpContext& ctx,
-                                 const NDArray& data,
-                                 const OpReqType& req,
-                                 const NDArray& output) {
-  const NumpyTransposeParam processed_param = ProcessNumpyTransposeParam(attrs);
-  MKLDNNTransposeForwardExec(processed_param, data, output);
-}
+};
 
 }  // namespace op
 }  // namespace mxnet
diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc
index b540031ec6bf..c29309ceaeca 100644
--- a/src/operator/numpy/np_matrix_op.cc
+++ b/src/operator/numpy/np_matrix_op.cc
@@ -29,7 +29,7 @@
 #if MXNET_USE_ONEDNN == 1
 #include "../nn/mkldnn/mkldnn_ops-inl.h"
 #include "../nn/mkldnn/mkldnn_base-inl.h"
-#include "../nn/mkldnn/mkldnn_slice-inl.h"
+#include "../nn/mkldnn/mkldnn_transpose-inl.h"
 #endif
 namespace mxnet {
 namespace op {
@@ -120,7 +120,7 @@ static void NumpyTransposeComputeExCPU(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(outputs.size(), 1U);
 
   if (SupportMKLDNNTranspose(inputs[0]) && req[0] == kWriteTo) {
-    MKLDNNRun(MKLDNNNumpyTransposeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
+    MKLDNNRun(MKLDNNTransposeForward<NumpyTransposeParam>, attrs, ctx, inputs[0], req[0], outputs[0]);
     return;
   }
   FallBackCompute(NumpyTranspose<cpu>, attrs, ctx, inputs, req, outputs);
diff --git a/src/operator/tensor/matrix_op.cc b/src/operator/tensor/matrix_op.cc
index dce3e5aefd33..0921e79ecac5 100644
--- a/src/operator/tensor/matrix_op.cc
+++ b/src/operator/tensor/matrix_op.cc
@@ -29,6 +29,7 @@
 #include "../nn/mkldnn/mkldnn_ops-inl.h"
 #include "../nn/mkldnn/mkldnn_reshape-inl.h"
 #include "../nn/mkldnn/mkldnn_slice-inl.h"
+#include "../nn/mkldnn/mkldnn_transpose-inl.h"
 #endif
 
 namespace mxnet {
@@ -315,7 +316,7 @@ static void TransposeComputeExCPU(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(outputs.size(), 1U);
 
   if (SupportMKLDNNTranspose(inputs[0]) && req[0] == kWriteTo) {
-    MKLDNNRun(MKLDNNNDArrayTransposeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
+    MKLDNNRun(MKLDNNTransposeForward<TransposeParam>, attrs, ctx, inputs[0], req[0], outputs[0]);
     return;
   }
   FallBackCompute(Transpose<cpu>, attrs, ctx, inputs, req, outputs);
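The point of PATCH 07 is that both the legacy TransposeParam and NumpyTransposeParam now funnel through a single templated forward, with one specialization per param type doing the conversion. A compilable toy version of that dispatch shape is sketched below; LegacyParam, NumpyParam, and ToNumpy are illustrative stand-ins, not the MXNet structs:

    #include <cassert>
    #include <vector>

    struct LegacyParam { std::vector<int> axes; };             // empty == "reverse all axes"
    struct NumpyParam  { std::vector<int> axes; bool known; }; // known == false == same default

    template <typename ParamType>
    NumpyParam ToNumpy(const ParamType& p);

    template <>
    NumpyParam ToNumpy<LegacyParam>(const LegacyParam& p) {
      // The legacy operator encodes "no axes given" as an empty list; map it
      // onto the numpy-style "unknown axes" encoding, much as the patch maps
      // axes.ndim() == 0 onto TShape(-1, 0).
      return {p.axes, !p.axes.empty()};
    }

    template <>
    NumpyParam ToNumpy<NumpyParam>(const NumpyParam& p) {
      return p;  // already the common representation
    }

    // One implementation serves both front ends.
    template <typename ParamType>
    int EffectiveNdim(const ParamType& p, int data_ndim) {
      const NumpyParam common = ToNumpy<ParamType>(p);
      return common.known ? static_cast<int>(common.axes.size()) : data_ndim;
    }

    int main() {
      assert(EffectiveNdim(LegacyParam{{}}, 3) == 3);
      assert(EffectiveNdim(NumpyParam{{1, 0}, true}, 2) == 2);
      return 0;
    }

The registrations then instantiate the template once per operator: MKLDNNTransposeForward<TransposeParam> for the classic transpose in matrix_op.cc and MKLDNNTransposeForward<NumpyTransposeParam> for _npi_transpose.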
From 61d194d350ed761dd5f2ec99b438dc19aa67a260 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Thu, 16 Sep 2021 15:12:48 +0200
Subject: [PATCH 08/27] newline at end

---
 src/operator/nn/mkldnn/mkldnn_transpose-inl.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/operator/nn/mkldnn/mkldnn_transpose-inl.h b/src/operator/nn/mkldnn/mkldnn_transpose-inl.h
index 15de69d89ab0..1f5b7edd3233 100644
--- a/src/operator/nn/mkldnn/mkldnn_transpose-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_transpose-inl.h
@@ -74,4 +74,4 @@ void MKLDNNTransposeForward(const nnvm::NodeAttrs& attrs,
 }  // namespace op
 }  // namespace mxnet
 #endif
-#endif
\ No newline at end of file
+#endif

From ee2af874de8b7eb2d355f1ef0e6f495aaf56df98 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Fri, 17 Sep 2021 11:10:38 +0200
Subject: [PATCH 09/27] remove unused declarations

---
 src/operator/nn/mkldnn/mkldnn_ops-inl.h | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/src/operator/nn/mkldnn/mkldnn_ops-inl.h b/src/operator/nn/mkldnn/mkldnn_ops-inl.h
index ba96a2355c1b..0d32c49a766c 100644
--- a/src/operator/nn/mkldnn/mkldnn_ops-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_ops-inl.h
@@ -179,18 +179,6 @@ void MKLDNNLayerNormBackward(const nnvm::NodeAttrs& attrs,
 
 void MKLDNNSum(const mkldnn::memory& arr1, const mkldnn::memory& arr2, const mkldnn::memory& out);
 
-void MKLDNNNDArrayTransposeForward(const nnvm::NodeAttrs& attrs,
-                                   const OpContext& ctx,
-                                   const NDArray& data,
-                                   const OpReqType& req,
-                                   const NDArray& output);
-
-void MKLDNNNumpyTransposeForward(const nnvm::NodeAttrs& attrs,
-                                 const OpContext& ctx,
-                                 const NDArray& data,
-                                 const OpReqType& req,
-                                 const NDArray& output);
-
 template <typename ParamType>
 void MKLDNNTransposeForward(const nnvm::NodeAttrs& attrs,
                             const OpContext& ctx,

From 597dda694431a2f5f53ca0263ea2e34fc3c4fc22 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Fri, 17 Sep 2021 11:26:37 +0200
Subject: [PATCH 10/27] whitespace, guard comments

---
 src/operator/nn/mkldnn/mkldnn_transpose-inl.h | 7 +++++--
 src/operator/nn/mkldnn/mkldnn_transpose.cc    | 2 +-
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/src/operator/nn/mkldnn/mkldnn_transpose-inl.h b/src/operator/nn/mkldnn/mkldnn_transpose-inl.h
index 1f5b7edd3233..9a7e8905b526 100644
--- a/src/operator/nn/mkldnn/mkldnn_transpose-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_transpose-inl.h
@@ -35,7 +35,9 @@
 
 namespace mxnet {
 namespace op {
+
 bool SupportMKLDNNTranspose(const NDArray& data);
+
 class MKLDNNTransposeFwd {
  public:
   std::shared_ptr<mkldnn::memory> data_;
@@ -73,5 +75,6 @@ void MKLDNNTransposeForward(const nnvm::NodeAttrs& attrs,
 
 }  // namespace op
 }  // namespace mxnet
-#endif
-#endif
+
+#endif  // MXNET_USE_ONEDNN == 1
+#endif  // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_TRANSPOSE_INL_H_
diff --git a/src/operator/nn/mkldnn/mkldnn_transpose.cc b/src/operator/nn/mkldnn/mkldnn_transpose.cc
index 587909d53231..0b3cf97d0ebb 100644
--- a/src/operator/nn/mkldnn/mkldnn_transpose.cc
+++ b/src/operator/nn/mkldnn/mkldnn_transpose.cc
@@ -143,4 +143,4 @@ NumpyTransposeParam ProcessTransposeParam<TransposeParam>(const nnvm::NodeAttrs& attrs) {
 
 }  // namespace op
 }  // namespace mxnet
-#endif
+#endif  // MXNET_USE_ONEDNN == 1

From e4aa753c18aee541f55554ddc9301428f55b6ba3 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Fri, 17 Sep 2021 17:44:57 +0200
Subject: [PATCH 11/27] sanity fix

---
 src/operator/nn/mkldnn/mkldnn_transpose.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/operator/nn/mkldnn/mkldnn_transpose.cc b/src/operator/nn/mkldnn/mkldnn_transpose.cc
index 0b3cf97d0ebb..5511fedc9703 100644
--- a/src/operator/nn/mkldnn/mkldnn_transpose.cc
+++ b/src/operator/nn/mkldnn/mkldnn_transpose.cc
@@ -127,7 +127,7 @@ NumpyTransposeParam ProcessTransposeParam<NumpyTransposeParam>(const nnvm::NodeAttrs& attrs) {
   NumpyTransposeParam param_out;
   param_out.axes = common::CanonicalizeAxes(param_in.axes);
   return param_out;
-};
+}
 
 template <>
 NumpyTransposeParam ProcessTransposeParam<TransposeParam>(const nnvm::NodeAttrs& attrs) {
@@ -139,7 +139,7 @@ NumpyTransposeParam ProcessTransposeParam<TransposeParam>(const nnvm::NodeAttrs& attrs) {
     param_out.axes = param_in.axes;
   }
   return param_out;
-};
+}

From bfc3b64c87b89b06ffb0893d5ec3e4134cde9903 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Fri, 17 Sep 2021 17:53:13 +0200
Subject: [PATCH 12/27] formatting

---
 src/operator/numpy/np_matrix_op.cc | 23 ++++++++++++-----------
 1 file changed, 12 insertions(+), 11 deletions(-)

diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc
index c29309ceaeca..d967f51a7741 100644
--- a/src/operator/numpy/np_matrix_op.cc
+++ b/src/operator/numpy/np_matrix_op.cc
@@ -107,30 +107,31 @@ bool NumpyTransposeShape(const nnvm::NodeAttrs& attrs,
 #if MXNET_USE_ONEDNN == 1
 
 static void NumpyTransposeComputeExCPU(const nnvm::NodeAttrs& attrs,
-                                   const OpContext& ctx,
-                                   const std::vector<NDArray>& inputs,
-                                   const std::vector<OpReqType>& req,
-                                   const std::vector<NDArray>& outputs) {
+                                       const OpContext& ctx,
+                                       const std::vector<NDArray>& inputs,
+                                       const std::vector<OpReqType>& req,
+                                       const std::vector<NDArray>& outputs) {
   if (req[0] == kNullOp) {
     return;
   }
-  CHECK(req[0] == kWriteTo || req[0] == kAddTo) <<
-      "Transpose only supports kNullOp, kWriteTo and kAddTo";
+  CHECK(req[0] == kWriteTo || req[0] == kAddTo)
+      << "Transpose only supports kNullOp, kWriteTo and kAddTo";
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
 
   if (SupportMKLDNNTranspose(inputs[0]) && req[0] == kWriteTo) {
-    MKLDNNRun(MKLDNNTransposeForward<NumpyTransposeParam>, attrs, ctx, inputs[0], req[0], outputs[0]);
+    MKLDNNRun(
+        MKLDNNTransposeForward<NumpyTransposeParam>, attrs, ctx, inputs[0], req[0], outputs[0]);
     return;
   }
   FallBackCompute(NumpyTranspose<cpu>, attrs, ctx, inputs, req, outputs);
 }
 
 inline static bool NumpyTransposeStorageType(const nnvm::NodeAttrs& attrs,
-                                         const int dev_mask,
-                                         DispatchMode* dispatch_mode,
-                                         std::vector<int>* in_attrs,
-                                         std::vector<int>* out_attrs) {
+                                             const int dev_mask,
+                                             DispatchMode* dispatch_mode,
+                                             std::vector<int>* in_attrs,
+                                             std::vector<int>* out_attrs) {
   CHECK_EQ(in_attrs->size(), 1U);
   CHECK_EQ(out_attrs->size(), 1U);
   return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, out_attrs);

From db8cc9b67a040f27855640912e0b017bd8873367 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Mon, 11 Oct 2021 09:55:17 +0200
Subject: [PATCH 13/27] separate error tests transpose

---
 tests/python/unittest/test_numpy_op.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py
index 488f1a80285d..7cf125e71b56 100644
--- a/tests/python/unittest/test_numpy_op.py
+++ b/tests/python/unittest/test_numpy_op.py
@@ -2573,12 +2573,15 @@ def forward(self, a):
 
 
 @use_np
-def test_np_transpose_error():
+def test_np_transpose_error2():
     # Test for error raising
     dat = np.random.normal(0, 1, (3, 4, 5), dtype=np.float32)
     pytest.raises(ValueError, lambda: dat.transpose((0, 0, 1)))
-    pytest.raises(MXNetError, lambda: dat.transpose((0, 1, 3)))
 
+@use_np
+def test_np_transpose_error1():
+    dat = np.random.normal(0, 1, (3, 4, 5), dtype=np.float32)
+    pytest.raises(MXNetError, lambda: dat.transpose((0, 1, 3)))
 
 @use_np
 def test_np_meshgrid():
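PATCH 11 is only a syntax fix, but the ProcessTransposeParam<NumpyTransposeParam> it touches is worth a note: it runs common::CanonicalizeAxes so that negative (counted-from-the-end) axes normalize to one canonical form before the axes feed into stride construction and cache keys. Below is a small sketch of that normalization, under the assumption that CanonicalizeAxes simply maps each negative entry a to a + ndim; the helper is illustrative, not the MXNet implementation:

    #include <cassert>
    #include <vector>

    // Map numpy-style negative axes onto non-negative ones so that, e.g.,
    // axes (0, -1, 1) and (0, 2, 1) hash and compare as the same permutation.
    std::vector<int> CanonicalizeAxes(const std::vector<int>& axes) {
      const int ndim = static_cast<int>(axes.size());
      std::vector<int> out(axes);
      for (int& a : out) {
        if (a < 0) a += ndim;
      }
      return out;
    }

    int main() {
      assert((CanonicalizeAxes({0, -1, 1}) == std::vector<int>{0, 2, 1}));
      return 0;
    }

Without the canonical form, x.transpose(0, -1, 1) and x.transpose(0, 2, 1) would produce distinct cache signatures for what is the same reorder.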
From ebaadc910da55b8add80d4245806fae1365854b0 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Mon, 11 Oct 2021 10:16:37 +0200
Subject: [PATCH 14/27] formatting

---
 src/operator/numpy/np_matrix_op-inl.h | 6 +++---
 src/operator/numpy/np_matrix_op.cc    | 7 ++++---
 2 files changed, 7 insertions(+), 6 deletions(-)

diff --git a/src/operator/numpy/np_matrix_op-inl.h b/src/operator/numpy/np_matrix_op-inl.h
index 113ee446302b..225fd4c40a67 100644
--- a/src/operator/numpy/np_matrix_op-inl.h
+++ b/src/operator/numpy/np_matrix_op-inl.h
@@ -50,7 +50,7 @@ struct NumpyTransposeParam : public dmlc::Parameter<NumpyTransposeParam> {
               "the axes according to the values given.");
   }
 
-  bool operator==(const NumpyTransposeParam &other) const {
+  bool operator==(const NumpyTransposeParam& other) const {
     return this->axes == other.axes;
   }
 
@@ -1874,11 +1874,11 @@ void NumpyDiagIndicesFromForward(const nnvm::NodeAttrs& attrs,
 }  // namespace mxnet
 
 namespace std {
-template<>
+template <>
 struct hash<mxnet::op::NumpyTransposeParam> {
   size_t operator()(const mxnet::op::NumpyTransposeParam& val) {
     size_t ret = 0;
-    ret = dmlc::HashCombine(ret, val.axes);
+    ret        = dmlc::HashCombine(ret, val.axes);
     return ret;
   }
 };
diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc
index d967f51a7741..b4bec4a1f78b 100644
--- a/src/operator/numpy/np_matrix_op.cc
+++ b/src/operator/numpy/np_matrix_op.cc
@@ -425,9 +425,10 @@ NNVM_REGISTER_OP(_npx_reshape)
     .set_attr<bool>("TIsMKLDNN", true)
     .set_attr<FComputeEx>("FComputeEx<cpu>", ReshapeComputeExCPU)
     .set_attr<FInferStorageType>("FInferStorageType", ReshapeStorageType)
-    .set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
-      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
-    })
+    .set_attr<FResourceRequest>("FResourceRequest",
+                                [](const NodeAttrs& n) {
+                                  return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
+                                })
 #endif
     .set_attr<nnvm::FInplaceOption>("FInplaceOption",
                                     [](const NodeAttrs& attrs) {

From fcbf637caf0a6364f3171549a169c734ab60f7bd Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Fri, 15 Oct 2021 11:08:02 +0200
Subject: [PATCH 15/27] separate transpose error tests

---
 tests/python/unittest/test_numpy_op.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py
index 7cf125e71b56..7fe76d816e12 100644
--- a/tests/python/unittest/test_numpy_op.py
+++ b/tests/python/unittest/test_numpy_op.py
@@ -2573,13 +2573,13 @@ def forward(self, a):
 
 
 @use_np
-def test_np_transpose_error2():
+def test_np_transpose_error1():
     # Test for error raising
     dat = np.random.normal(0, 1, (3, 4, 5), dtype=np.float32)
     pytest.raises(ValueError, lambda: dat.transpose((0, 0, 1)))
 
 @use_np
-def test_np_transpose_error1():
+def test_np_transpose_error2():
     dat = np.random.normal(0, 1, (3, 4, 5), dtype=np.float32)
     pytest.raises(MXNetError, lambda: dat.transpose((0, 1, 3)))
 
From d436bb9a37f4e9372d284153834115481f33f954 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Sun, 17 Oct 2021 21:25:42 +0200
Subject: [PATCH 16/27] transpose header dnnl

---
 src/operator/nn/dnnl/dnnl_transpose-inl.h | 80 +++++++++++++++++++++++
 1 file changed, 80 insertions(+)
 create mode 100644 src/operator/nn/dnnl/dnnl_transpose-inl.h

diff --git a/src/operator/nn/dnnl/dnnl_transpose-inl.h b/src/operator/nn/dnnl/dnnl_transpose-inl.h
new file mode 100644
index 000000000000..908a337f4c86
--- /dev/null
+++ b/src/operator/nn/dnnl/dnnl_transpose-inl.h
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+/*!
+ * \file dnnl_transpose-inl.h
+ * \brief
+ * \author Rafal Litka
+ */
+
+#ifndef MXNET_OPERATOR_NN_DNNL_DNNL_TRANSPOSE_INL_H_
+#define MXNET_OPERATOR_NN_DNNL_DNNL_TRANSPOSE_INL_H_
+#if MXNET_USE_ONEDNN == 1
+#include "./dnnl_base-inl.h"
+#include "./dnnl_ops-inl.h"
+
+#include "../../numpy/np_matrix_op-inl.h"
+#include "../../operator_common.h"
+#include "../../tensor/matrix_op-inl.h"
+
+namespace mxnet {
+namespace op {
+
+bool SupportDNNLTranspose(const NDArray& data);
+
+class DNNLTransposeFwd {
+ public:
+  std::shared_ptr<dnnl::memory> data_;
+  std::shared_ptr<dnnl::memory> out_;
+  std::shared_ptr<dnnl::memory::desc> dst_md_;
+  std::shared_ptr<dnnl::reorder> transpose_;
+  DNNLTransposeFwd(const NumpyTransposeParam& param, const NDArray& data);
+  void SetNewMem(const NDArray& data, const NDArray& output);
+  const dnnl::reorder& GetFwd() const;
+  void Execute() const;
+};
+
+DNNLTransposeFwd& GetTransposeForward(const NumpyTransposeParam& param, const NDArray& data);
+
+template <typename ParamType>
+NumpyTransposeParam ProcessTransposeParam(const nnvm::NodeAttrs& attrs);
+
+template <>
+NumpyTransposeParam ProcessTransposeParam<NumpyTransposeParam>(const nnvm::NodeAttrs& attrs);
+
+template <>
+NumpyTransposeParam ProcessTransposeParam<TransposeParam>(const nnvm::NodeAttrs& attrs);
+
+template <typename ParamType>
+void DNNLTransposeForward(const nnvm::NodeAttrs& attrs,
+                         const OpContext& ctx,
+                         const NDArray& data,
+                         const OpReqType& req,
+                         const NDArray& output) {
+  const NumpyTransposeParam param = ProcessTransposeParam<ParamType>(attrs);
+  auto fwd = GetTransposeForward(param, data);
+  fwd.SetNewMem(data, output);
+  fwd.Execute();
+}
+
+}  // namespace op
+}  // namespace mxnet
+
+#endif  // MXNET_USE_ONEDNN == 1
+#endif  // MXNET_OPERATOR_NN_DNNL_DNNL_TRANSPOSE_INL_H_

From 9f162f9566afdffd8ae1486c2041ca4fd0aa8155 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Sun, 17 Oct 2021 21:43:42 +0200
Subject: [PATCH 17/27] format files sanity

---
 src/operator/nn/dnnl/dnnl_transpose-inl.h | 8 ++++----
 src/operator/nn/dnnl/dnnl_transpose.cc    | 3 +--
 src/operator/numpy/np_matrix_op.cc        | 3 +--
 3 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/src/operator/nn/dnnl/dnnl_transpose-inl.h b/src/operator/nn/dnnl/dnnl_transpose-inl.h
index 908a337f4c86..fd67280dd2d8 100644
--- a/src/operator/nn/dnnl/dnnl_transpose-inl.h
+++ b/src/operator/nn/dnnl/dnnl_transpose-inl.h
@@ -63,10 +63,10 @@ NumpyTransposeParam ProcessTransposeParam<TransposeParam>(const nnvm::NodeAttrs& attrs);
 
 template <typename ParamType>
 void DNNLTransposeForward(const nnvm::NodeAttrs& attrs,
-                         const OpContext& ctx,
-                         const NDArray& data,
-                         const OpReqType& req,
-                         const NDArray& output) {
+                          const OpContext& ctx,
+                          const NDArray& data,
+                          const OpReqType& req,
+                          const NDArray& output) {
   const NumpyTransposeParam param = ProcessTransposeParam<ParamType>(attrs);
   auto fwd = GetTransposeForward(param, data);
   fwd.SetNewMem(data, output);
diff --git a/src/operator/nn/dnnl/dnnl_transpose.cc b/src/operator/nn/dnnl/dnnl_transpose.cc
index ffdbb5b47527..4f113d9734a7 100644
--- a/src/operator/nn/dnnl/dnnl_transpose.cc
+++ b/src/operator/nn/dnnl/dnnl_transpose.cc
@@ -107,8 +107,7 @@ DNNLTransposeFwd& GetTransposeForward(const NumpyTransposeParam& param, const NDArray& data) {
 #if DMLC_CXX11_THREAD_LOCAL
   static thread_local std::unordered_map<DNNLTransposeSignature, DNNLTransposeFwd, OpHash> fwds;
 #else
-  static MX_THREAD_LOCAL std::unordered_map<DNNLTransposeSignature, DNNLTransposeFwd, OpHash>
-      fwds;
+  static MX_THREAD_LOCAL std::unordered_map<DNNLTransposeSignature, DNNLTransposeFwd, OpHash> fwds;
 #endif
   DNNLTransposeSignature key(param);
   key.AddSign(data);
diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc
index 205222d206a6..9faa4d8eeb90 100644
--- a/src/operator/numpy/np_matrix_op.cc
+++ b/src/operator/numpy/np_matrix_op.cc
@@ -120,8 +120,7 @@ static void NumpyTransposeComputeExCPU(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(outputs.size(), 1U);
 
   if (SupportDNNLTranspose(inputs[0]) && req[0] == kWriteTo) {
-    DNNLRun(
-        DNNLTransposeForward<NumpyTransposeParam>, attrs, ctx, inputs[0], req[0], outputs[0]);
+    DNNLRun(DNNLTransposeForward<NumpyTransposeParam>, attrs, ctx, inputs[0], req[0], outputs[0]);
     return;
   }
   FallBackCompute(NumpyTranspose<cpu>, attrs, ctx, inputs, req, outputs);

From 0d7d6bda8c85f7b3d54a6be297855b8c0a42bb23 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Sun, 7 Nov 2021 08:56:46 +0100
Subject: [PATCH 18/27] move include transpose

---
 src/operator/nn/dnnl/dnnl_transpose-inl.h | 5 ++---
 src/operator/nn/dnnl/dnnl_transpose.cc    | 1 +
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/operator/nn/dnnl/dnnl_transpose-inl.h b/src/operator/nn/dnnl/dnnl_transpose-inl.h
index fd67280dd2d8..adc2fc4abe5d 100644
--- a/src/operator/nn/dnnl/dnnl_transpose-inl.h
+++ b/src/operator/nn/dnnl/dnnl_transpose-inl.h
@@ -28,14 +28,13 @@
 #if MXNET_USE_ONEDNN == 1
 #include "./dnnl_base-inl.h"
 #include "./dnnl_ops-inl.h"
-
 #include "../../numpy/np_matrix_op-inl.h"
-#include "../../operator_common.h"
-#include "../../tensor/matrix_op-inl.h"
 
 namespace mxnet {
 namespace op {
 
+struct TransposeParam;
+
 bool SupportDNNLTranspose(const NDArray& data);
 
 class DNNLTransposeFwd {
diff --git a/src/operator/nn/dnnl/dnnl_transpose.cc b/src/operator/nn/dnnl/dnnl_transpose.cc
index 4f113d9734a7..26da5d49c684 100644
--- a/src/operator/nn/dnnl/dnnl_transpose.cc
+++ b/src/operator/nn/dnnl/dnnl_transpose.cc
@@ -25,6 +25,7 @@
 
 #if MXNET_USE_ONEDNN == 1
 
+#include "../../tensor/matrix_op-inl.h"
 #include "./dnnl_transpose-inl.h"
 
 namespace mxnet {
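Before PATCH 19 lands, a remark on the caching that PATCH 17 just reformatted: GetTransposeForward keeps one primitive map per thread (thread_local / MX_THREAD_LOCAL), so cached oneDNN primitives are reused without cross-thread synchronization. The skeleton of that pattern is sketched below with placeholder Key/Fwd types, which stand in for (and are not) the real DNNLTransposeSignature/DNNLTransposeFwd:

    #include <functional>
    #include <string>
    #include <unordered_map>

    struct Key {
      std::string sig;  // stands in for ParamOpSign: params + input shape/dtype
      bool operator==(const Key& o) const { return sig == o.sig; }
    };
    struct KeyHash {
      size_t operator()(const Key& k) const { return std::hash<std::string>()(k.sig); }
    };
    struct Fwd { int id; };  // stands in for the cached forward primitive

    Fwd& GetCachedFwd(const Key& key) {
      // One map per thread: lookups need no locking, and a miss constructs
      // and caches a new entry, much like AddToCache in the patch.
      static thread_local std::unordered_map<Key, Fwd, KeyHash> fwds;
      auto it = fwds.find(key);
      if (it == fwds.end())
        it = fwds.emplace(key, Fwd{static_cast<int>(fwds.size())}).first;
      return it->second;
    }

    int main() {
      // Repeated calls with an equal key return the same cached entry.
      return GetCachedFwd({"t(2,0,1)|f32|2x3x4"}).id;
    }

The trade-off of thread-local caching is duplicated primitives across threads in exchange for lock-free lookups on the hot path.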
From 9c537786a1d1267936c21a2dd01020456c8545dd Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Sun, 7 Nov 2021 15:11:48 +0100
Subject: [PATCH 19/27] unify param templates

---
 src/operator/nn/dnnl/dnnl_ops-inl.h       |  2 +-
 src/operator/nn/dnnl/dnnl_transpose-inl.h | 16 ++++----------
 src/operator/nn/dnnl/dnnl_transpose.cc    | 22 ++++++++++------------
 3 files changed, 17 insertions(+), 23 deletions(-)

diff --git a/src/operator/nn/dnnl/dnnl_ops-inl.h b/src/operator/nn/dnnl/dnnl_ops-inl.h
index 4bb4ec9195de..4f2f272f301a 100644
--- a/src/operator/nn/dnnl/dnnl_ops-inl.h
+++ b/src/operator/nn/dnnl/dnnl_ops-inl.h
@@ -179,7 +179,7 @@ void DNNLLayerNormBackward(const nnvm::NodeAttrs& attrs,
 
 void DNNLSum(const dnnl::memory& arr1, const dnnl::memory& arr2, const dnnl::memory& out);
 
-template <typename ParamType>
+template <class ParamType>
 void DNNLTransposeForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const NDArray& data,
diff --git a/src/operator/nn/dnnl/dnnl_transpose-inl.h b/src/operator/nn/dnnl/dnnl_transpose-inl.h
index adc2fc4abe5d..ade089c8bc79 100644
--- a/src/operator/nn/dnnl/dnnl_transpose-inl.h
+++ b/src/operator/nn/dnnl/dnnl_transpose-inl.h
@@ -33,6 +33,7 @@
 namespace mxnet {
 namespace op {
 
+struct NumpyTransposeParam;
 struct TransposeParam;
 
 bool SupportDNNLTranspose(const NDArray& data);
@@ -51,22 +52,17 @@ class DNNLTransposeFwd {
 
 DNNLTransposeFwd& GetTransposeForward(const NumpyTransposeParam& param, const NDArray& data);
 
-template <typename ParamType>
-NumpyTransposeParam ProcessTransposeParam(const nnvm::NodeAttrs& attrs);
+template <class ParamType>
+NumpyTransposeParam ProcessTransposeParam(const ParamType& param);
 
-template <>
-NumpyTransposeParam ProcessTransposeParam<NumpyTransposeParam>(const nnvm::NodeAttrs& attrs);
-
-template <>
-NumpyTransposeParam ProcessTransposeParam<TransposeParam>(const nnvm::NodeAttrs& attrs);
-
-template <typename ParamType>
+template <class ParamType>
 void DNNLTransposeForward(const nnvm::NodeAttrs& attrs,
                           const OpContext& ctx,
                           const NDArray& data,
                           const OpReqType& req,
                           const NDArray& output) {
-  const NumpyTransposeParam param = ProcessTransposeParam<ParamType>(attrs);
-  auto fwd = GetTransposeForward(param, data);
+  const ParamType& org_param = nnvm::get<ParamType>(attrs.parsed);
+  auto param = ProcessTransposeParam(org_param);
+  auto fwd = GetTransposeForward(param, data);
   fwd.SetNewMem(data, output);
   fwd.Execute();
 }
diff --git a/src/operator/nn/dnnl/dnnl_transpose.cc b/src/operator/nn/dnnl/dnnl_transpose.cc
index 26da5d49c684..d1ff6fc2240b 100644
--- a/src/operator/nn/dnnl/dnnl_transpose.cc
+++ b/src/operator/nn/dnnl/dnnl_transpose.cc
@@ -122,23 +122,21 @@ DNNLTransposeFwd& GetTransposeForward(const NumpyTransposeParam& param, const NDArray& data) {
 }
 
 template <>
-NumpyTransposeParam ProcessTransposeParam<NumpyTransposeParam>(const nnvm::NodeAttrs& attrs) {
-  const NumpyTransposeParam& param_in = nnvm::get<NumpyTransposeParam>(attrs.parsed);
-  NumpyTransposeParam param_out;
-  param_out.axes = common::CanonicalizeAxes(param_in.axes);
-  return param_out;
+NumpyTransposeParam ProcessTransposeParam(const NumpyTransposeParam& param) {
+  NumpyTransposeParam numpy_param;
+  numpy_param.axes = common::CanonicalizeAxes(param.axes);
+  return numpy_param;
 }
 
 template <>
-NumpyTransposeParam ProcessTransposeParam<TransposeParam>(const nnvm::NodeAttrs& attrs) {
-  const TransposeParam& param_in = nnvm::get<TransposeParam>(attrs.parsed);
-  NumpyTransposeParam param_out;
-  if (param_in.axes.ndim() == 0) {
-    param_out.axes = mxnet::TShape(-1, 0);
+NumpyTransposeParam ProcessTransposeParam(const TransposeParam& param) {
+  NumpyTransposeParam numpy_param;
+  if (param.axes.ndim() == 0) {
+    numpy_param.axes = mxnet::TShape(-1, 0);
   } else {
-    param_out.axes = param_in.axes;
+    numpy_param.axes = param.axes;
   }
-  return param_out;
+  return numpy_param;
 }
 
 }  // namespace op

From 57a12c91b9daf2180f739fefa974c815bf1cf4e1 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Mon, 8 Nov 2021 09:45:43 +0100
Subject: [PATCH 20/27] format, rename funcs

---
 src/operator/nn/dnnl/dnnl_transpose-inl.h | 12 +++++-------
 src/operator/nn/dnnl/dnnl_transpose.cc    |  7 ++++---
 tests/python/unittest/test_numpy_op.py    |  6 +-----
 3 files changed, 10 insertions(+), 15 deletions(-)

diff --git a/src/operator/nn/dnnl/dnnl_transpose-inl.h b/src/operator/nn/dnnl/dnnl_transpose-inl.h
index ade089c8bc79..65be51c1e3de 100644
--- a/src/operator/nn/dnnl/dnnl_transpose-inl.h
+++ b/src/operator/nn/dnnl/dnnl_transpose-inl.h
@@ -19,23 +19,21 @@
 
 /*!
  * \file dnnl_transpose-inl.h
- * \brief
  * \author Rafal Litka
  */
 
 #ifndef MXNET_OPERATOR_NN_DNNL_DNNL_TRANSPOSE_INL_H_
 #define MXNET_OPERATOR_NN_DNNL_DNNL_TRANSPOSE_INL_H_
 #if MXNET_USE_ONEDNN == 1
+
 #include "./dnnl_base-inl.h"
 #include "./dnnl_ops-inl.h"
+
 #include "../../numpy/np_matrix_op-inl.h"
 
 namespace mxnet {
 namespace op {
 
-struct NumpyTransposeParam;
-struct TransposeParam;
-
 bool SupportDNNLTranspose(const NDArray& data);
 
 class DNNLTransposeFwd {
@@ -51,7 +49,7 @@ class DNNLTransposeFwd {
 DNNLTransposeFwd& GetTransposeForward(const NumpyTransposeParam& param, const NDArray& data);
 
 template <class ParamType>
-NumpyTransposeParam ProcessTransposeParam(const ParamType& param);
+NumpyTransposeParam ConvertParamsToNumpy(const ParamType& param);
 
 template <class ParamType>
 void DNNLTransposeForward(const nnvm::NodeAttrs& attrs,
@@ -60,8 +58,8 @@ void DNNLTransposeForward(const nnvm::NodeAttrs& attrs,
                           const OpReqType& req,
                           const NDArray& output) {
   const ParamType& org_param = nnvm::get<ParamType>(attrs.parsed);
-  auto param = ProcessTransposeParam(org_param);
-  auto fwd = GetTransposeForward(param, data);
+  auto param = ConvertParamsToNumpy(org_param);
+  auto fwd   = GetTransposeForward(param, data);
   fwd.SetNewMem(data, output);
   fwd.Execute();
 }
diff --git a/src/operator/nn/dnnl/dnnl_transpose.cc b/src/operator/nn/dnnl/dnnl_transpose.cc
index d1ff6fc2240b..231c4fc6e3f3 100644
--- a/src/operator/nn/dnnl/dnnl_transpose.cc
+++ b/src/operator/nn/dnnl/dnnl_transpose.cc
@@ -25,9 +25,10 @@
 
 #if MXNET_USE_ONEDNN == 1
 
-#include "../../tensor/matrix_op-inl.h"
 #include "./dnnl_transpose-inl.h"
 
+#include "../../tensor/matrix_op-inl.h"
+
 namespace mxnet {
 namespace op {
 
@@ -122,14 +123,14 @@ DNNLTransposeFwd& GetTransposeForward(const NumpyTransposeParam& param, const NDArray& data) {
 }
 
 template <>
-NumpyTransposeParam ProcessTransposeParam(const NumpyTransposeParam& param) {
+NumpyTransposeParam ConvertParamsToNumpy(const NumpyTransposeParam& param) {
   NumpyTransposeParam numpy_param;
   numpy_param.axes = common::CanonicalizeAxes(param.axes);
   return numpy_param;
 }
 
 template <>
-NumpyTransposeParam ProcessTransposeParam(const TransposeParam& param) {
+NumpyTransposeParam ConvertParamsToNumpy(const TransposeParam& param) {
   NumpyTransposeParam numpy_param;
   if (param.axes.ndim() == 0) {
     numpy_param.axes = mxnet::TShape(-1, 0);
diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py
index 6d805c46173a..3c74dcacb58a 100644
--- a/tests/python/unittest/test_numpy_op.py
+++ b/tests/python/unittest/test_numpy_op.py
@@ -2573,14 +2573,10 @@ def forward(self, a):
 
 
 @use_np
-def test_np_transpose_error1():
+def test_np_transpose_error():
     # Test for error raising
     dat = np.random.normal(0, 1, (3, 4, 5), dtype=np.float32)
     pytest.raises(ValueError, lambda: dat.transpose((0, 0, 1)))
-
-@use_np
-def test_np_transpose_error2():
-    dat = np.random.normal(0, 1, (3, 4, 5), dtype=np.float32)
     pytest.raises(MXNetError, lambda: dat.transpose((0, 1, 3)))
 
 @use_np
 def test_np_meshgrid():

From 0c54f6b0d06f10ce53538f3d0729419e3c9c2b17 Mon Sep 17 00:00:00 2001
From: Rafal Litka
Date: Mon, 8 Nov 2021 13:28:29 +0100
Subject: [PATCH 21/27] switch include order

---
 src/operator/nn/dnnl/dnnl_transpose.cc | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/operator/nn/dnnl/dnnl_transpose.cc b/src/operator/nn/dnnl/dnnl_transpose.cc
index 231c4fc6e3f3..40cba4109725 100644
--- a/src/operator/nn/dnnl/dnnl_transpose.cc
+++ b/src/operator/nn/dnnl/dnnl_transpose.cc
@@ -25,10 +25,10 @@
 
 #if MXNET_USE_ONEDNN == 1
 
-#include "./dnnl_transpose-inl.h"
-
 #include "../../tensor/matrix_op-inl.h"
 
+#include "./dnnl_transpose-inl.h"
+
 namespace mxnet {
 namespace op {
"../../tensor/matrix_op-inl.h" +#include "./dnnl_transpose-inl.h" + namespace mxnet { namespace op { From b09ed4b54dc4ac0f1250782f734548b0666b5213 Mon Sep 17 00:00:00 2001 From: Rafal Litka Date: Mon, 8 Nov 2021 14:19:56 +0100 Subject: [PATCH 22/27] dont sort includes for transpose --- src/operator/nn/dnnl/dnnl_transpose.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/operator/nn/dnnl/dnnl_transpose.cc b/src/operator/nn/dnnl/dnnl_transpose.cc index 40cba4109725..a2c45fefd418 100644 --- a/src/operator/nn/dnnl/dnnl_transpose.cc +++ b/src/operator/nn/dnnl/dnnl_transpose.cc @@ -25,9 +25,11 @@ #if MXNET_USE_ONEDNN == 1 +// clang-format off #include "../../tensor/matrix_op-inl.h" #include "./dnnl_transpose-inl.h" +// clang-format on namespace mxnet { namespace op { From ca7e79fa716abca990f169fd884b3b1b91936ce2 Mon Sep 17 00:00:00 2001 From: Rafal Litka Date: Mon, 8 Nov 2021 14:44:37 +0100 Subject: [PATCH 23/27] remove clang off section --- src/operator/nn/dnnl/dnnl_transpose.cc | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/operator/nn/dnnl/dnnl_transpose.cc b/src/operator/nn/dnnl/dnnl_transpose.cc index a2c45fefd418..40cba4109725 100644 --- a/src/operator/nn/dnnl/dnnl_transpose.cc +++ b/src/operator/nn/dnnl/dnnl_transpose.cc @@ -25,11 +25,9 @@ #if MXNET_USE_ONEDNN == 1 -// clang-format off #include "../../tensor/matrix_op-inl.h" #include "./dnnl_transpose-inl.h" -// clang-format on namespace mxnet { namespace op { From d9adcb4da729dd289d3c4b47b5c66ccd38587ee2 Mon Sep 17 00:00:00 2001 From: RafLit Date: Mon, 8 Nov 2021 21:31:19 +0100 Subject: [PATCH 24/27] delete unnecessary newline --- tests/python/unittest/test_numpy_op.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index 3c74dcacb58a..f21fe4d0e352 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -2571,7 +2571,6 @@ def forward(self, a): np_out = x.asnumpy().transpose(*axes) assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5, use_broadcast=False) - @use_np def test_np_transpose_error(): # Test for error raising From 05e9cf0d7068d7b0a1bff2157d8cc9cce35a6291 Mon Sep 17 00:00:00 2001 From: RafLit Date: Mon, 8 Nov 2021 21:33:44 +0100 Subject: [PATCH 25/27] add newlines --- tests/python/unittest/test_numpy_op.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index f21fe4d0e352..6cc9abbb0d14 100644 --- a/tests/python/unittest/test_numpy_op.py +++ b/tests/python/unittest/test_numpy_op.py @@ -2571,6 +2571,7 @@ def forward(self, a): np_out = x.asnumpy().transpose(*axes) assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5, use_broadcast=False) + @use_np def test_np_transpose_error(): # Test for error raising @@ -2578,6 +2579,7 @@ def test_np_transpose_error(): pytest.raises(ValueError, lambda: dat.transpose((0, 0, 1))) pytest.raises(MXNetError, lambda: dat.transpose((0, 1, 3))) + @use_np def test_np_meshgrid(): nx, ny = (4, 5) From c910e15c480669076e5d44138c3cd09c4a24130a Mon Sep 17 00:00:00 2001 From: RafLit Date: Mon, 8 Nov 2021 21:35:23 +0100 Subject: [PATCH 26/27] remove whitespace --- tests/python/unittest/test_numpy_op.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py index 6cc9abbb0d14..75162a0ffb01 100644 --- a/tests/python/unittest/test_numpy_op.py +++ 
@@ -2570,8 +2570,8 @@ def forward(self, a):
     mx_out = x.transpose(*axes)
     np_out = x.asnumpy().transpose(*axes)
     assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5, use_broadcast=False)
+    
 
-
 @use_np
 def test_np_transpose_error():
     # Test for error raising
@@ -2579,7 +2579,7 @@ def test_np_transpose_error():
     pytest.raises(ValueError, lambda: dat.transpose((0, 0, 1)))
     pytest.raises(MXNetError, lambda: dat.transpose((0, 1, 3)))
 
-    
+
 @use_np
 def test_np_meshgrid():
     nx, ny = (4, 5)

From 4a34f7373745415c837a0666d2a9f88465d664dc Mon Sep 17 00:00:00 2001
From: RafLit
Date: Mon, 8 Nov 2021 21:36:38 +0100
Subject: [PATCH 27/27] remove whitespace

---
 tests/python/unittest/test_numpy_op.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/python/unittest/test_numpy_op.py b/tests/python/unittest/test_numpy_op.py
index 75162a0ffb01..880e617522fd 100644
--- a/tests/python/unittest/test_numpy_op.py
+++ b/tests/python/unittest/test_numpy_op.py
@@ -2570,7 +2570,7 @@ def forward(self, a):
     mx_out = x.transpose(*axes)
    np_out = x.asnumpy().transpose(*axes)
     assert_almost_equal(mx_out.asnumpy(), np_out, rtol=1e-3, atol=1e-5, use_broadcast=False)
-    
+
 @use_np
 def test_np_transpose_error():