From cfacba1ff085f4c6ebe2c78dfe80c55f320e91f3 Mon Sep 17 00:00:00 2001
From: Bartosz Kuncer
Date: Mon, 22 Mar 2021 11:16:17 +0100
Subject: [PATCH] Change MXNET_MKLDNN* flag names to MXNET_ONEDNN*

---
 .../docs/tutorials/multi_threaded_inference.md    |  2 +-
 docs/static_site/src/pages/api/faq/env_var.md     |  4 ++--
 example/multi_threaded_inference/Makefile         | 16 ++++++++--------
 src/common/utils.h                                |  6 +++---
 src/operator/nn/mkldnn/mkldnn_base-inl.h          |  4 ++--
 .../subgraph/mkldnn/mkldnn_bn_relu_property.h     |  4 ++--
 .../subgraph/mkldnn/mkldnn_conv_property.h        |  8 ++++----
 .../mkldnn_elemwisemul_post_quantize_property.h   |  4 ++--
 src/operator/subgraph/mkldnn/mkldnn_fc.cc         |  6 +++---
 .../mkldnn/mkldnn_fc_post_quantize_property.h     |  4 ++--
 .../subgraph/mkldnn/mkldnn_fc_property.h          |  4 ++--
 tests/python/mkl/test_quantization_mkldnn.py      |  4 ++--
 tests/python/quantization/test_quantization.py    |  4 ++--
 13 files changed, 35 insertions(+), 35 deletions(-)

diff --git a/docs/static_site/src/pages/api/cpp/docs/tutorials/multi_threaded_inference.md b/docs/static_site/src/pages/api/cpp/docs/tutorials/multi_threaded_inference.md
index ea9091ae00a4..bfb9c887f5c5 100644
--- a/docs/static_site/src/pages/api/cpp/docs/tutorials/multi_threaded_inference.md
+++ b/docs/static_site/src/pages/api/cpp/docs/tutorials/multi_threaded_inference.md
@@ -79,7 +79,7 @@ $ cd example/multi_threaded_inference
 $ make
 ```

-If you have built mxnet from source with cmake, please uncomment the specific lines for cmake build or set the following environment variables: `MKLDNN_BUILD_DIR (default is $(MXNET_ROOT)/3rdparty/onednn/build)`, `MKLDNN_INCLUDE_DIR (default is $(MXNET_ROOT)/3rdparty/onednn/include)`, `MXNET_LIB_DIR (default is $(MXNET_ROOT)/lib)`.
+If you have built mxnet from source with cmake, please uncomment the specific lines for cmake build or set the following environment variables: `ONEDNN_BUILD_DIR (default is $(MXNET_ROOT)/3rdparty/onednn/build)`, `ONEDNN_INCLUDE_DIR (default is $(MXNET_ROOT)/3rdparty/onednn/include)`, `MXNET_LIB_DIR (default is $(MXNET_ROOT)/lib)`.

 ### Run multi threaded inference example
 The example is tested with models such as `imagenet1k-inception-bn`, `imagenet1k-resnet-50`,
diff --git a/docs/static_site/src/pages/api/faq/env_var.md b/docs/static_site/src/pages/api/faq/env_var.md
index eaead19383ba..d5234d49a82c 100644
--- a/docs/static_site/src/pages/api/faq/env_var.md
+++ b/docs/static_site/src/pages/api/faq/env_var.md
@@ -324,12 +324,12 @@ If ctypes is used, it must be `mxnet._ctypes.ndarray.NDArrayBase`.
   - Data directory in the filesystem for storage, for example when downloading gluon models.
   - Default in *nix is .mxnet APPDATA/mxnet in windows.

-* MXNET_MKLDNN_ENABLED
+* MXNET_ONEDNN_ENABLED
   - Values: 0, 1 ```(default=1)```
   - Flag to enable or disable ONEDNN accelerator. On by default.
   - Only applies to mxnet that has been compiled with ONEDNN (```pip install mxnet``` or built from source with ```USE_ONEDNN=1```)

-* MXNET_MKLDNN_CACHE_NUM
+* MXNET_ONEDNN_CACHE_NUM
   - Values: Int ```(default=-1)```
   - Flag to set num of elements that ONEDNN cache can hold. Default is -1 which means cache size is unbounded. Should only be set if your model has variable input shapes, as cache size may grow unbounded. The number represents the number of items in the cache and is proportional to the number of layers that use ONEDNN and different input shape.
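Both renamed runtime flags are ordinary environment variables read through `dmlc::GetEnv`, so the rename surfaces to users only as new variable names. A minimal sketch of setting them from Python, assuming an MXNet build with oneDNN support; the cache size of 1024 is purely illustrative, not a recommendation:

```python
# Sketch: exercise the renamed runtime flags before MXNet is loaded.
import os

os.environ['MXNET_ONEDNN_ENABLED'] = '1'       # keep the oneDNN backend on (the default)
os.environ['MXNET_ONEDNN_CACHE_NUM'] = '1024'  # bound the oneDNN primitive cache (illustrative value)

import mxnet as mx  # the flags are picked up from the process environment
print(mx.__version__)
```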
diff --git a/example/multi_threaded_inference/Makefile b/example/multi_threaded_inference/Makefile
index 49403b580a83..10c0299cef26 100644
--- a/example/multi_threaded_inference/Makefile
+++ b/example/multi_threaded_inference/Makefile
@@ -27,21 +27,21 @@ ifndef USE_CUDA_PATH
 export USE_CUDA_PATH = /usr/local/cuda
 endif

-ifndef MKLDNN_BUILD_DIR
-export MKLDNN_BUILD_DIR = $(MXNET_ROOT)/3rdparty/onednn/build
+ifndef ONEDNN_BUILD_DIR
+export ONEDNN_BUILD_DIR = $(MXNET_ROOT)/3rdparty/onednn/build
 # Cmake build path by default
 # Uncomment below line for CMake build
-#export MKLDNN_BUILD_DIR = $(MXNET_ROOT)/build/3rdparty/onednn
+#export ONEDNN_BUILD_DIR = $(MXNET_ROOT)/build/3rdparty/onednn
 endif

-ifndef MKLDNN_INCLUDE_DIR
-export MKLDNN_INCLUDE_DIR = $(MXNET_ROOT)/3rdparty/onednn/include
+ifndef ONEDNN_INCLUDE_DIR
+export ONEDNN_INCLUDE_DIR = $(MXNET_ROOT)/3rdparty/onednn/include
 # Cmake build path by default
 # Uncomment below line for CMake build
-#export MKLDNN_INCLUDE_DIR = $(MXNET_ROOT)/3rdparty/onednn/include
+#export ONEDNN_INCLUDE_DIR = $(MXNET_ROOT)/3rdparty/onednn/include
 endif

-CFLAGS += -I$(MXNET_ROOT)/include -I$(USE_CUDA_PATH)/include -I$(MKLDNN_INCLUDE_DIR) -I$(MKLDNN_BUILD_DIR)/include
+CFLAGS += -I$(MXNET_ROOT)/include -I$(USE_CUDA_PATH)/include -I$(ONEDNN_INCLUDE_DIR) -I$(ONEDNN_BUILD_DIR)/include

 # If MXNET_LIB_DIR env variable set use that, otherwise defaults to MXNET_ROOT/build
 ifndef MXNET_LIB_DIR
@@ -49,7 +49,7 @@ ifndef MXNET_LIB_DIR
 # Uncomment below line for CMake build
 #MXNET_LIB_DIR=$(MXNET_ROOT)/build
 endif
-LDFLAGS += $(MXNET_LIB_DIR)/libmxnet.so -lpthread -L$(MKLDNN_BUILD_DIR)/src -lmkldnn -Wl,-rpath,'$${ORIGIN}'
+LDFLAGS += $(MXNET_LIB_DIR)/libmxnet.so -lpthread -L$(ONEDNN_BUILD_DIR)/src -lmkldnn -Wl,-rpath,'$${ORIGIN}'

 multi_threaded_inference: multi_threaded_inference.o
 	g++ -O3 -o multi_threaded_inference multi_threaded_inference.o $(LDFLAGS)
diff --git a/src/common/utils.h b/src/common/utils.h
index 2d01e6bca685..dfd32ac6f311 100644
--- a/src/common/utils.h
+++ b/src/common/utils.h
@@ -496,9 +496,9 @@ inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
   os << "\nStorage type fallback detected:\n" << op_str << warning;
   LogOnce(os.str());
 #if MXNET_USE_ONEDNN == 1
-  if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
-                                       "You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
-  if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set."
+  if (!MKLDNNEnvSet()) common::LogOnce("MXNET_ONEDNN_ENABLED flag is off. "
+                                       "You can re-enable by setting MXNET_ONEDNN_ENABLED=1");
+  if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_ONEDNN_CACHE_NUM is set."
"Should only be set if " "your model has variable input shapes, " "as cache size may grow unbounded"); diff --git a/src/operator/nn/mkldnn/mkldnn_base-inl.h b/src/operator/nn/mkldnn/mkldnn_base-inl.h index addfa16157cb..5385b5b3a1e9 100644 --- a/src/operator/nn/mkldnn/mkldnn_base-inl.h +++ b/src/operator/nn/mkldnn/mkldnn_base-inl.h @@ -168,12 +168,12 @@ static inline bool SupportMKLDNN(const NDArray &input) { } static inline bool MKLDNNEnvSet() { - static bool is_mkldnn_enabled = dmlc::GetEnv("MXNET_MKLDNN_ENABLED", true); + static bool is_mkldnn_enabled = dmlc::GetEnv("MXNET_ONEDNN_ENABLED", true); return is_mkldnn_enabled; } static inline int GetMKLDNNCacheSize() { - static int mkldnn_cache_size = dmlc::GetEnv("MXNET_MKLDNN_CACHE_NUM", -1); + static int mkldnn_cache_size = dmlc::GetEnv("MXNET_ONEDNN_CACHE_NUM", -1); return mkldnn_cache_size; } diff --git a/src/operator/subgraph/mkldnn/mkldnn_bn_relu_property.h b/src/operator/subgraph/mkldnn/mkldnn_bn_relu_property.h index c19d282fd935..ce29778babdb 100644 --- a/src/operator/subgraph/mkldnn/mkldnn_bn_relu_property.h +++ b/src/operator/subgraph/mkldnn/mkldnn_bn_relu_property.h @@ -84,7 +84,7 @@ class SgMKLDNNBNReLUSelector : public SubgraphSelector { class SgMKLDNNBNReLUProperty : public SubgraphProperty { public: SgMKLDNNBNReLUProperty() { - disable_bn_relu_ = dmlc::GetEnv("MXNET_DISABLE_MKLDNN_FUSE_BN_RELU", false); + disable_bn_relu_ = dmlc::GetEnv("MXNET_DISABLE_ONEDNN_FUSE_BN_RELU", false); } void PrePartition(const nnvm::Graph& g, @@ -97,7 +97,7 @@ class SgMKLDNNBNReLUProperty : public SubgraphProperty { auto property = std::make_shared(); property->SetAttr("property_name", name); property->SetAttr("inference_only", true); - if (dmlc::GetEnv("MXNET_DISABLE_MKLDNN_BN_RELU_OPT", 0)) { + if (dmlc::GetEnv("MXNET_DISABLE_ONEDNN_BN_RELU_OPT", 0)) { property->SetAttr("disable", true); } return property; diff --git a/src/operator/subgraph/mkldnn/mkldnn_conv_property.h b/src/operator/subgraph/mkldnn/mkldnn_conv_property.h index 28ee14f6d3d4..0128527f6821 100644 --- a/src/operator/subgraph/mkldnn/mkldnn_conv_property.h +++ b/src/operator/subgraph/mkldnn/mkldnn_conv_property.h @@ -182,9 +182,9 @@ class SgMKLDNNConvSelector : public SubgraphSelector { class SgMKLDNNConvProperty : public SubgraphProperty { public: SgMKLDNNConvProperty() { - disable_conv_bn_ = dmlc::GetEnv("MXNET_DISABLE_MKLDNN_FUSE_CONV_BN", 0); - disable_conv_act_ = dmlc::GetEnv("MXNET_DISABLE_MKLDNN_FUSE_CONV_RELU", 0); - disable_conv_sum_ = dmlc::GetEnv("MXNET_DISABLE_MKLDNN_FUSE_CONV_SUM", 0); + disable_conv_bn_ = dmlc::GetEnv("MXNET_DISABLE_ONEDNN_FUSE_CONV_BN", 0); + disable_conv_act_ = dmlc::GetEnv("MXNET_DISABLE_ONEDNN_FUSE_CONV_RELU", 0); + disable_conv_sum_ = dmlc::GetEnv("MXNET_DISABLE_ONEDNN_FUSE_CONV_SUM", 0); disable_all_ = disable_conv_bn_ && disable_conv_act_ && disable_conv_sum_; } @@ -193,7 +193,7 @@ class SgMKLDNNConvProperty : public SubgraphProperty { auto property = std::make_shared(); property->SetAttr("property_name", name); property->SetAttr("inference_only", true); - if (dmlc::GetEnv("MXNET_DISABLE_MKLDNN_CONV_OPT", 0)) { + if (dmlc::GetEnv("MXNET_DISABLE_ONEDNN_CONV_OPT", 0)) { property->SetAttr("disable", true); } return property; diff --git a/src/operator/subgraph/mkldnn/mkldnn_elemwisemul_post_quantize_property.h b/src/operator/subgraph/mkldnn/mkldnn_elemwisemul_post_quantize_property.h index 4fc2cffc0537..ad816755a8dd 100644 --- a/src/operator/subgraph/mkldnn/mkldnn_elemwisemul_post_quantize_property.h +++ 
b/src/operator/subgraph/mkldnn/mkldnn_elemwisemul_post_quantize_property.h @@ -144,8 +144,8 @@ class ElemwiseMulPostQuantizeSelector : public SubgraphSelector { class ElemwiseMulPostQuantizeProperty : public SubgraphProperty { public: ElemwiseMulPostQuantizeProperty() { - disable_fuse_all = dmlc::GetEnv("MXNET_DISABLE_MKLDNN_QEM_FUSE_ALL", false); - disable_float_output = dmlc::GetEnv("MXNET_DISABLE_MKLDNN_QEM_FLOAT_OUTPUT", false); + disable_fuse_all = dmlc::GetEnv("MXNET_DISABLE_ONEDNN_QEM_FUSE_ALL", false); + disable_float_output = dmlc::GetEnv("MXNET_DISABLE_ONEDNN_QEM_FLOAT_OUTPUT", false); } static SubgraphPropertyPtr Create() { diff --git a/src/operator/subgraph/mkldnn/mkldnn_fc.cc b/src/operator/subgraph/mkldnn/mkldnn_fc.cc index 0d81d261bd53..8f18b676e1c8 100644 --- a/src/operator/subgraph/mkldnn/mkldnn_fc.cc +++ b/src/operator/subgraph/mkldnn/mkldnn_fc.cc @@ -141,7 +141,7 @@ void SgMKLDNNFCOp::Forward(const OpContext &ctx, } if (initialized_ && mkldnn_param.quantized && - dmlc::GetEnv("MXNET_MKLDNN_QFC_DYNAMIC_PARAMS", 0)) { + dmlc::GetEnv("MXNET_ONEDNN_QFC_DYNAMIC_PARAMS", 0)) { if (channel_wise_runtime_) { if (cached_min_data_ != min_data || cached_max_data_ != max_data || weight_ver_ != weight.version() || @@ -234,8 +234,8 @@ void SgMKLDNNFCOp::Forward(const OpContext &ctx, << "Currently, channel-wise quantization requires fuse requantize or dequantize." << " Please make sure the `min_calib_range` and `max_calib_range` are set when only" << " fuse requantize (outputs of FullyConnected are collected during calibration phase)," - << " or the env var of `MXNET_DISABLE_MKLDNN_QFC_FLOAT_OUTPUT` and " - << " `MXNET_DISABLE_MKLDNN_QFC_FUSE_ALL` are not set to true (default is false)"; + << " or the env var of `MXNET_DISABLE_ONEDNN_QFC_FLOAT_OUTPUT` and " + << " `MXNET_DISABLE_ONEDNN_QFC_FUSE_ALL` are not set to true (default is false)"; } support_channelwise_scale = support_channelwise_scale && channel_wise_runtime_; diff --git a/src/operator/subgraph/mkldnn/mkldnn_fc_post_quantize_property.h b/src/operator/subgraph/mkldnn/mkldnn_fc_post_quantize_property.h index e0328bf5d1ba..3404fdb4478a 100644 --- a/src/operator/subgraph/mkldnn/mkldnn_fc_post_quantize_property.h +++ b/src/operator/subgraph/mkldnn/mkldnn_fc_post_quantize_property.h @@ -144,8 +144,8 @@ class SgMKLDNNFCPostQuantizeSelector : public SubgraphSelector { class SgMKLDNNFCPostQuantizeProperty : public SubgraphProperty { public: SgMKLDNNFCPostQuantizeProperty() { - disable_fuse_all = dmlc::GetEnv("MXNET_DISABLE_MKLDNN_QFC_FUSE_ALL", false); - disable_float_output = dmlc::GetEnv("MXNET_DISABLE_MKLDNN_QFC_FLOAT_OUTPUT", false); + disable_fuse_all = dmlc::GetEnv("MXNET_DISABLE_ONEDNN_QFC_FUSE_ALL", false); + disable_float_output = dmlc::GetEnv("MXNET_DISABLE_ONEDNN_QFC_FLOAT_OUTPUT", false); } static SubgraphPropertyPtr Create() { diff --git a/src/operator/subgraph/mkldnn/mkldnn_fc_property.h b/src/operator/subgraph/mkldnn/mkldnn_fc_property.h index 5c455c54faf3..2fbfb8580a3f 100644 --- a/src/operator/subgraph/mkldnn/mkldnn_fc_property.h +++ b/src/operator/subgraph/mkldnn/mkldnn_fc_property.h @@ -158,7 +158,7 @@ class SgMKLDNNFCSelector : public SubgraphSelector { class SgMKLDNNFCProperty : public SubgraphProperty { public: SgMKLDNNFCProperty() { - disable_fc_eltwise_ = dmlc::GetEnv("MXNET_DISABLE_MKLDNN_FUSE_FC_ELTWISE", false); + disable_fc_eltwise_ = dmlc::GetEnv("MXNET_DISABLE_ONEDNN_FUSE_FC_ELTWISE", false); } static SubgraphPropertyPtr Create() { @@ -166,7 +166,7 @@ class SgMKLDNNFCProperty : public SubgraphProperty { 
     auto property = std::make_shared<SgMKLDNNFCProperty>();
     property->SetAttr("property_name", name);
     property->SetAttr("inference_only", true);
-    if (dmlc::GetEnv("MXNET_DISABLE_MKLDNN_FC_OPT", 0)) {
+    if (dmlc::GetEnv("MXNET_DISABLE_ONEDNN_FC_OPT", 0)) {
       property->SetAttr("disable", true);
     }
     return property;
diff --git a/tests/python/mkl/test_quantization_mkldnn.py b/tests/python/mkl/test_quantization_mkldnn.py
index d3251ea604bc..055996acb96a 100644
--- a/tests/python/mkl/test_quantization_mkldnn.py
+++ b/tests/python/mkl/test_quantization_mkldnn.py
@@ -18,7 +18,7 @@
 import sys
 import mxnet as mx

-os.environ['ENABLE_MKLDNN_QUANTIZATION_TEST'] = '1'
+os.environ['ENABLE_ONEDNN_QUANTIZATION_TEST'] = '1'
 os.environ['MXNET_SUBGRAPH_BACKEND'] = 'NONE'
 curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
 sys.path.insert(0, os.path.join(curr_path, '../quantization'))
@@ -27,5 +27,5 @@
 if __name__ == '__main__':
     import pytest
     pytest.main()
-    del os.environ['ENABLE_MKLDNN_QUANTIZATION_TEST']
+    del os.environ['ENABLE_ONEDNN_QUANTIZATION_TEST']
     del os.environ['MXNET_SUBGRAPH_BACKEND']
diff --git a/tests/python/quantization/test_quantization.py b/tests/python/quantization/test_quantization.py
index 948b2d3b8cee..df6e9c6ac9e0 100644
--- a/tests/python/quantization/test_quantization.py
+++ b/tests/python/quantization/test_quantization.py
@@ -52,12 +52,12 @@ def is_test_for_gpu():

 def is_test_for_mkldnn():
     return (mx.current_context().device_type == 'cpu'
-            and os.environ.get('ENABLE_MKLDNN_QUANTIZATION_TEST') == '1')
+            and os.environ.get('ENABLE_ONEDNN_QUANTIZATION_TEST') == '1')


 def is_test_for_native_cpu():
     return (mx.current_context().device_type == 'cpu'
-            and os.environ.get('ENABLE_MKLDNN_QUANTIZATION_TEST') == None)
+            and os.environ.get('ENABLE_ONEDNN_QUANTIZATION_TEST') == None)


 def test_quantize_float32_to_int8():
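The test-side rename follows the same pattern: the oneDNN quantization tests are gated on `ENABLE_ONEDNN_QUANTIZATION_TEST`, which `tests/python/mkl/test_quantization_mkldnn.py` exports before importing the shared quantization suite. A minimal sketch of that gating, assuming `mxnet` is installed:

```python
# Sketch of the renamed test gating (mirrors test_quantization_mkldnn.py).
import os

os.environ['ENABLE_ONEDNN_QUANTIZATION_TEST'] = '1'  # renamed from ENABLE_MKLDNN_QUANTIZATION_TEST
os.environ['MXNET_SUBGRAPH_BACKEND'] = 'NONE'

import mxnet as mx

def is_test_for_mkldnn():
    # Same predicate as test_quantization.py after the rename.
    return (mx.current_context().device_type == 'cpu'
            and os.environ.get('ENABLE_ONEDNN_QUANTIZATION_TEST') == '1')

print(is_test_for_mkldnn())  # True on a CPU context with the flag set
```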