diff --git a/.gitignore b/.gitignore
index 648d59c9e432..c35b5a1eceb2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -171,5 +171,5 @@ coverage.xml
cmake_options.yml
# header file generated at compile time
-include/mkldnn/oneapi/dnnl/dnnl_version.h
-include/mkldnn/oneapi/dnnl/dnnl_config.h
+include/onednn/oneapi/dnnl/dnnl_version.h
+include/onednn/oneapi/dnnl/dnnl_config.h
diff --git a/.gitmodules b/.gitmodules
index 85246d62328d..f3b95dabac22 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -10,9 +10,6 @@
[submodule "3rdparty/googletest"]
path = 3rdparty/googletest
url = https://github.com/google/googletest.git
-[submodule "3rdparty/mkldnn"]
- path = 3rdparty/mkldnn
- url = https://github.com/oneapi-src/oneDNN.git
[submodule "3rdparty/tvm"]
path = 3rdparty/tvm
url = https://github.com/apache/incubator-tvm.git
@@ -28,3 +25,6 @@
[submodule "3rdparty/intgemm"]
path = 3rdparty/intgemm
url = https://github.com/kpu/intgemm
+[submodule "3rdparty/onednn"]
+ path = 3rdparty/onednn
+ url = https://github.com/oneapi-src/oneDNN
diff --git a/3rdparty/mkldnn b/3rdparty/onednn
similarity index 100%
rename from 3rdparty/mkldnn
rename to 3rdparty/onednn
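Note for existing checkouts: after the submodule path above changes from `3rdparty/mkldnn` to `3rdparty/onednn`, local clones need a re-sync. A minimal sequence, assuming a clean working tree:

```bash
# Re-read .gitmodules so git picks up the renamed 3rdparty/onednn path,
# then fetch the submodule into its new location.
git submodule sync --recursive
git submodule update --init --recursive 3rdparty/onednn
```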
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 35348976805d..5c8865aa890a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -62,9 +62,9 @@ option(USE_F16C "Build with x86 F16C instruction support" ON) # autodetects supp
option(USE_LAPACK "Build with lapack support" ON)
option(USE_MKL_LAYERNORM "Use layer normalization from MKL, which is currently slower than internal. No effect unless USE_BLAS=MKL (or mkl)." OFF)
if((NOT APPLE) AND (NOT MSVC) AND (CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL "x86_64") AND (NOT CMAKE_CROSSCOMPILING))
- option(USE_MKLDNN "Build with MKL-DNN support" ON)
+ option(USE_ONEDNN "Build with ONEDNN support" ON)
else()
- option(USE_MKLDNN "Build with MKL-DNN support" OFF)
+ option(USE_ONEDNN "Build with ONEDNN support" OFF)
endif()
cmake_dependent_option(USE_INTGEMM "Build with x86_64 intgemm library for low-precision multiplication" ON "CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64" OFF)
if(NOT MSVC)
@@ -257,7 +257,7 @@ endif()
if(USE_MKL_LAYERNORM)
add_definitions(-DMXNET_USE_MKL_LAYERNORM=1)
endif()
-if(USE_MKLDNN)
+if(USE_ONEDNN)
# CPU architecture (e.g., C5) can't run on another architecture (e.g., g3).
if(MSVC)
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /EHsc")
@@ -272,7 +272,7 @@ if(USE_MKLDNN)
endif()
endif()
- function(load_mkldnn)
+ function(load_onednn)
set(MKLDNN_BUILD_TESTS OFF CACHE INTERNAL "" FORCE)
set(MKLDNN_BUILD_EXAMPLES OFF CACHE INTERNAL "" FORCE)
set(MKLDNN_ARCH_OPT_FLAGS "" CACHE INTERNAL "" FORCE)
@@ -285,13 +285,13 @@ if(USE_MKLDNN)
set(MKLDNN_CPU_RUNTIME SEQ CACHE INTERNAL "" FORCE)
endif()
- set(CMAKE_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/mkldnn")
- add_subdirectory(3rdparty/mkldnn)
+ set(CMAKE_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/onednn")
+ add_subdirectory(3rdparty/onednn)
endfunction()
- load_mkldnn()
- include_directories(3rdparty/mkldnn/include)
- include_directories(${PROJECT_BINARY_DIR}/3rdparty/mkldnn/include)
- add_definitions(-DMXNET_USE_MKLDNN=1)
+ load_onednn()
+ include_directories(3rdparty/onednn/include)
+ include_directories(${PROJECT_BINARY_DIR}/3rdparty/onednn/include)
+ add_definitions(-DMXNET_USE_ONEDNN=1)
list(APPEND mxnet_LINKER_LIBS dnnl)
set_target_properties(dnnl PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
endif()
@@ -836,12 +836,12 @@ if(USE_DIST_KVSTORE)
set_target_properties(pslite PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
endif()
-if(USE_MKLDNN)
+if(USE_ONEDNN)
add_custom_command(TARGET mxnet POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy
- ${CMAKE_BINARY_DIR}/3rdparty/mkldnn/include/oneapi/dnnl/dnnl_config.h ${CMAKE_SOURCE_DIR}/include/mkldnn/oneapi/dnnl/
+ ${CMAKE_BINARY_DIR}/3rdparty/onednn/include/oneapi/dnnl/dnnl_config.h ${CMAKE_SOURCE_DIR}/include/onednn/oneapi/dnnl/
COMMAND ${CMAKE_COMMAND} -E copy
- ${CMAKE_BINARY_DIR}/3rdparty/mkldnn/include/oneapi/dnnl/dnnl_version.h ${CMAKE_SOURCE_DIR}/include/mkldnn/oneapi/dnnl/)
+ ${CMAKE_BINARY_DIR}/3rdparty/onednn/include/oneapi/dnnl/dnnl_version.h ${CMAKE_SOURCE_DIR}/include/onednn/oneapi/dnnl/)
endif()
if(USE_INTGEMM)
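With the option renamed above, only the flag name changes for downstream builds. A hypothetical out-of-source configure on Linux (paths and generator are illustrative, mirroring the CI invocations later in this patch):

```bash
mkdir -p build && cd build
# USE_ONEDNN replaces the former USE_MKLDNN switch; all other flags are unchanged.
cmake -DUSE_ONEDNN=ON -DUSE_CUDA=OFF -G Ninja ..
ninja
```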
diff --git a/CONTRIBUTORS.md b/CONTRIBUTORS.md
index 702daff7bf24..b3a71e23c9a4 100644
--- a/CONTRIBUTORS.md
+++ b/CONTRIBUTORS.md
@@ -290,6 +290,7 @@ List of Contributors
* [Paweł Głomski](https://github.com/PawelGlomski-Intel)
* [Andrzej Kotlowski](https://github.com/anko-intel)
* [Yingxiao Du](https://github.com/Duconnor)
+* [Bartosz Kuncer](https://github.com/bartekkuncer)
Label Bot
---------
diff --git a/LICENSE b/LICENSE
index f5c68dd722fb..13c9371dcd11 100644
--- a/LICENSE
+++ b/LICENSE
@@ -226,12 +226,12 @@
3rdparty/tvm/3rdparty/dmlc-core
3rdparty/tvm/3rdparty/dlpack
3rdparty/ps-lite
- 3rdparty/mkldnn
+ 3rdparty/onednn
3rdparty/googletest/googlemock/scripts/generator
3rdparty/onnx-tensorrt/third_party/onnx/third_party/benchmark
- 3rdparty/mkldnn/tests/benchdnn (Copy of the License available at top of current file)
+ 3rdparty/onednn/tests/benchdnn (Copy of the License available at top of current file)
src/operator/special_functions-inl.h Cephes Library Functions (Copy of the License available at top of current file)
- 3rdparty/mkldnn/doc/assets/mathjax (Copy of the License available at top of current file)
+ 3rdparty/onednn/doc/assets/mathjax (Copy of the License available at top of current file)
docs/python_docs/themes/mx-theme/mxtheme/static/material-design-icons-3.0.1 (Copy of the License available at top of current file)
docs/python_docs/themes/mx-theme/mxtheme/static/font/Roboto (Copy of the License available at top of current file)
3rdparty/tvm/3rdparty/bfloat16/bfloat16.cc (Copy of the License available at top of current file)
@@ -256,10 +256,10 @@
3-clause BSD license
=======================================================================================
- 3rdparty/mkldnn/src/cpu/x64/xbyak
- 3rdparty/mkldnn/tests/gtests/gtest
- 3rdparty/mkldnn/cmake/FindOpenCL.cmake (Copy of the License available at licenses/BSD3-cmake)
- 3rdparty/mkldnn/src/cpu/x64/jit_utils/jitprofiling/
+ 3rdparty/onednn/src/cpu/x64/xbyak
+ 3rdparty/onednn/tests/gtests/gtest
+ 3rdparty/onednn/cmake/FindOpenCL.cmake (Copy of the License available at licenses/BSD3-cmake)
+ 3rdparty/onednn/src/cpu/x64/jit_utils/jitprofiling/
3rdparty/onnx-tensorrt/third_party/onnx/third_party/pybind11/tools/FindPythonLibsNew.cmake
3rdparty/ctc_include/contrib/moderngpu
3rdparty/nvidia_cub
@@ -333,7 +333,7 @@
=======================================================================================
3rdparty/intgemm/test/3rd_party/catch.hpp (Copy of the License available at licenses/BOOST1_0)
- 3rdparty/mkldnn/src/common/primitive_hashing.hpp
+ 3rdparty/onednn/src/common/primitive_hashing.hpp
=======================================================================================
LLVM Release License
diff --git a/NEWS.md b/NEWS.md
index 0ba22152d4e0..d63ee0ed89a9 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -1973,7 +1973,7 @@ Note: this feature is still experimental, for more details, refer to [design doc
* Add back R tests and fix typo around R and perl tests (#13940)
* Fix document build (#13927)
* Temporarily disables windows pipeline to unblock PRs (#14261)
-* Fix USE_MKLDNN check in Makefile (#13775)
+* Fix USE_ONEDNN check in Makefile (#13775)
* Fix spelling in threaded_engine_test (#14709)
* Fix cmake options parsing in dev_menu (#13458)
* Add Local test stage and option to jump directly to menu item from commandline (#13809)
diff --git a/README.md b/README.md
index e37f41c1c9f5..8374b40e25b5 100644
--- a/README.md
+++ b/README.md
@@ -87,7 +87,7 @@ What's New
### Ecosystem News
-* [MKLDNN for Faster CPU Performance](docs/python_docs/python/tutorials/performance/backend/mkldnn/mkldnn_readme.md)
+* [ONEDNN for Faster CPU Performance](docs/python_docs/python/tutorials/performance/backend/mkldnn/mkldnn_readme.md)
* [MXNet Memory Monger, Training Deeper Nets with Sublinear Memory Cost](https://github.com/dmlc/mxnet-memonger)
* [Tutorial for NVidia GTC 2016](https://github.com/dmlc/mxnet-gtc-tutorial)
* [MXNet.js: Javascript Package for Deep Learning in Browser (without server)](https://github.com/dmlc/mxnet.js/)
diff --git a/benchmark/opperf/README.md b/benchmark/opperf/README.md
index 4935ea747ba9..bb3fb8ed851b 100644
--- a/benchmark/opperf/README.md
+++ b/benchmark/opperf/README.md
@@ -37,7 +37,7 @@ Benchmarks are usually done end-to-end for a given Network Architecture. For exa
2. A standard Network Architecture like ResNet-50 is made up of many operators Ex: Convolution2D, Softmax, Dense and more. Consider the following scenarios:
1. We improved the performance of Convolution2D operator, but due to a bug, Softmax performance went down. Overall, we may observe end to end benchmarks are running fine, we may miss out the performance degradation of a single operator which can accumulate and become untraceable.
2. You need to see in a given network, which operator is taking maximum time and plan optimization work. With end to end benchmarks, it is hard to get more fine grained numbers at operator level.
-3. We need to know on different hardware infrastructure (Ex: CPU with MKLDNN, GPU with NVIDIA CUDA and cuDNN) how different operators performs. With these details, we can plan the optimization work at operator level, which could exponentially boost up end to end performance.
+3. We need to know how different operators perform on different hardware infrastructure (Ex: CPU with ONEDNN, GPU with NVIDIA CUDA and cuDNN). With these details, we can plan the optimization work at operator level, which could significantly boost end to end performance.
4. You want to have nightly performance tests across all operators in a deep learning framework to catch regressions early.
5. We can integrate this framework with a CI/CD system to run per operator performance tests for PRs. Example: When a PR modifies the kernel of TransposeConv2D, we can run benchmarks of TransposeConv2D operator to verify performance.
diff --git a/cd/README.md b/cd/README.md
index 356b7b6d811a..b06031426f9a 100644
--- a/cd/README.md
+++ b/cd/README.md
@@ -19,18 +19,18 @@
## Introduction
-MXNet aims to support a variety of frontends, e.g. Python, Java, Perl, R, etc. as well as environments (Windows, Linux, Mac, with or without GPU, with or without MKL-DNN support, etc.). This package contains a small continuous delivery (CD) framework used to automate the delivery nightly and release builds across our delivery channels.
+MXNet aims to support a variety of frontends, e.g. Python, Java, Perl, R, etc. as well as environments (Windows, Linux, Mac, with or without GPU, with or without ONEDNN support, etc.). This package contains a small continuous delivery (CD) framework used to automate the delivery of nightly and release builds across our delivery channels.
The CD process is driven by the [CD pipeline job](Jenkinsfile_cd_pipeline), which orchestrates the order in which the artifacts are delivered. For instance, first publish the libmxnet library before publishing the pip package. It does this by triggering the [release job](Jenkinsfile_release_job) with a specific set of parameters for each delivery channel. The release job executes the specific release pipeline for a delivery channel across all MXNet *variants*.
-A variant is a specific environment or features for which MXNet is compiled. For instance CPU, GPU with CUDA v10.1, CUDA v10.2 with MKL-DNN support, etc.
+A variant is a specific environment or features for which MXNet is compiled. For instance CPU, GPU with CUDA v10.1, CUDA v10.2 with ONEDNN support, etc.
-Currently, below variants are supported. All of these variants except native have MKL-DNN backend enabled.
+Currently, the following variants are supported. All of these variants except native have the ONEDNN backend enabled.
* *cpu*: CPU
-* *native*: CPU without MKL-DNN
+* *native*: CPU without ONEDNN
* *cu101*: CUDA 10.1
* *cu102*: CUDA 10.2
* *cu110*: CUDA 11.0
diff --git a/cd/mxnet_lib/Jenkins_pipeline.groovy b/cd/mxnet_lib/Jenkins_pipeline.groovy
index 73fdfc6e8476..d38971b5b45c 100644
--- a/cd/mxnet_lib/Jenkins_pipeline.groovy
+++ b/cd/mxnet_lib/Jenkins_pipeline.groovy
@@ -33,7 +33,7 @@ licenses = 'licenses/*'
// libmxnet dependencies
mx_native_deps = 'lib/libgfortran.so.*, lib/libopenblas.so.0'
-mx_deps = 'lib/libgfortran.so.*, lib/libopenblas.so.0, include/mkldnn/oneapi/dnnl/dnnl_version.h, include/mkldnn/oneapi/dnnl/dnnl_config.h'
+mx_deps = 'lib/libgfortran.so.*, lib/libopenblas.so.0, include/onednn/oneapi/dnnl/dnnl_version.h, include/onednn/oneapi/dnnl/dnnl_config.h'
// library type
// either static or dynamic - depending on how it links to its dependencies
diff --git a/cd/python/pypi/pypi_package.sh b/cd/python/pypi/pypi_package.sh
index 3f9908a5a43a..076f85a2b1bf 100755
--- a/cd/python/pypi/pypi_package.sh
+++ b/cd/python/pypi/pypi_package.sh
@@ -23,14 +23,14 @@ export mxnet_variant=${1:?"Please specify the mxnet variant"}
# Due to this PR: https://github.com/apache/incubator-mxnet/pull/14899
# The setup.py expects that mkldnn_version.h be present in
-# mxnet-build/3rdparty/mkldnn/build/install/include
+# mxnet-build/3rdparty/onednn/build/install/include
# The artifact repository stores this file in the dependencies
# and CD unpacks it to a directory called cd_misc
# Nov. 2019 Update: With v1.1, MKL-DNN is renaming to DNNL. Hence changing the prefix of file name.
if [ -f "cd_misc/dnnl_version.h" ]; then
- mkdir -p 3rdparty/mkldnn/include/oneapi/dnnl
- cp cd_misc/dnnl_version.h 3rdparty/mkldnn/include/oneapi/dnnl/.
- cp cd_misc/dnnl_config.h 3rdparty/mkldnn/include/oneapi/dnnl/.
+ mkdir -p 3rdparty/onednn/include/oneapi/dnnl
+ cp cd_misc/dnnl_version.h 3rdparty/onednn/include/oneapi/dnnl/.
+ cp cd_misc/dnnl_config.h 3rdparty/onednn/include/oneapi/dnnl/.
fi
# Create wheel workspace
diff --git a/cd/utils/artifact_repository.md b/cd/utils/artifact_repository.md
index c37646b51a66..46a97d343b8b 100644
--- a/cd/utils/artifact_repository.md
+++ b/cd/utils/artifact_repository.md
@@ -55,11 +55,11 @@ If not set, derived through the value of sys.platform (https://docs.python.org/3
Manually configured through the --variant argument. The current variants are: cpu, native, cu101, cu102, cu110, cu112.
-As long as the tool is being run from the MXNet code base, the runtime feature detection tool (https://github.com/larroy/mxnet/blob/dd432b7f241c9da2c96bcb877c2dc84e6a1f74d4/docs/api/python/libinfo/libinfo.md) can be used to detect whether the library has been compiled with MKL (library has MKL-DNN feature enabled) and/or CUDA support (compiled with CUDA feature enabled).
+As long as the tool is being run from the MXNet code base, the runtime feature detection tool (https://github.com/larroy/mxnet/blob/dd432b7f241c9da2c96bcb877c2dc84e6a1f74d4/docs/api/python/libinfo/libinfo.md) can be used to detect whether the library has been compiled with ONEDNN (library has the ONEDNN feature enabled) and/or CUDA support (compiled with the CUDA feature enabled).
If it has been compiled with CUDA support, the output of /usr/local/cuda/bin/nvcc --version can be mined for the exact CUDA version (eg. 8.0, 9.0, etc.).
-By knowing which features are enabled on the binary, and if necessary, which CUDA version is installed on the machine, the value for the variant argument can be calculated. Eg. if CUDA features are enabled, and nvcc reports cuda version 10.2, then the variant would be cu102. If neither MKL-DNN nor CUDA features are enabled, the variant would be native.
+By knowing which features are enabled on the binary, and if necessary, which CUDA version is installed on the machine, the value for the variant argument can be calculated. Eg. if CUDA features are enabled, and nvcc reports cuda version 10.2, then the variant would be cu102. If neither ONEDNN nor CUDA features are enabled, the variant would be native.
**Dependency Linking**
diff --git a/cd/utils/artifact_repository.py b/cd/utils/artifact_repository.py
index 41893d93020f..dd10a4b97957 100644
--- a/cd/utils/artifact_repository.py
+++ b/cd/utils/artifact_repository.py
@@ -313,7 +313,7 @@ def probe_gpu_variant(mxnet_features: Dict[str, bool]) -> Optional[str]:
if cuda_version:
variant = 'cu{}'.format(cuda_version)
if not mxnet_features['MKLDNN']:
- RuntimeError('Error determining mxnet variant: MKL-DNN should be enabled for cuda variants')
+        raise RuntimeError('Error determining mxnet variant: ONEDNN should be enabled for cuda variants')
logger.debug('variant is: {}'.format(variant))
return variant
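The variant-derivation rule described in artifact_repository.md above can be sketched as follows; this is an illustration of the documented logic, not the tool's actual code (the function name and test values are hypothetical):

```python
from typing import Dict, Optional

def derive_variant(features: Dict[str, bool], cuda_version: Optional[str]) -> str:
    """Hypothetical sketch: map detected library features to an artifact variant."""
    if features.get('CUDA'):
        # ONEDNN is expected to be enabled for cuda variants (see the check above),
        # e.g. cuda_version '102' yields variant 'cu102'.
        return 'cu{}'.format(cuda_version)
    if features.get('MKLDNN'):  # the runtime feature flag keeps its old name
        return 'cpu'
    return 'native'  # neither ONEDNN nor CUDA features enabled

print(derive_variant({'CUDA': False, 'MKLDNN': True}, None))  # -> cpu
```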
diff --git a/ci/build_windows.py b/ci/build_windows.py
index 899d1ddcd332..2035e4c83bf0 100755
--- a/ci/build_windows.py
+++ b/ci/build_windows.py
@@ -79,7 +79,7 @@ class BuildFlavour(Enum):
'-DUSE_BLAS=open '
'-DUSE_LAPACK=ON '
'-DUSE_DIST_KVSTORE=OFF '
- '-DUSE_MKLDNN=ON '
+ '-DUSE_ONEDNN=ON '
'-DCMAKE_BUILD_TYPE=Release')
, 'WIN_CPU_MKLDNN_MKL': (
@@ -92,7 +92,7 @@ class BuildFlavour(Enum):
'-DUSE_BLAS=mkl '
'-DUSE_LAPACK=ON '
'-DUSE_DIST_KVSTORE=OFF '
- '-DUSE_MKLDNN=ON '
+ '-DUSE_ONEDNN=ON '
'-DCMAKE_BUILD_TYPE=Release')
, 'WIN_CPU_MKL': (
@@ -105,7 +105,7 @@ class BuildFlavour(Enum):
'-DUSE_BLAS=mkl '
'-DUSE_LAPACK=ON '
'-DUSE_DIST_KVSTORE=OFF '
- '-DUSE_MKLDNN=OFF '
+ '-DUSE_ONEDNN=OFF '
'-DCMAKE_BUILD_TYPE=Release')
, 'WIN_GPU': (
@@ -132,7 +132,7 @@ class BuildFlavour(Enum):
'-DUSE_LAPACK=ON '
'-DUSE_DIST_KVSTORE=OFF '
'-DMXNET_CUDA_ARCH="5.2" '
- '-DUSE_MKLDNN=ON '
+ '-DUSE_ONEDNN=ON '
'-DCMAKE_BUILD_TYPE=Release')
}
diff --git a/ci/docker/runtime_functions.sh b/ci/docker/runtime_functions.sh
index c66f72d16672..4a94449c9379 100755
--- a/ci/docker/runtime_functions.sh
+++ b/ci/docker/runtime_functions.sh
@@ -116,17 +116,17 @@ build_dynamic_libmxnet() {
export CXXFLAGS="-fabi-version=11 -fabi-compat-version=7"
if [[ ${mxnet_variant} = "cpu" ]]; then
cmake -DUSE_BLAS=Open \
- -DUSE_MKLDNN=ON \
+ -DUSE_ONEDNN=ON \
-DUSE_CUDA=OFF \
-G Ninja /work/mxnet
elif [[ ${mxnet_variant} = "native" ]]; then
cmake -DUSE_BLAS=Open \
- -DUSE_MKLDNN=OFF \
+ -DUSE_ONEDNN=OFF \
-DUSE_CUDA=OFF \
-G Ninja /work/mxnet
elif [[ ${mxnet_variant} =~ cu[0-9]+$ ]]; then
cmake -DUSE_BLAS=Open \
- -DUSE_MKLDNN=ON \
+ -DUSE_ONEDNN=ON \
-DUSE_DIST_KVSTORE=ON \
-DUSE_CUDA=ON \
-G Ninja /work/mxnet
@@ -263,7 +263,7 @@ build_centos7_cpu() {
export CXXFLAGS="-fabi-version=11 -fabi-compat-version=7"
cmake \
-DCMAKE_BUILD_TYPE="RelWithDebInfo" \
- -DUSE_MKLDNN=OFF \
+ -DUSE_ONEDNN=OFF \
-DUSE_DIST_KVSTORE=ON \
-DUSE_CUDA=OFF \
-DBUILD_EXTENSION_PATH=/work/mxnet/example/extensions/lib_external_ops \
@@ -280,7 +280,7 @@ build_centos7_mkldnn() {
# Opt in to newer GCC C++ ABI. devtoolset defaults to ABI Version 2.
export CXXFLAGS="-fabi-version=11 -fabi-compat-version=7"
cmake -DUSE_BLAS=Open \
- -DUSE_MKLDNN=ON \
+ -DUSE_ONEDNN=ON \
-DUSE_CUDA=OFF \
-DUSE_INT64_TENSOR_SIZE=OFF \
-G Ninja /work/mxnet
@@ -296,7 +296,7 @@ build_centos7_gpu() {
cmake \
-DCMAKE_BUILD_TYPE="RelWithDebInfo" \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=ON \
+ -DUSE_ONEDNN=ON \
-DUSE_CUDA=ON \
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-DUSE_DIST_KVSTORE=ON \
@@ -318,7 +318,7 @@ build_ubuntu_cpu_openblas() {
-DENABLE_TESTCOVERAGE=ON \
-DUSE_TVM_OP=ON \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=OFF \
+ -DUSE_ONEDNN=OFF \
-DUSE_CUDA=OFF \
-DUSE_DIST_KVSTORE=ON \
-DBUILD_CYTHON_MODULES=ON \
@@ -333,7 +333,7 @@ build_ubuntu_cpu_mkl() {
CC=gcc-7 CXX=g++-7 cmake \
-DCMAKE_BUILD_TYPE="RelWithDebInfo" \
-DENABLE_TESTCOVERAGE=OFF \
- -DUSE_MKLDNN=OFF \
+ -DUSE_ONEDNN=OFF \
-DUSE_CUDA=OFF \
-DUSE_TVM_OP=ON \
-DUSE_MKL_LAYERNORM=ON \
@@ -385,7 +385,7 @@ build_ubuntu_cpu_cmake_asan() {
cmake \
-DUSE_CUDA=OFF \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=OFF \
+ -DUSE_ONEDNN=OFF \
-DUSE_OPENMP=OFF \
-DUSE_OPENCV=OFF \
-DCMAKE_BUILD_TYPE=Debug \
@@ -444,7 +444,7 @@ build_ubuntu_cpu_clang6() {
export OpenBLAS_HOME=/usr/local/openblas-clang/
CXX=clang++-6.0 CC=clang-6.0 cmake \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=OFF \
+ -DUSE_ONEDNN=OFF \
-DUSE_CUDA=OFF \
-DUSE_OPENMP=OFF \
-DUSE_DIST_KVSTORE=ON \
@@ -458,7 +458,7 @@ build_ubuntu_cpu_clang100() {
export OpenBLAS_HOME=/usr/local/openblas-clang/
CXX=clang++-10 CC=clang-10 cmake \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=OFF \
+ -DUSE_ONEDNN=OFF \
-DUSE_CUDA=OFF \
-DUSE_OPENMP=ON \
-DUSE_DIST_KVSTORE=ON \
@@ -473,7 +473,7 @@ build_ubuntu_cpu_clang_tidy() {
# TODO(leezu) USE_OPENMP=OFF 3rdparty/dmlc-core/CMakeLists.txt:79 broken?
CXX=clang++-10 CC=clang-10 cmake \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=OFF \
+ -DUSE_ONEDNN=OFF \
-DUSE_CUDA=OFF \
-DUSE_OPENMP=OFF \
-DCMAKE_BUILD_TYPE=Debug \
@@ -489,7 +489,7 @@ build_ubuntu_cpu_clang6_mkldnn() {
export OpenBLAS_HOME=/usr/local/openblas-clang/
CXX=clang++-6.0 CC=clang-6.0 cmake \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=ON \
+ -DUSE_ONEDNN=ON \
-DUSE_CUDA=OFF \
-DUSE_OPENMP=OFF \
-G Ninja /work/mxnet
@@ -502,7 +502,7 @@ build_ubuntu_cpu_clang100_mkldnn() {
export OpenBLAS_HOME=/usr/local/openblas-clang/
CXX=clang++-10 CC=clang-10 cmake \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=ON \
+ -DUSE_ONEDNN=ON \
-DUSE_CUDA=OFF \
-G Ninja /work/mxnet
ninja
@@ -516,7 +516,7 @@ build_ubuntu_cpu_mkldnn() {
-DENABLE_TESTCOVERAGE=ON \
-DUSE_TVM_OP=ON \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=ON \
+ -DUSE_ONEDNN=ON \
-DUSE_CUDA=OFF \
-DBUILD_EXTENSION_PATH=/work/mxnet/example/extensions/lib_external_ops \
-G Ninja /work/mxnet
@@ -529,7 +529,7 @@ build_ubuntu_cpu_mkldnn_mkl() {
CC=gcc-7 CXX=g++-7 cmake \
-DCMAKE_BUILD_TYPE="RelWithDebInfo" \
-DENABLE_TESTCOVERAGE=OFF \
- -DUSE_MKLDNN=ON \
+ -DUSE_ONEDNN=ON \
-DUSE_CUDA=OFF \
-DUSE_TVM_OP=ON \
-DUSE_BLAS=MKL \
@@ -584,7 +584,7 @@ build_ubuntu_gpu_tensorrt() {
-DUSE_TENSORRT=1 \
-DUSE_OPENMP=0 \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=0 \
+ -DUSE_ONEDNN=0 \
-DUSE_NVML=OFF \
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-G Ninja \
@@ -632,7 +632,7 @@ build_ubuntu_gpu() {
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-DUSE_CUDNN=ON \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=OFF \
+ -DUSE_ONEDNN=OFF \
-DUSE_DIST_KVSTORE=ON \
-DBUILD_CYTHON_MODULES=ON \
-DBUILD_EXTENSION_PATH=/work/mxnet/example/extensions/lib_external_ops \
@@ -650,7 +650,7 @@ build_ubuntu_gpu_debug() {
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
-DUSE_CUDNN=ON \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=OFF \
+ -DUSE_ONEDNN=OFF \
-DUSE_DIST_KVSTORE=ON \
-DBUILD_CYTHON_MODULES=ON \
-G Ninja /work/mxnet
@@ -665,7 +665,7 @@ build_ubuntu_cpu_large_tensor() {
-DUSE_CUDA=OFF \
-DUSE_CUDNN=OFF \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=ON \
+ -DUSE_ONEDNN=ON \
-G Ninja \
/work/mxnet
@@ -681,7 +681,7 @@ build_ubuntu_gpu_large_tensor() {
-DUSE_CUDNN=ON \
-DUSE_NVML=OFF \
-DUSE_BLAS=Open \
- -DUSE_MKLDNN=ON \
+ -DUSE_ONEDNN=ON \
-DUSE_DIST_KVSTORE=ON \
-DCMAKE_BUILD_TYPE=Release \
-DMXNET_CUDA_ARCH="$CI_CMAKE_CUDA_ARCH" \
@@ -707,7 +707,7 @@ sanity_license() {
sanity_cpp() {
set -ex
- 3rdparty/dmlc-core/scripts/lint.py mxnet cpp include src plugin tests --exclude_path src/operator/contrib/ctc_include include/mkldnn
+ 3rdparty/dmlc-core/scripts/lint.py mxnet cpp include src plugin tests --exclude_path src/operator/contrib/ctc_include include/onednn
}
sanity_python() {
@@ -1291,10 +1291,10 @@ build_static_libmxnet() {
# Tests CD PyPI packaging in CI
ci_package_pypi() {
set -ex
- # copies mkldnn header files to 3rdparty/mkldnn/include/oneapi/dnnl/ as in CD
- mkdir -p 3rdparty/mkldnn/include/oneapi/dnnl
- cp include/mkldnn/oneapi/dnnl/dnnl_version.h 3rdparty/mkldnn/include/oneapi/dnnl/.
- cp include/mkldnn/oneapi/dnnl/dnnl_config.h 3rdparty/mkldnn/include/oneapi/dnnl/.
+    # copies onednn header files to 3rdparty/onednn/include/oneapi/dnnl/ as in CD
+ mkdir -p 3rdparty/onednn/include/oneapi/dnnl
+ cp include/onednn/oneapi/dnnl/dnnl_version.h 3rdparty/onednn/include/oneapi/dnnl/.
+ cp include/onednn/oneapi/dnnl/dnnl_config.h 3rdparty/onednn/include/oneapi/dnnl/.
local mxnet_variant=${1:?"This function requires a python command as the first argument"}
cd_package_pypi ${mxnet_variant}
cd_integration_test_pypi
diff --git a/ci/jenkins/Jenkins_steps.groovy b/ci/jenkins/Jenkins_steps.groovy
index ba7d052712cd..ac30fffcbc2a 100644
--- a/ci/jenkins/Jenkins_steps.groovy
+++ b/ci/jenkins/Jenkins_steps.groovy
@@ -37,7 +37,7 @@ mx_tensorrt_lib = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, buil
mx_lib_cpp_examples = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, example/extensions/lib_external_ops/build/libexternal_lib.so, python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so'
mx_lib_cpp_examples_no_tvm_op = 'build/libmxnet.so, build/libcustomop_lib.so, build/libcustomop_gpu_lib.so, build/libsubgraph_lib.so, python/mxnet/_cy3/*.so, python/mxnet/_ffi/_cy3/*.so'
mx_lib_cpp_examples_cpu = 'build/libmxnet.so, build/3rdparty/tvm/libtvm_runtime.so, build/libtvmop.so, build/tvmop.conf'
-mx_cd_lib = 'lib/libmxnet.so, licenses/*, lib/libgfortran.so.*, lib/libopenblas.so.0, include/mkldnn/oneapi/dnnl/dnnl_version.h, include/mkldnn/oneapi/dnnl/dnnl_config.h'
+mx_cd_lib = 'lib/libmxnet.so, licenses/*, lib/libgfortran.so.*, lib/libopenblas.so.0, include/onednn/oneapi/dnnl/dnnl_version.h, include/onednn/oneapi/dnnl/dnnl_config.h'
// Python unittest for CPU
diff --git a/config/darwin.cmake b/config/darwin.cmake
index 2311da9c2ce6..1015a2f14dcb 100644
--- a/config/darwin.cmake
+++ b/config/darwin.cmake
@@ -45,7 +45,7 @@ set(OPENCV_ROOT "" CACHE BOOL "OpenCV install path. Supports autodetection.")
set(USE_OPENMP OFF CACHE BOOL "Build with Openmp support")
-set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support")
+set(USE_ONEDNN ON CACHE BOOL "Build with ONEDNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
diff --git a/config/distribution/darwin_cpu.cmake b/config/distribution/darwin_cpu.cmake
index baff103f2863..ddda2ca30136 100644
--- a/config/distribution/darwin_cpu.cmake
+++ b/config/distribution/darwin_cpu.cmake
@@ -24,7 +24,7 @@ set(USE_BLAS "apple" CACHE STRING "BLAS Vendor")
set(USE_CUDA OFF CACHE BOOL "Build with CUDA support")
set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support")
set(USE_OPENMP OFF CACHE BOOL "Build with Openmp support")
-set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support")
+set(USE_ONEDNN ON CACHE BOOL "Build with ONEDNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
diff --git a/config/distribution/darwin_native.cmake b/config/distribution/darwin_native.cmake
index c9d8fa1d7665..4b256c6d5f6b 100644
--- a/config/distribution/darwin_native.cmake
+++ b/config/distribution/darwin_native.cmake
@@ -24,7 +24,7 @@ set(USE_BLAS "apple" CACHE STRING "BLAS Vendor")
set(USE_CUDA OFF CACHE BOOL "Build with CUDA support")
set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support")
set(USE_OPENMP OFF CACHE BOOL "Build with Openmp support")
-set(USE_MKLDNN OFF CACHE BOOL "Build with MKL-DNN support")
+set(USE_ONEDNN OFF CACHE BOOL "Build with ONEDNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
diff --git a/config/distribution/linux_cpu.cmake b/config/distribution/linux_cpu.cmake
index 9f0885d54501..9b8a979e69c7 100644
--- a/config/distribution/linux_cpu.cmake
+++ b/config/distribution/linux_cpu.cmake
@@ -23,7 +23,7 @@ set(USE_BLAS "open" CACHE STRING "BLAS Vendor")
set(USE_CUDA OFF CACHE BOOL "Build with CUDA support")
set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support")
set(USE_OPENMP ON CACHE BOOL "Build with Openmp support")
-set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support")
+set(USE_ONEDNN ON CACHE BOOL "Build with ONEDNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
diff --git a/config/distribution/linux_cu100.cmake b/config/distribution/linux_cu100.cmake
index a328040b6c45..35ec5a302fe6 100644
--- a/config/distribution/linux_cu100.cmake
+++ b/config/distribution/linux_cu100.cmake
@@ -25,7 +25,7 @@ set(USE_CUDNN ON CACHE BOOL "Build with CUDNN support")
set(USE_NCCL ON CACHE BOOL "Build with NCCL support")
set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support")
set(USE_OPENMP ON CACHE BOOL "Build with Openmp support")
-set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support")
+set(USE_ONEDNN ON CACHE BOOL "Build with ONEDNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
diff --git a/config/distribution/linux_cu101.cmake b/config/distribution/linux_cu101.cmake
index 210e07e6354b..80f522d4fb05 100644
--- a/config/distribution/linux_cu101.cmake
+++ b/config/distribution/linux_cu101.cmake
@@ -27,7 +27,7 @@ set(USE_CUDNN ON CACHE BOOL "Build with CUDNN support")
set(USE_NCCL ON CACHE BOOL "Build with NCCL support")
set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support")
set(USE_OPENMP ON CACHE BOOL "Build with Openmp support")
-set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support")
+set(USE_ONEDNN ON CACHE BOOL "Build with ONEDNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
diff --git a/config/distribution/linux_cu102.cmake b/config/distribution/linux_cu102.cmake
index 1bfedb590137..d580354462fb 100644
--- a/config/distribution/linux_cu102.cmake
+++ b/config/distribution/linux_cu102.cmake
@@ -25,7 +25,7 @@ set(USE_CUDNN ON CACHE BOOL "Build with CUDNN support")
set(USE_NCCL ON CACHE BOOL "Build with NCCL support")
set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support")
set(USE_OPENMP ON CACHE BOOL "Build with Openmp support")
-set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support")
+set(USE_ONEDNN ON CACHE BOOL "Build with ONEDNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
diff --git a/config/distribution/linux_cu110.cmake b/config/distribution/linux_cu110.cmake
index b3cd6a77b262..0c239cb93787 100644
--- a/config/distribution/linux_cu110.cmake
+++ b/config/distribution/linux_cu110.cmake
@@ -25,7 +25,7 @@ set(USE_CUDNN ON CACHE BOOL "Build with CUDNN support")
set(USE_NCCL ON CACHE BOOL "Build with NCCL support")
set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support")
set(USE_OPENMP ON CACHE BOOL "Build with Openmp support")
-set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support")
+set(USE_ONEDNN ON CACHE BOOL "Build with ONEDNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
diff --git a/config/distribution/linux_cu112.cmake b/config/distribution/linux_cu112.cmake
index 8c1e74ad9349..031d12976f1c 100644
--- a/config/distribution/linux_cu112.cmake
+++ b/config/distribution/linux_cu112.cmake
@@ -25,7 +25,7 @@ set(USE_CUDNN ON CACHE BOOL "Build with CUDNN support")
set(USE_NCCL ON CACHE BOOL "Build with NCCL support")
set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support")
set(USE_OPENMP ON CACHE BOOL "Build with Openmp support")
-set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support")
+set(USE_ONEDNN ON CACHE BOOL "Build with ONEDNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
diff --git a/config/distribution/linux_cu92.cmake b/config/distribution/linux_cu92.cmake
index 7af9ded168f9..9466a529142d 100644
--- a/config/distribution/linux_cu92.cmake
+++ b/config/distribution/linux_cu92.cmake
@@ -25,7 +25,7 @@ set(USE_CUDNN ON CACHE BOOL "Build with CUDNN support")
set(USE_NCCL ON CACHE BOOL "Build with NCCL support")
set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support")
set(USE_OPENMP ON CACHE BOOL "Build with Openmp support")
-set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support")
+set(USE_ONEDNN ON CACHE BOOL "Build with ONEDNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
diff --git a/config/distribution/linux_native.cmake b/config/distribution/linux_native.cmake
index 6d74b1213814..a0900f3601b2 100644
--- a/config/distribution/linux_native.cmake
+++ b/config/distribution/linux_native.cmake
@@ -23,7 +23,7 @@ set(USE_BLAS "open" CACHE STRING "BLAS Vendor")
set(USE_CUDA OFF CACHE BOOL "Build with CUDA support")
set(USE_OPENCV ON CACHE BOOL "Build with OpenCV support")
set(USE_OPENMP ON CACHE BOOL "Build with Openmp support")
-set(USE_MKLDNN OFF CACHE BOOL "Build with MKL-DNN support")
+set(USE_ONEDNN OFF CACHE BOOL "Build with ONEDNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
set(USE_TVM_OP OFF CACHE BOOL "Enable use of TVM operator build system.")
set(USE_SSE ON CACHE BOOL "Build with x86 SSE instruction support")
diff --git a/config/linux.cmake b/config/linux.cmake
index 5d0e0a195841..0a0f2d95a1f7 100644
--- a/config/linux.cmake
+++ b/config/linux.cmake
@@ -62,7 +62,7 @@ set(OPENCV_ROOT "" CACHE BOOL "OpenCV install path. Supports autodetection.")
set(USE_OPENMP ON CACHE BOOL "Build with Openmp support")
-set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support")
+set(USE_ONEDNN ON CACHE BOOL "Build with ONEDNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
diff --git a/config/linux_gpu.cmake b/config/linux_gpu.cmake
index 56b00d0f1bac..42ebc11b6d88 100644
--- a/config/linux_gpu.cmake
+++ b/config/linux_gpu.cmake
@@ -66,7 +66,7 @@ set(OPENCV_ROOT "" CACHE BOOL "OpenCV install path. Supports autodetection.")
set(USE_OPENMP ON CACHE BOOL "Build with Openmp support")
-set(USE_MKLDNN ON CACHE BOOL "Build with MKL-DNN support")
+set(USE_ONEDNN ON CACHE BOOL "Build with ONEDNN support")
set(USE_LAPACK ON CACHE BOOL "Build with lapack support")
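The config/*.cmake profiles above only pre-seed cache variables, so one way to exercise a renamed profile is CMake's standard `-C` pre-load mechanism; an illustrative invocation (the repository may document a different flow):

```bash
# -C runs the script before the first configure pass, seeding USE_ONEDNN etc.
cmake -C config/distribution/linux_cpu.cmake -B build -S .
cmake --build build
```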
diff --git a/docs/python_docs/python/tutorials/performance/backend/mkldnn/mkldnn_readme.md b/docs/python_docs/python/tutorials/performance/backend/mkldnn/mkldnn_readme.md
index dfe61cbc82fb..6438a5c99567 100644
--- a/docs/python_docs/python/tutorials/performance/backend/mkldnn/mkldnn_readme.md
+++ b/docs/python_docs/python/tutorials/performance/backend/mkldnn/mkldnn_readme.md
@@ -15,14 +15,14 @@
-# Install MXNet with MKL-DNN
+# Install MXNet with ONEDNN
-A better training and inference performance is expected to be achieved on Intel-Architecture CPUs with MXNet built with [Intel MKL-DNN](https://github.com/intel/mkl-dnn) on multiple operating system, including Linux, Windows and MacOS.
-In the following sections, you will find build instructions for MXNet with Intel MKL-DNN on Linux, MacOS and Windows.
+Better training and inference performance can be achieved on Intel-Architecture CPUs with MXNet built with [Intel ONEDNN](https://github.com/oneapi-src/oneDNN) on multiple operating systems, including Linux, Windows and MacOS.
+In the following sections, you will find build instructions for MXNet with Intel ONEDNN on Linux, MacOS and Windows.
-Please find MKL-DNN optimized operators and other features in the [MKL-DNN operator list](https://github.com/apache/incubator-mxnet/blob/v1.5.x/docs/tutorials/mkldnn/operator_list.md).
+Please find ONEDNN-optimized operators and other features in the [ONEDNN operator list](https://github.com/apache/incubator-mxnet/blob/v1.5.x/docs/tutorials/mkldnn/operator_list.md).
-The detailed performance data collected on Intel Xeon CPU with MXNet built with Intel MKL-DNN can be found [here](https://mxnet.apache.org/api/faq/perf#intel-cpu).
+The detailed performance data collected on Intel Xeon CPU with MXNet built with Intel ONEDNN can be found [here](https://mxnet.apache.org/api/faq/perf#intel-cpu).
Contents
@@ -55,25 +55,25 @@ git clone --recursive https://github.com/apache/incubator-mxnet.git
cd incubator-mxnet
```
-### Build MXNet with MKL-DNN
+### Build MXNet with ONEDNN
To achieve better performance, the Intel OpenMP and llvm OpenMP are recommended as below instruction. Otherwise, default GNU OpenMP will be used and you may get the sub-optimal performance. If you don't have the full [MKL](https://software.intel.com/en-us/intel-mkl) library installation, you might use OpenBLAS as the blas library, by setting USE_BLAS=openblas.
```
# build with llvm OpenMP and Intel MKL/openblas
mkdir build && cd build
-cmake -DUSE_CUDA=OFF -DUSE_MKLDNN=ON -DUSE_OPENMP=ON -DUSE_OPENCV=ON ..
+cmake -DUSE_CUDA=OFF -DUSE_ONEDNN=ON -DUSE_OPENMP=ON -DUSE_OPENCV=ON ..
make -j $(nproc)
```
```
# build with Intel MKL and Intel OpenMP
-make -j $(nproc) USE_OPENCV=1 USE_MKLDNN=1 USE_BLAS=mkl USE_INTEL_PATH=/opt/intel
+make -j $(nproc) USE_OPENCV=1 USE_ONEDNN=1 USE_BLAS=mkl USE_INTEL_PATH=/opt/intel
```
```
# build with openblas and GNU OpenMP(sub-optimal performance)
-make -j $(nproc) USE_OPENCV=1 USE_MKLDNN=1 USE_BLAS=openblas
+make -j $(nproc) USE_OPENCV=1 USE_ONEDNN=1 USE_BLAS=openblas
```
MacOS
@@ -107,15 +107,15 @@ git clone --recursive https://github.com/apache/incubator-mxnet.git
cd incubator-mxnet
```
-### Build MXNet with MKL-DNN
+### Build MXNet with ONEDNN
```
-LIBRARY_PATH=$(brew --prefix llvm)/lib/ make -j $(sysctl -n hw.ncpu) CC=$(brew --prefix llvm)/bin/clang CXX=$(brew --prefix llvm)/bin/clang++ USE_OPENCV=1 USE_OPENMP=1 USE_MKLDNN=1 USE_BLAS=apple
+LIBRARY_PATH=$(brew --prefix llvm)/lib/ make -j $(sysctl -n hw.ncpu) CC=$(brew --prefix llvm)/bin/clang CXX=$(brew --prefix llvm)/bin/clang++ USE_OPENCV=1 USE_OPENMP=1 USE_ONEDNN=1 USE_BLAS=apple
```
Windows
-On Windows, you can use [Micrsoft Visual Studio 2015](https://www.visualstudio.com/vs/older-downloads/) and [Microsoft Visual Studio 2017](https://www.visualstudio.com/downloads/) to compile MXNet with Intel MKL-DNN.
+On Windows, you can use [Microsoft Visual Studio 2015](https://www.visualstudio.com/vs/older-downloads/) and [Microsoft Visual Studio 2017](https://www.visualstudio.com/downloads/) to compile MXNet with Intel ONEDNN.
[Micrsoft Visual Studio 2015](https://www.visualstudio.com/vs/older-downloads/) is recommended.
**Visual Studio 2015**
@@ -136,32 +136,32 @@ After you have installed all of the required dependencies, build the MXNet sourc
git clone --recursive https://github.com/apache/incubator-mxnet.git
cd C:\incubator-mxent
```
-2. Enable Intel MKL-DNN by -DUSE_MKLDNN=1. Use [CMake 3](https://cmake.org/) to create a Visual Studio solution in ```./build```. Make sure to specify the architecture in the
+2. Enable Intel ONEDNN by -DUSE_ONEDNN=1. Use [CMake 3](https://cmake.org/) to create a Visual Studio solution in ```./build```. Make sure to specify the architecture in the
command:
```
>mkdir build
>cd build
->cmake -G "Visual Studio 14 Win64" .. -DUSE_CUDA=0 -DUSE_CUDNN=0 -DUSE_NVRTC=0 -DUSE_OPENCV=1 -DUSE_OPENMP=1 -DUSE_PROFILER=1 -DUSE_BLAS=open -DUSE_LAPACK=1 -DUSE_DIST_KVSTORE=0 -DCUDA_ARCH_NAME=All -DUSE_MKLDNN=1 -DCMAKE_BUILD_TYPE=Release
+>cmake -G "Visual Studio 14 Win64" .. -DUSE_CUDA=0 -DUSE_CUDNN=0 -DUSE_NVRTC=0 -DUSE_OPENCV=1 -DUSE_OPENMP=1 -DUSE_PROFILER=1 -DUSE_BLAS=open -DUSE_LAPACK=1 -DUSE_DIST_KVSTORE=0 -DCUDA_ARCH_NAME=All -DUSE_ONEDNN=1 -DCMAKE_BUILD_TYPE=Release
```
-3. Enable Intel MKL-DNN and Intel MKL as BLAS library by the command:
+3. Enable Intel ONEDNN and Intel MKL as the BLAS library with the command:
```
>"C:\Program Files (x86)\IntelSWTools\compilers_and_libraries\windows\mkl\bin\mklvars.bat" intel64
->cmake -G "Visual Studio 14 Win64" .. -DUSE_CUDA=0 -DUSE_CUDNN=0 -DUSE_NVRTC=0 -DUSE_OPENCV=1 -DUSE_OPENMP=1 -DUSE_PROFILER=1 -DUSE_BLAS=mkl -DUSE_LAPACK=1 -DUSE_DIST_KVSTORE=0 -DCUDA_ARCH_NAME=All -DUSE_MKLDNN=1 -DCMAKE_BUILD_TYPE=Release
+>cmake -G "Visual Studio 14 Win64" .. -DUSE_CUDA=0 -DUSE_CUDNN=0 -DUSE_NVRTC=0 -DUSE_OPENCV=1 -DUSE_OPENMP=1 -DUSE_PROFILER=1 -DUSE_BLAS=mkl -DUSE_LAPACK=1 -DUSE_DIST_KVSTORE=0 -DCUDA_ARCH_NAME=All -DUSE_ONEDNN=1 -DCMAKE_BUILD_TYPE=Release
```
4. After the CMake successfully completed, in Visual Studio, open the solution file ```.sln``` and compile it, or compile the MXNet source code by using following command:
```r
msbuild mxnet.sln /p:Configuration=Release;Platform=x64 /maxcpucount
```
- These commands produce mxnet library called ```libmxnet.dll``` in the ```./build/Release/``` or ```./build/Debug``` folder. Also ```libmkldnn.dll``` with be in the ```./build/3rdparty/mkldnn/src/Release/```
+ These commands produce the MXNet library ```libmxnet.dll``` in the ```./build/Release/``` or ```./build/Debug``` folder. ```libmkldnn.dll``` will also be in ```./build/3rdparty/onednn/src/Release/```.
5. Make sure that all the dll files used above(such as `libmkldnn.dll`, `libmklml*.dll`, `libiomp5.dll`, `libopenblas*.dll`, etc) are added to the system PATH. For convinence, you can put all of them to ```\windows\system32```. Or you will come across `Not Found Dependencies` when loading MXNet.
**Visual Studio 2017**
-User can follow the same steps of Visual Studio 2015 to build MXNET with MKL-DNN, but change the version related command, for example,```C:\opencv\build\x64\vc15\bin``` and build command is as below:
+Users can follow the same steps as for Visual Studio 2015 to build MXNet with ONEDNN, but change the version-related commands, for example, ```C:\opencv\build\x64\vc15\bin```; the build command is as below:
```
->cmake -G "Visual Studio 15 Win64" .. -DUSE_CUDA=0 -DUSE_CUDNN=0 -DUSE_NVRTC=0 -DUSE_OPENCV=1 -DUSE_OPENMP=1 -DUSE_PROFILER=1 -DUSE_BLAS=mkl -DUSE_LAPACK=1 -DUSE_DIST_KVSTORE=0 -DCUDA_ARCH_NAME=All -DUSE_MKLDNN=1 -DCMAKE_BUILD_TYPE=Release
+>cmake -G "Visual Studio 15 Win64" .. -DUSE_CUDA=0 -DUSE_CUDNN=0 -DUSE_NVRTC=0 -DUSE_OPENCV=1 -DUSE_OPENMP=1 -DUSE_PROFILER=1 -DUSE_BLAS=mkl -DUSE_LAPACK=1 -DUSE_DIST_KVSTORE=0 -DCUDA_ARCH_NAME=All -DUSE_ONEDNN=1 -DCMAKE_BUILD_TYPE=Release
```
@@ -183,9 +183,9 @@ Expected Output:
[[ 2. 2. 2.]
[ 2. 2. 2.]]
```
-### Verify whether MKL-DNN works
+### Verify whether ONEDNN works
-After MXNet is installed, you can verify if MKL-DNN backend works well with a single Convolution layer.
+After MXNet is installed, you can verify whether the ONEDNN backend works well with a single Convolution layer.
```
import mxnet as mx
import numpy as np
@@ -212,7 +212,7 @@ More detailed debugging and profiling information can be logged by setting the e
```
export MKLDNN_VERBOSE=1
```
-For example, by running above code snippet, the following debugging logs providing more insights on MKL-DNN primitives `convolution` and `reorder`. That includes: Memory layout, infer shape and the time cost of primitive execution.
+For example, running the above code snippet produces the following debugging logs, which provide more insight into the ONEDNN primitives `convolution` and `reorder`: memory layout, inferred shape and the time cost of primitive execution.
```
dnnl_verbose,info,DNNL v1.1.2 (commit cb2cc7ac17ff4e2ef50805c7048d33256d82be4d)
dnnl_verbose,info,Detected ISA is Intel AVX-512 with Intel DL Boost
@@ -223,7 +223,7 @@ dnnl_verbose,exec,cpu,reorder,jit:uni,undef,src_f32::blocked:abcd:f0 dst_f32::bl
dnnl_verbose,exec,cpu,reorder,jit:uni,undef,src_f32::blocked:aBcd16b:f0 dst_f32::blocked:abcd:f0,,,32x32x256x256,35.9771
```
-You can find step-by-step guidance to do profiling for MKLDNN primitives in [Profiling MKLDNN Operators](https://mxnet.apache.org/api/python/docs/tutorials/performance/backend/profiler.html#Profiling-MKLDNN-Operators).
+You can find step-by-step guidance on profiling ONEDNN primitives in [Profiling ONEDNN Operators](https://mxnet.apache.org/api/python/docs/tutorials/performance/backend/profiler.html#Profiling-MKLDNN-Operators).
Enable MKL BLAS
@@ -293,7 +293,7 @@ This limitations of this experimental feature are:
Quantization and Inference with INT8
-Benefiting from Intel MKL-DNN, MXNet built with Intel MKL-DNN brings outstanding performance improvement on quantization and inference with INT8 Intel CPU Platform on Intel Xeon Scalable Platform.
+Benefiting from Intel ONEDNN, MXNet built with it brings outstanding performance improvements for INT8 quantization and inference on the Intel Xeon Scalable platform.
- [CNN Quantization Examples](https://github.com/apache/incubator-mxnet/tree/master/example/quantization).
@@ -303,6 +303,6 @@ Benefiting from Intel MKL-DNN, MXNet built with Intel MKL-DNN brings outstanding
- For questions or support specific to MKL, visit the [Intel MKL](https://software.intel.com/en-us/mkl) website.
-- For questions or support specific to MKL, visit the [Intel MKLDNN](https://github.com/intel/mkl-dnn) website.
+- For questions or support specific to ONEDNN, visit the [Intel ONEDNN](https://github.com/oneapi-src/oneDNN) website.
-- If you find bugs, please open an issue on GitHub for [MXNet with MKL](https://github.com/apache/incubator-mxnet/labels/MKL) or [MXNet with MKLDNN](https://github.com/apache/incubator-mxnet/labels/MKLDNN).
+- If you find bugs, please open an issue on GitHub for [MXNet with MKL](https://github.com/apache/incubator-mxnet/labels/MKL) or [MXNet with ONEDNN](https://github.com/apache/incubator-mxnet/labels/MKLDNN).
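The "Verify whether ONEDNN works" section above elides the convolution snippet; a minimal self-contained sketch in the same spirit (tensor and filter sizes are arbitrary):

```python
import mxnet as mx

# One forward pass through a single convolution; with an ONEDNN build,
# running this under MKLDNN_VERBOSE=1 should print dnnl_verbose lines.
data = mx.nd.random.normal(shape=(1, 3, 224, 224))
weight = mx.nd.random.normal(shape=(8, 3, 3, 3))
bias = mx.nd.zeros((8,))
out = mx.nd.Convolution(data=data, weight=weight, bias=bias,
                        kernel=(3, 3), num_filter=8)
print(out.shape)  # (1, 8, 222, 222)
```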
diff --git a/docs/python_docs/python/tutorials/performance/backend/profiler.md b/docs/python_docs/python/tutorials/performance/backend/profiler.md
index ecd9fc876022..5585ccd64f2f 100644
--- a/docs/python_docs/python/tutorials/performance/backend/profiler.md
+++ b/docs/python_docs/python/tutorials/performance/backend/profiler.md
@@ -210,12 +210,12 @@ Let's zoom in to check the time taken by operators
The above picture visualizes the sequence in which the operators were executed and the time taken by each operator.
-### Profiling MKLDNN Operators
-Reagrding MKLDNN operators, the library has already provided the internal profiling tool. Firstly, you need set `MKLDNN_VERBOSE=1` to enable internal profiler.
+### Profiling ONEDNN Operators
+Regarding ONEDNN operators, the library already provides an internal profiling tool. First, set `MKLDNN_VERBOSE=1` to enable the internal profiler.
`$ MKLDNN_VERBOSE=1 python my_script.py > mkldnn_verbose.log`
-Now, the detailed profiling insights of each mkldnn prmitive are saved into `mkldnn_verbose.log` (like below).
+Now, the detailed profiling insights for each ONEDNN primitive are saved into `mkldnn_verbose.log` (like below).
```
dnnl_verbose,info,DNNL v1.1.2 (commit cb2cc7ac17ff4e2ef50805c7048d33256d82be4d)
diff --git a/docs/static_site/src/_includes/get_started/cloud/cpu.md b/docs/static_site/src/_includes/get_started/cloud/cpu.md
index 440582727b68..8dfcbb4b4c5d 100644
--- a/docs/static_site/src/_includes/get_started/cloud/cpu.md
+++ b/docs/static_site/src/_includes/get_started/cloud/cpu.md
@@ -13,4 +13,4 @@ the [Download page](https://mxnet.apache.org/get_started/download).
* **Amazon Web Services**
- [AWS Deep Learning AMI](https://aws.amazon.com/machine-learning/amis/) - Preinstalled
Conda environments
-for Python 2 or 3 with MXNet and MKL-DNN.
+for Python 2 or 3 with MXNet and ONEDNN.
diff --git a/docs/static_site/src/_includes/get_started/cloud/gpu.md b/docs/static_site/src/_includes/get_started/cloud/gpu.md
index 8f64a3ac5cba..3bdf0061f9c5 100644
--- a/docs/static_site/src/_includes/get_started/cloud/gpu.md
+++ b/docs/static_site/src/_includes/get_started/cloud/gpu.md
@@ -18,7 +18,7 @@ VM](https://docs.nvidia.com/ngc/ngc-alibaba-setup-guide/launching-nv-cloud-vm-co
MXNet models
- [AWS Deep Learning AMI](https://aws.amazon.com/machine-learning/amis/) - Preinstalled
Conda environments
-for Python 2 or 3 with MXNet, CUDA, cuDNN, MKL-DNN, and AWS Elastic Inference
+for Python 2 or 3 with MXNet, CUDA, cuDNN, ONEDNN, and AWS Elastic Inference
- [Dynamic Training on
AWS](https://github.com/awslabs/dynamic-training-with-apache-mxnet-on-aws) -
experimental manual EC2 setup or semi-automated CloudFormation setup
diff --git a/docs/static_site/src/pages/api/cpp/docs/tutorials/multi_threaded_inference.md b/docs/static_site/src/pages/api/cpp/docs/tutorials/multi_threaded_inference.md
index 08cfea115c7d..ea9091ae00a4 100644
--- a/docs/static_site/src/pages/api/cpp/docs/tutorials/multi_threaded_inference.md
+++ b/docs/static_site/src/pages/api/cpp/docs/tutorials/multi_threaded_inference.md
@@ -79,7 +79,7 @@ $ cd example/multi_threaded_inference
$ make
```
-If you have built mxnet from source with cmake, please uncomment the specific lines for cmake build or set the following environment variables: `MKLDNN_BUILD_DIR (default is $(MXNET_ROOT)/3rdparty/mkldnn/build)`, `MKLDNN_INCLUDE_DIR (default is $(MXNET_ROOT)/3rdparty/mkldnn/include)`, `MXNET_LIB_DIR (default is $(MXNET_ROOT)/lib)`.
+If you have built mxnet from source with cmake, please uncomment the specific lines for cmake build or set the following environment variables: `MKLDNN_BUILD_DIR (default is $(MXNET_ROOT)/3rdparty/onednn/build)`, `MKLDNN_INCLUDE_DIR (default is $(MXNET_ROOT)/3rdparty/onednn/include)`, `MXNET_LIB_DIR (default is $(MXNET_ROOT)/lib)`.
### Run multi threaded inference example
The example is tested with models such as `imagenet1k-inception-bn`, `imagenet1k-resnet-50`,
@@ -166,7 +166,7 @@ The above code outputs results for different threads and cleans up the thread sa
1. Only operators tested with the existing model coverage are supported. Other operators and operator types (stateful operators, custom operators are not supported. Existing model coverage is as follows (this list will keep growing as we test more models with different model types):
-|Models Tested|MKLDNN|CUDNN|NO-CUDNN|
+|Models Tested|ONEDNN|CUDNN|NO-CUDNN|
| --- | --- | --- | --- |
| imagenet1k-resnet-18 | Yes | Yes | Yes |
| imagenet1k-resnet-152 | Yes | Yes | Yes |
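For the environment-variable route mentioned above, the names come straight from the example Makefile; a sketch for a CMake build tree (the checkout location is hypothetical):

```bash
export MXNET_ROOT=/path/to/incubator-mxnet   # adjust to your checkout
export MKLDNN_BUILD_DIR=$MXNET_ROOT/3rdparty/onednn/build
export MKLDNN_INCLUDE_DIR=$MXNET_ROOT/3rdparty/onednn/include
export MXNET_LIB_DIR=$MXNET_ROOT/lib
```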
diff --git a/docs/static_site/src/pages/api/faq/cloud.md b/docs/static_site/src/pages/api/faq/cloud.md
index dd1643cb54a1..894b83ebdc48 100644
--- a/docs/static_site/src/pages/api/faq/cloud.md
+++ b/docs/static_site/src/pages/api/faq/cloud.md
@@ -55,7 +55,7 @@ on how to connect to a Jupyter notebook running on an EC2 instance.
[Deep Learning Base AMIs](https://aws.amazon.com/marketplace/search/results?x=0&y=0&searchTerms=Deep+Learning+Base+AMI)
provide a foundational image with NVIDIA CUDA, cuDNN, GPU drivers, Intel
-MKL-DNN, Docker and Nvidia-Docker, etc. for deploying your own custom deep
+ONEDNN, Docker and Nvidia-Docker, etc. for deploying your own custom deep
learning environment. You may follow the [MXNet Build From Source
instructions](https://mxnet.apache.org/get_started/build_from_source) easily on
the Deep Learning Base AMIs.
diff --git a/docs/static_site/src/pages/api/faq/env_var.md b/docs/static_site/src/pages/api/faq/env_var.md
index b28e27b02700..eaead19383ba 100644
--- a/docs/static_site/src/pages/api/faq/env_var.md
+++ b/docs/static_site/src/pages/api/faq/env_var.md
@@ -326,12 +326,12 @@ If ctypes is used, it must be `mxnet._ctypes.ndarray.NDArrayBase`.
* MXNET_MKLDNN_ENABLED
- Values: 0, 1 ```(default=1)```
- - Flag to enable or disable MKLDNN accelerator. On by default.
- - Only applies to mxnet that has been compiled with MKLDNN (```pip install mxnet-mkl``` or built from source with ```USE_MKLDNN=1```)
+  - Flag to enable or disable the ONEDNN accelerator. On by default.
+ - Only applies to mxnet that has been compiled with ONEDNN (```pip install mxnet``` or built from source with ```USE_ONEDNN=1```)
* MXNET_MKLDNN_CACHE_NUM
- Values: Int ```(default=-1)```
- - Flag to set num of elements that MKLDNN cache can hold. Default is -1 which means cache size is unbounded. Should only be set if your model has variable input shapes, as cache size may grow unbounded. The number represents the number of items in the cache and is proportional to the number of layers that use MKLDNN and different input shape.
+  - Flag to set the number of elements that the ONEDNN cache can hold. Default is -1, which means the cache size is unbounded. Should only be set if your model has variable input shapes, as the cache may otherwise grow unbounded. The number represents the number of items in the cache and is proportional to the number of layers that use ONEDNN with different input shapes.
* MXNET_ENFORCE_DETERMINISM
- Values: 0(false) or 1(true) ```(default=0)```
@@ -371,9 +371,9 @@ If ctypes is used, it must be `mxnet._ctypes.ndarray.NDArrayBase`.
- This variable controls how many CuDNN dropout state resources to create for each GPU context for use in operator.
* MXNET_SUBGRAPH_BACKEND
- - Values: String ```(default="MKLDNN")``` if MKLDNN is avaliable, otherwise ```(default="")```
+  - Values: String ```(default="MKLDNN")``` if ONEDNN is available, otherwise ```(default="")```
- This variable controls the subgraph partitioning in MXNet.
- - This variable is used to perform MKL-DNN FP32 operator fusion and quantization. Please refer to the [MKL-DNN operator list](https://github.com/apache/incubator-mxnet/blob/v1.5.x/docs/tutorials/mkldnn/operator_list.md) for how this variable is used and the list of fusion passes.
+ - This variable is used to perform ONEDNN FP32 operator fusion and quantization. Please refer to the [ONEDNN operator list](https://github.com/apache/incubator-mxnet/blob/v1.5.x/docs/tutorials/mkldnn/operator_list.md) for how this variable is used and the list of fusion passes.
- Set ```MXNET_SUBGRAPH_BACKEND=NONE``` to disable subgraph backend.
* MXNET_SAFE_ACCUMULATION
@@ -399,9 +399,9 @@ If ctypes is used, it must be `mxnet._ctypes.ndarray.NDArrayBase`.
- Values: 0(false) or 1(true) ```(default=1)```
- If this variable is set, MXNet will simplify the computation graph, eliminating duplicated operations on the same inputs.
-* MXNET_USE_MKLDNN_RNN
+* MXNET_USE_ONEDNN_RNN
- Values: 0(false) or 1(true) ```(default=1)```
- - This variable controls whether to use the MKL-DNN backend in fused RNN operator for CPU context. There are two fusion implementations of RNN operator in MXNet. The MKL-DNN implementation has a better performance than the naive one, but the latter is more stable in the backward operation currently.
+  - This variable controls whether to use the ONEDNN backend in the fused RNN operator for the CPU context. There are two fusion implementations of the RNN operator in MXNet. The ONEDNN implementation has better performance than the naive one, but the latter is currently more stable in the backward operation.
* MXNET_FC_TRUE_FP16
- Values: 0(false) or 1(true) ```(default=0)```
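Taken together, the toggles documented above are plain environment variables; a sketch using the defaults stated in the text (`my_script.py` is a placeholder):

```bash
export MXNET_MKLDNN_ENABLED=1      # runtime switch name is unchanged by this patch
export MXNET_MKLDNN_CACHE_NUM=-1   # unbounded primitive cache (the default)
export MXNET_USE_ONEDNN_RNN=1      # renamed from MXNET_USE_MKLDNN_RNN above
python my_script.py
```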
diff --git a/docs/static_site/src/pages/api/faq/large_tensor_support.md b/docs/static_site/src/pages/api/faq/large_tensor_support.md
index ab251a78fb0b..247720f713b3 100644
--- a/docs/static_site/src/pages/api/faq/large_tensor_support.md
+++ b/docs/static_site/src/pages/api/faq/large_tensor_support.md
@@ -141,9 +141,9 @@ Backward pass is partially supported and not completely tested, so it is conside
Not supported:
-* GPU and MKLDNN.
+* GPU and ONEDNN.
* Windows, ARM or any operating system other than Ubuntu
-* Any permutation of MXNet wheel that contains MKLDNN.
+* Any permutation of MXNet wheel that contains ONEDNN.
* Other language bindings like Scala, Java, R, and Julia.
diff --git a/docs/static_site/src/pages/api/faq/perf.md b/docs/static_site/src/pages/api/faq/perf.md
index c20d8da40f45..d085fc00a358 100644
--- a/docs/static_site/src/pages/api/faq/perf.md
+++ b/docs/static_site/src/pages/api/faq/perf.md
@@ -49,7 +49,7 @@ When using Intel Xeon CPUs for training and inference, the `mxnet-mkl` package i
$ pip install mxnet-mkl [--pre]
```
-Or build MXNet from source code with `USE_MKLDNN=1`. For Linux users, `USE_MKLDNN=1` will be turned on by default.
+Or build MXNet from source code with `USE_ONEDNN=1`. For Linux users, `USE_ONEDNN=1` will be turned on by default.
We also find that setting the following environment variables can help:
@@ -58,7 +58,7 @@ We also find that setting the following environment variables can help:
| :-------- | :---------- |
| `OMP_NUM_THREADS` | Suggested value: `vCPUs / 2` in which `vCPUs` is the number of virtual CPUs. For more information, please see the guide for [setting the number of threads using an OpenMP environment variable](https://software.intel.com/en-us/mkl-windows-developer-guide-setting-the-number-of-threads-using-an-openmp-environment-variable) |
| `KMP_AFFINITY` | Suggested value: `granularity=fine,compact,1,0`. For more information, please see the guide for [Thread Affinity Interface (Linux* and Windows*)](https://software.intel.com/en-us/node/522691). |
-| `MXNET_SUBGRAPH_BACKEND` | Set to MKLDNN to enable the [subgraph feature](https://cwiki.apache.org/confluence/display/MXNET/MXNet+Graph+Optimization+and+Quantization+based+on+subgraph+and+MKL-DNN) for better performance. For more information please see [Build/Install MXNet with MKL-DNN](https://mxnet.apache.org/api/python/docs/tutorials/performance/backend/mkldnn/mkldnn_readme.html)|
+| `MXNET_SUBGRAPH_BACKEND` | Set to `ONEDNN` to enable the [subgraph feature](https://cwiki.apache.org/confluence/display/MXNET/MXNet+Graph+Optimization+and+Quantization+based+on+subgraph+and+MKL-DNN) for better performance. For more information, please see [Build/Install MXNet with ONEDNN](https://mxnet.apache.org/api/python/docs/tutorials/performance/backend/mkldnn/mkldnn_readme.html)|
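Putting the table together, a typical tuning snippet for a machine with 16 vCPUs (the core count is only an example) might look like:
```
export OMP_NUM_THREADS=8   # vCPUs / 2
export KMP_AFFINITY=granularity=fine,compact,1,0
export MXNET_SUBGRAPH_BACKEND=ONEDNN
```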
Note that _MXNet_ treats all CPUs on a single machine as a single device.
So whether you specify `cpu(0)` or `cpu()`, _MXNet_ will use all CPU cores on the machine.
diff --git a/docs/static_site/src/pages/api/faq/tensor_inspector_tutorial.md b/docs/static_site/src/pages/api/faq/tensor_inspector_tutorial.md
index 8d6838e425a0..1212524cf397 100644
--- a/docs/static_site/src/pages/api/faq/tensor_inspector_tutorial.md
+++ b/docs/static_site/src/pages/api/faq/tensor_inspector_tutorial.md
@@ -168,7 +168,7 @@ Notice: in `interactive_print()`, you could also do value dumping with command "
### Test Coverage and Limitations
-This utility has been tested on Mac and Ubuntu with and without CUDNN and MKLDNN. Supports for `Tensor`, `TBlob`, and `NDArray`, as well as for CPU and GPU have been manually tested.
+This utility has been tested on Mac and Ubuntu with and without CUDNN and ONEDNN. Support for `Tensor`, `TBlob`, and `NDArray`, as well as for CPU and GPU, has been manually tested.
Currently, this utility only supports non-empty tensors and tensors with known shapes, i.e. `tb_.ndim() > 0`. Also, this utility only supports dense `NDArray` objects, i.e. when the type is `kDefaultStorage`.
diff --git a/example/README.md b/example/README.md
index d0eafa7dc54a..f145600b62af 100644
--- a/example/README.md
+++ b/example/README.md
@@ -106,7 +106,7 @@ If your tutorial depends on specific packages, simply add them to this provision
* [Kaggle 2nd national data science bowl](kaggle-ndsb2) - a tutorial for the Kaggle Second National Data Science Bowl
* [Multi-task Learning](multi-task) - how to use MXNet for multi-task learning
* [Profiling](profiler) - generate profiling results in json files
-* [Quantization and Calibration Examples](quantization) - examples of quantizing a FP32 model to INT8 and performing low-precision inference with Intel MKL-DNN on CPU or cuDNN on GPU
+* [Quantization and Calibration Examples](quantization) - examples of quantizing an FP32 model to INT8 and performing low-precision inference with Intel ONEDNN on CPU or cuDNN on GPU
* [Recommender Systems](recommenders) - examples of how to build various kinds of recommender systems
* [Restricted Boltzmann Machine](restricted-boltzmann-machine) - an example of the binary restricted Boltzmann machine learning MNIST
* [Single Shot MultiBox Detector](ssd) - SSD object recognition example
diff --git a/example/multi_threaded_inference/Makefile b/example/multi_threaded_inference/Makefile
index 6dba1178c14e..49403b580a83 100644
--- a/example/multi_threaded_inference/Makefile
+++ b/example/multi_threaded_inference/Makefile
@@ -16,7 +16,7 @@
# under the License.
-CFLAGS=-std=c++17 -g -Wno-unknown-pragmas -Wall -DMXNET_USE_CUDA=1 -DMXNET_USE_CUDNN=1 -DMXNET_USE_MKLDNN=1
+CFLAGS=-std=c++17 -g -Wno-unknown-pragmas -Wall -DMXNET_USE_CUDA=1 -DMXNET_USE_CUDNN=1 -DMXNET_USE_ONEDNN=1
export MXNET_ROOT = `pwd`/../..
@@ -28,17 +28,17 @@ ifndef USE_CUDA_PATH
endif
ifndef MKLDNN_BUILD_DIR
- export MKLDNN_BUILD_DIR = $(MXNET_ROOT)/3rdparty/mkldnn/build
+ export MKLDNN_BUILD_DIR = $(MXNET_ROOT)/3rdparty/onednn/build
# Cmake build path by default
# Uncomment below line for CMake build
- #export MKLDNN_BUILD_DIR = $(MXNET_ROOT)/build/3rdparty/mkldnn
+ #export MKLDNN_BUILD_DIR = $(MXNET_ROOT)/build/3rdparty/onednn
endif
ifndef MKLDNN_INCLUDE_DIR
- export MKLDNN_INCLUDE_DIR = $(MXNET_ROOT)/3rdparty/mkldnn/include
+ export MKLDNN_INCLUDE_DIR = $(MXNET_ROOT)/3rdparty/onednn/include
# Cmake build path by default
# Uncomment below line for CMake build
- #export MKLDNN_INCLUDE_DIR = $(MXNET_ROOT)/3rdparty/mkldnn/include
+ #export MKLDNN_INCLUDE_DIR = $(MXNET_ROOT)/3rdparty/onednn/include
endif
CFLAGS += -I$(MXNET_ROOT)/include -I$(USE_CUDA_PATH)/include -I$(MKLDNN_INCLUDE_DIR) -I$(MKLDNN_BUILD_DIR)/include
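Rather than editing the Makefile, both directories can be overridden on the make command line; a sketch for a CMake build tree, assuming the relative paths from the comments above:
```
# Hypothetical invocation pointing at a CMake build tree
make MKLDNN_BUILD_DIR=$(pwd)/../../build/3rdparty/onednn \
     MKLDNN_INCLUDE_DIR=$(pwd)/../../3rdparty/onednn/include
```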
diff --git a/include/mkldnn/dnnl.h b/include/mkldnn/dnnl.h
deleted file mode 120000
index 44625f5e28e3..000000000000
--- a/include/mkldnn/dnnl.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl.h
\ No newline at end of file
diff --git a/include/mkldnn/dnnl.hpp b/include/mkldnn/dnnl.hpp
deleted file mode 120000
index 4dfc038d850e..000000000000
--- a/include/mkldnn/dnnl.hpp
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl.hpp
\ No newline at end of file
diff --git a/include/mkldnn/dnnl_config.h b/include/mkldnn/dnnl_config.h
deleted file mode 120000
index ff3719dfc04c..000000000000
--- a/include/mkldnn/dnnl_config.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl_config.h
\ No newline at end of file
diff --git a/include/mkldnn/dnnl_debug.h b/include/mkldnn/dnnl_debug.h
deleted file mode 120000
index db549eddc337..000000000000
--- a/include/mkldnn/dnnl_debug.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl_debug.h
\ No newline at end of file
diff --git a/include/mkldnn/dnnl_ocl.h b/include/mkldnn/dnnl_ocl.h
deleted file mode 120000
index ecc7f34288a4..000000000000
--- a/include/mkldnn/dnnl_ocl.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl_ocl.h
\ No newline at end of file
diff --git a/include/mkldnn/dnnl_ocl.hpp b/include/mkldnn/dnnl_ocl.hpp
deleted file mode 120000
index 3f4fec4277cc..000000000000
--- a/include/mkldnn/dnnl_ocl.hpp
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl_ocl.hpp
\ No newline at end of file
diff --git a/include/mkldnn/dnnl_sycl.h b/include/mkldnn/dnnl_sycl.h
deleted file mode 120000
index 4c1bfe51acf1..000000000000
--- a/include/mkldnn/dnnl_sycl.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl_sycl.h
\ No newline at end of file
diff --git a/include/mkldnn/dnnl_sycl.hpp b/include/mkldnn/dnnl_sycl.hpp
deleted file mode 120000
index 8837231daae0..000000000000
--- a/include/mkldnn/dnnl_sycl.hpp
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl_sycl.hpp
\ No newline at end of file
diff --git a/include/mkldnn/dnnl_sycl_types.h b/include/mkldnn/dnnl_sycl_types.h
deleted file mode 120000
index 94461bc3d449..000000000000
--- a/include/mkldnn/dnnl_sycl_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl_sycl_types.h
\ No newline at end of file
diff --git a/include/mkldnn/dnnl_threadpool.h b/include/mkldnn/dnnl_threadpool.h
deleted file mode 120000
index 5ed7f6431e3d..000000000000
--- a/include/mkldnn/dnnl_threadpool.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl_threadpool.h
\ No newline at end of file
diff --git a/include/mkldnn/dnnl_threadpool.hpp b/include/mkldnn/dnnl_threadpool.hpp
deleted file mode 120000
index dff43b044767..000000000000
--- a/include/mkldnn/dnnl_threadpool.hpp
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl_threadpool.hpp
\ No newline at end of file
diff --git a/include/mkldnn/dnnl_threadpool_iface.hpp b/include/mkldnn/dnnl_threadpool_iface.hpp
deleted file mode 120000
index f651ff17f10a..000000000000
--- a/include/mkldnn/dnnl_threadpool_iface.hpp
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl_threadpool_iface.hpp
\ No newline at end of file
diff --git a/include/mkldnn/dnnl_types.h b/include/mkldnn/dnnl_types.h
deleted file mode 120000
index 750b64cc4bc3..000000000000
--- a/include/mkldnn/dnnl_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl_types.h
\ No newline at end of file
diff --git a/include/mkldnn/dnnl_version.h b/include/mkldnn/dnnl_version.h
deleted file mode 120000
index a4fde025b179..000000000000
--- a/include/mkldnn/dnnl_version.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/dnnl_version.h
\ No newline at end of file
diff --git a/include/mkldnn/mkldnn.h b/include/mkldnn/mkldnn.h
deleted file mode 120000
index 873c515d113a..000000000000
--- a/include/mkldnn/mkldnn.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/mkldnn.h
\ No newline at end of file
diff --git a/include/mkldnn/mkldnn.hpp b/include/mkldnn/mkldnn.hpp
deleted file mode 120000
index 2cb212af8666..000000000000
--- a/include/mkldnn/mkldnn.hpp
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/mkldnn.hpp
\ No newline at end of file
diff --git a/include/mkldnn/mkldnn_config.h b/include/mkldnn/mkldnn_config.h
deleted file mode 120000
index 8f5259e54d1a..000000000000
--- a/include/mkldnn/mkldnn_config.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/mkldnn_config.h
\ No newline at end of file
diff --git a/include/mkldnn/mkldnn_debug.h b/include/mkldnn/mkldnn_debug.h
deleted file mode 120000
index a67617c2942a..000000000000
--- a/include/mkldnn/mkldnn_debug.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/mkldnn_debug.h
\ No newline at end of file
diff --git a/include/mkldnn/mkldnn_dnnl_mangling.h b/include/mkldnn/mkldnn_dnnl_mangling.h
deleted file mode 120000
index 876ad649aa8f..000000000000
--- a/include/mkldnn/mkldnn_dnnl_mangling.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/mkldnn_dnnl_mangling.h
\ No newline at end of file
diff --git a/include/mkldnn/mkldnn_types.h b/include/mkldnn/mkldnn_types.h
deleted file mode 120000
index 548b884e2edb..000000000000
--- a/include/mkldnn/mkldnn_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/mkldnn_types.h
\ No newline at end of file
diff --git a/include/mkldnn/mkldnn_version.h b/include/mkldnn/mkldnn_version.h
deleted file mode 120000
index 76927f2ef2e2..000000000000
--- a/include/mkldnn/mkldnn_version.h
+++ /dev/null
@@ -1 +0,0 @@
-../../3rdparty/mkldnn/include/mkldnn_version.h
\ No newline at end of file
diff --git a/include/mkldnn/oneapi/dnnl/dnnl.h b/include/mkldnn/oneapi/dnnl/dnnl.h
deleted file mode 120000
index ee11f505f1d2..000000000000
--- a/include/mkldnn/oneapi/dnnl/dnnl.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../3rdparty/mkldnn/include/oneapi/dnnl/dnnl.h
\ No newline at end of file
diff --git a/include/mkldnn/oneapi/dnnl/dnnl.hpp b/include/mkldnn/oneapi/dnnl/dnnl.hpp
deleted file mode 120000
index 22635d614bd3..000000000000
--- a/include/mkldnn/oneapi/dnnl/dnnl.hpp
+++ /dev/null
@@ -1 +0,0 @@
-../../../../3rdparty/mkldnn/include/oneapi/dnnl/dnnl.hpp
\ No newline at end of file
diff --git a/include/mkldnn/oneapi/dnnl/dnnl_debug.h b/include/mkldnn/oneapi/dnnl/dnnl_debug.h
deleted file mode 120000
index 982bc2191721..000000000000
--- a/include/mkldnn/oneapi/dnnl/dnnl_debug.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../3rdparty/mkldnn/include/oneapi/dnnl/dnnl_debug.h
\ No newline at end of file
diff --git a/include/mkldnn/oneapi/dnnl/dnnl_ocl.h b/include/mkldnn/oneapi/dnnl/dnnl_ocl.h
deleted file mode 120000
index 85970c1e1f0e..000000000000
--- a/include/mkldnn/oneapi/dnnl/dnnl_ocl.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../3rdparty/mkldnn/include/oneapi/dnnl/dnnl_ocl.h
\ No newline at end of file
diff --git a/include/mkldnn/oneapi/dnnl/dnnl_ocl.hpp b/include/mkldnn/oneapi/dnnl/dnnl_ocl.hpp
deleted file mode 120000
index 4fcef2ca93b9..000000000000
--- a/include/mkldnn/oneapi/dnnl/dnnl_ocl.hpp
+++ /dev/null
@@ -1 +0,0 @@
-../../../../3rdparty/mkldnn/include/oneapi/dnnl/dnnl_ocl.hpp
\ No newline at end of file
diff --git a/include/mkldnn/oneapi/dnnl/dnnl_sycl.h b/include/mkldnn/oneapi/dnnl/dnnl_sycl.h
deleted file mode 120000
index e39828aeacbb..000000000000
--- a/include/mkldnn/oneapi/dnnl/dnnl_sycl.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../3rdparty/mkldnn/include/oneapi/dnnl/dnnl_sycl.h
\ No newline at end of file
diff --git a/include/mkldnn/oneapi/dnnl/dnnl_sycl.hpp b/include/mkldnn/oneapi/dnnl/dnnl_sycl.hpp
deleted file mode 120000
index 240f3ce94535..000000000000
--- a/include/mkldnn/oneapi/dnnl/dnnl_sycl.hpp
+++ /dev/null
@@ -1 +0,0 @@
-../../../../3rdparty/mkldnn/include/oneapi/dnnl/dnnl_sycl.hpp
\ No newline at end of file
diff --git a/include/mkldnn/oneapi/dnnl/dnnl_sycl_types.h b/include/mkldnn/oneapi/dnnl/dnnl_sycl_types.h
deleted file mode 120000
index 57e21d047c23..000000000000
--- a/include/mkldnn/oneapi/dnnl/dnnl_sycl_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../3rdparty/mkldnn/include/oneapi/dnnl/dnnl_sycl_types.h
\ No newline at end of file
diff --git a/include/mkldnn/oneapi/dnnl/dnnl_threadpool.h b/include/mkldnn/oneapi/dnnl/dnnl_threadpool.h
deleted file mode 120000
index 025a0d37e4a6..000000000000
--- a/include/mkldnn/oneapi/dnnl/dnnl_threadpool.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../3rdparty/mkldnn/include/oneapi/dnnl/dnnl_threadpool.h
\ No newline at end of file
diff --git a/include/mkldnn/oneapi/dnnl/dnnl_threadpool.hpp b/include/mkldnn/oneapi/dnnl/dnnl_threadpool.hpp
deleted file mode 120000
index f0d9325da1c8..000000000000
--- a/include/mkldnn/oneapi/dnnl/dnnl_threadpool.hpp
+++ /dev/null
@@ -1 +0,0 @@
-../../../../3rdparty/mkldnn/include/oneapi/dnnl/dnnl_threadpool.hpp
\ No newline at end of file
diff --git a/include/mkldnn/oneapi/dnnl/dnnl_threadpool_iface.hpp b/include/mkldnn/oneapi/dnnl/dnnl_threadpool_iface.hpp
deleted file mode 120000
index e650ecba6eae..000000000000
--- a/include/mkldnn/oneapi/dnnl/dnnl_threadpool_iface.hpp
+++ /dev/null
@@ -1 +0,0 @@
-../../../../3rdparty/mkldnn/include/oneapi/dnnl/dnnl_threadpool_iface.hpp
\ No newline at end of file
diff --git a/include/mkldnn/oneapi/dnnl/dnnl_types.h b/include/mkldnn/oneapi/dnnl/dnnl_types.h
deleted file mode 120000
index 88baa15cb5d5..000000000000
--- a/include/mkldnn/oneapi/dnnl/dnnl_types.h
+++ /dev/null
@@ -1 +0,0 @@
-../../../../3rdparty/mkldnn/include/oneapi/dnnl/dnnl_types.h
\ No newline at end of file
diff --git a/include/mxnet/base.h b/include/mxnet/base.h
index 6d30f9814617..e610e2c381ad 100644
--- a/include/mxnet/base.h
+++ b/include/mxnet/base.h
@@ -541,7 +541,7 @@ inline std::ostream& operator<<(std::ostream &out, const Context &ctx) {
#define ADD_FILELINE "\n\nDefined in " __FILE__ ":L" STRINGIZE(__LINE__)
-#if MXNET_USE_MKLDNN == 1 || MXNET_USE_INTGEMM == 1
+#if MXNET_USE_ONEDNN == 1 || MXNET_USE_INTGEMM == 1
constexpr size_t kMKLDNNAlign = 64;
#endif
diff --git a/include/mxnet/libinfo.h b/include/mxnet/libinfo.h
index 9f640d79fffb..6eee0a98d200 100644
--- a/include/mxnet/libinfo.h
+++ b/include/mxnet/libinfo.h
@@ -103,8 +103,8 @@
#define MXNET_USE_LAPACK 0
#endif
-#ifndef MXNET_USE_MKLDNN
-#define MXNET_USE_MKLDNN 0
+#ifndef MXNET_USE_ONEDNN
+#define MXNET_USE_ONEDNN 0
#endif
#ifndef MXNET_USE_OPENMP
diff --git a/include/mxnet/ndarray.h b/include/mxnet/ndarray.h
index 7c406f4f801e..f01c67f80cd1 100644
--- a/include/mxnet/ndarray.h
+++ b/include/mxnet/ndarray.h
@@ -37,7 +37,7 @@
#include
#include
#include
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#endif
#include "./base.h"
@@ -727,7 +727,7 @@ class NDArray {
ptr_->CheckAndAllocAuxData(i, aux_shape);
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
/*
* Create NDArray from mkldnn memory.
* mkldnn_mem The mkldnn memory to be managed.
@@ -859,7 +859,7 @@ class NDArray {
*/
std::vector<Storage::Handle> aux_handles;
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
/*! This is created when data is stored in MKLDNN format.
*/
std::shared_ptr<MKLDNNMemory> mkl_mem_;
@@ -1018,7 +1018,7 @@ class NDArray {
inline void CheckAndAlloc(void) {
if (delay_alloc) {
Storage::Get()->Alloc(&shandle);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
mkl_mem_ = nullptr;
#endif
delay_alloc = false;
@@ -1034,7 +1034,7 @@ class NDArray {
if (delay_alloc) {
shandle.size = dbytes;
Storage::Get()->Alloc(&shandle);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
mkl_mem_ = nullptr;
#endif
delay_alloc = false;
@@ -1044,7 +1044,7 @@ class NDArray {
// init storage
shandle.size = dbytes;
Storage::Get()->Alloc(&shandle);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
mkl_mem_ = nullptr;
#endif
}
@@ -1080,7 +1080,7 @@ class NDArray {
// and allocate new storage
void CheckAndAllocData(const mxnet::TShape &shape, int dtype);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
// Have MKL memory reference to the data in the default storage
// or create memory for MKLDNN.
void SetMKLMem(const mxnet::TShape &shape, int dtype);
diff --git a/include/onednn/dnnl.h b/include/onednn/dnnl.h
new file mode 120000
index 000000000000..bc5443ec2045
--- /dev/null
+++ b/include/onednn/dnnl.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl.h
\ No newline at end of file
diff --git a/include/onednn/dnnl.hpp b/include/onednn/dnnl.hpp
new file mode 120000
index 000000000000..db10025b2c83
--- /dev/null
+++ b/include/onednn/dnnl.hpp
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl.hpp
\ No newline at end of file
diff --git a/include/onednn/dnnl_config.h b/include/onednn/dnnl_config.h
new file mode 120000
index 000000000000..03a1060d515d
--- /dev/null
+++ b/include/onednn/dnnl_config.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl_config.h
\ No newline at end of file
diff --git a/include/onednn/dnnl_debug.h b/include/onednn/dnnl_debug.h
new file mode 120000
index 000000000000..489991d97726
--- /dev/null
+++ b/include/onednn/dnnl_debug.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl_debug.h
\ No newline at end of file
diff --git a/include/onednn/dnnl_ocl.h b/include/onednn/dnnl_ocl.h
new file mode 120000
index 000000000000..09509a3e2b8f
--- /dev/null
+++ b/include/onednn/dnnl_ocl.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl_ocl.h
\ No newline at end of file
diff --git a/include/onednn/dnnl_ocl.hpp b/include/onednn/dnnl_ocl.hpp
new file mode 120000
index 000000000000..2ca3edcb41d5
--- /dev/null
+++ b/include/onednn/dnnl_ocl.hpp
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl_ocl.hpp
\ No newline at end of file
diff --git a/include/onednn/dnnl_sycl.h b/include/onednn/dnnl_sycl.h
new file mode 120000
index 000000000000..b998f3886b2b
--- /dev/null
+++ b/include/onednn/dnnl_sycl.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl_sycl.h
\ No newline at end of file
diff --git a/include/onednn/dnnl_sycl.hpp b/include/onednn/dnnl_sycl.hpp
new file mode 120000
index 000000000000..348184f88ba7
--- /dev/null
+++ b/include/onednn/dnnl_sycl.hpp
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl_sycl.hpp
\ No newline at end of file
diff --git a/include/onednn/dnnl_sycl_types.h b/include/onednn/dnnl_sycl_types.h
new file mode 120000
index 000000000000..a710056e67fc
--- /dev/null
+++ b/include/onednn/dnnl_sycl_types.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl_sycl_types.h
\ No newline at end of file
diff --git a/include/onednn/dnnl_threadpool.h b/include/onednn/dnnl_threadpool.h
new file mode 120000
index 000000000000..ee586c89dcbe
--- /dev/null
+++ b/include/onednn/dnnl_threadpool.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl_threadpool.h
\ No newline at end of file
diff --git a/include/onednn/dnnl_threadpool.hpp b/include/onednn/dnnl_threadpool.hpp
new file mode 120000
index 000000000000..54e0af6521fb
--- /dev/null
+++ b/include/onednn/dnnl_threadpool.hpp
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl_threadpool.hpp
\ No newline at end of file
diff --git a/include/onednn/dnnl_threadpool_iface.hpp b/include/onednn/dnnl_threadpool_iface.hpp
new file mode 120000
index 000000000000..1b05134b4b52
--- /dev/null
+++ b/include/onednn/dnnl_threadpool_iface.hpp
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl_threadpool_iface.hpp
\ No newline at end of file
diff --git a/include/onednn/dnnl_types.h b/include/onednn/dnnl_types.h
new file mode 120000
index 000000000000..31bbbc9ae816
--- /dev/null
+++ b/include/onednn/dnnl_types.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl_types.h
\ No newline at end of file
diff --git a/include/onednn/dnnl_version.h b/include/onednn/dnnl_version.h
new file mode 120000
index 000000000000..bb789dfd2ad3
--- /dev/null
+++ b/include/onednn/dnnl_version.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/dnnl_version.h
\ No newline at end of file
diff --git a/include/onednn/mkldnn.h b/include/onednn/mkldnn.h
new file mode 120000
index 000000000000..ef19407410d8
--- /dev/null
+++ b/include/onednn/mkldnn.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/mkldnn.h
\ No newline at end of file
diff --git a/include/onednn/mkldnn.hpp b/include/onednn/mkldnn.hpp
new file mode 120000
index 000000000000..e7f56e95d055
--- /dev/null
+++ b/include/onednn/mkldnn.hpp
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/mkldnn.hpp
\ No newline at end of file
diff --git a/include/onednn/mkldnn_config.h b/include/onednn/mkldnn_config.h
new file mode 120000
index 000000000000..714a58682a91
--- /dev/null
+++ b/include/onednn/mkldnn_config.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/mkldnn_config.h
\ No newline at end of file
diff --git a/include/onednn/mkldnn_debug.h b/include/onednn/mkldnn_debug.h
new file mode 120000
index 000000000000..ca0e6b99801e
--- /dev/null
+++ b/include/onednn/mkldnn_debug.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/mkldnn_debug.h
\ No newline at end of file
diff --git a/include/onednn/mkldnn_dnnl_mangling.h b/include/onednn/mkldnn_dnnl_mangling.h
new file mode 120000
index 000000000000..67bf8d0893a7
--- /dev/null
+++ b/include/onednn/mkldnn_dnnl_mangling.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/mkldnn_dnnl_mangling.h
\ No newline at end of file
diff --git a/include/onednn/mkldnn_types.h b/include/onednn/mkldnn_types.h
new file mode 120000
index 000000000000..334078bfafa1
--- /dev/null
+++ b/include/onednn/mkldnn_types.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/mkldnn_types.h
\ No newline at end of file
diff --git a/include/onednn/mkldnn_version.h b/include/onednn/mkldnn_version.h
new file mode 120000
index 000000000000..ed357587f6df
--- /dev/null
+++ b/include/onednn/mkldnn_version.h
@@ -0,0 +1 @@
+../../3rdparty/onednn/include/mkldnn_version.h
\ No newline at end of file
diff --git a/include/onednn/oneapi/dnnl/dnnl.h b/include/onednn/oneapi/dnnl/dnnl.h
new file mode 120000
index 000000000000..863d538ec5ad
--- /dev/null
+++ b/include/onednn/oneapi/dnnl/dnnl.h
@@ -0,0 +1 @@
+../../../../3rdparty/onednn/include/oneapi/dnnl/dnnl.h
\ No newline at end of file
diff --git a/include/onednn/oneapi/dnnl/dnnl.hpp b/include/onednn/oneapi/dnnl/dnnl.hpp
new file mode 120000
index 000000000000..4337527cfc9c
--- /dev/null
+++ b/include/onednn/oneapi/dnnl/dnnl.hpp
@@ -0,0 +1 @@
+../../../../3rdparty/onednn/include/oneapi/dnnl/dnnl.hpp
\ No newline at end of file
diff --git a/include/onednn/oneapi/dnnl/dnnl_debug.h b/include/onednn/oneapi/dnnl/dnnl_debug.h
new file mode 120000
index 000000000000..aa0568240d6a
--- /dev/null
+++ b/include/onednn/oneapi/dnnl/dnnl_debug.h
@@ -0,0 +1 @@
+../../../../3rdparty/onednn/include/oneapi/dnnl/dnnl_debug.h
\ No newline at end of file
diff --git a/include/onednn/oneapi/dnnl/dnnl_ocl.h b/include/onednn/oneapi/dnnl/dnnl_ocl.h
new file mode 120000
index 000000000000..4652f6535068
--- /dev/null
+++ b/include/onednn/oneapi/dnnl/dnnl_ocl.h
@@ -0,0 +1 @@
+../../../../3rdparty/onednn/include/oneapi/dnnl/dnnl_ocl.h
\ No newline at end of file
diff --git a/include/onednn/oneapi/dnnl/dnnl_ocl.hpp b/include/onednn/oneapi/dnnl/dnnl_ocl.hpp
new file mode 120000
index 000000000000..a2f0eb9024c7
--- /dev/null
+++ b/include/onednn/oneapi/dnnl/dnnl_ocl.hpp
@@ -0,0 +1 @@
+../../../../3rdparty/onednn/include/oneapi/dnnl/dnnl_ocl.hpp
\ No newline at end of file
diff --git a/include/onednn/oneapi/dnnl/dnnl_sycl.h b/include/onednn/oneapi/dnnl/dnnl_sycl.h
new file mode 120000
index 000000000000..2cc22455018d
--- /dev/null
+++ b/include/onednn/oneapi/dnnl/dnnl_sycl.h
@@ -0,0 +1 @@
+../../../../3rdparty/onednn/include/oneapi/dnnl/dnnl_sycl.h
\ No newline at end of file
diff --git a/include/onednn/oneapi/dnnl/dnnl_sycl.hpp b/include/onednn/oneapi/dnnl/dnnl_sycl.hpp
new file mode 120000
index 000000000000..2a58d8da0e43
--- /dev/null
+++ b/include/onednn/oneapi/dnnl/dnnl_sycl.hpp
@@ -0,0 +1 @@
+../../../../3rdparty/onednn/include/oneapi/dnnl/dnnl_sycl.hpp
\ No newline at end of file
diff --git a/include/onednn/oneapi/dnnl/dnnl_sycl_types.h b/include/onednn/oneapi/dnnl/dnnl_sycl_types.h
new file mode 120000
index 000000000000..5ac056d6d942
--- /dev/null
+++ b/include/onednn/oneapi/dnnl/dnnl_sycl_types.h
@@ -0,0 +1 @@
+../../../../3rdparty/onednn/include/oneapi/dnnl/dnnl_sycl_types.h
\ No newline at end of file
diff --git a/include/onednn/oneapi/dnnl/dnnl_threadpool.h b/include/onednn/oneapi/dnnl/dnnl_threadpool.h
new file mode 120000
index 000000000000..86e888d77fd4
--- /dev/null
+++ b/include/onednn/oneapi/dnnl/dnnl_threadpool.h
@@ -0,0 +1 @@
+../../../../3rdparty/onednn/include/oneapi/dnnl/dnnl_threadpool.h
\ No newline at end of file
diff --git a/include/onednn/oneapi/dnnl/dnnl_threadpool.hpp b/include/onednn/oneapi/dnnl/dnnl_threadpool.hpp
new file mode 120000
index 000000000000..0a579db11370
--- /dev/null
+++ b/include/onednn/oneapi/dnnl/dnnl_threadpool.hpp
@@ -0,0 +1 @@
+../../../../3rdparty/onednn/include/oneapi/dnnl/dnnl_threadpool.hpp
\ No newline at end of file
diff --git a/include/onednn/oneapi/dnnl/dnnl_threadpool_iface.hpp b/include/onednn/oneapi/dnnl/dnnl_threadpool_iface.hpp
new file mode 120000
index 000000000000..a6b52b587484
--- /dev/null
+++ b/include/onednn/oneapi/dnnl/dnnl_threadpool_iface.hpp
@@ -0,0 +1 @@
+../../../../3rdparty/onednn/include/oneapi/dnnl/dnnl_threadpool_iface.hpp
\ No newline at end of file
diff --git a/include/onednn/oneapi/dnnl/dnnl_types.h b/include/onednn/oneapi/dnnl/dnnl_types.h
new file mode 120000
index 000000000000..c28dbed7ef1e
--- /dev/null
+++ b/include/onednn/oneapi/dnnl/dnnl_types.h
@@ -0,0 +1 @@
+../../../../3rdparty/onednn/include/oneapi/dnnl/dnnl_types.h
\ No newline at end of file
diff --git a/src/c_api/c_api.cc b/src/c_api/c_api.cc
index c8c8139e184d..e35f0f4a351a 100644
--- a/src/c_api/c_api.cc
+++ b/src/c_api/c_api.cc
@@ -155,7 +155,7 @@ void CustomFComputeDispatcher(const std::string op_name,
// convert inputs/outputs NDArray to C types to be passed to lib_api.h
for (size_t i = 0; i < inputs.size(); i++) {
NDArray const* in_nd = &(inputs[i]);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
// reorder data if in MKLDNN format
if (in_nd->IsMKLDNNData()) {
// convert from MKLDNN
@@ -1392,7 +1392,7 @@ void registerPasses(void *lib, int verbose, mxnet::ext::msgSize_t msgSize,
arg_names.push_back(in_arg_names[i].c_str());
const NDArray &in_arg = *(in_args_ptr[i]);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
// reorder data if in MKLDNN format
if (in_arg.IsMKLDNNData()) {
in_arg.Reorder2DefaultAsync();
@@ -1418,7 +1418,7 @@ void registerPasses(void *lib, int verbose, mxnet::ext::msgSize_t msgSize,
aux_names.push_back(in_aux_names[i].c_str());
const auto &in_aux = *(in_aux_ptr[i]);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
// reorder data if in MKLDNN format
if (in_aux.IsMKLDNNData()) {
in_aux.Reorder2DefaultAsync();
@@ -2237,7 +2237,7 @@ int MXNDArrayGetData(NDArrayHandle handle,
void **out_pdata) {
API_BEGIN();
NDArray *arr = static_cast<NDArray*>(handle);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (arr->IsMKLDNNData()) {
arr->Reorder2DefaultAsync();
arr->WaitToRead();
diff --git a/src/common/exec_utils.h b/src/common/exec_utils.h
index 87cfde3c4ffe..80936a916b4d 100644
--- a/src/common/exec_utils.h
+++ b/src/common/exec_utils.h
@@ -36,7 +36,7 @@
namespace mxnet {
namespace common {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
// We have to make sure it's default storage and default layout.
#define DEFAULT_DATA(x) x.IsDefaultData()
#else
@@ -69,7 +69,7 @@ inline bool SetupDefaultBlobsIn(const std::vector& src,
(*idx_map)[i] = temp_dst->size();
NDArray temp = bufs != nullptr ? bufs->at(i) : NDArray(nd.shape(), nd.ctx(),
true, nd.dtype());
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
CHECK(temp.IsDefaultData());
#endif
temp_src->emplace_back(nd);
@@ -93,7 +93,7 @@ inline bool SetupDefaultBlobsOut(const std::vector& src,
for (size_t i = 0; i < src.size(); i++) {
const auto& nd = src[i];
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (req->at(i) == kWriteInplace && nd.IsMKLDNNData())
// If it's write inplace and the output array doesn't use the default
// layout, we'll generate a temporary output array below, which means
@@ -103,7 +103,7 @@ inline bool SetupDefaultBlobsOut(const std::vector& src,
// We have to make sure it's default storage and default layout.
#endif
if (!DEFAULT_DATA(nd)) {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
NDArray temp;
if (bufs != nullptr) {
temp = bufs->at(i);
diff --git a/src/common/utils.h b/src/common/utils.h
index aa36dc1eae47..2d01e6bca685 100644
--- a/src/common/utils.h
+++ b/src/common/utils.h
@@ -49,7 +49,7 @@
#include
#include "../operator/mxnet_op.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif
@@ -495,7 +495,7 @@ inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
"0 to suppress this warning.";
os << "\nStorage type fallback detected:\n" << op_str << warning;
LogOnce(os.str());
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
"You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set."
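Both knobs named in these log messages are plain environment variables; a hedged sketch (semantics inferred from the messages above, where -1 means no cache limit):
```
export MXNET_MKLDNN_ENABLED=1     # re-enable the ONEDNN backend
export MXNET_MKLDNN_CACHE_NUM=-1  # -1 leaves the primitive cache unbounded
```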
diff --git a/src/imperative/attach_op_execs_pass.cc b/src/imperative/attach_op_execs_pass.cc
index c102f34af574..30e67f44a80b 100644
--- a/src/imperative/attach_op_execs_pass.cc
+++ b/src/imperative/attach_op_execs_pass.cc
@@ -36,7 +36,7 @@ namespace mxnet {
namespace exec {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#define CREATE_DEFAULT_INPUTS_MKLDNN(in_array, in_array_fallback, attrs) \
CREATE_DEFAULT_INPUTS(true, attrs, CreateDefaultInputs(in_array, in_array_fallback))
#else
diff --git a/src/imperative/imperative_utils.h b/src/imperative/imperative_utils.h
index 2ad98b222941..f53b0db91b2b 100644
--- a/src/imperative/imperative_utils.h
+++ b/src/imperative/imperative_utils.h
@@ -36,7 +36,7 @@
namespace mxnet {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
template<typename T> T *pntr(T &obj) { return &obj; }  // NOLINT
template<typename T> T *pntr(T *obj) { return obj; }
@@ -534,7 +534,7 @@ inline bool SetupDefaultBlobsIn(const std::vector& src,
(*idx_map)[i] = temp_dst->size();
NDArray temp = bufs != nullptr ? bufs->at(i) : NDArray(nd.shape(), nd.ctx(),
true, nd.dtype());
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
CHECK(temp.IsDefaultData());
#endif
temp_src->emplace_back(nd);
@@ -558,7 +558,7 @@ inline bool SetupDefaultBlobsOut(const std::vector& src,
for (size_t i = 0; i < src.size(); i++) {
const auto& nd = *src[i];
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (req->at(i) == kWriteInplace && nd.IsMKLDNNData())
// If it's write inplace and the output array doesn't use the default
// layout, we'll generate a temporary output array below, which means
@@ -567,7 +567,7 @@ inline bool SetupDefaultBlobsOut(const std::vector& src,
req->at(i) = kWriteTo;
#endif
if (!DEFAULT_DATA(nd)) {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
NDArray temp;
if (bufs != nullptr) {
temp = bufs->at(i);
diff --git a/src/libinfo.cc b/src/libinfo.cc
index 9348b721e801..ae251460bfb4 100644
--- a/src/libinfo.cc
+++ b/src/libinfo.cc
@@ -78,7 +78,7 @@ class FeatureSet {
feature_bits.set(BLAS_MKL, MXNET_USE_BLAS_MKL);
feature_bits.set(BLAS_APPLE, MXNET_USE_BLAS_APPLE);
feature_bits.set(LAPACK, MXNET_USE_LAPACK);
- feature_bits.set(MKLDNN, MXNET_USE_MKLDNN);
+ feature_bits.set(MKLDNN, MXNET_USE_ONEDNN);
// Image
feature_bits.set(OPENCV, MXNET_USE_OPENCV);
diff --git a/src/ndarray/ndarray.cc b/src/ndarray/ndarray.cc
index f52fa257374e..bbce020232cb 100644
--- a/src/ndarray/ndarray.cc
+++ b/src/ndarray/ndarray.cc
@@ -110,7 +110,7 @@ void NDArray::SetShapeFromChunk() const {
struct ChunkMem {
Storage::Handle h;
std::vector<Storage::Handle> aux_h;
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
std::shared_ptr<MKLDNNMemory> mem;
#endif
};
@@ -120,14 +120,14 @@ NDArray::Chunk::~Chunk() {
ChunkMem mem;
mem.h = this->shandle;
mem.aux_h = this->aux_handles;
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
// We want to delete mkldnn memory after deleting the variable.
mem.mem = this->mkl_mem_;
#endif
if (auto engine = engine_ref_.lock()) {
engine->DeleteVariable([mem, skip_free](RunContext s) {
if (skip_free == false) {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (mem.mem) {
CHECK_LE(mem.mem->GetSize(), mem.h.size);
CHECK_EQ(mem.mem->GetDataHandle(), mem.h.dptr);
@@ -157,7 +157,7 @@ void NDArray::Chunk::CheckAndAllocData(const mxnet::TShape &shape, int dtype) {
// init storage
shandle.size = dbytes;
Storage::Get()->Alloc(&shandle);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
mkl_mem_ = nullptr;
#endif
}
@@ -185,7 +185,7 @@ nnvm::Symbol NDArray::get_autograd_symbol() const {
return ret;
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
NDArray::NDArray(const mkldnn::memory::desc &md)
: storage_type_(kDefaultStorage), autograd_entry_(nullptr) {
@@ -489,7 +489,7 @@ void NDArray::set_fresh_out_grad(bool state) const {
info.fresh_out_grad = state;
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
bool NDArray::Chunk::IsMKLDNN() const {
if (storage_type != kDefaultStorage)
@@ -886,7 +886,7 @@ void NDArray::SetTBlob() const {
char *dptr = static_cast<char*>(ptr_->shandle.dptr);
auto stype = storage_type();
if (stype == kDefaultStorage) {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
CHECK(!IsMKLDNNData()) << "We can't generate TBlob for MKLDNN data. "
<< "Please use Reorder2Default() to generate a new NDArray first";
#endif
@@ -1228,7 +1228,7 @@ inline void CopyFromToRspImpl(const NDArray& from, const NDArray& to, RunContext
// Make a copy of a dense NDArray
template<typename from_xpu, typename to_xpu>
inline void CopyFromToDnsImpl(const NDArray& from, const NDArray& to, RunContext ctx) {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
// If neither is MKLDNN, we can copy data normally.
if (!from.IsMKLDNNData() && !to.IsMKLDNNData()) {
#endif
@@ -1237,7 +1237,7 @@ inline void CopyFromToDnsImpl(const NDArray& from, const NDArray& to, RunContext
TBlob tmp = to.data();
ndarray::Copy(from.data(), &tmp,
from.ctx(), to.ctx(), ctx);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
} else if (SupportMKLDNN(from.dtype(), from.shape())
&& SupportMKLDNN(to.dtype(), to.shape())
&& from.ctx().dev_mask() == cpu::kDevMask
@@ -1763,7 +1763,7 @@ void NDArray::Save(dmlc::Stream *strm) const {
} else {
this->WaitToRead();
nd_cpu = *this;
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (nd_cpu.IsMKLDNNData())
nd_cpu = nd_cpu.Reorder2Default();
#endif
@@ -2164,7 +2164,7 @@ void NDArray::SyncCopyToCPU(void *data, size_t size) const {
if (this->ctx().dev_mask() == cpu::kDevMask) {
RunContext rctx{this->ctx(), nullptr, nullptr, false};
NDArray src = *this;
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (src.IsMKLDNNData())
src = this->Reorder2Default();
#endif
diff --git a/src/operator/contrib/batch_norm_relu.cc b/src/operator/contrib/batch_norm_relu.cc
index 890239dfa8f0..52671a094c22 100644
--- a/src/operator/contrib/batch_norm_relu.cc
+++ b/src/operator/contrib/batch_norm_relu.cc
@@ -28,7 +28,7 @@
#include
#include "../elemwise_op_common.h"
#include "../operator_common.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "../nn/mkldnn/mkldnn_batch_norm-inl.h"
#endif
@@ -125,7 +125,7 @@ static bool BatchNormWithReLUType(const nnvm::NodeAttrs& attrs,
return true;
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
static inline bool SupportMKLDNNBNReLU(const NDArray &input, const BatchNormParam &param) {
if (mxnet::op::batchnorm::disable_mkl) return false;
const mxnet::TShape shape = input.shape();
@@ -181,7 +181,7 @@ static inline bool BatchNormWithReLUStorageType(const nnvm::NodeAttrs &attrs,
const BatchNormParam &param = nnvm::get<BatchNormParam>(attrs.parsed);
bool dispatched = false;
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (!dispatched) {
dispatched = MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode,
in_attrs, out_attrs);
@@ -273,11 +273,11 @@ An extended operator of Batch normalization which can fuse ReLU activation.
.set_attr("FInferShape", BatchNormWithReLUShape)
.set_attr("FInferType", BatchNormWithReLUType)
.set_attr("FInferStorageType", BatchNormWithReLUStorageType)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FComputeEx", BatchNormWithReLUComputeExCPU)
#endif
.set_attr("FGradient", BatchNormWithReLUGrad)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
@@ -305,7 +305,7 @@ NNVM_REGISTER_OP(_backward_contrib_BatchNormWithReLU)
.set_num_outputs(3)
.set_attr("TIsBackward", true)
.set_attr("FInferStorageType", BatchNormWithReLUStorageType)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
diff --git a/src/operator/leaky_relu.cc b/src/operator/leaky_relu.cc
index 6690834eff4e..95a825524f86 100644
--- a/src/operator/leaky_relu.cc
+++ b/src/operator/leaky_relu.cc
@@ -25,10 +25,10 @@
*/
#include "./leaky_relu-inl.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "./nn/mkldnn/mkldnn_base-inl.h"
#include "./nn/mkldnn/mkldnn_ops-inl.h"
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
#include
namespace mxnet {
@@ -84,7 +84,7 @@ static bool LeakyReLUShape(const nnvm::NodeAttrs& attrs,
return true;
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
static void LeakyReLUComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
@@ -141,7 +141,7 @@ inline static bool BackwardLeakyReLUStorageType(const nnvm::NodeAttrs& attrs,
return MKLDNNStorageType(attrs, dev_mask, SupportMKLDNNLeakyRelu(param),
dispatch_mode, in_attrs, out_attrs);
}
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
NNVM_REGISTER_OP(LeakyReLU)
.describe(R"code(Applies Leaky rectified linear unit activation element-wise to the input.
@@ -172,7 +172,7 @@ The following modified ReLU Activation functions are supported:
return param.act_type == leakyrelu::kRReLU ? 2 : 1;
})
.set_attr_parser(ParamParser<LeakyReLUParam>)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FInferStorageType", LeakyReLUStorageType)
#endif
.set_attr("FListInputNames",
@@ -190,7 +190,7 @@ The following modified ReLU Activation functions are supported:
.set_attr("FInferShape", LeakyReLUShape)
.set_attr("FInferType", LeakyReLUType)
.set_attr("FCompute", LeakyReLUCompute)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FComputeEx", LeakyReLUComputeExCPU)
#endif
@@ -227,7 +227,7 @@ NNVM_REGISTER_OP(_backward_LeakyReLU)
return param.act_type == leakyrelu::kPReLU ? 2 : 1;
})
.set_attr("TIsBackward", true)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FInferStorageType", BackwardLeakyReLUStorageType)
#endif
.set_attr("FInplaceOption", [](const NodeAttrs& attrs){
@@ -237,7 +237,7 @@ NNVM_REGISTER_OP(_backward_LeakyReLU)
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
.set_attr_parser(ParamParser<LeakyReLUParam>)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FComputeEx", LeakyReLUGradComputeExCPU)
#endif
diff --git a/src/operator/nn/activation.cc b/src/operator/nn/activation.cc
index 622d3464371f..e9c5251404bd 100644
--- a/src/operator/nn/activation.cc
+++ b/src/operator/nn/activation.cc
@@ -27,10 +27,10 @@
#include "./activation-inl.h"
#include "../mshadow_op.h"
#include "../tensor/elemwise_unary_op.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "./mkldnn/mkldnn_base-inl.h"
#include "./mkldnn/mkldnn_ops-inl.h"
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
#include "../operator_common.h"
#include "../../common/utils.h"
@@ -101,7 +101,7 @@ struct ActivationGrad {
}
};
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
static void ActivationComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
@@ -157,7 +157,7 @@ inline static bool BackwardActStorageType(const nnvm::NodeAttrs& attrs,
return MKLDNNStorageType(attrs, dev_mask, SupportMKLDNNAct(param),
dispatch_mode, in_attrs, out_attrs);
}
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
MXNET_OPERATOR_REGISTER_UNARY(Activation)
@@ -174,7 +174,7 @@ The following activation functions are supported:
)code" ADD_FILELINE)
.set_attr_parser(ParamParser<ActivationParam>)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FInferStorageType", ActivationStorageType)
#endif
.set_attr("FListOutputNames",
@@ -182,7 +182,7 @@ The following activation functions are supported:
return std::vector{"output"};
})
.set_attr("FCompute", ActivationCompute)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FComputeEx", ActivationComputeExCPU)
#endif
@@ -196,7 +196,7 @@ NNVM_REGISTER_OP(_backward_Activation)
})
.set_num_outputs(1)
.set_attr("TIsBackward", true)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FInferStorageType", BackwardActStorageType)
#endif
.set_attr("FInferShape", ElemwiseShape<-1, 1>)
@@ -204,13 +204,13 @@ NNVM_REGISTER_OP(_backward_Activation)
.set_attr("FInplaceOption", [](const NodeAttrs& attrs){
return std::vector<std::pair<int, int> >{{0, 0}};
})
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
#endif
.set_attr_parser(ParamParser<ActivationParam>)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FComputeEx", ActivationGradComputeExCPU)
#endif
diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index 2a91a3706794..87456dd59f87 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -28,7 +28,7 @@
#include
#include "../elemwise_op_common.h"
#include "../operator_common.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "./mkldnn/mkldnn_batch_norm-inl.h"
#endif
@@ -433,7 +433,7 @@ static bool BatchNormType(const nnvm::NodeAttrs& attrs,
return true;
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
static inline bool SupportMKLDNNBN(const NDArray &input, const BatchNormParam &param) {
if (mxnet::op::batchnorm::disable_mkl) return false;
const mxnet::TShape shape = input.shape();
@@ -489,7 +489,7 @@ static inline bool BatchNormStorageType(const nnvm::NodeAttrs &attrs,
const BatchNormParam &param = nnvm::get<BatchNormParam>(attrs.parsed);
bool dispatched = false;
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (!dispatched) {
dispatched = MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode,
in_attrs, out_attrs);
@@ -627,11 +627,11 @@ then set ``gamma`` to 1 and its gradient to 0.
.set_attr("FInferType", BatchNormType)
.set_attr("FInferStorageType", BatchNormStorageType)
.set_attr("FCompute", BatchNormCompute)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FComputeEx", BatchNormComputeExCPU)
#endif
.set_attr("FGradient", BatchNormGrad)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
@@ -666,7 +666,7 @@ NNVM_REGISTER_OP(_backward_BatchNorm)
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
.set_attr_parser(ParamParser<BatchNormParam>)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FComputeEx", BatchNormGradComputeExCPU)
#endif
diff --git a/src/operator/nn/concat.cc b/src/operator/nn/concat.cc
index 2328e0a950ff..c50af8fea77d 100644
--- a/src/operator/nn/concat.cc
+++ b/src/operator/nn/concat.cc
@@ -198,14 +198,14 @@ inline static bool ConcatForwardInferStorageType(const nnvm::NodeAttrs& attrs,
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (!dispatched && dev_mask == mshadow::cpu::kDevMask
&& common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)
&& param.dim > 0) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
@@ -213,10 +213,10 @@ inline static bool ConcatForwardInferStorageType(const nnvm::NodeAttrs& attrs,
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (!MKLDNNEnvSet())
*dispatch_mode = DispatchMode::kFComputeFallback;
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
return dispatched;
}
@@ -226,7 +226,7 @@ inline static bool BackwardConcatStorageType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
DispatchMode wanted_mode;
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
const ConcatParam& param = nnvm::get<ConcatParam>(attrs.parsed);
CHECK_EQ(out_attrs->size(), in_attrs->size() - 1);
if (dev_mask == mshadow::cpu::kDevMask
@@ -234,16 +234,16 @@ inline static bool BackwardConcatStorageType(const nnvm::NodeAttrs& attrs,
&& param.dim > 0)
wanted_mode = DispatchMode::kFComputeEx;
else
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
wanted_mode = DispatchMode::kFCompute;
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (!MKLDNNEnvSet())
wanted_mode = DispatchMode::kFComputeFallback;
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
bool SupportMKLDNNConcat(const std::vector<NDArray> &arrs) {
for (auto &arr : arrs) {
if (arr.IsView()) return false;
@@ -256,7 +256,7 @@ bool SupportMKLDNNConcat(const std::vector &arrs) {
}
return true;
}
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
static void ConcatComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext& op_ctx,
const std::vector<NDArray>& inputs,
@@ -269,20 +269,20 @@ static void ConcatComputeExCPU(const nnvm::NodeAttrs& attrs,
if (common::ContainsOnlyStorage(inputs, kCSRStorage) &&
outputs[0].storage_type() == kCSRStorage) {
ConcatCSRImpl(attrs, op_ctx, inputs, req, outputs);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
} else if (SupportMKLDNNConcat(inputs)) {
MKLDNN_OPCHECK_INIT(false, outputs.size(), inputs, outputs);
MKLDNNRun(MKLDNNConcatForward, attrs, op_ctx, inputs, req, outputs);
MKLDNN_OPCHECK_RUN(ConcatCompute, attrs, op_ctx, inputs, req, outputs);
} else if (common::ContainsOnlyStorage(inputs, kDefaultStorage)) {
FallBackCompute(ConcatCompute, attrs, op_ctx, inputs, req, outputs);
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
} else {
LogUnimplementedOp(attrs, op_ctx, inputs, req, outputs);
}
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
static void ConcatGradComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
@@ -296,7 +296,7 @@ static void ConcatGradComputeExCPU(const nnvm::NodeAttrs& attrs,
}
FallBackCompute(ConcatGradCompute, attrs, ctx, inputs, req, outputs);
}
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
struct ConcatGrad {
const char *op_name;
@@ -304,11 +304,11 @@ struct ConcatGrad {
const std::vector<nnvm::NodeEntry>& ograds) const {
CHECK_EQ(ograds.size(), 1);
std::vector<nnvm::NodeEntry> heads(ograds.begin(), ograds.end());
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
for (size_t i = 0; i < n->inputs.size(); i++) {
heads.push_back(n->inputs[i]);
}
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
return MakeGradNode(op_name, n, heads, n->attrs.dict);
}
};
@@ -384,13 +384,13 @@ Example::
[ 5., 5., 8., 8.]]
)code" ADD_FILELINE)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
.set_attr("THasDeterministicOutput", true)
.set_attr("TIsMKLDNN", true)
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
CONCAT_FORWARD_ATTRS
.set_attr("FInferShape", ConcatShape)
.add_argument("data", "NDArray-or-Symbol[]", "List of arrays to concatenate")
@@ -398,7 +398,7 @@ CONCAT_FORWARD_ATTRS
NNVM_REGISTER_OP(_backward_Concat)
.set_num_inputs([](const NodeAttrs& attrs) {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
const ConcatParam& params = nnvm::get<ConcatParam>(attrs.parsed);
return 1 + params.num_args;
#else
@@ -410,17 +410,17 @@ NNVM_REGISTER_OP(_backward_Concat)
return params.num_args;
})
.set_attr_parser(ParamParser<ConcatParam>)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
.set_attr("TIsBackward", true)
.set_attr("FInferStorageType", BackwardConcatStorageType)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FComputeEx", ConcatGradComputeExCPU)
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
.set_attr("FCompute", ConcatGradCompute);
// _rnn_param_concat is a custom concat op with specialized infer_shape,
@@ -428,11 +428,11 @@ NNVM_REGISTER_OP(_backward_Concat)
// unknown shape that can be inferred from output shape.
NNVM_REGISTER_OP(_rnn_param_concat)
.add_alias("_npi_rnn_param_concat")
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
CONCAT_FORWARD_ATTRS
.set_attr("THasDeterministicOutput", true)
.set_attr("FInferShape", RNNParamConcatShape)
diff --git a/src/operator/nn/convolution.cc b/src/operator/nn/convolution.cc
index 05d4cb74318b..556918a572a8 100644
--- a/src/operator/nn/convolution.cc
+++ b/src/operator/nn/convolution.cc
@@ -27,10 +27,10 @@
#include "./convolution-inl.h"
#include "../elemwise_op_common.h"
#include "../operator_common.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "./mkldnn/mkldnn_base-inl.h"
#include "./mkldnn/mkldnn_ops-inl.h"
-#endif // MXNET_USE_MKLDNN
+#endif // MXNET_USE_ONEDNN
namespace mxnet {
namespace op {
@@ -48,7 +48,7 @@ static inline std::vector ListArguments(const ConvolutionParam& par
}
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
static void ConvolutionComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
@@ -302,7 +302,7 @@ static bool ConvolutionType(const nnvm::NodeAttrs& attrs,
return true;
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
inline static bool ConvStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
@@ -491,11 +491,11 @@ There are other options to tune the performance.
})
.set_attr("FInferShape", ConvolutionShape)
.set_attr("FInferType", ConvolutionType)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FInferStorageType", ConvStorageType)
#endif
.set_attr("FCompute", ConvolutionCompute)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FComputeEx", ConvolutionComputeExCPU)
#endif
@@ -519,14 +519,14 @@ NNVM_REGISTER_OP(_backward_Convolution)
return params.no_bias ? 2 : 3;
})
.set_attr("TIsBackward", true)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FInferStorageType", BackwardConvStorageType)
#endif
.set_attr("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
.set_attr_parser(ConvolutionParamParser)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FComputeEx", ConvolutionGradComputeExCPU)
#endif
diff --git a/src/operator/nn/deconvolution.cc b/src/operator/nn/deconvolution.cc
index 08d6306730ef..6dc8b319ce97 100644
--- a/src/operator/nn/deconvolution.cc
+++ b/src/operator/nn/deconvolution.cc
@@ -27,15 +27,15 @@
#include "./deconvolution-inl.h"
#include "../operator_common.h"
#include "../../common/utils.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "./mkldnn/mkldnn_base-inl.h"
#include "./mkldnn/mkldnn_ops-inl.h"
-#endif // MXNET_USE_MKLDNN
+#endif // MXNET_USE_ONEDNN
namespace mxnet {
namespace op {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
static void DeconvolutionComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
@@ -445,7 +445,7 @@ NNVM_REGISTER_OP(Deconvolution)
.set_attr("THasDeterministicOutput", true)
.set_attr("FCompute", DeconvolutionCompute)
.set_attr("FGradient", DeconvolutionGrad{"_backward_Deconvolution"})
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FInferStorageType", DeconvStorageType)
.set_attr("FComputeEx", DeconvolutionComputeExCPU)
@@ -470,7 +470,7 @@ NNVM_REGISTER_OP(_backward_Deconvolution)
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
.set_attr_parser(DeconvolutionParamParser)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FInferStorageType", BackwardDeconvStorageType)
.set_attr("FComputeEx", DeconvolutionGradComputeExCPU)
diff --git a/src/operator/nn/fully_connected.cc b/src/operator/nn/fully_connected.cc
index 7b243f1b2eb2..32bddf2e1880 100644
--- a/src/operator/nn/fully_connected.cc
+++ b/src/operator/nn/fully_connected.cc
@@ -95,7 +95,7 @@ void FullyConnectedComputeExCPU(const nnvm::NodeAttrs& attrs,
valid_bias = inputs[2].storage_type() == kDefaultStorage ||
inputs[2].storage_type() == kRowSparseStorage;
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (common::ContainsOnlyStorage(inputs, kDefaultStorage) &&
common::ContainsOnlyStorage(outputs, kDefaultStorage)) {
if (SupportMKLDNNFC(inputs[0])) {
@@ -139,7 +139,7 @@ void FullyConnectedComputeExCPU(const nnvm::NodeAttrs& attrs,
#endif
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
void FullyConnectedGradComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
@@ -207,7 +207,7 @@ static bool FCStorageType(const nnvm::NodeAttrs& attrs,
dispatched = storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (!MKLDNNEnvSet())
*dispatch_mode = DispatchMode::kFComputeFallback;
#endif
@@ -239,7 +239,7 @@ static bool BackwardFCStorageType(const nnvm::NodeAttrs& attrs,
dispatched = storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (!MKLDNNEnvSet())
*dispatch_mode = DispatchMode::kFComputeFallback;
#endif
@@ -301,7 +301,7 @@ If ``no_bias`` is set to be true, then the ``bias`` term is ignored.
[](const NodeAttrs& attrs) {
return std::vector{"output"};
})
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
@@ -334,7 +334,7 @@ NNVM_REGISTER_OP(_backward_FullyConnected)
.set_attr("FGradient", FullyConnectedGradGrad{"_backward_backward_FullyConnected"})
.set_attr("FInferStorageType", BackwardFCStorageType)
.set_attr_parser(ParamParser<FullyConnectedParam>)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FComputeEx", FullyConnectedGradComputeExCPU)
#endif
diff --git a/src/operator/nn/log_softmax.cc b/src/operator/nn/log_softmax.cc
index 2a1d1b3323ed..6275d54b1653 100644
--- a/src/operator/nn/log_softmax.cc
+++ b/src/operator/nn/log_softmax.cc
@@ -26,7 +26,7 @@
#include "../tensor/elemwise_unary_op.h"
#include "../tensor/elemwise_binary_op.h"
#include "../operator_common.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "mkldnn/mkldnn_base-inl.h"
#include "mkldnn/mkldnn_ops-inl.h"
#endif
@@ -34,7 +34,7 @@
namespace mxnet {
namespace op {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
static void LogSoftmaxComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
@@ -125,7 +125,7 @@ Examples::
return std::vector{"data"};
})
.set_attr("FCompute", SoftmaxCompute)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FComputeEx", LogSoftmaxComputeExCPU)
.set_attr("FInferStorageType", LogSoftmaxStorageType)
@@ -151,7 +151,7 @@ NNVM_REGISTER_OP(_backward_log_softmax)
.set_attr("FInplaceOption", SoftmaxGradOpInplaceOption)
.add_argument("args", "NDArray-or-Symbol[]", "Positional input arguments")
.set_attr_parser(ParamParser<SoftmaxParam>)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FComputeEx", LogSoftmaxGradComputeExCPU)
.set_attr("FInferStorageType", LogSoftmaxGradStorageType)
diff --git a/src/operator/nn/lrn.cc b/src/operator/nn/lrn.cc
index 1ac384044f69..920f34ad393a 100644
--- a/src/operator/nn/lrn.cc
+++ b/src/operator/nn/lrn.cc
@@ -26,7 +26,7 @@
#include "./lrn-inl.h"
#include "../operator_common.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "./mkldnn/mkldnn_lrn-inl.h"
#include "./mkldnn/mkldnn_base-inl.h"
#endif
@@ -82,7 +82,7 @@ struct LRNGrad {
}
};
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
bool LRNForwardInferStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
@@ -163,7 +163,7 @@ number of kernels in the layer.
.set_attr_parser(ParamParser<LRNParam>)
.set_attr<mxnet::FInferShape>("FInferShape", LRNShape)
.set_attr<nnvm::FInferType>("FInferType", LRNType)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FInferStorageType", LRNForwardInferStorageType)
#endif
.set_attr("FListInputNames",
@@ -175,7 +175,7 @@ number of kernels in the layer.
return std::vector{"output", "tmp_norm"};
})
.set_attr("FCompute", LRNCompute)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FComputeEx", LRNComputeExCPU)
#endif
@@ -187,11 +187,11 @@ NNVM_REGISTER_OP(_backward_LRN)
.set_num_inputs(3)
.set_num_outputs(1)
.set_attr_parser(ParamParser<LRNParam>)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FInferStorageType", LRNBackwardInferStorageType)
#endif
.set_attr("TIsBackward", true)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("TIsMKLDNN", true)
.set_attr("FComputeEx", LRNGradComputeExCPU)
// Native compute requires norm while MKLDNN does not so cannot be compared in debug mode
diff --git a/src/operator/nn/mkldnn/mkldnn_act-inl.h b/src/operator/nn/mkldnn/mkldnn_act-inl.h
index 70bf16a14369..0c6e856d4b24 100644
--- a/src/operator/nn/mkldnn/mkldnn_act-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_act-inl.h
@@ -28,7 +28,7 @@
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_ACT_INL_H_
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include
#include "../activation-inl.h"
@@ -108,5 +108,5 @@ struct hash {
};
} // namespace std
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_ACT_INL_H_
diff --git a/src/operator/nn/mkldnn/mkldnn_act.cc b/src/operator/nn/mkldnn/mkldnn_act.cc
index e76a0629cc01..761ab86a2c7f 100644
--- a/src/operator/nn/mkldnn/mkldnn_act.cc
+++ b/src/operator/nn/mkldnn/mkldnn_act.cc
@@ -23,7 +23,7 @@
* \author Da Zheng
*/
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include
diff --git a/src/operator/nn/mkldnn/mkldnn_base-inl.h b/src/operator/nn/mkldnn/mkldnn_base-inl.h
index 25cd2514f553..9f8e4eb25d9b 100644
--- a/src/operator/nn/mkldnn/mkldnn_base-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_base-inl.h
@@ -46,7 +46,7 @@
#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_BASE_INL_H_
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_BASE_INL_H_
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include
#include
diff --git a/src/operator/nn/mkldnn/mkldnn_base.cc b/src/operator/nn/mkldnn/mkldnn_base.cc
index 7aeb21b494ea..d4cb9780ab28 100644
--- a/src/operator/nn/mkldnn/mkldnn_base.cc
+++ b/src/operator/nn/mkldnn/mkldnn_base.cc
@@ -17,7 +17,7 @@
* under the License.
*/
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include "./mkldnn_base-inl.h"
@@ -605,7 +605,7 @@ bool MKLDNNStorageType(const nnvm::NodeAttrs &attrs,
if (v == -1) v = kDefaultStorage;
DispatchMode wanted_mode;
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
wanted_mode = DispatchMode::kFComputeFallback;
else if (dev_mask == mshadow::cpu::kDevMask && support_mkldnn)
diff --git a/src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h b/src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h
index 75c7c4dbf38a..963ed2c5c475 100644
--- a/src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_batch_norm-inl.h
@@ -26,7 +26,7 @@
#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_BATCH_NORM_INL_H_
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_BATCH_NORM_INL_H_
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include
#include
@@ -479,5 +479,5 @@ void MKLDNNBatchNormBackward(const nnvm::NodeAttrs &attrs, const OpContext &ctx,
}
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN
+#endif // MXNET_USE_ONEDNN
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_BATCH_NORM_INL_H_
diff --git a/src/operator/nn/mkldnn/mkldnn_concat-inl.h b/src/operator/nn/mkldnn/mkldnn_concat-inl.h
index ff47ef35f98f..cd1a5597cd14 100644
--- a/src/operator/nn/mkldnn/mkldnn_concat-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_concat-inl.h
@@ -26,7 +26,7 @@
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_CONCAT_INL_H_
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include
#include "../concat-inl.h"
@@ -74,5 +74,5 @@ static MKLDNNConcatFwd &GetConcatForward(
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_CONCAT_INL_H_
diff --git a/src/operator/nn/mkldnn/mkldnn_concat.cc b/src/operator/nn/mkldnn/mkldnn_concat.cc
index aa30ffc557a1..946008be7025 100644
--- a/src/operator/nn/mkldnn/mkldnn_concat.cc
+++ b/src/operator/nn/mkldnn/mkldnn_concat.cc
@@ -23,7 +23,7 @@
* \author
*/
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "mkldnn_concat-inl.h"
namespace mxnet {
@@ -101,4 +101,4 @@ void MKLDNNConcatBackward(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
diff --git a/src/operator/nn/mkldnn/mkldnn_convolution-inl.h b/src/operator/nn/mkldnn/mkldnn_convolution-inl.h
index 84299eeed2cf..dfa365fa57eb 100644
--- a/src/operator/nn/mkldnn/mkldnn_convolution-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_convolution-inl.h
@@ -25,7 +25,7 @@
#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_CONVOLUTION_INL_H_
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_CONVOLUTION_INL_H_
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include
@@ -146,5 +146,5 @@ class MKLDNNConvBackward {
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_CONVOLUTION_INL_H_
diff --git a/src/operator/nn/mkldnn/mkldnn_convolution.cc b/src/operator/nn/mkldnn/mkldnn_convolution.cc
index eca7be240d47..325dfbed40d7 100644
--- a/src/operator/nn/mkldnn/mkldnn_convolution.cc
+++ b/src/operator/nn/mkldnn/mkldnn_convolution.cc
@@ -23,7 +23,7 @@
* \author Da Zheng
*/
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "../convolution-inl.h"
#include "./mkldnn_ops-inl.h"
@@ -556,4 +556,4 @@ void MKLDNNConvolutionBackward(const nnvm::NodeAttrs& attrs, const OpContext &ct
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
diff --git a/src/operator/nn/mkldnn/mkldnn_copy.cc b/src/operator/nn/mkldnn/mkldnn_copy.cc
index a67847f9c882..8f8ee663e70b 100644
--- a/src/operator/nn/mkldnn/mkldnn_copy.cc
+++ b/src/operator/nn/mkldnn/mkldnn_copy.cc
@@ -26,7 +26,7 @@
#include "./mkldnn_ops-inl.h"
#include "./mkldnn_base-inl.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
namespace mxnet {
namespace op {
diff --git a/src/operator/nn/mkldnn/mkldnn_deconvolution.cc b/src/operator/nn/mkldnn/mkldnn_deconvolution.cc
index cdf3639cd86f..01d7c3bd3645 100644
--- a/src/operator/nn/mkldnn/mkldnn_deconvolution.cc
+++ b/src/operator/nn/mkldnn/mkldnn_deconvolution.cc
@@ -22,7 +22,7 @@
* \brief
*/
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "../deconvolution-inl.h"
#include "./mkldnn_base-inl.h"
@@ -525,4 +525,4 @@ void MKLDNNDeconvolutionBackward(const nnvm::NodeAttrs &attrs,
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
diff --git a/src/operator/nn/mkldnn/mkldnn_fully_connected-inl.h b/src/operator/nn/mkldnn/mkldnn_fully_connected-inl.h
index 1c9396e890f3..a91a4f6e412e 100644
--- a/src/operator/nn/mkldnn/mkldnn_fully_connected-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_fully_connected-inl.h
@@ -27,7 +27,7 @@
#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_FULLY_CONNECTED_INL_H_
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_FULLY_CONNECTED_INL_H_
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include
@@ -127,5 +127,5 @@ void MKLDNNFCForwardFullFeature(const MKLDNNFCFullParam &param,
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_FULLY_CONNECTED_INL_H_
diff --git a/src/operator/nn/mkldnn/mkldnn_fully_connected.cc b/src/operator/nn/mkldnn/mkldnn_fully_connected.cc
index 6e8a1505e15e..8e0a6e6b3805 100644
--- a/src/operator/nn/mkldnn/mkldnn_fully_connected.cc
+++ b/src/operator/nn/mkldnn/mkldnn_fully_connected.cc
@@ -24,7 +24,7 @@
* \author Da Zheng, Ciyong Chen
*/
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "mkldnn_fully_connected-inl.h"
namespace mxnet {
@@ -326,4 +326,4 @@ void MKLDNNFCBackward(const nnvm::NodeAttrs& attrs, const OpContext &ctx,
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
diff --git a/src/operator/nn/mkldnn/mkldnn_log_softmax.cc b/src/operator/nn/mkldnn/mkldnn_log_softmax.cc
index 0d992b252fa8..eb0ff379cdb5 100644
--- a/src/operator/nn/mkldnn/mkldnn_log_softmax.cc
+++ b/src/operator/nn/mkldnn/mkldnn_log_softmax.cc
@@ -26,7 +26,7 @@
#include "./mkldnn_ops-inl.h"
#include "./mkldnn_base-inl.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
namespace mxnet {
namespace op {
diff --git a/src/operator/nn/mkldnn/mkldnn_lrn-inl.h b/src/operator/nn/mkldnn/mkldnn_lrn-inl.h
index 6f7a1d917734..fa08c52ca3a6 100644
--- a/src/operator/nn/mkldnn/mkldnn_lrn-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_lrn-inl.h
@@ -25,7 +25,7 @@
#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_LRN_INL_H_
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_LRN_INL_H_
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include
#include
@@ -266,5 +266,5 @@ void MKLDNNLRNBackward(const nnvm::NodeAttrs &attrs, const OpContext &ctx,
}
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_LRN_INL_H__
diff --git a/src/operator/nn/mkldnn/mkldnn_ops-inl.h b/src/operator/nn/mkldnn/mkldnn_ops-inl.h
index 15c2040da85a..890e111de914 100644
--- a/src/operator/nn/mkldnn/mkldnn_ops-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_ops-inl.h
@@ -36,7 +36,7 @@
#include
#include
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
namespace mxnet {
@@ -145,5 +145,5 @@ void MKLDNNReshapeForward(const nnvm::NodeAttrs& attrs,
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_OPS_INL_H_
diff --git a/src/operator/nn/mkldnn/mkldnn_pooling-inl.h b/src/operator/nn/mkldnn/mkldnn_pooling-inl.h
index ae1e23ed4363..b475ba1342ba 100644
--- a/src/operator/nn/mkldnn/mkldnn_pooling-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_pooling-inl.h
@@ -24,7 +24,7 @@
#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_POOLING_INL_H_
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_POOLING_INL_H_
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include
@@ -160,5 +160,5 @@ MKLDNNPoolingFwd &GetPoolingFwd(const PoolingParam &param,
const NDArray &output);
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_POOLING_INL_H_
diff --git a/src/operator/nn/mkldnn/mkldnn_pooling.cc b/src/operator/nn/mkldnn/mkldnn_pooling.cc
index bea09bf11165..b1f8fd397cf2 100644
--- a/src/operator/nn/mkldnn/mkldnn_pooling.cc
+++ b/src/operator/nn/mkldnn/mkldnn_pooling.cc
@@ -23,7 +23,7 @@
* \author Tao Lv
*/
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "./mkldnn_pooling-inl.h"
@@ -399,4 +399,4 @@ void MKLDNNPoolingGradCompute(const OpContext &ctx, const PoolingParam &param,
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
diff --git a/src/operator/nn/mkldnn/mkldnn_reshape-inl.h b/src/operator/nn/mkldnn/mkldnn_reshape-inl.h
index c89e4585e85d..48dd93485792 100644
--- a/src/operator/nn/mkldnn/mkldnn_reshape-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_reshape-inl.h
@@ -26,7 +26,7 @@
#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_RESHAPE_INL_H_
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_RESHAPE_INL_H_
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include "mkldnn_base-inl.h"
#include "../../tensor/matrix_op-inl.h"
@@ -57,5 +57,5 @@ MKLDNNReshapeFwd &GetReshapeForward(const OpReqType &req, const NDArray &input,
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_RESHAPE_INL_H_
diff --git a/src/operator/nn/mkldnn/mkldnn_reshape.cc b/src/operator/nn/mkldnn/mkldnn_reshape.cc
index 81d96bb70670..45e5bcb6be53 100644
--- a/src/operator/nn/mkldnn/mkldnn_reshape.cc
+++ b/src/operator/nn/mkldnn/mkldnn_reshape.cc
@@ -23,7 +23,7 @@
* \author Tao Lv
*/
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "../../tensor/elemwise_unary_op.h"
#include "./mkldnn_ops-inl.h"
#include "./mkldnn_base-inl.h"
diff --git a/src/operator/nn/mkldnn/mkldnn_rnn-inl.h b/src/operator/nn/mkldnn/mkldnn_rnn-inl.h
index a3c74402fe45..8652982ec789 100644
--- a/src/operator/nn/mkldnn/mkldnn_rnn-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_rnn-inl.h
@@ -27,7 +27,7 @@
#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_RNN_INL_H_
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_RNN_INL_H_
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include "../../rnn-inl.h"
@@ -467,7 +467,7 @@ class MKLDNNRnnOp {
};
inline bool SupportMKLDNNRnn(const int input_dtype) {
- if (input_dtype == mshadow::kFloat32 && dmlc::GetEnv("MXNET_USE_MKLDNN_RNN", 1)) {
+ if (input_dtype == mshadow::kFloat32 && dmlc::GetEnv("MXNET_USE_ONEDNN_RNN", 1)) {
return true;
}
return false;
@@ -481,5 +481,5 @@ inline bool SupportMKLDNNRnn(const RNNParam &param, const int input_dtype) {
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_RNN_INL_H_
diff --git a/src/operator/nn/mkldnn/mkldnn_rnn.cc b/src/operator/nn/mkldnn/mkldnn_rnn.cc
index c33ad484ddda..d6de8b20524c 100644
--- a/src/operator/nn/mkldnn/mkldnn_rnn.cc
+++ b/src/operator/nn/mkldnn/mkldnn_rnn.cc
@@ -24,7 +24,7 @@
* \author Zixuan Wei
*/
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include "./mkldnn_rnn-inl.h"
@@ -1252,4 +1252,4 @@ void MKLDNNRnnOp::Backward(const OpContext& ctx,
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
diff --git a/src/operator/nn/mkldnn/mkldnn_slice-inl.h b/src/operator/nn/mkldnn/mkldnn_slice-inl.h
index 0bb432da9f7f..0cc82578a5c5 100644
--- a/src/operator/nn/mkldnn/mkldnn_slice-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_slice-inl.h
@@ -26,7 +26,7 @@
#ifndef MXNET_OPERATOR_NN_MKLDNN_MKLDNN_SLICE_INL_H_
#define MXNET_OPERATOR_NN_MKLDNN_MKLDNN_SLICE_INL_H_
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include
@@ -62,5 +62,5 @@ void MKLDNNSlice(const nnvm::NodeAttrs& attrs, const OpContext& ctx,
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_NN_MKLDNN_MKLDNN_SLICE_INL_H_
diff --git a/src/operator/nn/mkldnn/mkldnn_slice.cc b/src/operator/nn/mkldnn/mkldnn_slice.cc
index 26d4f096bef1..0efecc82a590 100644
--- a/src/operator/nn/mkldnn/mkldnn_slice.cc
+++ b/src/operator/nn/mkldnn/mkldnn_slice.cc
@@ -23,7 +23,7 @@
* \author Zhiyuan Huang
*/
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "./mkldnn_ops-inl.h"
#include "./mkldnn_base-inl.h"
@@ -105,4 +105,4 @@ void MKLDNNSlice(const nnvm::NodeAttrs& attrs, const OpContext& ctx,
} // namespace op
} // namespace mxnet
-#endif // MXNET_USE_MKLDNN == 1
+#endif // MXNET_USE_ONEDNN == 1
diff --git a/src/operator/nn/mkldnn/mkldnn_softmax.cc b/src/operator/nn/mkldnn/mkldnn_softmax.cc
index e96ab6c20ca3..f7a033005da4 100644
--- a/src/operator/nn/mkldnn/mkldnn_softmax.cc
+++ b/src/operator/nn/mkldnn/mkldnn_softmax.cc
@@ -27,7 +27,7 @@
#include "./mkldnn_ops-inl.h"
#include "./mkldnn_base-inl.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
namespace mxnet {
namespace op {
diff --git a/src/operator/nn/mkldnn/mkldnn_sum.cc b/src/operator/nn/mkldnn/mkldnn_sum.cc
index 747dde69ce13..e76f845c5d06 100644
--- a/src/operator/nn/mkldnn/mkldnn_sum.cc
+++ b/src/operator/nn/mkldnn/mkldnn_sum.cc
@@ -31,7 +31,7 @@
namespace mxnet {
namespace op {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
void MKLDNNSum(const mkldnn::memory &arr1,
const mkldnn::memory &arr2,
const mkldnn::memory &out) {
diff --git a/src/operator/nn/mkldnn/mkldnn_transpose.cc b/src/operator/nn/mkldnn/mkldnn_transpose.cc
index 23e385dc1469..9b3c5dec879e 100644
--- a/src/operator/nn/mkldnn/mkldnn_transpose.cc
+++ b/src/operator/nn/mkldnn/mkldnn_transpose.cc
@@ -23,7 +23,7 @@
* \author Tao Lv
*/
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include
#include "../../tensor/matrix_op-inl.h"
diff --git a/src/operator/nn/pooling.cc b/src/operator/nn/pooling.cc
index d2edcc512b29..39bd32b5e473 100644
--- a/src/operator/nn/pooling.cc
+++ b/src/operator/nn/pooling.cc
@@ -25,10 +25,10 @@
*/
#include "../elemwise_op_common.h"
#include "./pooling-inl.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
#include "./mkldnn/mkldnn_pooling-inl.h"
#include "./mkldnn/mkldnn_base-inl.h"
-#endif // MXNET_USE_MKLDNN
+#endif // MXNET_USE_ONEDNN
namespace mxnet {
namespace op {
@@ -58,7 +58,7 @@ void PoolingParamParser(nnvm::NodeAttrs *attrs) {
}
int GetNumOutputs(const PoolingParam &param) {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
return MKLDNNRequireWorkspace(param) && SupportMKLDNNPooling(param) ? 2 : 1;
#else
return 1;
@@ -66,7 +66,7 @@ int GetNumOutputs(const PoolingParam &param) {
}
int GetNumBackInputs(const PoolingParam &param) {
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
return MKLDNNRequireWorkspace(param) && SupportMKLDNNPooling(param) ? 5 : 3;
#else
return 3;
@@ -77,7 +77,7 @@ static bool PoolingType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
out_attrs->at(0) = in_attrs->at(0);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
const PoolingParam &param = nnvm::get<PoolingParam>(attrs.parsed);
if (MKLDNNRequireWorkspace(param) && SupportMKLDNNPooling(param)) {
CHECK_GT(out_attrs->size(), 1U);
@@ -145,7 +145,7 @@ static bool PoolingShape(const nnvm::NodeAttrs &attrs,
oshape[i] = 1;
out_shape->clear();
out_shape->push_back(oshape); // save output shape
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (MKLDNNRequireWorkspace(param) && SupportMKLDNNPooling(param))
out_shape->push_back(oshape); // for workspace
#endif
@@ -182,7 +182,7 @@ static bool PoolingShape(const nnvm::NodeAttrs &attrs,
ConvertLayout(oshape_ncw, mshadow::kNCW, mshadow::kNWC) : oshape_ncw;
out_shape->clear();
out_shape->push_back(oshape); // save output shape
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (MKLDNNRequireWorkspace(param) && SupportMKLDNNPooling(param))
out_shape->push_back(oshape); // for workspace
#endif
@@ -220,7 +220,7 @@ static bool PoolingShape(const nnvm::NodeAttrs &attrs,
ConvertLayout(oshape_nchw, mshadow::kNCHW, mshadow::kNHWC) : oshape_nchw;
out_shape->clear();
out_shape->push_back(oshape); // save output shape
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (MKLDNNRequireWorkspace(param) && SupportMKLDNNPooling(param))
out_shape->push_back(oshape); // for workspace
#endif
@@ -262,7 +262,7 @@ static bool PoolingShape(const nnvm::NodeAttrs &attrs,
ConvertLayout(oshape_ncdhw, mshadow::kNCDHW, mshadow::kNDHWC) : oshape_ncdhw;
out_shape->clear();
out_shape->push_back(oshape); // save output shape
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
if (MKLDNNRequireWorkspace(param) && SupportMKLDNNPooling(param))
out_shape->push_back(oshape); // for workspace
#endif
@@ -271,7 +271,7 @@ static bool PoolingShape(const nnvm::NodeAttrs &attrs,
return true;
}
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
void PoolingComputeExCPU(const nnvm::NodeAttrs &attrs, const OpContext &ctx,
const std::vector &inputs,
const std::vector &req,
@@ -424,7 +424,7 @@ For each window ``X``, the mathematical expression for Lp pooling is:
const PoolingParam &param = nnvm::get<PoolingParam>(attrs.parsed);
return GetNumOutputs(param);
})
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FNumVisibleOutputs",
[](const NodeAttrs& attrs) { return 1; })
#endif
@@ -441,13 +441,13 @@ For each window ``X``, the mathematical expression for Lp pooling is:
return std::vector{"output"};
})
.set_attr_parser(PoolingParamParser)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_ONEDNN == 1
.set_attr("FInferStorageType", PoolingStorageType)
#endif
.set_attr("FInferType", PoolingType)
.set_attr("FInferShape", PoolingShape)
.set_attr("FCompute", PoolingCompute