This repository was archived by the owner on Nov 17, 2023. It is now read-only.

Commit be72ba1

Change inner mxnet flags nomenclature for oneDNN library
This change includes:
* changing the MXNET_USE_MKLDNN flag name to MXNET_USE_ONEDNN
* changing the USE_MKLDNN flag name to USE_ONEDNN
* changing the 3rdparty/mkldnn folder name to 3rdparty/onednn
* changing the include/mkldnn folder name to include/onednn
* changing MKLDNN occurrences in build and documentation files to ONEDNN
* adding Bartosz Kuncer to the contributors list
1 parent bb6c5ac commit be72ba1
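The MXNET_USE_MKLDNN → MXNET_USE_ONEDNN rename affects the compile-time guards around oneDNN-specific code. A minimal sketch of the guard pattern after this commit — the helper function and the guarded include are illustrative, not code taken from this commit:

#if MXNET_USE_ONEDNN == 1   // formerly: #if MXNET_USE_MKLDNN == 1
#include "dnnl.hpp"         // oneDNN C++ API header shipped in 3rdparty/onednn
#endif

// Illustrative helper: reports whether this build enabled the oneDNN backend.
inline bool BuiltWithOneDNN() {
#if MXNET_USE_ONEDNN == 1
  return true;
#else
  return false;
#endif
}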


224 files changed: +531 / −530 lines


.gitignore

Lines changed: 2 additions & 2 deletions

@@ -171,5 +171,5 @@ coverage.xml
 cmake_options.yml
 
 # header file generated at compile time
-include/mkldnn/oneapi/dnnl/dnnl_version.h
-include/mkldnn/oneapi/dnnl/dnnl_config.h
+include/onednn/oneapi/dnnl/dnnl_version.h
+include/onednn/oneapi/dnnl/dnnl_config.h

.gitmodules

Lines changed: 3 additions & 3 deletions

@@ -10,9 +10,6 @@
 [submodule "3rdparty/googletest"]
 	path = 3rdparty/googletest
 	url = https://github.com/google/googletest.git
-[submodule "3rdparty/mkldnn"]
-	path = 3rdparty/mkldnn
-	url = https://github.com/oneapi-src/oneDNN.git
 [submodule "3rdparty/tvm"]
 	path = 3rdparty/tvm
 	url = https://github.com/apache/incubator-tvm.git
@@ -28,3 +25,6 @@
 [submodule "3rdparty/intgemm"]
 	path = 3rdparty/intgemm
 	url = https://github.com/kpu/intgemm
+[submodule "3rdparty/onednn"]
+	path = 3rdparty/onednn
+	url = https://github.com/oneapi-src/oneDNN

CMakeLists.txt

Lines changed: 13 additions & 13 deletions

@@ -62,9 +62,9 @@ option(USE_F16C "Build with x86 F16C instruction support" ON) # autodetects supp
 option(USE_LAPACK "Build with lapack support" ON)
 option(USE_MKL_LAYERNORM "Use layer normalization from MKL, which is currently slower than internal. No effect unless USE_BLAS=MKL (or mkl)." OFF)
 if((NOT APPLE) AND (NOT MSVC) AND (CMAKE_HOST_SYSTEM_PROCESSOR STREQUAL "x86_64") AND (NOT CMAKE_CROSSCOMPILING))
-  option(USE_MKLDNN "Build with MKL-DNN support" ON)
+  option(USE_ONEDNN "Build with ONEDNN support" ON)
 else()
-  option(USE_MKLDNN "Build with MKL-DNN support" OFF)
+  option(USE_ONEDNN "Build with ONEDNN support" OFF)
 endif()
 cmake_dependent_option(USE_INTGEMM "Build with x86_64 intgemm library for low-precision multiplication" ON "CMAKE_SYSTEM_PROCESSOR STREQUAL x86_64" OFF)
 if(NOT MSVC)
@@ -257,7 +257,7 @@ endif()
 if(USE_MKL_LAYERNORM)
   add_definitions(-DMXNET_USE_MKL_LAYERNORM=1)
 endif()
-if(USE_MKLDNN)
+if(USE_ONEDNN)
   # CPU architecture (e.g., C5) can't run on another architecture (e.g., g3).
   if(MSVC)
     set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /EHsc")
@@ -272,7 +272,7 @@ if(USE_MKLDNN)
   endif()
 endif()
 
-  function(load_mkldnn)
+  function(load_onednn)
     set(MKLDNN_BUILD_TESTS OFF CACHE INTERNAL "" FORCE)
     set(MKLDNN_BUILD_EXAMPLES OFF CACHE INTERNAL "" FORCE)
     set(MKLDNN_ARCH_OPT_FLAGS "" CACHE INTERNAL "" FORCE)
@@ -285,13 +285,13 @@ if(USE_MKLDNN)
      set(MKLDNN_CPU_RUNTIME SEQ CACHE INTERNAL "" FORCE)
     endif()
 
-    set(CMAKE_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/mkldnn")
-    add_subdirectory(3rdparty/mkldnn)
+    set(CMAKE_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}/onednn")
+    add_subdirectory(3rdparty/onednn)
   endfunction()
-  load_mkldnn()
-  include_directories(3rdparty/mkldnn/include)
-  include_directories(${PROJECT_BINARY_DIR}/3rdparty/mkldnn/include)
-  add_definitions(-DMXNET_USE_MKLDNN=1)
+  load_onednn()
+  include_directories(3rdparty/onednn/include)
+  include_directories(${PROJECT_BINARY_DIR}/3rdparty/onednn/include)
+  add_definitions(-DMXNET_USE_ONEDNN=1)
   list(APPEND mxnet_LINKER_LIBS dnnl)
   set_target_properties(dnnl PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
 endif()
@@ -836,12 +836,12 @@ if(USE_DIST_KVSTORE)
   set_target_properties(pslite PROPERTIES CXX_CLANG_TIDY "") # don't lint 3rdparty dependency
 endif()
 
-if(USE_MKLDNN)
+if(USE_ONEDNN)
   add_custom_command(TARGET mxnet POST_BUILD
     COMMAND ${CMAKE_COMMAND} -E copy
-    ${CMAKE_BINARY_DIR}/3rdparty/mkldnn/include/oneapi/dnnl/dnnl_config.h ${CMAKE_SOURCE_DIR}/include/mkldnn/oneapi/dnnl/
+    ${CMAKE_BINARY_DIR}/3rdparty/onednn/include/oneapi/dnnl/dnnl_config.h ${CMAKE_SOURCE_DIR}/include/onednn/oneapi/dnnl/
     COMMAND ${CMAKE_COMMAND} -E copy
-    ${CMAKE_BINARY_DIR}/3rdparty/mkldnn/include/oneapi/dnnl/dnnl_version.h ${CMAKE_SOURCE_DIR}/include/mkldnn/oneapi/dnnl/)
+    ${CMAKE_BINARY_DIR}/3rdparty/onednn/include/oneapi/dnnl/dnnl_version.h ${CMAKE_SOURCE_DIR}/include/onednn/oneapi/dnnl/)
 endif()
 
 if(USE_INTGEMM)
CONTRIBUTORS.md

Lines changed: 1 addition & 0 deletions

@@ -289,6 +289,7 @@ List of Contributors
 * [Nikolay Ulmasov](https://github.com/r3stl355)
 * [Paweł Głomski](https://github.com/PawelGlomski-Intel)
 * [Andrzej Kotlowski](https://github.com/anko-intel)
+* [Bartosz Kuncer](https://github.com/bartekkuncer)
 
 Label Bot
 ---------

LICENSE

Lines changed: 8 additions & 8 deletions

@@ -226,12 +226,12 @@
     3rdparty/tvm/3rdparty/dmlc-core
     3rdparty/tvm/3rdparty/dlpack
     3rdparty/ps-lite
-    3rdparty/mkldnn
+    3rdparty/onednn
     3rdparty/googletest/googlemock/scripts/generator
     3rdparty/onnx-tensorrt/third_party/onnx/third_party/benchmark
-    3rdparty/mkldnn/tests/benchdnn (Copy of the License available at top of current file)
+    3rdparty/onednn/tests/benchdnn (Copy of the License available at top of current file)
     src/operator/special_functions-inl.h Cephes Library Functions (Copy of the License available at top of current file)
-    3rdparty/mkldnn/doc/assets/mathjax (Copy of the License available at top of current file)
+    3rdparty/onednn/doc/assets/mathjax (Copy of the License available at top of current file)
     docs/python_docs/themes/mx-theme/mxtheme/static/material-design-icons-3.0.1 (Copy of the License available at top of current file)
     docs/python_docs/themes/mx-theme/mxtheme/static/font/Roboto (Copy of the License available at top of current file)
     3rdparty/tvm/3rdparty/bfloat16/bfloat16.cc (Copy of the License available at top of current file)
@@ -256,10 +256,10 @@
     3-clause BSD license
     =======================================================================================
 
-    3rdparty/mkldnn/src/cpu/x64/xbyak
-    3rdparty/mkldnn/tests/gtests/gtest
-    3rdparty/mkldnn/cmake/FindOpenCL.cmake (Copy of the License available at licenses/BSD3-cmake)
-    3rdparty/mkldnn/src/cpu/x64/jit_utils/jitprofiling/
+    3rdparty/onednn/src/cpu/x64/xbyak
+    3rdparty/onednn/tests/gtests/gtest
+    3rdparty/onednn/cmake/FindOpenCL.cmake (Copy of the License available at licenses/BSD3-cmake)
+    3rdparty/onednn/src/cpu/x64/jit_utils/jitprofiling/
     3rdparty/onnx-tensorrt/third_party/onnx/third_party/pybind11/tools/FindPythonLibsNew.cmake
     3rdparty/ctc_include/contrib/moderngpu
     3rdparty/nvidia_cub
@@ -333,7 +333,7 @@
     =======================================================================================
 
     3rdparty/intgemm/test/3rd_party/catch.hpp (Copy of the License available at licenses/BOOST1_0)
-    3rdparty/mkldnn/src/common/primitive_hashing.hpp
+    3rdparty/onednn/src/common/primitive_hashing.hpp
 
     =======================================================================================
     LLVM Release License

NEWS.md

Lines changed: 1 addition & 1 deletion

@@ -1973,7 +1973,7 @@ Note: this feature is still experimental, for more details, refer to [design doc
 * Add back R tests and fix typo around R and perl tests (#13940)
 * Fix document build (#13927)
 * Temporarily disables windows pipeline to unblock PRs (#14261)
-* Fix USE_MKLDNN check in Makefile (#13775)
+* Fix USE_ONEDNN check in Makefile (#13775)
 * Fix spelling in threaded_engine_test (#14709)
 * Fix cmake options parsing in dev_menu (#13458)
 * Add Local test stage and option to jump directly to menu item from commandline (#13809)

README.md

Lines changed: 1 addition & 1 deletion

@@ -87,7 +87,7 @@ What's New
 
 ### Ecosystem News
 
-* [MKLDNN for Faster CPU Performance](docs/python_docs/python/tutorials/performance/backend/mkldnn/mkldnn_readme.md)
+* [ONEDNN for Faster CPU Performance](docs/python_docs/python/tutorials/performance/backend/mkldnn/mkldnn_readme.md)
 * [MXNet Memory Monger, Training Deeper Nets with Sublinear Memory Cost](https://github.com/dmlc/mxnet-memonger)
 * [Tutorial for NVidia GTC 2016](https://github.com/dmlc/mxnet-gtc-tutorial)
 * [MXNet.js: Javascript Package for Deep Learning in Browser (without server)](https://github.com/dmlc/mxnet.js/)

benchmark/opperf/README.md

Lines changed: 1 addition & 1 deletion

@@ -37,7 +37,7 @@ Benchmarks are usually done end-to-end for a given Network Architecture. For exa
 2. A standard Network Architecture like ResNet-50 is made up of many operators Ex: Convolution2D, Softmax, Dense and more. Consider the following scenarios:
     1. We improved the performance of Convolution2D operator, but due to a bug, Softmax performance went down. Overall, we may observe end to end benchmarks are running fine, we may miss out the performance degradation of a single operator which can accumulate and become untraceable.
     2. You need to see in a given network, which operator is taking maximum time and plan optimization work. With end to end benchmarks, it is hard to get more fine grained numbers at operator level.
-    3. We need to know on different hardware infrastructure (Ex: CPU with MKLDNN, GPU with NVIDIA CUDA and cuDNN) how different operators performs. With these details, we can plan the optimization work at operator level, which could exponentially boost up end to end performance.
+    3. We need to know on different hardware infrastructure (Ex: CPU with ONEDNN, GPU with NVIDIA CUDA and cuDNN) how different operators performs. With these details, we can plan the optimization work at operator level, which could exponentially boost up end to end performance.
     4. You want to have nightly performance tests across all operators in a deep learning framework to catch regressions early.
     5. We can integrate this framework with a CI/CD system to run per operator performance tests for PRs. Example: When a PR modifies the kernel of TransposeConv2D, we can run benchmarks of TransposeConv2D operator to verify performance.
 

cd/README.md

Lines changed: 4 additions & 4 deletions

@@ -19,18 +19,18 @@
 
 ## Introduction
 
-MXNet aims to support a variety of frontends, e.g. Python, Java, Perl, R, etc. as well as environments (Windows, Linux, Mac, with or without GPU, with or without MKL-DNN support, etc.). This package contains a small continuous delivery (CD) framework used to automate the delivery nightly and release builds across our delivery channels.
+MXNet aims to support a variety of frontends, e.g. Python, Java, Perl, R, etc. as well as environments (Windows, Linux, Mac, with or without GPU, with or without ONEDNN support, etc.). This package contains a small continuous delivery (CD) framework used to automate the delivery nightly and release builds across our delivery channels.
 
 <!-- TODO: Add links to the actual jobs, once this is live on PROD -->
 
 The CD process is driven by the [CD pipeline job](Jenkinsfile_cd_pipeline), which orchestrates the order in which the artifacts are delivered. For instance, first publish the libmxnet library before publishing the pip package. It does this by triggering the [release job](Jenkinsfile_release_job) with a specific set of parameters for each delivery channel. The release job executes the specific release pipeline for a delivery channel across all MXNet *variants*.
 
-A variant is a specific environment or features for which MXNet is compiled. For instance CPU, GPU with CUDA v10.1, CUDA v10.2 with MKL-DNN support, etc.
+A variant is a specific environment or features for which MXNet is compiled. For instance CPU, GPU with CUDA v10.1, CUDA v10.2 with ONEDNN support, etc.
 
-Currently, below variants are supported. All of these variants except native have MKL-DNN backend enabled.
+Currently, below variants are supported. All of these variants except native have ONEDNN backend enabled.
 
 * *cpu*: CPU
-* *native*: CPU without MKL-DNN
+* *native*: CPU without ONEDNN
 * *cu101*: CUDA 10.1
 * *cu102*: CUDA 10.2
 * *cu110*: CUDA 11.0
