2 changes: 0 additions & 2 deletions cmake/modules/contrib/BNNS.cmake
@@ -16,10 +16,8 @@
# under the License.

if(USE_BNNS STREQUAL "ON")
add_definitions(-DUSE_JSON_RUNTIME=1)
tvm_file_glob(GLOB BNNS_RELAY_CONTRIB_SRC src/relay/backend/contrib/bnns/*.cc)
list(APPEND COMPILER_SRCS ${BNNS_RELAY_CONTRIB_SRC})
list(APPEND COMPILER_SRCS ${JSON_RELAY_CONTRIB_SRC})

list(APPEND TVM_RUNTIME_LINKER_LIBS "-framework Accelerate")

70 changes: 63 additions & 7 deletions cmake/modules/contrib/DNNL.cmake
@@ -15,17 +15,73 @@
# specific language governing permissions and limitations
# under the License.

macro(find_dnnl)
# 1. Try to find via dnnl-config.cmake
find_package(dnnl CONFIG)

if (NOT dnnl_FOUND)
# 2. Try to find dnnl as a lib + headers distribution
find_library(EXTERN_LIBRARY_DNNL dnnl NO_CACHE)
if (EXTERN_LIBRARY_DNNL)
get_filename_component(DNNL_LIB_DIR ${EXTERN_LIBRARY_DNNL} DIRECTORY)
get_filename_component(DNNL_HDR_DIR ${DNNL_LIB_DIR} DIRECTORY)
string(APPEND DNNL_HDR_DIR "/include")

find_file(DNNL_CONFIG_HDR dnnl_config.h PATHS ${DNNL_HDR_DIR} NO_CACHE)
if (DNNL_CONFIG_HDR)
file(READ ${DNNL_CONFIG_HDR} DNNL_CONFIG)
string(REGEX MATCH "DNNL_CPU_RUNTIME DNNL_RUNTIME_(OMP|SEQ|TBB)" DNNL_CPU_RUNTIME "${DNNL_CONFIG}")
string(REGEX MATCH "(OMP|SEQ|TBB)" DNNL_CPU_RUNTIME "${DNNL_CPU_RUNTIME}")

if (DNNL_CPU_RUNTIME)
add_library(DNNL::dnnl SHARED IMPORTED)
set_target_properties(DNNL::dnnl PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "${DNNL_HDR_DIR}"
IMPORTED_LOCATION "${EXTERN_LIBRARY_DNNL}"
)

set(dnnl_FOUND TRUE)
set(dnnl_DIR "${DNNL_LIB_DIR}")
endif()
endif()

# because find_file puts this value into the cache
unset(EXTERN_LIBRARY_DNNL CACHE)
unset(DNNL_CONFIG_HDR CACHE)
endif()
endif()

if (NOT dnnl_FOUND)
message(FATAL_ERROR
"Cannot detect DNNL package. Please make sure that you have it properly installed "
"and corresponding variables are set (CMAKE_PREFIX_PATH or CMAKE_LIBRARY_PATH).")
endif()
endmacro(find_dnnl)


if (USE_DNNL_CODEGEN STREQUAL "ON" )
find_dnnl()

if (DNNL_CPU_RUNTIME STREQUAL "OMP" AND NOT USE_OPENMP)
message(WARNING
"DNNL and TVM are using different threading runtimes. Mixing of thread "
"pools may lead to significant performance penalty. Suggestion is to "
"switch TVM to use OpenMP (cmake flag: -DUSE_OPENMP=ON).")
endif()
endif()

if((USE_DNNL_CODEGEN STREQUAL "ON") OR (USE_DNNL_CODEGEN STREQUAL "JSON"))
add_definitions(-DUSE_JSON_RUNTIME=1)
tvm_file_glob(GLOB DNNL_RELAY_CONTRIB_SRC src/relay/backend/contrib/dnnl/*.cc)
list(APPEND COMPILER_SRCS ${DNNL_RELAY_CONTRIB_SRC})
list(APPEND COMPILER_SRCS ${JSON_RELAY_CONTRIB_SRC})
tvm_file_glob(GLOB DNNL_CONTRIB_SRC src/runtime/contrib/dnnl/*.cc)

find_library(EXTERN_LIBRARY_DNNL dnnl)
list(APPEND TVM_RUNTIME_LINKER_LIBS ${EXTERN_LIBRARY_DNNL})
tvm_file_glob(GLOB DNNL_CONTRIB_SRC src/runtime/contrib/dnnl/dnnl_json_runtime.cc)
list(APPEND COMPILER_SRCS ${DNNL_RELAY_CONTRIB_SRC})
list(APPEND RUNTIME_SRCS ${DNNL_CONTRIB_SRC})
message(STATUS "Build with DNNL JSON runtime: " ${EXTERN_LIBRARY_DNNL})
list(APPEND TVM_RUNTIME_LINKER_LIBS DNNL::dnnl)
# Workaround: have to use a system include path while TVM doesn't use targets to describe dependencies
include_directories(SYSTEM $<TARGET_PROPERTY:DNNL::dnnl,INTERFACE_INCLUDE_DIRECTORIES>)
add_definitions(-DUSE_JSON_RUNTIME=1)

message(STATUS "Build with DNNL JSON runtime: ${dnnl_DIR} (${DNNL_CPU_RUNTIME})" )
elseif(USE_DNNL_CODEGEN STREQUAL "C_SRC")
tvm_file_glob(GLOB DNNL_RELAY_CONTRIB_SRC src/relay/backend/contrib/dnnl/*.cc)
list(APPEND COMPILER_SRCS ${DNNL_RELAY_CONTRIB_SRC})
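With the configuration above, a DNNL-enabled build links against the DNNL::dnnl target and compiles the JSON runtime in. Below is a minimal sketch, not part of this change, of how one might check from Python that the resulting build actually carries the DNNL codegen and which library version it was built against; the "relay.ext.dnnl" name follows TVM's usual BYOC registration convention and is assumed here, while "runtime.module.dnnl_version" is the packed function queried by get_dnnl_version() in the file below.

import tvm

# Present only when TVM was configured with -DUSE_DNNL_CODEGEN=ON or JSON
# (registered by the DNNL BYOC codegen under the usual relay.ext.<name> convention).
dnnl_codegen = tvm.get_global_func("relay.ext.dnnl", allow_missing=True)

# Packed function used by get_dnnl_version() in python/tvm/relay/op/contrib/dnnl.py.
version_fn = tvm.get_global_func("runtime.module.dnnl_version", allow_missing=True)

print("DNNL codegen enabled:", dnnl_codegen is not None)
print("DNNL library version:", version_fn() if version_fn else "unknown")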
111 changes: 107 additions & 4 deletions python/tvm/relay/op/contrib/dnnl.py
@@ -38,18 +38,30 @@
from tvm.relay import transform
from tvm.relay.build_module import bind_params_by_name

from ...dataflow_pattern import wildcard, is_op
from ...dataflow_pattern import wildcard, is_op, is_constant
from .register import register_pattern_table


def get_dnnl_version():
"""Return tuple with version or DNNL library if known
Otherwise return unknown value which is bigger than any over real
versions.
"""
f = tvm.get_global_func("runtime.module.dnnl_version", allow_missing=True)
return tuple(int(el) for el in f().split(".")) if f else (100500,)


dnnl_version = get_dnnl_version()

logger = logging.getLogger("DNNL")


def _register_external_op_helper(op_name, supported=True):
"""The helper function to indicate that a given operator can be supported
by DNNL.

Paramters
---------
Parameters
----------
op_name : Str
The name of operator that will be registered.

@@ -159,6 +171,90 @@ def make_dnnl_pattern(op, with_bias, with_eltwise):
return dnnl_pattern


def make_qnn_conv2d_pattern(with_sum=False):
"""Make qnn.conv2d based pattern supported by DNNL

Parameters
----------
with_sum : bool
Indicates whether to append qnn.add at the end of the pattern.

Returns
-------
pattern : Tuple(pattern_name, CallPattern)
Created pattern name, along with its CallPattern.
"""
weight = is_constant()  # const requirement: have to recalculate bias to compensate for src_zp
bias = is_constant()

pat = wildcard()
pat = is_op("qnn.conv2d")(
pat, weight, is_constant(), is_constant(), is_constant(), is_constant()
)
pat = is_op("add")(pat, bias) | pat
pat = is_op("qnn.requantize")(pat, is_constant(), is_constant(), is_constant(), is_constant())
pat = is_op("clip")(pat)
pat = is_op("cast")(pat)
if with_sum:
pat = is_op("qnn.add")(
pat,
wildcard(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
)
pat = is_op("clip")(pat)

pat_name = "dnnl.qnn.conv2d_sum" if with_sum else "dnnl.qnn.conv2d"

return pat_name, pat


def make_qnn_dense_pattern(with_sum=False):
"""Make qnn.dense based pattern supported by DNNL

Parameters
----------
with_sum : bool
Indicates whether to append qnn.add at the end of the pattern.

Returns
-------
pattern : Tuple(pattern_name, CallPattern)
Created pattern name, along with its CallPattern.
"""
weight = is_constant()
bias = is_constant()

pat = wildcard()
pat = is_op("qnn.dense")(
pat, weight, is_constant(), is_constant(), is_constant(), is_constant()
)
pat = is_op("add")(pat, bias) | pat
pat = is_op("qnn.requantize")(pat, is_constant(), is_constant(), is_constant(), is_constant())
pat = is_op("clip")(pat)
pat = is_op("cast")(pat)
if with_sum:
pat = is_op("qnn.add")(
pat,
wildcard(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
is_constant(),
)
pat = is_op("clip")(pat)

pat_name = "dnnl.qnn.dense_sum" if with_sum else "dnnl.qnn.dense"

return pat_name, pat


@register_pattern_table("dnnl")
def pattern_table():
"""Create dnnl patterns.
@@ -173,9 +269,16 @@ def pattern_table():
for with_bias in [True, False]:
for elt in elt_list:
if not with_bias and not elt:
return dnnl_patterns
continue
dnnl_patterns.append(make_dnnl_pattern("conv2d", with_bias, elt))
dnnl_patterns.append(make_dnnl_pattern("dense", with_bias, elt))

for with_sum in [True, False]:
dnnl_patterns.append(make_qnn_conv2d_pattern(with_sum))
# Old DNNL versions don't support per-channel o_scale
if dnnl_version >= (2, 2) or not with_sum:
dnnl_patterns.append(make_qnn_dense_pattern(with_sum))

return dnnl_patterns


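The new QNN patterns only take effect once the "dnnl" pattern table is applied to a module. Below is a minimal sketch, not part of this pull request, of how the table registered above could be wired into the standard BYOC passes; the helper name partition_for_dnnl is hypothetical, and the pass sequence is the common MergeComposite / AnnotateTarget / PartitionGraph pipeline.

import tvm
from tvm import relay
from tvm.relay.op.contrib import dnnl  # importing registers the "dnnl" pattern table


def partition_for_dnnl(mod, params=None):
    """Hypothetical helper: offload the matched patterns to the DNNL codegen."""
    if params:
        mod["main"] = relay.build_module.bind_params_by_name(mod["main"], params)
    seq = tvm.transform.Sequential(
        [
            relay.transform.MergeComposite(dnnl.pattern_table()),  # fuse matched FP32/QNN patterns
            relay.transform.AnnotateTarget("dnnl"),  # mark supported ops and composites
            relay.transform.MergeCompilerRegions(),
            relay.transform.PartitionGraph(),  # split out functions for the "dnnl" compiler
        ]
    )
    return seq(mod)

After partitioning, relay.build routes the extracted "dnnl" functions through the JSON codegen enabled by the CMake changes above.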
2 changes: 1 addition & 1 deletion python/tvm/relay/op/tensor.py
@@ -434,7 +434,7 @@ def abs(data):


def sign(data):
"""Compute element-wise absolute of data.
"""Compute element-wise sign of data.

Parameters
----------