Address review comments.

Srinivasan Narayanamoorthy 2020-05-13 13:47:15 -07:00
parent 3692d790f8
commit 3dcace488c
7 changed files with 27 additions and 159 deletions

View File

@@ -143,11 +143,11 @@ build:mkl --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl --define=build_with_mkl_dnn_v1_only=true
build:mkl -c opt
# Config to build the oneDNN backend with a user-specified threadpool.
build:mkl_threadpool --define=build_with_mkl=true --define=enable_mkl=true
build:mkl_threadpool --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl_threadpool --define=build_with_mkldnn_threadpool=true
build:mkl_threadpool -c opt
# This config refers to building with CUDA available. It does not necessarily
# mean that we build CUDA op kernels.
build:using_cuda --define=using_cuda=true
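
Note: with this config in place, a oneDNN threadpool build is selected by passing --config=mkl_threadpool to bazel, for example bazel build --config=mkl_threadpool //tensorflow/tools/pip_package:build_pip_package (the target here is purely illustrative; any TensorFlow target can be built this way).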

View File

@@ -328,9 +328,11 @@ def tf_copts(
if_mkl(["-DINTEL_MKL=1", "-DEIGEN_USE_VML"]) +
if_mkl_open_source_only(["-DINTEL_MKL_DNN_ONLY"]) +
if_mkl_v1_open_source_only(["-DENABLE_MKLDNN_V1"]) +
if_mkldnn_threadpool(["-DENABLE_MKLDNN_THREADPOOL"]) +
if_mkldnn_threadpool(["-DENABLE_MKLDNN_V1"]) +
if_mkldnn_threadpool(["-DINTEL_MKL_DNN_ONLY"]) +
if_mkldnn_threadpool([
"-DENABLE_MKLDNN_THREADPOOL",
"-DENABLE_MKLDNN_V1",
"-DINTEL_MKL_DNN_ONLY"
]) +
if_enable_mkl(["-DENABLE_MKL"]) +
if_ngraph(["-DINTEL_NGRAPH=1"]) +
if_android_arm(["-mfpu=neon"]) +
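
(Functional change in this hunk: the three separate if_mkldnn_threadpool() calls are collapsed into a single call that emits all three defines.)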

View File

@@ -232,17 +232,6 @@ def tf_repositories(path_prefix = "", tf_repo_name = ""):
],
)
tf_http_archive(
name = "mkl_dnn_tp",
build_file = clean_dep("//third_party/mkl_dnn:mkldnn_threadpool.BUILD"),
sha256 = "54737bcb4dc1961d32ee75da3ecc529fa48198f8b2ca863a079e19a9c4adb70f",
strip_prefix = "oneDNN-1.4",
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/oneapi-src/oneDNN/archive/v1.4.tar.gz",
"https://github.com/oneapi-src/oneDNN/archive/v1.4.tar.gz",
],
)
tf_http_archive(
name = "com_google_absl",
build_file = clean_dep("//third_party:com_google_absl.BUILD"),
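
Note: the dedicated mkl_dnn_tp repository (oneDNN 1.4) is removed from the workspace; the threadpool configuration now reuses the existing @mkl_dnn_v1 repository, as reflected in the mkl_deps() and mkldnn_v1.BUILD changes below.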

View File

@@ -107,7 +107,7 @@ def mkl_deps():
return select({
"@org_tensorflow//third_party/mkl_dnn:build_with_mkl_dnn_only": ["@mkl_dnn"],
"@org_tensorflow//third_party/mkl_dnn:build_with_mkl_dnn_v1_only": ["@mkl_dnn_v1//:mkl_dnn"],
"@org_tensorflow//third_party/mkl_dnn:build_with_mkldnn_threadpool": ["@mkl_dnn_tp//:mkl_dnn"],
"@org_tensorflow//third_party/mkl_dnn:build_with_mkldnn_threadpool": ["@mkl_dnn_v1//:mkl_dnn"],
"@org_tensorflow//third_party/mkl:build_with_mkl_ml_only": ["@org_tensorflow//third_party/mkl:intel_binary_blob"],
"@org_tensorflow//third_party/mkl:build_with_mkl": [
"@org_tensorflow//third_party/mkl:intel_binary_blob",

View File

@@ -34,10 +34,10 @@ def if_mkldnn_threadpool(if_true, if_false = []):
"""Returns `if_true` if MKL-DNN v1.x is used.
Shorthand for select()'ing on whether we're building with
MKL-DNN v1.x open source library only, without depending on MKL binary form.
MKL-DNN v1.x open source library only, with a user-specified threadpool, without depending on MKL binary form.
Returns a select statement which evaluates to if_true if we're building
with MKL-DNN v1.x open source library only. Otherwise, the
with MKL-DNN v1.x open source library only, with a user-specified threadpool. Otherwise, the
select statement evaluates to if_false.
"""

View File

@@ -1,133 +0,0 @@
exports_files(["LICENSE"])
load(
"@org_tensorflow//third_party/mkl_dnn:build_defs.bzl",
"if_mkl_open_source_only",
"if_mkldnn_threadpool",
)
load(
"@org_tensorflow//third_party:common.bzl",
"template_rule",
)
config_setting(
name = "clang_linux_x86_64",
values = {
"cpu": "k8",
"define": "using_clang=true",
},
)
template_rule(
name = "dnnl_config_h",
src = "include/dnnl_config.h.in",
out = "include/dnnl_config.h",
substitutions = {
"#cmakedefine DNNL_CPU_THREADING_RUNTIME DNNL_RUNTIME_${DNNL_CPU_THREADING_RUNTIME}": "#define DNNL_CPU_THREADING_RUNTIME DNNL_RUNTIME_THREADPOOL",
"#cmakedefine DNNL_CPU_RUNTIME DNNL_RUNTIME_${DNNL_CPU_RUNTIME}": "#define DNNL_CPU_RUNTIME DNNL_RUNTIME_THREADPOOL",
"#cmakedefine DNNL_GPU_RUNTIME DNNL_RUNTIME_${DNNL_GPU_RUNTIME}": "#define DNNL_GPU_RUNTIME DNNL_RUNTIME_NONE",
},
)
# Create the file mkldnn_version.h with MKL-DNN version numbers.
# Currently, the version numbers are hard coded here. If MKL-DNN is upgraded then
# the version numbers have to be updated manually. The version numbers can be
# obtained from the PROJECT_VERSION settings in CMakeLists.txt. The variable is
# set to "version_major.version_minor.version_patch". The git hash version can
# be set to NA.
# TODO(agramesh1) Automatically get the version numbers from CMakeLists.txt.
template_rule(
name = "dnnl_version_h",
src = "include/dnnl_version.h.in",
out = "include/dnnl_version.h",
substitutions = {
"@DNNL_VERSION_MAJOR@": "1",
"@DNNL_VERSION_MINOR@": "4",
"@DNNL_VERSION_PATCH@": "0",
"@DNNL_VERSION_HASH@": "N/A",
},
)
cc_library(
name = "mkl_dnn",
srcs = glob([
"src/common/*.cpp",
"src/common/*.hpp",
"src/cpu/*.cpp",
"src/cpu/*.hpp",
"src/cpu/**/*.cpp",
"src/cpu/**/*.hpp",
"src/cpu/xbyak/*.h",
]) + if_mkldnn_threadpool([
":dnnl_config_h",
]) + [":dnnl_version_h"],
hdrs = glob(["include/*"]),
copts = [
"-fexceptions",
"-DUSE_MKL",
"-DUSE_CBLAS",
] + if_mkl_open_source_only([
"-UUSE_MKL",
"-UUSE_CBLAS",
]) + if_mkldnn_threadpool([
"-UUSE_MKL",
"-UUSE_CBLAS",
]) + select({
"@org_tensorflow//tensorflow:linux_x86_64": ["-fopenmp-simd"],
# TODO(ibiryukov): enable openmp with clang by including libomp as a
# dependency.
":clang_linux_x86_64": [],
"//conditions:default": [],
}),
includes = [
"include",
"src",
"src/common",
"src/cpu",
"src/cpu/gemm",
"src/cpu/xbyak",
],
visibility = ["//visibility:public"],
deps = select({
"@org_tensorflow//tensorflow:linux_x86_64": [
"@mkl_linux//:mkl_headers",
"@mkl_linux//:mkl_libs_linux",
],
"@org_tensorflow//tensorflow:macos": [
"@mkl_darwin//:mkl_headers",
"@mkl_darwin//:mkl_libs_darwin",
],
"@org_tensorflow//tensorflow:windows": [
"@mkl_windows//:mkl_headers",
"@mkl_windows//:mkl_libs_windows",
],
"//conditions:default": [],
}),
)
cc_library(
name = "mkldnn_single_threaded",
srcs = glob([
"src/common/*.cpp",
"src/common/*.hpp",
"src/cpu/*.cpp",
"src/cpu/*.hpp",
"src/cpu/**/*.cpp",
"src/cpu/**/*.hpp",
"src/cpu/xbyak/*.h",
]) + [":dnnl_config_h"],
hdrs = glob(["include/*"]),
copts = [
"-fexceptions",
"-DMKLDNN_THR=MKLDNN_THR_SEQ", # Disables threading.
],
includes = [
"include",
"src",
"src/common",
"src/cpu",
"src/cpu/gemm",
"src/cpu/xbyak",
],
visibility = ["//visibility:public"],
)

View File

@@ -4,6 +4,7 @@ load(
"@org_tensorflow//third_party/mkl_dnn:build_defs.bzl",
"if_mkl_open_source_only",
"if_mkl_v1_open_source_only",
"if_mkldnn_threadpool",
)
load(
"@org_tensorflow//third_party:common.bzl",
@@ -17,16 +18,26 @@ config_setting(
"define": "using_clang=true",
},
)
_DNNL_RUNTIME_OMP = {
"#cmakedefine DNNL_CPU_THREADING_RUNTIME DNNL_RUNTIME_${DNNL_CPU_THREADING_RUNTIME}": "#define DNNL_CPU_THREADING_RUNTIME DNNL_RUNTIME_OMP",
"#cmakedefine DNNL_CPU_RUNTIME DNNL_RUNTIME_${DNNL_CPU_RUNTIME}": "#define DNNL_CPU_RUNTIME DNNL_RUNTIME_OMP",
"#cmakedefine DNNL_GPU_RUNTIME DNNL_RUNTIME_${DNNL_GPU_RUNTIME}": "#define DNNL_GPU_RUNTIME DNNL_RUNTIME_NONE",
}
_DNNL_RUNTIME_THREADPOOL = {
"#cmakedefine DNNL_CPU_THREADING_RUNTIME DNNL_RUNTIME_${DNNL_CPU_THREADING_RUNTIME}": "#define DNNL_CPU_THREADING_RUNTIME DNNL_RUNTIME_THREADPOOL",
"#cmakedefine DNNL_CPU_RUNTIME DNNL_RUNTIME_${DNNL_CPU_RUNTIME}": "#define DNNL_CPU_RUNTIME DNNL_RUNTIME_THREADPOOL",
"#cmakedefine DNNL_GPU_RUNTIME DNNL_RUNTIME_${DNNL_GPU_RUNTIME}": "#define DNNL_GPU_RUNTIME DNNL_RUNTIME_NONE",
}
template_rule(
name = "dnnl_config_h",
src = "include/dnnl_config.h.in",
out = "include/dnnl_config.h",
substitutions = {
"#cmakedefine DNNL_CPU_THREADING_RUNTIME DNNL_RUNTIME_${DNNL_CPU_THREADING_RUNTIME}": "#define DNNL_CPU_THREADING_RUNTIME DNNL_RUNTIME_OMP",
"#cmakedefine DNNL_CPU_RUNTIME DNNL_RUNTIME_${DNNL_CPU_RUNTIME}": "#define DNNL_CPU_RUNTIME DNNL_RUNTIME_OMP",
"#cmakedefine DNNL_GPU_RUNTIME DNNL_RUNTIME_${DNNL_GPU_RUNTIME}": "#define DNNL_GPU_RUNTIME DNNL_RUNTIME_NONE",
},
substitutions = if_mkldnn_threadpool(
_DNNL_RUNTIME_THREADPOOL,
if_false = _DNNL_RUNTIME_OMP,
),
)
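
Note: under --config=mkl_threadpool the generated include/dnnl_config.h defines both DNNL_CPU_RUNTIME and DNNL_CPU_THREADING_RUNTIME as DNNL_RUNTIME_THREADPOOL; in all other MKL builds the _DNNL_RUNTIME_OMP substitutions apply and both resolve to DNNL_RUNTIME_OMP. DNNL_GPU_RUNTIME is DNNL_RUNTIME_NONE in either case.
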
# Create the file mkldnn_version.h with MKL-DNN version numbers.
@@ -59,9 +70,8 @@ cc_library(
"src/cpu/**/*.cpp",
"src/cpu/**/*.hpp",
"src/cpu/xbyak/*.h",
]) + if_mkl_v1_open_source_only([
":dnnl_config_h",
]) + [":dnnl_version_h"],
]) + [":dnnl_config_h", ":dnnl_version_h"],
hdrs = glob(["include/*"]),
copts = [
"-fexceptions",