Bringing back PR #42339.

PR #42339: [INTEL MKL] MKL DNN v0.x cleanup - Reset MKL build config and remove DNN v0.x related macros

Imported from GitHub PR https://github.com/tensorflow/tensorflow/pull/42339

This is the first PR for the MKL DNN v0.x cleanup - for TensorFlow 2.4, only MKL DNN v1.x will be supported.
   (1) Reset MKL DNN related build config (mostly in the third_party/mkl or mkl_dnn folders)
   (2) Remove MKL DNN v0.x related macros in core/util/mkl_types.h
   (3) Minor code style fix in one MKL kernel op.

There will be a sequence of PRs which will clean up all related MKL kernel ops.

PiperOrigin-RevId: 331205640
Change-Id: I2c3c3671fe8906a5dd18c6b017d05fc69a1e3f59
This commit is contained in:
Penporn Koanantakool 2020-09-11 12:56:58 -07:00 committed by TensorFlower Gardener
parent 4c961fb918
commit fe1bce6717
9 changed files with 7 additions and 224 deletions

View File

@ -162,13 +162,11 @@ build --host_java_toolchain=//third_party/toolchains/java:tf_java_toolchain
# environment variable "TF_MKL_ROOT" every time before build.
build:mkl --define=build_with_mkl=true --define=enable_mkl=true
build:mkl --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl --define=build_with_mkl_dnn_v1_only=true
build:mkl -c opt
# config to build OneDNN backend with a user specified threadpool.
build:mkl_threadpool --define=build_with_mkl=true --define=enable_mkl=true
build:mkl_threadpool --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl_threadpool --define=build_with_mkl_dnn_v1_only=true
build:mkl_threadpool --define=build_with_mkl_opensource=true
build:mkl_threadpool --define=build_with_mkldnn_threadpool=true
build:mkl_threadpool -c opt
@ -176,7 +174,6 @@ build:mkl_threadpool -c opt
# Config setting to build with oneDNN and without the binary blob
build:mkl_opensource_only --define=build_with_mkl=true --define=enable_mkl=true
build:mkl_opensource_only --define=tensorflow_mkldnn_contraction_kernel=0
build:mkl_opensource_only --define=build_with_mkl_dnn_v1_only=true
build:mkl_opensource_only --define=build_with_mkl_opensource=true
build:mkl_opensource_only -c opt

View File

@ -18,6 +18,9 @@ limitations under the License.
#ifdef INTEL_MKL
namespace tensorflow {
// MKL DNN 0.x will not be supported. So all related macros have been removed.
// This file will be removed once MKL DNN 0.x related source code is cleaned and
// all MKL DNN 1.x related macros have been replaced.
#ifdef ENABLE_MKLDNN_V1
#define ADD_MD add_md
@ -116,106 +119,6 @@ namespace tensorflow {
#define TENSOR_FORMAT_NHWC MKL_TENSOR_FORMAT_NHWC
#define TENSOR_MAX_DIMS MKLDNN_MAX_NDIMS
#else
#define ADD_MD add_pd
#define ALGORITHM mkldnn
#define ALGORITHM_UNDEF ALGORITHM::algorithm_undef
#define BN_FLAGS mkldnn
#define CPU_STREAM(engine) stream(stream::kind::eager_nostore)
#define DATA_WITH_ENGINE(data, engine) data
#define DST_MD dst_pd
#define ENGINE_CPU engine::cpu
#define GET_CHECK_REORDER_MEM_ARGS(md, tensor, net_ptr, net_args, engine) \
memory::primitive_desc(md, engine), tensor, &net_ptr
#define GET_CHECK_REORDER_TO_OP_MEM_ARGS(pd, tensor, net_ptr, net_args, \
engine) \
pd, tensor, &net_ptr
#define GET_DESC get_primitive_desc()
#define GET_FORMAT_FROM_SHAPE(src_mkl_shape) \
static_cast<memory::format>(src_mkl_shape.GetMklLayout().data.format)
#define GET_BLOCK_STRIDES(strides, idx) strides[(idx)]
#define GET_MEMORY_DESC_CONSTRUCTOR(dims, type, fm) \
{ {dims}, MklDnnType<type>(), fm }
#define GET_MEMORY_SIZE_FROM_MD(md, engine) \
memory::primitive_desc(md, engine).get_size()
#define GET_SRC_DESC_FROM_OP_PD(op_pd) op_pd.get()->src_primitive_desc()
#define GET_DST_DESC_FROM_OP_PD(op_pd) op_pd.get()->dst_primitive_desc()
#define GET_BIAS_DESC_FROM_OP_PD(op_pd) op_pd.get()->bias_primitive_desc()
#define GET_DIFF_DST_DESC_FROM_OP_PD(op_pd) \
op_pd.get()->diff_dst_primitive_desc()
#define GET_WORKSPACE_DESC_FROM_OP_PD(op_pd) \
op_pd.get()->workspace_primitive_desc()
#define GET_TENSOR_FORMAT(fmt) fmt
#define GET_TF_DATA_FORMAT(shape, mem_desc) mem_desc.data.format
#define GET_USR_MEM_PRIM_DESC(src) src.GetUsrMemPrimDesc()
#define GET_WEIGHTS_DESC_FROM_OP_PD(op_pd) op_pd.get()->weights_primitive_desc()
#define GET_WEIGHTS_FORMAT_FROM_OP_PD(op_pd, op) op->GetFilterMemoryFormat()
#define IS_DIFF_DST_REORDER_NEEDED(diff_dst_md, op_pd, op) \
diff_dst_md.data.format != op->GetDiffDstMemoryFormat()
#define IS_DIFF_FILTER_REORDER_NEEDED(diff_filter_md, fmt, op_pd, op) \
fmt != op->GetDiffFilterMemoryFormat()
#define IS_FILTER_REORDER_NEEDED(filter_md, op_pd, op) \
filter_md.data.format != op->GetFilterMemoryFormat()
#define IS_SRC_REORDER_NEEDED(src_md, op_pd, op) \
src_md.data.format != op->GetSrcMemoryFormat()
#define IS_WEIGHTS_REORDER_NEEDED(weights_md, op_pd, op) \
weights_md.data.format != op->GetWeightMemoryFormat()
#define GET_MEMORY_DESC_FROM_MEM_PTR(mem_ptr) \
mem_ptr->get_primitive_desc().desc()
#define GET_MEMORY_PRIMITIVE_DESC_FROM_MEM_PTR(mem_ptr) \
mem_ptr->get_primitive_desc()
#define MEMORY_CONSTRUCTOR(mem_pd, engine, data) memory(mem_pd, data)
#define MEMORY_CONSTRUCTOR_PD(mem_pd, engine, data) memory(mem_pd, data)
#define MEMORY_CONSTRUCTOR_WITH_MEM_PD(mem_ptr, cpu_engine, data) \
memory({GET_MEMORY_DESC_FROM_MEM_PTR(mem_ptr), cpu_engine}, data)
#define MEMORY_CONSTRUCTOR_USING_MD(md, engine, data) memory({md, engine}, data)
#define MEMORY_CONSTRUCTOR_USING_MEM_PD(dims, type, fm, engine, data) \
memory({GET_MEMORY_DESC_CONSTRUCTOR(dims, type, fm), engine}, data)
#define MEMORY_CONSTRUCTOR_WITHOUT_DATA(mem_pd, engine) memory(mem_pd)
#define MEMORY_DATA_TYPE_UNDEF memory::data_type::data_undef
#define MEMORY_DESC memory::format
#define MEMORY_FORMAT mkldnn::memory::format
#define MEMORY_FORMAT_DESC layout_desc
#define MEMORY_FORMAT_UNDEF mkldnn::memory::format::format_undef
#define MEMORY_PD_CONSTRUCTOR(dims, type, fm, engine) \
memory::primitive_desc(GET_MEMORY_DESC_CONSTRUCTOR(dims, type, fm), engine)
#define MEMORY_PD_WITHOUT_DATA(pd, engine) pd
#define MEMORY_PRIMITIVE_DESC memory::primitive_desc
#define MEMORY_PD_CONSTRUCTOR_2_PARAMS(md, engine) \
MEMORY_PRIMITIVE_DESC(md, engine)
#define MKL_FMT_TAG tf_fmt
#define MKL_TENSOR_FORMAT memory::format
#define MKL_TENSOR_FORMAT_BLOCKED memory::format::blocked
#define MKL_TENSOR_FORMAT_IN_C mkldnn_memory_format_t
#define MKL_TENSOR_FORMAT_INVALID memory::format::format_undef
#define MKL_TENSOR_FORMAT_NC memory::format::nc
#define MKL_TENSOR_FORMAT_NCHW memory::format::nchw
#define MKL_TENSOR_FORMAT_NCDHW memory::format::ncdhw
#define MKL_TENSOR_FORMAT_NDHWC memory::format::ndhwc
#define MKL_TENSOR_FORMAT_NHWC memory::format::nhwc
#define MKL_TENSOR_FORMAT_TNC memory::format::tnc
#define MKL_TENSOR_FORMAT_X memory::format::x
#define MKL_TENSOR_FORMAT_UNDEF MKL_TENSOR_FORMAT_INVALID
#define NET_ARGS_PTR nullptr
#define OUTPUT_TF_MD output_tf_pd
#define PRIMITIVE_DESC_BIAS bias_primitive_desc()
#define PRIMITIVE_DESC_WEIGHTS weights_primitive_desc()
#define PRIMITIVE_DESC_DIFF_DST diff_dst_primitive_desc()
#define PRIMITIVE_DESC_DIFF_SRC diff_src_primitive_desc()
#define PRIMITIVE_DESC_DIFF_WEIGHTS diff_weights_primitive_desc()
#define PRIMITIVE_DESC_DST dst_primitive_desc()
#define PRIMITIVE_DESC_SRC src_primitive_desc()
#define PRIMITIVE_DESC_WORKSPACE workspace_primitive_desc()
#define REORDER_PD_CONSTRUCTOR(src_pd, dst_pd, engine) ReorderPd(src_pd, dst_pd)
#define REORDER_PD_CONSTRUCTOR_WITH_ATTR(src_pd, dst_pd, engine, prim_attr) \
ReorderPd(src_pd, dst_pd, prim_attr)
#define SKIP_INPUT_REORDER(input_mkl_shape, input_md) \
(input_mkl_shape.GetTfDataFormat() == input_md.data.format && \
input_mkl_shape.GetTfDataFormat() != MKL_TENSOR_FORMAT_BLOCKED)
#define SUMMAND_MD summand_pd
#define TENSOR_FORMAT TensorFormat
#define TENSOR_FORMAT_NHWC FORMAT_NHWC
#endif // ENABLE_MKLDNN_V1
} // namespace tensorflow

View File

@ -47,7 +47,6 @@ load(
load(
"//third_party/mkl_dnn:build_defs.bzl",
"if_mkl_open_source_only",
"if_mkl_v1",
"if_mkldnn_threadpool",
)
load(
@ -325,9 +324,8 @@ def tf_copts(
if_nvcc(["-DTENSORFLOW_USE_NVCC=1"]) +
if_xla_available(["-DTENSORFLOW_USE_XLA=1"]) +
if_tensorrt(["-DGOOGLE_TENSORRT=1"]) +
if_mkl(["-DINTEL_MKL=1", "-DEIGEN_USE_VML"]) +
if_mkl(["-DINTEL_MKL=1", "-DENABLE_MKLDNN_V1", "-DENABLE_INTEL_MKL_BFLOAT16"]) +
if_mkl_open_source_only(["-DINTEL_MKL_DNN_ONLY"]) +
if_mkl_v1(["-DENABLE_MKLDNN_V1", "-DENABLE_INTEL_MKL_BFLOAT16"]) +
if_mkldnn_threadpool(["-DENABLE_MKLDNN_THREADPOOL"]) +
if_enable_mkl(["-DENABLE_MKL"]) +
if_ngraph(["-DINTEL_NGRAPH=1"]) +

View File

@ -90,12 +90,7 @@ def mkl_deps():
inclusion in the deps attribute of rules.
"""
return select({
"@org_tensorflow//third_party/mkl_dnn:build_with_mkl_dnn_only": ["@mkl_dnn"],
"@org_tensorflow//third_party/mkl_dnn:build_with_mkl_dnn_v1_only": ["@mkl_dnn_v1//:mkl_dnn"],
"@org_tensorflow//third_party/mkl:build_with_mkl": [
"@org_tensorflow//third_party/mkl:intel_binary_blob",
"@mkl_dnn",
],
"@org_tensorflow//third_party/mkl:build_with_mkl": ["@mkl_dnn_v1//:mkl_dnn"],
"//conditions:default": [],
})

View File

@ -9,39 +9,19 @@ package(
exports_files(["LICENSE"])
config_setting(
name = "build_with_mkl_dnn_only",
define_values = {
"build_with_mkl": "true",
"build_with_mkl_dnn_only": "true",
},
visibility = ["//visibility:public"],
)
config_setting(
name = "build_with_mkl_opensource",
define_values = {
"build_with_mkl": "true",
"build_with_mkl_dnn_v1_only": "true",
"build_with_mkl_opensource": "true",
},
visibility = ["//visibility:public"],
)
config_setting(
name = "build_with_mkl_dnn_v1_only",
define_values = {
"build_with_mkl": "true",
"build_with_mkl_dnn_v1_only": "true",
},
visibility = ["//visibility:public"],
)
config_setting(
name = "build_with_mkldnn_threadpool",
define_values = {
"build_with_mkl": "true",
"build_with_mkl_dnn_v1_only": "true",
"build_with_mkl_opensource": "true",
"build_with_mkldnn_threadpool": "true",
},

View File

@ -14,22 +14,6 @@ def if_mkl_open_source_only(if_true, if_false = []):
"//conditions:default": if_false,
})
def if_mkl_v1(if_true, if_false = []):
"""Returns `if_true` if MKL-DNN v1.x is used.
Shorthand for select()'ing on whether we're building with
MKL-DNN v1.x open source library only, without depending on MKL binary form.
Returns a select statement which evaluates to if_true if we're building
with MKL-DNN v1.x open source library only. Otherwise, the
select statement evaluates to if_false.
"""
return select({
"@org_tensorflow//third_party/mkl_dnn:build_with_mkl_dnn_v1_only": if_true,
"//conditions:default": if_false,
})
def if_mkldnn_threadpool(if_true, if_false = []):
"""Returns `if_true` if MKL-DNN v1.x is used.

View File

@ -1,10 +1,5 @@
exports_files(["LICENSE"])
load(
"@org_tensorflow//third_party/mkl_dnn:build_defs.bzl",
"if_mkl_open_source_only",
"if_mkl_v1",
)
load(
"@org_tensorflow//third_party:common.bzl",
"template_rule",
@ -50,65 +45,6 @@ template_rule(
},
)
cc_library(
name = "mkl_dnn",
srcs = glob([
"src/common/*.cpp",
"src/common/*.hpp",
"src/cpu/*.cpp",
"src/cpu/*.hpp",
"src/cpu/**/*.cpp",
"src/cpu/**/*.hpp",
"src/cpu/xbyak/*.h",
]) + if_mkl_v1([
":mkldnn_config_h",
]) + [":mkldnn_version_h"],
hdrs = glob(["include/*"]),
copts = [
"-fexceptions",
"-DUSE_MKL",
"-DUSE_CBLAS",
] + if_mkl_open_source_only([
"-UUSE_MKL",
"-UUSE_CBLAS",
]) + if_mkl_v1([
"-UUSE_MKL",
"-UUSE_CBLAS",
]) + select({
"@org_tensorflow//tensorflow:linux_x86_64": [
"-fopenmp", # only works with gcc
],
# TODO(ibiryukov): enable openmp with clang by including libomp as a
# dependency.
":clang_linux_x86_64": [],
"//conditions:default": [],
}),
includes = [
"include",
"src",
"src/common",
"src/cpu",
"src/cpu/gemm",
"src/cpu/xbyak",
],
visibility = ["//visibility:public"],
deps = select({
"@org_tensorflow//tensorflow:linux_x86_64": [
"@mkl_linux//:mkl_headers",
"@mkl_linux//:mkl_libs_linux",
],
"@org_tensorflow//tensorflow:macos": [
"@mkl_darwin//:mkl_headers",
"@mkl_darwin//:mkl_libs_darwin",
],
"@org_tensorflow//tensorflow:windows": [
"@mkl_windows//:mkl_headers",
"@mkl_windows//:mkl_libs_windows",
],
"//conditions:default": [],
}),
)
cc_library(
name = "mkldnn_single_threaded",
srcs = glob([

View File

@ -3,7 +3,6 @@ exports_files(["LICENSE"])
load(
"@org_tensorflow//third_party/mkl_dnn:build_defs.bzl",
"if_mkl_open_source_only",
"if_mkl_v1",
"if_mkldnn_threadpool",
)
load(
@ -84,18 +83,9 @@ cc_library(
hdrs = glob(["include/*"]),
copts = [
"-fexceptions",
"-DUSE_MKL",
"-DUSE_CBLAS",
] + if_mkl_open_source_only([
"-UUSE_MKL",
"-UUSE_CBLAS",
]) + if_mkl_v1([
"-UUSE_MKL",
"-UUSE_CBLAS",
]) + if_mkldnn_threadpool([
"-UUSE_MKL",
"-UUSE_CBLAS",
]) + select({
] + select({
"@org_tensorflow//tensorflow:linux_x86_64": [
"-fopenmp", # only works with gcc
],

View File

@ -117,7 +117,7 @@ cc_library(
deps = [
":ngraph_headers",
"@eigen_archive//:eigen",
"@mkl_dnn",
"@mkl_dnn_v1//:mkl_dnn",
"@nlohmann_json_lib",
"@tbb",
],