Remove RTLD_GLOBAL when loading pywrap_tensorflow
Splits out a shared object (//tensorflow:libtensorflow_framework.so) with core TensorFlow functionality but neither ops nor kernels. This object does include registries for ops, kernels, filesystems, etc. The expectation is that shared objects containing custom ops will have a runtime dependency on this framework shared object: TensorFlow will load the custom op shared object, and the custom op shared object will use the symbols from the framework shared object to register its ops/kernels/etc. rather than (as before this change) relying on those symbols being in the global symbol table.

In this mode, TensorFlow artifacts (_pywrap_tensorflow.so for Python, libtensorflow.so for the C API; currently excluding Android artifacts) depend on the framework shared object, which is packaged with the Python pip package and other language distributions. This means that custom ops targeting the framework shared object work in any language (C++, Java, Go); previously, custom ops in these languages required custom Bazel builds.

Adds a config option (--config=monolithic) which reproduces the old behavior. For Python this means building a monolithic pywrap_tensorflow shared object and loading its symbols into the global symbol table (with RTLD_GLOBAL). As before, there is no extra-Bazel custom op support for other languages when compiling in this mode.

Does not change behavior on Windows; the cmake build is still monolithic.

Requires using the tf_cc_binary, tf_cc_test, and (rarely) tf_cc_shared_object rules to link in the framework shared object when adding new TensorFlow build rules.

PiperOrigin-RevId: 169572746
commit 5c7f9e316d
parent 054b88233b
Changed paths:
configure.py
tensorflow/BUILD
tensorflow/cc
tensorflow/compiler/aot
tensorflow/compiler/jit
tensorflow/compiler/tf2xla
tensorflow/compiler/xla
tensorflow/contrib/batching
tensorflow/contrib/boosted_trees/lib
tensorflow/contrib/cudnn_rnn
tensorflow/contrib/factorization/kernels
tensorflow/contrib/ffmpeg/default
tensorflow/contrib/hvx
tensorflow/contrib/learn/python/learn/learn_io
tensorflow/contrib/saved_model/cc/saved_model
tensorflow/contrib/session_bundle
tensorflow/contrib/tensor_forest
tensorflow/contrib/tpu/profiler
tensorflow/contrib/util
tensorflow/contrib/xla_tf_graph
tensorflow/core
tensorflow/docs_src/extend
tensorflow/examples
tensorflow/java
tensorflow/python
tensorflow/stream_executor
tensorflow/tensorflow.bzl
tensorflow/tools/benchmark
tensorflow/tools/ci_build/builds
tensorflow/tools/graph_transforms
tensorflow/tools/lib_package
tensorflow/tools/mlpbtxt
tensorflow/tools/pip_package
tensorflow/tools/proto_text
third_party
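As a concrete illustration of the registration path the commit message describes: after this change a custom op shared object links against libtensorflow_framework.so and is loaded through the existing tf.load_op_library API. The minimal Python sketch below assumes a hypothetical library file custom_zero_out.so exporting a ZeroOut op; neither is produced by this diff.

# Sketch only: the .so name and the op name are placeholders.
import tensorflow as tf

# dlopen()s the custom op library. Its REGISTER_OP / REGISTER_KERNEL_BUILDER
# calls resolve against symbols in libtensorflow_framework.so (already loaded
# by pywrap_tensorflow) instead of the process-global symbol table.
zero_out_module = tf.load_op_library('./custom_zero_out.so')

with tf.Session() as sess:
  print(sess.run(zero_out_module.zero_out([[1, 2], [3, 4]])))

The same .so can be loaded from the C, Java, or Go bindings via TF_LoadLibrary, since registration goes through the shared framework object rather than language-specific symbols.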
configure.py (14 changed lines)
@@ -949,6 +949,19 @@ def set_mkl():
           'time before build.')
 
 
+def set_monolithic():
+  # Add --config=monolithic to your bazel command to use a mostly-static
+  # build and disable modular op registration support (this will revert to
+  # loading TensorFlow with RTLD_GLOBAL in Python). By default (without
+  # --config=monolithic), TensorFlow will build with a dependence on
+  # //tensorflow:libtensorflow_framework.so.
+  write_to_bazelrc('build:monolithic --define framework_shared_object=false')
+  # For projects which use TensorFlow as part of a Bazel build process, putting
+  # nothing in a bazelrc will default to a monolithic build. The following line
+  # opts in to modular op registration support by default:
+  write_to_bazelrc('build --define framework_shared_object=true')
+
+
 def main():
   # Make a copy of os.environ to be clear when functions and getting and setting
   # environment variables.
@@ -1015,6 +1028,7 @@ def main():
 
   set_cc_opt_flags(environ_cp)
   set_mkl()
+  set_monolithic()
 
 
 if __name__ == '__main__':
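The set_monolithic() addition above relies on the write_to_bazelrc helper defined elsewhere in configure.py. A minimal sketch of the assumed behavior, for readers following along; the real helper and the bazelrc file name may differ:

# Assumed helper behavior: append one bazel option per call to the generated
# bazelrc file. '.tf_configure.bazelrc' is a placeholder name, not taken from
# this diff.
_TF_BAZELRC = '.tf_configure.bazelrc'

def write_to_bazelrc(line):
  with open(_TF_BAZELRC, 'a') as f:
    f.write(line + '\n')

With the default 'build --define framework_shared_object=true' line written, running bazel with --config=monolithic expands the 'build:monolithic' line at the point where the flag appears on the command line, so framework_shared_object is flipped back to false for that build.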
tensorflow/BUILD (114 changed lines)
@@ -14,6 +14,12 @@ exports_files([
     "leakr_badfiles.dic",
 ])
 
+load("//tensorflow:tensorflow.bzl", "tf_cc_shared_object")
+load(
+    "//tensorflow/core:platform/default/build_config.bzl",
+    "tf_additional_binary_deps",
+)
+
 # Config setting for determining if we are building for Android.
 config_setting(
     name = "android",
@@ -197,6 +203,70 @@ config_setting(
     visibility = ["//visibility:public"],
 )
 
+# Crosses between framework_shared_object and a bunch of other configurations
+# due to limitations in nested select() statements.
+config_setting(
+    name = "framework_shared_object",
+    define_values = {
+        "framework_shared_object": "true",
+    },
+    visibility = ["//visibility:public"],
+)
+
+config_setting(
+    name = "with_jemalloc_linux_x86_64_dynamic",
+    define_values = {
+        "with_jemalloc": "true",
+        "framework_shared_object": "true",
+    },
+    values = {
+        "cpu": "k8",
+    },
+    visibility = ["//visibility:public"],
+)
+
+config_setting(
+    name = "with_jemalloc_linux_ppc64le_dynamic",
+    define_values = {
+        "with_jemalloc": "true",
+        "framework_shared_object": "true",
+    },
+    values = {
+        "cpu": "ppc",
+    },
+    visibility = ["//visibility:public"],
+)
+
+config_setting(
+    name = "using_cuda_clang",
+    define_values = {
+        "using_cuda_clang": "true",
+    },
+)
+
+config_setting(
+    name = "using_cuda_clang_with_dynamic_build",
+    define_values = {
+        "using_cuda_clang": "true",
+        "framework_shared_object": "true",
+    },
+)
+
+config_setting(
+    name = "using_cuda_nvcc",
+    define_values = {
+        "using_cuda_nvcc": "true",
+    },
+)
+
+config_setting(
+    name = "using_cuda_nvcc_with_dynamic_build",
+    define_values = {
+        "using_cuda_nvcc": "true",
+        "framework_shared_object": "true",
+    },
+)
+
 config_setting(
     name = "with_mpi_support",
     values = {"define": "with_mpi_support=true"},
@@ -451,6 +521,44 @@ filegroup(
     data = glob(["docs_src/**/*.md"]),
 )
 
+# A shared object which includes registration mechanisms for ops and
+# kernels. Does not include the implementations of any ops or kernels. Instead,
+# the library which loads libtensorflow_framework.so
+# (e.g. _pywrap_tensorflow_internal.so for Python, libtensorflow.so for the C
+# API) is responsible for registering ops with libtensorflow_framework.so. In
+# addition to this core set of ops, user libraries which are loaded (via
+# TF_LoadLibrary/tf.load_op_library) register their ops and kernels with this
+# shared object directly.
+#
+# For example, from Python tf.load_op_library loads a custom op library (via
+# dlopen() on Linux), the library finds libtensorflow_framework.so (no
+# filesystem search takes place, since libtensorflow_framework.so has already
+# been loaded by pywrap_tensorflow) and registers its ops and kernels via
+# REGISTER_OP and REGISTER_KERNEL_BUILDER (which use symbols from
+# libtensorflow_framework.so), and pywrap_tensorflow can then use these
+# ops. Since other languages use the same libtensorflow_framework.so, op
+# libraries are language agnostic.
+#
+# This shared object is not used unless framework_shared_object=true (set in the
+# configure script unconditionally); otherwise if it is false or undefined, the
+# build is static and TensorFlow symbols (in Python only) are loaded into the
+# global symbol table in order to support op registration. This means that
+# projects building with Bazel and importing TensorFlow as a dependency will not
+# depend on libtensorflow_framework.so unless they opt in.
+tf_cc_shared_object(
+    name = "libtensorflow_framework.so",
+    framework_so = [],
+    linkstatic = 1,
+    visibility = ["//visibility:public"],
+    deps = [
+        "//tensorflow/core:framework_internal_impl",
+        "//tensorflow/core:lib_internal_impl",
+        "//tensorflow/core:core_cpu_impl",
+        "//tensorflow/stream_executor:stream_executor_impl",
+        "//tensorflow/core:gpu_runtime_impl",
+    ] + tf_additional_binary_deps(),
+)
+
 # -------------------------------------------
 # New rules should be added above this target.
 # -------------------------------------------
@@ -465,7 +573,7 @@ filegroup(
 # an "-exported_symbols_list" command. -z defs disallows undefined
 # symbols in object files and -s strips the output.
 
-cc_binary(
+tf_cc_shared_object(
     name = "libtensorflow.so",
     linkopts = select({
         "//tensorflow:darwin": [
@@ -482,7 +590,6 @@ cc_binary(
             "//tensorflow/c:version_script.lds",
         ],
     }),
-    linkshared = 1,
     deps = [
         "//tensorflow/c:c_api",
         "//tensorflow/c:exported_symbols.lds",
@@ -492,7 +599,7 @@ cc_binary(
     ],
 )
 
-cc_binary(
+tf_cc_shared_object(
     name = "libtensorflow_cc.so",
     linkopts = select({
         "//tensorflow:darwin": [
@@ -508,7 +615,6 @@ cc_binary(
            "//tensorflow:tf_version_script.lds",
         ],
     }),
-    linkshared = 1,
     deps = [
         "//tensorflow:tf_exported_symbols.lds",
         "//tensorflow:tf_version_script.lds",
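The libtensorflow_framework.so comment earlier in tensorflow/BUILD notes that the framework shared object ships inside the pip package, which is what makes out-of-tree custom op builds possible. A small Python sketch for locating the relevant paths; tf.sysconfig.get_include() is an existing API, while get_lib() is assumed here and may not be present in every release:

# Sketch: find the headers and the directory that contains
# libtensorflow_framework.so in an installed pip package.
import tensorflow as tf

include_dir = tf.sysconfig.get_include()          # C++ headers (REGISTER_OP, OpKernel, ...)
get_lib = getattr(tf.sysconfig, 'get_lib', None)  # may not exist in older releases
lib_dir = get_lib() if get_lib else None

print('headers:', include_dir)
print('framework library directory:', lib_dir)

A custom op would then typically be compiled with -I<include_dir> and linked with -L<lib_dir> -ltensorflow_framework.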
@@ -11,6 +11,7 @@ licenses(["notice"])  # Apache 2.0
 load(
     "//tensorflow:tensorflow.bzl",
     "tf_cc_test",
+    "tf_cc_binary",
     "tf_copts",
     "tf_gen_op_wrappers_cc",
     "cc_library_with_android_deps",
@@ -536,7 +537,7 @@ tf_gen_op_wrappers_cc(
     ],
 )
 
-cc_binary(
+tf_cc_binary(
     name = "tutorials_example_trainer",
     srcs = ["tutorials/example_trainer.cc"],
     copts = tf_copts(),
|
||||
|
@ -5,6 +5,8 @@ package(
|
||||
)
|
||||
|
||||
load("//tensorflow/compiler/aot:tfcompile.bzl", "tf_library")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
|
||||
# Optional runtime utilities for use by code generated by tfcompile.
|
||||
cc_library(
|
||||
@ -17,7 +19,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "runtime_test",
|
||||
srcs = ["runtime_test.cc"],
|
||||
deps = [
|
||||
@ -76,7 +78,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "codegen_test",
|
||||
srcs = ["codegen_test.cc"],
|
||||
data = ["codegen_test_h.golden"],
|
||||
@ -89,7 +91,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "tfcompile",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [":tfcompile_main"],
|
||||
@ -188,7 +190,7 @@ cc_library(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "benchmark_test",
|
||||
srcs = ["benchmark_test.cc"],
|
||||
tags = ["manual"],
|
||||
|
@ -5,6 +5,7 @@ package(
|
||||
)
|
||||
|
||||
load("//tensorflow/compiler/aot:tfcompile.bzl", "tf_library")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
test_suite(
|
||||
name = "all_tests",
|
||||
@ -137,7 +138,7 @@ tf_library(
|
||||
tags = ["manual"],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "tfcompile_test",
|
||||
srcs = ["tfcompile_test.cc"],
|
||||
tags = ["manual"],
|
||||
|
@ -22,6 +22,7 @@ package(
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "cc_header_only_library")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_kernel_library")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda")
|
||||
load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda_is_configured")
|
||||
|
||||
@ -260,7 +261,7 @@ cc_library(
|
||||
hdrs = ["union_find.h"],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "graph_to_functiondef_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
@ -282,7 +283,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "compilation_passes_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
|
@ -6,6 +6,8 @@ package(
|
||||
],
|
||||
)
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
cc_library(
|
||||
name = "graphcycles",
|
||||
srcs = ["graphcycles.cc"],
|
||||
@ -15,7 +17,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "graphcycles_test",
|
||||
srcs = ["graphcycles_test.cc"],
|
||||
deps = [
|
||||
|
@ -1,5 +1,7 @@
|
||||
licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
package_group(
|
||||
name = "internal",
|
||||
packages = [
|
||||
@ -145,7 +147,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "tf2xla_util_test",
|
||||
srcs = ["tf2xla_util_test.cc"],
|
||||
deps = [
|
||||
@ -157,7 +159,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "tf2xla_test",
|
||||
srcs = ["tf2xla_test.cc"],
|
||||
deps = [
|
||||
@ -176,7 +178,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "xla_compiler_test",
|
||||
srcs = ["xla_compiler_test.cc"],
|
||||
deps = [
|
||||
@ -199,7 +201,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "str_util_test",
|
||||
srcs = [
|
||||
"str_util_test.cc",
|
||||
@ -212,7 +214,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "literal_util_test",
|
||||
srcs = [
|
||||
"literal_util_test.cc",
|
||||
@ -240,7 +242,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "const_analysis_test",
|
||||
size = "small",
|
||||
srcs = ["const_analysis_test.cc"],
|
||||
@ -300,7 +302,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "functionalize_control_flow_test",
|
||||
srcs = ["functionalize_control_flow_test.cc"],
|
||||
deps = [
|
||||
|
@ -19,6 +19,7 @@ package_group(
|
||||
)
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "cc_header_only_library")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load("//tensorflow/compiler/xla:xla.bzl", "xla_proto_library")
|
||||
|
||||
# Filegroup used to collect source files for dependency checking.
|
||||
@ -105,7 +106,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "status_macros_test",
|
||||
size = "small",
|
||||
srcs = ["status_macros_test.cc"],
|
||||
@ -144,7 +145,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "statusor_test",
|
||||
size = "small",
|
||||
srcs = ["statusor_test.cc"],
|
||||
@ -191,7 +192,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "util_test",
|
||||
srcs = ["util_test.cc"],
|
||||
deps = [
|
||||
@ -230,7 +231,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "shape_util_test",
|
||||
srcs = ["shape_util_test.cc"],
|
||||
deps = [
|
||||
@ -244,7 +245,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "layout_util_test",
|
||||
srcs = ["layout_util_test.cc"],
|
||||
deps = [
|
||||
@ -255,7 +256,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "index_util_test",
|
||||
srcs = ["index_util_test.cc"],
|
||||
deps = [
|
||||
@ -284,7 +285,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "literal_util_test",
|
||||
srcs = ["literal_util_test.cc"],
|
||||
deps = [
|
||||
@ -334,7 +335,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "array2d_test",
|
||||
srcs = ["array2d_test.cc"],
|
||||
deps = [
|
||||
@ -354,7 +355,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "array3d_test",
|
||||
srcs = ["array3d_test.cc"],
|
||||
deps = [
|
||||
@ -376,7 +377,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "array4d_test",
|
||||
srcs = ["array4d_test.cc"],
|
||||
deps = [
|
||||
@ -443,7 +444,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "text_literal_reader_test",
|
||||
srcs = ["text_literal_reader_test.cc"],
|
||||
deps = [
|
||||
@ -473,7 +474,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "text_literal_writer_test",
|
||||
srcs = ["text_literal_writer_test.cc"],
|
||||
deps = [
|
||||
@ -501,7 +502,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "shape_tree_test",
|
||||
srcs = ["shape_tree_test.cc"],
|
||||
deps = [
|
||||
@ -561,7 +562,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "reference_util_test",
|
||||
srcs = ["reference_util_test.cc"],
|
||||
deps = [
|
||||
|
@ -21,6 +21,8 @@ filegroup(
|
||||
]),
|
||||
)
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
cc_library(
|
||||
name = "global_data",
|
||||
srcs = ["global_data.cc"],
|
||||
@ -44,7 +46,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "padding_test",
|
||||
srcs = ["padding_test.cc"],
|
||||
deps = [
|
||||
|
@ -15,6 +15,8 @@ package(default_visibility = ["//tensorflow:internal"])
|
||||
|
||||
licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
cc_library(
|
||||
name = "parse_flags_from_env",
|
||||
srcs = ["parse_flags_from_env.cc"],
|
||||
@ -27,7 +29,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "parse_flags_from_env_test",
|
||||
srcs = ["parse_flags_from_env_test.cc"],
|
||||
deps =
|
||||
@ -57,7 +59,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "debug_options_parsers_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
|
@ -13,6 +13,8 @@ package_group(
|
||||
)
|
||||
|
||||
load("//tensorflow/compiler/xla:xla.bzl", "xla_proto_library")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
|
||||
xla_proto_library(
|
||||
name = "session_proto",
|
||||
@ -53,7 +55,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "shape_inference_test",
|
||||
srcs = ["shape_inference_test.cc"],
|
||||
deps = [
|
||||
@ -67,7 +69,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_opcode_test",
|
||||
srcs = ["hlo_opcode_test.cc"],
|
||||
deps = [
|
||||
@ -99,7 +101,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_evaluator_test",
|
||||
srcs = ["hlo_evaluator_test.cc"],
|
||||
deps = [
|
||||
@ -173,7 +175,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_reachability_test",
|
||||
srcs = ["hlo_reachability_test.cc"],
|
||||
deps = [
|
||||
@ -198,7 +200,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_matchers_test",
|
||||
srcs = ["hlo_matchers_test.cc"],
|
||||
deps = [
|
||||
@ -219,7 +221,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_instruction_test",
|
||||
srcs = ["hlo_instruction_test.cc"],
|
||||
deps = [
|
||||
@ -247,7 +249,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "call_graph_test",
|
||||
srcs = ["call_graph_test.cc"],
|
||||
deps = [
|
||||
@ -292,7 +294,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "call_inliner_test",
|
||||
size = "small",
|
||||
srcs = ["call_inliner_test.cc"],
|
||||
@ -314,7 +316,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "flatten_call_graph_test",
|
||||
srcs = ["flatten_call_graph_test.cc"],
|
||||
deps = [
|
||||
@ -355,7 +357,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "user_computation_test",
|
||||
srcs = ["user_computation_test.cc"],
|
||||
deps = [
|
||||
@ -729,7 +731,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "liveness_util_test",
|
||||
srcs = ["liveness_util_test.cc"],
|
||||
deps = [
|
||||
@ -764,7 +766,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "buffer_liveness_test",
|
||||
srcs = ["buffer_liveness_test.cc"],
|
||||
deps = [
|
||||
@ -806,7 +808,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "buffer_assignment_test",
|
||||
srcs = ["buffer_assignment_test.cc"],
|
||||
deps = [
|
||||
@ -852,7 +854,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_ordering_test",
|
||||
size = "small",
|
||||
srcs = ["hlo_ordering_test.cc"],
|
||||
@ -886,7 +888,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "heap_simulator_test",
|
||||
srcs = ["heap_simulator_test.cc"],
|
||||
deps = [
|
||||
@ -922,7 +924,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_scheduling_test",
|
||||
srcs = ["hlo_scheduling_test.cc"],
|
||||
deps = [
|
||||
@ -960,7 +962,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "instruction_fusion_test",
|
||||
srcs = ["instruction_fusion_test.cc"],
|
||||
deps = [
|
||||
@ -991,7 +993,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "batchnorm_rewriter_test",
|
||||
size = "small",
|
||||
srcs = ["batchnorm_rewriter_test.cc"],
|
||||
@ -1032,7 +1034,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "algebraic_simplifier_test",
|
||||
srcs = ["algebraic_simplifier_test.cc"],
|
||||
deps = [
|
||||
@ -1066,7 +1068,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "reshape_mover_test",
|
||||
srcs = ["reshape_mover_test.cc"],
|
||||
deps = [
|
||||
@ -1100,7 +1102,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "inliner_test",
|
||||
srcs = ["inliner_test.cc"],
|
||||
deps = [
|
||||
@ -1234,7 +1236,7 @@ cc_library(
|
||||
alwayslink = True, # Contains per-platform transfer manager registration
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "transfer_manager_test",
|
||||
srcs = ["transfer_manager_test.cc"],
|
||||
deps = [
|
||||
@ -1267,7 +1269,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_cost_analysis_test",
|
||||
srcs = ["hlo_cost_analysis_test.cc"],
|
||||
deps = [
|
||||
@ -1309,7 +1311,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_computation_test",
|
||||
srcs = ["hlo_computation_test.cc"],
|
||||
deps = [
|
||||
@ -1324,7 +1326,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "graphviz_example",
|
||||
srcs = ["graphviz_example.cc"],
|
||||
deps = [
|
||||
@ -1339,7 +1341,7 @@ cc_binary(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_module_test",
|
||||
srcs = ["hlo_module_test.cc"],
|
||||
deps = [
|
||||
@ -1404,7 +1406,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_dataflow_analysis_test",
|
||||
srcs = ["hlo_dataflow_analysis_test.cc"],
|
||||
deps = [
|
||||
@ -1462,7 +1464,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_alias_analysis_test",
|
||||
srcs = ["hlo_alias_analysis_test.cc"],
|
||||
deps = [
|
||||
@ -1517,7 +1519,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "tuple_points_to_analysis_test",
|
||||
srcs = ["tuple_points_to_analysis_test.cc"],
|
||||
deps = [
|
||||
@ -1596,7 +1598,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "copy_insertion_test",
|
||||
srcs = ["copy_insertion_test.cc"],
|
||||
deps = [
|
||||
@ -1666,7 +1668,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_rematerialization_test",
|
||||
srcs = ["hlo_rematerialization_test.cc"],
|
||||
deps = [
|
||||
@ -1682,7 +1684,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_dce_test",
|
||||
srcs = ["hlo_dce_test.cc"],
|
||||
deps = [
|
||||
@ -1702,7 +1704,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "layout_assignment_test",
|
||||
srcs = ["layout_assignment_test.cc"],
|
||||
deps = [
|
||||
@ -1775,7 +1777,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_cse_test",
|
||||
srcs = ["hlo_cse_test.cc"],
|
||||
deps = [
|
||||
@ -1812,7 +1814,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_constant_folding_test",
|
||||
srcs = ["hlo_constant_folding_test.cc"],
|
||||
deps = [
|
||||
@ -1904,7 +1906,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_subcomputation_unification_test",
|
||||
srcs = ["hlo_subcomputation_unification_test.cc"],
|
||||
deps = [
|
||||
@ -1933,7 +1935,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_tfgraph_builder_test",
|
||||
srcs = ["hlo_tfgraph_builder_test.cc"],
|
||||
deps = [
|
||||
@ -1980,7 +1982,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "transpose_folding_test",
|
||||
srcs = ["transpose_folding_test.cc"],
|
||||
deps = [
|
||||
@ -2009,7 +2011,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "pool_test",
|
||||
srcs = ["pool_test.cc"],
|
||||
deps = [
|
||||
@ -2045,7 +2047,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "reduce_precision_insertion_test",
|
||||
size = "small",
|
||||
srcs = ["reduce_precision_insertion_test.cc"],
|
||||
|
@ -15,6 +15,8 @@ package_group(
|
||||
)
|
||||
|
||||
load(":build_defs.bzl", "runtime_copts")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
|
||||
# Filegroup used to collect source files for dependency checking.
|
||||
filegroup(
|
||||
@ -243,7 +245,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "sample_harness",
|
||||
srcs = ["sample_harness.cc"],
|
||||
deps = [
|
||||
@ -455,7 +457,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "cpu_runtime_test",
|
||||
srcs = ["cpu_runtime_test.cc"],
|
||||
deps = [
|
||||
@ -474,7 +476,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "cpu_instruction_fusion_test",
|
||||
srcs = ["cpu_instruction_fusion_test.cc"],
|
||||
deps = [
|
||||
@ -487,7 +489,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "xfeed_manager_test",
|
||||
size = "small",
|
||||
srcs = ["xfeed_manager_test.cc"],
|
||||
@ -554,7 +556,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "layout_assignment_test",
|
||||
size = "small",
|
||||
srcs = ["layout_assignment_test.cc"],
|
||||
@ -593,7 +595,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "conv_canonicalization_test",
|
||||
srcs = ["conv_canonicalization_test.cc"],
|
||||
deps = [
|
||||
@ -616,7 +618,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "shape_partition_test",
|
||||
srcs = ["shape_partition_test.cc"],
|
||||
deps = [
|
||||
|
@ -21,6 +21,8 @@ filegroup(
|
||||
]),
|
||||
)
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
cc_library(
|
||||
name = "partition_assignment",
|
||||
srcs = [
|
||||
@ -41,7 +43,7 @@ cc_library(
|
||||
|
||||
# TODO(b/29140563) This target is flaky, disabled until flakiness is
|
||||
# root-caused. Failed on 2016-06-08.
|
||||
#cc_test(
|
||||
#tf_cc_test(
|
||||
# name = "partition_assignment_test",
|
||||
# srcs = [
|
||||
# "partition_assignment_test.cc",
|
||||
@ -74,7 +76,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "stream_assignment_test",
|
||||
srcs = [
|
||||
"stream_assignment_test.cc",
|
||||
@ -302,7 +304,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "convolution_folding_test",
|
||||
srcs = ["convolution_folding_test.cc"],
|
||||
deps = [
|
||||
@ -328,7 +330,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "instruction_fusion_test",
|
||||
srcs = ["instruction_fusion_test.cc"],
|
||||
deps = [
|
||||
@ -368,7 +370,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "fusion_merger_test",
|
||||
srcs = ["fusion_merger_test.cc"],
|
||||
deps = [
|
||||
@ -477,7 +479,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "layout_assignment_test",
|
||||
srcs = ["layout_assignment_test.cc"],
|
||||
deps = [
|
||||
@ -508,7 +510,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_schedule_test",
|
||||
srcs = [
|
||||
"hlo_schedule_test.cc",
|
||||
@ -539,7 +541,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "while_transformer_test",
|
||||
srcs = ["while_transformer_test.cc"],
|
||||
deps = [
|
||||
|
@ -11,6 +11,8 @@ package_group(
|
||||
],
|
||||
)
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
cc_library(
|
||||
name = "llvm_gpu_backend",
|
||||
srcs = [
|
||||
@ -49,7 +51,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "utils_test",
|
||||
size = "small",
|
||||
srcs = ["utils_test.cc"],
|
||||
|
@ -2,6 +2,11 @@ licenses(["restricted"])
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"//tensorflow/core:platform/default/build_config_root.bzl",
|
||||
"if_static",
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "compiler",
|
||||
srcs = ["compiler.cc"],
|
||||
@ -43,11 +48,12 @@ cc_library(
|
||||
srcs = ["platform_id.cc"],
|
||||
hdrs = ["platform_id.h"],
|
||||
deps = [
|
||||
"//tensorflow/core:stream_executor_headers_lib",
|
||||
"@nsync//:nsync_headers",
|
||||
"@protobuf_archive//:protobuf_headers",
|
||||
"@protobuf_archive//:protoc_lib",
|
||||
],
|
||||
"//tensorflow/core:stream_executor_headers_lib",
|
||||
] + if_static(
|
||||
["@protobuf_archive//:protobuf"],
|
||||
["@protobuf_archive//:protobuf_headers"],
|
||||
),
|
||||
)
|
||||
|
||||
cc_library(
|
||||
|
@ -28,6 +28,8 @@ load("//tensorflow/compiler/xla/tests:build_defs.bzl", "xla_test")
|
||||
load("//tensorflow/compiler/xla/tests:build_defs.bzl", "xla_test_library")
|
||||
load("//tensorflow/compiler/xla/tests:build_defs.bzl", "generate_backend_suites")
|
||||
load("//tensorflow/compiler/xla/tests:build_defs.bzl", "generate_backend_test_macros")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
# Generate test_suites for all backends, named "${backend}_tests".
|
||||
generate_backend_suites()
|
||||
@ -125,7 +127,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "local_client_aot_test_helper",
|
||||
srcs = ["local_client_aot_test_helper.cc"],
|
||||
deps = [
|
||||
@ -1422,7 +1424,7 @@ xla_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "local_client_aot_test",
|
||||
srcs = [
|
||||
"local_client_aot_test.cc",
|
||||
@ -1437,7 +1439,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "hlo_metadata_test",
|
||||
srcs = [
|
||||
"hlo_metadata_test.cc",
|
||||
@ -1524,7 +1526,7 @@ xla_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "literal_test_util_test",
|
||||
srcs = ["literal_test_util_test.cc"],
|
||||
deps = [
|
||||
|
@@ -2,6 +2,7 @@
 
 load("@local_config_cuda//cuda:build_defs.bzl", "cuda_is_configured")
 load("//tensorflow/compiler/xla/tests:plugin.bzl", "plugins")
+load("//tensorflow:tensorflow.bzl", "tf_cc_test")
 
 all_backends = ["cpu", "cpu_parallel", "gpu"] + plugins.keys()
 
@@ -140,11 +141,11 @@ def xla_test(name,
     for lib_dep in xla_test_library_deps:
       backend_deps += ["%s_%s" % (lib_dep, backend)]
 
-    native.cc_test(
+    tf_cc_test(
        name=test_name,
        srcs=srcs,
        tags=tags + backend_tags.get(backend, []) + this_backend_tags,
-       copts=copts + ["-DXLA_TEST_BACKEND_%s=1" % backend.upper()] +
+       extra_copts=copts + ["-DXLA_TEST_BACKEND_%s=1" % backend.upper()] +
              this_backend_copts,
        args=args + this_backend_args,
        deps=deps + backend_deps,
|
||||
|
@ -14,7 +14,9 @@ filegroup(
|
||||
visibility = ["//tensorflow/compiler/xla:internal"],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
|
||||
tf_cc_binary(
|
||||
name = "hex_floats_to_packed_literal",
|
||||
srcs = ["hex_floats_to_packed_literal.cc"],
|
||||
deps = [
|
||||
@ -43,14 +45,14 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "dumped_computation_to_graphviz",
|
||||
deps = [
|
||||
":dumped_computation_to_graphviz_library",
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "show_signature",
|
||||
srcs = ["show_signature.cc"],
|
||||
deps = [
|
||||
@ -90,7 +92,7 @@ cc_library(
|
||||
alwayslink = True,
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "replay_computation_cpu",
|
||||
deps = [
|
||||
":replay_computation_library",
|
||||
@ -98,7 +100,7 @@ cc_binary(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "replay_computation_gpu",
|
||||
deps = [
|
||||
":replay_computation_library",
|
||||
@ -106,7 +108,7 @@ cc_binary(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "replay_computation_interpreter",
|
||||
deps = [
|
||||
":replay_computation_library",
|
||||
@ -114,7 +116,7 @@ cc_binary(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "show_literal",
|
||||
srcs = ["show_literal.cc"],
|
||||
deps = [
|
||||
@ -125,7 +127,7 @@ cc_binary(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "convert_computation",
|
||||
srcs = ["convert_computation.cc"],
|
||||
deps = [
|
||||
@ -136,7 +138,7 @@ cc_binary(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "show_text_literal",
|
||||
srcs = ["show_text_literal.cc"],
|
||||
deps = [
|
||||
@ -149,7 +151,7 @@ cc_binary(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "dumped_computation_to_text",
|
||||
srcs = ["dumped_computation_to_text.cc"],
|
||||
deps = [
|
||||
@ -167,7 +169,7 @@ cc_binary(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "dumped_computation_to_operation_list",
|
||||
srcs = ["dumped_computation_to_operation_list.cc"],
|
||||
deps = [
|
||||
@ -185,7 +187,7 @@ cc_binary(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "dumped_computation_to_tf_graphdef",
|
||||
srcs = ["dumped_computation_to_tf_graphdef.cc"],
|
||||
deps = [
|
||||
|
@@ -1,15 +1,20 @@
 """Wrapper around cc_proto_library used inside the XLA codebase."""
 
-load("@protobuf_archive//:protobuf.bzl", "cc_proto_library")
+load("//tensorflow/core:platform/default/build_config.bzl",
+     "cc_proto_library")
+load("//tensorflow/core:platform/default/build_config_root.bzl",
+     "if_static")
 
 # xla_proto_library() is a convenience wrapper around cc_proto_library.
 def xla_proto_library(name, srcs=[], deps=[], visibility=None, testonly=0):
   cc_proto_library(name=name,
                    srcs=srcs,
                    deps=deps,
-                   cc_libs = ["@protobuf_archive//:protobuf"],
+                   cc_libs = if_static(
+                       ["@protobuf_archive//:protobuf"],
+                       otherwise=["@protobuf_archive//:protobuf_headers"],
+                   ),
                    protoc="@protobuf_archive//:protoc",
                    default_runtime="@protobuf_archive//:protobuf",
                    testonly=testonly,
                    visibility=visibility,)
|
||||
|
||||
|
@ -6,6 +6,8 @@ package(
|
||||
|
||||
licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
cc_library(
|
||||
name = "batch_scheduler_hdrs",
|
||||
hdrs = ["batch_scheduler.h"],
|
||||
@ -22,7 +24,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "batch_scheduler_test",
|
||||
srcs = ["batch_scheduler_test.cc"],
|
||||
deps = [
|
||||
@ -54,7 +56,7 @@ cc_library(
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "shared_batch_scheduler_test",
|
||||
srcs = ["shared_batch_scheduler_test.cc"],
|
||||
deps = [
|
||||
@ -75,7 +77,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "basic_batch_scheduler_test",
|
||||
srcs = ["basic_batch_scheduler_test.cc"],
|
||||
deps = [
|
||||
@ -87,7 +89,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "basic_batch_scheduler_benchmark",
|
||||
srcs = ["basic_batch_scheduler_benchmark.cc"],
|
||||
tags = [
|
||||
|
@ -6,6 +6,8 @@ package(
|
||||
|
||||
licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
filegroup(
|
||||
name = "all_files",
|
||||
srcs = glob(
|
||||
@ -38,7 +40,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "periodic_function_test",
|
||||
srcs = ["periodic_function_test.cc"],
|
||||
deps = [
|
||||
|
@ -12,6 +12,8 @@ package(
|
||||
)
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "py_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
|
||||
filegroup(
|
||||
name = "all_files",
|
||||
@ -55,7 +57,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "sparse_column_iterable_test",
|
||||
size = "small",
|
||||
srcs = ["utils/sparse_column_iterable_test.cc"],
|
||||
@ -67,7 +69,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "examples_iterable_test",
|
||||
size = "small",
|
||||
srcs = ["utils/examples_iterable_test.cc"],
|
||||
@ -79,7 +81,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "batch_features_test",
|
||||
size = "small",
|
||||
srcs = ["utils/batch_features_test.cc"],
|
||||
@ -92,7 +94,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "dropout_utils_test",
|
||||
size = "small",
|
||||
srcs = ["utils/dropout_utils_test.cc"],
|
||||
@ -120,7 +122,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "multiple_additive_trees_test",
|
||||
size = "small",
|
||||
srcs = ["models/multiple_additive_trees_test.cc"],
|
||||
@ -163,7 +165,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "random_tree_gen_main",
|
||||
srcs = ["testutil/random_tree_gen_main.cc"],
|
||||
deps = [
|
||||
@ -189,7 +191,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "weighted_quantiles_buffer_test",
|
||||
size = "small",
|
||||
srcs = ["quantiles/weighted_quantiles_buffer_test.cc"],
|
||||
@ -202,7 +204,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "weighted_quantiles_summary_test",
|
||||
size = "small",
|
||||
srcs = ["quantiles/weighted_quantiles_summary_test.cc"],
|
||||
@ -215,7 +217,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "weighted_quantiles_stream_test",
|
||||
size = "small",
|
||||
srcs = ["quantiles/weighted_quantiles_stream_test.cc"],
|
||||
@ -240,7 +242,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "trees_test",
|
||||
size = "small",
|
||||
srcs = ["trees/decision_tree_test.cc"],
|
||||
@ -336,7 +338,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "feature-stats-accumulator_test",
|
||||
size = "small",
|
||||
srcs = ["learner/common/accumulators/feature-stats-accumulator_test.cc"],
|
||||
@ -360,7 +362,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "example_partitioner_test",
|
||||
size = "small",
|
||||
srcs = ["learner/common/partitioners/example_partitioner_test.cc"],
|
||||
@ -398,7 +400,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "feature-column-handlers_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
@ -453,7 +455,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "node-stats_test",
|
||||
size = "small",
|
||||
srcs = ["learner/stochastic/stats/node-stats_test.cc"],
|
||||
|
@ -15,6 +15,7 @@ load("//tensorflow:tensorflow.bzl", "tf_gen_op_wrapper_py")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_kernel_library")
|
||||
load("//tensorflow:tensorflow.bzl", "cuda_py_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_custom_op_py_library")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
tf_custom_op_library(
|
||||
name = "python/ops/_cudnn_rnn_ops.so",
|
||||
@ -136,7 +137,7 @@ cuda_py_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "cudnn_rnn_ops_test_cc",
|
||||
size = "small",
|
||||
srcs = [
|
||||
|
@ -6,6 +6,8 @@ exports_files(["LICENSE"])
|
||||
|
||||
package(default_visibility = ["//tensorflow:__subpackages__"])
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
cc_library(
|
||||
name = "all_kernels",
|
||||
deps = [
|
||||
@ -50,7 +52,7 @@ cc_library(
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "clustering_ops_test",
|
||||
srcs = ["clustering_ops_test.cc"],
|
||||
deps = [
|
||||
|
@ -8,6 +8,8 @@ exports_files(["LICENSE"])
|
||||
|
||||
package(default_visibility = ["//tensorflow:__subpackages__"])
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
cc_library(
|
||||
name = "ffmpeg_lib",
|
||||
srcs = ["ffmpeg_lib.cc"],
|
||||
@ -21,7 +23,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "ffmpeg_lib_installed_test",
|
||||
srcs = ["ffmpeg_lib_test.cc"],
|
||||
args = [
|
||||
@ -42,7 +44,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "ffmpeg_lib_uninstalled_test",
|
||||
srcs = ["ffmpeg_lib_test.cc"],
|
||||
args = [
|
||||
|
@ -6,6 +6,7 @@ licenses(["notice"]) # Apache 2.0
|
||||
load(
|
||||
"//tensorflow:tensorflow.bzl",
|
||||
"tf_copts",
|
||||
"tf_cc_binary",
|
||||
)
|
||||
|
||||
exports_files(["LICENSE"])
|
||||
@ -24,7 +25,7 @@ filegroup(
|
||||
visibility = ["//tensorflow:__subpackages__"],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "clock_cycle_profiling",
|
||||
testonly = 1,
|
||||
srcs = ["clock_cycle_profiling_main.cc"],
|
||||
|
@ -8,6 +8,8 @@ licenses(["notice"]) # Apache 2.0
|
||||
|
||||
exports_files(["LICENSE"])
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
|
||||
filegroup(
|
||||
name = "all_files",
|
||||
srcs = glob(
|
||||
@ -19,7 +21,7 @@ filegroup(
|
||||
),
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "hvx_ops_support_checker",
|
||||
testonly = 1,
|
||||
srcs = ["hvx_ops_support_checker_main.cc"],
|
||||
|
@@ -18,14 +18,6 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
 
-import sys
-
-# TODO: #6568 Remove this hack that makes dlopen() not crash.
-if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
-  import ctypes
-
-  sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
-
 import numpy as np
 from tensorflow.contrib.learn.python.learn.learn_io import generator_io
 from tensorflow.python.framework import errors
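The block removed above is the dlopen-flags workaround that this commit makes unnecessary: the test no longer has to force RTLD_GLOBAL before importing TensorFlow, because custom op libraries now resolve their symbols from libtensorflow_framework.so. For reference, a standalone sketch of what the removed hack did (standard-library calls only):

# What the deleted hack did: OR RTLD_GLOBAL into the interpreter's dlopen()
# flags so that symbols from the monolithic pywrap_tensorflow extension were
# visible to subsequently loaded custom op libraries.
import ctypes
import sys

if hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'):
  sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)

Under --config=monolithic the pywrap_tensorflow loader itself still applies RTLD_GLOBAL, so user code generally should not need this snippet in either mode.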
|
||||
|
@ -22,6 +22,8 @@ licenses(["notice"]) # Apache 2.0
|
||||
|
||||
exports_files(["LICENSE"])
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
cc_library(
|
||||
name = "signature_def_utils",
|
||||
srcs = ["signature_def_utils.cc"],
|
||||
@ -35,7 +37,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "signature_def_utils_test",
|
||||
srcs = ["signature_def_utils_test.cc"],
|
||||
deps = [
|
||||
|
@ -16,6 +16,7 @@ load(
|
||||
"if_mobile",
|
||||
"if_not_mobile",
|
||||
"py_test",
|
||||
"tf_cc_test",
|
||||
)
|
||||
|
||||
filegroup(
|
||||
@ -231,7 +232,7 @@ cc_library(
|
||||
]),
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "session_bundle_test",
|
||||
size = "medium",
|
||||
srcs = ["session_bundle_test.cc"],
|
||||
@ -339,7 +340,7 @@ cc_library(
|
||||
]),
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "signature_test",
|
||||
size = "small",
|
||||
srcs = ["signature_test.cc"],
|
||||
@ -395,7 +396,7 @@ cc_library(
|
||||
]),
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "bundle_shim_test",
|
||||
size = "small",
|
||||
srcs = ["bundle_shim_test.cc"],
|
||||
|
@ -4,10 +4,13 @@ licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "py_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_gen_op_libs")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_shared_object")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_gen_op_wrapper_py")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_custom_op_library")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_kernel_library")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_custom_op_py_library")
|
||||
load("//tensorflow/core:platform/default/build_config_root.bzl", "if_static")
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
@ -88,8 +91,15 @@ tf_custom_op_library(
|
||||
srcs = [
|
||||
":v2_op_defs",
|
||||
":v2_op_sources",
|
||||
] + if_static(
|
||||
extra_deps = [],
|
||||
otherwise = [
|
||||
":libforestprotos.so",
|
||||
],
|
||||
),
|
||||
deps = [
|
||||
":tree_utils",
|
||||
],
|
||||
deps = [":tree_utils"],
|
||||
)
|
||||
|
||||
py_library(
|
||||
@ -142,7 +152,7 @@ tf_custom_op_py_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "tensor_forest_ops_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
@ -190,17 +200,27 @@ py_library(
|
||||
# Model Ops.
|
||||
cc_library(
|
||||
name = "model_ops_lib",
|
||||
srcs = ["kernels/model_ops.cc"],
|
||||
srcs = [
|
||||
"kernels/model_ops.cc",
|
||||
],
|
||||
deps = [
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc",
|
||||
"//third_party/eigen3",
|
||||
"//tensorflow/contrib/tensor_forest:tree_utils",
|
||||
"//tensorflow/contrib/tensor_forest/kernels/v4:decision-tree-resource",
|
||||
"//tensorflow/contrib/tensor_forest/kernels/v4:input_data",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
"//tensorflow/core:framework_headers_lib",
|
||||
"//third_party/eigen3",
|
||||
],
|
||||
] + if_static(
|
||||
extra_deps = [
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
],
|
||||
otherwise = [
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc_headers_only",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc_headers_only",
|
||||
],
|
||||
),
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
@ -227,7 +247,14 @@ tf_kernel_library(
|
||||
|
||||
tf_custom_op_library(
|
||||
name = "python/ops/_model_ops.so",
|
||||
srcs = ["ops/model_ops.cc"],
|
||||
srcs = [
|
||||
"ops/model_ops.cc",
|
||||
] + if_static(
|
||||
extra_deps = [],
|
||||
otherwise = [
|
||||
":libforestprotos.so",
|
||||
],
|
||||
),
|
||||
deps = [":model_ops_lib"],
|
||||
)
|
||||
|
||||
@ -248,7 +275,7 @@ tf_custom_op_py_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "model_ops_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
@ -256,7 +283,9 @@ cc_test(
|
||||
"ops/model_ops.cc",
|
||||
],
|
||||
deps = [
|
||||
":forest_proto_impl",
|
||||
":model_ops_lib",
|
||||
"//tensorflow/contrib/tensor_forest/kernels/v4:decision-tree-resource_impl",
|
||||
"//tensorflow/core",
|
||||
"//tensorflow/core:framework_headers_lib",
|
||||
"//tensorflow/core:test",
|
||||
@ -271,16 +300,22 @@ cc_library(
|
||||
name = "stats_ops_lib",
|
||||
srcs = ["kernels/stats_ops.cc"],
|
||||
deps = [
|
||||
"//third_party/eigen3",
|
||||
"//tensorflow/contrib/tensor_forest:tree_utils",
|
||||
"//tensorflow/contrib/tensor_forest/kernels/v4:decision-tree-resource",
|
||||
"//tensorflow/contrib/tensor_forest/kernels/v4:fertile-stats-resource",
|
||||
"//tensorflow/contrib/tensor_forest/kernels/v4:input_data",
|
||||
"//tensorflow/contrib/tensor_forest/kernels/v4:input_target",
|
||||
"//tensorflow/contrib/tensor_forest/kernels/v4:params",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/core:framework_headers_lib",
|
||||
"//third_party/eigen3",
|
||||
],
|
||||
] + if_static(
|
||||
extra_deps = [
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
],
|
||||
otherwise = [
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc_headers_only",
|
||||
],
|
||||
),
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
@ -307,7 +342,14 @@ tf_kernel_library(
|
||||
|
||||
tf_custom_op_library(
|
||||
name = "python/ops/_stats_ops.so",
|
||||
srcs = ["ops/stats_ops.cc"],
|
||||
srcs = [
|
||||
"ops/stats_ops.cc",
|
||||
] + if_static(
|
||||
extra_deps = [],
|
||||
otherwise = [
|
||||
":libforestprotos.so",
|
||||
],
|
||||
),
|
||||
deps = [":stats_ops_lib"],
|
||||
)
|
||||
|
||||
@ -327,7 +369,7 @@ tf_custom_op_py_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "stats_ops_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
@ -335,7 +377,9 @@ cc_test(
|
||||
"ops/stats_ops.cc",
|
||||
],
|
||||
deps = [
|
||||
":forest_proto_impl",
|
||||
":stats_ops_lib",
|
||||
"//tensorflow/contrib/tensor_forest/kernels/v4:decision-tree-resource_impl",
|
||||
"//tensorflow/core",
|
||||
"//tensorflow/core:framework_headers_lib",
|
||||
"//tensorflow/core:test",
|
||||
@ -360,6 +404,30 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "forest_proto_impl",
|
||||
deps = [
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
],
|
||||
)
|
||||
|
||||
# Protocol buffer dependencies shared between multiple op shared objects. This
|
||||
# avoids attempting to register the same protocol buffer multiple times.
|
||||
tf_cc_shared_object(
|
||||
name = "libforestprotos.so",
|
||||
# This object does not depend on TensorFlow.
|
||||
framework_so = [],
|
||||
linkstatic = 1,
|
||||
deps = [
|
||||
":forest_proto_impl",
|
||||
"//tensorflow/contrib/tensor_forest/kernels/v4:decision-tree-resource_impl",
|
||||
"@protobuf_archive//:protobuf",
|
||||
],
|
||||
)
|
||||
|
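The libforestprotos.so rule in this section bundles the protocol buffer code shared by the tensor_forest custom op libraries, so that each op .so links the protos once instead of registering duplicate descriptors. A hedged Python sketch of the consumer side; the paths are placeholders, and the real contrib code resolves them through TensorFlow's resource loader rather than literal paths:

# Sketch: two custom op libraries that both depend on a single shared
# proto object (libforestprotos.so). File paths are illustrative only.
import tensorflow as tf

model_ops = tf.load_op_library('python/ops/_model_ops.so')
stats_ops = tf.load_op_library('python/ops/_stats_ops.so')
# Both libraries resolve their proto symbols from libforestprotos.so,
# so each protocol buffer descriptor is registered exactly once.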
||||
# --------------------------------- Python -------------------------------- #
|
||||
|
||||
py_library(
|
||||
|
@ -1,5 +1,8 @@
|
||||
# TensorFlow code for training random forests.
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load("//tensorflow/core:platform/default/build_config_root.bzl", "if_static")
|
||||
|
||||
package(
|
||||
default_visibility = ["//visibility:public"],
|
||||
)
|
||||
@ -13,18 +16,33 @@ filegroup(
|
||||
srcs = glob(["**/*"]),
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "decision-tree-resource",
|
||||
srcs = ["decision-tree-resource.cc"],
|
||||
hdrs = ["decision-tree-resource.h"],
|
||||
deps = [
|
||||
":decision_node_evaluator",
|
||||
":input_data",
|
||||
":leaf_model_operators",
|
||||
DECISION_TREE_RESOURCE_DEPS = [
|
||||
":decision_node_evaluator",
|
||||
":input_data",
|
||||
":leaf_model_operators",
|
||||
"//tensorflow/core:framework_headers_lib",
|
||||
] + if_static(
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/core:framework_headers_lib",
|
||||
],
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc_headers_only",
|
||||
],
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "decision-tree-resource",
|
||||
hdrs = ["decision-tree-resource.h"],
|
||||
deps = DECISION_TREE_RESOURCE_DEPS + if_static([":decision-tree-resource_impl"]),
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "decision-tree-resource_impl",
|
||||
srcs = ["decision-tree-resource.cc"],
|
||||
hdrs = ["decision-tree-resource.h"],
|
||||
deps = DECISION_TREE_RESOURCE_DEPS,
|
||||
)
|
||||
|
||||
cc_library(
|
||||
@ -37,10 +55,19 @@ cc_library(
|
||||
":input_target",
|
||||
":leaf_model_operators",
|
||||
":split_collection_operators",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
"//tensorflow/core:framework_headers_lib",
|
||||
],
|
||||
] + if_static(
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
],
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc_headers_only",
|
||||
],
|
||||
),
|
||||
)
|
||||
|
||||
cc_library(
|
||||
@ -48,11 +75,18 @@ cc_library(
|
||||
srcs = ["input_data.cc"],
|
||||
hdrs = ["input_data.h"],
|
||||
deps = [
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc",
|
||||
"//tensorflow/contrib/tensor_forest:tree_utils",
|
||||
"//tensorflow/core:framework_headers_lib",
|
||||
],
|
||||
] + if_static(
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc",
|
||||
],
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc_headers_only",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc_headers_only",
|
||||
],
|
||||
),
|
||||
)
|
||||
|
||||
cc_library(
|
||||
@ -71,19 +105,29 @@ cc_library(
|
||||
deps = [
|
||||
":input_target",
|
||||
":params",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
],
|
||||
] + if_static(
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
],
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc_headers_only",
|
||||
],
|
||||
),
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "leaf_model_operators_test",
|
||||
srcs = ["leaf_model_operators_test.cc"],
|
||||
deps = [
|
||||
":leaf_model_operators",
|
||||
":test_utils",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
"//tensorflow/core",
|
||||
"//tensorflow/core:lib",
|
||||
@ -102,21 +146,31 @@ cc_library(
|
||||
":input_target",
|
||||
":params",
|
||||
":stat_utils",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/tensor_forest:tree_utils",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
"//tensorflow/core:framework_headers_lib",
|
||||
],
|
||||
] + if_static(
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
],
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc_headers_only",
|
||||
],
|
||||
),
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "grow_stats_test",
|
||||
srcs = ["grow_stats_test.cc"],
|
||||
deps = [
|
||||
":grow_stats",
|
||||
":test_utils",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
"//tensorflow/core",
|
||||
"//tensorflow/core:lib",
|
||||
@ -132,13 +186,20 @@ cc_library(
|
||||
deps = [
|
||||
":input_data",
|
||||
":input_target",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/core:core_cpu",
|
||||
"//tensorflow/core:framework",
|
||||
"//tensorflow/core:lib",
|
||||
"//tensorflow/core:protos_all_cc",
|
||||
],
|
||||
] + if_static(
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
],
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc_headers_only",
|
||||
],
|
||||
),
|
||||
)
|
||||
|
||||
cc_library(
|
||||
@ -147,19 +208,27 @@ cc_library(
|
||||
hdrs = ["decision_node_evaluator.h"],
|
||||
deps = [
|
||||
":input_data",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc",
|
||||
"//tensorflow/core:framework_headers_lib",
|
||||
],
|
||||
] + if_static(
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc",
|
||||
],
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc_headers_only",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc_headers_only",
|
||||
],
|
||||
),
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "decision_node_evaluator_test",
|
||||
srcs = ["decision_node_evaluator_test.cc"],
|
||||
deps = [
|
||||
":decision_node_evaluator",
|
||||
":test_utils",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc",
|
||||
"//tensorflow/core",
|
||||
"//tensorflow/core:test",
|
||||
"//tensorflow/core:test_main",
|
||||
@ -177,12 +246,21 @@ cc_library(
|
||||
":leaf_model_operators",
|
||||
":params",
|
||||
":stat_utils",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc",
|
||||
"//tensorflow/contrib/tensor_forest:tree_utils",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
],
|
||||
] + if_static(
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
],
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc_headers_only",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_extensions_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc_headers_only",
|
||||
],
|
||||
),
|
||||
)
|
||||
|
||||
cc_library(
|
||||
@ -197,11 +275,19 @@ cc_library(
|
||||
":leaf_model_operators",
|
||||
":params",
|
||||
":split_collection_operators",
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/tensor_forest:tree_utils",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
],
|
||||
] + if_static(
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
],
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc_headers_only",
|
||||
],
|
||||
),
|
||||
)
|
||||
|
||||
cc_library(
|
||||
@ -209,10 +295,17 @@ cc_library(
|
||||
srcs = ["stat_utils.cc"],
|
||||
hdrs = ["stat_utils.h"],
|
||||
deps = [
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
"//tensorflow/core:framework_headers_lib",
|
||||
],
|
||||
] + if_static(
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc",
|
||||
],
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:fertile_stats_proto_cc_headers_only",
|
||||
],
|
||||
),
|
||||
)
|
||||
|
||||
cc_library(
|
||||
@ -229,12 +322,20 @@ cc_library(
|
||||
srcs = ["params.cc"],
|
||||
hdrs = ["params.h"],
|
||||
deps = [
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
"//tensorflow/core:framework_headers_lib",
|
||||
],
|
||||
] + if_static(
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc",
|
||||
],
|
||||
[
|
||||
"//tensorflow/contrib/decision_trees/proto:generic_tree_model_cc_headers_only",
|
||||
"//tensorflow/contrib/tensor_forest/proto:tensor_forest_params_proto_cc_headers_only",
|
||||
],
|
||||
),
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "params_test",
|
||||
srcs = ["params_test.cc"],
|
||||
deps = [
|
||||
|
@ -21,6 +21,11 @@ using decision_trees::DecisionTree;
|
||||
using decision_trees::Leaf;
|
||||
using decision_trees::TreeNode;
|
||||
|
||||
DecisionTreeResource::DecisionTreeResource(const TensorForestParams& params)
|
||||
: params_(params), decision_tree_(new decision_trees::Model()) {
|
||||
model_op_ = LeafModelOperatorFactory::CreateLeafModelOperator(params_);
|
||||
}
|
||||
|
||||
int32 DecisionTreeResource::TraverseTree(
|
||||
const std::unique_ptr<TensorDataSet>& input_data, int example,
|
||||
int32* leaf_depth, TreePath* path) const {
|
||||
|
@ -31,10 +31,7 @@ namespace tensorforest {
|
||||
class DecisionTreeResource : public ResourceBase {
|
||||
public:
|
||||
// Constructor.
|
||||
explicit DecisionTreeResource(const TensorForestParams& params)
|
||||
: params_(params), decision_tree_(new decision_trees::Model()) {
|
||||
model_op_ = LeafModelOperatorFactory::CreateLeafModelOperator(params_);
|
||||
}
|
||||
explicit DecisionTreeResource(const TensorForestParams& params);
|
||||
|
||||
string DebugString() override {
|
||||
return strings::StrCat("DecisionTree[size=",
|
||||
|
@ -1,5 +1,7 @@
|
||||
licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load("//tensorflow/core:platform/default/build_config.bzl", "tf_proto_library")
|
||||
load("//tensorflow/core:platform/default/build_config.bzl", "tf_proto_library_cc")
|
||||
load("//tensorflow/core:platform/default/build_config.bzl", "tf_additional_all_protos")
|
||||
@ -42,7 +44,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "capture_tpu_profile",
|
||||
srcs = ["capture_tpu_profile.cc"],
|
||||
visibility = ["//tensorflow/contrib/tpu/profiler:__subpackages__"],
|
||||
@ -75,7 +77,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "trace_events_to_json_test",
|
||||
srcs = ["trace_events_to_json_test.cc"],
|
||||
deps = [
|
||||
|
@ -7,6 +7,9 @@ exports_files(["LICENSE"])
|
||||
|
||||
package(default_visibility = ["//tensorflow:__subpackages__"])
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
# Converter of a frozen graph definition into the memmapped format.
|
||||
cc_library(
|
||||
name = "convert_graphdef_memmapped_format_lib",
|
||||
@ -22,7 +25,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "convert_graphdef_memmapped_format",
|
||||
srcs = ["convert_graphdef_memmapped_format.cc"],
|
||||
deps = [
|
||||
@ -32,7 +35,7 @@ cc_binary(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "convert_graphdef_memmapped_format_test",
|
||||
srcs = ["convert_graphdef_memmapped_format_test.cc"],
|
||||
linkopts = select({
|
||||
@ -51,7 +54,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "inspect_checkpoint",
|
||||
srcs = ["inspect_checkpoint.cc"],
|
||||
deps = [
|
||||
|
@ -9,6 +9,8 @@ licenses(["notice"]) # Apache 2.0
|
||||
|
||||
exports_files(["LICENSE"])
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
filegroup(
|
||||
name = "all_files",
|
||||
srcs = glob(
|
||||
@ -39,7 +41,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "xla_tf_graph_util_test",
|
||||
srcs = ["xla_tf_graph_util_test.cc"],
|
||||
linkstatic = 1,
|
||||
|
@ -52,6 +52,16 @@
# cc_library ":android_tensorflow_lib_selective_registration" - Native library
# supporting SELECTIVE_REGISTRATION feature.
# portable_proto_library ":android_proto_lib" (Google-internal)
#
# Note that :framework and :lib have incomplete transitive dependencies (they
# declare but do not define some symbols) if framework_shared_object=True
# (meaning there is an explicit framework shared object). Missing symbols are
# included in //tensorflow:libtensorflow_framework.so. This split supports
# custom op registration; see comments on
# //tensorflow:libtensorflow_framework.so. It does mean that TensorFlow cc_test
# and cc_binary rules will not build. Using tf_cc_test and tf_cc_binary (from
# //tensorflow/tensorflow.bzl) will include the necessary symbols in binary
# build targets.
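
To make that note concrete, a sketch of a new binary rule (not part of this diff; the target and file names are hypothetical) would use tf_cc_binary so the symbols omitted from :framework and :lib are linked in; a test rule would use tf_cc_test from the same tensorflow.bzl in the same way:

load("//tensorflow:tensorflow.bzl", "tf_cc_binary")

tf_cc_binary(
    name = "my_tool",  # hypothetical target name
    srcs = ["my_tool.cc"],  # hypothetical source file
    deps = [
        "//tensorflow/core:framework",
        "//tensorflow/core:lib",
    ],
)
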
package(default_visibility = [
|
||||
"//tensorflow:internal",
|
||||
@ -123,10 +133,13 @@ load(
|
||||
"tf_pyclif_proto_library",
|
||||
"tf_jspb_proto_library",
|
||||
"tf_nano_proto_library",
|
||||
"tf_protos_all",
|
||||
"tf_protos_all_impl",
|
||||
)
|
||||
load(
|
||||
"//tensorflow/core:platform/default/build_config_root.bzl",
|
||||
"tf_cuda_tests_tags",
|
||||
"if_static",
|
||||
)
|
||||
load(
|
||||
"//third_party/mkl:build_defs.bzl",
|
||||
@ -200,6 +213,7 @@ tf_proto_library(
|
||||
name = "protos_all",
|
||||
srcs = CORE_PROTO_SRCS + ADDITIONAL_CORE_PROTO_SRCS,
|
||||
cc_api_version = 2,
|
||||
default_header = True,
|
||||
go_api_version = 2,
|
||||
j2objc_api_version = 1,
|
||||
java_api_version = 2,
|
||||
@ -257,6 +271,10 @@ cc_library(
|
||||
deps = tf_lib_proto_parsing_deps(),
|
||||
)
|
||||
|
||||
# This build rule (along with :lib_internal, :framework, and
# :framework_internal) purposefully omits the definitions of many declared
# symbols, which are included in //tensorflow:libtensorflow_framework.so. Using
# tf_cc_test and tf_cc_binary will include the necessary symbols.
cc_library(
|
||||
name = "lib",
|
||||
hdrs = [
|
||||
@ -362,6 +380,10 @@ cc_library(
|
||||
] + tf_additional_test_deps(),
|
||||
)
|
||||
|
||||
# This build rule (along with :framework_internal, :lib, and :lib_internal)
# purposefully omits the definitions of many declared symbols, which are
# included in //tensorflow:libtensorflow_framework.so. Using tf_cc_test and tf_cc_binary
# will include the necessary symbols.
tf_cuda_library(
|
||||
name = "framework",
|
||||
hdrs = [
|
||||
@ -855,6 +877,7 @@ cc_library(
|
||||
deps = [
|
||||
":core_cpu",
|
||||
":core_cpu_internal",
|
||||
":core_cpu_lib",
|
||||
":framework",
|
||||
":framework_internal",
|
||||
":lib",
|
||||
@ -978,9 +1001,10 @@ cc_library(
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
":protos_cc",
|
||||
":protos_all_cc_impl",
|
||||
"//third_party/eigen3",
|
||||
"@nsync//:nsync_cpp",
|
||||
"@protobuf_archive//:protobuf",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
@ -1000,12 +1024,13 @@ cc_library(
|
||||
copts = tf_copts() + ["-Os"] + ["-std=c++11"],
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
":protos_cc",
|
||||
":protos_all_cc_impl",
|
||||
"//third_party/eigen3",
|
||||
"//third_party/fft2d:fft2d_headers",
|
||||
"@fft2d//:fft2d",
|
||||
"@gemmlowp//:gemmlowp",
|
||||
"@nsync//:nsync_cpp",
|
||||
"@protobuf_archive//:protobuf",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
@ -1041,9 +1066,10 @@ cc_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
":android_tensorflow_lib_lite",
|
||||
":protos_cc",
|
||||
":protos_all_cc_impl",
|
||||
"//tensorflow/core/kernels:android_tensorflow_kernels",
|
||||
"//third_party/eigen3",
|
||||
"@protobuf_archive//:protobuf",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
@ -1065,8 +1091,9 @@ cc_library(
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
":protos_cc",
|
||||
":protos_all_cc_impl",
|
||||
"//third_party/eigen3",
|
||||
"@protobuf_archive//:protobuf",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
@ -1086,9 +1113,10 @@ cc_library(
|
||||
],
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
":protos_cc",
|
||||
":protos_all_cc_impl",
|
||||
"//third_party/eigen3",
|
||||
"@nsync//:nsync_cpp",
|
||||
"@protobuf_archive//:protobuf",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
@ -1299,17 +1327,104 @@ tf_proto_library_cc(
|
||||
],
|
||||
)
|
||||
|
||||
LIB_INTERNAL_PRIVATE_HEADERS = ["framework/resource_handle.h"] + glob(
|
||||
[
|
||||
"lib/**/*.h",
|
||||
"platform/*.h",
|
||||
"platform/profile_utils/**/*.h",
|
||||
],
|
||||
exclude = [
|
||||
"**/*test*",
|
||||
"lib/gif/**/*",
|
||||
"lib/jpeg/**/*",
|
||||
"platform/gif.h",
|
||||
"platform/jpeg.h",
|
||||
"platform/**/cuda.h",
|
||||
"platform/**/stream_executor.h",
|
||||
],
|
||||
) + tf_additional_lib_srcs(
|
||||
exclude = [
|
||||
"**/*.cc",
|
||||
"**/*test*",
|
||||
"platform/**/cuda.h",
|
||||
"platform/**/stream_executor.h",
|
||||
],
|
||||
)
|
||||
|
||||
LIB_INTERNAL_PUBLIC_HEADERS = tf_additional_lib_hdrs() + [
|
||||
"lib/core/blocking_counter.h",
|
||||
"lib/core/refcount.h",
|
||||
"lib/gtl/edit_distance.h",
|
||||
"lib/gtl/int_type.h",
|
||||
"lib/gtl/iterator_range.h",
|
||||
"lib/gtl/manual_constructor.h",
|
||||
"lib/gtl/map_util.h",
|
||||
"lib/gtl/stl_util.h",
|
||||
"lib/gtl/top_n.h",
|
||||
"lib/hash/hash.h",
|
||||
"lib/io/inputbuffer.h",
|
||||
"lib/io/iterator.h",
|
||||
"lib/io/snappy/snappy_inputbuffer.h",
|
||||
"lib/io/snappy/snappy_outputbuffer.h",
|
||||
"lib/io/zlib_compression_options.h",
|
||||
"lib/io/zlib_inputstream.h",
|
||||
"lib/io/zlib_outputbuffer.h",
|
||||
"lib/monitoring/collected_metrics.h",
|
||||
"lib/monitoring/collection_registry.h",
|
||||
"lib/monitoring/metric_def.h",
|
||||
"lib/monitoring/mobile_counter.h",
|
||||
"lib/monitoring/mobile_sampler.h",
|
||||
"lib/png/png_io.h",
|
||||
"lib/random/random.h",
|
||||
"lib/random/random_distributions.h",
|
||||
"lib/random/weighted_picker.h",
|
||||
"lib/strings/base64.h",
|
||||
"lib/strings/ordered_code.h",
|
||||
"lib/strings/proto_text_util.h",
|
||||
"lib/strings/scanner.h",
|
||||
"lib/wav/wav_io.h",
|
||||
"platform/demangle.h",
|
||||
"platform/denormal.h",
|
||||
"platform/host_info.h",
|
||||
"platform/platform.h",
|
||||
"platform/protobuf_internal.h",
|
||||
"platform/setround.h",
|
||||
"platform/tensor_coding.h",
|
||||
"platform/tracing.h",
|
||||
]
|
||||
|
||||
cc_library(
|
||||
name = "lib_internal",
|
||||
srcs = glob(
|
||||
srcs = LIB_INTERNAL_PRIVATE_HEADERS,
|
||||
hdrs = LIB_INTERNAL_PUBLIC_HEADERS,
|
||||
copts = tf_copts(),
|
||||
defines = tf_additional_lib_defines() + [
|
||||
"SNAPPY",
|
||||
] + tf_additional_verbs_lib_defines() +
|
||||
tf_additional_mpi_lib_defines() +
|
||||
tf_additional_gdr_lib_defines(),
|
||||
linkopts = select({
|
||||
"//tensorflow:freebsd": [],
|
||||
"//tensorflow:windows": [],
|
||||
"//tensorflow:windows_msvc": [],
|
||||
"//conditions:default": [
|
||||
"-ldl",
|
||||
"-lpthread",
|
||||
],
|
||||
}),
|
||||
deps = tf_additional_lib_deps() + [
"//third_party/eigen3",
"//tensorflow/core/platform/default/build_config:platformlib",
] + if_static([":lib_internal_impl"]),
)
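
With this change :lib_internal is a header-level rule that links :lib_internal_impl only under if_static(); in the default build those definitions come from the framework shared object at runtime. if_static() also has a two-branch keyword form, used further down in this file to select the full protobuf dependency in static builds and protobuf_headers otherwise. A hedged sketch of that form, with a hypothetical target name:

cc_library(
    name = "uses_protobuf",  # hypothetical rule, not part of this diff
    hdrs = ["uses_protobuf.h"],
    deps = if_static(
        extra_deps = ["@protobuf_archive//:protobuf"],  # static / monolithic build
        otherwise = ["@protobuf_archive//:protobuf_headers"],  # framework shared object build
    ),
)
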
|
||||
|
||||
cc_library(
|
||||
name = "lib_internal_impl",
|
||||
srcs = LIB_INTERNAL_PRIVATE_HEADERS + glob(
|
||||
[
|
||||
"lib/**/*.h",
|
||||
"lib/**/*.cc",
|
||||
"platform/*.h",
|
||||
"platform/*.cc",
|
||||
"platform/profile_utils/**/*.h",
|
||||
"platform/profile_utils/**/*.cc",
|
||||
"framework/resource_handle.h",
|
||||
"framework/resource_handle.cc",
|
||||
],
|
||||
exclude = [
|
||||
@ -1318,12 +1433,8 @@ cc_library(
|
||||
"lib/hash/crc32c_accelerate.cc",
|
||||
"lib/gif/**/*",
|
||||
"lib/jpeg/**/*",
|
||||
"platform/gif.h",
|
||||
"platform/jpeg.h",
|
||||
"platform/**/env_time.cc",
|
||||
"platform/**/cuda.h",
|
||||
"platform/**/cuda_libdevice_path.cc",
|
||||
"platform/**/stream_executor.h",
|
||||
"platform/**/gpu_tracer.cc",
|
||||
"platform/variant_coding.cc",
|
||||
"platform/**/variant_cord_coding.cc",
|
||||
@ -1343,71 +1454,17 @@ cc_library(
|
||||
# dependency.
|
||||
tf_additional_proto_srcs(),
|
||||
),
|
||||
hdrs = tf_additional_lib_hdrs() + [
|
||||
"lib/core/blocking_counter.h",
|
||||
"lib/core/refcount.h",
|
||||
"lib/gtl/edit_distance.h",
|
||||
"lib/gtl/int_type.h",
|
||||
"lib/gtl/iterator_range.h",
|
||||
"lib/gtl/manual_constructor.h",
|
||||
"lib/gtl/map_util.h",
|
||||
"lib/gtl/stl_util.h",
|
||||
"lib/gtl/top_n.h",
|
||||
"lib/hash/hash.h",
|
||||
"lib/io/inputbuffer.h",
|
||||
"lib/io/iterator.h",
|
||||
"lib/io/snappy/snappy_inputbuffer.h",
|
||||
"lib/io/snappy/snappy_outputbuffer.h",
|
||||
"lib/io/zlib_compression_options.h",
|
||||
"lib/io/zlib_inputstream.h",
|
||||
"lib/io/zlib_outputbuffer.h",
|
||||
"lib/monitoring/collected_metrics.h",
|
||||
"lib/monitoring/collection_registry.h",
|
||||
"lib/monitoring/metric_def.h",
|
||||
"lib/monitoring/mobile_counter.h",
|
||||
"lib/monitoring/mobile_sampler.h",
|
||||
"lib/png/png_io.h",
|
||||
"lib/random/random.h",
|
||||
"lib/random/random_distributions.h",
|
||||
"lib/random/weighted_picker.h",
|
||||
"lib/strings/base64.h",
|
||||
"lib/strings/ordered_code.h",
|
||||
"lib/strings/proto_text_util.h",
|
||||
"lib/strings/scanner.h",
|
||||
"lib/wav/wav_io.h",
|
||||
"platform/demangle.h",
|
||||
"platform/denormal.h",
|
||||
"platform/host_info.h",
|
||||
"platform/platform.h",
|
||||
"platform/protobuf_internal.h",
|
||||
"platform/setround.h",
|
||||
"platform/tensor_coding.h",
|
||||
"platform/tracing.h",
|
||||
],
|
||||
hdrs = LIB_INTERNAL_PUBLIC_HEADERS,
|
||||
copts = tf_copts(),
|
||||
defines = tf_additional_lib_defines() + [
|
||||
"SNAPPY",
|
||||
] + tf_additional_verbs_lib_defines() +
|
||||
tf_additional_mpi_lib_defines() +
|
||||
tf_additional_gdr_lib_defines(),
|
||||
linkopts = select({
|
||||
"//tensorflow:freebsd": [],
|
||||
"//tensorflow:windows": [],
|
||||
"//tensorflow:windows_msvc": [],
|
||||
"//conditions:default": [
|
||||
"-ldl",
|
||||
"-lpthread",
|
||||
],
|
||||
}),
|
||||
deps = tf_additional_lib_deps() + [
|
||||
":lib_hash_crc32c_accelerate_internal",
|
||||
":lib_proto_parsing",
|
||||
":protos_all_cc",
|
||||
"//third_party/eigen3",
|
||||
"//tensorflow/core/platform/default/build_config:platformlib",
|
||||
"@snappy",
|
||||
"@zlib_archive//:zlib",
|
||||
],
|
||||
"@protobuf_archive//:protobuf",
|
||||
] + tf_protos_all_impl(),
|
||||
)
|
||||
|
||||
# File compiled with extra flags to get cpu-specific acceleration.
|
||||
@ -1552,7 +1609,6 @@ proto_text_hdrs_and_srcs = tf_generate_proto_text_sources(
|
||||
|
||||
cc_library(
|
||||
name = "proto_text",
|
||||
srcs = proto_text_hdrs_and_srcs.srcs,
|
||||
hdrs = proto_text_hdrs_and_srcs.hdrs,
|
||||
deps = [
|
||||
":lib",
|
||||
@ -1570,22 +1626,95 @@ cc_library(
|
||||
copts = tf_copts(),
|
||||
)
|
||||
|
||||
FRAMEWORK_INTERNAL_PRIVATE_HEADERS = [
|
||||
"platform/variant_coding.h",
|
||||
"graph/edgeset.h",
|
||||
"graph/graph.h",
|
||||
] + glob(
|
||||
[
|
||||
"example/**/*.h",
|
||||
"framework/**/*.h",
|
||||
"util/**/*.h",
|
||||
],
|
||||
exclude = [
|
||||
"**/*test*",
|
||||
"**/*main.cc",
|
||||
"example/example_parser_configuration.*",
|
||||
"util/reporter.h",
|
||||
"util/reporter.cc",
|
||||
"framework/fake_input.*",
|
||||
"framework/op_gen_lib.*",
|
||||
"framework/reader_base.*",
|
||||
"util/memmapped_file_system.*",
|
||||
"util/memmapped_file_system_writer.*",
|
||||
"util/version_info.cc",
|
||||
],
|
||||
) + select({
|
||||
"//tensorflow:windows": [],
|
||||
"//conditions:default": [
|
||||
"util/memmapped_file_system.h",
|
||||
"util/memmapped_file_system_writer.h",
|
||||
],
|
||||
})
|
||||
|
||||
FRAMEWORK_INTERNAL_PUBLIC_HEADERS = [
|
||||
"framework/op_segment.h",
|
||||
"framework/rendezvous.h", # only needed for tests
|
||||
"framework/tensor_reference.h",
|
||||
"framework/tracking_allocator.h", # only needed for tests
|
||||
"framework/unique_tensor_references.h",
|
||||
"framework/variant.h",
|
||||
"platform/variant_coding.h",
|
||||
"util/command_line_flags.h",
|
||||
"util/env_var.h",
|
||||
"util/equal_graph_def.h",
|
||||
"util/presized_cuckoo_map.h",
|
||||
"util/tensor_slice_set.h",
|
||||
"util/tensor_slice_util.h",
|
||||
] + tf_additional_framework_hdrs()
|
||||
|
||||
tf_cuda_library(
|
||||
name = "framework_internal",
|
||||
srcs = glob(
|
||||
srcs = FRAMEWORK_INTERNAL_PRIVATE_HEADERS,
|
||||
hdrs = FRAMEWORK_INTERNAL_PUBLIC_HEADERS,
|
||||
deps = [
|
||||
":framework_internal_headers_lib",
|
||||
"//third_party/eigen3",
|
||||
":lib",
|
||||
] + if_static(
|
||||
extra_deps = [
|
||||
":framework_internal_impl",
|
||||
"@protobuf_archive//:protobuf",
|
||||
],
|
||||
otherwise = [
|
||||
"@protobuf_archive//:protobuf_headers",
|
||||
],
|
||||
),
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
cc_header_only_library(
|
||||
name = "framework_internal_headers_lib",
|
||||
deps = [
|
||||
":lib",
|
||||
":lib_internal",
|
||||
":version_lib",
|
||||
"//tensorflow/core/kernels:bounds_check",
|
||||
"//tensorflow/core/platform/default/build_config:platformlib",
|
||||
],
|
||||
)
|
||||
|
||||
tf_cuda_library(
|
||||
name = "framework_internal_impl",
|
||||
srcs = FRAMEWORK_INTERNAL_PRIVATE_HEADERS + [
|
||||
"platform/variant_coding.cc",
|
||||
] + glob(
|
||||
[
|
||||
"example/**/*.h",
|
||||
"example/**/*.cc",
|
||||
"framework/**/*.h",
|
||||
"framework/**/*.cc",
|
||||
"util/**/*.h",
|
||||
"util/**/*.cc",
|
||||
] + [
|
||||
"platform/variant_coding.cc",
|
||||
"platform/variant_coding.h",
|
||||
"graph/edgeset.h",
|
||||
"graph/edgeset.cc",
|
||||
"graph/graph.h",
|
||||
"graph/graph.cc",
|
||||
"graph/while_context.h",
|
||||
"graph/while_context.cc",
|
||||
@ -1594,7 +1723,6 @@ tf_cuda_library(
|
||||
"**/*test*",
|
||||
"**/*main.cc",
|
||||
"example/example_parser_configuration.*",
|
||||
"util/reporter.h",
|
||||
"util/reporter.cc",
|
||||
"framework/fake_input.*",
|
||||
"framework/op_gen_lib.*",
|
||||
@ -1608,27 +1736,11 @@ tf_cuda_library(
|
||||
"//tensorflow:windows": [],
|
||||
"//tensorflow:windows_msvc": [],
|
||||
"//conditions:default": [
|
||||
"util/memmapped_file_system.h",
|
||||
"util/memmapped_file_system.cc",
|
||||
"util/memmapped_file_system_writer.h",
|
||||
"util/memmapped_file_system_writer.cc",
|
||||
],
|
||||
}) + tf_additional_framework_srcs(),
|
||||
hdrs = [
|
||||
"framework/variant.h",
|
||||
"framework/op_segment.h",
|
||||
"framework/rendezvous.h", # only needed for tests
|
||||
"framework/tensor_reference.h",
|
||||
"framework/tracking_allocator.h", # only needed for tests
|
||||
"framework/unique_tensor_references.h",
|
||||
"platform/variant_coding.h",
|
||||
"util/command_line_flags.h",
|
||||
"util/env_var.h",
|
||||
"util/equal_graph_def.h",
|
||||
"util/presized_cuckoo_map.h",
|
||||
"util/tensor_slice_set.h",
|
||||
"util/tensor_slice_util.h",
|
||||
] + tf_additional_framework_hdrs(),
|
||||
}) + proto_text_hdrs_and_srcs.srcs + tf_additional_framework_srcs(),
|
||||
hdrs = FRAMEWORK_INTERNAL_PUBLIC_HEADERS,
|
||||
copts = tf_copts(),
|
||||
linkopts = select({
|
||||
"//tensorflow:freebsd": [],
|
||||
@ -1647,7 +1759,14 @@ tf_cuda_library(
|
||||
"//tensorflow/core/platform/default/build_config:platformlib",
|
||||
"//tensorflow/core/kernels:bounds_check",
|
||||
"//third_party/eigen3",
|
||||
] + if_mkl(["//third_party/mkl:intel_binary_blob"]),
|
||||
] + if_static(
|
||||
extra_deps = ["@protobuf_archive//:protobuf"],
|
||||
otherwise = ["@protobuf_archive//:protobuf_headers"],
|
||||
) + if_mkl(
|
||||
[
|
||||
"//third_party/mkl:intel_binary_blob",
|
||||
],
|
||||
),
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
@ -1759,15 +1878,57 @@ tf_cuda_library(
|
||||
":lib_internal",
|
||||
":proto_text",
|
||||
":protos_all_cc",
|
||||
"//third_party/eigen3",
|
||||
] + if_static([
|
||||
":function_ops_op_lib",
|
||||
":functional_grad",
|
||||
":functional_ops_op_lib",
|
||||
"//tensorflow/core/kernels:bounds_check",
|
||||
"//tensorflow/core/kernels:required",
|
||||
"//third_party/eigen3",
|
||||
],
|
||||
":core_cpu_impl",
|
||||
]),
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
CORE_CPU_LIB_HEADERS = CORE_CPU_BASE_HDRS + [
|
||||
"common_runtime/allocator_retry.h",
|
||||
"common_runtime/bfc_allocator.h",
|
||||
"common_runtime/build_graph_options.h",
|
||||
"common_runtime/constant_folding.h",
|
||||
"common_runtime/copy_tensor.h",
|
||||
"common_runtime/costmodel_manager.h",
|
||||
"common_runtime/debugger_state_interface.h",
|
||||
"common_runtime/device_factory.h",
|
||||
"common_runtime/device_mgr.h",
|
||||
"common_runtime/device_set.h",
|
||||
"common_runtime/dma_helper.h",
|
||||
"common_runtime/eigen_thread_pool.h",
|
||||
"common_runtime/executor.h",
|
||||
"common_runtime/function.h",
|
||||
"common_runtime/graph_optimizer.h",
|
||||
"common_runtime/local_device.h",
|
||||
"common_runtime/memory_types.h",
|
||||
"common_runtime/mkl_cpu_allocator.h",
|
||||
"common_runtime/optimization_registry.h",
|
||||
"common_runtime/pending_counts.h",
|
||||
"common_runtime/process_function_library_runtime.h",
|
||||
"common_runtime/process_util.h",
|
||||
"common_runtime/profile_handler.h",
|
||||
"common_runtime/renamed_device.h",
|
||||
"common_runtime/rendezvous_mgr.h",
|
||||
"common_runtime/rendezvous_util.h",
|
||||
"common_runtime/session_factory.h",
|
||||
"common_runtime/placer.h",
|
||||
"common_runtime/stats_publisher_interface.h",
|
||||
"common_runtime/step_stats_collector.h",
|
||||
"common_runtime/threadpool_device.h",
|
||||
"common_runtime/visitable_allocator.h",
|
||||
"graph/gradients.h",
|
||||
"graph/quantize_training.h",
|
||||
]
|
||||
|
||||
tf_cuda_library(
|
||||
name = "core_cpu_internal",
|
||||
name = "core_cpu_impl",
|
||||
srcs = [
|
||||
"common_runtime/allocator_retry.cc",
|
||||
"common_runtime/bfc_allocator.cc",
|
||||
@ -1782,7 +1943,6 @@ tf_cuda_library(
|
||||
"common_runtime/device_set.cc",
|
||||
"common_runtime/executor.cc",
|
||||
"common_runtime/function.cc",
|
||||
"common_runtime/graph_execution_state.cc",
|
||||
"common_runtime/graph_optimizer.cc",
|
||||
"common_runtime/graph_runner.cc",
|
||||
"common_runtime/local_device.cc",
|
||||
@ -1811,63 +1971,63 @@ tf_cuda_library(
|
||||
"public/session_options.h",
|
||||
"public/version.h",
|
||||
],
|
||||
hdrs = CORE_CPU_BASE_HDRS + [
|
||||
"common_runtime/allocator_retry.h",
|
||||
"common_runtime/bfc_allocator.h",
|
||||
"common_runtime/build_graph_options.h",
|
||||
"common_runtime/constant_folding.h",
|
||||
"common_runtime/copy_tensor.h",
|
||||
"common_runtime/costmodel_manager.h",
|
||||
"common_runtime/debugger_state_interface.h",
|
||||
"common_runtime/device_factory.h",
|
||||
"common_runtime/device_mgr.h",
|
||||
"common_runtime/device_set.h",
|
||||
"common_runtime/dma_helper.h",
|
||||
"common_runtime/eigen_thread_pool.h",
|
||||
"common_runtime/executor.h",
|
||||
"common_runtime/function.h",
|
||||
"common_runtime/graph_optimizer.h",
|
||||
"common_runtime/local_device.h",
|
||||
"common_runtime/memory_types.h",
|
||||
"common_runtime/mkl_cpu_allocator.h",
|
||||
"common_runtime/optimization_registry.h",
|
||||
"common_runtime/pending_counts.h",
|
||||
"common_runtime/process_function_library_runtime.h",
|
||||
"common_runtime/process_util.h",
|
||||
"common_runtime/profile_handler.h",
|
||||
"common_runtime/renamed_device.h",
|
||||
"common_runtime/rendezvous_mgr.h",
|
||||
"common_runtime/rendezvous_util.h",
|
||||
"common_runtime/session_factory.h",
|
||||
"common_runtime/graph_execution_state.h",
|
||||
"common_runtime/placer.h",
|
||||
"common_runtime/stats_publisher_interface.h",
|
||||
"common_runtime/step_stats_collector.h",
|
||||
"common_runtime/threadpool_device.h",
|
||||
"common_runtime/visitable_allocator.h",
|
||||
"graph/gradients.h",
|
||||
"graph/quantize_training.h",
|
||||
],
|
||||
hdrs = CORE_CPU_LIB_HEADERS,
|
||||
copts = tf_copts(),
|
||||
deps = [
|
||||
":core_cpu_base",
|
||||
":framework",
|
||||
":framework_internal",
|
||||
":function_ops_op_lib",
|
||||
":functional_grad",
|
||||
":functional_ops_op_lib",
|
||||
":lib",
|
||||
":lib_internal",
|
||||
":proto_text",
|
||||
":protos_all_cc",
|
||||
"//tensorflow/core/grappler:grappler_item",
|
||||
"//tensorflow/core/grappler/clusters:utils",
|
||||
"//tensorflow/core/grappler/clusters:virtual_cluster",
|
||||
"//tensorflow/core/grappler/optimizers:meta_optimizer",
|
||||
"//third_party/eigen3",
|
||||
"//tensorflow/core/kernels:required",
|
||||
] + if_mkl(["//third_party/mkl:intel_binary_blob"]) +
|
||||
tf_additional_core_deps(),
|
||||
":framework",
|
||||
":framework_internal",
|
||||
":lib",
|
||||
":lib_internal",
|
||||
":proto_text",
|
||||
":protos_all_cc",
|
||||
"//third_party/eigen3",
|
||||
"//tensorflow/core/grappler:grappler_item",
|
||||
] + if_mkl(
|
||||
[
|
||||
"//third_party/mkl:intel_binary_blob",
|
||||
],
|
||||
),
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
tf_cuda_library(
|
||||
name = "core_cpu_lib",
|
||||
hdrs = CORE_CPU_LIB_HEADERS,
|
||||
deps = [
|
||||
":core_cpu_base",
|
||||
":proto_text",
|
||||
"//tensorflow/core/grappler:grappler_item",
|
||||
] + if_static([":core_cpu_impl"]) + tf_protos_all(),
|
||||
)
|
||||
|
||||
tf_cuda_library(
|
||||
name = "core_cpu_internal",
|
||||
srcs = [
|
||||
"common_runtime/graph_execution_state.cc",
|
||||
],
|
||||
hdrs = [
|
||||
"common_runtime/graph_execution_state.h",
|
||||
] + CORE_CPU_LIB_HEADERS,
|
||||
copts = tf_copts(),
|
||||
deps = [
|
||||
":framework",
|
||||
":framework_internal",
|
||||
":function_ops_op_lib",
|
||||
":functional_grad",
|
||||
":functional_ops_op_lib",
|
||||
":lib",
|
||||
":lib_internal",
|
||||
":proto_text",
|
||||
":protos_all_cc",
|
||||
"//tensorflow/core/grappler:grappler_item",
|
||||
"//tensorflow/core/grappler/clusters:utils",
|
||||
"//tensorflow/core/grappler/clusters:virtual_cluster",
|
||||
"//tensorflow/core/grappler/optimizers:meta_optimizer",
|
||||
"//third_party/eigen3",
|
||||
"//tensorflow/core/kernels:required",
|
||||
] + if_mkl(
|
||||
["//third_party/mkl:intel_binary_blob"],
|
||||
) + tf_additional_core_deps() + if_static([":core_cpu_impl"]),
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
@ -1944,8 +2104,20 @@ tf_cuda_library(
|
||||
] + tf_additional_gpu_tracer_deps(),
|
||||
)
|
||||
|
||||
GPU_RUNTIME_HEADERS = [
|
||||
"common_runtime/gpu/gpu_bfc_allocator.h",
|
||||
"common_runtime/gpu/gpu_cudamalloc_allocator.h",
|
||||
"common_runtime/gpu/gpu_debug_allocator.h",
|
||||
"common_runtime/gpu/gpu_device.h",
|
||||
"common_runtime/gpu/gpu_init.h",
|
||||
"common_runtime/gpu/gpu_stream_util.h",
|
||||
"common_runtime/gpu/gpu_util.h",
|
||||
"common_runtime/gpu/pool_allocator.h",
|
||||
"common_runtime/gpu/process_state.h",
|
||||
]
|
||||
|
||||
tf_cuda_library(
|
||||
name = "gpu_runtime",
|
||||
name = "gpu_runtime_impl",
|
||||
srcs = [
|
||||
"common_runtime/gpu/gpu_bfc_allocator.cc",
|
||||
"common_runtime/gpu/gpu_cudamalloc_allocator.cc",
|
||||
@ -1959,25 +2131,14 @@ tf_cuda_library(
|
||||
"common_runtime/gpu/process_state.cc",
|
||||
"common_runtime/gpu_device_context.h",
|
||||
],
|
||||
hdrs = [
|
||||
"common_runtime/gpu/gpu_bfc_allocator.h",
|
||||
"common_runtime/gpu/gpu_cudamalloc_allocator.h",
|
||||
"common_runtime/gpu/gpu_debug_allocator.h",
|
||||
"common_runtime/gpu/gpu_device.h",
|
||||
"common_runtime/gpu/gpu_init.h",
|
||||
"common_runtime/gpu/gpu_stream_util.h",
|
||||
"common_runtime/gpu/gpu_util.h",
|
||||
"common_runtime/gpu/pool_allocator.h",
|
||||
"common_runtime/gpu/process_state.h",
|
||||
],
|
||||
hdrs = GPU_RUNTIME_HEADERS,
|
||||
copts = tf_copts(),
|
||||
linkstatic = 1,
|
||||
deps = [
|
||||
":core_cpu",
|
||||
":core_cpu_internal",
|
||||
":core_cpu_impl",
|
||||
":core_cpu_lib",
|
||||
":framework",
|
||||
":framework_internal",
|
||||
":gpu_init",
|
||||
":gpu_init_impl",
|
||||
":gpu_lib",
|
||||
":lib",
|
||||
":lib_internal",
|
||||
@ -1988,8 +2149,39 @@ tf_cuda_library(
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
tf_cuda_library(
|
||||
name = "gpu_runtime",
|
||||
hdrs = GPU_RUNTIME_HEADERS,
|
||||
linkstatic = 1,
|
||||
deps = [
|
||||
":core_cpu_lib",
|
||||
":framework",
|
||||
":framework_internal",
|
||||
":lib",
|
||||
":lib_internal",
|
||||
":protos_all_cc",
|
||||
"//third_party/eigen3",
|
||||
] + if_static([":gpu_runtime_impl"]),
|
||||
)
|
||||
|
||||
tf_cuda_library(
|
||||
name = "gpu_init",
|
||||
hdrs = [
|
||||
"common_runtime/gpu/gpu_init.h",
|
||||
],
|
||||
deps = [
|
||||
":framework",
|
||||
":framework_internal",
|
||||
":lib",
|
||||
":lib_internal",
|
||||
":stream_executor",
|
||||
] + if_static(
|
||||
[":gpu_init_impl"],
|
||||
),
|
||||
)
|
||||
|
||||
tf_cuda_library(
|
||||
name = "gpu_init_impl",
|
||||
srcs = [
|
||||
"common_runtime/gpu/gpu_init.cc",
|
||||
],
|
||||
@ -2254,7 +2446,7 @@ tf_cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "lib_jpeg_jpeg_mem_unittest",
|
||||
srcs = ["lib/jpeg/jpeg_mem_unittest.cc"],
|
||||
data = glob(["lib/jpeg/testdata/*.jpg"]),
|
||||
@ -2267,10 +2459,10 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "lib_strings_ordered_code_test",
|
||||
srcs = ["lib/strings/ordered_code_test.cc"],
|
||||
copts = ["$(STACK_FRAME_UNLIMITED)"], # Tests initialize large vectors
|
||||
extra_copts = ["$(STACK_FRAME_UNLIMITED)"], # Tests initialize large vectors
|
||||
deps = [
|
||||
":lib",
|
||||
":lib_internal",
|
||||
@ -2279,7 +2471,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "lib_random_weighted_picker_test",
|
||||
size = "medium",
|
||||
srcs = ["lib/random/weighted_picker_test.cc"],
|
||||
@ -3096,7 +3288,7 @@ tf_cc_tests(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "example_example_parser_configuration_test",
|
||||
size = "small",
|
||||
srcs = ["example/example_parser_configuration_test.cc"],
|
||||
|
@ -27,6 +27,7 @@ filegroup(
|
||||
]),
|
||||
)
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cuda_cc_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cuda_cc_tests")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_copts")
|
||||
@ -56,7 +57,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "partial_run_mgr_test",
|
||||
size = "small",
|
||||
srcs = ["partial_run_mgr_test.cc"],
|
||||
@ -82,7 +83,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "message_wrappers_test",
|
||||
size = "small",
|
||||
srcs = ["message_wrappers_test.cc"],
|
||||
@ -121,7 +122,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "session_mgr_test",
|
||||
size = "small",
|
||||
srcs = ["session_mgr_test.cc"],
|
||||
@ -185,7 +186,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "call_options_test",
|
||||
size = "small",
|
||||
srcs = ["call_options_test.cc"],
|
||||
@ -196,7 +197,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "tensor_coding_test",
|
||||
size = "small",
|
||||
srcs = ["tensor_coding_test.cc"],
|
||||
@ -204,6 +205,7 @@ cc_test(
|
||||
deps = [
|
||||
":worker_interface",
|
||||
"//tensorflow/core:core_cpu",
|
||||
"//tensorflow/core:core_cpu_base",
|
||||
"//tensorflow/core:framework",
|
||||
"//tensorflow/core:lib",
|
||||
"//tensorflow/core:protos_all_cc",
|
||||
|
@ -32,6 +32,7 @@ load(
|
||||
)
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cuda_cc_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cuda_cc_tests")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
|
||||
# For platform specific build config
|
||||
load(
|
||||
@ -297,7 +298,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "grpc_tensorflow_server",
|
||||
srcs = [
|
||||
"grpc_tensorflow_server.cc",
|
||||
@ -326,7 +327,7 @@ tf_cuda_library(
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "grpc_testlib_server",
|
||||
testonly = 1,
|
||||
srcs = [
|
||||
|
@ -1,5 +1,6 @@
|
||||
licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cuda_library")
|
||||
|
||||
filegroup(
|
||||
@ -36,7 +37,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "utils_test",
|
||||
srcs = ["utils_test.cc"],
|
||||
deps = [
|
||||
@ -97,7 +98,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "grappler_item_test",
|
||||
srcs = ["grappler_item_test.cc"],
|
||||
deps = [
|
||||
@ -109,7 +110,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "grappler_item_builder_test",
|
||||
srcs = ["grappler_item_builder_test.cc"],
|
||||
deps = [
|
||||
|
@ -1,5 +1,6 @@
|
||||
licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cuda_library")
|
||||
|
||||
filegroup(
|
||||
@ -72,7 +73,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "virtual_cluster_test",
|
||||
srcs = ["virtual_cluster_test.cc"],
|
||||
deps = [
|
||||
@ -105,7 +106,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "single_machine_test",
|
||||
srcs = ["single_machine_test.cc"],
|
||||
args = ["--heap_check=local"], # The GPU tracer leaks memory
|
||||
|
@ -1,6 +1,6 @@
|
||||
licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cuda_library")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cuda_library", "tf_cc_test")
|
||||
|
||||
filegroup(
|
||||
name = "all_files",
|
||||
@ -54,7 +54,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "graph_properties_test",
|
||||
srcs = ["graph_properties_test.cc"],
|
||||
args = ["--heap_check=local"], # The GPU tracer leaks memory
|
||||
@ -91,7 +91,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "graph_memory_test",
|
||||
srcs = ["graph_memory_test.cc"],
|
||||
args = ["--heap_check=local"], # The GPU tracer leaks memory
|
||||
@ -111,7 +111,7 @@ cc_library(
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "robust_stats_test",
|
||||
srcs = ["robust_stats_test.cc"],
|
||||
deps = [
|
||||
@ -139,7 +139,7 @@ tf_cuda_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "utils_test",
|
||||
srcs = ["utils_test.cc"],
|
||||
visibility = ["//visibility:public"],
|
||||
@ -180,7 +180,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "virtual_placer_test",
|
||||
srcs = ["virtual_placer_test.cc"],
|
||||
deps = [
|
||||
@ -214,7 +214,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "virtual_scheduler_test",
|
||||
srcs = ["virtual_scheduler_test.cc"],
|
||||
deps = [
|
||||
@ -264,7 +264,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "op_level_cost_estimator_test",
|
||||
srcs = ["op_level_cost_estimator_test.cc"],
|
||||
deps = [
|
||||
@ -296,7 +296,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "analytical_cost_estimator_test",
|
||||
srcs = ["analytical_cost_estimator_test.cc"],
|
||||
deps = [
|
||||
|
@ -1,5 +1,7 @@
|
||||
licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
filegroup(
|
||||
name = "all_files",
|
||||
srcs = glob(
|
||||
@ -27,7 +29,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "utils_test",
|
||||
srcs = [
|
||||
"utils_test.cc",
|
||||
|
@ -1,5 +1,7 @@
|
||||
licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
filegroup(
|
||||
name = "all_files",
|
||||
srcs = glob(
|
||||
@ -33,7 +35,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "static_schedule_test",
|
||||
srcs = ["static_schedule_test.cc"],
|
||||
deps = [
|
||||
@ -68,7 +70,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "auto_parallel_test",
|
||||
srcs = ["auto_parallel_test.cc"],
|
||||
deps = [
|
||||
@ -103,7 +105,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "constant_folding_test",
|
||||
srcs = ["constant_folding_test.cc"],
|
||||
shard_count = 5,
|
||||
@ -169,7 +171,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "arithmetic_optimizer_test",
|
||||
size = "small",
|
||||
srcs = ["arithmetic_optimizer_test.cc"],
|
||||
@ -202,7 +204,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "model_pruner_test",
|
||||
srcs = ["model_pruner_test.cc"],
|
||||
deps = [
|
||||
@ -237,7 +239,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "memory_optimizer_test",
|
||||
srcs = ["memory_optimizer_test.cc"],
|
||||
deps = [
|
||||
@ -274,7 +276,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "layout_optimizer_test",
|
||||
srcs = ["layout_optimizer_test.cc"],
|
||||
deps = [
|
||||
|
@ -1,5 +1,7 @@
|
||||
licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
filegroup(
|
||||
name = "all_files",
|
||||
srcs = glob(
|
||||
@ -27,7 +29,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "scc_test",
|
||||
srcs = ["scc_test.cc"],
|
||||
data = [
|
||||
@ -58,7 +60,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "topological_sort_test",
|
||||
srcs = ["topological_sort_test.cc"],
|
||||
deps = [
|
||||
@ -83,7 +85,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "frame_test",
|
||||
size = "small",
|
||||
srcs = ["frame_test.cc"],
|
||||
|
@ -30,6 +30,7 @@ load(
|
||||
"if_android",
|
||||
"tf_cc_test",
|
||||
"tf_cc_tests",
|
||||
"tf_cc_binary",
|
||||
"tf_copts",
|
||||
"tf_opts_nortti_if_android",
|
||||
"tf_kernel_library",
|
||||
@ -220,6 +221,7 @@ cc_library(
|
||||
":initializable_lookup_table",
|
||||
"//tensorflow/core:framework",
|
||||
"//tensorflow/core:lib",
|
||||
"//tensorflow/core:lib_internal",
|
||||
],
|
||||
)
|
||||
|
||||
@ -3666,7 +3668,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "loss_test",
|
||||
size = "small",
|
||||
srcs = ["loss_test.cc"],
|
||||
@ -4055,7 +4057,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "spectrogram_convert_test_data",
|
||||
testonly = 1,
|
||||
srcs = ["spectrogram_convert_test_data.cc"],
|
||||
@ -4733,11 +4735,12 @@ cc_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//tensorflow/core:android_tensorflow_lib_lite",
|
||||
"//tensorflow/core:protos_cc",
|
||||
"//tensorflow/core:protos_all_cc_impl",
|
||||
"//third_party/eigen3",
|
||||
"//third_party/fft2d:fft2d_headers",
|
||||
"@fft2d//:fft2d",
|
||||
"@gemmlowp//:gemmlowp",
|
||||
"@protobuf_archive//:protobuf",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
@ -4879,7 +4882,7 @@ tf_cc_test(
|
||||
)
|
||||
|
||||
# Android-only test for quantization utilities.
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "quantization_utils_test_android_only",
|
||||
testonly = 1,
|
||||
srcs = ["quantization_utils_test.cc"],
|
||||
@ -6064,7 +6067,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "summary_interface_test",
|
||||
srcs = ["summary_interface_test.cc"],
|
||||
deps = [
|
||||
|
@ -35,323 +35,6 @@ limitations under the License.
|
||||
#include "tensorflow/core/platform/macros.h"
|
||||
|
||||
namespace tensorflow {
|
||||
namespace lookup {
|
||||
namespace {
|
||||
|
||||
static const int kInputBufferSize = 1 * 1024 * 1024; /* bytes */
|
||||
static const int kLineNumber = -1;
|
||||
static const int kWholeLine = -2;
|
||||
|
||||
// Iterator to initialize tables given 'keys' and 'values' tensors.
//
// The two tensors are returned in the first iteration. It doesn't loop
// over each element of the tensors, since insertions into the lookup table
// can be processed in batches.
|
||||
class KeyValueTensorIterator
|
||||
: public InitializableLookupTable::InitTableIterator {
|
||||
public:
|
||||
// keys and values are not owned by the iterator.
|
||||
explicit KeyValueTensorIterator(const Tensor* keys, const Tensor* values)
|
||||
: keys_(keys), values_(values), valid_(true), status_(Status::OK()) {
|
||||
TensorShape key_shape = keys_->shape();
|
||||
if (!key_shape.IsSameSize(values_->shape())) {
|
||||
valid_ = false;
|
||||
status_ = errors::InvalidArgument(
|
||||
"keys and values should have the same dimension.",
|
||||
key_shape.DebugString(), " vs ", values_->shape().DebugString());
|
||||
}
|
||||
if (key_shape.num_elements() == 0) {
|
||||
valid_ = false;
|
||||
status_ =
|
||||
errors::InvalidArgument("keys and values cannot be empty tensors.");
|
||||
}
|
||||
}
|
||||
|
||||
bool Valid() const override { return valid_; }
|
||||
|
||||
void Next() override {
|
||||
valid_ = false;
|
||||
status_ = errors::OutOfRange("No more data.");
|
||||
}
|
||||
|
||||
const Tensor& keys() const override { return *keys_; }
|
||||
|
||||
const Tensor& values() const override { return *values_; }
|
||||
|
||||
Status status() const override { return status_; }
|
||||
|
||||
int64 total_size() const override {
|
||||
return keys_ == nullptr ? -1 : keys_->NumElements();
|
||||
}
|
||||
|
||||
private:
|
||||
TF_DISALLOW_COPY_AND_ASSIGN(KeyValueTensorIterator);
|
||||
|
||||
const Tensor* keys_; // Doesn't own it.
|
||||
const Tensor* values_; // Doesn't own it.
|
||||
bool valid_; // true if the iterator points to an existing range.
|
||||
Status status_;
|
||||
};
|
||||
|
||||
Status GetNumLinesInTextFile(Env* env, const string& vocab_file,
|
||||
int64* num_lines) {
|
||||
std::unique_ptr<RandomAccessFile> file;
|
||||
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(vocab_file, &file));
|
||||
|
||||
io::InputBuffer input_buffer(file.get(), kInputBufferSize);
|
||||
string line;
|
||||
Status s = input_buffer.ReadLine(&line);
|
||||
int64 next_id = 0;
|
||||
while (s.ok()) {
|
||||
next_id++;
|
||||
s = input_buffer.ReadLine(&line);
|
||||
}
|
||||
if (!errors::IsOutOfRange(s)) {
|
||||
return s;
|
||||
}
|
||||
*num_lines = next_id;
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
// Iterator that reads a text file. Each iteration processes one line: it
// parses the line and populates the key and value tensors used for
// initialization with a single key and its corresponding value.
//
// Which parts of the line populate the key and the value is specified by
// providing key_index and value_index.
|
||||
class TextFileLineIterator
|
||||
: public InitializableLookupTable::InitTableIterator {
|
||||
public:
|
||||
TextFileLineIterator()
|
||||
: valid_(false),
|
||||
vocab_size_(-1),
|
||||
status_(errors::FailedPrecondition("Not initialized")) {}
|
||||
|
||||
// Initialize iterator.
|
||||
//
|
||||
// Prepares the file 'filename' and sets the data types of the returned key
// and value tensors. The indices of the tokens within a line, split on the
// given delimiter, specify where to pick the key and value from.
|
||||
//
|
||||
// - Index -2 means the entire line as string.
|
||||
// - Index -1 means the line number stored in int64.
|
||||
// - Index >= 0 represents the index (starting at zero) of the line split on
//   the delimiter.
|
||||
Status Init(const string& filename, int64 vocab_size, char delimiter,
|
||||
DataType key_dtype, int64 key_index, DataType value_dtype,
|
||||
int64 value_index, Env* env) {
|
||||
if (vocab_size == -1) {
|
||||
TF_RETURN_IF_ERROR(GetNumLinesInTextFile(env, filename, &vocab_size));
|
||||
}
|
||||
filename_ = filename;
|
||||
vocab_size_ = vocab_size;
|
||||
delimiter_ = delimiter;
|
||||
key_ = Tensor(key_dtype, TensorShape({}));
|
||||
value_ = Tensor(value_dtype, TensorShape({}));
|
||||
key_index_ = key_index;
|
||||
value_index_ = value_index;
|
||||
|
||||
status_ = env->NewRandomAccessFile(filename_, &file_);
|
||||
if (!status_.ok()) return status_;
|
||||
|
||||
input_buffer_.reset(new io::InputBuffer(file_.get(), kInputBufferSize));
|
||||
valid_ = true;
|
||||
next_id_ = 0;
|
||||
ignore_split_ = std::max(key_index_, value_index_) < 0;
|
||||
Next();
|
||||
return status_;
|
||||
}
|
||||
|
||||
void Next() override {
|
||||
if (!valid_) return;
|
||||
|
||||
string line;
|
||||
status_ = input_buffer_->ReadLine(&line);
|
||||
if (!status_.ok()) {
|
||||
if (errors::IsOutOfRange(status_) && next_id_ != vocab_size_) {
|
||||
status_ = errors::InvalidArgument("Invalid vocab_size in ", filename_,
|
||||
": expected ", vocab_size_,
|
||||
" but got ", next_id_);
|
||||
}
|
||||
valid_ = false;
|
||||
return;
|
||||
}
|
||||
if (next_id_ >= vocab_size_) {
|
||||
LOG(WARNING) << "Truncated " << filename_ << " before its end at "
|
||||
<< vocab_size_ << " records.";
|
||||
LOG(WARNING) << "next_id_ : " << next_id_;
|
||||
status_ = errors::OutOfRange("Finished reading ", vocab_size_,
|
||||
" of lines from ", filename_);
|
||||
valid_ = false;
|
||||
return;
|
||||
}
|
||||
if (line.empty()) {
|
||||
status_ = errors::InvalidArgument("Invalid content in ", filename_,
|
||||
": empty line found at position ",
|
||||
input_buffer_->Tell(), ".");
|
||||
valid_ = false;
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<string> tokens;
|
||||
if (!ignore_split_) {
|
||||
tokens = str_util::Split(line, delimiter_);
|
||||
if (std::max(key_index_, value_index_) >= tokens.size()) {
|
||||
status_ = errors::InvalidArgument(
|
||||
"Invalid number of columns in ", filename_, " line ", next_id_,
|
||||
" (", line, ") : expected ", std::max(key_index_, value_index_),
|
||||
" got ", tokens.size());
|
||||
valid_ = false;
|
||||
return;
|
||||
}
|
||||
}
|
||||
status_ = SetValue(line, tokens, key_index_, &key_);
|
||||
if (!status_.ok()) {
|
||||
valid_ = false;
|
||||
return;
|
||||
}
|
||||
status_ = SetValue(line, tokens, value_index_, &value_);
|
||||
if (!status_.ok()) {
|
||||
valid_ = false;
|
||||
return;
|
||||
}
|
||||
|
||||
next_id_++;
|
||||
}
|
||||
|
||||
bool Valid() const override { return valid_; }
|
||||
|
||||
const Tensor& keys() const override { return key_; }
|
||||
|
||||
const Tensor& values() const override { return value_; }
|
||||
|
||||
Status status() const override { return status_; }
|
||||
|
||||
int64 total_size() const override { return vocab_size_; }
|
||||
|
||||
private:
|
||||
Tensor key_;
|
||||
Tensor value_;
|
||||
bool valid_; // true if the iterator points to an existing range.
|
||||
int64 key_index_;
|
||||
int64 value_index_;
|
||||
int64 next_id_;
|
||||
int64 vocab_size_;
|
||||
string filename_;
|
||||
char delimiter_;
|
||||
Status status_;
|
||||
bool ignore_split_;
|
||||
std::unique_ptr<RandomAccessFile> file_; // must outlive input_buffer_
|
||||
std::unique_ptr<io::InputBuffer> input_buffer_;
|
||||
|
||||
// Sets the value, taken from the line or tokens based on 'index', into the
// tensor 'tensor'. The value is converted to the tensor's data type.
|
||||
Status SetValue(const string& line, const std::vector<string>& tokens,
|
||||
int64 index, Tensor* tensor) {
|
||||
if (index == kLineNumber) {
|
||||
tensor->flat<int64>()(0) = next_id_;
|
||||
return Status::OK();
|
||||
}
|
||||
const string& token = (index == kWholeLine) ? line : tokens[index];
|
||||
const DataType& dtype = tensor->dtype();
|
||||
switch (dtype) {
|
||||
case DT_INT32: {
|
||||
int32 value;
|
||||
if (!strings::safe_strto32(token.c_str(), &value)) {
|
||||
valid_ = false;
|
||||
return errors::InvalidArgument("Field ", token, " in line ", next_id_,
|
||||
" is not a valid int32.");
|
||||
}
|
||||
tensor->flat<int32>()(0) = value;
|
||||
} break;
|
||||
case DT_INT64: {
|
||||
int64 value;
|
||||
if (!strings::safe_strto64(token.c_str(), &value)) {
|
||||
valid_ = false;
|
||||
return errors::InvalidArgument("Field ", token, " in line ", next_id_,
|
||||
" is not a valid int64.");
|
||||
}
|
||||
tensor->flat<int64>()(0) = value;
|
||||
} break;
|
||||
case DT_FLOAT: {
|
||||
float value;
|
||||
if (!strings::safe_strtof(token.c_str(), &value)) {
|
||||
valid_ = false;
|
||||
return errors::InvalidArgument("Field ", token, " in line ", next_id_,
|
||||
" is not a valid float.");
|
||||
}
|
||||
tensor->flat<float>()(0) = value;
|
||||
} break;
|
||||
case DT_DOUBLE: {
|
||||
double value;
|
||||
if (!strings::safe_strtod(token.c_str(), &value)) {
|
||||
valid_ = false;
|
||||
return errors::InvalidArgument("Field ", token, " in line ", next_id_,
|
||||
" is not a valid double.");
|
||||
}
|
||||
tensor->flat<double>()(0) = value;
|
||||
} break;
|
||||
case DT_STRING:
|
||||
tensor->flat<string>()(0) = token;
|
||||
break;
|
||||
default:
|
||||
valid_ = false;
|
||||
return errors::InvalidArgument("Data type ", dtype, " not supported.");
|
||||
}
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
TF_DISALLOW_COPY_AND_ASSIGN(TextFileLineIterator);
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
// Helper function to initialize an InitializableLookupTable from a text file.
|
||||
Status InitializeTableFromTextFile(const string& filename, int64 vocab_size,
|
||||
char delimiter, int32 key_index,
|
||||
int32 value_index, Env* env,
|
||||
InitializableLookupTable* table) {
|
||||
if (key_index == kLineNumber && table->key_dtype() != DT_INT64) {
|
||||
return errors::InvalidArgument(
|
||||
"Key index for line number requires table key dtype of int64, got ",
|
||||
table->key_dtype());
|
||||
}
|
||||
const DataType& key_dtype = table->key_dtype();
|
||||
const DataType& value_dtype = table->value_dtype();
|
||||
if (key_index == kWholeLine && !DataTypeIsInteger(key_dtype) &&
|
||||
key_dtype != DT_STRING) {
|
||||
return errors::InvalidArgument(
|
||||
"Key index for whole line requires string or integer table key, got ",
|
||||
table->key_dtype());
|
||||
}
|
||||
if (value_index == kLineNumber && value_dtype != DT_INT64) {
|
||||
return errors::InvalidArgument(
|
||||
"Value index for line number requires table value dtype of int64, got ",
|
||||
table->value_dtype());
|
||||
}
|
||||
if (value_index == kWholeLine && value_dtype != DT_STRING) {
|
||||
return errors::InvalidArgument(
|
||||
"Value index for whole line requires table value dtype of string, got ",
|
||||
table->value_dtype());
|
||||
}
|
||||
|
||||
TextFileLineIterator iter;
|
||||
TF_RETURN_IF_ERROR(iter.Init(filename, vocab_size, delimiter, key_dtype,
|
||||
key_index, value_dtype, value_index, env));
|
||||
// For initialization from files, ignore if the table is already
|
||||
// initialized. The table shared name should contain the filename to
|
||||
// avoid trying to initialize the same table from the same file at the same
|
||||
// time.
|
||||
Status s = table->Initialize(iter);
|
||||
if (errors::IsFailedPrecondition(s) && table->is_initialized()) {
|
||||
LOG(WARNING) << "Table trying to initialize from file " << filename
|
||||
<< " is already initialized.";
|
||||
return Status::OK();
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
} // namespace lookup
|
||||
|
||||
// Kernel to initialize a look table given a key and value tensors.
// After this operation, the table becomes read-only.
|
@ -18,11 +18,227 @@ limitations under the License.
|
||||
#include "tensorflow/core/framework/tensor.h"
|
||||
#include "tensorflow/core/framework/tensor_shape.h"
|
||||
#include "tensorflow/core/lib/core/errors.h"
|
||||
#include "tensorflow/core/lib/io/inputbuffer.h"
|
||||
|
||||
namespace tensorflow {
|
||||
namespace lookup {
|
||||
namespace {
|
||||
|
||||
static const int kInputBufferSize = 1 * 1024 * 1024; /* bytes */
|
||||
static const int kLineNumber = -1;
|
||||
static const int kWholeLine = -2;
|
||||
|
||||
Status GetNumLinesInTextFile(Env* env, const string& vocab_file,
|
||||
int64* num_lines) {
|
||||
std::unique_ptr<RandomAccessFile> file;
|
||||
TF_RETURN_IF_ERROR(env->NewRandomAccessFile(vocab_file, &file));
|
||||
|
||||
io::InputBuffer input_buffer(file.get(), kInputBufferSize);
|
||||
string line;
|
||||
Status s = input_buffer.ReadLine(&line);
|
||||
int64 next_id = 0;
|
||||
while (s.ok()) {
|
||||
next_id++;
|
||||
s = input_buffer.ReadLine(&line);
|
||||
}
|
||||
if (!errors::IsOutOfRange(s)) {
|
||||
return s;
|
||||
}
|
||||
*num_lines = next_id;
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
// Iterator that reads a text file. Each iteration process one line, it parses
|
||||
// the line and populates the keys and values tensors used for initialization
|
||||
// with a single key and corresponding value.
|
||||
//
|
||||
// What information of the line to populate the key or values is specified by
|
||||
// providing key_index and value_index.
|
||||
class TextFileLineIterator
|
||||
: public InitializableLookupTable::InitTableIterator {
|
||||
public:
|
||||
TextFileLineIterator()
|
||||
: valid_(false),
|
||||
vocab_size_(-1),
|
||||
status_(errors::FailedPrecondition("Not initialized")) {}
|
||||
|
||||
// Initialize iterator.
|
||||
//
|
||||
// Prepares the file 'filename' and sets the data types to return the keys and
|
||||
// values tensors. It requires the indices of the tokens in the line given a
|
||||
// delimiter to specify where to pick the data from.
|
||||
//
|
||||
// - Index -2 means the entire line as string.
|
||||
// - Index -1 means the line number stored in int64.
|
||||
// - Index >= 0 represent index (starting at zero) of the split line based on
|
||||
// delimiter.
|
||||
Status Init(const string& filename, int64 vocab_size, char delimiter,
|
||||
DataType key_dtype, int64 key_index, DataType value_dtype,
|
||||
int64 value_index, Env* env) {
|
||||
if (vocab_size == -1) {
|
||||
TF_RETURN_IF_ERROR(GetNumLinesInTextFile(env, filename, &vocab_size));
|
||||
}
|
||||
filename_ = filename;
|
||||
vocab_size_ = vocab_size;
|
||||
delimiter_ = delimiter;
|
||||
key_ = Tensor(key_dtype, TensorShape({}));
|
||||
value_ = Tensor(value_dtype, TensorShape({}));
|
||||
key_index_ = key_index;
|
||||
value_index_ = value_index;
|
||||
|
||||
status_ = env->NewRandomAccessFile(filename_, &file_);
|
||||
if (!status_.ok()) return status_;
|
||||
|
||||
input_buffer_.reset(new io::InputBuffer(file_.get(), kInputBufferSize));
|
||||
valid_ = true;
|
||||
next_id_ = 0;
|
||||
ignore_split_ = std::max(key_index_, value_index_) < 0;
|
||||
Next();
|
||||
return status_;
|
||||
}
|
||||
|
||||
void Next() override {
|
||||
if (!valid_) return;
|
||||
|
||||
string line;
|
||||
status_ = input_buffer_->ReadLine(&line);
|
||||
if (!status_.ok()) {
|
||||
if (errors::IsOutOfRange(status_) && next_id_ != vocab_size_) {
|
||||
status_ = errors::InvalidArgument("Invalid vocab_size in ", filename_,
|
||||
": expected ", vocab_size_,
|
||||
" but got ", next_id_);
|
||||
}
|
||||
valid_ = false;
|
||||
return;
|
||||
}
|
||||
if (next_id_ >= vocab_size_) {
|
||||
LOG(WARNING) << "Truncated " << filename_ << " before its end at "
|
||||
<< vocab_size_ << " records.";
|
||||
LOG(WARNING) << "next_id_ : " << next_id_;
|
||||
status_ = errors::OutOfRange("Finished reading ", vocab_size_,
|
||||
" of lines from ", filename_);
|
||||
valid_ = false;
|
||||
return;
|
||||
}
|
||||
if (line.empty()) {
|
||||
status_ = errors::InvalidArgument("Invalid content in ", filename_,
|
||||
": empty line found at position ",
|
||||
input_buffer_->Tell(), ".");
|
||||
valid_ = false;
|
||||
return;
|
||||
}
|
||||
|
||||
std::vector<string> tokens;
|
||||
if (!ignore_split_) {
|
||||
tokens = str_util::Split(line, delimiter_);
|
||||
if (std::max(key_index_, value_index_) >= tokens.size()) {
|
||||
status_ = errors::InvalidArgument(
|
||||
"Invalid number of columns in ", filename_, " line ", next_id_,
|
||||
" (", line, ") : expected ", std::max(key_index_, value_index_),
|
||||
" got ", tokens.size());
|
||||
valid_ = false;
|
||||
return;
|
||||
}
|
||||
}
|
||||
status_ = SetValue(line, tokens, key_index_, &key_);
|
||||
if (!status_.ok()) {
|
||||
valid_ = false;
|
||||
return;
|
||||
}
|
||||
status_ = SetValue(line, tokens, value_index_, &value_);
|
||||
if (!status_.ok()) {
|
||||
valid_ = false;
|
||||
return;
|
||||
}
|
||||
|
||||
next_id_++;
|
||||
}
|
||||
|
||||
bool Valid() const override { return valid_; }
|
||||
|
||||
const Tensor& keys() const override { return key_; }
|
||||
|
||||
const Tensor& values() const override { return value_; }
|
||||
|
||||
Status status() const override { return status_; }
|
||||
|
||||
int64 total_size() const override { return vocab_size_; }
|
||||
|
||||
private:
|
||||
Tensor key_;
|
||||
Tensor value_;
|
||||
bool valid_; // true if the iterator points to an existing range.
|
||||
int64 key_index_;
|
||||
int64 value_index_;
|
||||
int64 next_id_;
|
||||
int64 vocab_size_;
|
||||
string filename_;
|
||||
char delimiter_;
|
||||
Status status_;
|
||||
bool ignore_split_;
|
||||
std::unique_ptr<RandomAccessFile> file_; // must outlive input_buffer_
|
||||
std::unique_ptr<io::InputBuffer> input_buffer_;
|
||||
|
||||
// Set the corresponding value from line or tokens based on 'index' into the
|
||||
// tensor 't'. The value is transformed to the given data type 'dtype'.
|
||||
Status SetValue(const string& line, const std::vector<string>& tokens,
|
||||
int64 index, Tensor* tensor) {
|
||||
if (index == kLineNumber) {
|
||||
tensor->flat<int64>()(0) = next_id_;
|
||||
return Status::OK();
|
||||
}
|
||||
const string& token = (index == kWholeLine) ? line : tokens[index];
|
||||
const DataType& dtype = tensor->dtype();
|
||||
switch (dtype) {
|
||||
case DT_INT32: {
|
||||
int32 value;
|
||||
if (!strings::safe_strto32(token.c_str(), &value)) {
|
||||
valid_ = false;
|
||||
return errors::InvalidArgument("Field ", token, " in line ", next_id_,
|
||||
" is not a valid int32.");
|
||||
}
|
||||
tensor->flat<int32>()(0) = value;
|
||||
} break;
|
||||
case DT_INT64: {
|
||||
int64 value;
|
||||
if (!strings::safe_strto64(token.c_str(), &value)) {
|
||||
valid_ = false;
|
||||
return errors::InvalidArgument("Field ", token, " in line ", next_id_,
|
||||
" is not a valid int64.");
|
||||
}
|
||||
tensor->flat<int64>()(0) = value;
|
||||
} break;
|
||||
case DT_FLOAT: {
|
||||
float value;
|
||||
if (!strings::safe_strtof(token.c_str(), &value)) {
|
||||
valid_ = false;
|
||||
return errors::InvalidArgument("Field ", token, " in line ", next_id_,
|
||||
" is not a valid float.");
|
||||
}
|
||||
tensor->flat<float>()(0) = value;
|
||||
} break;
|
||||
case DT_DOUBLE: {
|
||||
double value;
|
||||
if (!strings::safe_strtod(token.c_str(), &value)) {
|
||||
valid_ = false;
|
||||
return errors::InvalidArgument("Field ", token, " in line ", next_id_,
|
||||
" is not a valid double.");
|
||||
}
|
||||
tensor->flat<double>()(0) = value;
|
||||
} break;
|
||||
case DT_STRING:
|
||||
tensor->flat<string>()(0) = token;
|
||||
break;
|
||||
default:
|
||||
valid_ = false;
|
||||
return errors::InvalidArgument("Data type ", dtype, " not supported.");
|
||||
}
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
TF_DISALLOW_COPY_AND_ASSIGN(TextFileLineIterator);
|
||||
};
|
||||
|
||||
Status GetTableHandle(const string& input_name, OpKernelContext* ctx,
|
||||
string* container, string* table_handle) {
|
||||
{
|
||||
@ -105,5 +321,50 @@ Status CheckTableDataTypes(const LookupInterface& table, DataType key_dtype,
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
// Helper function to initialize an InitializableLookupTable from a text file.
|
||||
Status InitializeTableFromTextFile(const string& filename, int64 vocab_size,
|
||||
char delimiter, int32 key_index,
|
||||
int32 value_index, Env* env,
|
||||
InitializableLookupTable* table) {
|
||||
if (key_index == kLineNumber && table->key_dtype() != DT_INT64) {
|
||||
return errors::InvalidArgument(
|
||||
"Key index for line number requires table key dtype of int64, got ",
|
||||
table->key_dtype());
|
||||
}
|
||||
const DataType& key_dtype = table->key_dtype();
|
||||
const DataType& value_dtype = table->value_dtype();
|
||||
if (key_index == kWholeLine && !DataTypeIsInteger(key_dtype) &&
|
||||
key_dtype != DT_STRING) {
|
||||
return errors::InvalidArgument(
|
||||
"Key index for whole line requires string or integer table key, got ",
|
||||
table->key_dtype());
|
||||
}
|
||||
if (value_index == kLineNumber && value_dtype != DT_INT64) {
|
||||
return errors::InvalidArgument(
|
||||
"Value index for line number requires table value dtype of int64, got ",
|
||||
table->value_dtype());
|
||||
}
|
||||
if (value_index == kWholeLine && value_dtype != DT_STRING) {
|
||||
return errors::InvalidArgument(
|
||||
"Value index for whole line requires table value dtype of string, got ",
|
||||
table->value_dtype());
|
||||
}
|
||||
|
||||
TextFileLineIterator iter;
|
||||
TF_RETURN_IF_ERROR(iter.Init(filename, vocab_size, delimiter, key_dtype,
|
||||
key_index, value_dtype, value_index, env));
|
||||
// For initialization from files, ignore if the table is already
|
||||
// initialized. The table shared name should contain the filename to
|
||||
// avoid trying to initialize the same table from the same file at the same
|
||||
// time.
|
||||
Status s = table->Initialize(iter);
|
||||
if (errors::IsFailedPrecondition(s) && table->is_initialized()) {
|
||||
LOG(WARNING) << "Table trying to initialize from file " << filename
|
||||
<< " is already initialized.";
|
||||
return Status::OK();
|
||||
}
|
||||
return s;
|
||||
}
|
||||
|
||||
} // namespace lookup
|
||||
} // namespace tensorflow
|
||||
|
@ -40,6 +40,63 @@ Status GetInitializableLookupTable(const string& input_name,
|
||||
// table's data types.
|
||||
Status CheckTableDataTypes(const LookupInterface& table, DataType key_dtype,
|
||||
DataType value_dtype, const string& table_name);
|
||||
|
||||
Status InitializeTableFromTextFile(const string& filename, int64 vocab_size,
|
||||
char delimiter, int32 key_index,
|
||||
int32 value_index, Env* env,
|
||||
InitializableLookupTable* table);
|
||||
|
||||
// Iterator to initialize tables given 'keys' and 'values' tensors.
|
||||
//
|
||||
// The two tensors are returned in the first iteration. It doesn't loop
|
||||
// over each element of the tensor since insertions in the lookup table can
|
||||
// process batches.
|
||||
class KeyValueTensorIterator
|
||||
: public InitializableLookupTable::InitTableIterator {
|
||||
public:
|
||||
// keys and values are not owned by the iterator.
|
||||
explicit KeyValueTensorIterator(const Tensor* keys, const Tensor* values)
|
||||
: keys_(keys), values_(values), valid_(true), status_(Status::OK()) {
|
||||
TensorShape key_shape = keys_->shape();
|
||||
if (!key_shape.IsSameSize(values_->shape())) {
|
||||
valid_ = false;
|
||||
status_ = errors::InvalidArgument(
|
||||
"keys and values should have the same dimension.",
|
||||
key_shape.DebugString(), " vs ", values_->shape().DebugString());
|
||||
}
|
||||
if (key_shape.num_elements() == 0) {
|
||||
valid_ = false;
|
||||
status_ =
|
||||
errors::InvalidArgument("keys and values cannot be empty tensors.");
|
||||
}
|
||||
}
|
||||
|
||||
bool Valid() const override { return valid_; }
|
||||
|
||||
void Next() override {
|
||||
valid_ = false;
|
||||
status_ = errors::OutOfRange("No more data.");
|
||||
}
|
||||
|
||||
const Tensor& keys() const override { return *keys_; }
|
||||
|
||||
const Tensor& values() const override { return *values_; }
|
||||
|
||||
Status status() const override { return status_; }
|
||||
|
||||
int64 total_size() const override {
|
||||
return keys_ == nullptr ? -1 : keys_->NumElements();
|
||||
}
|
||||
|
||||
private:
|
||||
TF_DISALLOW_COPY_AND_ASSIGN(KeyValueTensorIterator);
|
||||
|
||||
const Tensor* keys_; // Doesn't own it.
|
||||
const Tensor* values_; // Doesn't own it.
|
||||
bool valid_; // true if the iterator points to an existing range.
|
||||
Status status_;
|
||||
};
|
||||
|
||||
} // namespace lookup
|
||||
} // namespace tensorflow
|
||||
|
||||
|
@ -10,7 +10,11 @@ licenses(["notice"]) # Apache 2.0
|
||||
|
||||
exports_files(["LICENSE"])
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load(
|
||||
"//tensorflow:tensorflow.bzl",
|
||||
"tf_cc_test",
|
||||
"tf_cc_binary",
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "op_compatibility_lib",
|
||||
@ -44,7 +48,7 @@ tf_cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "update_ops",
|
||||
srcs = ["update_ops_main.cc"],
|
||||
deps = [
|
||||
|
@ -1,9 +1,15 @@
|
||||
# Platform-specific build configurations.
|
||||
|
||||
load("@protobuf_archive//:protobuf.bzl", "cc_proto_library")
|
||||
load("@protobuf_archive//:protobuf.bzl", "proto_gen")
|
||||
load("@protobuf_archive//:protobuf.bzl", "py_proto_library")
|
||||
load("//tensorflow:tensorflow.bzl", "if_not_mobile")
|
||||
load("//tensorflow:tensorflow.bzl", "if_not_windows")
|
||||
load("//tensorflow/core:platform/default/build_config_root.bzl", "if_static")
|
||||
load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda")
|
||||
load(
|
||||
"//third_party/mkl:build_defs.bzl",
|
||||
"if_mkl",
|
||||
)
|
||||
|
||||
# Appends a suffix to a list of deps.
|
||||
def tf_deps(deps, suffix):
|
||||
@ -22,15 +28,134 @@ def tf_deps(deps, suffix):
|
||||
|
||||
return tf_deps
|
||||
|
||||
|
||||
def _proto_cc_hdrs(srcs, use_grpc_plugin=False):
|
||||
ret = [s[:-len(".proto")] + ".pb.h" for s in srcs]
|
||||
if use_grpc_plugin:
|
||||
ret += [s[:-len(".proto")] + ".grpc.pb.h" for s in srcs]
|
||||
return ret
|
||||
|
||||
def _proto_cc_srcs(srcs, use_grpc_plugin=False):
|
||||
ret = [s[:-len(".proto")] + ".pb.cc" for s in srcs]
|
||||
if use_grpc_plugin:
|
||||
ret += [s[:-len(".proto")] + ".grpc.pb.cc" for s in srcs]
|
||||
return ret
|
||||
|
||||
# Re-defined protocol buffer rule to allow building "header only" protocol
|
||||
# buffers, to avoid duplicate registrations. Also allows non-iterable cc_libs
|
||||
# containing select() statements.
|
||||
def cc_proto_library(
|
||||
name,
|
||||
srcs=[],
|
||||
deps=[],
|
||||
cc_libs=[],
|
||||
include=None,
|
||||
protoc="@protobuf_archive//:protoc",
|
||||
internal_bootstrap_hack=False,
|
||||
use_grpc_plugin=False,
|
||||
default_header=False,
|
||||
**kargs):
|
||||
"""Bazel rule to create a C++ protobuf library from proto source files.
|
||||
|
||||
Args:
|
||||
name: the name of the cc_proto_library.
|
||||
srcs: the .proto files of the cc_proto_library.
|
||||
deps: a list of dependency labels; must be cc_proto_library.
|
||||
cc_libs: a list of other cc_library targets depended by the generated
|
||||
cc_library.
|
||||
include: a string indicating the include path of the .proto files.
|
||||
protoc: the label of the protocol compiler to generate the sources.
|
||||
internal_bootstrap_hack: a flag indicate the cc_proto_library is used only
|
||||
for bootstraping. When it is set to True, no files will be generated.
|
||||
The rule will simply be a provider for .proto files, so that other
|
||||
cc_proto_library can depend on it.
|
||||
use_grpc_plugin: a flag to indicate whether to call the grpc C++ plugin
|
||||
when processing the proto files.
|
||||
default_header: Controls the naming of generated rules. If True, the `name`
|
||||
rule will be header-only, and an _impl rule will contain the
|
||||
implementation. Otherwise the header-only rule (name + "_headers_only")
|
||||
must be referred to explicitly.
|
||||
**kargs: other keyword arguments that are passed to cc_library.
|
||||
"""
|
||||
|
||||
includes = []
|
||||
if include != None:
|
||||
includes = [include]
|
||||
|
||||
if internal_bootstrap_hack:
|
||||
# For pre-checked-in generated files, we add the internal_bootstrap_hack
|
||||
# which will skip the codegen action.
|
||||
proto_gen(
|
||||
name=name + "_genproto",
|
||||
srcs=srcs,
|
||||
deps=[s + "_genproto" for s in deps],
|
||||
includes=includes,
|
||||
protoc=protoc,
|
||||
visibility=["//visibility:public"],
|
||||
)
|
||||
# An empty cc_library to make rule dependency consistent.
|
||||
native.cc_library(
|
||||
name=name,
|
||||
**kargs)
|
||||
return
|
||||
|
||||
grpc_cpp_plugin = None
|
||||
if use_grpc_plugin:
|
||||
grpc_cpp_plugin = "//external:grpc_cpp_plugin"
|
||||
|
||||
gen_srcs = _proto_cc_srcs(srcs, use_grpc_plugin)
|
||||
gen_hdrs = _proto_cc_hdrs(srcs, use_grpc_plugin)
|
||||
outs = gen_srcs + gen_hdrs
|
||||
|
||||
proto_gen(
|
||||
name=name + "_genproto",
|
||||
srcs=srcs,
|
||||
deps=[s + "_genproto" for s in deps],
|
||||
includes=includes,
|
||||
protoc=protoc,
|
||||
plugin=grpc_cpp_plugin,
|
||||
plugin_language="grpc",
|
||||
gen_cc=1,
|
||||
outs=outs,
|
||||
visibility=["//visibility:public"],
|
||||
)
|
||||
|
||||
if use_grpc_plugin:
|
||||
cc_libs += ["//external:grpc_lib"]
|
||||
|
||||
if default_header:
|
||||
header_only_name = name
|
||||
impl_name = name + "_impl"
|
||||
else:
|
||||
header_only_name = name + "_headers_only"
|
||||
impl_name = name
|
||||
|
||||
native.cc_library(
|
||||
name=impl_name,
|
||||
srcs=gen_srcs,
|
||||
hdrs=gen_hdrs,
|
||||
deps=cc_libs + deps,
|
||||
includes=includes,
|
||||
**kargs)
|
||||
native.cc_library(
|
||||
name=header_only_name,
|
||||
deps=["@protobuf_archive//:protobuf_headers"] + if_static([impl_name]),
|
||||
hdrs=gen_hdrs,
|
||||
**kargs)
|
||||
|
||||
def tf_proto_library_cc(name, srcs = [], has_services = None,
|
||||
protodeps = [], visibility = [], testonly = 0,
|
||||
protodeps = [],
|
||||
visibility = [], testonly = 0,
|
||||
cc_libs = [],
|
||||
cc_stubby_versions = None,
|
||||
cc_grpc_version = None,
|
||||
j2objc_api_version = 1,
|
||||
cc_api_version = 2, go_api_version = 2,
|
||||
java_api_version = 2, py_api_version = 2,
|
||||
js_api_version = 2, js_codegen = "jspb"):
|
||||
js_api_version = 2, js_codegen = "jspb",
|
||||
default_header = False):
|
||||
js_codegen = js_codegen # unused argument
|
||||
js_api_version = js_api_version # unused argument
|
||||
native.filegroup(
|
||||
name = name + "_proto_srcs",
|
||||
srcs = srcs + tf_deps(protodeps, "_proto_srcs"),
|
||||
@ -45,17 +170,20 @@ def tf_proto_library_cc(name, srcs = [], has_services = None,
|
||||
name = name + "_cc",
|
||||
srcs = srcs,
|
||||
deps = tf_deps(protodeps, "_cc") + ["@protobuf_archive//:cc_wkt_protos"],
|
||||
cc_libs = cc_libs + ["@protobuf_archive//:protobuf"],
|
||||
cc_libs = cc_libs + if_static(
|
||||
["@protobuf_archive//:protobuf"],
|
||||
["@protobuf_archive//:protobuf_headers"]
|
||||
),
|
||||
copts = if_not_windows([
|
||||
"-Wno-unknown-warning-option",
|
||||
"-Wno-unused-but-set-variable",
|
||||
"-Wno-sign-compare",
|
||||
]),
|
||||
protoc = "@protobuf_archive//:protoc",
|
||||
default_runtime = "@protobuf_archive//:protobuf",
|
||||
use_grpc_plugin = use_grpc_plugin,
|
||||
testonly = testonly,
|
||||
visibility = visibility,
|
||||
default_header = default_header,
|
||||
)
|
||||
|
||||
def tf_proto_library_py(name, srcs=[], protodeps=[], deps=[], visibility=[],
|
||||
@ -79,14 +207,18 @@ def tf_nano_proto_library(**kwargs):
|
||||
pass
|
||||
|
||||
def tf_proto_library(name, srcs = [], has_services = None,
|
||||
protodeps = [], visibility = [], testonly = 0,
|
||||
protodeps = [],
|
||||
visibility = [], testonly = 0,
|
||||
cc_libs = [],
|
||||
cc_api_version = 2, cc_grpc_version = None,
|
||||
go_api_version = 2,
|
||||
j2objc_api_version = 1,
|
||||
java_api_version = 2, py_api_version = 2,
|
||||
js_api_version = 2, js_codegen = "jspb"):
|
||||
js_api_version = 2, js_codegen = "jspb",
|
||||
default_header = False):
|
||||
"""Make a proto library, possibly depending on other proto libraries."""
|
||||
js_api_version = js_api_version # unused argument
|
||||
js_codegen = js_codegen # unused argument
|
||||
tf_proto_library_cc(
|
||||
name = name,
|
||||
srcs = srcs,
|
||||
@ -95,6 +227,7 @@ def tf_proto_library(name, srcs = [], has_services = None,
|
||||
cc_libs = cc_libs,
|
||||
testonly = testonly,
|
||||
visibility = visibility,
|
||||
default_header = default_header,
|
||||
)
|
||||
|
||||
tf_proto_library_py(
|
||||
@ -166,6 +299,15 @@ def tf_additional_proto_srcs():
|
||||
def tf_additional_all_protos():
|
||||
return ["//tensorflow/core:protos_all"]
|
||||
|
||||
|
||||
def tf_protos_all_impl():
|
||||
return ["//tensorflow/core:protos_all_cc_impl"]
|
||||
|
||||
def tf_protos_all():
|
||||
return if_static(
|
||||
extra_deps=tf_protos_all_impl(),
|
||||
otherwise=["//tensorflow/core:protos_all_cc"])
|
||||
|
||||
def tf_env_time_hdrs():
|
||||
return [
|
||||
"platform/env_time.h",
|
||||
@ -232,9 +374,14 @@ def tf_additional_lib_defines():
|
||||
})
|
||||
|
||||
def tf_additional_lib_deps():
|
||||
return ["@nsync//:nsync_cpp"] + select({
|
||||
"//tensorflow:with_jemalloc_linux_x86_64": ["@jemalloc"],
|
||||
"//tensorflow:with_jemalloc_linux_ppc64le": ["@jemalloc"],
|
||||
return if_static(
|
||||
["@nsync//:nsync_cpp"],
|
||||
["@nsync//:nsync_headers"]
|
||||
) + select({
|
||||
"//tensorflow:with_jemalloc_linux_x86_64_dynamic": ["@jemalloc//:jemalloc_headers"],
|
||||
"//tensorflow:with_jemalloc_linux_ppc64le_dynamic": ["@jemalloc//:jemalloc_headers"],
|
||||
"//tensorflow:with_jemalloc_linux_x86_64": ["@jemalloc//:jemalloc_impl"],
|
||||
"//tensorflow:with_jemalloc_linux_ppc64le": ["@jemalloc//:jemalloc_impl"],
|
||||
"//conditions:default": [],
|
||||
})
|
||||
|
||||
@ -302,3 +449,25 @@ def tf_additional_gdr_lib_defines():
|
||||
def tf_pyclif_proto_library(name, proto_lib, proto_srcfile="", visibility=None,
|
||||
**kwargs):
|
||||
pass
|
||||
|
||||
def tf_additional_binary_deps():
|
||||
return ["@nsync//:nsync_cpp"] + if_cuda(
|
||||
[
|
||||
"//tensorflow/stream_executor:cuda_platform",
|
||||
"//tensorflow/core/platform/default/build_config:cuda",
|
||||
],
|
||||
) + select({
|
||||
"//tensorflow:with_jemalloc_linux_x86_64": ["@jemalloc//:jemalloc_impl"],
|
||||
"//tensorflow:with_jemalloc_linux_ppc64le": ["@jemalloc//:jemalloc_impl"],
|
||||
"//conditions:default": [],
|
||||
}) + [
|
||||
# TODO(allenl): Split these out into their own shared objects (they are
|
||||
# here because they are shared between contrib/ op shared objects and
|
||||
# core).
|
||||
"//tensorflow/core/kernels:lookup_util",
|
||||
"//tensorflow/core/util/tensor_bundle",
|
||||
] + if_mkl(
|
||||
[
|
||||
"//third_party/mkl:intel_binary_blob",
|
||||
],
|
||||
)
|
||||
|
@ -10,6 +10,7 @@ exports_files(["LICENSE"])
|
||||
load("//tensorflow:tensorflow.bzl", "if_cuda")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_copts")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cuda_library")
|
||||
load("//tensorflow/core:platform/default/build_config_root.bzl", "if_static")
|
||||
load("@local_config_sycl//sycl:platform.bzl", "sycl_library_path")
|
||||
|
||||
cc_library(
|
||||
@ -32,9 +33,13 @@ tf_cuda_library(
|
||||
name = "stream_executor",
|
||||
deps = [
|
||||
"//tensorflow/stream_executor",
|
||||
] + if_cuda(
|
||||
["//tensorflow/stream_executor:cuda_platform"],
|
||||
) + select({
|
||||
] + select({
|
||||
"//tensorflow:using_cuda_clang": ["//tensorflow/stream_executor:cuda_platform"],
|
||||
"//tensorflow:using_cuda_nvcc": ["//tensorflow/stream_executor:cuda_platform"],
|
||||
"//tensorflow:using_cuda_clang_with_dynamic_build": [],
|
||||
"//tensorflow:using_cuda_nvcc_with_dynamic_build": [],
|
||||
"//conditions:default": [],
|
||||
}) + select({
|
||||
"@local_config_cuda//cuda:darwin": ["IOKit"],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
@ -44,8 +49,9 @@ cc_library(
|
||||
name = "stream_executor_cuda",
|
||||
deps = [
|
||||
"//tensorflow/stream_executor",
|
||||
"//tensorflow/stream_executor:cuda_platform",
|
||||
] + select({
|
||||
] + if_static(
|
||||
["//tensorflow/stream_executor:cuda_platform"],
|
||||
) + select({
|
||||
"@local_config_cuda//cuda:darwin": ["IOKit"],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
@ -130,6 +136,14 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "protos_cc_impl",
|
||||
copts = tf_copts(),
|
||||
deps = [
|
||||
"//tensorflow/core:protos_all_cc_impl",
|
||||
],
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "protos_cc",
|
||||
copts = tf_copts(),
|
||||
|
@ -47,3 +47,9 @@ def tf_additional_gdr_deps():
      ],
      "//conditions:default": [],
  })

def if_static(extra_deps, otherwise=[]):
  return select({
      "//tensorflow:framework_shared_object": otherwise,
      "//conditions:default": extra_deps,
  })
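`if_static` keys build rules on the `--define framework_shared_object` setting; at runtime the same split shows up as whether a `libtensorflow_framework` shared object ships next to the installed package. A hedged sketch (not part of the change) that probes for it using only `tf.sysconfig.get_lib()` from this commit:

```python
import glob
import os

import tensorflow as tf

lib_dir = tf.sysconfig.get_lib()
# Match .so/.dylib without hard-coding the platform suffix.
framework = glob.glob(os.path.join(lib_dir, 'libtensorflow_framework.*'))
if framework:
  print('framework shared object:', framework[0])
else:
  print('monolithic build: no separate framework shared object found')
```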
|
||||
|
@ -19,7 +19,11 @@ filegroup(
|
||||
visibility = ["//tensorflow:__subpackages__"],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
load("//tensorflow/core:platform/default/build_config.bzl", "tf_proto_library")
|
||||
load("//tensorflow/core:platform/default/build_config.bzl", "tf_additional_all_protos")
|
||||
|
||||
tf_cc_binary(
|
||||
name = "profiler",
|
||||
srcs = ["profiler.cc"],
|
||||
deps = [
|
||||
@ -38,9 +42,6 @@ cc_binary(
|
||||
],
|
||||
)
|
||||
|
||||
load("//tensorflow/core:platform/default/build_config.bzl", "tf_proto_library")
|
||||
load("//tensorflow/core:platform/default/build_config.bzl", "tf_additional_all_protos")
|
||||
|
||||
tf_proto_library(
|
||||
name = "protos_all",
|
||||
srcs = glob(["**/*.proto"]),
|
||||
|
@ -12,6 +12,7 @@ load(
|
||||
"cc_header_only_library",
|
||||
"if_not_windows",
|
||||
"tf_copts",
|
||||
"tf_cc_test",
|
||||
)
|
||||
|
||||
# To be exported to tensorflow/core:mobile_srcs.
|
||||
@ -37,7 +38,7 @@ cc_library(
|
||||
copts = tf_copts() + if_not_windows(["-Wno-sign-compare"]),
|
||||
deps = [
|
||||
":naming",
|
||||
"//tensorflow/core:core_cpu_internal",
|
||||
"//tensorflow/core:core_cpu_lib",
|
||||
"//tensorflow/core:framework",
|
||||
"//tensorflow/core:framework_internal",
|
||||
"//tensorflow/core:lib",
|
||||
@ -60,7 +61,7 @@ cc_library(
|
||||
deps = ["//tensorflow/core:lib"],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "tensor_bundle_test",
|
||||
srcs = ["tensor_bundle_test.cc"],
|
||||
deps = [
|
||||
|
@ -317,15 +317,17 @@ You should be able to compile `zero_out.cc` with a `C++` compiler such as `g++`
or `clang` available on your system. The binary PIP package installs the header
files and the library that you need to compile your op in locations that are
system specific. However, the TensorFlow python library provides the
`get_include` function to get the header directory.
Here is the output of this function on an Ubuntu machine.
`get_include` function to get the header directory, and the `get_lib` function
to get the directory containing a shared object to link against.
Here are the outputs of these functions on an Ubuntu machine.

```bash
$ python
>>> import tensorflow as tf
>>> tf.sysconfig.get_include()
'/usr/local/lib/python2.7/site-packages/tensorflow/include'

>>> tf.sysconfig.get_lib()
'/usr/local/lib/python2.7/site-packages/tensorflow'
```

Assuming you have `g++` installed, here is the sequence of commands you can use
@ -333,8 +335,8 @@ to compile your op into a dynamic library.

```bash
TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')

g++ -std=c++11 -shared zero_out.cc -o zero_out.so -fPIC -I$TF_INC -I$TF_INC/external/nsync/public -O2
TF_LIB=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_lib())')
g++ -std=c++11 -shared zero_out.cc -o zero_out.so -fPIC -I$TF_INC -I$TF_INC/external/nsync/public -L$TF_LIB -ltensorflow_framework -O2
```

On Mac OS X, the additional flag "-undefined dynamic_lookup" is required when
@ -1222,7 +1224,7 @@ nvcc -std=c++11 -c -o cuda_op_kernel.cu.o cuda_op_kernel.cu.cc \
  -I $TF_INC -I$TF_INC/external/nsync/public -D GOOGLE_CUDA=1 -x cu -Xcompiler -fPIC

g++ -std=c++11 -shared -o cuda_op_kernel.so cuda_op_kernel.cc \
  cuda_op_kernel.cu.o -I $TF_INC -I$TF_INC/external/nsync/public -fPIC -lcudart
  cuda_op_kernel.cu.o -I $TF_INC -I$TF_INC/external/nsync/public -fPIC -lcudart -L$TF_LIB -ltensorflow_framework
```

`cuda_op_kernel.so` produced above can be loaded as usual in Python, using the
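Both compile commands above produce an ordinary shared object, and loading it from Python is unchanged by this split. A minimal sketch (not part of the diff) that assumes the `zero_out.so` built above sits in the working directory and registers the guide's `ZeroOut` op, whose generated wrapper is exposed as `zero_out`:

```python
import tensorflow as tf

# tf.load_op_library() dlopens the custom op library; with the framework split,
# its TensorFlow symbols resolve against libtensorflow_framework.so rather than
# against symbols exported into the process with RTLD_GLOBAL.
zero_out_module = tf.load_op_library('./zero_out.so')

with tf.Session() as sess:
  print(sess.run(zero_out_module.zero_out([[1, 2], [3, 4]])))
```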
|
||||
|
@ -9,6 +9,7 @@ licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_custom_op_library")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cuda_tests_tags")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
|
||||
exports_files(["LICENSE"])
|
||||
|
||||
@ -130,7 +131,7 @@ py_test(
|
||||
deps = ["//tensorflow:tensorflow_py"],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "attr_examples",
|
||||
srcs = ["attr_examples.cc"],
|
||||
deps = [
|
||||
|
@ -9,7 +9,9 @@ licenses(["notice"]) # Apache 2.0
|
||||
|
||||
exports_files(["LICENSE"])
|
||||
|
||||
cc_binary(
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
|
||||
tf_cc_binary(
|
||||
name = "label_image",
|
||||
srcs = [
|
||||
"main.cc",
|
||||
|
@ -9,7 +9,9 @@ licenses(["notice"]) # Apache 2.0
|
||||
|
||||
exports_files(["LICENSE"])
|
||||
|
||||
cc_binary(
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
|
||||
tf_cc_binary(
|
||||
name = "detect_objects",
|
||||
srcs = [
|
||||
"main.cc",
|
||||
|
@ -10,6 +10,8 @@ exports_files([
|
||||
"LICENSE",
|
||||
])
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_py_test")
|
||||
|
||||
py_library(
|
||||
@ -123,7 +125,7 @@ tf_py_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "label_wav_cc",
|
||||
srcs = [
|
||||
"label_wav.cc",
|
||||
@ -177,7 +179,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "recognize_commands_test",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
@ -211,7 +213,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "accuracy_utils_test",
|
||||
size = "medium",
|
||||
srcs = [
|
||||
@ -227,7 +229,7 @@ cc_test(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "test_streaming_accuracy",
|
||||
srcs = [
|
||||
"test_streaming_accuracy.cc",
|
||||
|
@ -9,6 +9,9 @@ licenses(["notice"]) # Apache 2.0
|
||||
|
||||
exports_files(["LICENSE"])
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_test")
|
||||
|
||||
cc_library(
|
||||
name = "wav_to_spectrogram_lib",
|
||||
srcs = ["wav_to_spectrogram.cc"],
|
||||
@ -24,7 +27,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "wav_to_spectrogram",
|
||||
srcs = ["main.cc"],
|
||||
deps = [
|
||||
@ -34,7 +37,7 @@ cc_binary(
|
||||
],
|
||||
)
|
||||
|
||||
cc_test(
|
||||
tf_cc_test(
|
||||
name = "wav_to_spectrogram_test",
|
||||
size = "medium",
|
||||
srcs = ["wav_to_spectrogram_test.cc"],
|
||||
|
@ -7,7 +7,12 @@ licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load(":build_defs.bzl", "JAVACOPTS")
|
||||
load(":src/gen/gen_ops.bzl", "tf_java_op_gen_srcjar")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_copts")
|
||||
load(
|
||||
"//tensorflow:tensorflow.bzl",
|
||||
"tf_copts",
|
||||
"tf_cc_binary",
|
||||
"tf_java_test",
|
||||
)
|
||||
|
||||
java_library(
|
||||
name = "tensorflow",
|
||||
@ -95,7 +100,7 @@ java_library(
|
||||
deps = [":tensorflow"],
|
||||
)
|
||||
|
||||
java_test(
|
||||
tf_java_test(
|
||||
name = "GraphTest",
|
||||
size = "small",
|
||||
srcs = ["src/test/java/org/tensorflow/GraphTest.java"],
|
||||
@ -108,7 +113,7 @@ java_test(
|
||||
],
|
||||
)
|
||||
|
||||
java_test(
|
||||
tf_java_test(
|
||||
name = "OperationBuilderTest",
|
||||
size = "small",
|
||||
srcs = ["src/test/java/org/tensorflow/OperationBuilderTest.java"],
|
||||
@ -121,7 +126,7 @@ java_test(
|
||||
],
|
||||
)
|
||||
|
||||
java_test(
|
||||
tf_java_test(
|
||||
name = "OperationTest",
|
||||
size = "small",
|
||||
srcs = ["src/test/java/org/tensorflow/OperationTest.java"],
|
||||
@ -134,7 +139,7 @@ java_test(
|
||||
],
|
||||
)
|
||||
|
||||
java_test(
|
||||
tf_java_test(
|
||||
name = "SavedModelBundleTest",
|
||||
size = "small",
|
||||
srcs = ["src/test/java/org/tensorflow/SavedModelBundleTest.java"],
|
||||
@ -148,7 +153,7 @@ java_test(
|
||||
],
|
||||
)
|
||||
|
||||
java_test(
|
||||
tf_java_test(
|
||||
name = "SessionTest",
|
||||
size = "small",
|
||||
srcs = ["src/test/java/org/tensorflow/SessionTest.java"],
|
||||
@ -161,7 +166,7 @@ java_test(
|
||||
],
|
||||
)
|
||||
|
||||
java_test(
|
||||
tf_java_test(
|
||||
name = "ShapeTest",
|
||||
size = "small",
|
||||
srcs = ["src/test/java/org/tensorflow/ShapeTest.java"],
|
||||
@ -174,7 +179,7 @@ java_test(
|
||||
],
|
||||
)
|
||||
|
||||
java_test(
|
||||
tf_java_test(
|
||||
name = "TensorFlowTest",
|
||||
size = "small",
|
||||
srcs = ["src/test/java/org/tensorflow/TensorFlowTest.java"],
|
||||
@ -186,7 +191,7 @@ java_test(
|
||||
],
|
||||
)
|
||||
|
||||
java_test(
|
||||
tf_java_test(
|
||||
name = "TensorTest",
|
||||
size = "small",
|
||||
srcs = ["src/test/java/org/tensorflow/TensorTest.java"],
|
||||
@ -199,7 +204,7 @@ java_test(
|
||||
],
|
||||
)
|
||||
|
||||
java_test(
|
||||
tf_java_test(
|
||||
name = "ScopeTest",
|
||||
size = "small",
|
||||
srcs = ["src/test/java/org/tensorflow/op/ScopeTest.java"],
|
||||
@ -212,7 +217,7 @@ java_test(
|
||||
],
|
||||
)
|
||||
|
||||
java_test(
|
||||
tf_java_test(
|
||||
name = "PrimitiveOpTest",
|
||||
size = "small",
|
||||
srcs = ["src/test/java/org/tensorflow/op/PrimitiveOpTest.java"],
|
||||
@ -225,7 +230,7 @@ java_test(
|
||||
],
|
||||
)
|
||||
|
||||
java_test(
|
||||
tf_java_test(
|
||||
name = "OperandsTest",
|
||||
size = "small",
|
||||
srcs = ["src/test/java/org/tensorflow/op/OperandsTest.java"],
|
||||
@ -238,7 +243,7 @@ java_test(
|
||||
],
|
||||
)
|
||||
|
||||
java_test(
|
||||
tf_java_test(
|
||||
name = "ConstantTest",
|
||||
size = "small",
|
||||
srcs = ["src/test/java/org/tensorflow/op/core/ConstantTest.java"],
|
||||
@ -264,7 +269,7 @@ LINKER_VERSION_SCRIPT = ":config/version_script.lds"
|
||||
|
||||
LINKER_EXPORTED_SYMBOLS = ":config/exported_symbols.lds"
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "libtensorflow_jni.so",
|
||||
# Set linker options to strip out anything except the JNI
|
||||
# symbols from the library. This reduces the size of the library
|
||||
@ -301,7 +306,7 @@ genrule(
|
||||
tools = [":generate_pom"],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "generate_pom",
|
||||
srcs = ["generate_pom.cc"],
|
||||
deps = ["//tensorflow/c:c_api"],
|
||||
|
@ -1,6 +1,9 @@
|
||||
# -*- Python -*-
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "tf_copts")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary", "tf_copts")
|
||||
load(
|
||||
"//tensorflow/core:platform/default/build_config_root.bzl",
|
||||
"if_static")
|
||||
|
||||
# Given a list of "ops_libs" (a list of files in the core/ops directory
|
||||
# without their .cc extensions), generate Java wrapper code for all operations
|
||||
@ -34,7 +37,7 @@ def tf_java_op_gen_srcjar(name,
|
||||
gen_lib = ops_lib[:ops_lib.rfind("_")]
|
||||
out_gen_tool = out_dir + ops_lib + "_gen_tool"
|
||||
|
||||
native.cc_binary(
|
||||
tf_cc_binary(
|
||||
name=out_gen_tool,
|
||||
copts=tf_copts(),
|
||||
linkopts=["-lm"],
|
||||
@ -51,7 +54,10 @@ def tf_java_op_gen_srcjar(name,
|
||||
gen_srcjar = out_dir + name + ".srcjar"
|
||||
gen_cmds += ["$(location @local_jdk//:jar) cMf $(location :" + gen_srcjar + ") -C $(@D) ."]
|
||||
gen_tools += ["@local_jdk//:jar"] + ["@local_jdk//:jdk"]
|
||||
|
||||
gen_tools += if_static(
|
||||
extra_deps=[],
|
||||
otherwise=["//tensorflow:libtensorflow_framework.so"]
|
||||
)
|
||||
native.genrule(
|
||||
name=name,
|
||||
outs=[gen_srcjar],
|
||||
|
@ -42,7 +42,8 @@ import java.io.InputStream;
|
||||
final class NativeLibrary {
|
||||
private static final boolean DEBUG =
|
||||
System.getProperty("org.tensorflow.NativeLibrary.DEBUG") != null;
|
||||
private static final String LIBNAME = "tensorflow_jni";
|
||||
private static final String JNI_LIBNAME = "tensorflow_jni";
|
||||
private static final String FRAMEWORK_LIBNAME = "tensorflow_framework";
|
||||
|
||||
public static void load() {
|
||||
if (isLoaded() || tryLoadLibrary()) {
|
||||
@ -57,11 +58,17 @@ final class NativeLibrary {
|
||||
return;
|
||||
}
|
||||
// Native code is not present, perhaps it has been packaged into the .jar file containing this.
|
||||
final String resourceName = makeResourceName();
|
||||
log("resourceName: " + resourceName);
|
||||
final InputStream resource =
|
||||
NativeLibrary.class.getClassLoader().getResourceAsStream(resourceName);
|
||||
if (resource == null) {
|
||||
// Extract the JNI library itself
|
||||
final String jniResourceName = makeResourceName(JNI_LIBNAME);
|
||||
log("jniResourceName: " + jniResourceName);
|
||||
final InputStream jniResource =
|
||||
NativeLibrary.class.getClassLoader().getResourceAsStream(jniResourceName);
|
||||
// Extract the JNI's dependency
|
||||
final String frameworkResourceName = makeResourceName(FRAMEWORK_LIBNAME);
|
||||
log("frameworkResourceName: " + frameworkResourceName);
|
||||
final InputStream frameworkResource =
|
||||
NativeLibrary.class.getClassLoader().getResourceAsStream(frameworkResourceName);
|
||||
if (jniResource == null || frameworkResource == null) {
|
||||
throw new UnsatisfiedLinkError(
|
||||
String.format(
|
||||
"Cannot find TensorFlow native library for OS: %s, architecture: %s. See "
|
||||
@ -72,7 +79,14 @@ final class NativeLibrary {
|
||||
os(), architecture()));
|
||||
}
|
||||
try {
|
||||
System.load(extractResource(resource));
|
||||
// Create a temporary directory for the extracted resource and its dependencies.
|
||||
final File tempPath = createTemporaryDirectory();
|
||||
// Deletions are in the reverse order of requests, so we need to request that the directory be
|
||||
// deleted first, so that it is empty when the request is fulfilled.
|
||||
tempPath.deleteOnExit();
|
||||
final String tempDirectory = tempPath.toString();
|
||||
extractResource(frameworkResource, FRAMEWORK_LIBNAME, tempDirectory);
|
||||
System.load(extractResource(jniResource, JNI_LIBNAME, tempDirectory));
|
||||
} catch (IOException e) {
|
||||
throw new UnsatisfiedLinkError(
|
||||
String.format(
|
||||
@ -82,7 +96,7 @@ final class NativeLibrary {
|
||||
|
||||
private static boolean tryLoadLibrary() {
|
||||
try {
|
||||
System.loadLibrary(LIBNAME);
|
||||
System.loadLibrary(JNI_LIBNAME);
|
||||
return true;
|
||||
} catch (UnsatisfiedLinkError e) {
|
||||
log("tryLoadLibraryFailed: " + e.getMessage());
|
||||
@ -100,15 +114,12 @@ final class NativeLibrary {
|
||||
}
|
||||
}
|
||||
|
||||
private static String extractResource(InputStream resource) throws IOException {
|
||||
final String sampleFilename = System.mapLibraryName(LIBNAME);
|
||||
final int dot = sampleFilename.indexOf(".");
|
||||
final String prefix = (dot < 0) ? sampleFilename : sampleFilename.substring(0, dot);
|
||||
final String suffix = (dot < 0) ? null : sampleFilename.substring(dot);
|
||||
|
||||
final File dst = File.createTempFile(prefix, suffix);
|
||||
final String dstPath = dst.getAbsolutePath();
|
||||
private static String extractResource(
|
||||
InputStream resource, String resourceName, String extractToDirectory)
|
||||
throws IOException {
|
||||
final File dst = new File(extractToDirectory, System.mapLibraryName(resourceName));
|
||||
dst.deleteOnExit();
|
||||
final String dstPath = dst.toString();
|
||||
log("extracting native library to: " + dstPath);
|
||||
final long nbytes = copy(resource, dst);
|
||||
log(String.format("copied %d bytes to %s", nbytes, dstPath));
|
||||
@ -139,10 +150,10 @@ final class NativeLibrary {
|
||||
}
|
||||
}
|
||||
|
||||
private static String makeResourceName() {
|
||||
private static String makeResourceName(String baseName) {
|
||||
return "org/tensorflow/native/"
|
||||
+ String.format("%s-%s/", os(), architecture())
|
||||
+ System.mapLibraryName(LIBNAME);
|
||||
+ System.mapLibraryName(baseName);
|
||||
}
|
||||
|
||||
private static long copy(InputStream src, File dstFile) throws IOException {
|
||||
@ -162,5 +173,22 @@ final class NativeLibrary {
|
||||
}
|
||||
}
|
||||
|
||||
// Shamelessly adapted from Guava to avoid using java.nio, for Android API
|
||||
// compatibility.
|
||||
private static File createTemporaryDirectory() {
|
||||
File baseDirectory = new File(System.getProperty("java.io.tmpdir"));
|
||||
String directoryName
|
||||
= "tensorflow_native_libraries-" + System.currentTimeMillis() + "-";
|
||||
for (int attempt = 0; attempt < 1000; attempt++) {
|
||||
File temporaryDirectory = new File(baseDirectory, directoryName + attempt);
|
||||
if (temporaryDirectory.mkdir()) {
|
||||
return temporaryDirectory;
|
||||
}
|
||||
}
|
||||
throw new IllegalStateException(
|
||||
"Could not create a temporary directory (tried to make "
|
||||
+ directoryName + "*) to extract TensorFlow native libraries.");
|
||||
}
|
||||
|
||||
private NativeLibrary() {}
|
||||
}
|
||||
|
@ -21,6 +21,7 @@ load("//tensorflow:tensorflow.bzl", "tf_py_test")
|
||||
load("//tensorflow:tensorflow.bzl", "py_tests")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_py_build_info_genrule")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_py_wrap_cc")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_shared_object")
|
||||
load("//tensorflow:tensorflow.bzl", "cuda_py_test")
|
||||
load("//tensorflow:tensorflow.bzl", "cuda_py_tests")
|
||||
load("//tensorflow/core:platform/default/build_config.bzl", "tf_proto_library")
|
||||
@ -32,6 +33,7 @@ load("//tensorflow/python:build_defs.bzl", "tf_gen_op_wrapper_private_py")
|
||||
load("//tensorflow/core:platform/default/build_config_root.bzl", "tf_additional_verbs_deps")
|
||||
load("//tensorflow/core:platform/default/build_config_root.bzl", "tf_additional_mpi_deps")
|
||||
load("//tensorflow/core:platform/default/build_config_root.bzl", "tf_additional_gdr_deps")
|
||||
load("//tensorflow/core:platform/default/build_config_root.bzl", "if_static")
|
||||
|
||||
py_library(
|
||||
name = "python",
|
||||
@ -314,7 +316,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_shared_object(
|
||||
name = "framework/test_file_system.so",
|
||||
srcs = ["framework/test_file_system.cc"],
|
||||
copts = if_not_windows(["-Wno-sign-compare"]),
|
||||
@ -324,7 +326,6 @@ cc_binary(
|
||||
],
|
||||
"//tensorflow:darwin": [],
|
||||
}),
|
||||
linkshared = 1,
|
||||
deps = [
|
||||
"//tensorflow/core:framework_headers_lib",
|
||||
"@protobuf_archive//:protobuf_headers",
|
||||
@ -338,7 +339,10 @@ py_test(
|
||||
data = [":framework/test_file_system.so"],
|
||||
main = "framework/file_system_test.py",
|
||||
srcs_version = "PY2AND3",
|
||||
tags = ["no_windows"],
|
||||
tags = [
|
||||
"no_pip", # Path issues due to test environment
|
||||
"no_windows",
|
||||
],
|
||||
deps = [
|
||||
":client_testlib",
|
||||
":data_flow_ops",
|
||||
@ -2927,7 +2931,13 @@ tf_cuda_library(
|
||||
|
||||
py_library(
|
||||
name = "pywrap_tensorflow",
|
||||
srcs = ["pywrap_tensorflow.py"],
|
||||
srcs = [
|
||||
"pywrap_tensorflow.py",
|
||||
] + if_static(
|
||||
["pywrap_dlopen_global_flags.py"],
|
||||
# Import will fail, indicating no global dlopen flags
|
||||
otherwise = [],
|
||||
),
|
||||
srcs_version = "PY2AND3",
|
||||
deps = [":pywrap_tensorflow_internal"],
|
||||
)
|
||||
@ -3214,7 +3224,7 @@ tf_cuda_library(
|
||||
"//tensorflow/core",
|
||||
"//tensorflow/core:framework",
|
||||
"//tensorflow/core:lib",
|
||||
"//tensorflow/core:protos_cc",
|
||||
"//tensorflow/core:protos_all_cc",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
@ -2,6 +2,7 @@ licenses(["notice"]) # Apache 2.0
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "py_test")
|
||||
load("//tensorflow:tensorflow.bzl", "cuda_py_test")
|
||||
load("//tensorflow:tensorflow.bzl", "tf_cc_binary")
|
||||
|
||||
cc_library(
|
||||
name = "pywrap_tfe_lib",
|
||||
@ -213,7 +214,7 @@ cc_library(
|
||||
],
|
||||
)
|
||||
|
||||
cc_binary(
|
||||
tf_cc_binary(
|
||||
name = "python_eager_op_gen_demo",
|
||||
deps = [
|
||||
":python_eager_op_gen_main",
|
||||
|
@ -1,6 +1,9 @@
|
||||
"""For eager-mode Python."""
|
||||
|
||||
load("//tensorflow:tensorflow.bzl", "clean_dep", "tf_copts")
|
||||
load("//tensorflow:tensorflow.bzl",
|
||||
"clean_dep",
|
||||
"tf_copts",
|
||||
"tf_cc_binary")
|
||||
|
||||
def tfe_gen_op_wrapper_py(name,
|
||||
out=None,
|
||||
@ -12,7 +15,7 @@ def tfe_gen_op_wrapper_py(name,
|
||||
tool_name = "gen_" + name + "_py_wrappers_cc"
|
||||
if not deps:
|
||||
deps = [str(Label("//tensorflow/core:" + name + "_op_lib"))]
|
||||
native.cc_binary(
|
||||
tf_cc_binary(
|
||||
name=tool_name,
|
||||
linkopts=["-lm"],
|
||||
copts=tf_copts(),
|
||||
|
@ -49,7 +49,7 @@ def get_lib():
    The directory as string.
  """
  import tensorflow as tf
  return _os_path.join(_os_path.dirname(tf.__file__), 'core')
  return _os_path.join(_os_path.dirname(tf.__file__))

_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
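Since `get_lib()` now returns the package root rather than the old `core/` subdirectory, a build script can assemble the same flags the op documentation passes to `g++` directly from Python. A small sketch under that assumption (the source and output file names are placeholders):

```python
import os

import tensorflow as tf

tf_include = tf.sysconfig.get_include()
tf_lib = tf.sysconfig.get_lib()

# Mirror the g++ invocation from the adding-an-op docs: headers come from
# get_include(), the framework shared object from get_lib().
compile_flags = ['-std=c++11', '-shared', '-fPIC',
                 '-I' + tf_include,
                 '-I' + os.path.join(tf_include, 'external', 'nsync', 'public')]
link_flags = ['-L' + tf_lib, '-ltensorflow_framework']

print(' '.join(['g++'] + compile_flags + ['zero_out.cc', '-o', 'zero_out.so'] +
               link_flags + ['-O2']))
```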
|
||||
|
51
tensorflow/python/pywrap_dlopen_global_flags.py
Normal file
@ -0,0 +1,51 @@
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""If possible, exports all symbols with RTLD_GLOBAL.

Note that this file is only imported by pywrap_tensorflow.py if this is a static
build (meaning there is no explicit framework cc_binary shared object dependency
of _pywrap_tensorflow_internal.so). For regular (non-static) builds, RTLD_GLOBAL
is not necessary, since the dynamic dependencies of custom/contrib ops are
explicit.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import ctypes
import sys

# On UNIX-based platforms, pywrap_tensorflow is a SWIG-generated
# python library that dynamically loads _pywrap_tensorflow.so. The
# default mode for loading keeps all the symbol private and not
# visible to other libraries that may be loaded. Setting the mode to
# RTLD_GLOBAL to make the symbols visible, so that custom op libraries
# imported using `tf.load_op_library()` can access symbols defined in
# _pywrap_tensorflow.so.
_use_rtld_global = (hasattr(sys, 'getdlopenflags')
                    and hasattr(sys, 'setdlopenflags'))
if _use_rtld_global:
  _default_dlopen_flags = sys.getdlopenflags()


def set_dlopen_flags():
  if _use_rtld_global:
    sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_GLOBAL)


def reset_dlopen_flags():
  if _use_rtld_global:
    sys.setdlopenflags(_default_dlopen_flags)
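Not part of the new file itself, but a sketch of its intended contract on a `--config=monolithic` (static) build, where this module is packaged and the platform is UNIX-like: set the flags, perform the dlopen-style import, then always restore the defaults. The round trip below only inspects the interpreter's dlopen flags, so it stays side-effect free:

```python
import sys

from tensorflow.python import pywrap_dlopen_global_flags as dlopen_flags

before = sys.getdlopenflags()
dlopen_flags.set_dlopen_flags()      # RTLD_GLOBAL ORed into the defaults
during = sys.getdlopenflags()
dlopen_flags.reset_dlopen_flags()    # back to the interpreter defaults
after = sys.getdlopenflags()

print(before, during, after)
assert before == after
```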
|
@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""pywrap_tensorflow wrapper that exports all symbols with RTLD_GLOBAL."""
"""A wrapper for TensorFlow SWIG-generated bindings."""

from __future__ import absolute_import
from __future__ import division
@ -31,25 +31,38 @@ self_check.preload_check()

# pylint: disable=wildcard-import,g-import-not-at-top,unused-import,line-too-long

# On UNIX-based platforms, pywrap_tensorflow is a SWIG-generated
# python library that dynamically loads _pywrap_tensorflow.so. The
# default mode for loading keeps all the symbol private and not
# visible to other libraries that may be loaded. Setting the mode to
# RTLD_GLOBAL to make the symbols visible, so that custom op libraries
# imported using `tf.load_op_library()` can access symbols defined in
# _pywrap_tensorflow.so.
try:
  # TODO(keveman,mrry): Support dynamic op loading on platforms that do not
  # use `dlopen()` for dynamic loading.
  _use_rtld_global = hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags')
  if _use_rtld_global:
    _default_dlopen_flags = sys.getdlopenflags()
    sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_GLOBAL)
  # This import is expected to fail if there is an explicit shared object
  # dependency (with_framework_lib=true), since we do not need RTLD_GLOBAL.
  from tensorflow.python import pywrap_dlopen_global_flags
  _use_dlopen_global_flags = True
except ImportError:
  _use_dlopen_global_flags = False

# On UNIX-based platforms, pywrap_tensorflow is a SWIG-generated
# python library that dynamically loads _pywrap_tensorflow.so.
_can_set_rtld_local = (hasattr(sys, 'getdlopenflags')
                       and hasattr(sys, 'setdlopenflags'))
if _can_set_rtld_local:
  _default_dlopen_flags = sys.getdlopenflags()

try:
  if _use_dlopen_global_flags:
    pywrap_dlopen_global_flags.set_dlopen_flags()
  elif _can_set_rtld_local:
    # Ensure RTLD_LOCAL behavior for platforms where it isn't the default
    # (macOS). On Linux RTLD_LOCAL is 0, so this does nothing (and would not
    # override an RTLD_GLOBAL in _default_dlopen_flags).
    sys.setdlopenflags(_default_dlopen_flags | ctypes.RTLD_LOCAL)

  from tensorflow.python.pywrap_tensorflow_internal import *
  from tensorflow.python.pywrap_tensorflow_internal import __version__
  from tensorflow.python.pywrap_tensorflow_internal import __git_version__
  from tensorflow.python.pywrap_tensorflow_internal import __compiler_version__
  if _use_rtld_global:

  if _use_dlopen_global_flags:
    pywrap_dlopen_global_flags.reset_dlopen_flags()
  elif _can_set_rtld_local:
    sys.setdlopenflags(_default_dlopen_flags)
except ImportError:
  msg = """%s\n\nFailed to load the native TensorFlow runtime.\n
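Beyond the diff itself: because the dlopen-global path is now gated on whether `pywrap_dlopen_global_flags` was packaged, an installed build can be probed for which loading strategy it uses with the same signal this wrapper relies on:

```python
import tensorflow as tf  # runs the import logic above

try:
  from tensorflow.python import pywrap_dlopen_global_flags  # noqa: F401
  print('monolithic build: pywrap symbols were exported with RTLD_GLOBAL')
except ImportError:
  print('framework shared object build: custom ops link against '
        'libtensorflow_framework.so')
```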
|
||||
|
@ -1,9 +1,19 @@
|
||||
licenses(["restricted"])
|
||||
|
||||
load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda_is_configured")
|
||||
load("//tensorflow/core:platform/default/build_config_root.bzl", "if_static")
|
||||
|
||||
STREAM_EXECUTOR_HEADERS = glob([
|
||||
"*.h",
|
||||
"cuda/*.h",
|
||||
"host/*.h",
|
||||
"lib/*.h",
|
||||
"lib/gtl/*.h",
|
||||
"platform/**/*.h",
|
||||
])
|
||||
|
||||
cc_library(
|
||||
name = "stream_executor",
|
||||
name = "stream_executor_impl",
|
||||
srcs = glob(
|
||||
[
|
||||
"*.cc",
|
||||
@ -16,14 +26,7 @@ cc_library(
|
||||
"**/*_test.cc",
|
||||
],
|
||||
),
|
||||
hdrs = glob([
|
||||
"*.h",
|
||||
"cuda/*.h",
|
||||
"host/*.h",
|
||||
"lib/*.h",
|
||||
"lib/gtl/*.h",
|
||||
"platform/**/*.h",
|
||||
]),
|
||||
hdrs = STREAM_EXECUTOR_HEADERS,
|
||||
linkopts = select({
|
||||
"//tensorflow:freebsd": [],
|
||||
"//conditions:default": ["-ldl"],
|
||||
@ -36,6 +39,16 @@ cc_library(
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "stream_executor",
|
||||
hdrs = STREAM_EXECUTOR_HEADERS,
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//tensorflow/core:lib",
|
||||
"@local_config_cuda//cuda:cuda_headers",
|
||||
] + if_static([":stream_executor_impl"]),
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "cuda_platform",
|
||||
srcs = if_cuda_is_configured(
|
||||
|
@ -14,7 +14,8 @@ load(
    "//tensorflow/core:platform/default/build_config_root.bzl",
    "tf_cuda_tests_tags",
    "tf_sycl_tests_tags",
    "tf_additional_xla_deps_py",)
    "tf_additional_xla_deps_py",
    "if_static",)
load("@local_config_cuda//cuda:build_defs.bzl", "if_cuda", "cuda_default_copts")

load(
@ -212,6 +213,86 @@ def tf_gen_op_libs(op_lib_names, deps=None):
        linkstatic=1,)


def _make_search_paths(prefix, levels_to_root):
  return ",".join(
      ["-rpath,%s/%s" % (prefix, "/".join([".."] * search_level))
       for search_level in range(levels_to_root + 1)])


def _rpath_linkopts(name):
  # Search parent directories up to the TensorFlow root directory for shared
  # object dependencies, even if this op shared object is deeply nested
  # (e.g. tensorflow/contrib/package:python/ops/_op_lib.so). tensorflow/ is then
  # the root and tensorflow/libtensorflow_framework.so should exist when
  # deployed. Other shared object dependencies (e.g. shared between contrib/
  # ops) are picked up as long as they are in either the same or a parent
  # directory in the tensorflow/ tree.
  levels_to_root = PACKAGE_NAME.count("/") + name.count("/")
  return select({
      clean_dep("//tensorflow:darwin"): [
          "-Wl,%s" % (_make_search_paths("@loader_path", levels_to_root),),
      ],
      "//conditions:default": [
          "-Wl,%s" % (_make_search_paths("$$ORIGIN", levels_to_root),),
      ],
  })
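As a worked example of the rpath construction above (the package and target names here are made up; the real values come from PACKAGE_NAME and the rule's name):

def make_search_paths(prefix, levels_to_root):
  # Same string construction as _make_search_paths above.
  return ",".join(
      ["-rpath,%s/%s" % (prefix, "/".join([".."] * search_level))
       for search_level in range(levels_to_root + 1)])

# A hypothetical op library at //tensorflow/examples/foo:_bar.so gives
# levels_to_root = "tensorflow/examples/foo".count("/") + "_bar.so".count("/") = 2,
# so the non-Darwin branch would emit ($$ORIGIN is Bazel's escaping of $ORIGIN):
print("-Wl," + make_search_paths("$$ORIGIN", 2))
# -Wl,-rpath,$$ORIGIN/,-rpath,$$ORIGIN/..,-rpath,$$ORIGIN/../..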

def tf_cc_shared_object(
    name,
    srcs=[],
    deps=[],
    linkopts=[],
    framework_so=if_static(
        extra_deps=[],
        otherwise=["//tensorflow:libtensorflow_framework.so"]),
    **kwargs):
  native.cc_binary(
      name=name,
      srcs=srcs + framework_so,
      deps=deps,
      linkshared = 1,
      linkopts=linkopts + _rpath_linkopts(name) + select({
          clean_dep("//tensorflow:darwin"): [
              "-Wl,-install_name,@rpath/" + name.split("/")[-1],
          ],
          "//conditions:default": [
          ],
      }),
      **kwargs)


# Bazel-generated shared objects which must be linked into TensorFlow binaries
# to define symbols from //tensorflow/core:framework and //tensorflow/core:lib.
def _binary_additional_srcs():
  return if_static(
      extra_deps=[],
      otherwise=[
          clean_dep("//tensorflow:libtensorflow_framework.so"),
      ])


# Links in the framework shared object
# (//third_party/tensorflow:libtensorflow_framework.so) when not building
# statically. Also adds linker options (rpaths) so that the framework shared
# object can be found.
def tf_cc_binary(name,
                 srcs=[],
                 deps=[],
                 linkopts=[],
                 **kwargs):
  native.cc_binary(
      name=name,
      srcs=srcs + _binary_additional_srcs(),
      deps=deps + if_mkl(
          [
              "//third_party/mkl:intel_binary_blob",
          ],
      ),
      linkopts=linkopts + _rpath_linkopts(name),
      **kwargs)
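In a BUILD file the new macro is picked up with a load() and a rule rename, exactly as the hunks later in this change do; the target below is hypothetical:

load("//tensorflow:tensorflow.bzl", "tf_cc_binary")

# Hypothetical tool target: tf_cc_binary adds libtensorflow_framework.so to
# srcs (unless --config=monolithic) plus the rpath linkopts computed above.
tf_cc_binary(
    name = "my_graph_tool",
    srcs = ["my_graph_tool.cc"],
    deps = ["//tensorflow/core:tensorflow"],
)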

def tf_gen_op_wrapper_cc(name,
                         out_ops_file,
                         pkg="",
@ -223,7 +304,7 @@ def tf_gen_op_wrapper_cc(name,
  tool = out_ops_file + "_gen_cc"
  if deps == None:
    deps = [pkg + ":" + name + "_op_lib"]
  native.cc_binary(
  tf_cc_binary(
      name=tool,
      copts=tf_copts(),
      linkopts=["-lm"],
@ -243,7 +324,7 @@ def tf_gen_op_wrapper_cc(name,
          out_ops_file + "_internal.h", out_ops_file + "_internal.cc"
      ],
      srcs=srcs,
      tools=[":" + tool],
      tools=[":" + tool] + _binary_additional_srcs(),
      cmd=("$(location :" + tool + ") $(location :" + out_ops_file + ".h) " +
           "$(location :" + out_ops_file + ".cc) " + override_arg + " " +
           str(include_internal_ops)))
@ -375,7 +456,7 @@ def tf_gen_op_wrapper_py(name,
  tool_name = "gen_" + name + "_py_wrappers_cc"
  if not deps:
    deps = [str(Label("//tensorflow/core:" + name + "_op_lib"))]
  native.cc_binary(
  tf_cc_binary(
      name=tool_name,
      linkopts=["-lm"],
      copts=tf_copts(),
@ -433,29 +514,43 @@ def tf_gen_op_wrapper_py(name,


# Define a bazel macro that creates cc_test for tensorflow.
#
# Links in the framework shared object
# (//third_party/tensorflow:libtensorflow_framework.so) when not building
# statically. Also adds linker options (rpaths) so that the framework shared
# object can be found.
#
# TODO(opensource): we need to enable this to work around the hidden symbol
# __cudaRegisterFatBinary error. Need more investigations.
def tf_cc_test(name,
               srcs,
               deps,
               linkstatic=0,
               tags=[],
               data=[],
               size="medium",
               extra_copts=[],
               suffix="",
               args=None,
               linkopts=[]):
               linkopts=[],
               **kwargs):
  native.cc_test(
      name="%s%s" % (name, suffix),
      srcs=srcs,
      size=size,
      args=args,
      copts=tf_copts(),
      data=data,
      deps=deps,
      linkopts=["-lpthread", "-lm"] + linkopts,
      linkstatic=linkstatic,
      tags=tags)
      srcs=srcs + _binary_additional_srcs(),
      copts=tf_copts() + extra_copts,
      linkopts=["-lpthread", "-lm"] + linkopts + _rpath_linkopts(name),
      deps=deps + if_mkl(
          [
              "//third_party/mkl:intel_binary_blob",
          ],
      ),
      # Nested select() statements seem not to be supported when passed to
      # linkstatic, and we already have a cuda select() passed in to this
      # function.
      linkstatic=linkstatic or select({
          # cc_tests with ".so"s in srcs incorrectly link on Darwin unless
          # linkstatic=1 (https://github.com/bazelbuild/bazel/issues/3450).
          # TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
          clean_dep("//tensorflow:darwin"): 1,
          "//conditions:default": 0,
      }),
      **kwargs)

# Part of the testing workflow requires a distinguishable name for the build
@ -504,8 +599,16 @@ def tf_cuda_cc_test(name,
      name=name,
      srcs=srcs,
      suffix="_gpu",
      deps=deps + if_cuda([clean_dep("//tensorflow/core:gpu_runtime")]),
      linkstatic=if_cuda(1, 0),
      deps=deps + if_cuda([
          clean_dep("//tensorflow/core:gpu_runtime"),
      ]),
      linkstatic=select({
          # TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
          clean_dep("//tensorflow:darwin"): 1,
          "@local_config_cuda//cuda:using_nvcc": 1,
          "@local_config_cuda//cuda:using_clang": 1,
          "//conditions:default": 0,
      }),
      tags=tags + tf_cuda_tests_tags(),
      data=data,
      size=size,
@ -522,19 +625,24 @@ def tf_cuda_only_cc_test(name,
                         args=[],
                         linkopts=[]):
  native.cc_test(
      name="%s%s" % (name, "_gpu"),
      srcs=srcs,
      size=size,
      args=args,
      copts= _cuda_copts() + tf_copts(),
      data=data,
      deps=deps + if_cuda([
          clean_dep("//tensorflow/core:cuda"),
          clean_dep("//tensorflow/core:gpu_lib"),
      ]),
      linkopts=["-lpthread", "-lm"] + linkopts,
      linkstatic=linkstatic,
      tags=tags)
      name="%s%s" % (name, "_gpu"),
      srcs=srcs + _binary_additional_srcs(),
      size=size,
      args=args,
      copts= _cuda_copts() + tf_copts(),
      data=data,
      deps=deps + if_cuda([
          clean_dep("//tensorflow/core:cuda"),
          clean_dep("//tensorflow/core:gpu_lib")]),
      linkopts=["-lpthread", "-lm"] + linkopts + _rpath_linkopts(name),
      linkstatic=linkstatic or select({
          # cc_tests with ".so"s in srcs incorrectly link on Darwin
          # unless linkstatic=1.
          # TODO(allenl): Remove Mac static linking when Bazel 0.6 is out.
          clean_dep("//tensorflow:darwin"): 1,
          "//conditions:default": 0,
      }),
      tags=tags)

# Create a cc_test for each of the tensorflow tests listed in "tests"
def tf_cc_tests(srcs,
@ -596,6 +704,17 @@ def tf_cuda_cc_tests(srcs,
        args=args,
        linkopts=linkopts)

def tf_java_test(name,
                 srcs=[],
                 deps=[],
                 *args,
                 **kwargs):
  native.java_test(
      name=name,
      srcs=srcs,
      deps=deps + _binary_additional_srcs(),
      *args,
      **kwargs)

def _cuda_copts():
  """Gets the appropriate set of copts for (maybe) CUDA compilation.
@ -1000,14 +1119,12 @@ def tf_custom_op_library(name, srcs=[], gpu_srcs=[], deps=[]):
      clean_dep("//tensorflow/core:framework"),
      clean_dep("//tensorflow/core:lib")
  ])

  native.cc_binary(
  tf_cc_shared_object(
      name=name,
      srcs=srcs,
      deps=deps + if_cuda(cuda_deps),
      data=[name + "_check_deps"],
      copts=tf_copts(),
      linkshared=1,
      linkopts=select({
          "//conditions:default": [
              "-lm",
@ -1085,7 +1202,7 @@ def tf_py_wrap_cc(name,
      ]
  })

  native.cc_binary(
  tf_cc_shared_object(
      name=cc_library_name,
      srcs=[module_name + ".cc"],
      copts=(copts + if_not_windows([
@ -1093,7 +1210,6 @@ def tf_py_wrap_cc(name,
      ]) + tf_extension_copts()),
      linkopts=tf_extension_linkopts() + extra_linkopts,
      linkstatic=1,
      linkshared=1,
      deps=deps + extra_deps)
  native.genrule(
      name="gen_" + cc_library_pyd_name,
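Shared objects built with tf_custom_op_library (now routed through tf_cc_shared_object) are loaded from Python the same way as before; a minimal sketch with a made-up output path:

import tensorflow as tf

# Hypothetical bazel-bin path; the .so now records a dependency on
# libtensorflow_framework.so (found via the rpaths above), so no RTLD_GLOBAL
# is needed for its op/kernel registrations to resolve.
_my_ops = tf.load_op_library('bazel-bin/tensorflow/examples/foo/_my_ops.so')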
@ -9,6 +9,7 @@ load(
    "//tensorflow:tensorflow.bzl",
    "tf_copts",
    "tf_cc_test",
    "tf_cc_binary",
)

exports_files(["LICENSE"])
@ -64,7 +65,7 @@ tf_cc_test(
#   --crosstool_top=//external:android/crosstool \
#   --cpu=armeabi-v7a \
#   --host_crosstool_top=@bazel_tools//tools/cpp:toolchain
cc_binary(
tf_cc_binary(
    name = "benchmark_model",
    testonly = 1,
    srcs = ["benchmark_model_main.cc"],
@ -79,6 +79,8 @@ pushd "${TMP_DIR}"
# Obtain paths include and lib paths to the TensorFlow installation
TF_INC=$("${PYTHON_BIN_PATH}" \
    -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')
TF_LIB=$("${PYTHON_BIN_PATH}" \
    -c 'import tensorflow as tf; print(tf.sysconfig.get_lib())')

if [[ -z "${TF_INC}" ]]; then
  die "FAILED to determine TensorFlow include path"
@ -143,7 +145,7 @@ if [[ ${IS_GPU} == "0" ]]; then

  "${GPP_BIN}" -std=c++11 ${EXTRA_GPP_FLAGS} \
      -shared "${SRC_FILE}" -o "${USER_OP_SO}" \
      -fPIC ${TF_INCLUDE_PATH} || \
      -fPIC ${TF_INCLUDE_PATH} -L "${TF_LIB}" -ltensorflow_framework || \
      die "g++ compilation of ${SRC_FILE} FAILED"

else
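The same compile line can be assembled from Python with the sysconfig helpers the script queries; a sketch that assumes an installed TensorFlow pip package and a placeholder zero_out.cc:

import tensorflow as tf

tf_inc = tf.sysconfig.get_include()
tf_lib = tf.sysconfig.get_lib()

# Placeholder file names; the relevant change is the extra -L/-ltensorflow_framework.
cmd = ['g++', '-std=c++11', '-shared', 'zero_out.cc', '-o', 'zero_out.so',
       '-fPIC', '-I' + tf_inc, '-L' + tf_lib, '-ltensorflow_framework']
print(' '.join(cmd))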
@ -9,6 +9,7 @@ load(
    "//tensorflow:tensorflow.bzl",
    "if_not_windows",
    "tf_copts",
    "tf_cc_binary",
    "tf_cc_test",
    "tf_py_test",
)
@ -218,7 +219,7 @@ cc_library(
    ],
)

cc_binary(
tf_cc_binary(
    name = "transform_graph",
    copts = tf_copts(),
    linkstatic = 1,
@ -264,7 +265,7 @@ cc_library(
    ],
)

cc_binary(
tf_cc_binary(
    name = "summarize_graph",
    copts = tf_copts(),
    linkstatic = 1,
@ -274,7 +275,7 @@ cc_binary(
    ],
)

cc_binary(
tf_cc_binary(
    name = "compare_graphs",
    srcs = ["compare_graphs.cc"],
    copts = tf_copts(),
@ -42,6 +42,14 @@ pkg_tar(
    # are resolved, otherwise these rules break when built
    # with Python 3.
    tags = ["manual"],
    deps = [":common_deps"],
)

# Shared objects that all TensorFlow libraries depend on.
pkg_tar(
    name = "common_deps",
    files = ["//tensorflow:libtensorflow_framework.so"],
    tags = ["manual"],
)

pkg_tar(
@ -66,6 +74,7 @@ pkg_tar(
    # are resolved, otherwise these rules break when built
    # with Python 3.
    tags = ["manual"],
    deps = [":common_deps"],
)

pkg_tar(

@ -38,8 +38,9 @@ cd ${TEST_TMPDIR}
mkdir tensorflow
${TAR} -xzf ${TARFILE} -Ctensorflow

# Compile the test .c file
${CC} ${CFILE} -Itensorflow/include -Ltensorflow/lib -ltensorflow -oa.out
# Compile the test .c file. Assumes with_framework_lib=True.
${CC} ${CFILE} -Itensorflow/include -Ltensorflow/lib\
    -ltensorflow_framework -ltensorflow -oa.out

# Execute it, with the shared library available.
# DYLD_LIBRARY_PATH is used on OS X, LD_LIBRARY_PATH on Linux.
@ -6,12 +6,14 @@ package(default_visibility = ["//visibility:private"])

licenses(["notice"])  # Apache 2.0

load("//tensorflow:tensorflow.bzl", "tf_cc_binary")

exports_files([
    "LICENSE",
    "placeholder.txt",
])

cc_binary(
tf_cc_binary(
    name = "tomlpbtxt",
    srcs = ["tomlpbtxt.cc"],
    deps = [
@ -21,7 +23,7 @@ cc_binary(
    ],
)

cc_binary(
tf_cc_binary(
    name = "frommlpbtxt",
    srcs = ["frommlpbtxt.cc"],
    deps = [
@ -179,6 +179,7 @@ sh_binary(
        "//tensorflow/python/eager:eager_pip",
        "//tensorflow/python/saved_model:saved_model",
        "//tensorflow/python/tools:tools_pip",
        "//tensorflow/python:test_ops",
        "//tensorflow/tools/dist_test/server:grpc_tensorflow_server",
    ],
}) + if_mkl(["//third_party/mkl:intel_binary_blob"]),
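Once a pip package is built from these rules, the framework shared object should land next to the Python modules; a quick check against an installed package (the file name may carry a platform suffix):

import os
import tensorflow as tf

# tf.sysconfig.get_lib() points at the installed tensorflow/ package directory,
# which is where the packaged libtensorflow_framework.so is expected to live.
lib_dir = tf.sysconfig.get_lib()
print([f for f in os.listdir(lib_dir) if 'libtensorflow_framework' in f])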
@ -20,6 +20,7 @@ exports_files([
load(
    "//tensorflow:tensorflow.bzl",
    "tf_generate_proto_text_sources",
    "tf_cc_test",
)

# For platform specific build config
@ -35,6 +36,7 @@ cc_binary(
    deps = [
        ":gen_proto_text_functions_lib",
        "//tensorflow/core:lib_proto_parsing",
        "@protobuf_archive//:protobuf",
    ],
)

@ -75,7 +77,7 @@ tf_generate_proto_text_sources(
    srcs_relative_dir = "tensorflow/tools/proto_text/",
)

cc_test(
tf_cc_test(
    name = "gen_proto_text_functions_lib_test",
    size = "small",
    srcs = [
19
third_party/grpc/grpc.patch
vendored
@ -1,5 +1,5 @@
diff --git a/BUILD b/BUILD
index 6dcc76eb7a..903739bfad 100644
index 6552d5879e..59adb1ce1c 100644
--- a/BUILD
+++ b/BUILD
@@ -287,6 +287,7 @@ grpc_cc_library(
@ -10,6 +10,23 @@ index 6dcc76eb7a..903739bfad 100644
        "grpc_unsecure",
    ],
)
@@ -1519,13 +1520,13 @@ grpc_cc_library(

grpc_cc_library(
    name = "grpc++_config_proto",
-    external_deps = [
-        "protobuf",
-    ],
    language = "c++",
    public_hdrs = [
        "include/grpc++/impl/codegen/config_protobuf.h",
    ],
+    deps = [
+        "@protobuf_archive//:protobuf_headers",
+    ],
)

grpc_cc_library(
diff --git a/bazel/grpc_build_system.bzl b/bazel/grpc_build_system.bzl
index f793cae56d..0295adb8ab 100644
--- a/bazel/grpc_build_system.bzl
11
third_party/jemalloc.BUILD
vendored
@ -8,7 +8,14 @@ exports_files(["COPYING"])
load("@%ws%//third_party:common.bzl", "template_rule")

cc_library(
    name = "jemalloc",
    name = "jemalloc_headers",
    hdrs = ["include/jemalloc/jemalloc.h"],
    includes = ["include"],
    visibility = ["//visibility:public"],
)

cc_library(
    name = "jemalloc_impl",
    srcs = [
        "src/arena.c",
        "src/atomic.c",
@ -79,7 +86,6 @@ cc_library(
        "include/jemalloc/internal/util.h",
        "include/jemalloc/internal/valgrind.h",
        "include/jemalloc/internal/witness.h",
        "include/jemalloc/jemalloc.h",
    ],
    # Same flags that jemalloc uses to build.
    copts = [
@ -101,6 +107,7 @@ cc_library(
        ],
    }),
    visibility = ["//visibility:public"],
    deps = [":jemalloc_headers"],
)

sh_binary(