Make flatbuffer_translate_lib dynamically linked

To do this, some statically registered translate functions are moved to a
separate C++ file and target. Only the binaries that require these
translate functions need to link them statically.
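
As an illustration, such a registration-only translation unit holds just a
file-scope registration object, following the mlir::TranslateToMLIRRegistration
pattern used in the import code below. This is a minimal sketch, not code from
this change; the file name, translation name, and callback are placeholders:

    // flatbuffer_translate_registration_sketch.cc (hypothetical): contains
    // only static registrations. Binaries that want the translate functions
    // depend on this target, which is built with alwayslink = 1 so the linker
    // does not drop the otherwise-unreferenced object file.
    #include "llvm/Support/SourceMgr.h"
    #include "mlir/IR/MLIRContext.h"  // TF:llvm-project
    #include "mlir/IR/Module.h"       // TF:llvm-project
    #include "mlir/Translation.h"     // TF:llvm-project

    static mlir::OwningModuleRef ExampleFileToMlir(llvm::SourceMgr& source_mgr,
                                                   mlir::MLIRContext* context) {
      // A real translator parses the main buffer here; the body is elided.
      return nullptr;
    }

    // The constructor of this file-scope object runs at start-up in every
    // binary that links this object file and registers "example-to-mlir"
    // as an mlir-translate option.
    static mlir::TranslateToMLIRRegistration ExampleFileToMlirReg(
        "example-to-mlir", ExampleFileToMlir);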

This CL also removes the tensorflow/core:lib dependency from the
quantize_model target.
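
For reference, the status and error helpers involved here live in headers such
as tensorflow/core/platform/errors.h and tensorflow/core/platform/status.h,
which show up in the include edits further down. A minimal usage sketch with a
hypothetical helper, not code from this change:

    #include <string>

    #include "tensorflow/core/platform/errors.h"
    #include "tensorflow/core/platform/status.h"

    // Hypothetical helper: builds a tensorflow::Status using only the
    // fine-grained platform headers.
    static tensorflow::Status CheckNameNotEmpty(const std::string& name) {
      if (name.empty()) {
        return tensorflow::errors::InvalidArgument("name must not be empty");
      }
      return tensorflow::Status::OK();
    }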

PiperOrigin-RevId: 302364991
Change-Id: I89c7898fd320d84d340810c690098cc69a21c471
Christian Sigg 2020-03-22 22:45:40 -07:00 committed by TensorFlower Gardener
parent 0c487d6417
commit 7116a21f17
14 changed files with 1527 additions and 1586 deletions


@@ -224,6 +224,7 @@ cc_library(
deps = [
":tensorflow_lite_ops_inc_gen",
":validators",
"//tensorflow/compiler/mlir/tensorflow",
"//tensorflow/compiler/mlir/tensorflow:tensorflow_types",
"//tensorflow/lite/schema:schema_fbs",
"@llvm-project//llvm:support",
@@ -553,14 +554,14 @@ cc_library(
cc_library(
name = "flatbuffer_translate_lib",
srcs = [
"flatbuffer_export.cc",
"flatbuffer_import.cc",
"flatbuffer_translate.cc",
"utils/convert_type.cc",
],
hdrs = [
"flatbuffer_export.h",
"flatbuffer_export_flags.h",
"flatbuffer_import.h",
"flatbuffer_translate.h",
"flatbuffer_translate_flags.h",
"utils/convert_type.h",
],
deps = [
@@ -578,10 +579,8 @@ cc_library(
"//tensorflow/compiler/mlir/tensorflow:tensorflow_types",
"//tensorflow/compiler/xla:statusor",
"//tensorflow/core:framework",
"//tensorflow/core:lib",
"//tensorflow/core:protos_all_cc",
"//tensorflow/core/platform:errors",
"//tensorflow/core/platform:logging",
"//tensorflow/core/platform:status",
"//tensorflow/lite:framework",
"//tensorflow/lite:schema_fbs_version",
"//tensorflow/lite:string_util",
@@ -602,32 +601,15 @@ cc_library(
"@llvm-project//mlir:Support",
"@llvm-project//mlir:Translation",
],
)
cc_library(
name = "flatbuffer_translate_registeration",
srcs = [
"flatbuffer_translate.cc",
],
deps = [
":flatbuffer_translate_lib",
"//tensorflow/compiler/mlir/tensorflow:mlir_roundtrip_flags",
"@llvm-project//llvm:support",
"@llvm-project//mlir:IR",
"@llvm-project//mlir:LoopOpsTransforms",
"@llvm-project//mlir:MlirTranslateMain",
"@llvm-project//mlir:QuantOps",
"@llvm-project//mlir:StandardOps",
"@llvm-project//mlir:Support",
"@llvm-project//mlir:Translation",
],
alwayslink = 1,
)
tf_cc_binary(
name = "flatbuffer_translate",
deps = [
":flatbuffer_translate_registeration",
":flatbuffer_translate_lib",
"@llvm-project//mlir:LoopOpsTransforms",
"@llvm-project//mlir:MlirTranslateMain",
],
)
@@ -665,13 +647,10 @@ filegroup(
tf_cc_binary(
name = "tf_tfl_translate",
srcs = [
":tf_tfl_translate_main",
],
srcs = [":tf_tfl_translate_main"],
deps = [
":common",
":flatbuffer_translate_lib",
":flatbuffer_translate_registeration",
":tensorflow_lite",
":tf_tfl_passes",
":tf_tfl_translate_cl_options",
@@ -693,18 +672,15 @@ tf_cc_binary(
tf_cc_binary(
name = "mlir-tflite-runner",
srcs = [
"mlir_tflite_runner.cc",
],
srcs = ["mlir_tflite_runner.cc"],
deps = [
":flatbuffer_translate_lib",
":flatbuffer_translate_registeration",
"//tensorflow/compiler/mlir/tensorflow:mlir_roundtrip_flags",
"//tensorflow/core:lib",
"//tensorflow/core/platform:logging",
"//tensorflow/lite:framework",
"//tensorflow/lite/delegates/flex:delegate",
"//tensorflow/lite/kernels:builtin_ops",
"@com_google_absl//absl/base:core_headers",
"@com_google_absl//absl/strings",
"@llvm-project//llvm:support",
"@llvm-project//mlir:IR",

File diff suppressed because it is too large.


@@ -63,16 +63,20 @@ limitations under the License.
#include "mlir/Support/LLVM.h" // TF:llvm-project
#include "mlir/Translation.h" // TF:llvm-project
#include "tensorflow/compiler/mlir/lite/flatbuffer_operator.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_translate.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_translate_flags.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h"
#include "tensorflow/compiler/mlir/lite/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_ops.h"
#include "tensorflow/compiler/mlir/tensorflow/ir/tf_types.h"
#include "tensorflow/compiler/mlir/tensorflow/translate/mlir_roundtrip_flags.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/mangling_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/core/framework/tensor.pb.h"
#include "tensorflow/core/framework/tensor_shape.pb.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/platform/status.h"
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_generated.h"
@@ -96,6 +100,45 @@ using xla::StatusOr;
namespace errors = tensorflow::errors;
namespace tfl = mlir::TFL;
using llvm::cl::opt;
// Commandline flag to enable the control of flatbuffer import.
bool use_external_constant;
// Commandline flag to enable graph pruning.
bool experimental_prune_unreachable_nodes_unconditionally;
// NOLINTNEXTLINE
static opt<bool, true> use_external_constant_flag(
"use-external-constant",
llvm::cl::desc("Use external constant during flatbuffer import"),
llvm::cl::location(use_external_constant), llvm::cl::init(false));
// TODO(b/147111261): After the importer supports generic custom ops, we should
// change the flag to a more lightwise flag, e.g.
// "import_custom_ops_as_side_effect_free_ops", and let the MLIR DCE to prune
// the operations.
// NOLINTNEXTLINE
static opt<bool, true> experimental_prune_unreachable_nodes_unconditionally_flg(
"experimental-prune-unreachable-nodes-unconditionally",
llvm::cl::desc("Prune nodes that are not ancestors of the output nodes."),
llvm::cl::location(experimental_prune_unreachable_nodes_unconditionally),
llvm::cl::init(false));
// NOLINTNEXTLINE
static opt<std::string> input_arrays_flag(
"input-arrays",
llvm::cl::desc(
"List of input tensors, if different from the default inputs"),
llvm::cl::init(""));
// NOLINTNEXTLINE
static opt<std::string> output_arrays_flag(
"output-arrays",
llvm::cl::desc(
"List of output tensors, if different from the default outputs"),
llvm::cl::init(""));
namespace {
bool IsScalar(const TensorT& tensor) {
// TODO(b/138222071) We can't distinguish scalars and unranked tensors
@@ -1020,3 +1063,42 @@ OwningModuleRef tflite::FlatBufferToMlir(
return OwningModuleRef(module);
}
static OwningModuleRef FlatBufferFileToMlirTrans(
llvm::SourceMgr* source_mgr, MLIRContext* context,
bool use_external_constant,
bool experimental_prune_unreachable_nodes_unconditionally) {
const llvm::MemoryBuffer* input =
source_mgr->getMemoryBuffer(source_mgr->getMainFileID());
std::string error;
auto loc =
mlir::FileLineColLoc::get(input->getBufferIdentifier(), 0, 0, context);
// Parses input/output names from command line options.
std::vector<std::string> inputs;
std::vector<std::string> outputs;
// Use output parser since we only have tensor names.
if (!tensorflow::ParseOutputArrayInfo(input_arrays_flag, &inputs).ok()) {
return emitError(loc, "parsing input array info failed ")
<< input_arrays_flag,
nullptr;
}
if (!tensorflow::ParseOutputArrayInfo(output_arrays_flag, &outputs).ok()) {
return emitError(loc, "parsing output array info failed ")
<< output_arrays_flag,
nullptr;
}
return tflite::FlatBufferToMlir(
absl::string_view(input->getBufferStart(), input->getBufferSize()),
context, loc, use_external_constant, inputs, outputs,
experimental_prune_unreachable_nodes_unconditionally);
}
static mlir::TranslateToMLIRRegistration FlatBufferFileToMlirTransReg(
"tflite-flatbuffer-to-mlir",
[](llvm::SourceMgr& source_mgr, MLIRContext* context) {
return FlatBufferFileToMlirTrans(
&source_mgr, context, use_external_constant,
experimental_prune_unreachable_nodes_unconditionally);
});

File diff suppressed because it is too large.


@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_EXPORT_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_EXPORT_H_
#ifndef TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_TRANSLATE_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_TRANSLATE_H_
#include <string>
@@ -40,4 +40,4 @@ bool MlirToFlatBufferTranslateFunction(
tensorflow::OpOrArgNameMapper* op_or_arg_name_mapper);
} // namespace tflite
#endif // TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_EXPORT_H_
#endif // TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_TRANSLATE_H_


@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_EXPORT_FLAGS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_EXPORT_FLAGS_H_
#ifndef TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_TRANSLATE_FLAGS_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_TRANSLATE_FLAGS_H_
#include <string>
@@ -28,4 +28,4 @@ extern bool lower_tensor_list_ops;
// The flag to control whether debug info gets stripped on export.
extern bool strip_debug_info;
#endif // TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_EXPORT_FLAGS_H_
#endif // TENSORFLOW_COMPILER_MLIR_LITE_FLATBUFFER_TRANSLATE_FLAGS_H_


@@ -34,8 +34,8 @@ limitations under the License.
#include "mlir/IR/MLIRContext.h" // TF:llvm-project
#include "mlir/IR/Module.h" // TF:llvm-project
#include "mlir/Parser.h" // TF:llvm-project
#include "tensorflow/compiler/mlir/lite/flatbuffer_export.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_export_flags.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_translate.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_translate_flags.h"
#include "tensorflow/core/platform/init_main.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/lite/delegates/flex/delegate.h"


@@ -23,8 +23,8 @@ limitations under the License.
#include "mlir/Pass/Pass.h" // TF:llvm-project
#include "mlir/Pass/PassManager.h" // TF:llvm-project
#include "tensorflow/compiler/mlir/lite/common/tfl_pass_config.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_export.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_import.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_translate.h"
#include "tensorflow/compiler/mlir/lite/quantization/quantization_config.h"
#include "tensorflow/compiler/mlir/lite/transforms/passes.h"
#include "tensorflow/compiler/mlir/lite/utils/convert_type.h"


@@ -23,8 +23,8 @@ limitations under the License.
#include "mlir/Pass/Pass.h" // TF:llvm-project
#include "mlir/Pass/PassManager.h" // TF:llvm-project
#include "tensorflow/compiler/mlir/lite/common/tfl_pass_config.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_export.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_import.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_translate.h"
#include "tensorflow/compiler/mlir/lite/utils/convert_type.h"
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/core/framework/types.pb.h"


@@ -28,8 +28,8 @@ limitations under the License.
#include "mlir/Support/FileUtilities.h" // TF:llvm-project
#include "tensorflow/compiler/mlir/init_mlir.h"
#include "tensorflow/compiler/mlir/lite/common/tfl_pass_config.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_export.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_export_flags.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_translate.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_translate_flags.h"
#include "tensorflow/compiler/mlir/lite/tf_tfl_passes.h"
#include "tensorflow/compiler/mlir/lite/tf_tfl_translate_cl.h"
#include "tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.h"


@@ -25,7 +25,7 @@ limitations under the License.
#include "mlir/Pass/Pass.h" // TF:llvm-project
#include "mlir/Support/FileUtilities.h" // TF:llvm-project
#include "mlir/Transforms/Passes.h" // TF:llvm-project
#include "tensorflow/compiler/mlir/lite/flatbuffer_export.h"
#include "tensorflow/compiler/mlir/lite/flatbuffer_translate.h"
#include "tensorflow/compiler/mlir/lite/quantization/quantization_config.h"
#include "tensorflow/compiler/mlir/lite/transforms/passes.h"
#include "tensorflow/compiler/mlir/tensorflow/transforms/decode_constant.h"


@@ -811,8 +811,7 @@ cc_library(
srcs = ["utils/error_util.cc"],
hdrs = ["utils/error_util.h"],
deps = [
"//tensorflow/core/platform:errors",
"//tensorflow/core/platform:status",
"//tensorflow/core:lib",
"@llvm-project//llvm:support",
"@llvm-project//mlir:IR",
],


@@ -15,7 +15,7 @@ limitations under the License.
#include "tensorflow/compiler/mlir/tensorflow/utils/error_util.h"
#include "tensorflow/core/platform/errors.h"
#include "tensorflow/core/lib/core/errors.h"
namespace mlir {


@@ -21,7 +21,7 @@ limitations under the License.
#include "mlir/IR/Diagnostics.h" // TF:llvm-project
#include "mlir/IR/Location.h" // TF:llvm-project
#include "mlir/IR/MLIRContext.h" // TF:llvm-project
#include "tensorflow/core/platform/status.h"
#include "tensorflow/core/lib/core/status.h"
// Error utilities for MLIR when interacting with code using Status returns.
namespace mlir {