Add experimental directory under mlir/lite and add a new interface for hardware targets.

This is experimental and can be deleted at any time.

PiperOrigin-RevId: 301880113
Change-Id: I7e7df4e83d3219b36712626b6afbc0bf72a79d41
This commit is contained in:
Karim Nosir 2020-03-19 13:09:11 -07:00 committed by TensorFlower Gardener
parent b65b240474
commit f13d9b12f9
6 changed files with 141 additions and 1 deletions

View File

@ -26,6 +26,7 @@ package_group(
filegroup(
name = "tensorflow_lite_ops_td_files",
srcs = [
"experimental/tfl_hardware_interfaces.td",
"ir/tfl_op_interfaces.td",
"ir/tfl_ops.td",
"//tensorflow/compiler/mlir/lite/quantization:quantization_td_files",
@ -204,6 +205,7 @@ cc_library(
cc_library(
name = "tensorflow_lite",
srcs = [
"experimental/estimators/estimator.h",
"ir/tfl_ops.cc",
"ir/tfl_ops.cc.inc",
"ir/tfl_ops.h.inc",
@ -439,6 +441,7 @@ genrule(
srcs = [
"ir/tfl_ops.td",
"ir/tfl_op_interfaces.td",
"experimental/tfl_hardware_interfaces.td",
"@llvm-project//mlir:include/mlir/Interfaces/LoopLikeInterface.td",
"//tensorflow/compiler/mlir/lite/quantization:quantization_td_files",
],

View File

@ -0,0 +1,56 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_ESTIMATORS_ESTIMATOR_H_
#define TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_ESTIMATORS_ESTIMATOR_H_
#include "llvm/Support/raw_ostream.h"
#include "mlir/IR/Operation.h" // TF:llvm-project
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h.inc"
namespace hardware {
// Empty tag classes representing target hardware types. They carry no state
// and exist only to select a TFLiteCostEstimator specialization via its
// TargetHardware template parameter.
class CPU {};
class GPU {};
} // namespace hardware
// Fallback cost estimator for any (Op, TargetHardware) pair that has no
// dedicated specialization: logs a diagnostic and conservatively reports the
// op as unsupported with zero cost.
template <typename Op, typename TargetHardware>
class TFLiteCostEstimator {
 public:
  // Returns the estimated cost of running `op` on TargetHardware.
  // No estimator is registered for this combination, so emit a diagnostic
  // and return 0.0.
  static double GetCost(mlir::Operation* op) {
    // StringRef streams directly into raw_ostream; no need to materialize a
    // std::string with .str(). Terminate with '\n' so diagnostics don't run
    // together on the error stream.
    llvm::errs() << "No defined cost function for op: "
                 << op->getName().getStringRef() << "\n";
    return 0.0;
  }

  // Returns whether `op` can be run on TargetHardware. Unknown combinations
  // are reported as unsupported.
  static bool IsSupported(mlir::Operation* op) {
    llvm::errs() << "No defined support for op: "
                 << op->getName().getStringRef() << "\n";
    return false;
  }
};
// Partial specialization for CPU: every op is assumed runnable on CPU, at a
// flat cost of zero.
// TODO(karimnosseir): Only allow TFL ops in the "TFL_OP" param.
template <typename TFL_OP>
class TFLiteCostEstimator<TFL_OP, hardware::CPU> {
 public:
  // Returns the estimated CPU cost of the op; currently a placeholder.
  // TODO(karimnosseir): Update and use table based method and lookup
  // cost from a loadable table ?
  static double GetCost(mlir::Operation* /*op*/) {
    return 0.0;
  }

  // Every op is supported on CPU, so the operation itself is not inspected.
  static bool IsSupported(mlir::Operation* /*op*/) {
    return true;
  }
};
#endif // TENSORFLOW_COMPILER_MLIR_LITE_EXPERIMENTAL_ESTIMATORS_ESTIMATOR_H_

View File

@ -0,0 +1,76 @@
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// WARNING: This Interface is experimental, DO NOT USE.
// This is the Target Hardware operation interface definition file
// for TensorFlow Lite.
#ifndef TFL_TARGET_HARDWARE_OP_INTERFACES
#define TFL_TARGET_HARDWARE_OP_INTERFACES
// Op interface for ops that can execute on CPU. The default method bodies
// forward to the TFLiteCostEstimator<ConcreteOp, hardware::CPU>
// specialization declared in experimental/estimators/estimator.h.
def TFL_CpuTargetOp : OpInterface<"CpuOpTargetInterface"> {
let description = [{
Interface for ops to run on CPU.
}];
let methods = [
InterfaceMethod<
[{Returns the cost of running this op on CPU.}],
// TODO(karimnosseir): Change to return Cost object instead.
"double", "GetOpCost", (ins "mlir::Operation*":$op_to_check), [{
// TODO(karimnosseir): Consider changing to another way that doesn't
// rely on template param name.
return TFL::TFLiteCostEstimator<ConcreteOp, TFL::hardware::CPU>::GetCost(op_to_check);
}]
>,
InterfaceMethod<
[{Returns whether this op can be run on CPU.}],
"bool", "IsSupported", (ins "mlir::Operation*":$op_to_check), [{
// TODO(karimnosseir): Consider changing to another way that doesn't
// rely on template param name.
return TFL::TFLiteCostEstimator<ConcreteOp, TFL::hardware::CPU>::IsSupported(op_to_check);
}]
>,
];
}
// Op interface for ops that can execute on GPU. Mirrors TFL_CpuTargetOp:
// default method bodies forward to the
// TFLiteCostEstimator<ConcreteOp, hardware::GPU> specialization.
//
// Note: the argument type is fully qualified as "mlir::Operation*" to match
// the CPU interface above, rather than relying on the namespace the
// generated code happens to land in.
def TFL_GpuTargetOp : OpInterface<"GpuOpTargetInterface"> {
let description = [{
Interface for ops to run on GPU.
}];
let methods = [
InterfaceMethod<
[{Returns the cost of running this op on GPU.}],
// TODO(karimnosseir): Change to return Cost object instead.
"double", "GetOpCost", (ins "mlir::Operation*":$op_to_check), [{
// TODO(karimnosseir): Consider changing to another way that doesn't
// rely on template param name.
return TFL::TFLiteCostEstimator<ConcreteOp, TFL::hardware::GPU>::GetCost(op_to_check);
}]
>,
InterfaceMethod<
[{Returns whether this op can be run on GPU.}],
"bool", "IsSupported", (ins "mlir::Operation*":$op_to_check), [{
// TODO(karimnosseir): Consider changing to another way that doesn't
// rely on template param name.
return TFL::TFLiteCostEstimator<ConcreteOp, TFL::hardware::GPU>::IsSupported(op_to_check);
}]
>,
];
}
#endif // TFL_TARGET_HARDWARE_OP_INTERFACES

View File

@ -19,6 +19,7 @@ limitations under the License.
#define TFL_OP_INTERFACES
include "mlir/IR/OpBase.td"
include "tensorflow/compiler/mlir/lite/experimental/tfl_hardware_interfaces.td"
//===----------------------------------------------------------------------===//
// TFL op interface for stateful operands.

View File

@ -49,6 +49,7 @@ class TensorFlowLiteDialect : public Dialect {
Location loc) override;
};
#include "tensorflow/compiler/mlir/lite/experimental/estimators/estimator.h"
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops_interface.h.inc"
#define GET_OP_CLASSES
#include "tensorflow/compiler/mlir/lite/ir/tfl_ops.h.inc"

View File

@ -285,7 +285,10 @@ def TFL_ComparisonBinaryBuilder : OpBuilder<
class TFL_Op<string mnemonic, list<OpTrait> traits = []> :
Op<TFL_Dialect, mnemonic, !listconcat(traits,
[DeclareOpInterfaceMethods<TFL_RuntimeVerification>])> {
[DeclareOpInterfaceMethods<TFL_RuntimeVerification>,
// All TFL ops are supported on CPU.
DeclareOpInterfaceMethods<TFL_CpuTargetOp>
])> {
// FlatBuffer generation specific information.
// -------------------------------------------
// When generating the FlatBuffer output some operations have