Add relu6/minimum/maximum to GPU target.

PiperOrigin-RevId: 303879029
Change-Id: I508e540415ae683d6d08d2ddb30d23b4dd9b7505
This commit is contained in:
Renjie Liu 2020-03-30 20:16:35 -07:00 committed by TensorFlower Gardener
parent 21539c733c
commit 4cb2faa19a
2 changed files with 51 additions and 3 deletions

View File

@ -136,6 +136,32 @@ class TFLiteCostEstimator<MirrorPadOp, hardware::GPU> {
static bool IsSupported(mlir::Operation* op) { return true; }
};
// tfl.maximum
// Cost/support estimator specialization declaring tfl.maximum runnable on
// the GPU target.
template <>
class TFLiteCostEstimator<MaximumOp, hardware::GPU> {
public:
// No GPU cost model is defined yet: emit a diagnostic to stderr and report
// a placeholder cost of 0.0.
static double GetCost(mlir::Operation* op) {
llvm::errs() << "No defined cost function for op: "
<< op->getName().getStringRef().str();
return 0.0;
}
// tfl.maximum is unconditionally supported on GPU.
static bool IsSupported(mlir::Operation* op) { return true; }
};
// tfl.minimum
// Cost/support estimator specialization declaring tfl.minimum runnable on
// the GPU target.
template <>
class TFLiteCostEstimator<MinimumOp, hardware::GPU> {
public:
// No GPU cost model is defined yet: emit a diagnostic to stderr and report
// a placeholder cost of 0.0.
static double GetCost(mlir::Operation* op) {
llvm::errs() << "No defined cost function for op: "
<< op->getName().getStringRef().str();
return 0.0;
}
// tfl.minimum is unconditionally supported on GPU.
static bool IsSupported(mlir::Operation* op) { return true; }
};
// tfl.mul
template <>
class TFLiteCostEstimator<MulOp, hardware::GPU> {
@ -162,6 +188,19 @@ class TFLiteCostEstimator<ReluOp, hardware::GPU> {
static bool IsSupported(mlir::Operation* op) { return true; }
};
// tfl.relu6
// Cost/support estimator specialization declaring tfl.relu6 runnable on
// the GPU target.
template <>
class TFLiteCostEstimator<Relu6Op, hardware::GPU> {
public:
// No GPU cost model is defined yet: emit a diagnostic to stderr and report
// a placeholder cost of 0.0.
static double GetCost(mlir::Operation* op) {
llvm::errs() << "No defined cost function for op: "
<< op->getName().getStringRef().str();
return 0.0;
}
// tfl.relu6 is unconditionally supported on GPU.
static bool IsSupported(mlir::Operation* op) { return true; }
};
// tfl.reshape
template <>
class TFLiteCostEstimator<ReshapeOp, hardware::GPU> {

View File

@ -1657,7 +1657,11 @@ def TFL_MaxUnpooling2DOp :
}
def TFL_MaximumOp : TFL_Op<"maximum", [
ResultsBroadcastableShape, NoSideEffect, Commutative, SameOperandsAndResultsScale]> {
ResultsBroadcastableShape,
NoSideEffect,
Commutative,
SameOperandsAndResultsScale,
TFL_GpuTargetOp]> {
let summary = "Max operator";
let description = [{
Element-wise max operation.
@ -1855,7 +1859,11 @@ def TFL_ReduceProdOp: TFL_Op<"reduce_prod", [NoSideEffect]> {
}
def TFL_MinimumOp : TFL_Op<"minimum", [
ResultsBroadcastableShape, NoSideEffect, Commutative, SameOperandsAndResultsScale]> {
ResultsBroadcastableShape,
NoSideEffect,
Commutative,
SameOperandsAndResultsScale,
TFL_GpuTargetOp]> {
let summary = "Min operator";
let description = [{
Element-wise min operation.
@ -2127,7 +2135,8 @@ def TFL_ReluOp: TFL_Op<"relu", [NoSideEffect,
def TFL_Relu6Op: TFL_Op<"relu6", [NoSideEffect,
SameOperandsAndResultShape,
SameOperandsAndResultsScale]> {
SameOperandsAndResultsScale,
TFL_GpuTargetOp]> {
let summary = "Relu6 operator";
let description = [{