Rename kTfLiteActRelu1 to kTfLiteActReluN1To1

This CL renames `kTfLiteActRelu1` to `kTfLiteActReluN1To1` because the activation clips to a minimum of -1, not 0. It also completes the renaming, since TFLite already uses `kTfLiteBuiltinReluN1To1` and `ActivationFunctionType_RELU_N1_TO_1` for this op.

PiperOrigin-RevId: 317589358
Change-Id: I2424104da45234346749b3921d563e9161e809cc
Author: Jae H. Yoo (2020-06-21 21:29:52 -07:00), committed by TensorFlower Gardener
Parent: db121c7f4b
Commit: f24d25d860
17 changed files with 37 additions and 35 deletions
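
For readers unfamiliar with the fused-activation enum, the sketch below illustrates the clipping semantics the new name encodes and why the old symbol keeps compiling. This is a standalone illustration, not TFLite code: the local `FusedActivation` enum and `ApplyReluN1To1` helper are hypothetical stand-ins for `TfLiteFusedActivation` and the kernel-side clamp.

#include <algorithm>
#include <cassert>

// Hypothetical stand-in for TfLiteFusedActivation, mirroring the change below:
// the old name survives as an alias so existing callers keep compiling.
enum FusedActivation {
  kActReluN1To1,              // min(max(-1, x), 1) -- clips at -1, hence "N1To1"
  kActRelu1 = kActReluN1To1,  // deprecated spelling, same value
  kActRelu6,                  // min(max(0, x), 6)
};

// Illustrative clamp matching the comment on the enumerator.
float ApplyReluN1To1(float x) { return std::min(std::max(x, -1.0f), 1.0f); }

int main() {
  assert(kActRelu1 == kActReluN1To1);      // alias, so old code still works
  assert(ApplyReluN1To1(-3.0f) == -1.0f);  // lower clip is -1, not 0
  assert(ApplyReluN1To1(2.5f) == 1.0f);    // upper clip is 1
  return 0;
}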


@@ -67,8 +67,9 @@ typedef struct {
 typedef enum {
   kTfLiteActNone = 0,
   kTfLiteActRelu,
-  kTfLiteActRelu1,  // min(max(-1, x), 1)
+  kTfLiteActReluN1To1,                    // min(max(-1, x), 1)
+  kTfLiteActRelu1 = kTfLiteActReluN1To1,  // kTfLiteActRelu1 will be deprecated.
   kTfLiteActRelu6,  // min(max(0, x), 6)
   kTfLiteActTanh,
   kTfLiteActSignBit,
   kTfLiteActSigmoid,


@@ -109,7 +109,7 @@ TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
     case ActivationFunctionType_RELU:
       return kTfLiteActRelu;
     case ActivationFunctionType_RELU_N1_TO_1:
-      return kTfLiteActRelu1;
+      return kTfLiteActReluN1To1;
     case ActivationFunctionType_RELU6:
       return kTfLiteActRelu6;
     case ActivationFunctionType_TANH:


@@ -109,7 +109,7 @@ absl::Status IsActivationSupported(TfLiteFusedActivation fused_activation) {
   switch (fused_activation) {
     case kTfLiteActNone:
     case kTfLiteActRelu:
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
     case kTfLiteActRelu6:
     case kTfLiteActTanh:
       return absl::OkStatus();
@@ -140,12 +140,12 @@ absl::Status MaybeFuseActivation(TfLiteFusedActivation fused_activation,
   }
   switch (fused_activation) {
     case kTfLiteActRelu:
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
     case kTfLiteActRelu6: {
       ReLUAttributes attr;
       attr.clip = fused_activation == kTfLiteActRelu
                       ? 0.0f
-                      : (fused_activation == kTfLiteActRelu1 ? 1.0f : 6.0f);
+                      : (fused_activation == kTfLiteActReluN1To1 ? 1.0f : 6.0f);
       for (auto index : output_indices) {
         Node* activation_node;
         RETURN_IF_ERROR(


@@ -197,7 +197,7 @@ TfLiteStatus Conv2dOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
   if (activation == kTfLiteActRelu6) {
     conv_output_min = 0;
     conv_output_max = 6;
-  } else if (activation == kTfLiteActRelu1) {
+  } else if (activation == kTfLiteActReluN1To1) {
     conv_output_min = -1;
     conv_output_max = 1;
   } else if (activation == kTfLiteActRelu) {


@@ -26,7 +26,7 @@ namespace {
 bool IsActivationReluOrNone(TfLiteFusedActivation activation) {
   return (activation == kTfLiteActRelu || activation == kTfLiteActRelu6 ||
-          activation == kTfLiteActRelu1 || activation == kTfLiteActNone);
+          activation == kTfLiteActReluN1To1 || activation == kTfLiteActNone);
 }
 
 bool TensorTypeMatch(int tensor_id, TfLiteContext* context,


@@ -330,7 +330,7 @@ class Subgraph {
         *output_min = 0.0f;
         *output_max = +std::numeric_limits<float>::infinity();
         return kTfLiteOk;
-      case kTfLiteActRelu1:
+      case kTfLiteActReluN1To1:
         *output_min = -1.0f;
         *output_max = +1.0f;
         return kTfLiteOk;
@@ -497,7 +497,7 @@ class Subgraph {
             context, "unsupported fused activation (Relu) in node #%d",
             node_index);
         return kTfLiteOk;
-      case kTfLiteActRelu1:
+      case kTfLiteActReluN1To1:
         TF_LITE_MAYBE_KERNEL_LOG(
             context, "unsupported fused activation (ReluMinus1To1) in node #%d",
             node_index);


@@ -41,7 +41,7 @@ CoreML::Specification::NeuralNetworkLayer* ActivationLayerBuilder::Build() {
       layer_->mutable_activation()->mutable_relu();
       break;
     // Relu1 and Relu6 layers are fully composed in PopulateSubgraph().
-    case kTfLiteActRelu1:  // clip(-1, 1)
+    case kTfLiteActReluN1To1:  // clip(-1, 1)
       layer_->mutable_unary()->set_alpha(-1);
       layer_->mutable_unary()->set_type(
           CoreML::Specification::UnaryFunctionLayerParams::THRESHOLD);
@@ -64,7 +64,7 @@ CoreML::Specification::NeuralNetworkLayer* ActivationLayerBuilder::Build() {
 }
 
 TfLiteStatus ActivationLayerBuilder::PopulateSubgraph(TfLiteContext* context) {
-  if (!(activation_ == kTfLiteActRelu6 || activation_ == kTfLiteActRelu1)) {
+  if (!(activation_ == kTfLiteActRelu6 || activation_ == kTfLiteActReluN1To1)) {
     builder_output_ = AddOutput();
     return kTfLiteOk;
   }
@@ -125,7 +125,7 @@ OpBuilder* CreateReluOpBuilder(GraphBuilder* graph_builder) {
 }
 
 OpBuilder* CreateReluN1To1OpBuilder(GraphBuilder* graph_builder) {
-  return new ActivationLayerBuilder(graph_builder, kTfLiteActRelu1);
+  return new ActivationLayerBuilder(graph_builder, kTfLiteActReluN1To1);
 }
 
 OpBuilder* CreateRelu6OpBuilder(GraphBuilder* graph_builder) {


@@ -29,7 +29,7 @@ inline ActivationFunctionType TfLiteActivationToSchemaActivation(
       return ActivationFunctionType_NONE;
     case kTfLiteActRelu:
       return ActivationFunctionType_RELU;
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
       return ActivationFunctionType_RELU_N1_TO_1;
     case kTfLiteActRelu6:
       return ActivationFunctionType_RELU6;


@@ -312,7 +312,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   if (!is_pie && !is_hybrid) {
     TF_LITE_ENSURE(context, params->activation == kTfLiteActNone ||
                                 params->activation == kTfLiteActRelu ||
-                                params->activation == kTfLiteActRelu1 ||
+                                params->activation == kTfLiteActReluN1To1 ||
                                 params->activation == kTfLiteActRelu6);
   }
   return PrepareImpl(context, node);


@@ -587,7 +587,7 @@ inline void ApplyActivationToVector(const float* __restrict__ vector,
       return;
     case kTfLiteActRelu:
       return ApplyReluToVector(vector, v_size, result);
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
       return ApplyRelu1ToVector(vector, v_size, result);
     case kTfLiteActRelu6:
       return ApplyRelu6ToVector(vector, v_size, result);


@@ -188,7 +188,7 @@ void CalculateActivationRangeQuantizedImpl(TfLiteFusedActivation activation,
   } else if (activation == kTfLiteActRelu6) {
     *act_min = std::max(qmin, quantize(0.0));
     *act_max = std::min(qmax, quantize(6.0));
-  } else if (activation == kTfLiteActRelu1) {
+  } else if (activation == kTfLiteActReluN1To1) {
     *act_min = std::max(qmin, quantize(-1.0));
     *act_max = std::min(qmax, quantize(1.0));
   } else {


@@ -169,7 +169,7 @@ void CalculateActivationRange(TfLiteFusedActivation activation,
   } else if (activation == kTfLiteActRelu6) {
     *activation_min = 0;
     *activation_max = 6;
-  } else if (activation == kTfLiteActRelu1) {
+  } else if (activation == kTfLiteActReluN1To1) {
     *activation_min = -1;
     *activation_max = 1;
   } else {


@@ -35,7 +35,7 @@ inline float ActivationValFloat(TfLiteFusedActivation act, float a) {
      return a;
    case kTfLiteActRelu:
      return TfLiteMax(0.0f, a);
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
      return TfLiteMax(-1.0f, TfLiteMin(a, 1.0f));
    case kTfLiteActRelu6:
      return TfLiteMax(0.0f, TfLiteMin(a, 6.0f));


@@ -201,7 +201,7 @@ TF_LITE_MICRO_TEST(FloatAddActivationRelu1) {
   float output_data[output_dims_count];
   tflite::testing::TestAddFloat(inout_shape, input1_values, inout_shape,
                                 input2_values, inout_shape, golden_values,
-                                kTfLiteActRelu1, output_data);
+                                kTfLiteActReluN1To1, output_data);
 }
 
 TF_LITE_MICRO_TEST(FloatAddVariousInputShapes) {
@@ -313,7 +313,7 @@ TF_LITE_MICRO_TEST(QuantizedAddActivationRelu1Uint8) {
       inout_shape, input1_values, input1_quantized, scales[0], zero_points[0],
       inout_shape, input2_values, input2_quantized, scales[1], zero_points[1],
       inout_shape, golden_values, golden_quantized, scales[2], zero_points[2],
-      kTfLiteActRelu1, output);
+      kTfLiteActReluN1To1, output);
 }
 
 TF_LITE_MICRO_TEST(QuantizedAddActivationRelu1Int8) {
@@ -334,7 +334,7 @@ TF_LITE_MICRO_TEST(QuantizedAddActivationRelu1Int8) {
       inout_shape, input1_values, input1_quantized, scales[0], zero_points[0],
       inout_shape, input2_values, input2_quantized, scales[1], zero_points[1],
       inout_shape, golden_values, golden_quantized, scales[2], zero_points[2],
-      kTfLiteActRelu1, output);
+      kTfLiteActReluN1To1, output);
 }
 
 TF_LITE_MICRO_TEST(QuantizedAddVariousInputShapesUint8) {


@@ -402,7 +402,7 @@ TF_LITE_MICRO_TEST(FloatRelu) {
                                 {0.1, 0.2, 0.3, 0.5},     // input2 data
                                 {4, 1, 2, 2, 1},          // output shape
                                 {-0.2, 0.04, 0.21, 0.4},  // expected output data
-                                output_data, kTfLiteActRelu1);
+                                output_data, kTfLiteActReluN1To1);
 }
 
 TF_LITE_MICRO_TEST(FloatBroadcast) {


@@ -417,7 +417,8 @@ TF_LITE_MICRO_TEST(SimpleAveragePoolTestInt8PaddingValidStride1Stride2Relu) {
       kTfLitePaddingValid, kTfLiteActRelu, output_data);
 }
 
-TF_LITE_MICRO_TEST(SimpleAveragePoolTestInt8PaddingValidStride2Stride1Relu1) {
+TF_LITE_MICRO_TEST(
+    SimpleAveragePoolTestInt8PaddingValidStride2Stride1ReluN1To1) {
   using tflite::testing::F2QS;
 
   const float input_min = -15.9375;
@@ -439,7 +440,7 @@ TF_LITE_MICRO_TEST(SimpleAveragePoolTestInt8PaddingValidStride2Stride1Relu1) {
       F2QS(-0.25, output_min, output_max), F2QS(0.75, output_min, output_max)},
      {4, 1, 1, 2, 1},         // Output shape
      output_min, output_max,  // output quantization range
-      kTfLitePaddingValid, kTfLiteActRelu1, output_data);
+      kTfLitePaddingValid, kTfLiteActReluN1To1, output_data);
 }
 
 TF_LITE_MICRO_TEST(SimpleAveragePoolTestInt8PaddingValidStride2Relu6) {
@@ -532,7 +533,7 @@ TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatRelu) {
                                     output_data);
 }
 
-TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatRelu1) {
+TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatReluN1To1) {
   float output_data[2];
   tflite::testing::TestMaxPoolFloat({4, 1, 2, 4, 1},  // Input shape
                                     {
@@ -548,7 +549,7 @@ TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatRelu1) {
                                         0.7,
                                     },
                                     {4, 1, 1, 2, 1},  // Output shape
-                                    kTfLitePaddingValid, kTfLiteActRelu1,
+                                    kTfLitePaddingValid, kTfLiteActReluN1To1,
                                     output_data);
 
   tflite::testing::TestMaxPoolFloat({4, 1, 2, 4, 1},  // Input shape
@@ -565,7 +566,7 @@ TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatRelu1) {
                                         1.0,
                                     },
                                     {4, 1, 1, 2, 1},  // Output shape
-                                    kTfLitePaddingValid, kTfLiteActRelu1,
+                                    kTfLitePaddingValid, kTfLiteActReluN1To1,
                                     output_data);
 }
@@ -713,7 +714,7 @@ TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu) {
       kTfLitePaddingValid, kTfLiteActRelu, output_data);
 }
 
-TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu1) {
+TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActReluN1To1) {
   using tflite::testing::F2Q;
 
   uint8_t output_data[2];
@@ -743,7 +744,7 @@ TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu1) {
      {// Output values
       F2Q(-1.0, output_min, output_max), F2Q(1.0, output_min, output_max)},
      output_min, output_max, {4, 1, 1, 2, 1},  // Output shape
-      kTfLitePaddingValid, kTfLiteActRelu1, output_data);
+      kTfLitePaddingValid, kTfLiteActReluN1To1, output_data);
 }
 
 TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu6) {
@@ -944,7 +945,7 @@ TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu) {
       kTfLitePaddingValid, kTfLiteActRelu, output_data);
 }
 
-TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu1) {
+TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActReluN1To1) {
   using tflite::testing::F2QS;
 
   int8_t output_data[2];
@@ -974,7 +975,7 @@ TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu1) {
      {// Output values
       F2QS(-1.0, output_min, output_max), F2QS(1.0, output_min, output_max)},
      output_min, output_max, {4, 1, 1, 2, 1},  // Output shape
-      kTfLitePaddingValid, kTfLiteActRelu1, output_data);
+      kTfLitePaddingValid, kTfLiteActReluN1To1, output_data);
 }
 
 TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu6) {


@@ -201,7 +201,7 @@ TF_LITE_MICRO_TEST(FloatSubActivationRelu1) {
   float output_data[output_dims_count];
   tflite::testing::TestSubFloat(inout_shape, input1_values, inout_shape,
                                 input2_values, inout_shape, golden_values,
-                                kTfLiteActRelu1, output_data);
+                                kTfLiteActReluN1To1, output_data);
 }
 
 TF_LITE_MICRO_TEST(FloatSubVariousInputShapes) {
@@ -313,7 +313,7 @@ TF_LITE_MICRO_TEST(QuantizedSubActivationRelu1Uint8) {
       inout_shape, input1_values, input1_quantized, scales[0], zero_points[0],
       inout_shape, input2_values, input2_quantized, scales[1], zero_points[1],
       inout_shape, golden_values, golden_quantized, scales[2], zero_points[2],
-      kTfLiteActRelu1, output);
+      kTfLiteActReluN1To1, output);
 }
 
 TF_LITE_MICRO_TEST(QuantizedSubActivationRelu1Int8) {
@@ -334,7 +334,7 @@ TF_LITE_MICRO_TEST(QuantizedSubActivationRelu1Int8) {
      inout_shape, input1_values, input1_quantized, scales[0], zero_points[0],
      inout_shape, input2_values, input2_quantized, scales[1], zero_points[1],
      inout_shape, golden_values, golden_quantized, scales[2], zero_points[2],
-      kTfLiteActRelu1, output);
+      kTfLiteActReluN1To1, output);
 }
 
 TF_LITE_MICRO_TEST(QuantizedSubVariousInputShapesUint8) {