From f24d25d8603c3fb7465922ad0e68737dade34db2 Mon Sep 17 00:00:00 2001
From: "Jae H. Yoo"
Date: Sun, 21 Jun 2020 21:29:52 -0700
Subject: [PATCH] Rename kTfLiteActRelu1 to kTfLiteActReluN1To1

This CL renames `kTfLiteActRelu1` to `kTfLiteActReluN1To1` because the
activation clips to a minimum of -1, not 0. It also completes the
renaming: TFLite already uses `kTfLiteBuiltinReluN1To1` and
`ActivationFunctionType_RELU_N1_TO_1` for this op.

PiperOrigin-RevId: 317589358
Change-Id: I2424104da45234346749b3921d563e9161e809cc
---
 tensorflow/lite/c/builtin_op_data.h           |  5 +++--
 .../lite/core/api/flatbuffer_conversions.cc   |  2 +-
 .../delegates/gpu/common/model_builder.cc     |  6 +++---
 .../hexagon/builders/conv_2d_builder.cc       |  2 +-
 tensorflow/lite/delegates/hexagon/utils.cc    |  2 +-
 .../delegates/xnnpack/xnnpack_delegate.cc     |  4 ++--
 .../builders/activation_layer_builder.cc      |  6 +++---
 .../lite/experimental/writer/enum_mapping.h   |  2 +-
 tensorflow/lite/kernels/fully_connected.cc    |  2 +-
 .../lite/kernels/internal/tensor_utils.h      |  2 +-
 tensorflow/lite/kernels/kernel_util.cc        |  2 +-
 tensorflow/lite/kernels/kernel_util.h         |  2 +-
 .../lite/micro/kernels/activation_utils.h     |  2 +-
 tensorflow/lite/micro/kernels/add_test.cc     |  6 +++---
 tensorflow/lite/micro/kernels/mul_test.cc     |  2 +-
 tensorflow/lite/micro/kernels/pooling_test.cc | 19 ++++++++++---------
 tensorflow/lite/micro/kernels/sub_test.cc     |  6 +++---
 17 files changed, 37 insertions(+), 35 deletions(-)

diff --git a/tensorflow/lite/c/builtin_op_data.h b/tensorflow/lite/c/builtin_op_data.h
index 9e0e82bc906..232f5f95928 100644
--- a/tensorflow/lite/c/builtin_op_data.h
+++ b/tensorflow/lite/c/builtin_op_data.h
@@ -67,8 +67,9 @@ typedef struct {
 typedef enum {
   kTfLiteActNone = 0,
   kTfLiteActRelu,
-  kTfLiteActRelu1,  // min(max(-1, x), 1)
-  kTfLiteActRelu6,  // min(max(0, x), 6)
+  kTfLiteActReluN1To1,                    // min(max(-1, x), 1)
+  kTfLiteActRelu1 = kTfLiteActReluN1To1,  // kTfLiteActRelu1 will be deprecated.
+  kTfLiteActRelu6,                        // min(max(0, x), 6)
   kTfLiteActTanh,
   kTfLiteActSignBit,
   kTfLiteActSigmoid,
diff --git a/tensorflow/lite/core/api/flatbuffer_conversions.cc b/tensorflow/lite/core/api/flatbuffer_conversions.cc
index 2a4dfbb6ff4..73d785bf369 100644
--- a/tensorflow/lite/core/api/flatbuffer_conversions.cc
+++ b/tensorflow/lite/core/api/flatbuffer_conversions.cc
@@ -109,7 +109,7 @@ TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
     case ActivationFunctionType_RELU:
       return kTfLiteActRelu;
     case ActivationFunctionType_RELU_N1_TO_1:
-      return kTfLiteActRelu1;
+      return kTfLiteActReluN1To1;
     case ActivationFunctionType_RELU6:
       return kTfLiteActRelu6;
     case ActivationFunctionType_TANH:
diff --git a/tensorflow/lite/delegates/gpu/common/model_builder.cc b/tensorflow/lite/delegates/gpu/common/model_builder.cc
index 01f94c94888..8b5261cfd98 100644
--- a/tensorflow/lite/delegates/gpu/common/model_builder.cc
+++ b/tensorflow/lite/delegates/gpu/common/model_builder.cc
@@ -109,7 +109,7 @@ absl::Status IsActivationSupported(TfLiteFusedActivation fused_activation) {
   switch (fused_activation) {
     case kTfLiteActNone:
     case kTfLiteActRelu:
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
     case kTfLiteActRelu6:
     case kTfLiteActTanh:
       return absl::OkStatus();
@@ -140,12 +140,12 @@ absl::Status MaybeFuseActivation(TfLiteFusedActivation fused_activation,
   }
   switch (fused_activation) {
     case kTfLiteActRelu:
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
     case kTfLiteActRelu6: {
       ReLUAttributes attr;
       attr.clip = fused_activation == kTfLiteActRelu
                       ? 0.0f
-                      : (fused_activation == kTfLiteActRelu1 ? 1.0f : 6.0f);
+                      : (fused_activation == kTfLiteActReluN1To1 ? 1.0f : 6.0f);
       for (auto index : output_indices) {
         Node* activation_node;
         RETURN_IF_ERROR(
diff --git a/tensorflow/lite/delegates/hexagon/builders/conv_2d_builder.cc b/tensorflow/lite/delegates/hexagon/builders/conv_2d_builder.cc
index a366522e35c..cfddd2c2b97 100644
--- a/tensorflow/lite/delegates/hexagon/builders/conv_2d_builder.cc
+++ b/tensorflow/lite/delegates/hexagon/builders/conv_2d_builder.cc
@@ -197,7 +197,7 @@ TfLiteStatus Conv2dOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
   if (activation == kTfLiteActRelu6) {
     conv_output_min = 0;
     conv_output_max = 6;
-  } else if (activation == kTfLiteActRelu1) {
+  } else if (activation == kTfLiteActReluN1To1) {
    conv_output_min = -1;
     conv_output_max = 1;
   } else if (activation == kTfLiteActRelu) {
diff --git a/tensorflow/lite/delegates/hexagon/utils.cc b/tensorflow/lite/delegates/hexagon/utils.cc
index 9253836a3b1..223d4a8a826 100644
--- a/tensorflow/lite/delegates/hexagon/utils.cc
+++ b/tensorflow/lite/delegates/hexagon/utils.cc
@@ -26,7 +26,7 @@ namespace {
 
 bool IsActivationReluOrNone(TfLiteFusedActivation activation) {
   return (activation == kTfLiteActRelu || activation == kTfLiteActRelu6 ||
-          activation == kTfLiteActRelu1 || activation == kTfLiteActNone);
+          activation == kTfLiteActReluN1To1 || activation == kTfLiteActNone);
 }
 
 bool TensorTypeMatch(int tensor_id, TfLiteContext* context,
diff --git a/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc b/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc
index 739e45f62e4..0afc9c32122 100644
--- a/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc
+++ b/tensorflow/lite/delegates/xnnpack/xnnpack_delegate.cc
@@ -330,7 +330,7 @@ class Subgraph {
         *output_min = 0.0f;
         *output_max = +std::numeric_limits<float>::infinity();
         return kTfLiteOk;
-      case kTfLiteActRelu1:
+      case kTfLiteActReluN1To1:
         *output_min = -1.0f;
         *output_max = +1.0f;
         return kTfLiteOk;
@@ -497,7 +497,7 @@ class Subgraph {
             context, "unsupported fused activation (Relu) in node #%d",
             node_index);
         return kTfLiteOk;
-      case kTfLiteActRelu1:
+      case kTfLiteActReluN1To1:
         TF_LITE_MAYBE_KERNEL_LOG(
             context, "unsupported fused activation (ReluMinus1To1) in node #%d",
             node_index);
diff --git a/tensorflow/lite/experimental/delegates/coreml/builders/activation_layer_builder.cc b/tensorflow/lite/experimental/delegates/coreml/builders/activation_layer_builder.cc
index ec032d8421e..df853797c8a 100644
--- a/tensorflow/lite/experimental/delegates/coreml/builders/activation_layer_builder.cc
+++ b/tensorflow/lite/experimental/delegates/coreml/builders/activation_layer_builder.cc
@@ -41,7 +41,7 @@ CoreML::Specification::NeuralNetworkLayer* ActivationLayerBuilder::Build() {
       layer_->mutable_activation()->mutable_relu();
       break;
     // Relu1 and Relu6 layers are fully composed in PopulateSubgraph().
-    case kTfLiteActRelu1:  // clip(-1, 1)
+    case kTfLiteActReluN1To1:  // clip(-1, 1)
       layer_->mutable_unary()->set_alpha(-1);
       layer_->mutable_unary()->set_type(
           CoreML::Specification::UnaryFunctionLayerParams::THRESHOLD);
@@ -64,7 +64,7 @@ CoreML::Specification::NeuralNetworkLayer* ActivationLayerBuilder::Build() {
 }
 
 TfLiteStatus ActivationLayerBuilder::PopulateSubgraph(TfLiteContext* context) {
-  if (!(activation_ == kTfLiteActRelu6 || activation_ == kTfLiteActRelu1)) {
+  if (!(activation_ == kTfLiteActRelu6 || activation_ == kTfLiteActReluN1To1)) {
     builder_output_ = AddOutput();
     return kTfLiteOk;
   }
@@ -125,7 +125,7 @@ OpBuilder* CreateReluOpBuilder(GraphBuilder* graph_builder) {
 }
 
 OpBuilder* CreateReluN1To1OpBuilder(GraphBuilder* graph_builder) {
-  return new ActivationLayerBuilder(graph_builder, kTfLiteActRelu1);
+  return new ActivationLayerBuilder(graph_builder, kTfLiteActReluN1To1);
 }
 
 OpBuilder* CreateRelu6OpBuilder(GraphBuilder* graph_builder) {
diff --git a/tensorflow/lite/experimental/writer/enum_mapping.h b/tensorflow/lite/experimental/writer/enum_mapping.h
index b78d610c4c5..5eabbcb2015 100644
--- a/tensorflow/lite/experimental/writer/enum_mapping.h
+++ b/tensorflow/lite/experimental/writer/enum_mapping.h
@@ -29,7 +29,7 @@ inline ActivationFunctionType TfLiteActivationToSchemaActivation(
       return ActivationFunctionType_NONE;
     case kTfLiteActRelu:
       return ActivationFunctionType_RELU;
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
       return ActivationFunctionType_RELU_N1_TO_1;
     case kTfLiteActRelu6:
       return ActivationFunctionType_RELU6;
diff --git a/tensorflow/lite/kernels/fully_connected.cc b/tensorflow/lite/kernels/fully_connected.cc
index 8b7a7832dbb..9cbbcae9c51 100644
--- a/tensorflow/lite/kernels/fully_connected.cc
+++ b/tensorflow/lite/kernels/fully_connected.cc
@@ -312,7 +312,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   if (!is_pie && !is_hybrid) {
     TF_LITE_ENSURE(context, params->activation == kTfLiteActNone ||
                                 params->activation == kTfLiteActRelu ||
-                                params->activation == kTfLiteActRelu1 ||
+                                params->activation == kTfLiteActReluN1To1 ||
                                 params->activation == kTfLiteActRelu6);
   }
   return PrepareImpl(context, node);
diff --git a/tensorflow/lite/kernels/internal/tensor_utils.h b/tensorflow/lite/kernels/internal/tensor_utils.h
index e2af88d50e3..8c956c49f5f 100644
--- a/tensorflow/lite/kernels/internal/tensor_utils.h
+++ b/tensorflow/lite/kernels/internal/tensor_utils.h
@@ -587,7 +587,7 @@ inline void ApplyActivationToVector(const float* __restrict__ vector,
       return;
     case kTfLiteActRelu:
       return ApplyReluToVector(vector, v_size, result);
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
       return ApplyRelu1ToVector(vector, v_size, result);
     case kTfLiteActRelu6:
       return ApplyRelu6ToVector(vector, v_size, result);
diff --git a/tensorflow/lite/kernels/kernel_util.cc b/tensorflow/lite/kernels/kernel_util.cc
index 032726a7860..164aec3f224 100644
--- a/tensorflow/lite/kernels/kernel_util.cc
+++ b/tensorflow/lite/kernels/kernel_util.cc
@@ -188,7 +188,7 @@ void CalculateActivationRangeQuantizedImpl(TfLiteFusedActivation activation,
   } else if (activation == kTfLiteActRelu6) {
     *act_min = std::max(qmin, quantize(0.0));
     *act_max = std::min(qmax, quantize(6.0));
-  } else if (activation == kTfLiteActRelu1) {
+  } else if (activation == kTfLiteActReluN1To1) {
     *act_min = std::max(qmin, quantize(-1.0));
     *act_max = std::min(qmax, quantize(1.0));
   } else {
diff --git a/tensorflow/lite/kernels/kernel_util.h b/tensorflow/lite/kernels/kernel_util.h
index 6fc69fa1629..6bd6bb1c7ed 100644
--- a/tensorflow/lite/kernels/kernel_util.h
+++ b/tensorflow/lite/kernels/kernel_util.h
@@ -169,7 +169,7 @@ void CalculateActivationRange(TfLiteFusedActivation activation,
   } else if (activation == kTfLiteActRelu6) {
     *activation_min = 0;
     *activation_max = 6;
-  } else if (activation == kTfLiteActRelu1) {
+  } else if (activation == kTfLiteActReluN1To1) {
     *activation_min = -1;
     *activation_max = 1;
   } else {
diff --git a/tensorflow/lite/micro/kernels/activation_utils.h b/tensorflow/lite/micro/kernels/activation_utils.h
index a71826211c0..95ecc26dd52 100644
--- a/tensorflow/lite/micro/kernels/activation_utils.h
+++ b/tensorflow/lite/micro/kernels/activation_utils.h
@@ -35,7 +35,7 @@ inline float ActivationValFloat(TfLiteFusedActivation act, float a) {
      return a;
     case kTfLiteActRelu:
       return TfLiteMax(0.0f, a);
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
       return TfLiteMax(-1.0f, TfLiteMin(a, 1.0f));
     case kTfLiteActRelu6:
       return TfLiteMax(0.0f, TfLiteMin(a, 6.0f));
diff --git a/tensorflow/lite/micro/kernels/add_test.cc b/tensorflow/lite/micro/kernels/add_test.cc
index 60164ab4746..6c66e0d4aaf 100644
--- a/tensorflow/lite/micro/kernels/add_test.cc
+++ b/tensorflow/lite/micro/kernels/add_test.cc
@@ -201,7 +201,7 @@ TF_LITE_MICRO_TEST(FloatAddActivationRelu1) {
   float output_data[output_dims_count];
   tflite::testing::TestAddFloat(inout_shape, input1_values, inout_shape,
                                 input2_values, inout_shape, golden_values,
-                                kTfLiteActRelu1, output_data);
+                                kTfLiteActReluN1To1, output_data);
 }
 
 TF_LITE_MICRO_TEST(FloatAddVariousInputShapes) {
@@ -313,7 +313,7 @@ TF_LITE_MICRO_TEST(QuantizedAddActivationRelu1Uint8) {
       inout_shape, input1_values, input1_quantized, scales[0], zero_points[0],
       inout_shape, input2_values, input2_quantized, scales[1], zero_points[1],
       inout_shape, golden_values, golden_quantized, scales[2], zero_points[2],
-      kTfLiteActRelu1, output);
+      kTfLiteActReluN1To1, output);
 }
 
 TF_LITE_MICRO_TEST(QuantizedAddActivationRelu1Int8) {
@@ -334,7 +334,7 @@ TF_LITE_MICRO_TEST(QuantizedAddActivationRelu1Int8) {
       inout_shape, input1_values, input1_quantized, scales[0], zero_points[0],
      inout_shape, input2_values, input2_quantized, scales[1], zero_points[1],
       inout_shape, golden_values, golden_quantized, scales[2], zero_points[2],
-      kTfLiteActRelu1, output);
+      kTfLiteActReluN1To1, output);
 }
 
 TF_LITE_MICRO_TEST(QuantizedAddVariousInputShapesUint8) {
diff --git a/tensorflow/lite/micro/kernels/mul_test.cc b/tensorflow/lite/micro/kernels/mul_test.cc
index 6b4d4f07b64..f69bf2aa17e 100644
--- a/tensorflow/lite/micro/kernels/mul_test.cc
+++ b/tensorflow/lite/micro/kernels/mul_test.cc
@@ -402,7 +402,7 @@ TF_LITE_MICRO_TEST(FloatRelu) {
       {0.1, 0.2, 0.3, 0.5},     // input2 data
       {4, 1, 2, 2, 1},          // output shape
       {-0.2, 0.04, 0.21, 0.4},  // expected output data
-      output_data, kTfLiteActRelu1);
+      output_data, kTfLiteActReluN1To1);
 }
 
 TF_LITE_MICRO_TEST(FloatBroadcast) {
diff --git a/tensorflow/lite/micro/kernels/pooling_test.cc b/tensorflow/lite/micro/kernels/pooling_test.cc
index 9e11e9a4d57..35a77662e07 100644
--- a/tensorflow/lite/micro/kernels/pooling_test.cc
+++ b/tensorflow/lite/micro/kernels/pooling_test.cc
@@ -417,7 +417,8 @@ TF_LITE_MICRO_TEST(SimpleAveragePoolTestInt8PaddingValidStride1Stride2Relu) {
       kTfLitePaddingValid, kTfLiteActRelu, output_data);
 }
 
-TF_LITE_MICRO_TEST(SimpleAveragePoolTestInt8PaddingValidStride2Stride1Relu1) {
+TF_LITE_MICRO_TEST(
+    SimpleAveragePoolTestInt8PaddingValidStride2Stride1ReluN1To1) {
   using tflite::testing::F2QS;
 
   const float input_min = -15.9375;
@@ -439,7 +440,7 @@ TF_LITE_MICRO_TEST(SimpleAveragePoolTestInt8PaddingValidStride2Stride1Relu1) {
       F2QS(-0.25, output_min, output_max), F2QS(0.75, output_min, output_max)},
      {4, 1, 1, 2, 1},          // Output shape
       output_min, output_max,  // output quantization range
-      kTfLitePaddingValid, kTfLiteActRelu1, output_data);
+      kTfLitePaddingValid, kTfLiteActReluN1To1, output_data);
 }
 
 TF_LITE_MICRO_TEST(SimpleAveragePoolTestInt8PaddingValidStride2Relu6) {
@@ -532,7 +533,7 @@ TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatRelu) {
       output_data);
 }
 
-TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatRelu1) {
+TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatReluN1To1) {
   float output_data[2];
   tflite::testing::TestMaxPoolFloat({4, 1, 2, 4, 1},  // Input shape
                                     {
@@ -548,7 +549,7 @@ TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatRelu1) {
                                         0.7,
                                     },
                                     {4, 1, 1, 2, 1},  // Output shape
-                                    kTfLitePaddingValid, kTfLiteActRelu1,
+                                    kTfLitePaddingValid, kTfLiteActReluN1To1,
                                     output_data);
 
   tflite::testing::TestMaxPoolFloat({4, 1, 2, 4, 1},  // Input shape
@@ -565,7 +566,7 @@ TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatRelu1) {
                                         1.0,
                                     },
                                     {4, 1, 1, 2, 1},  // Output shape
-                                    kTfLitePaddingValid, kTfLiteActRelu1,
+                                    kTfLitePaddingValid, kTfLiteActReluN1To1,
                                     output_data);
 }
 
@@ -713,7 +714,7 @@ TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu) {
       kTfLitePaddingValid, kTfLiteActRelu, output_data);
 }
 
-TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu1) {
+TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActReluN1To1) {
   using tflite::testing::F2Q;
 
   uint8_t output_data[2];
@@ -743,7 +744,7 @@ TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu1) {
      {// Output values
        F2Q(-1.0, output_min, output_max), F2Q(1.0, output_min, output_max)},
       output_min, output_max, {4, 1, 1, 2, 1},  // Output shape
-      kTfLitePaddingValid, kTfLiteActRelu1, output_data);
+      kTfLitePaddingValid, kTfLiteActReluN1To1, output_data);
 }
 
 TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu6) {
@@ -944,7 +945,7 @@ TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu) {
       kTfLitePaddingValid, kTfLiteActRelu, output_data);
 }
 
-TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu1) {
+TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActReluN1To1) {
   using tflite::testing::F2QS;
 
   int8_t output_data[2];
@@ -974,7 +975,7 @@ TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu1) {
      {// Output values
        F2QS(-1.0, output_min, output_max), F2QS(1.0, output_min, output_max)},
       output_min, output_max, {4, 1, 1, 2, 1},  // Output shape
-      kTfLitePaddingValid, kTfLiteActRelu1, output_data);
+      kTfLitePaddingValid, kTfLiteActReluN1To1, output_data);
 }
 
 TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu6) {
diff --git a/tensorflow/lite/micro/kernels/sub_test.cc b/tensorflow/lite/micro/kernels/sub_test.cc
index d6ab48ead36..b8de6eba453 100644
--- a/tensorflow/lite/micro/kernels/sub_test.cc
+++ b/tensorflow/lite/micro/kernels/sub_test.cc
@@ -201,7 +201,7 @@ TF_LITE_MICRO_TEST(FloatSubActivationRelu1) {
   float output_data[output_dims_count];
   tflite::testing::TestSubFloat(inout_shape, input1_values, inout_shape,
                                 input2_values, inout_shape, golden_values,
-                                kTfLiteActRelu1, output_data);
+                                kTfLiteActReluN1To1, output_data);
 }
 
 TF_LITE_MICRO_TEST(FloatSubVariousInputShapes) {
@@ -313,7 +313,7 @@ TF_LITE_MICRO_TEST(QuantizedSubActivationRelu1Uint8) {
       inout_shape, input1_values, input1_quantized, scales[0], zero_points[0],
       inout_shape, input2_values, input2_quantized, scales[1], zero_points[1],
       inout_shape, golden_values, golden_quantized, scales[2], zero_points[2],
-      kTfLiteActRelu1, output);
+      kTfLiteActReluN1To1, output);
 }
 
 TF_LITE_MICRO_TEST(QuantizedSubActivationRelu1Int8) {
@@ -334,7 +334,7 @@ TF_LITE_MICRO_TEST(QuantizedSubActivationRelu1Int8) {
       inout_shape, input1_values, input1_quantized, scales[0], zero_points[0],
       inout_shape, input2_values, input2_quantized, scales[1], zero_points[1],
       inout_shape, golden_values, golden_quantized, scales[2], zero_points[2],
-      kTfLiteActRelu1, output);
+      kTfLiteActReluN1To1, output);
 }
 
 TF_LITE_MICRO_TEST(QuantizedSubVariousInputShapesUint8) {
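
Note on the aliasing pattern: the `kTfLiteActRelu1 = kTfLiteActReluN1To1` line in builtin_op_data.h keeps existing callers building while the old name is phased out, and the activation itself clips to [-1, 1]. The standalone sketch below illustrates both points; it is not part of the patch, and `FusedActivation` and `ApplyReluN1To1` are illustrative names rather than the actual TFLite declarations.

#include <algorithm>
#include <cassert>

// Mirrors the aliasing pattern from builtin_op_data.h: the old enumerator
// becomes an alias of the new one, so both names denote the same value.
typedef enum {
  kActNone = 0,
  kActRelu,
  kActReluN1To1,              // min(max(-1, x), 1)
  kActRelu1 = kActReluN1To1,  // old name, kept for source compatibility
  kActRelu6,                  // min(max(0, x), 6)
} FusedActivation;

// The activation this rename is about: clip the input to [-1, 1].
inline float ApplyReluN1To1(float x) {
  return std::min(std::max(-1.0f, x), 1.0f);
}

int main() {
  // Old and new names refer to the same enumerator value.
  static_assert(kActRelu1 == kActReluN1To1, "alias must preserve the value");
  // The lower clip is -1, not 0, which is the reason for the rename.
  assert(ApplyReluN1To1(-2.5f) == -1.0f);
  assert(ApplyReluN1To1(0.5f) == 0.5f);
  assert(ApplyReluN1To1(3.0f) == 1.0f);
  return 0;
}

Because the old enumerator is an alias rather than a removed value, downstream code that still passes kTfLiteActRelu1 continues to compile and behaves identically; only the preferred spelling changes.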