Rename kTfLiteActRelu1 to kTfLiteActReluN1To1

This CL renames `kTfLiteActRelu1` to `kTfLiteActReluN1To1` because the activation's minimum clip is -1, not 0. It also completes the renaming: TFLite already uses `kTfLiteBuiltinReluN1To1` and `ActivationFunctionType_RELU_N1_TO_1` for this op.

PiperOrigin-RevId: 317589358
Change-Id: I2424104da45234346749b3921d563e9161e809cc
parent db121c7f4b
commit f24d25d860
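For context, a fused activation clamps an op's output element-wise. `kTfLiteActReluN1To1` clamps to [-1, 1], so the old name `Relu1` was misleading: read alongside `Relu6` (which clamps to [0, 6]), it suggests a [0, 1] clamp. A minimal standalone sketch of the three clamping variants (illustrative only, not the TFLite kernels; the helper names are made up here):

#include <algorithm>

// Illustrative clamps implied by the enum comments in the hunk below.
float ApplyRelu(float x) { return std::max(0.0f, x); }                        // [0, inf)
float ApplyReluN1To1(float x) { return std::min(std::max(-1.0f, x), 1.0f); }  // [-1, 1]
float ApplyRelu6(float x) { return std::min(std::max(0.0f, x), 6.0f); }       // [0, 6]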
@@ -67,8 +67,9 @@ typedef struct {
 typedef enum {
   kTfLiteActNone = 0,
   kTfLiteActRelu,
-  kTfLiteActRelu1,      // min(max(-1, x), 1)
-  kTfLiteActRelu6,      // min(max(0, x), 6)
+  kTfLiteActReluN1To1,  // min(max(-1, x), 1)
+  kTfLiteActRelu1 = kTfLiteActReluN1To1,  // kTfLiteActRelu1 will be deprecated.
+  kTfLiteActRelu6,      // min(max(0, x), 6)
   kTfLiteActTanh,
   kTfLiteActSignBit,
   kTfLiteActSigmoid,
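Note that the hunk above keeps `kTfLiteActRelu1` as an alias for `kTfLiteActReluN1To1` instead of deleting it, so code written against the old name still compiles and the two enumerators compare equal until the alias is removed. A hypothetical caller, for illustration (the function name is made up):

// Both names refer to the same enumerator value while the alias exists.
bool ClampsToMinusOneToOne(TfLiteFusedActivation act) {
  return act == kTfLiteActReluN1To1;  // also true when callers pass kTfLiteActRelu1
}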
@@ -109,7 +109,7 @@ TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
     case ActivationFunctionType_RELU:
       return kTfLiteActRelu;
     case ActivationFunctionType_RELU_N1_TO_1:
-      return kTfLiteActRelu1;
+      return kTfLiteActReluN1To1;
     case ActivationFunctionType_RELU6:
       return kTfLiteActRelu6;
     case ActivationFunctionType_TANH:

@@ -109,7 +109,7 @@ absl::Status IsActivationSupported(TfLiteFusedActivation fused_activation) {
   switch (fused_activation) {
     case kTfLiteActNone:
     case kTfLiteActRelu:
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
     case kTfLiteActRelu6:
     case kTfLiteActTanh:
       return absl::OkStatus();

@@ -140,12 +140,12 @@ absl::Status MaybeFuseActivation(TfLiteFusedActivation fused_activation,
   }
   switch (fused_activation) {
     case kTfLiteActRelu:
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
     case kTfLiteActRelu6: {
       ReLUAttributes attr;
       attr.clip = fused_activation == kTfLiteActRelu
                       ? 0.0f
-                      : (fused_activation == kTfLiteActRelu1 ? 1.0f : 6.0f);
+                      : (fused_activation == kTfLiteActReluN1To1 ? 1.0f : 6.0f);
       for (auto index : output_indices) {
         Node* activation_node;
         RETURN_IF_ERROR(
@@ -197,7 +197,7 @@ TfLiteStatus Conv2dOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
   if (activation == kTfLiteActRelu6) {
     conv_output_min = 0;
     conv_output_max = 6;
-  } else if (activation == kTfLiteActRelu1) {
+  } else if (activation == kTfLiteActReluN1To1) {
     conv_output_min = -1;
     conv_output_max = 1;
   } else if (activation == kTfLiteActRelu) {

@@ -26,7 +26,7 @@ namespace {

 bool IsActivationReluOrNone(TfLiteFusedActivation activation) {
   return (activation == kTfLiteActRelu || activation == kTfLiteActRelu6 ||
-          activation == kTfLiteActRelu1 || activation == kTfLiteActNone);
+          activation == kTfLiteActReluN1To1 || activation == kTfLiteActNone);
 }

 bool TensorTypeMatch(int tensor_id, TfLiteContext* context,

@@ -330,7 +330,7 @@ class Subgraph {
         *output_min = 0.0f;
         *output_max = +std::numeric_limits<float>::infinity();
         return kTfLiteOk;
-      case kTfLiteActRelu1:
+      case kTfLiteActReluN1To1:
         *output_min = -1.0f;
         *output_max = +1.0f;
         return kTfLiteOk;

@@ -497,7 +497,7 @@ class Subgraph {
             context, "unsupported fused activation (Relu) in node #%d",
             node_index);
         return kTfLiteOk;
-      case kTfLiteActRelu1:
+      case kTfLiteActReluN1To1:
         TF_LITE_MAYBE_KERNEL_LOG(
             context, "unsupported fused activation (ReluMinus1To1) in node #%d",
             node_index);
@@ -41,7 +41,7 @@ CoreML::Specification::NeuralNetworkLayer* ActivationLayerBuilder::Build() {
       layer_->mutable_activation()->mutable_relu();
       break;
     // Relu1 and Relu6 layers are fully composed in PopulateSubgraph().
-    case kTfLiteActRelu1:  // clip(-1, 1)
+    case kTfLiteActReluN1To1:  // clip(-1, 1)
       layer_->mutable_unary()->set_alpha(-1);
       layer_->mutable_unary()->set_type(
           CoreML::Specification::UnaryFunctionLayerParams::THRESHOLD);

@@ -64,7 +64,7 @@ CoreML::Specification::NeuralNetworkLayer* ActivationLayerBuilder::Build() {
 }

 TfLiteStatus ActivationLayerBuilder::PopulateSubgraph(TfLiteContext* context) {
-  if (!(activation_ == kTfLiteActRelu6 || activation_ == kTfLiteActRelu1)) {
+  if (!(activation_ == kTfLiteActRelu6 || activation_ == kTfLiteActReluN1To1)) {
     builder_output_ = AddOutput();
     return kTfLiteOk;
   }

@@ -125,7 +125,7 @@ OpBuilder* CreateReluOpBuilder(GraphBuilder* graph_builder) {
 }

 OpBuilder* CreateReluN1To1OpBuilder(GraphBuilder* graph_builder) {
-  return new ActivationLayerBuilder(graph_builder, kTfLiteActRelu1);
+  return new ActivationLayerBuilder(graph_builder, kTfLiteActReluN1To1);
 }

 OpBuilder* CreateRelu6OpBuilder(GraphBuilder* graph_builder) {
@@ -29,7 +29,7 @@ inline ActivationFunctionType TfLiteActivationToSchemaActivation(
       return ActivationFunctionType_NONE;
     case kTfLiteActRelu:
       return ActivationFunctionType_RELU;
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
       return ActivationFunctionType_RELU_N1_TO_1;
     case kTfLiteActRelu6:
       return ActivationFunctionType_RELU6;

@@ -312,7 +312,7 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   if (!is_pie && !is_hybrid) {
     TF_LITE_ENSURE(context, params->activation == kTfLiteActNone ||
                                 params->activation == kTfLiteActRelu ||
-                                params->activation == kTfLiteActRelu1 ||
+                                params->activation == kTfLiteActReluN1To1 ||
                                 params->activation == kTfLiteActRelu6);
   }
   return PrepareImpl(context, node);

@@ -587,7 +587,7 @@ inline void ApplyActivationToVector(const float* __restrict__ vector,
       return;
     case kTfLiteActRelu:
       return ApplyReluToVector(vector, v_size, result);
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
       return ApplyRelu1ToVector(vector, v_size, result);
     case kTfLiteActRelu6:
       return ApplyRelu6ToVector(vector, v_size, result);
@@ -188,7 +188,7 @@ void CalculateActivationRangeQuantizedImpl(TfLiteFusedActivation activation,
   } else if (activation == kTfLiteActRelu6) {
     *act_min = std::max(qmin, quantize(0.0));
     *act_max = std::min(qmax, quantize(6.0));
-  } else if (activation == kTfLiteActRelu1) {
+  } else if (activation == kTfLiteActReluN1To1) {
     *act_min = std::max(qmin, quantize(-1.0));
     *act_max = std::min(qmax, quantize(1.0));
   } else {
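The quantized branch above maps the float clamp [-1, 1] into the output tensor's quantized domain and then intersects it with the type's representable range. A self-contained sketch, assuming the usual affine quantization q = round(x / scale) + zero_point (the function name and lambda here are illustrative, not the TFLite helpers):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>

// Sketch of the ReluN1To1 branch for an int8 output tensor.
void ReluN1To1QuantizedRange(float scale, int32_t zero_point,
                             int32_t* act_min, int32_t* act_max) {
  const int32_t qmin = std::numeric_limits<int8_t>::min();  // -128
  const int32_t qmax = std::numeric_limits<int8_t>::max();  // 127
  auto quantize = [&](float x) {
    return zero_point + static_cast<int32_t>(std::round(x / scale));
  };
  *act_min = std::max(qmin, quantize(-1.0f));
  *act_max = std::min(qmax, quantize(1.0f));
}

// Example: scale = 1.0f / 128, zero_point = 0 gives act_min = -128, act_max = 127.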
@@ -169,7 +169,7 @@ void CalculateActivationRange(TfLiteFusedActivation activation,
   } else if (activation == kTfLiteActRelu6) {
     *activation_min = 0;
     *activation_max = 6;
-  } else if (activation == kTfLiteActRelu1) {
+  } else if (activation == kTfLiteActReluN1To1) {
     *activation_min = -1;
     *activation_max = 1;
   } else {

@@ -35,7 +35,7 @@ inline float ActivationValFloat(TfLiteFusedActivation act, float a) {
       return a;
     case kTfLiteActRelu:
       return TfLiteMax(0.0f, a);
-    case kTfLiteActRelu1:
+    case kTfLiteActReluN1To1:
       return TfLiteMax(-1.0f, TfLiteMin(a, 1.0f));
     case kTfLiteActRelu6:
       return TfLiteMax(0.0f, TfLiteMin(a, 6.0f));
@@ -201,7 +201,7 @@ TF_LITE_MICRO_TEST(FloatAddActivationRelu1) {
   float output_data[output_dims_count];
   tflite::testing::TestAddFloat(inout_shape, input1_values, inout_shape,
                                 input2_values, inout_shape, golden_values,
-                                kTfLiteActRelu1, output_data);
+                                kTfLiteActReluN1To1, output_data);
 }

 TF_LITE_MICRO_TEST(FloatAddVariousInputShapes) {

@@ -313,7 +313,7 @@ TF_LITE_MICRO_TEST(QuantizedAddActivationRelu1Uint8) {
       inout_shape, input1_values, input1_quantized, scales[0], zero_points[0],
       inout_shape, input2_values, input2_quantized, scales[1], zero_points[1],
       inout_shape, golden_values, golden_quantized, scales[2], zero_points[2],
-      kTfLiteActRelu1, output);
+      kTfLiteActReluN1To1, output);
 }

 TF_LITE_MICRO_TEST(QuantizedAddActivationRelu1Int8) {

@@ -334,7 +334,7 @@ TF_LITE_MICRO_TEST(QuantizedAddActivationRelu1Int8) {
       inout_shape, input1_values, input1_quantized, scales[0], zero_points[0],
       inout_shape, input2_values, input2_quantized, scales[1], zero_points[1],
       inout_shape, golden_values, golden_quantized, scales[2], zero_points[2],
-      kTfLiteActRelu1, output);
+      kTfLiteActReluN1To1, output);
 }

 TF_LITE_MICRO_TEST(QuantizedAddVariousInputShapesUint8) {
@@ -402,7 +402,7 @@ TF_LITE_MICRO_TEST(FloatRelu) {
       {0.1, 0.2, 0.3, 0.5},      // input2 data
       {4, 1, 2, 2, 1},           // output shape
       {-0.2, 0.04, 0.21, 0.4},   // expected output data
-      output_data, kTfLiteActRelu1);
+      output_data, kTfLiteActReluN1To1);
 }

 TF_LITE_MICRO_TEST(FloatBroadcast) {

@@ -417,7 +417,8 @@ TF_LITE_MICRO_TEST(SimpleAveragePoolTestInt8PaddingValidStride1Stride2Relu) {
       kTfLitePaddingValid, kTfLiteActRelu, output_data);
 }

-TF_LITE_MICRO_TEST(SimpleAveragePoolTestInt8PaddingValidStride2Stride1Relu1) {
+TF_LITE_MICRO_TEST(
+    SimpleAveragePoolTestInt8PaddingValidStride2Stride1ReluN1To1) {
   using tflite::testing::F2QS;

   const float input_min = -15.9375;
@@ -439,7 +440,7 @@ TF_LITE_MICRO_TEST(SimpleAveragePoolTestInt8PaddingValidStride2Stride1Relu1) {
        F2QS(-0.25, output_min, output_max), F2QS(0.75, output_min, output_max)},
       {4, 1, 1, 2, 1},         // Output shape
       output_min, output_max,  // output quantization range
-      kTfLitePaddingValid, kTfLiteActRelu1, output_data);
+      kTfLitePaddingValid, kTfLiteActReluN1To1, output_data);
 }

 TF_LITE_MICRO_TEST(SimpleAveragePoolTestInt8PaddingValidStride2Relu6) {

@@ -532,7 +533,7 @@ TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatRelu) {
                                     output_data);
 }

-TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatRelu1) {
+TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatReluN1To1) {
   float output_data[2];
   tflite::testing::TestMaxPoolFloat({4, 1, 2, 4, 1},  // Input shape
                                     {
@@ -548,7 +549,7 @@ TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatRelu1) {
                                         0.7,
                                     },
                                     {4, 1, 1, 2, 1},  // Output shape
-                                    kTfLitePaddingValid, kTfLiteActRelu1,
+                                    kTfLitePaddingValid, kTfLiteActReluN1To1,
                                     output_data);

   tflite::testing::TestMaxPoolFloat({4, 1, 2, 4, 1},  // Input shape

@@ -565,7 +566,7 @@ TF_LITE_MICRO_TEST(SimpleMaxPoolTestFloatRelu1) {
                                         1.0,
                                     },
                                     {4, 1, 1, 2, 1},  // Output shape
-                                    kTfLitePaddingValid, kTfLiteActRelu1,
+                                    kTfLitePaddingValid, kTfLiteActReluN1To1,
                                     output_data);
 }

@@ -713,7 +714,7 @@ TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu) {
       kTfLitePaddingValid, kTfLiteActRelu, output_data);
 }

-TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu1) {
+TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActReluN1To1) {
   using tflite::testing::F2Q;

   uint8_t output_data[2];

@@ -743,7 +744,7 @@ TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu1) {
       {// Output values
        F2Q(-1.0, output_min, output_max), F2Q(1.0, output_min, output_max)},
       output_min, output_max, {4, 1, 1, 2, 1},  // Output shape
-      kTfLitePaddingValid, kTfLiteActRelu1, output_data);
+      kTfLitePaddingValid, kTfLiteActReluN1To1, output_data);
 }

 TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu6) {

@@ -944,7 +945,7 @@ TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu) {
       kTfLitePaddingValid, kTfLiteActRelu, output_data);
 }

-TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu1) {
+TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActReluN1To1) {
   using tflite::testing::F2QS;

   int8_t output_data[2];
@@ -974,7 +975,7 @@ TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu1) {
       {// Output values
        F2QS(-1.0, output_min, output_max), F2QS(1.0, output_min, output_max)},
       output_min, output_max, {4, 1, 1, 2, 1},  // Output shape
-      kTfLitePaddingValid, kTfLiteActRelu1, output_data);
+      kTfLitePaddingValid, kTfLiteActReluN1To1, output_data);
 }

 TF_LITE_MICRO_TEST(MaxPoolTestUInt8ActRelu6) {

@@ -201,7 +201,7 @@ TF_LITE_MICRO_TEST(FloatSubActivationRelu1) {
   float output_data[output_dims_count];
   tflite::testing::TestSubFloat(inout_shape, input1_values, inout_shape,
                                 input2_values, inout_shape, golden_values,
-                                kTfLiteActRelu1, output_data);
+                                kTfLiteActReluN1To1, output_data);
 }

 TF_LITE_MICRO_TEST(FloatSubVariousInputShapes) {

@@ -313,7 +313,7 @@ TF_LITE_MICRO_TEST(QuantizedSubActivationRelu1Uint8) {
       inout_shape, input1_values, input1_quantized, scales[0], zero_points[0],
       inout_shape, input2_values, input2_quantized, scales[1], zero_points[1],
       inout_shape, golden_values, golden_quantized, scales[2], zero_points[2],
-      kTfLiteActRelu1, output);
+      kTfLiteActReluN1To1, output);
 }

 TF_LITE_MICRO_TEST(QuantizedSubActivationRelu1Int8) {
@@ -334,7 +334,7 @@ TF_LITE_MICRO_TEST(QuantizedSubActivationRelu1Int8) {
       inout_shape, input1_values, input1_quantized, scales[0], zero_points[0],
       inout_shape, input2_values, input2_quantized, scales[1], zero_points[1],
       inout_shape, golden_values, golden_quantized, scales[2], zero_points[2],
-      kTfLiteActRelu1, output);
+      kTfLiteActReluN1To1, output);
 }

 TF_LITE_MICRO_TEST(QuantizedSubVariousInputShapesUint8) {