Switch TFLM softmax kernels to flat namespace.

This is incremental progress towards a flat namespace for TFLM. See
https://abseil.io/tips/130 for more context.
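
Concretely, call sites move from the nested registration symbol to the flat one. A minimal before/after sketch of a caller (variable names are illustrative; the actual call-site updates are in the diffs below):

  // Before: the registration was reached through the nested namespaces.
  const TfLiteRegistration reg_before = tflite::ops::micro::Register_SOFTMAX();

  // After: the same registration is exposed directly in the tflite namespace.
  const TfLiteRegistration reg_after = tflite::Register_SOFTMAX();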

PiperOrigin-RevId: 335948052
Change-Id: I0374fb08df58bf0d2d12b558e53cabdc68b28235
Advait Jain 2020-10-07 14:02:12 -07:00 committed by TensorFlower Gardener
parent b6d58af144
commit 24d00f249d
6 changed files with 17 additions and 38 deletions

@@ -21,9 +21,6 @@ limitations under the License.
 #include "tensorflow/lite/micro/kernels/kernel_util.h"
 namespace tflite {
-namespace ops {
-namespace micro {
-namespace activations {
 namespace {
 TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context,
@@ -68,8 +65,6 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context,
   return kTfLiteOk;
 }
-}  // namespace
 void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) {
   TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
   return context->AllocatePersistentBuffer(context, sizeof(SoftmaxParams));
@@ -157,19 +152,17 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
   }
 }
-}  // namespace activations
+}  // namespace
 TfLiteRegistration Register_SOFTMAX() {
-  return {/*init=*/activations::SoftmaxInit,
+  return {/*init=*/SoftmaxInit,
           /*free=*/nullptr,
-          /*prepare=*/activations::SoftmaxPrepare,
-          /*invoke=*/activations::SoftmaxEval,
+          /*prepare=*/SoftmaxPrepare,
+          /*invoke=*/SoftmaxEval,
           /*profiling_string=*/nullptr,
           /*builtin_code=*/0,
           /*custom_name=*/nullptr,
           /*version=*/0};
 }
-}  // namespace micro
-}  // namespace ops
 }  // namespace tflite

@@ -35,6 +35,7 @@ TfLiteRegistration Register_CONV_2D();
 TfLiteRegistration Register_DEPTHWISE_CONV_2D();
 TfLiteRegistration Register_QUANTIZE();
 TfLiteRegistration Register_SHAPE();
+TfLiteRegistration Register_SOFTMAX();
 namespace ops {
 namespace micro {
@@ -82,7 +83,6 @@ TfLiteRegistration Register_RESIZE_NEAREST_NEIGHBOR();
 TfLiteRegistration Register_ROUND();
 TfLiteRegistration Register_RSQRT();
 TfLiteRegistration Register_SIN();
-TfLiteRegistration Register_SOFTMAX();
 TfLiteRegistration Register_SPLIT();
 TfLiteRegistration Register_SPLIT_V();
 TfLiteRegistration Register_SQRT();

@@ -25,9 +25,6 @@ limitations under the License.
 #include "tensorflow/lite/micro/kernels/kernel_util.h"
 namespace tflite {
-namespace ops {
-namespace micro {
-namespace activations {
 namespace {
 // Softmax parameter data that persists in user_data
@@ -92,8 +89,6 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context,
   return kTfLiteOk;
 }
-}  // namespace
 // Takes a tensor and performs softmax along the last dimension.
 void SoftmaxFloat(const TfLiteEvalTensor* input, TfLiteEvalTensor* output,
                   const SoftmaxParams& op_data) {
@@ -212,19 +207,17 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
       return kTfLiteError;
   }
 }
-}  // namespace activations
+}  // namespace
 TfLiteRegistration Register_SOFTMAX() {
-  return {/*init=*/activations::SoftmaxInit,
+  return {/*init=*/SoftmaxInit,
           /*free=*/nullptr,
-          /*prepare=*/activations::SoftmaxPrepare,
-          /*invoke=*/activations::SoftmaxEval,
+          /*prepare=*/SoftmaxPrepare,
+          /*invoke=*/SoftmaxEval,
           /*profiling_string=*/nullptr,
           /*builtin_code=*/0,
           /*custom_name=*/nullptr,
           /*version=*/0};
 }
-}  // namespace micro
-}  // namespace ops
 }  // namespace tflite

@@ -257,8 +257,7 @@ void ValidateSoftmaxGoldens(TfLiteTensor* tensors, const int tensor_count,
   int outputs_array_data[] = {1, 1};
   TfLiteIntArray* outputs_array = IntArrayFromInts(outputs_array_data);
-  const TfLiteRegistration registration =
-      tflite::ops::micro::Register_SOFTMAX();
+  const TfLiteRegistration registration = Register_SOFTMAX();
   micro::KernelRunner runner(registration, tensors, tensor_count, inputs_array,
                              outputs_array, &builtin_data,
                              micro_test::reporter);

@@ -25,9 +25,6 @@ limitations under the License.
 #include "tensorflow/lite/micro/kernels/kernel_util.h"
 namespace tflite {
-namespace ops {
-namespace micro {
-namespace activations {
 namespace {
 struct OpData {
@@ -105,8 +102,6 @@ TfLiteStatus Softmax(OpData op_data, const RuntimeShape& input_shape,
   return kTfLiteOk;
 }
-}  // namespace
 TfLiteStatus CalculateSoftmaxOpData(TfLiteContext* context,
                                     const TfLiteTensor* input,
                                     TfLiteTensor* output,
@@ -196,19 +191,18 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
       return kTfLiteError;
   }
 }
-}  // namespace activations
+}  // namespace
 TfLiteRegistration Register_SOFTMAX() {
-  return {/*init=*/activations::SoftmaxInit,
+  return {/*init=*/SoftmaxInit,
           /*free=*/nullptr,
-          /*prepare=*/activations::SoftmaxPrepare,
-          /*invoke=*/activations::SoftmaxEval,
+          /*prepare=*/SoftmaxPrepare,
+          /*invoke=*/SoftmaxEval,
           /*profiling_string=*/nullptr,
           /*builtin_code=*/0,
           /*custom_name=*/nullptr,
           /*version=*/0};
 }
-}  // namespace micro
-}  // namespace ops
 }  // namespace tflite

@@ -353,8 +353,8 @@ class MicroMutableOpResolver : public MicroOpResolver {
   }
   TfLiteStatus AddSoftmax() {
-    return AddBuiltin(BuiltinOperator_SOFTMAX,
-                      tflite::ops::micro::Register_SOFTMAX(), ParseSoftmax);
+    return AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX(),
+                      ParseSoftmax);
   }
   TfLiteStatus AddSplit() {
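
For reference, application code typically reaches these kernels through MicroMutableOpResolver, so the AddSoftmax() change above is what routes applications to the flat-namespace registration. A minimal usage sketch, assuming the templated resolver API of this TFLM revision (the capacity of 1 and the error handling are illustrative, not part of this change):

  #include "tensorflow/lite/micro/micro_mutable_op_resolver.h"

  // Resolver with room for one operator; AddSoftmax() now wires up
  // tflite::Register_SOFTMAX() rather than tflite::ops::micro::Register_SOFTMAX().
  tflite::MicroMutableOpResolver<1> op_resolver;
  if (op_resolver.AddSoftmax() != kTfLiteOk) {
    // Registration failed (e.g. the resolver is out of capacity).
  }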