diff --git a/tensorflow/lite/micro/kernels/add.cc b/tensorflow/lite/micro/kernels/add.cc
index feb855b8cdf..6a3f9e0007b 100644
--- a/tensorflow/lite/micro/kernels/add.cc
+++ b/tensorflow/lite/micro/kernels/add.cc
@@ -54,16 +54,6 @@ struct OpData {
   int32 output_offset;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params,
                              const TfLiteTensor* input1,
                              const TfLiteTensor* input2, TfLiteTensor* output,
@@ -198,9 +188,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace add
 
 TfLiteRegistration* Register_ADD() {
-  static TfLiteRegistration r = {/*init=*/add::Init,
-                                 /*free=*/add::Free,
-                                 /*prepare=*/add::Prepare,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/add::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/arc/conv.cc b/tensorflow/lite/micro/kernels/arc/conv.cc
index 0a2523dbf0f..69542e12e90 100644
--- a/tensorflow/lite/micro/kernels/arc/conv.cc
+++ b/tensorflow/lite/micro/kernels/arc/conv.cc
@@ -107,16 +107,6 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
                    TfLiteConvParams* params, OpData* data,
                    const TfLiteTensor* input, const TfLiteTensor* filter,
@@ -337,8 +327,14 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace conv
 
 TfLiteRegistration* Register_CONV_2D() {
-  static TfLiteRegistration r = {conv::Init, conv::Free, conv::Prepare,
-                                 conv::Eval};
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
+                                 /*invoke=*/conv::Eval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/arc/depthwise_conv.cc b/tensorflow/lite/micro/kernels/arc/depthwise_conv.cc
index 054d2571dab..6322414f5c6 100644
--- a/tensorflow/lite/micro/kernels/arc/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/arc/depthwise_conv.cc
@@ -103,16 +103,6 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
 
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 void EvalFloat(TfLiteContext* context, TfLiteNode* node,
                TfLiteDepthwiseConvParams* params, OpData* data,
                const TfLiteTensor* input, const TfLiteTensor* filter,
@@ -338,8 +328,14 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace depthwise_conv
 
 TfLiteRegistration* Register_DEPTHWISE_CONV_2D() {
-  static TfLiteRegistration r = {depthwise_conv::Init, depthwise_conv::Free,
-                                 depthwise_conv::Prepare, depthwise_conv::Eval};
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
+                                 /*invoke=*/depthwise_conv::Eval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/arc/fully_connected.cc b/tensorflow/lite/micro/kernels/arc/fully_connected.cc
index a0366e7f067..bbd21292cd8 100644
--- a/tensorflow/lite/micro/kernels/arc/fully_connected.cc
+++ b/tensorflow/lite/micro/kernels/arc/fully_connected.cc
@@ -72,16 +72,6 @@ TfLiteStatus CalculateOpData(TfLiteContext* context,
 
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
                                TfLiteFullyConnectedParams* params, OpData* data,
                                const TfLiteTensor* input,
@@ -242,9 +232,15 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace fully_connected
 
 TfLiteRegistration* Register_FULLY_CONNECTED() {
-  static TfLiteRegistration r = {fully_connected::Init, fully_connected::Free,
-                                 fully_connected::Prepare,
-                                 fully_connected::Eval};
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
+                                 /*invoke=*/fully_connected::Eval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
+
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/arc/pooling.cc b/tensorflow/lite/micro/kernels/arc/pooling.cc
index 3a65dfbd045..55452013028 100644
--- a/tensorflow/lite/micro/kernels/arc/pooling.cc
+++ b/tensorflow/lite/micro/kernels/arc/pooling.cc
@@ -209,16 +209,6 @@ void MaxEvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node,
 
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
   OpData data;
@@ -274,18 +264,26 @@ TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace pooling
 
 TfLiteRegistration* Register_AVERAGE_POOL_2D() {
-  static TfLiteRegistration r = {
-      pooling::Init,
-      pooling::Free,
-      pooling::Prepare,
-      pooling::AverageEval,
-  };
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
+                                 /*invoke=*/pooling::AverageEval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
 TfLiteRegistration* Register_MAX_POOL_2D() {
-  static TfLiteRegistration r = {pooling::Init, pooling::Free, pooling::Prepare,
-                                 pooling::MaxEval};
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
+                                 /*invoke=*/pooling::MaxEval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/arg_min_max.cc b/tensorflow/lite/micro/kernels/arg_min_max.cc
index 2ec8ca23475..a7c0a4376ae 100644
--- a/tensorflow/lite/micro/kernels/arg_min_max.cc
+++ b/tensorflow/lite/micro/kernels/arg_min_max.cc
@@ -30,10 +30,6 @@ constexpr int kInputTensor = 0;
 constexpr int kAxis = 1;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 template <typename T1, typename T2, typename T3>
 inline void ArgMinMaxHelper(const RuntimeShape& input1_shape,
                             const T1* input1_data, const T3* input2_data,
@@ -105,7 +101,7 @@ TfLiteStatus ArgMaxEval(TfLiteContext* context, TfLiteNode* node) {
 TfLiteRegistration* Register_ARG_MAX() {
   static TfLiteRegistration r = {/*init=*/nullptr,
                                  /*free=*/nullptr,
-                                 /*prepare=*/arg_min_max::Prepare,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/arg_min_max::ArgMaxEval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
@@ -117,7 +113,7 @@ TfLiteRegistration* Register_ARG_MAX() {
 TfLiteRegistration* Register_ARG_MIN() {
   static TfLiteRegistration r = {/*init=*/nullptr,
                                  /*free=*/nullptr,
-                                 /*prepare=*/arg_min_max::Prepare,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/arg_min_max::ArgMinEval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/cmsis-nn/conv.cc b/tensorflow/lite/micro/kernels/cmsis-nn/conv.cc
index 8bc1f5351cb..273fdaea65b 100644
--- a/tensorflow/lite/micro/kernels/cmsis-nn/conv.cc
+++ b/tensorflow/lite/micro/kernels/cmsis-nn/conv.cc
@@ -115,8 +115,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
   return raw;
 }
 
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 #if defined(__ARM_FEATURE_DSP)
   OpData data;
@@ -408,8 +406,14 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace conv
 
 TfLiteRegistration* Register_CONV_2D() {
-  static TfLiteRegistration r = {conv::Init, conv::Free, conv::Prepare,
-                                 conv::Eval};
+  static TfLiteRegistration r = {/*init=*/conv::Init,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/conv::Prepare,
+                                 /*invoke=*/conv::Eval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/cmsis-nn/depthwise_conv.cc b/tensorflow/lite/micro/kernels/cmsis-nn/depthwise_conv.cc
index e4be31d12ed..8fd49627165 100644
--- a/tensorflow/lite/micro/kernels/cmsis-nn/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/cmsis-nn/depthwise_conv.cc
@@ -103,8 +103,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
   return raw;
 }
 
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 #if defined(__ARM_FEATURE_DSP)
   auto* params =
@@ -391,8 +389,14 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace depthwise_conv
 
 TfLiteRegistration* Register_DEPTHWISE_CONV_2D() {
-  static TfLiteRegistration r = {depthwise_conv::Init, depthwise_conv::Free,
-                                 depthwise_conv::Prepare, depthwise_conv::Eval};
+  static TfLiteRegistration r = {/*init=*/depthwise_conv::Init,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/depthwise_conv::Prepare,
+                                 /*invoke=*/depthwise_conv::Eval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/cmsis-nn/fully_connected.cc b/tensorflow/lite/micro/kernels/cmsis-nn/fully_connected.cc
index 88e32ba5d8c..aa31d14393f 100644
--- a/tensorflow/lite/micro/kernels/cmsis-nn/fully_connected.cc
+++ b/tensorflow/lite/micro/kernels/cmsis-nn/fully_connected.cc
@@ -77,8 +77,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
   return raw;
 }
 
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 #if defined(__ARM_FEATURE_DSP)
   const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor);
@@ -253,9 +251,14 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace fully_connected
 
 TfLiteRegistration* Register_FULLY_CONNECTED() {
-  static TfLiteRegistration r = {fully_connected::Init, fully_connected::Free,
-                                 fully_connected::Prepare,
-                                 fully_connected::Eval};
+  static TfLiteRegistration r = {/*init=*/fully_connected::Init,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/fully_connected::Prepare,
+                                 /*invoke=*/fully_connected::Eval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/cmsis-nn/pooling.cc b/tensorflow/lite/micro/kernels/cmsis-nn/pooling.cc
index 74cf10f5a73..bf7370ee79a 100644
--- a/tensorflow/lite/micro/kernels/cmsis-nn/pooling.cc
+++ b/tensorflow/lite/micro/kernels/cmsis-nn/pooling.cc
@@ -214,8 +214,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
   return raw;
 }
 
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
 #if defined(__ARM_FEATURE_DSP)
   const TfLiteTensor* input = GetInput(context, node, kInputTensor);
@@ -302,18 +300,26 @@ TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace pooling
 
 TfLiteRegistration* Register_AVERAGE_POOL_2D() {
-  static TfLiteRegistration r = {
-      pooling::Init,
-      pooling::Free,
-      pooling::Prepare,
-      pooling::AverageEval,
-  };
+  static TfLiteRegistration r = {/*init=*/pooling::Init,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/pooling::Prepare,
+                                 /*invoke=*/pooling::AverageEval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
 TfLiteRegistration* Register_MAX_POOL_2D() {
-  static TfLiteRegistration r = {pooling::Init, pooling::Free, pooling::Prepare,
-                                 pooling::MaxEval};
+  static TfLiteRegistration r = {/*init=*/pooling::Init,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/pooling::Prepare,
+                                 /*invoke=*/pooling::MaxEval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/cmsis-nn/softmax.cc b/tensorflow/lite/micro/kernels/cmsis-nn/softmax.cc
index 108f0cfbf4c..ce45c1ae9b1 100644
--- a/tensorflow/lite/micro/kernels/cmsis-nn/softmax.cc
+++ b/tensorflow/lite/micro/kernels/cmsis-nn/softmax.cc
@@ -62,12 +62,6 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context,
 
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
@@ -137,9 +131,14 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace activations
 
 TfLiteRegistration* Register_SOFTMAX() {
-  static TfLiteRegistration r = {activations::Init, activations::Free,
-                                 activations::SoftmaxPrepare,
-                                 activations::SoftmaxEval};
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/activations::SoftmaxPrepare,
+                                 /*invoke=*/activations::SoftmaxEval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/conv.cc b/tensorflow/lite/micro/kernels/conv.cc
index ecbd4f42d62..5d1418a68b1 100644
--- a/tensorflow/lite/micro/kernels/conv.cc
+++ b/tensorflow/lite/micro/kernels/conv.cc
@@ -109,16 +109,6 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
                    TfLiteConvParams* params, OpData* data,
                    const TfLiteTensor* input, const TfLiteTensor* filter,
@@ -273,7 +263,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 TfLiteRegistration* Register_CONV_2D() {
   static TfLiteRegistration r = {/*init=*/nullptr,
                                  /*free=*/nullptr,
-                                 /*prepare=*/conv::Prepare,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/conv::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/depthwise_conv.cc b/tensorflow/lite/micro/kernels/depthwise_conv.cc
index fff8eeddd0b..5d76642d37d 100644
--- a/tensorflow/lite/micro/kernels/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/depthwise_conv.cc
@@ -96,16 +96,6 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
 
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 void EvalFloat(TfLiteContext* context, TfLiteNode* node,
                TfLiteDepthwiseConvParams* params, OpData* data,
                const TfLiteTensor* input, const TfLiteTensor* filter,
@@ -263,9 +253,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace depthwise_conv
 
 TfLiteRegistration* Register_DEPTHWISE_CONV_2D() {
-  static TfLiteRegistration r = {/*init=*/depthwise_conv::Init,
-                                 /*free=*/depthwise_conv::Free,
-                                 /*prepare=*/depthwise_conv::Prepare,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/depthwise_conv::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/fully_connected.cc b/tensorflow/lite/micro/kernels/fully_connected.cc
index a1c2424d554..12cf678ffa2 100644
--- a/tensorflow/lite/micro/kernels/fully_connected.cc
+++ b/tensorflow/lite/micro/kernels/fully_connected.cc
@@ -80,8 +80,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
   return data;
 }
 
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   OpData* data = reinterpret_cast<OpData*>(node->user_data);
   auto* params =
@@ -222,7 +220,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 
 TfLiteRegistration* Register_FULLY_CONNECTED() {
   static TfLiteRegistration r = {/*init=*/fully_connected::Init,
-                                 /*free=*/fully_connected::Free,
+                                 /*free=*/nullptr,
                                  /*prepare=*/fully_connected::Prepare,
                                  /*invoke=*/fully_connected::Eval,
                                  /*profiling_string=*/nullptr,
diff --git a/tensorflow/lite/micro/kernels/logistic.cc b/tensorflow/lite/micro/kernels/logistic.cc
index 69d06937c3b..2369bbbb6bd 100644
--- a/tensorflow/lite/micro/kernels/logistic.cc
+++ b/tensorflow/lite/micro/kernels/logistic.cc
@@ -31,10 +31,6 @@ namespace activations {
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
@@ -85,7 +81,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 TfLiteRegistration* Register_LOGISTIC() {
   static TfLiteRegistration r = {/*init=*/nullptr,
                                  /*free=*/nullptr,
-                                 /*prepare=*/activations::Prepare,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/activations::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/mul.cc b/tensorflow/lite/micro/kernels/mul.cc
index 63258b8796c..2328ca493bf 100644
--- a/tensorflow/lite/micro/kernels/mul.cc
+++ b/tensorflow/lite/micro/kernels/mul.cc
@@ -65,10 +65,6 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
   return kTfLiteOk;
 }
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
                    TfLiteMulParams* params, OpData* data,
                    const TfLiteTensor* input1, const TfLiteTensor* input2,
@@ -165,7 +161,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 TfLiteRegistration* Register_MUL() {
   static TfLiteRegistration r = {/*init=*/nullptr,
                                  /*free=*/nullptr,
-                                 /*prepare=*/mul::Prepare,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/mul::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/pack.cc b/tensorflow/lite/micro/kernels/pack.cc
index de50912b743..60a23cc0e0b 100644
--- a/tensorflow/lite/micro/kernels/pack.cc
+++ b/tensorflow/lite/micro/kernels/pack.cc
@@ -26,10 +26,6 @@ namespace {
 
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 template <typename T>
 TfLiteStatus PackImpl(TfLiteContext* context, TfLiteNode* node,
                       TfLiteTensor* output, int values_count, int axis) {
@@ -115,7 +111,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 TfLiteRegistration* Register_PACK() {
   static TfLiteRegistration r = {/*init=*/nullptr,
                                  /*free=*/nullptr,
-                                 /*prepare=*/pack::Prepare,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/pack::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/pooling.cc b/tensorflow/lite/micro/kernels/pooling.cc
index 4b0b150d89a..66c873fbb64 100644
--- a/tensorflow/lite/micro/kernels/pooling.cc
+++ b/tensorflow/lite/micro/kernels/pooling.cc
@@ -155,15 +155,6 @@ void MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node,
 }
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
 TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
   OpData data;
@@ -219,9 +210,9 @@ TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace pooling
 
 TfLiteRegistration* Register_AVERAGE_POOL_2D() {
-  static TfLiteRegistration r = {/*init=*/pooling::Init,
-                                 /*free=*/pooling::Free,
-                                 /*prepare=*/pooling::Prepare,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/pooling::AverageEval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
@@ -231,9 +222,9 @@ TfLiteRegistration* Register_AVERAGE_POOL_2D() {
 }
 
 TfLiteRegistration* Register_MAX_POOL_2D() {
-  static TfLiteRegistration r = {/*init=*/pooling::Init,
-                                 /*free=*/pooling::Free,
-                                 /*prepare=*/pooling::Prepare,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/pooling::MaxEval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/portable_optimized/depthwise_conv.cc b/tensorflow/lite/micro/kernels/portable_optimized/depthwise_conv.cc
index edb7fb0e110..9fb8f2e32cc 100644
--- a/tensorflow/lite/micro/kernels/portable_optimized/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/portable_optimized/depthwise_conv.cc
@@ -297,16 +297,6 @@ static inline void DepthwiseConvOptimizedForFilterWidthEight(
 
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 void EvalFloat(TfLiteContext* context, TfLiteNode* node,
                TfLiteDepthwiseConvParams* params, OpData* data,
                const TfLiteTensor* input, const TfLiteTensor* filter,
@@ -509,9 +499,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace depthwise_conv
 
 TfLiteRegistration* Register_DEPTHWISE_CONV_2D() {
-  static TfLiteRegistration r = {/*init=*/depthwise_conv::Init,
-                                 /*free=*/depthwise_conv::Free,
-                                 /*prepare=*/depthwise_conv::Prepare,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/depthwise_conv::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/quantize.cc b/tensorflow/lite/micro/kernels/quantize.cc
index 7f15c600e5f..d40471df948 100644
--- a/tensorflow/lite/micro/kernels/quantize.cc
+++ b/tensorflow/lite/micro/kernels/quantize.cc
@@ -26,12 +26,6 @@ namespace ops {
 namespace micro {
 namespace quantize {
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
@@ -119,8 +113,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 // AffineQuantize takes scale and zero point and quantizes the float value to
 // quantized output, in int8 or uint8 format.
 TfLiteRegistration* Register_QUANTIZE() {
-  static TfLiteRegistration r = {/*init=*/quantize::Init,
-                                 /*free=*/quantize::Free,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
                                  /*prepare=*/quantize::Prepare,
                                  /*invoke=*/quantize::Eval,
                                  /*profiling_string=*/nullptr,
diff --git a/tensorflow/lite/micro/kernels/softmax.cc b/tensorflow/lite/micro/kernels/softmax.cc
index 1c95d6db8fb..494666a12d1 100644
--- a/tensorflow/lite/micro/kernels/softmax.cc
+++ b/tensorflow/lite/micro/kernels/softmax.cc
@@ -72,12 +72,6 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context,
 
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
@@ -145,14 +139,14 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace activations
 
 TfLiteRegistration* Register_SOFTMAX() {
-  static TfLiteRegistration r = {activations::Init,
-                                 activations::Free,
-                                 activations::SoftmaxPrepare,
-                                 activations::SoftmaxEval,
-                                 nullptr,
-                                 0,
-                                 nullptr,
-                                 0};
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/activations::SoftmaxPrepare,
+                                 /*invoke=*/activations::SoftmaxEval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/split.cc b/tensorflow/lite/micro/kernels/split.cc
index ab8105f57e6..94b1508dbe6 100644
--- a/tensorflow/lite/micro/kernels/split.cc
+++ b/tensorflow/lite/micro/kernels/split.cc
@@ -23,10 +23,6 @@ namespace ops {
 namespace micro {
 namespace split {
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 template <typename T>
 TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node,
                        const TfLiteTensor* input, int axis_value) {
@@ -118,7 +114,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 TfLiteRegistration* Register_SPLIT() {
   static TfLiteRegistration r = {/*init=*/nullptr,
                                  /*free=*/nullptr,
-                                 /*prepare=*/split::Prepare,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/split::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/sub.cc b/tensorflow/lite/micro/kernels/sub.cc
index cb33455d0db..ba079fce7a6 100644
--- a/tensorflow/lite/micro/kernels/sub.cc
+++ b/tensorflow/lite/micro/kernels/sub.cc
@@ -54,16 +54,6 @@ struct OpData {
   int32 output_offset;
 };
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteSubParams* params,
                              const TfLiteTensor* input1,
                              const TfLiteTensor* input2, TfLiteTensor* output,
@@ -195,9 +185,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace sub
 
 TfLiteRegistration* Register_SUB() {
-  static TfLiteRegistration r = {/*init=*/sub::Init,
-                                 /*free=*/sub::Free,
-                                 /*prepare=*/sub::Prepare,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/sub::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/svdf.cc b/tensorflow/lite/micro/kernels/svdf.cc
index c6272227c81..e2cacf17927 100644
--- a/tensorflow/lite/micro/kernels/svdf.cc
+++ b/tensorflow/lite/micro/kernels/svdf.cc
@@ -331,12 +331,6 @@ constexpr int kInputActivationStateTensor = 4;
 // Output tensor.
 constexpr int kOutputTensor = 0;
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   const auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
 
@@ -529,8 +523,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace svdf
 
 TfLiteRegistration* Register_SVDF() {
-  static TfLiteRegistration r = {/*init=*/svdf::Init,
-                                 /*free=*/svdf::Free,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
                                  /*prepare=*/svdf::Prepare,
                                  /*invoke=*/svdf::Eval,
                                  /*profiling_string=*/nullptr,
diff --git a/tensorflow/lite/micro/kernels/unpack.cc b/tensorflow/lite/micro/kernels/unpack.cc
index 1ddd10f7b58..faa032d7977 100644
--- a/tensorflow/lite/micro/kernels/unpack.cc
+++ b/tensorflow/lite/micro/kernels/unpack.cc
@@ -26,10 +26,6 @@ namespace {
 
 constexpr int kInputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 template <typename T>
 TfLiteStatus UnpackImpl(TfLiteContext* context, TfLiteNode* node,
                         const TfLiteTensor* input, int output_count, int axis) {
@@ -108,7 +104,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 TfLiteRegistration* Register_UNPACK() {
   static TfLiteRegistration r = {/*init=*/nullptr,
                                  /*free=*/nullptr,
-                                 /*prepare=*/unpack::Prepare,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/unpack::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifi/conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifi/conv.cc
index b32ccb22edb..e0999750ffb 100755
--- a/tensorflow/lite/micro/kernels/xtensa_hifi/conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifi/conv.cc
@@ -131,16 +131,6 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
                            TfLiteConvParams* params, OpData* data,
                            const TfLiteTensor* input,
@@ -540,7 +530,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 TfLiteRegistration* Register_CONV_2D() {
   static TfLiteRegistration r = {/*init=*/nullptr,
                                  /*free=*/nullptr,
-                                 /*prepare=*/conv::Prepare,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/conv::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifi/depthwise_conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifi/depthwise_conv.cc
index 0e533fc4c83..ef409d9b87a 100755
--- a/tensorflow/lite/micro/kernels/xtensa_hifi/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifi/depthwise_conv.cc
@@ -118,16 +118,6 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
 
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node,
                        TfLiteDepthwiseConvParams* params, OpData* data,
                        const TfLiteTensor* input, const TfLiteTensor* filter,
@@ -547,9 +537,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace depthwise_conv
 
 TfLiteRegistration* Register_DEPTHWISE_CONV_2D() {
-  static TfLiteRegistration r = {/*init=*/depthwise_conv::Init,
-                                 /*free=*/depthwise_conv::Free,
-                                 /*prepare=*/depthwise_conv::Prepare,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/depthwise_conv::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifi/fully_connected.cc b/tensorflow/lite/micro/kernels/xtensa_hifi/fully_connected.cc
index f008a867e4c..7b92d521fb0 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifi/fully_connected.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifi/fully_connected.cc
@@ -92,16 +92,6 @@ TfLiteStatus CalculateOpData(TfLiteContext* context,
 
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node,
                                TfLiteFullyConnectedParams* params, OpData* data,
                                const TfLiteTensor* input,
@@ -264,9 +254,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace fully_connected
 
 TfLiteRegistration* Register_FULLY_CONNECTED() {
-  static TfLiteRegistration r = {/*init=*/fully_connected::Init,
-                                 /*free=*/fully_connected::Free,
-                                 /*prepare=*/fully_connected::Prepare,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/fully_connected::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifi/logistic.cc b/tensorflow/lite/micro/kernels/xtensa_hifi/logistic.cc
index 8a612bcf691..cb37b1b85a5 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifi/logistic.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifi/logistic.cc
@@ -53,10 +53,6 @@ namespace activations {
 constexpr int kInputTensor = 0;
 constexpr int kOutputTensor = 0;
 
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
 TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
   const TfLiteTensor* input = GetInput(context, node, kInputTensor);
   TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
@@ -117,7 +113,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 TfLiteRegistration* Register_LOGISTIC() {
   static TfLiteRegistration r = {/*init=*/nullptr,
                                  /*free=*/nullptr,
-                                 /*prepare=*/activations::Prepare,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/activations::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifi/pooling.cc b/tensorflow/lite/micro/kernels/xtensa_hifi/pooling.cc
index a3a56a5468a..bd87c32d994 100755
--- a/tensorflow/lite/micro/kernels/xtensa_hifi/pooling.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifi/pooling.cc
@@ -494,15 +494,6 @@ TfLiteStatus MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node,
 
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
-TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
 
 TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
@@ -558,9 +549,9 @@ TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace pooling
 
 TfLiteRegistration* Register_AVERAGE_POOL_2D() {
-  static TfLiteRegistration r = {/*init=*/pooling::Init,
-                                 /*free=*/pooling::Free,
-                                 /*prepare=*/pooling::Prepare,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/pooling::AverageEval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
@@ -570,9 +561,9 @@ TfLiteRegistration* Register_AVERAGE_POOL_2D() {
 }
 
 TfLiteRegistration* Register_MAX_POOL_2D() {
-  static TfLiteRegistration r = {/*init=*/pooling::Init,
-                                 /*free=*/pooling::Free,
-                                 /*prepare=*/pooling::Prepare,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/nullptr,
                                  /*invoke=*/pooling::MaxEval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifi/softmax.cc b/tensorflow/lite/micro/kernels/xtensa_hifi/softmax.cc
index 3a6a0957785..f12babad77d 100755
--- a/tensorflow/lite/micro/kernels/xtensa_hifi/softmax.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifi/softmax.cc
@@ -93,12 +93,6 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context,
 
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
   TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
   TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
@@ -214,14 +208,14 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace activations
 
 TfLiteRegistration* Register_SOFTMAX() {
-  static TfLiteRegistration r = {activations::Init,
-                                 activations::Free,
-                                 activations::SoftmaxPrepare,
-                                 activations::SoftmaxEval,
-                                 nullptr,
-                                 0,
-                                 nullptr,
-                                 0};
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/activations::SoftmaxPrepare,
+                                 /*invoke=*/activations::SoftmaxEval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifi/svdf.cc b/tensorflow/lite/micro/kernels/xtensa_hifi/svdf.cc
index d2cda20ddf7..d79f53ccd56 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifi/svdf.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifi/svdf.cc
@@ -361,12 +361,6 @@ constexpr int kInputActivationStateTensor = 4;
 // Output tensor.
 constexpr int kOutputTensor = 0;
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   const auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
 
@@ -566,14 +560,15 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace svdf
 
 TfLiteRegistration* Register_SVDF() {
-  static TfLiteRegistration r = {/*init=*/svdf::Init,
-                                 /*free=*/svdf::Free,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
                                  /*prepare=*/svdf::Prepare,
                                  /*invoke=*/svdf::Eval,
                                  /*profiling_string=*/nullptr,
                                  /*builtin_code=*/0,
                                  /*custom_name=*/nullptr,
                                  /*version=*/0};
+
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc
index dd68a7b74b6..e3bec9ddcb4 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc
@@ -247,12 +247,6 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
   return kTfLiteOk;
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   auto* params = reinterpret_cast<TfLiteConvParams*>(node->builtin_data);
 
@@ -352,8 +346,14 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace conv
 
 TfLiteRegistration* Register_CONV_2D() {
-  static TfLiteRegistration r = {conv::Init, conv::Free, conv::Prepare,
-                                 conv::Eval};
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/conv::Prepare,
+                                 /*invoke=*/conv::Eval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc
index da0f440e523..03fd3969c97 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc
@@ -249,12 +249,6 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node,
 
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteDepthwiseConvParams*>(node->builtin_data);
@@ -360,8 +354,14 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace depthwise_conv
 
 TfLiteRegistration* Register_DEPTHWISE_CONV_2D() {
-  static TfLiteRegistration r = {depthwise_conv::Init, depthwise_conv::Free,
-                                 depthwise_conv::Prepare, depthwise_conv::Eval};
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/depthwise_conv::Prepare,
+                                 /*invoke=*/depthwise_conv::Eval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc
index bbbd0fdb496..663954d8370 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/fully_connected.cc
@@ -175,12 +175,6 @@ TfLiteStatus CalculateOpData(TfLiteContext* context,
 
 }  // namespace
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   auto* params =
       reinterpret_cast<TfLiteFullyConnectedParams*>(node->builtin_data);
@@ -251,9 +245,15 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace fully_connected
 
 TfLiteRegistration* Register_FULLY_CONNECTED() {
-  static TfLiteRegistration r = {fully_connected::Init, fully_connected::Free,
-                                 fully_connected::Prepare,
-                                 fully_connected::Eval};
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/fully_connected::Prepare,
+                                 /*invoke=*/fully_connected::Eval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
+
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc
index d65fa4df089..9a119cea528 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/quantize.cc
@@ -118,12 +118,6 @@ constexpr int kMaxOpDataSize = 2;
 static int kStaticOpDataCounter = 0;
 static OpData kStaticOpData[kMaxOpDataSize];
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   TfLiteTensor* output = GetOutput(context, node, 0);
   const TfLiteTensor* input = GetInput(context, node, 0);
@@ -168,8 +162,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 // AffineQuantize takes scale and zero point and quantizes the float value to
 // quantized output, in int8 or uint8 format.
 TfLiteRegistration* Register_QUANTIZE() {
-  static TfLiteRegistration r = {/*init=*/quantize::Init,
-                                 /*free=*/quantize::Free,
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
                                  /*prepare=*/quantize::Prepare,
                                  /*invoke=*/quantize::Eval,
                                  /*profiling_string=*/nullptr,
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
index 58159b1eef4..47a2077fec1 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
@@ -169,12 +169,6 @@ void SoftmaxQuantized(const TfLiteTensor* input, TfLiteTensor* output,
   }
 }
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
   auto* params = static_cast<TfLiteSoftmaxParams*>(node->builtin_data);
 
@@ -216,9 +210,14 @@ TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace activations
 
 TfLiteRegistration* Register_SOFTMAX() {
-  static TfLiteRegistration r = {activations::Init, activations::Free,
-                                 activations::SoftmaxPrepare,
-                                 activations::SoftmaxEval};
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/activations::SoftmaxPrepare,
+                                 /*invoke=*/activations::SoftmaxEval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }
 
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc
index bae2a9d092d..7f1ade86d35 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/svdf.cc
@@ -258,12 +258,6 @@ constexpr int kInputActivationStateTensor = 4;
 // Output tensor.
 constexpr int kOutputTensor = 0;
 
-void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  return nullptr;
-}
-
-void Free(TfLiteContext* context, void* buffer) {}
-
 TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   const auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);
 
@@ -409,8 +403,14 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
 }  // namespace svdf
 
 TfLiteRegistration* Register_SVDF() {
-  static TfLiteRegistration r = {svdf::Init, svdf::Free, svdf::Prepare,
-                                 svdf::Eval};
+  static TfLiteRegistration r = {/*init=*/nullptr,
+                                 /*free=*/nullptr,
+                                 /*prepare=*/svdf::Prepare,
+                                 /*invoke=*/svdf::Eval,
+                                 /*profiling_string=*/nullptr,
+                                 /*builtin_code=*/0,
+                                 /*custom_name=*/nullptr,
+                                 /*version=*/0};
   return &r;
 }