1. Remove all references to tflite::cpu_backend_support: we no longer do reference-counting on the CPU backend context object, and GetFromContext has moved to the CpuBackendContext class as a static member function (see the sketch below the commit message).

2. Remove gemmlowp_support.{h,cc}, as their functionality has already been folded into the CpuBackendContext class.

PiperOrigin-RevId: 259464967
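The kernel-side effect of both points is sketched below. This is a minimal illustration rather than code from this commit: the op and its OpData struct are hypothetical placeholders, and the removed calls are shown only as comments. The gemmlowp context formerly dispensed by gemmlowp_support is now owned by the CpuBackendContext obtained here.

#include <cstddef>

#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"

namespace {

struct OpData {};  // hypothetical per-op state, for illustration only

// Old pattern (removed by this commit):
//   Init(): cpu_backend_support::IncrementUsageCounter(context);
//   Free(): cpu_backend_support::DecrementUsageCounter(context);
//   Eval(): auto* ctx = cpu_backend_support::GetFromContext(context);

// New pattern: no reference counting; the shared context is fetched on demand.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  return new OpData;
}

void Free(TfLiteContext* context, void* buffer) {
  delete reinterpret_cast<OpData*>(buffer);
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  // GetFromContext() lazily creates the CpuBackendContext inside the
  // interpreter-owned ExternalCpuBackendContext on first use and picks up
  // context->recommended_num_threads.
  tflite::CpuBackendContext* cpu_backend_context =
      tflite::CpuBackendContext::GetFromContext(context);
  (void)cpu_backend_context;  // would be passed to the optimized kernel impl
  return kTfLiteOk;
}

}  // namespace
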
Chao Mei (2019-07-22 21:09:21 -07:00), committed by TensorFlower Gardener
parent 91425cf597
commit ade316deef
15 changed files with 52 additions and 289 deletions


@@ -106,7 +106,6 @@ cc_library(
"//tensorflow/lite:framework",
"//tensorflow/lite/c:c_api_internal",
"//tensorflow/lite/kernels:cpu_backend_context",
"//tensorflow/lite/kernels:cpu_backend_support",
"//tensorflow/lite/kernels:kernel_util",
"//tensorflow/lite/kernels:op_macros",
"//tensorflow/lite/kernels/internal:tensor",


@@ -18,7 +18,6 @@ limitations under the License.
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/experimental/kernels/gru_cell.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_support.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
@@ -112,14 +111,12 @@ enum TemporaryTensor {
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
cpu_backend_support::IncrementUsageCounter(context);
auto* scratch_tensor_index = new int;
context->AddTensors(context, kTemporaryNum, scratch_tensor_index);
return scratch_tensor_index;
}
void Free(TfLiteContext* context, void* buffer) {
cpu_backend_support::DecrementUsageCounter(context);
delete reinterpret_cast<int*>(buffer);
}
@@ -221,7 +218,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
TfLiteTensor* output_state = GetOutput(context, node, kOutputState);
TfLiteTensor* activation = GetTemporary(context, node, kActivation);
TfLiteTensor* concat = GetTemporary(context, node, kConcat);
auto cpu_backend_context = cpu_backend_support::GetFromContext(context);
auto cpu_backend_context = CpuBackendContext::GetFromContext(context);
if (gate_weight->type == kTfLiteFloat32) {
GruImpl(input, input_state, gate_weight, gate_bias, candidate_weight,


@@ -308,23 +308,6 @@ cc_test(
],
)
cc_library(
name = "cpu_backend_support",
srcs = [
"cpu_backend_support.cc",
],
hdrs = [
"cpu_backend_support.h",
],
copts = tflite_copts(),
deps = [
":cpu_backend_context",
":op_macros",
"//tensorflow/lite:external_cpu_backend_context",
"//tensorflow/lite/c:c_api_internal",
],
)
cc_library(
name = "activation_functor",
hdrs = [
@@ -483,7 +466,7 @@ cc_library(
visibility = ["//visibility:private"],
deps = [
":activation_functor",
":cpu_backend_support",
":cpu_backend_context",
":eigen_support",
":kernel_util",
":lstm_eval",


@@ -24,7 +24,7 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/cpu_backend_support.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/eigen_support.h"
// b/131835803 forces us to include multithreaded_conv.h before optimized_ops.h
#ifndef TFLITE_WITH_RUY
@@ -115,13 +115,11 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
// to carry information from Prepare() to Eval().
auto* data = new OpData;
eigen_support::IncrementUsageCounter(context);
cpu_backend_support::IncrementUsageCounter(context);
return data;
}
void Free(TfLiteContext* context, void* buffer) {
eigen_support::DecrementUsageCounter(context);
cpu_backend_support::DecrementUsageCounter(context);
delete reinterpret_cast<OpData*>(buffer);
}
@@ -472,7 +470,7 @@ void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
GetTensorShape(bias), GetTensorData<int32_t>(bias),
GetTensorShape(output), GetTensorData<uint8_t>(output),
GetTensorShape(im2col), GetTensorData<uint8_t>(im2col),
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
break;
}
}
@@ -516,7 +514,7 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
GetTensorData<int32>(bias), GetTensorShape(output),
GetTensorData<int8>(output), GetTensorShape(im2col),
GetTensorData<int8>(im2col),
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
break;
}
}
@@ -564,7 +562,7 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
GetTensorData<float>(bias), GetTensorShape(output),
GetTensorData<float>(output), GetTensorShape(im2col),
GetTensorData<float>(im2col),
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
break;
}
case kMultithreadOptimized: {


@@ -20,6 +20,32 @@ limitations under the License.
namespace tflite {
CpuBackendContext* CpuBackendContext::GetFromContext(TfLiteContext* context) {
auto* external_context = static_cast<ExternalCpuBackendContext*>(
context->GetExternalContext(context, kTfLiteCpuBackendContext));
if (external_context == nullptr) {
TF_LITE_FATAL(
"ExternalCpuBackendContext isn't properly initialized during TFLite "
"interpreter initialization.");
}
auto* cpu_backend_context = static_cast<CpuBackendContext*>(
external_context->internal_backend_context());
if (cpu_backend_context == nullptr) {
// We do the lazy initialization here for the TfLiteInternalBackendContext
// that's wrapped inside ExternalCpuBackendContext.
cpu_backend_context = new CpuBackendContext();
if (context->recommended_num_threads != -1) {
cpu_backend_context->SetMaxNumThreads(context->recommended_num_threads);
}
external_context->set_internal_backend_context(
std::unique_ptr<TfLiteInternalBackendContext>(cpu_backend_context));
}
return cpu_backend_context;
}
CpuBackendContext::CpuBackendContext()
: TfLiteInternalBackendContext(),
ruy_context_(new ruy::Context),


@@ -26,6 +26,8 @@ namespace tflite {
class CpuBackendContext final : public TfLiteInternalBackendContext {
public:
static CpuBackendContext* GetFromContext(TfLiteContext* context);
CpuBackendContext();
~CpuBackendContext() override;


@@ -1,59 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/cpu_backend_support.h"
#include <memory>
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/external_cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace cpu_backend_support {
// TODO(b/130950871): Remove all references to the following two no-op functions
// once the new ExternalCpuBackendContext class is checked in.
void IncrementUsageCounter(TfLiteContext* context) {}
void DecrementUsageCounter(TfLiteContext* context) {}
CpuBackendContext* GetFromContext(TfLiteContext* context) {
auto* external_context = static_cast<ExternalCpuBackendContext*>(
context->GetExternalContext(context, kTfLiteCpuBackendContext));
if (external_context == nullptr) {
TF_LITE_FATAL(
"ExternalCpuBackendContext isn't properly initialized during TFLite "
"interpreter initialization.");
}
auto* cpu_backend_context = static_cast<CpuBackendContext*>(
external_context->internal_backend_context());
if (cpu_backend_context == nullptr) {
// We do the lazy initialization here for the TfLiteInternalBackendContext
// that's wrapped inside ExternalCpuBackendContext.
cpu_backend_context = new CpuBackendContext();
if (context->recommended_num_threads != -1) {
cpu_backend_context->SetMaxNumThreads(context->recommended_num_threads);
}
external_context->set_internal_backend_context(
std::unique_ptr<TfLiteInternalBackendContext>(cpu_backend_context));
}
return cpu_backend_context;
}
} // namespace cpu_backend_support
} // namespace tflite


@@ -1,34 +0,0 @@
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_SUPPORT_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_SUPPORT_H_
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
namespace tflite {
namespace cpu_backend_support {
CpuBackendContext* GetFromContext(TfLiteContext* context);
void IncrementUsageCounter(TfLiteContext* context);
void DecrementUsageCounter(TfLiteContext* context);
} // namespace cpu_backend_support
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_CPU_BACKEND_SUPPORT_H_


@@ -24,7 +24,7 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/cpu_backend_support.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_multithread.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
@@ -70,7 +70,6 @@ struct OpData {
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
cpu_backend_support::IncrementUsageCounter(context);
// This is a builtin op, so we don't use the contents in 'buffer', if any.
// Instead, we allocate a new object to carry information from Prepare() to
// Eval().
@@ -78,7 +77,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
}
void Free(TfLiteContext* context, void* buffer) {
cpu_backend_support::DecrementUsageCounter(context);
delete reinterpret_cast<OpData*>(buffer);
}
@@ -207,7 +205,7 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
GetTensorShape(filter), GetTensorData<float>(filter),
GetTensorShape(bias), GetTensorData<float>(bias),
GetTensorShape(output), GetTensorData<float>(output),
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
}
}
@@ -248,7 +246,7 @@ void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
GetTensorShape(filter), GetTensorData<uint8_t>(filter),
GetTensorShape(bias), GetTensorData<int32_t>(bias),
GetTensorShape(output), GetTensorData<uint8_t>(output),
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
}
}
@@ -290,7 +288,7 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
GetTensorData<int8>(filter), GetTensorShape(bias),
GetTensorData<int32>(bias), GetTensorShape(output),
GetTensorData<int8>(output),
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
}
}


@@ -25,7 +25,7 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/activation_functor.h"
#include "tensorflow/lite/kernels/cpu_backend_support.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h"
@@ -115,7 +115,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
// This is a builtin op, so we don't use the contents in 'buffer', if any.
// Instead, we allocate a new object to carry information from Prepare() to
// Eval().
cpu_backend_support::IncrementUsageCounter(context);
auto* op_data = new OpData();
context->AddTensors(context, /*tensors_to_add=*/2,
&op_data->scratch_tensor_index);
@@ -123,7 +122,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
}
void Free(TfLiteContext* context, void* buffer) {
cpu_backend_support::DecrementUsageCounter(context);
delete reinterpret_cast<OpData*>(buffer);
}
@@ -398,13 +396,13 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
GetTensorShape(filter), GetTensorData<uint8_t>(filter),
GetTensorShape(bias), GetTensorData<int32_t>(bias),
GetTensorShape(output), GetTensorData<uint8_t>(output),
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
}
break;
case kTfLiteInt8:
FullyConnectedInt8<kernel_type>(
data, input, filter, bias, output,
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
break;
case kTfLiteInt16:
if (kernel_type == kReference) {
@@ -419,7 +417,7 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node,
GetTensorShape(filter), GetTensorData<uint8_t>(filter),
GetTensorShape(bias), GetTensorData<int32_t>(bias),
GetTensorShape(output), GetTensorData<int16_t>(output),
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
}
break;
default:
@@ -456,7 +454,7 @@ TfLiteStatus EvalShuffledQuantized(TfLiteContext* context, TfLiteNode* node,
GetTensorShape(bias), GetTensorData<int32_t>(bias), \
GetTensorShape(output), GetTensorData<int16_t>(output), \
GetTensorData<uint8_t>(shuffled_input_workspace), \
cpu_backend_support::GetFromContext(context)); \
CpuBackendContext::GetFromContext(context)); \
}
FullyConnectedParams op_params;
op_params.output_multiplier = data->output_multiplier;
@@ -477,7 +475,7 @@ TfLiteStatus EvalShuffledQuantized(TfLiteContext* context, TfLiteNode* node,
GetTensorShape(bias), GetTensorData<int32_t>(bias),
GetTensorShape(output), GetTensorData<int16_t>(output),
GetTensorData<uint8_t>(shuffled_input_workspace),
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
}
#undef TF_LITE_SHUFFLED_FULLY_CONNECTED
@@ -512,7 +510,7 @@ TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node,
GetTensorShape(filter), GetTensorData<float>(filter),
GetTensorShape(bias), GetTensorData<float>(bias),
GetTensorShape(output), GetTensorData<float>(output),
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
}
return kTfLiteOk;


@@ -1,86 +0,0 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/gemmlowp_support.h"
#include <memory>
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace gemmlowp_support {
namespace {
struct RefCountedGemmlowpContext : public TfLiteExternalContext {
std::unique_ptr<gemmlowp::GemmContext> gemmlowp_context;
int num_references = 0;
};
RefCountedGemmlowpContext* GetGemmLowpContext(TfLiteContext* context) {
return reinterpret_cast<RefCountedGemmlowpContext*>(
context->GetExternalContext(context, kTfLiteGemmLowpContext));
}
TfLiteStatus Refresh(TfLiteContext* context) {
auto* ptr = GetGemmLowpContext(context);
if (ptr != nullptr) {
ptr->gemmlowp_context->set_max_num_threads(
context->recommended_num_threads);
}
return kTfLiteOk;
}
} // namespace
void IncrementUsageCounter(TfLiteContext* context) {
auto* ptr = GetGemmLowpContext(context);
if (ptr == nullptr) {
ptr = new RefCountedGemmlowpContext;
ptr->type = kTfLiteGemmLowpContext;
ptr->Refresh = Refresh;
ptr->gemmlowp_context.reset(new gemmlowp::GemmContext());
if (context->recommended_num_threads != -1) {
ptr->gemmlowp_context->set_max_num_threads(
context->recommended_num_threads);
}
ptr->num_references = 0;
context->SetExternalContext(context, kTfLiteGemmLowpContext, ptr);
}
ptr->num_references++;
}
void DecrementUsageCounter(TfLiteContext* context) {
auto* ptr = GetGemmLowpContext(context);
if (ptr == nullptr) {
TF_LITE_FATAL(
"Call to DecrementUsageCounter() not preceded by "
"IncrementUsageCounter()");
}
if (--ptr->num_references == 0) {
delete ptr;
context->SetExternalContext(context, kTfLiteGemmLowpContext, nullptr);
}
}
gemmlowp::GemmContext* GetFromContext(TfLiteContext* context) {
auto* ptr = GetGemmLowpContext(context);
if (ptr == nullptr) {
TF_LITE_FATAL(
"Call to GetFromContext() not preceded by IncrementUsageCounter()");
}
return ptr->gemmlowp_context.get();
}
} // namespace gemmlowp_support
} // namespace tflite


@@ -1,51 +0,0 @@
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_GEMMLOWP_SUPPORT_H_
#define TENSORFLOW_LITE_KERNELS_GEMMLOWP_SUPPORT_H_
#include "public/gemmlowp.h"
#include "tensorflow/lite/c/c_api_internal.h"
namespace tflite {
namespace gemmlowp_support {
// Returns the GemmContext stored in 'context', allowing multiple ops to
// share a single object, as long as they share a TfLiteContext. The caller
// must ensure that this is called between IncrementUsageCounter() and
// DecrementUsageCounter(). For example, in the implementation of an op:
// void* Init(TfLiteContext* context, const char*, size_t) {
// gemmlowp_support::IncrementUsageCounter(context);
// return nullptr;
// }
// void Free(TfLiteContext* context, void*) {
// gemmlowp_support::DecrementUsageCounter(context);
// }
// TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
// auto* gemmlowp_context = gemmlowp_support::GetFromContext(context);
// }
gemmlowp::GemmContext* GetFromContext(TfLiteContext* context);
// Let the framework know that the GemmContext stored in 'context' will be used
// by an op. If necessary a new GemmContext is created and placed in 'context'.
void IncrementUsageCounter(TfLiteContext* context);
// Let the framework know that the op stopped using the GemmContext stored in
// 'context'. If there are no more usages the GemmContext will be deleted.
void DecrementUsageCounter(TfLiteContext* context);
} // namespace gemmlowp_support
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_GEMMLOWP_SUPPORT_H_


@@ -23,7 +23,7 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/activation_functor.h"
#include "tensorflow/lite/kernels/cpu_backend_support.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/kernel_utils.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
@@ -796,7 +796,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
GetTensorShape(activation_out), GetTensorData<float>(activation_out),
GetTensorShape(concat_temp), GetTensorData<float>(concat_temp),
GetTensorShape(activation_temp), GetTensorData<float>(activation_temp),
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
} else if (input->type == kTfLiteUInt8 &&
prev_activation->type == kTfLiteUInt8 &&
weights->type == kTfLiteUInt8 && bias->type == kTfLiteInt32 &&
@@ -844,7 +844,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
GetTensorShape(concat_temp), GetTensorData<uint8_t>(concat_temp),
GetTensorShape(activation_temp),
GetTensorData<int16_t>(activation_temp),
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
} else {
context->ReportError(context,
"Unsupported combination of data types for LstmCell");
@@ -866,10 +866,8 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
const auto* params = reinterpret_cast<const TfLiteLSTMParams*>(buffer);
switch (params->kernel_type) {
case kTfLiteLSTMFullKernel:
cpu_backend_support::IncrementUsageCounter(context);
return full::Init(context, buffer, length);
case kTfLiteLSTMBasicKernel:
cpu_backend_support::IncrementUsageCounter(context);
return basic::Init(context, buffer, length);
default:
return nullptr;
@@ -877,8 +875,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
return nullptr;
}
void Free(TfLiteContext* context, void* buffer) {
cpu_backend_support::DecrementUsageCounter(context);
delete reinterpret_cast<OpData*>(buffer);
}


@@ -20,7 +20,7 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/cpu_backend_support.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/mean.h"
@@ -62,7 +62,6 @@ struct OpContext {
};
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
cpu_backend_support::IncrementUsageCounter(context);
// Creates two temp tensors to store index and axis for internal
// implementation only.
auto* op_data = new OpData();
@@ -71,7 +70,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
}
void Free(TfLiteContext* context, void* buffer) {
cpu_backend_support::DecrementUsageCounter(context);
delete reinterpret_cast<OpData*>(buffer);
}
@@ -306,7 +304,7 @@ TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) {
GetTensorData<uint8_t>(op_context.output),
op_context.output->params.zero_point,
op_context.output->params.scale,
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
} else {
reference_ops::Mean(op_params, GetTensorShape(input),
GetTensorData<float>(input),


@@ -21,7 +21,7 @@ limitations under the License.
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/kernels/cpu_backend_support.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/eigen_support.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
@@ -86,13 +86,11 @@ struct OpData {
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
auto* data = new OpData;
eigen_support::IncrementUsageCounter(context);
cpu_backend_support::IncrementUsageCounter(context);
return data;
}
void Free(TfLiteContext* context, void* buffer) {
eigen_support::DecrementUsageCounter(context);
cpu_backend_support::DecrementUsageCounter(context);
delete reinterpret_cast<OpData*>(buffer);
}
@@ -338,7 +336,7 @@ void EvalFloat(TfLiteContext* context, const TfLiteTransposeConvParams* params,
GetTensorData<float>(transposed_weights), GetTensorShape(output),
GetTensorData<float>(output), GetTensorShape(col2im),
GetTensorData<float>(col2im),
cpu_backend_support::GetFromContext(context));
CpuBackendContext::GetFromContext(context));
break;
}
}