From ade316deef9fabf49029b3c906fec8d9d545ac34 Mon Sep 17 00:00:00 2001
From: Chao Mei
Date: Mon, 22 Jul 2019 21:09:21 -0700
Subject: [PATCH] 1. Remove all references to tflite::cpu_backend_support, as
 we no longer do reference counting on the CPU backend context object and
 GetFromContext has been moved to the CpuBackendContext class as a static
 member function.
 2. Remove gemmlowp_support.{h,cc}, as its functionality has already been
 folded into the CpuBackendContext class.

PiperOrigin-RevId: 259464967
---
 tensorflow/lite/experimental/kernels/BUILD    |  1 -
 .../kernels/unidirectional_sequence_gru.cc    |  5 +-
 tensorflow/lite/kernels/BUILD                 | 19 +---
 tensorflow/lite/kernels/conv.cc               | 10 +--
 .../lite/kernels/cpu_backend_context.cc       | 26 ++++++
 tensorflow/lite/kernels/cpu_backend_context.h |  2 +
 .../lite/kernels/cpu_backend_support.cc       | 59 -------------
 tensorflow/lite/kernels/cpu_backend_support.h | 34 --------
 tensorflow/lite/kernels/depthwise_conv.cc     | 10 +--
 tensorflow/lite/kernels/fully_connected.cc    | 16 ++--
 tensorflow/lite/kernels/gemmlowp_support.cc   | 86 ------------------
 tensorflow/lite/kernels/gemmlowp_support.h    | 51 -----------
 tensorflow/lite/kernels/lstm.cc               | 10 +--
 tensorflow/lite/kernels/reduce.cc             |  6 +-
 tensorflow/lite/kernels/transpose_conv.cc     |  6 +-
 15 files changed, 52 insertions(+), 289 deletions(-)
 delete mode 100644 tensorflow/lite/kernels/cpu_backend_support.cc
 delete mode 100644 tensorflow/lite/kernels/cpu_backend_support.h
 delete mode 100644 tensorflow/lite/kernels/gemmlowp_support.cc
 delete mode 100644 tensorflow/lite/kernels/gemmlowp_support.h

diff --git a/tensorflow/lite/experimental/kernels/BUILD b/tensorflow/lite/experimental/kernels/BUILD
index aed87a2e643..e3d05ae4f51 100644
--- a/tensorflow/lite/experimental/kernels/BUILD
+++ b/tensorflow/lite/experimental/kernels/BUILD
@@ -106,7 +106,6 @@ cc_library(
         "//tensorflow/lite:framework",
         "//tensorflow/lite/c:c_api_internal",
         "//tensorflow/lite/kernels:cpu_backend_context",
-        "//tensorflow/lite/kernels:cpu_backend_support",
         "//tensorflow/lite/kernels:kernel_util",
         "//tensorflow/lite/kernels:op_macros",
         "//tensorflow/lite/kernels/internal:tensor",
diff --git a/tensorflow/lite/experimental/kernels/unidirectional_sequence_gru.cc b/tensorflow/lite/experimental/kernels/unidirectional_sequence_gru.cc
index fc0d681f3bc..9ef8107dc9f 100644
--- a/tensorflow/lite/experimental/kernels/unidirectional_sequence_gru.cc
+++ b/tensorflow/lite/experimental/kernels/unidirectional_sequence_gru.cc
@@ -18,7 +18,6 @@ limitations under the License.
 
#include "tensorflow/lite/c/c_api_internal.h" #include "tensorflow/lite/experimental/kernels/gru_cell.h" #include "tensorflow/lite/kernels/cpu_backend_context.h" -#include "tensorflow/lite/kernels/cpu_backend_support.h" #include "tensorflow/lite/kernels/internal/tensor.h" #include "tensorflow/lite/kernels/kernel_util.h" @@ -112,14 +111,12 @@ enum TemporaryTensor { }; void* Init(TfLiteContext* context, const char* buffer, size_t length) { - cpu_backend_support::IncrementUsageCounter(context); auto* scratch_tensor_index = new int; context->AddTensors(context, kTemporaryNum, scratch_tensor_index); return scratch_tensor_index; } void Free(TfLiteContext* context, void* buffer) { - cpu_backend_support::DecrementUsageCounter(context); delete reinterpret_cast(buffer); } @@ -221,7 +218,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output_state = GetOutput(context, node, kOutputState); TfLiteTensor* activation = GetTemporary(context, node, kActivation); TfLiteTensor* concat = GetTemporary(context, node, kConcat); - auto cpu_backend_context = cpu_backend_support::GetFromContext(context); + auto cpu_backend_context = CpuBackendContext::GetFromContext(context); if (gate_weight->type == kTfLiteFloat32) { GruImpl(input, input_state, gate_weight, gate_bias, candidate_weight, diff --git a/tensorflow/lite/kernels/BUILD b/tensorflow/lite/kernels/BUILD index ee9090902ce..2b550c95f08 100644 --- a/tensorflow/lite/kernels/BUILD +++ b/tensorflow/lite/kernels/BUILD @@ -308,23 +308,6 @@ cc_test( ], ) -cc_library( - name = "cpu_backend_support", - srcs = [ - "cpu_backend_support.cc", - ], - hdrs = [ - "cpu_backend_support.h", - ], - copts = tflite_copts(), - deps = [ - ":cpu_backend_context", - ":op_macros", - "//tensorflow/lite:external_cpu_backend_context", - "//tensorflow/lite/c:c_api_internal", - ], -) - cc_library( name = "activation_functor", hdrs = [ @@ -483,7 +466,7 @@ cc_library( visibility = ["//visibility:private"], deps = [ ":activation_functor", - ":cpu_backend_support", + ":cpu_backend_context", ":eigen_support", ":kernel_util", ":lstm_eval", diff --git a/tensorflow/lite/kernels/conv.cc b/tensorflow/lite/kernels/conv.cc index 072d6c6fc2c..6a42beab0f3 100644 --- a/tensorflow/lite/kernels/conv.cc +++ b/tensorflow/lite/kernels/conv.cc @@ -24,7 +24,7 @@ limitations under the License. #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/c_api_internal.h" -#include "tensorflow/lite/kernels/cpu_backend_support.h" +#include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/eigen_support.h" // b/131835803 forces us to include multithreaded_conv.h before optimized_ops.h #ifndef TFLITE_WITH_RUY @@ -115,13 +115,11 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) { // to carry information from Prepare() to Eval(). 
   auto* data = new OpData;
   eigen_support::IncrementUsageCounter(context);
-  cpu_backend_support::IncrementUsageCounter(context);
   return data;
 }
 
 void Free(TfLiteContext* context, void* buffer) {
   eigen_support::DecrementUsageCounter(context);
-  cpu_backend_support::DecrementUsageCounter(context);
   delete reinterpret_cast<OpData*>(buffer);
 }
 
@@ -472,7 +470,7 @@ void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
           GetTensorShape(bias), GetTensorData<int32>(bias),
           GetTensorShape(output), GetTensorData<uint8>(output),
           GetTensorShape(im2col), GetTensorData<uint8>(im2col),
-          cpu_backend_support::GetFromContext(context));
+          CpuBackendContext::GetFromContext(context));
       break;
     }
   }
@@ -516,7 +514,7 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
           GetTensorData<int32>(bias), GetTensorShape(output),
           GetTensorData<int8>(output), GetTensorShape(im2col),
           GetTensorData<int8>(im2col),
-          cpu_backend_support::GetFromContext(context));
+          CpuBackendContext::GetFromContext(context));
       break;
     }
   }
@@ -564,7 +562,7 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
           GetTensorData<float>(bias), GetTensorShape(output),
           GetTensorData<float>(output), GetTensorShape(im2col),
           GetTensorData<float>(im2col),
-          cpu_backend_support::GetFromContext(context));
+          CpuBackendContext::GetFromContext(context));
       break;
     }
     case kMultithreadOptimized: {
diff --git a/tensorflow/lite/kernels/cpu_backend_context.cc b/tensorflow/lite/kernels/cpu_backend_context.cc
index 63f12208630..0b38bb6998a 100644
--- a/tensorflow/lite/kernels/cpu_backend_context.cc
+++ b/tensorflow/lite/kernels/cpu_backend_context.cc
@@ -20,6 +20,32 @@ limitations under the License.
 
 namespace tflite {
 
+CpuBackendContext* CpuBackendContext::GetFromContext(TfLiteContext* context) {
+  auto* external_context = static_cast<ExternalCpuBackendContext*>(
+      context->GetExternalContext(context, kTfLiteCpuBackendContext));
+
+  if (external_context == nullptr) {
+    TF_LITE_FATAL(
+        "ExternalCpuBackendContext isn't properly initialized during TFLite "
+        "interpreter initialization.");
+  }
+
+  auto* cpu_backend_context = static_cast<CpuBackendContext*>(
+      external_context->internal_backend_context());
+  if (cpu_backend_context == nullptr) {
+    // We do the lazy initialization here for the TfLiteInternalBackendContext
+    // that's wrapped inside ExternalCpuBackendContext.
+    cpu_backend_context = new CpuBackendContext();
+    if (context->recommended_num_threads != -1) {
+      cpu_backend_context->SetMaxNumThreads(context->recommended_num_threads);
+    }
+    external_context->set_internal_backend_context(
+        std::unique_ptr<TfLiteInternalBackendContext>(cpu_backend_context));
+  }
+
+  return cpu_backend_context;
+}
+
 CpuBackendContext::CpuBackendContext()
     : TfLiteInternalBackendContext(),
       ruy_context_(new ruy::Context),
diff --git a/tensorflow/lite/kernels/cpu_backend_context.h b/tensorflow/lite/kernels/cpu_backend_context.h
index a55a951ac99..c64eae2f6f3 100644
--- a/tensorflow/lite/kernels/cpu_backend_context.h
+++ b/tensorflow/lite/kernels/cpu_backend_context.h
@@ -26,6 +26,8 @@ namespace tflite {
 
 class CpuBackendContext final : public TfLiteInternalBackendContext {
  public:
+  static CpuBackendContext* GetFromContext(TfLiteContext* context);
+
   CpuBackendContext();
   ~CpuBackendContext() override;
 
diff --git a/tensorflow/lite/kernels/cpu_backend_support.cc b/tensorflow/lite/kernels/cpu_backend_support.cc
deleted file mode 100644
index ab47d5b7e99..00000000000
--- a/tensorflow/lite/kernels/cpu_backend_support.cc
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include "tensorflow/lite/kernels/cpu_backend_support.h"
-
-#include <memory>
-
-#include "tensorflow/lite/c/c_api_internal.h"
-#include "tensorflow/lite/external_cpu_backend_context.h"
-#include "tensorflow/lite/kernels/cpu_backend_context.h"
-#include "tensorflow/lite/kernels/op_macros.h"
-
-namespace tflite {
-namespace cpu_backend_support {
-
-// TODO(b/130950871): Remove all references to the following two no-op
-// functions once the new ExternalCpuBackendContext class is checked in.
-void IncrementUsageCounter(TfLiteContext* context) {}
-void DecrementUsageCounter(TfLiteContext* context) {}
-
-CpuBackendContext* GetFromContext(TfLiteContext* context) {
-  auto* external_context = static_cast<ExternalCpuBackendContext*>(
-      context->GetExternalContext(context, kTfLiteCpuBackendContext));
-
-  if (external_context == nullptr) {
-    TF_LITE_FATAL(
-        "ExternalCpuBackendContext isn't properly initialized during TFLite "
-        "interpreter initialization.");
-  }
-
-  auto* cpu_backend_context = static_cast<CpuBackendContext*>(
-      external_context->internal_backend_context());
-  if (cpu_backend_context == nullptr) {
-    // We do the lazy initialization here for the TfLiteInternalBackendContext
-    // that's wrapped inside ExternalCpuBackendContext.
-    cpu_backend_context = new CpuBackendContext();
-    if (context->recommended_num_threads != -1) {
-      cpu_backend_context->SetMaxNumThreads(context->recommended_num_threads);
-    }
-    external_context->set_internal_backend_context(
-        std::unique_ptr<TfLiteInternalBackendContext>(cpu_backend_context));
-  }
-
-  return cpu_backend_context;
-}
-
-}  // namespace cpu_backend_support
-}  // namespace tflite
diff --git a/tensorflow/lite/kernels/cpu_backend_support.h b/tensorflow/lite/kernels/cpu_backend_support.h
deleted file mode 100644
index e7cec5cdd23..00000000000
--- a/tensorflow/lite/kernels/cpu_backend_support.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_SUPPORT_H_
-#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_SUPPORT_H_
-
-#include "tensorflow/lite/c/c_api_internal.h"
-#include "tensorflow/lite/kernels/cpu_backend_context.h"
-
-namespace tflite {
-
-namespace cpu_backend_support {
-
-CpuBackendContext* GetFromContext(TfLiteContext* context);
-
-void IncrementUsageCounter(TfLiteContext* context);
-
-void DecrementUsageCounter(TfLiteContext* context);
-
-}  // namespace cpu_backend_support
-}  // namespace tflite
-
-#endif  // TENSORFLOW_LITE_KERNELS_CPU_BACKEND_SUPPORT_H_
diff --git a/tensorflow/lite/kernels/depthwise_conv.cc b/tensorflow/lite/kernels/depthwise_conv.cc
index f3010549406..bfa3697c0a9 100644
--- a/tensorflow/lite/kernels/depthwise_conv.cc
+++ b/tensorflow/lite/kernels/depthwise_conv.cc
@@ -24,7 +24,7 @@ limitations under the License.
 
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/c_api_internal.h"
-#include "tensorflow/lite/kernels/cpu_backend_support.h"
+#include "tensorflow/lite/kernels/cpu_backend_context.h"
 #include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
 #include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_multithread.h"
 #include "tensorflow/lite/kernels/internal/quantization_util.h"
@@ -70,7 +70,6 @@ struct OpData {
 };
 
 void* Init(TfLiteContext* context, const char* buffer, size_t length) {
-  cpu_backend_support::IncrementUsageCounter(context);
   // This is a builtin op, so we don't use the contents in 'buffer', if any.
   // Instead, we allocate a new object to carry information from Prepare() to
   // Eval().
@@ -78,7 +77,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
 }
 
 void Free(TfLiteContext* context, void* buffer) {
-  cpu_backend_support::DecrementUsageCounter(context);
   delete reinterpret_cast<OpData*>(buffer);
 }
 
@@ -207,7 +205,7 @@ void EvalFloat(TfLiteContext* context, TfLiteNode* node,
           GetTensorShape(filter), GetTensorData<float>(filter),
           GetTensorShape(bias), GetTensorData<float>(bias),
           GetTensorShape(output), GetTensorData<float>(output),
-          cpu_backend_support::GetFromContext(context));
+          CpuBackendContext::GetFromContext(context));
   }
 }
 
@@ -248,7 +246,7 @@ void EvalQuantized(TfLiteContext* context, TfLiteNode* node,
           GetTensorShape(filter), GetTensorData<uint8>(filter),
           GetTensorShape(bias), GetTensorData<int32>(bias),
           GetTensorShape(output), GetTensorData<uint8>(output),
-          cpu_backend_support::GetFromContext(context));
+          CpuBackendContext::GetFromContext(context));
   }
 }
 
@@ -290,7 +288,7 @@ void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node,
           GetTensorData<int8>(filter), GetTensorShape(bias),
           GetTensorData<int32>(bias), GetTensorShape(output),
           GetTensorData<int8>(output),
-          cpu_backend_support::GetFromContext(context));
+          CpuBackendContext::GetFromContext(context));
   }
 }
 
diff --git a/tensorflow/lite/kernels/fully_connected.cc b/tensorflow/lite/kernels/fully_connected.cc
index bca595eb836..64da1533614 100644
--- a/tensorflow/lite/kernels/fully_connected.cc
+++ b/tensorflow/lite/kernels/fully_connected.cc
@@ -25,7 +25,7 @@ limitations under the License.
 
#include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/c_api_internal.h" #include "tensorflow/lite/kernels/activation_functor.h" -#include "tensorflow/lite/kernels/cpu_backend_support.h" +#include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" @@ -115,7 +115,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) { // This is a builtin op, so we don't use the contents in 'buffer', if any. // Instead, we allocate a new object to carry information from Prepare() to // Eval(). - cpu_backend_support::IncrementUsageCounter(context); auto* op_data = new OpData(); context->AddTensors(context, /*tensors_to_add=*/2, &op_data->scratch_tensor_index); @@ -123,7 +122,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) { } void Free(TfLiteContext* context, void* buffer) { - cpu_backend_support::DecrementUsageCounter(context); delete reinterpret_cast(buffer); } @@ -398,13 +396,13 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, GetTensorShape(filter), GetTensorData(filter), GetTensorShape(bias), GetTensorData(bias), GetTensorShape(output), GetTensorData(output), - cpu_backend_support::GetFromContext(context)); + CpuBackendContext::GetFromContext(context)); } break; case kTfLiteInt8: FullyConnectedInt8( data, input, filter, bias, output, - cpu_backend_support::GetFromContext(context)); + CpuBackendContext::GetFromContext(context)); break; case kTfLiteInt16: if (kernel_type == kReference) { @@ -419,7 +417,7 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, GetTensorShape(filter), GetTensorData(filter), GetTensorShape(bias), GetTensorData(bias), GetTensorShape(output), GetTensorData(output), - cpu_backend_support::GetFromContext(context)); + CpuBackendContext::GetFromContext(context)); } break; default: @@ -456,7 +454,7 @@ TfLiteStatus EvalShuffledQuantized(TfLiteContext* context, TfLiteNode* node, GetTensorShape(bias), GetTensorData(bias), \ GetTensorShape(output), GetTensorData(output), \ GetTensorData(shuffled_input_workspace), \ - cpu_backend_support::GetFromContext(context)); \ + CpuBackendContext::GetFromContext(context)); \ } FullyConnectedParams op_params; op_params.output_multiplier = data->output_multiplier; @@ -477,7 +475,7 @@ TfLiteStatus EvalShuffledQuantized(TfLiteContext* context, TfLiteNode* node, GetTensorShape(bias), GetTensorData(bias), GetTensorShape(output), GetTensorData(output), GetTensorData(shuffled_input_workspace), - cpu_backend_support::GetFromContext(context)); + CpuBackendContext::GetFromContext(context)); } #undef TF_LITE_SHUFFLED_FULLY_CONNECTED @@ -512,7 +510,7 @@ TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, GetTensorShape(filter), GetTensorData(filter), GetTensorShape(bias), GetTensorData(bias), GetTensorShape(output), GetTensorData(output), - cpu_backend_support::GetFromContext(context)); + CpuBackendContext::GetFromContext(context)); } return kTfLiteOk; diff --git a/tensorflow/lite/kernels/gemmlowp_support.cc b/tensorflow/lite/kernels/gemmlowp_support.cc deleted file mode 100644 index 410a72ca3f6..00000000000 --- a/tensorflow/lite/kernels/gemmlowp_support.cc +++ /dev/null @@ -1,86 +0,0 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#include "tensorflow/lite/kernels/gemmlowp_support.h"
-
-#include <memory>
-
-#include "tensorflow/lite/kernels/op_macros.h"
-
-namespace tflite {
-namespace gemmlowp_support {
-namespace {
-
-struct RefCountedGemmlowpContext : public TfLiteExternalContext {
-  std::unique_ptr<gemmlowp::GemmContext> gemmlowp_context;
-  int num_references = 0;
-};
-
-RefCountedGemmlowpContext* GetGemmLowpContext(TfLiteContext* context) {
-  return reinterpret_cast<RefCountedGemmlowpContext*>(
-      context->GetExternalContext(context, kTfLiteGemmLowpContext));
-}
-
-TfLiteStatus Refresh(TfLiteContext* context) {
-  auto* ptr = GetGemmLowpContext(context);
-  if (ptr != nullptr) {
-    ptr->gemmlowp_context->set_max_num_threads(
-        context->recommended_num_threads);
-  }
-  return kTfLiteOk;
-}
-
-}  // namespace
-
-void IncrementUsageCounter(TfLiteContext* context) {
-  auto* ptr = GetGemmLowpContext(context);
-  if (ptr == nullptr) {
-    ptr = new RefCountedGemmlowpContext;
-    ptr->type = kTfLiteGemmLowpContext;
-    ptr->Refresh = Refresh;
-    ptr->gemmlowp_context.reset(new gemmlowp::GemmContext());
-    if (context->recommended_num_threads != -1) {
-      ptr->gemmlowp_context->set_max_num_threads(
-          context->recommended_num_threads);
-    }
-    ptr->num_references = 0;
-    context->SetExternalContext(context, kTfLiteGemmLowpContext, ptr);
-  }
-  ptr->num_references++;
-}
-
-void DecrementUsageCounter(TfLiteContext* context) {
-  auto* ptr = GetGemmLowpContext(context);
-  if (ptr == nullptr) {
-    TF_LITE_FATAL(
-        "Call to DecrementUsageCounter() not preceded by "
-        "IncrementUsageCounter()");
-  }
-  if (--ptr->num_references == 0) {
-    delete ptr;
-    context->SetExternalContext(context, kTfLiteGemmLowpContext, nullptr);
-  }
-}
-
-gemmlowp::GemmContext* GetFromContext(TfLiteContext* context) {
-  auto* ptr = GetGemmLowpContext(context);
-  if (ptr == nullptr) {
-    TF_LITE_FATAL(
-        "Call to GetFromContext() not preceded by IncrementUsageCounter()");
-  }
-  return ptr->gemmlowp_context.get();
-}
-
-}  // namespace gemmlowp_support
-}  // namespace tflite
diff --git a/tensorflow/lite/kernels/gemmlowp_support.h b/tensorflow/lite/kernels/gemmlowp_support.h
deleted file mode 100644
index 9679326a533..00000000000
--- a/tensorflow/lite/kernels/gemmlowp_support.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-#ifndef TENSORFLOW_LITE_KERNELS_GEMMLOWP_SUPPORT_H_
-#define TENSORFLOW_LITE_KERNELS_GEMMLOWP_SUPPORT_H_
-
-#include "public/gemmlowp.h"
-#include "tensorflow/lite/c/c_api_internal.h"
-
-namespace tflite {
-namespace gemmlowp_support {
-
-// Returns the GemmContext stored in 'context', allowing multiple ops to
-// share a single object, as long as they share a TfLiteContext. The caller
-// must ensure that this is called between IncrementUsageCounter() and
-// DecrementUsageCounter(). For example, in the implementation of an op:
-//   void* Init(TfLiteContext* context, const char*, size_t) {
-//     gemmlowp_support::IncrementUsageCounter(context);
-//     return nullptr;
-//   }
-//   void Free(TfLiteContext* context, void*) {
-//     gemmlowp_support::DecrementUsageCounter(context);
-//   }
-//   TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
-//     auto* gemmlowp_context = gemmlowp_support::GetFromContext(context);
-//   }
-gemmlowp::GemmContext* GetFromContext(TfLiteContext* context);
-
-// Let the framework know that the GemmContext stored in 'context' will be used
-// by an op. If necessary a new GemmContext is created and placed in 'context'.
-void IncrementUsageCounter(TfLiteContext* context);
-
-// Let the framework know that the op stopped using the GemmContext stored in
-// 'context'. If there are no more usages the GemmContext will be deleted.
-void DecrementUsageCounter(TfLiteContext* context);
-
-}  // namespace gemmlowp_support
-}  // namespace tflite
-
-#endif  // TENSORFLOW_LITE_KERNELS_GEMMLOWP_SUPPORT_H_
diff --git a/tensorflow/lite/kernels/lstm.cc b/tensorflow/lite/kernels/lstm.cc
index 19ec80889e7..1dfd0a9dacc 100644
--- a/tensorflow/lite/kernels/lstm.cc
+++ b/tensorflow/lite/kernels/lstm.cc
@@ -23,7 +23,7 @@ limitations under the License.
 
#include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/c_api_internal.h" #include "tensorflow/lite/kernels/activation_functor.h" -#include "tensorflow/lite/kernels/cpu_backend_support.h" +#include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/kernel_utils.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/tensor.h" @@ -796,7 +796,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { GetTensorShape(activation_out), GetTensorData(activation_out), GetTensorShape(concat_temp), GetTensorData(concat_temp), GetTensorShape(activation_temp), GetTensorData(activation_temp), - cpu_backend_support::GetFromContext(context)); + CpuBackendContext::GetFromContext(context)); } else if (input->type == kTfLiteUInt8 && prev_activation->type == kTfLiteUInt8 && weights->type == kTfLiteUInt8 && bias->type == kTfLiteInt32 && @@ -844,7 +844,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { GetTensorShape(concat_temp), GetTensorData(concat_temp), GetTensorShape(activation_temp), GetTensorData(activation_temp), - cpu_backend_support::GetFromContext(context)); + CpuBackendContext::GetFromContext(context)); } else { context->ReportError(context, "Unsupported combination of data types for LstmCell"); @@ -866,10 +866,8 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) { const auto* params = reinterpret_cast(buffer); switch (params->kernel_type) { case kTfLiteLSTMFullKernel: - cpu_backend_support::IncrementUsageCounter(context); return full::Init(context, buffer, length); case kTfLiteLSTMBasicKernel: - cpu_backend_support::IncrementUsageCounter(context); return basic::Init(context, buffer, length); default: return nullptr; @@ -877,8 +875,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) { return nullptr; } void Free(TfLiteContext* context, void* buffer) { - cpu_backend_support::DecrementUsageCounter(context); - delete reinterpret_cast(buffer); } diff --git a/tensorflow/lite/kernels/reduce.cc b/tensorflow/lite/kernels/reduce.cc index d28ec70f98a..3474a403495 100644 --- a/tensorflow/lite/kernels/reduce.cc +++ b/tensorflow/lite/kernels/reduce.cc @@ -20,7 +20,7 @@ limitations under the License. #include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/c_api_internal.h" -#include "tensorflow/lite/kernels/cpu_backend_support.h" +#include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h" #include "tensorflow/lite/kernels/internal/quantization_util.h" #include "tensorflow/lite/kernels/internal/reference/integer_ops/mean.h" @@ -62,7 +62,6 @@ struct OpContext { }; void* Init(TfLiteContext* context, const char* buffer, size_t length) { - cpu_backend_support::IncrementUsageCounter(context); // Creates two temp tensors to store index and axis for internal // implementation only. 
   auto* op_data = new OpData();
@@ -71,7 +70,6 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) {
 }
 
 void Free(TfLiteContext* context, void* buffer) {
-  cpu_backend_support::DecrementUsageCounter(context);
   delete reinterpret_cast<OpData*>(buffer);
 }
 
@@ -306,7 +304,7 @@ TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) {
             GetTensorData<uint8_t>(op_context.output),
             op_context.output->params.zero_point,
             op_context.output->params.scale,
-            cpu_backend_support::GetFromContext(context));
+            CpuBackendContext::GetFromContext(context));
       } else {
         reference_ops::Mean(op_params, GetTensorShape(input),
                             GetTensorData<uint8_t>(input),
diff --git a/tensorflow/lite/kernels/transpose_conv.cc b/tensorflow/lite/kernels/transpose_conv.cc
index 8bca828a1d9..c4447b2a468 100644
--- a/tensorflow/lite/kernels/transpose_conv.cc
+++ b/tensorflow/lite/kernels/transpose_conv.cc
@@ -21,7 +21,7 @@ limitations under the License.
 
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/c_api_internal.h"
-#include "tensorflow/lite/kernels/cpu_backend_support.h"
+#include "tensorflow/lite/kernels/cpu_backend_context.h"
 #include "tensorflow/lite/kernels/eigen_support.h"
 #include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
 #include "tensorflow/lite/kernels/internal/tensor.h"
@@ -86,13 +86,11 @@ struct OpData {
 
 void* Init(TfLiteContext* context, const char* buffer, size_t length) {
   auto* data = new OpData;
   eigen_support::IncrementUsageCounter(context);
-  cpu_backend_support::IncrementUsageCounter(context);
   return data;
 }
 
 void Free(TfLiteContext* context, void* buffer) {
   eigen_support::DecrementUsageCounter(context);
-  cpu_backend_support::DecrementUsageCounter(context);
   delete reinterpret_cast<OpData*>(buffer);
 }
 
@@ -338,7 +336,7 @@ void EvalFloat(TfLiteContext* context, const TfLiteTransposeConvParams* params,
             GetTensorData<float>(transposed_weights), GetTensorShape(output),
             GetTensorData<float>(output), GetTensorShape(col2im),
             GetTensorData<float>(col2im),
-            cpu_backend_support::GetFromContext(context));
+            CpuBackendContext::GetFromContext(context));
       break;
     }
   }
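
Usage note (not part of the applied diff): every kernel touched above follows
the same migration, so it is summarized here once. Kernels no longer bracket
the CPU backend context with IncrementUsageCounter()/DecrementUsageCounter()
in Init()/Free(); they fetch the shared context directly in Eval(), and
CpuBackendContext::GetFromContext() lazily creates it on first use. Below is a
minimal sketch of the resulting kernel pattern; the MyOpData struct is a
hypothetical placeholder for illustration, not a type from this patch.

    #include "tensorflow/lite/c/c_api_internal.h"
    #include "tensorflow/lite/kernels/cpu_backend_context.h"

    namespace tflite {

    struct MyOpData {};  // Hypothetical per-op state carried Prepare -> Eval.

    void* Init(TfLiteContext* context, const char* buffer, size_t length) {
      // No cpu_backend_support::IncrementUsageCounter(context) call anymore:
      // the backend context is no longer reference-counted per op.
      return new MyOpData;
    }

    void Free(TfLiteContext* context, void* buffer) {
      // Likewise, no cpu_backend_support::DecrementUsageCounter(context).
      delete reinterpret_cast<MyOpData*>(buffer);
    }

    TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
      // Returns the CpuBackendContext wrapped by the interpreter's
      // ExternalCpuBackendContext, creating it on first use and seeding its
      // thread count from context->recommended_num_threads.
      CpuBackendContext* backend = CpuBackendContext::GetFromContext(context);
      (void)backend;  // A real kernel passes this into its gemm/conv routines.
      return kTfLiteOk;
    }

    }  // namespace tflite

This mirrors the edits to conv.cc, depthwise_conv.cc, fully_connected.cc,
lstm.cc, reduce.cc, and transpose_conv.cc above; only the eigen_support
helpers still use the usage-counter scheme.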