Remove explicit abseil usage from core TFLite runtime

These abseil deps aren't critical, so remove them to avoid potential
conflicts with client libraries that also have abseil deps.

PiperOrigin-RevId: 314831735
Change-Id: I793947aa45e37421df6eaa46fa00d5cb8250bfcb
Authored by: Jared Duke, 2020-06-04 17:01:27 -07:00; committed by TensorFlower Gardener
parent 7a7207f3b6
commit 5515595afa
6 changed files with 13 additions and 27 deletions

View File

@ -37,9 +37,6 @@ cc_library(
"//tensorflow/lite/nnapi:nnapi_implementation", "//tensorflow/lite/nnapi:nnapi_implementation",
"//tensorflow/lite/nnapi:nnapi_lib", "//tensorflow/lite/nnapi:nnapi_lib",
"//tensorflow/lite/nnapi:nnapi_util", "//tensorflow/lite/nnapi:nnapi_util",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
"@com_google_absl//absl/types:optional",
], ],
) )

View File

@ -43,8 +43,6 @@ limitations under the License.
#include <unistd.h> #include <unistd.h>
#endif #endif
#include "absl/memory/memory.h"
#include "absl/types/optional.h"
#include "tensorflow/lite/allocation.h" #include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/builtin_op_data.h" #include "tensorflow/lite/builtin_op_data.h"
#include "tensorflow/lite/builtin_ops.h" #include "tensorflow/lite/builtin_ops.h"
@ -3361,7 +3359,7 @@ TfLiteStatus NNAPIDelegateKernel::GetOperationsSupportedByTargetNnApiDevices(
const auto nnapi_model_size = nnapi_to_tflite_op_mapping_.size(); const auto nnapi_model_size = nnapi_to_tflite_op_mapping_.size();
// Determine the list of operations the device actually supports // Determine the list of operations the device actually supports
auto nnapi_ops_support_flags = absl::make_unique<bool[]>(nnapi_model_size); std::unique_ptr<bool[]> nnapi_ops_support_flags(new bool[nnapi_model_size]);
RETURN_TFLITE_ERROR_IF_NN_ERROR( RETURN_TFLITE_ERROR_IF_NN_ERROR(
context, context,
@ -4152,17 +4150,16 @@ void StatefulNnApiDelegate::Data::CacheDelegateKernel(
delegate_state_cache.emplace(cache_key, delegate_state); delegate_state_cache.emplace(cache_key, delegate_state);
} }
absl::optional<NNAPIDelegateKernel*> NNAPIDelegateKernel* StatefulNnApiDelegate::Data::MaybeGetCachedDelegateKernel(
StatefulNnApiDelegate::Data::GetCachedDelegateKernel(
const TfLiteDelegateParams* delegate_params) { const TfLiteDelegateParams* delegate_params) {
const int cache_key = delegate_params->nodes_to_replace->data[0]; const int cache_key = delegate_params->nodes_to_replace->data[0];
const auto cached_state = delegate_state_cache.find(cache_key); const auto cached_state = delegate_state_cache.find(cache_key);
if (cached_state != std::end(delegate_state_cache)) { if (cached_state != std::end(delegate_state_cache)) {
auto result = absl::optional<NNAPIDelegateKernel*>(cached_state->second); auto result = cached_state->second;
delegate_state_cache.erase(cached_state); delegate_state_cache.erase(cached_state);
return result; return result;
} else { } else {
return absl::nullopt; return nullptr;
} }
} }
@ -4302,7 +4299,8 @@ TfLiteStatus StatefulNnApiDelegate::GetNodesSupportedByAccelerator(
delegate_data->delegate_state_cache.clear(); delegate_data->delegate_state_cache.clear();
for (int idx = 0; idx < *num_partitions; idx++) { for (int idx = 0; idx < *num_partitions; idx++) {
const auto& partition_params = (*params_array)[idx]; const auto& partition_params = (*params_array)[idx];
auto kernel_state = absl::make_unique<NNAPIDelegateKernel>(nnapi); std::unique_ptr<NNAPIDelegateKernel> kernel_state(
new NNAPIDelegateKernel(nnapi));
TfLiteDelegateParams params_with_delegate = partition_params; TfLiteDelegateParams params_with_delegate = partition_params;
params_with_delegate.delegate = delegate; params_with_delegate.delegate = delegate;
TF_LITE_ENSURE_STATUS( TF_LITE_ENSURE_STATUS(
@ -4471,13 +4469,9 @@ TfLiteStatus StatefulNnApiDelegate::DoPrepare(TfLiteContext* context,
auto* delegate_data = static_cast<Data*>(params->delegate->data_); auto* delegate_data = static_cast<Data*>(params->delegate->data_);
int* nnapi_errno = &(delegate_data->nnapi_errno); int* nnapi_errno = &(delegate_data->nnapi_errno);
auto delegate_state_maybe = NNAPIDelegateKernel* kernel_state =
delegate_data->GetCachedDelegateKernel(params); delegate_data->MaybeGetCachedDelegateKernel(params);
if (!kernel_state) {
NNAPIDelegateKernel* kernel_state;
if (delegate_state_maybe.has_value()) {
kernel_state = *delegate_state_maybe;
} else {
kernel_state = new NNAPIDelegateKernel(delegate_data->nnapi); kernel_state = new NNAPIDelegateKernel(delegate_data->nnapi);
kernel_state->Init(context, params, nnapi_errno); kernel_state->Init(context, params, nnapi_errno);
} }

View File

@ -20,7 +20,6 @@ limitations under the License.
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include "absl/types/optional.h"
#include "tensorflow/lite/c/common.h" #include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/nnapi/NeuralNetworksTypes.h" #include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
#include "tensorflow/lite/nnapi/nnapi_implementation.h" #include "tensorflow/lite/nnapi/nnapi_implementation.h"
@ -234,7 +233,7 @@ class StatefulNnApiDelegate : public TfLiteDelegate {
NNAPIDelegateKernel* delegate_state); NNAPIDelegateKernel* delegate_state);
// Returns a cached NNAPIDelegateKernel if available and removes it // Returns a cached NNAPIDelegateKernel if available and removes it
// from the cache transferring the ownership to the caller. // from the cache transferring the ownership to the caller.
absl::optional<NNAPIDelegateKernel*> GetCachedDelegateKernel( NNAPIDelegateKernel* MaybeGetCachedDelegateKernel(
const TfLiteDelegateParams* delegate_params); const TfLiteDelegateParams* delegate_params);
}; };

View File

@ -55,10 +55,9 @@ void StatefulNnApiDelegate::Data::CacheDelegateKernel(
const TfLiteDelegateParams* delegate_params, const TfLiteDelegateParams* delegate_params,
NNAPIDelegateKernel* delegate_state) {} NNAPIDelegateKernel* delegate_state) {}
absl::optional<NNAPIDelegateKernel*> NNAPIDelegateKernel* StatefulNnApiDelegate::Data::MaybeGetCachedDelegateKernel(
StatefulNnApiDelegate::Data::GetCachedDelegateKernel(
const TfLiteDelegateParams* delegate_params) { const TfLiteDelegateParams* delegate_params) {
return absl::nullopt; return nullptr;
} }
} // namespace tflite } // namespace tflite

View File

@ -537,8 +537,6 @@ BUILTIN_KERNEL_DEPS = [
":lstm_shared", ":lstm_shared",
":op_macros", ":op_macros",
":padding", ":padding",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
"//third_party/eigen3", "//third_party/eigen3",
"@flatbuffers", "@flatbuffers",
"//tensorflow/lite:framework_lib", "//tensorflow/lite:framework_lib",

View File

@ -21,7 +21,6 @@ limitations under the License.
#include <memory> #include <memory>
#include <vector> #include <vector>
#include "absl/memory/memory.h"
#include "tensorflow/lite/c/builtin_op_data.h" #include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h" #include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h" #include "tensorflow/lite/kernels/cpu_backend_context.h"
@ -1008,7 +1007,7 @@ TfLiteStatus PrecomputeZeroPointTimesWeightWithBias(
TF_LITE_ENSURE_EQ(context, weight_shape.DimensionsCount(), 2); TF_LITE_ENSURE_EQ(context, weight_shape.DimensionsCount(), 2);
const int row = weight_shape.Dims(0); const int row = weight_shape.Dims(0);
const int col = weight_shape.Dims(1); const int col = weight_shape.Dims(1);
*output = absl::make_unique<int32_t[]>(row); output->reset(new int32_t[row]);
if (bias_tensor == nullptr) { if (bias_tensor == nullptr) {
memset(output->get(), 0, row * sizeof(int32_t)); memset(output->get(), 0, row * sizeof(int32_t));
} else { } else {