Remove explicit abseil usage from core TFLite runtime
These abseil deps aren't critical, so remove them to avoid potential conflicts with client libraries that also have abseil deps.

PiperOrigin-RevId: 314831735
Change-Id: I793947aa45e37421df6eaa46fa00d5cb8250bfcb
parent 7a7207f3b6
commit 5515595afa
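Across the diff the substitutions are mechanical: `absl::make_unique` becomes direct `std::unique_ptr` construction, and `absl::optional<NNAPIDelegateKernel*>` becomes a plain pointer with `nullptr` meaning "no cached kernel". A minimal sketch of the three patterns, using illustrative names that are not from the diff:

```cpp
#include <memory>

struct Kernel {};  // hypothetical stand-in for NNAPIDelegateKernel

int main() {
  // 1. absl::make_unique<T>(args...)  ->  std::unique_ptr<T>(new T(args...))
  std::unique_ptr<Kernel> k(new Kernel());

  // 2. absl::make_unique<T[]>(n)  ->  std::unique_ptr<T[]>(new T[n])
  std::unique_ptr<bool[]> flags(new bool[4]);

  // 3. absl::optional<T*> / absl::nullopt  ->  T* / nullptr
  Kernel* maybe_cached = nullptr;  // "absent" is now just the null pointer
  return maybe_cached ? 1 : 0;
}
```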
```diff
@@ -37,9 +37,6 @@ cc_library(
         "//tensorflow/lite/nnapi:nnapi_implementation",
         "//tensorflow/lite/nnapi:nnapi_lib",
         "//tensorflow/lite/nnapi:nnapi_util",
-        "@com_google_absl//absl/memory",
-        "@com_google_absl//absl/strings",
-        "@com_google_absl//absl/types:optional",
     ],
 )
 
```
```diff
@@ -43,8 +43,6 @@ limitations under the License.
 #include <unistd.h>
 #endif
 
-#include "absl/memory/memory.h"
-#include "absl/types/optional.h"
 #include "tensorflow/lite/allocation.h"
 #include "tensorflow/lite/builtin_op_data.h"
 #include "tensorflow/lite/builtin_ops.h"
```
```diff
@@ -3361,7 +3359,7 @@ TfLiteStatus NNAPIDelegateKernel::GetOperationsSupportedByTargetNnApiDevices(
   const auto nnapi_model_size = nnapi_to_tflite_op_mapping_.size();
 
   // Determine the list of operations the device actually supports
-  auto nnapi_ops_support_flags = absl::make_unique<bool[]>(nnapi_model_size);
+  std::unique_ptr<bool[]> nnapi_ops_support_flags(new bool[nnapi_model_size]);
 
   RETURN_TFLITE_ERROR_IF_NN_ERROR(
       context,
```
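Note that this replacement drops the value-initialization `absl::make_unique<bool[]>` performs: `new bool[n]` leaves the array uninitialized, which is only safe when every element is written before it is read, as presumably the NNAPI support-query call does here. A minimal sketch of the difference:

```cpp
#include <cstddef>
#include <memory>

int main() {
  const std::size_t n = 16;

  // Before: absl::make_unique<bool[]>(n) value-initializes, so every
  // flag starts out false.
  // After: plain new allocates the same array but leaves the elements
  // uninitialized; `new bool[n]()` would be needed to keep zeroing.
  std::unique_ptr<bool[]> flags(new bool[n]);
  for (std::size_t i = 0; i < n; ++i) {
    flags[i] = false;  // stand-in for the call that fills the real array
  }
  return flags[0] ? 1 : 0;
}
```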
```diff
@@ -4152,17 +4150,16 @@ void StatefulNnApiDelegate::Data::CacheDelegateKernel(
   delegate_state_cache.emplace(cache_key, delegate_state);
 }
 
-absl::optional<NNAPIDelegateKernel*>
-StatefulNnApiDelegate::Data::GetCachedDelegateKernel(
+NNAPIDelegateKernel* StatefulNnApiDelegate::Data::MaybeGetCachedDelegateKernel(
     const TfLiteDelegateParams* delegate_params) {
   const int cache_key = delegate_params->nodes_to_replace->data[0];
   const auto cached_state = delegate_state_cache.find(cache_key);
   if (cached_state != std::end(delegate_state_cache)) {
-    auto result = absl::optional<NNAPIDelegateKernel*>(cached_state->second);
+    auto result = cached_state->second;
     delegate_state_cache.erase(cached_state);
     return result;
   } else {
-    return absl::nullopt;
+    return nullptr;
   }
 }
 
```
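Returning a raw pointer lets `nullptr` stand in for `absl::nullopt`, which is unambiguous here because a cached kernel pointer is never null. A standalone sketch of the same take-from-cache pattern, with hypothetical `Kernel` and `KernelCache` names that are not from the diff:

```cpp
#include <cstdio>
#include <string>
#include <unordered_map>

struct Kernel { std::string name; };

// Look up a raw pointer by key and, if present, remove the entry,
// handing ownership to the caller; nullptr signals "not found".
class KernelCache {
 public:
  void Put(int key, Kernel* kernel) { cache_[key] = kernel; }

  Kernel* MaybeTake(int key) {
    auto it = cache_.find(key);
    if (it == cache_.end()) return nullptr;
    Kernel* result = it->second;
    cache_.erase(it);  // ownership transfers to the caller
    return result;
  }

 private:
  std::unordered_map<int, Kernel*> cache_;
};

int main() {
  KernelCache cache;
  cache.Put(7, new Kernel{"conv"});
  if (Kernel* k = cache.MaybeTake(7)) {
    std::printf("took %s\n", k->name.c_str());
    delete k;  // caller now owns the kernel
  }
  return 0;
}
```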
```diff
@@ -4302,7 +4299,8 @@ TfLiteStatus StatefulNnApiDelegate::GetNodesSupportedByAccelerator(
   delegate_data->delegate_state_cache.clear();
   for (int idx = 0; idx < *num_partitions; idx++) {
     const auto& partition_params = (*params_array)[idx];
-    auto kernel_state = absl::make_unique<NNAPIDelegateKernel>(nnapi);
+    std::unique_ptr<NNAPIDelegateKernel> kernel_state(
+        new NNAPIDelegateKernel(nnapi));
     TfLiteDelegateParams params_with_delegate = partition_params;
     params_with_delegate.delegate = delegate;
     TF_LITE_ENSURE_STATUS(
```
```diff
@@ -4471,13 +4469,9 @@ TfLiteStatus StatefulNnApiDelegate::DoPrepare(TfLiteContext* context,
         auto* delegate_data = static_cast<Data*>(params->delegate->data_);
         int* nnapi_errno = &(delegate_data->nnapi_errno);
 
-        auto delegate_state_maybe =
-            delegate_data->GetCachedDelegateKernel(params);
-
-        NNAPIDelegateKernel* kernel_state;
-        if (delegate_state_maybe.has_value()) {
-          kernel_state = *delegate_state_maybe;
-        } else {
+        NNAPIDelegateKernel* kernel_state =
+            delegate_data->MaybeGetCachedDelegateKernel(params);
+        if (!kernel_state) {
           kernel_state = new NNAPIDelegateKernel(delegate_data->nnapi);
           kernel_state->Init(context, params, nnapi_errno);
         }
 
```
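On the caller side the optional unwrapping collapses into a single null check. The wrapper was arguably redundant to begin with: `optional<T*>` can distinguish "no value" from "a null pointer", a distinction this cache never needs. A small illustration of that extra state, using `std::optional` as a stand-in for `absl::optional`:

```cpp
#include <cassert>
#include <optional>

// Illustrative only: optional<T*> has two distinct "empty-looking"
// states, nullopt and an engaged nullptr, which callers must keep
// straight. When nullptr can itself mean "absent", the wrapper adds
// state without adding meaning, so the commit returns a plain pointer.
int main() {
  std::optional<int*> missing;            // disengaged: no pointer at all
  std::optional<int*> engaged = nullptr;  // engaged, but holds nullptr
  assert(!missing.has_value());
  assert(engaged.has_value() && *engaged == nullptr);
  return 0;
}
```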
```diff
@@ -20,7 +20,6 @@ limitations under the License.
 #include <unordered_map>
 #include <vector>
 
-#include "absl/types/optional.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/nnapi/NeuralNetworksTypes.h"
 #include "tensorflow/lite/nnapi/nnapi_implementation.h"
```
```diff
@@ -234,7 +233,7 @@ class StatefulNnApiDelegate : public TfLiteDelegate {
                             NNAPIDelegateKernel* delegate_state);
     // Returns a cached NNAPIDelegateKernel if available and removes it
     // from the cache transferring the ownership to the caller.
-    absl::optional<NNAPIDelegateKernel*> GetCachedDelegateKernel(
+    NNAPIDelegateKernel* MaybeGetCachedDelegateKernel(
         const TfLiteDelegateParams* delegate_params);
   };
 
```
```diff
@@ -55,10 +55,9 @@ void StatefulNnApiDelegate::Data::CacheDelegateKernel(
     const TfLiteDelegateParams* delegate_params,
     NNAPIDelegateKernel* delegate_state) {}
 
-absl::optional<NNAPIDelegateKernel*>
-StatefulNnApiDelegate::Data::GetCachedDelegateKernel(
+NNAPIDelegateKernel* StatefulNnApiDelegate::Data::MaybeGetCachedDelegateKernel(
     const TfLiteDelegateParams* delegate_params) {
-  return absl::nullopt;
+  return nullptr;
 }
 
 }  // namespace tflite
```
```diff
@@ -537,8 +537,6 @@ BUILTIN_KERNEL_DEPS = [
     ":lstm_shared",
     ":op_macros",
     ":padding",
-    "@com_google_absl//absl/memory",
-    "@com_google_absl//absl/strings",
     "//third_party/eigen3",
     "@flatbuffers",
     "//tensorflow/lite:framework_lib",
```
```diff
@@ -21,7 +21,6 @@ limitations under the License.
 #include <memory>
 #include <vector>
 
-#include "absl/memory/memory.h"
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/cpu_backend_context.h"
```
```diff
@@ -1008,7 +1007,7 @@ TfLiteStatus PrecomputeZeroPointTimesWeightWithBias(
   TF_LITE_ENSURE_EQ(context, weight_shape.DimensionsCount(), 2);
   const int row = weight_shape.Dims(0);
   const int col = weight_shape.Dims(1);
-  *output = absl::make_unique<int32_t[]>(row);
+  output->reset(new int32_t[row]);
   if (bias_tensor == nullptr) {
     memset(output->get(), 0, row * sizeof(int32_t));
   } else {
```
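As with the `bool[]` case above, `output->reset(new int32_t[row])` skips the zero-fill that `absl::make_unique<int32_t[]>` would have done; the function stays correct because both branches overwrite the whole array, the nullptr-bias branch via `memset`. A compilable sketch of that pattern (standalone, not the delegate code itself):

```cpp
#include <cstdint>
#include <cstring>
#include <memory>

int main() {
  const int row = 8;
  std::unique_ptr<int32_t[]> output;
  output.reset(new int32_t[row]);  // uninitialized storage, unlike make_unique
  // Zero it explicitly, as the bias_tensor == nullptr branch does:
  std::memset(output.get(), 0, row * sizeof(int32_t));
  return output[0];  // 0 after the memset
}
```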