diff --git a/tensorflow/lite/kernels/BUILD b/tensorflow/lite/kernels/BUILD
index 6f6d111fd77..3a29fee5699 100644
--- a/tensorflow/lite/kernels/BUILD
+++ b/tensorflow/lite/kernels/BUILD
@@ -386,7 +386,7 @@ cc_library(
         "//tensorflow/lite/c:common",
         "//tensorflow/lite/kernels/internal:cppmath",
         "//tensorflow/lite/kernels/internal:quantization_util",
-        "@flatbuffers",
+        "@flatbuffers//:runtime_cc",
     ],
 )
 
diff --git a/tensorflow/lite/kernels/internal/BUILD b/tensorflow/lite/kernels/internal/BUILD
index 93292fbb640..d6a96efdbf7 100644
--- a/tensorflow/lite/kernels/internal/BUILD
+++ b/tensorflow/lite/kernels/internal/BUILD
@@ -629,7 +629,6 @@ cc_library(
         ":cppmath",
         "//tensorflow/lite:minimal_logging",
         "//tensorflow/lite/c:common",
-        "//tensorflow/lite/kernels:cpu_backend_context",
         "@gemmlowp",
     ],
 )
@@ -785,7 +784,6 @@ cc_library(
     deps = [
         ":cpu_check",
         "//tensorflow/lite/c:common",
-        "//tensorflow/lite/kernels:cpu_backend_context",
         "//third_party/eigen3",
     ],
 )
@@ -819,6 +817,7 @@ cc_test(
         ":quantization_util",
         ":tensor_utils",
         "//tensorflow/lite/c:common",
+        "//tensorflow/lite/kernels:cpu_backend_context",
         "//tensorflow/lite/kernels:test_util",
         "@com_google_googletest//:gtest_main",
     ],
diff --git a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
index 22e37d5af71..0e66dfee191 100644
--- a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
+++ b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc
@@ -21,7 +21,6 @@ limitations under the License.
 
 #include "fixedpoint/fixedpoint.h"
 #include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/kernels/cpu_backend_context.h"
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/cppmath.h"
@@ -53,7 +52,7 @@ void PortableSymmetricQuantizeFloats(const float* values, const int size,
 void PortableSymmetricQuantizeFloats(const float* values, const int size,
                                      int8_t* quantized_values, float min_value,
                                      float max_value, float* scaling_factor) {
-  const int kScale = 127;
+  const int32_t kScale = 127;
   const float range = std::max(std::abs(min_value), std::abs(max_value));
   if (range == 0) {
     memset(quantized_values, 0, size * sizeof(int8_t));
@@ -66,7 +65,8 @@ void PortableSymmetricQuantizeFloats(const float* values, const int size,
     const int32_t quantized_value =
         static_cast<int32_t>(TfLiteRound(values[i] * scaling_factor_inv));
     // Clamp: just in case some odd numeric offset.
-    quantized_values[i] = std::min(kScale, std::max(-kScale, quantized_value));
+    quantized_values[i] = static_cast<int8_t>(
+        std::min(kScale, std::max(-kScale, quantized_value)));
   }
 }
 
@@ -660,7 +660,8 @@ void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2,
       int32_t value = static_cast<int32_t>(a) * static_cast<int32_t>(b);
       value = MultiplyByQuantizedMultiplier(value, multiplier, shift);
       value -= output_zp;
-      value = std::min(std::max(-128, value), 127);
+      value = std::min(std::max(static_cast<int32_t>(-128), value),
+                       static_cast<int32_t>(127));
 
       output[index] = static_cast<int8_t>(value);
     }
@@ -748,7 +749,8 @@ void PortableVectorBatchVectorCwiseProductAccumulate(
       int32_t prod = vector[v] * *batch_vector++;
       prod = MultiplyByQuantizedMultiplier(prod, multiplier, shift);
       int32_t output = prod + *result;
-      output = std::max(std::min(32767, output), -32768);
+      output = std::max(std::min(static_cast<int32_t>(32767), output),
+                        static_cast<int32_t>(-32768));
       *result++ = output;
     }
   }
diff --git a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h
index 9a365074513..f2e6c9b4f7d 100644
--- a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h
+++ b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.h
@@ -18,7 +18,6 @@ limitations under the License.
 // TODO(ghodrat): Remove this header file and the dependency to internal data
 // structure.
 #include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/kernels/cpu_backend_context.h"
 #include "tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h"
 
 #if defined(_MSC_VER)
diff --git a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h
index d8bd70f3722..6c15a6cd919 100644
--- a/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h
+++ b/tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h
@@ -20,13 +20,17 @@ limitations under the License.
 // TODO(ghodrat): Remove this header file and the dependency to internal data
 // structure.
 #include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/kernels/cpu_backend_context.h"
 
 #if defined(_MSC_VER)
 #define __restrict__ __restrict
 #endif
 
 namespace tflite {
+
+// Not all backends support CpuBackendContext usage, so forward declare to avoid
+// pulling in its implementation.
+class CpuBackendContext;
+
 namespace tensor_utils {
 
 // Limit a float input f between +abs_limit and -abs_limit.
diff --git a/tensorflow/lite/kernels/internal/tensor.h b/tensorflow/lite/kernels/internal/tensor.h
index 0005bf38d54..543117df0e5 100644
--- a/tensorflow/lite/kernels/internal/tensor.h
+++ b/tensorflow/lite/kernels/internal/tensor.h
@@ -119,6 +119,8 @@ class SequentialTensorWriter {
   T* output_ptr_;
 };
 
+// String ops are not yet supported on platforms w/ static memory.
+#ifndef TF_LITE_STATIC_MEMORY
 template <>
 class SequentialTensorWriter<string> {
  public:
@@ -138,6 +140,7 @@ class SequentialTensorWriter {
   TfLiteTensor* output_;
   DynamicBuffer buffer_;
 };
+#endif  // TF_LITE_STATIC_MEMORY
 
 }  // namespace tflite
 
diff --git a/tensorflow/lite/kernels/internal/tensor_utils.h b/tensorflow/lite/kernels/internal/tensor_utils.h
index 1929c2e2ff4..5e106eb7de4 100644
--- a/tensorflow/lite/kernels/internal/tensor_utils.h
+++ b/tensorflow/lite/kernels/internal/tensor_utils.h
@@ -20,13 +20,18 @@ limitations under the License.
 
 #include "third_party/eigen3/Eigen/Core"
 #include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/kernels/cpu_backend_context.h"
 
 #if defined(_MSC_VER)
 #define __restrict__ __restrict
 #endif
 
 namespace tflite {
+
+// Not all backends support CpuBackendContext usage, so forward declare to avoid
+// pulling in its implementation. Use of CpuBackendContext in method
+// implementations is purely optional.
+class CpuBackendContext;
+
 namespace tensor_utils {
 
 // Checks if all entries of vector are zero for float.
diff --git a/tensorflow/lite/kernels/internal/tensor_utils_test.cc b/tensorflow/lite/kernels/internal/tensor_utils_test.cc
index 9b047d3ba84..3ad59acdb68 100644
--- a/tensorflow/lite/kernels/internal/tensor_utils_test.cc
+++ b/tensorflow/lite/kernels/internal/tensor_utils_test.cc
@@ -18,6 +18,7 @@ limitations under the License.
 #include
 
 #include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/kernels/cpu_backend_context.h"
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/quantization_util.h"
 #include "tensorflow/lite/kernels/test_util.h"
diff --git a/tensorflow/lite/kernels/non_max_suppression.cc b/tensorflow/lite/kernels/non_max_suppression.cc
index ee8e407066d..f57ee1bc5d2 100644
--- a/tensorflow/lite/kernels/non_max_suppression.cc
+++ b/tensorflow/lite/kernels/non_max_suppression.cc
@@ -19,7 +19,6 @@ limitations under the License.
 #include
 #include
 
-#include "flatbuffers/flexbuffers.h"  // from @flatbuffers
 #include "tensorflow/lite/c/builtin_op_data.h"
 #include "tensorflow/lite/c/common.h"
 #include "tensorflow/lite/kernels/internal/tensor.h"
diff --git a/tensorflow/lite/string_util.cc b/tensorflow/lite/string_util.cc
index f7fcf2ac630..44719858f2a 100644
--- a/tensorflow/lite/string_util.cc
+++ b/tensorflow/lite/string_util.cc
@@ -89,6 +89,7 @@ int DynamicBuffer::WriteToBuffer(char** buffer) {
   return bytes;
 }
 
+#ifndef TF_LITE_STATIC_MEMORY
 void DynamicBuffer::WriteToTensorAsVector(TfLiteTensor* tensor) {
   auto dims = TfLiteIntArrayCreate(1);
   dims->data[0] = offset_.size() - 1;  // Store number of strings.
@@ -109,6 +110,7 @@ void DynamicBuffer::WriteToTensor(TfLiteTensor* tensor,
                    tensor_buffer, bytes, kTfLiteDynamic, tensor->allocation,
                    tensor->is_variable, tensor);
 }
+#endif  // TF_LITE_STATIC_MEMORY
 
 int GetStringCount(const void* raw_buffer) {
   // The first integers in the raw buffer is the number of strings.
diff --git a/tensorflow/lite/string_util.h b/tensorflow/lite/string_util.h
index 779b1e12ab8..879aa76b83b 100644
--- a/tensorflow/lite/string_util.h
+++ b/tensorflow/lite/string_util.h
@@ -74,6 +74,9 @@ class DynamicBuffer {
   // The function allocates space for the buffer but does NOT take ownership.
   int WriteToBuffer(char** buffer);
 
+  // String tensors are not generally supported on platforms w/ static memory.
+  // TODO(b/156130024): Remove this guard after removing header from TFLM deps.
+#ifndef TF_LITE_STATIC_MEMORY
   // Fill content into a string tensor, with the given new_shape. The new shape
   // must match the number of strings in this object. Caller relinquishes
   // ownership of new_shape. If 'new_shape' is nullptr, keep the tensor's
@@ -82,6 +85,7 @@ class DynamicBuffer {
 
   // Fill content into a string tensor. Set shape to {num_strings}.
   void WriteToTensorAsVector(TfLiteTensor* tensor);
+#endif  // TF_LITE_STATIC_MEMORY
 
  private:
   // Data buffer to store contents of strings, not including headers.