Fix bazel TFLM compilation w/ TF_LITE_STATIC_MEMORY

Ensure dynamic string utils aren't compiled when this build define is
present.

PiperOrigin-RevId: 311626904
Change-Id: Ica229bf337019f0f446fdb94aaf42c6b7e7c749e
Jared Duke authored on 2020-05-14 16:21:53 -07:00, committed by TensorFlower Gardener
parent 098f9176b6
commit 7d40f2c389
11 changed files with 30 additions and 12 deletions
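For context: TF_LITE_STATIC_MEMORY is an ordinary preprocessor define, typically passed to the compiler by the TFLM build (e.g., as a -D copt), and the fix below fences dynamic-allocation code behind it. A minimal sketch of the mechanism, with illustrative names that are not part of TensorFlow Lite:

#include <cstdio>

#ifndef TF_LITE_STATIC_MEMORY
#include <string>

// Dynamic path: free to use heap-backed containers.
const char* Describe() {
  static const std::string kLabel = "dynamic build";
  return kLabel.c_str();
}
#else

// Static-memory path: no dynamic allocation at all.
const char* Describe() { return "static-memory build"; }
#endif

int main() {
  // Compile with -DTF_LITE_STATIC_MEMORY to exercise the static branch.
  std::printf("%s\n", Describe());
  return 0;
}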

View File

@@ -386,7 +386,7 @@ cc_library(
         "//tensorflow/lite/c:common",
         "//tensorflow/lite/kernels/internal:cppmath",
         "//tensorflow/lite/kernels/internal:quantization_util",
-        "@flatbuffers",
+        "@flatbuffers//:runtime_cc",
     ],
 )

View File

@@ -629,7 +629,6 @@ cc_library(
         ":cppmath",
         "//tensorflow/lite:minimal_logging",
         "//tensorflow/lite/c:common",
-        "//tensorflow/lite/kernels:cpu_backend_context",
         "@gemmlowp",
     ],
 )
@@ -785,7 +784,6 @@ cc_library(
     deps = [
         ":cpu_check",
         "//tensorflow/lite/c:common",
-        "//tensorflow/lite/kernels:cpu_backend_context",
         "//third_party/eigen3",
     ],
 )
@@ -819,6 +817,7 @@ cc_test(
         ":quantization_util",
         ":tensor_utils",
         "//tensorflow/lite/c:common",
+        "//tensorflow/lite/kernels:cpu_backend_context",
         "//tensorflow/lite/kernels:test_util",
         "@com_google_googletest//:gtest_main",
     ],

View File

@@ -21,7 +21,6 @@ limitations under the License.
 #include "fixedpoint/fixedpoint.h"
 #include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/kernels/cpu_backend_context.h"
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/compatibility.h"
 #include "tensorflow/lite/kernels/internal/cppmath.h"
@@ -53,7 +52,7 @@ void PortableSymmetricQuantizeFloats(const float* values, const int size,
 void PortableSymmetricQuantizeFloats(const float* values, const int size,
                                      int8_t* quantized_values, float min_value,
                                      float max_value, float* scaling_factor) {
-  const int kScale = 127;
+  const int32_t kScale = 127;
   const float range = std::max(std::abs(min_value), std::abs(max_value));
   if (range == 0) {
     memset(quantized_values, 0, size * sizeof(int8_t));
@@ -66,7 +65,8 @@ void PortableSymmetricQuantizeFloats(const float* values, const int size,
     const int32_t quantized_value =
         static_cast<int32_t>(TfLiteRound(values[i] * scaling_factor_inv));
     // Clamp: just in case some odd numeric offset.
-    quantized_values[i] = std::min(kScale, std::max(-kScale, quantized_value));
+    quantized_values[i] = static_cast<int8_t>(
+        std::min(kScale, std::max(-kScale, quantized_value)));
   }
 }
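Note on the hunk above: with kScale now an int32_t, the clamped result is an int32_t being stored into an int8_t element, so the new static_cast makes the narrowing explicit and keeps strict warning configurations quiet. A standalone sketch of the clamp-then-narrow pattern, using std::round instead of TfLiteRound so it compiles outside the TFLite tree:

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Keep the comparison in int32_t, then narrow explicitly once the value is
// guaranteed to be in int8_t range.
int8_t QuantizeOne(float value, float scaling_factor_inv) {
  const int32_t kScale = 127;
  const int32_t quantized =
      static_cast<int32_t>(std::round(value * scaling_factor_inv));
  return static_cast<int8_t>(std::min(kScale, std::max(-kScale, quantized)));
}

int main() {
  // 3.2 * 50 = 160, which clamps to the +127 bound used here.
  std::printf("%d\n", QuantizeOne(3.2f, 50.0f));
  return 0;
}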
@@ -660,7 +660,8 @@ void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2,
     int32_t value = static_cast<int32_t>(a) * static_cast<int32_t>(b);
     value = MultiplyByQuantizedMultiplier(value, multiplier, shift);
     value -= output_zp;
-    value = std::min(std::max(-128, value), 127);
+    value = std::min(std::max(static_cast<int32_t>(-128), value),
+                     static_cast<int32_t>(127));
     output[index] = static_cast<int8>(value);
   }
@@ -748,7 +749,8 @@ void PortableVectorBatchVectorCwiseProductAccumulate(
       int32_t prod = vector[v] * *batch_vector++;
       prod = MultiplyByQuantizedMultiplier(prod, multiplier, shift);
       int32_t output = prod + *result;
-      output = std::max(std::min(32767, output), -32768);
+      output = std::max(std::min(static_cast<int32_t>(32767), output),
+                        static_cast<int32_t>(-32768));
       *result++ = output;
     }
   }
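The casts on the literals in the two hunks above matter because std::min and std::max deduce a single template type: mixing a plain int literal with an int32_t variable only compiles when int32_t happens to be int, which is not guaranteed on the embedded toolchains TFLM targets. A minimal illustration:

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Sketch of the portability issue the casts address.
int32_t ClampToInt16Range(int32_t value) {
  // return std::max(std::min(32767, value), -32768);  // fails to deduce when int32_t != int
  return std::max(std::min(static_cast<int32_t>(32767), value),
                  static_cast<int32_t>(-32768));
}

int main() {
  std::printf("%d\n", static_cast<int>(ClampToInt16Range(40000)));  // prints 32767
  return 0;
}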

View File

@@ -18,7 +18,6 @@ limitations under the License.
 // TODO(ghodrat): Remove this header file and the dependency to internal data
 // structure.
 #include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/kernels/cpu_backend_context.h"
 #include "tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h"
 #if defined(_MSC_VER)

View File

@@ -20,13 +20,17 @@ limitations under the License.
 // TODO(ghodrat): Remove this header file and the dependency to internal data
 // structure.
 #include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/kernels/cpu_backend_context.h"
 #if defined(_MSC_VER)
 #define __restrict__ __restrict
 #endif
 namespace tflite {
+// Not all backends support CpuBackendContext usage, so forward declare to avoid
+// pulling in its implementation.
+class CpuBackendContext;
 namespace tensor_utils {
 // Limit a float input f between +abs_limit and -abs_limit.
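The forward declaration added above is sufficient because these headers only pass CpuBackendContext around by pointer; only translation units that construct or dereference the context need the full header, which is why the test below gains the include and the BUILD dependency instead. A self-contained sketch of the idea, with a hypothetical helper function:

#include <cstdio>

// Forward declaration: the complete class definition is never needed in this
// translation unit because the type is only handled through a pointer.
class CpuBackendContext;

// Hypothetical helper in the style of the tensor_utils routines: the context
// parameter is optional and may be null.
void ScaleVector(const float* input, int size, float scale, float* output,
                 CpuBackendContext* /*context*/) {
  for (int i = 0; i < size; ++i) output[i] = input[i] * scale;
}

int main() {
  float in[3] = {1.0f, 2.0f, 3.0f};
  float out[3];
  ScaleVector(in, 3, 0.5f, out, /*context=*/nullptr);
  std::printf("%.1f %.1f %.1f\n", out[0], out[1], out[2]);
  return 0;
}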

View File

@@ -119,6 +119,8 @@ class SequentialTensorWriter {
   T* output_ptr_;
 };
+// String ops are not yet supported on platforms w/ static memory.
+#ifndef TF_LITE_STATIC_MEMORY
 template <>
 class SequentialTensorWriter<string> {
  public:
@@ -138,6 +140,7 @@ class SequentialTensorWriter<string> {
   TfLiteTensor* output_;
   DynamicBuffer buffer_;
 };
+#endif  // TF_LITE_STATIC_MEMORY
 }  // namespace tflite
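The guard above follows a common pattern: the primary template stays available on every platform, while the string specialization, which depends on dynamic buffers, is compiled out on static-memory builds. An illustrative sketch with generic names (not the TFLite classes):

#include <cstdio>
#include <string>

// Primary template: fixed-capacity storage, safe for static-memory builds.
template <typename T>
class SequentialWriter {
 public:
  void Write(const T& value) {
    if (count_ < kCapacity) values_[count_++] = value;
  }
  int size() const { return count_; }

 private:
  static constexpr int kCapacity = 8;
  T values_[kCapacity];
  int count_ = 0;
};

#ifndef TF_LITE_STATIC_MEMORY
// The std::string specialization relies on heap-backed growth, so it is
// compiled out entirely when TF_LITE_STATIC_MEMORY is defined.
template <>
class SequentialWriter<std::string> {
 public:
  void Write(const std::string& value) {
    joined_ += value;
    ++count_;
  }
  int size() const { return count_; }

 private:
  std::string joined_;
  int count_ = 0;
};
#endif  // TF_LITE_STATIC_MEMORY

int main() {
  SequentialWriter<int> writer;
  writer.Write(42);
  std::printf("%d\n", writer.size());
  return 0;
}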

View File

@@ -20,13 +20,18 @@ limitations under the License.
 #include "third_party/eigen3/Eigen/Core"
 #include "tensorflow/lite/c/builtin_op_data.h"
-#include "tensorflow/lite/kernels/cpu_backend_context.h"
 #if defined(_MSC_VER)
 #define __restrict__ __restrict
 #endif
 namespace tflite {
+// Not all backends support CpuBackendContext usage, so forward declare to avoid
+// pulling in its implementation. Use of CpuBackendContext in method
+// implementations is purely optional.
+class CpuBackendContext;
 namespace tensor_utils {
 // Checks if all entries of vector are zero for float.

View File

@@ -18,6 +18,7 @@ limitations under the License.
 #include <gmock/gmock.h>
 #include "tensorflow/lite/c/builtin_op_data.h"
+#include "tensorflow/lite/kernels/cpu_backend_context.h"
 #include "tensorflow/lite/kernels/internal/common.h"
 #include "tensorflow/lite/kernels/internal/quantization_util.h"
 #include "tensorflow/lite/kernels/test_util.h"

View File

@@ -19,7 +19,6 @@ limitations under the License.
#include <numeric>
#include <vector>
#include "flatbuffers/flexbuffers.h" // from @flatbuffers
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor.h"

View File

@@ -89,6 +89,7 @@ int DynamicBuffer::WriteToBuffer(char** buffer) {
   return bytes;
 }
+#ifndef TF_LITE_STATIC_MEMORY
 void DynamicBuffer::WriteToTensorAsVector(TfLiteTensor* tensor) {
   auto dims = TfLiteIntArrayCreate(1);
   dims->data[0] = offset_.size() - 1;  // Store number of strings.
@@ -109,6 +110,7 @@ void DynamicBuffer::WriteToTensor(TfLiteTensor* tensor,
       tensor_buffer, bytes, kTfLiteDynamic, tensor->allocation,
       tensor->is_variable, tensor);
 }
+#endif  // TF_LITE_STATIC_MEMORY
 int GetStringCount(const void* raw_buffer) {
   // The first integers in the raw buffer is the number of strings.
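The practical consequence of the guards above is that call sites reaching the dynamic string writers must sit behind the same define. A sketch of a hypothetical op writing string output, assuming a TensorFlow Lite checkout for the includes and the DynamicBuffer::AddString API declared alongside these methods:

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/string_util.h"

#ifndef TF_LITE_STATIC_MEMORY
// Hypothetical helper: collect strings and hand them to a dynamic tensor.
// Only meaningful on platforms that allow dynamic allocation.
TfLiteStatus WriteGreetings(TfLiteTensor* output) {
  tflite::DynamicBuffer buffer;
  buffer.AddString("hello", 5);
  buffer.AddString("world", 5);
  // Resizes `output` to {2} and lays out the string headers.
  buffer.WriteToTensorAsVector(output);
  return kTfLiteOk;
}
#endif  // TF_LITE_STATIC_MEMORY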

View File

@@ -74,6 +74,9 @@ class DynamicBuffer {
   // The function allocates space for the buffer but does NOT take ownership.
   int WriteToBuffer(char** buffer);
+  // String tensors are not generally supported on platforms w/ static memory.
+  // TODO(b/156130024): Remove this guard after removing header from TFLM deps.
+#ifndef TF_LITE_STATIC_MEMORY
   // Fill content into a string tensor, with the given new_shape. The new shape
   // must match the number of strings in this object. Caller relinquishes
   // ownership of new_shape. If 'new_shape' is nullptr, keep the tensor's
@@ -82,6 +85,7 @@ class DynamicBuffer {
   // Fill content into a string tensor. Set shape to {num_strings}.
   void WriteToTensorAsVector(TfLiteTensor* tensor);
+#endif  // TF_LITE_STATIC_MEMORY
  private:
   // Data buffer to store contents of strings, not including headers.
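The comment block above also spells out the ownership rule for WriteToTensor: the caller builds a TfLiteIntArray, the buffer takes ownership of it, and passing nullptr keeps the tensor's current shape. A sketch of that call pattern under the same guard (again assuming a TensorFlow Lite checkout; the helper name is hypothetical):

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/string_util.h"

#ifndef TF_LITE_STATIC_MEMORY
// Hypothetical helper writing a 2x1 string tensor; DynamicBuffer takes
// ownership of `shape`, so the caller must not free it afterwards.
void WriteAsMatrix(TfLiteTensor* output) {
  tflite::DynamicBuffer buffer;
  buffer.AddString("row0", 4);
  buffer.AddString("row1", 4);
  TfLiteIntArray* shape = TfLiteIntArrayCreate(2);
  shape->data[0] = 2;
  shape->data[1] = 1;
  buffer.WriteToTensor(output, /*new_shape=*/shape);
}
#endif  // TF_LITE_STATIC_MEMORY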