From a495211a789306851beb5720207a7c7f535ca325 Mon Sep 17 00:00:00 2001
From: Suharsh Sivakumar
Date: Tue, 4 Jun 2019 20:52:22 -0700
Subject: [PATCH] Use tflite::SafeCast when casting float to int32.

PiperOrigin-RevId: 251571788
---
 tensorflow/lite/tools/optimize/BUILD                 | 1 +
 tensorflow/lite/tools/optimize/quantization_utils.cc | 7 ++++---
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/tensorflow/lite/tools/optimize/BUILD b/tensorflow/lite/tools/optimize/BUILD
index 3205277c743..f9ef0d02e58 100644
--- a/tensorflow/lite/tools/optimize/BUILD
+++ b/tensorflow/lite/tools/optimize/BUILD
@@ -19,6 +19,7 @@ cc_library(
     deps = [
         "//tensorflow/lite:framework",
         "//tensorflow/lite/c:c_api_internal",
+        "//tensorflow/lite/kernels/internal:quantization_util",
         "//tensorflow/lite/kernels/internal:round",
         "//tensorflow/lite/kernels/internal:tensor_utils",
         "//tensorflow/lite/kernels/internal:types",
diff --git a/tensorflow/lite/tools/optimize/quantization_utils.cc b/tensorflow/lite/tools/optimize/quantization_utils.cc
index 6ae83bca6fa..f26a1b5838e 100644
--- a/tensorflow/lite/tools/optimize/quantization_utils.cc
+++ b/tensorflow/lite/tools/optimize/quantization_utils.cc
@@ -20,6 +20,7 @@ limitations under the License.
 #include "absl/memory/memory.h"
 #include "third_party/eigen3/Eigen/Core"
 #include "tensorflow/lite/c/c_api_internal.h"
+#include "tensorflow/lite/kernels/internal/quantization_util.h"
 #include "tensorflow/lite/kernels/internal/round.h"
 #include "tensorflow/lite/kernels/internal/tensor_utils.h"
 #include "tensorflow/lite/kernels/internal/types.h"
@@ -304,8 +305,8 @@ TfLiteStatus SymmetricPerLayerBiasQuantize(ModelT* model, TensorT* tensor,

   for (int32_t i = 0; i < float_data_size; i++) {
     float scaling_factor_inv = (scaling_factor == 0) ? 0 : 1.0 / scaling_factor;
-    const int32_t quantized_value =
-        static_cast<int32_t>(TfLiteRound(float_data[i] * scaling_factor_inv));
+    const int32_t quantized_value = tflite::SafeCast<int32_t>(
+        TfLiteRound(float_data[i] * scaling_factor_inv));
     final_buffer[i] = std::min(kScale, std::max(-kScale, quantized_value));
   }

@@ -341,7 +342,7 @@ TfLiteStatus SymmetricPerChannelBiasQuantize(ModelT* model, TensorT* tensor,
        channel_idx++) {
     float scaling_factor = scales[channel_idx];
     float scaling_factor_inv = (scaling_factor == 0) ? 0 : 1.0 / scaling_factor;
-    const int32_t quantized_value = static_cast<int32_t>(
+    const int32_t quantized_value = tflite::SafeCast<int32_t>(
         TfLiteRound(float_data[channel_idx] * scaling_factor_inv));
     final_buffer[channel_idx] =
         std::min(kScale, std::max(-kScale, quantized_value));
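
Background on the change: static_cast from float to int32_t is undefined behavior in C++ when the truncated value falls outside the int32_t range or the input is NaN, which can happen at these call sites when scaling_factor is very small and float_data[i] * scaling_factor_inv overflows. tflite::SafeCast<int32_t>, declared in the quantization_util.h header this patch adds, is meant to produce a well-defined result for such inputs instead. Below is a minimal, standalone sketch of a saturating float-to-int32 cast in the same spirit; the helper name SaturatingCastToInt32 is invented for this sketch, std::round stands in for TfLiteRound, and the precise clamping/NaN behavior of tflite::SafeCast should be taken from its header rather than from this example.

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Sketch of a saturating float -> int32_t conversion: out-of-range values
    // are clamped to the int32_t limits and NaN maps to 0, instead of hitting
    // the undefined behavior of a plain static_cast<int32_t>(x).
    int32_t SaturatingCastToInt32(float x) {
      if (std::isnan(x)) return 0;
      // Compare in double so the int32_t limits are represented exactly.
      const double d = static_cast<double>(x);
      if (d >= static_cast<double>(std::numeric_limits<int32_t>::max())) {
        return std::numeric_limits<int32_t>::max();
      }
      if (d <= static_cast<double>(std::numeric_limits<int32_t>::min())) {
        return std::numeric_limits<int32_t>::min();
      }
      return static_cast<int32_t>(d);
    }

    // Usage in the spirit of the patched call sites (std::round replacing
    // TfLiteRound for this standalone example):
    //   const int32_t quantized_value =
    //       SaturatingCastToInt32(std::round(float_data[i] * scaling_factor_inv));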