From 6177daf885348b0434b9a981667de560c802983c Mon Sep 17 00:00:00 2001
From: "A. Unique TensorFlower" <gardener@tensorflow.org>
Date: Mon, 27 Apr 2020 14:04:48 -0700
Subject: [PATCH] Make few implicit casts explicit to resolve clang warnings

PiperOrigin-RevId: 308693807
Change-Id: I9bfff0da006c029370561111d1cae37675e4eda0
---
 tensorflow/lite/micro/micro_utils.cc | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/tensorflow/lite/micro/micro_utils.cc b/tensorflow/lite/micro/micro_utils.cc
index eacae1bd619..ff885fa04ff 100644
--- a/tensorflow/lite/micro/micro_utils.cc
+++ b/tensorflow/lite/micro/micro_utils.cc
@@ -105,10 +105,10 @@ int8_t FloatToSymmetricQuantizedInt8(const float value, const float scale) {
 int32_t FloatToSymmetricQuantizedInt32(const float value, const float scale) {
   float quantized = round(value / scale);
-  if (quantized > INT_MAX) {
-    quantized = INT_MAX;
+  if (static_cast<int64_t>(quantized) > INT_MAX) {
+    quantized = static_cast<float>(INT_MAX);
   } else if (quantized < INT_MIN) {
-    quantized = INT_MIN;
+    quantized = static_cast<float>(INT_MIN);
   }
 
   return static_cast<int32_t>(quantized);
@@ -249,13 +249,15 @@ void SignedSymmetricQuantize(const float* values, TfLiteIntArray* dims,
     max = fmaxf(max, values[i]);
   }
 
-  *scaling_factor = fmaxf(fabs(min), fabs(max)) / kSymmetricInt32Scale;
+  *scaling_factor =
+      fmaxf(fabs(min), fabs(max)) / static_cast<float>(kSymmetricInt32Scale);
 
   for (int i = 0; i < input_size; i++) {
     const int32_t quantized_value =
         static_cast<int32_t>(roundf(values[i] / *scaling_factor));
     // Clamp: just in case some odd numeric offset.
-    quantized_values[i] = fminf(kSymmetricInt32Scale,
-                                fmaxf(-kSymmetricInt32Scale, quantized_value));
+    quantized_values[i] = fminf(
+        static_cast<float>(kSymmetricInt32Scale),
+        fmaxf(static_cast<float>(-kSymmetricInt32Scale), quantized_value));
   }
 }