diff --git a/tensorflow/lite/experimental/ruy/check_macros.h b/tensorflow/lite/experimental/ruy/check_macros.h index 564440b4c8f..82dbcee9908 100644 --- a/tensorflow/lite/experimental/ruy/check_macros.h +++ b/tensorflow/lite/experimental/ruy/check_macros.h @@ -35,7 +35,7 @@ struct ToString { template <> struct ToString<float> { static void Run(float value, char* buf) { - snprintf(buf, kValueBufSize, "%.9g", static_cast<double>(value)); + snprintf(buf, kValueBufSize, "%.9g", value); } }; diff --git a/tensorflow/lite/kernels/BUILD b/tensorflow/lite/kernels/BUILD index 0273bc7ecaf..7aea0000bef 100644 --- a/tensorflow/lite/kernels/BUILD +++ b/tensorflow/lite/kernels/BUILD @@ -1,5 +1,5 @@ load("//tensorflow/lite:build_def.bzl", "tflite_copts") -load("//tensorflow/lite/micro:build_def.bzl", "cc_library", "micro_copts") +load("//tensorflow/lite/micro:build_def.bzl", "cc_library") load("//tensorflow/lite:special_rules.bzl", "tflite_portable_test_suite_combined") load("//tensorflow:tensorflow.bzl", "tf_opts_nortti_if_android") @@ -373,7 +373,7 @@ cc_library( hdrs = [ "kernel_util.h", ], - copts = tflite_copts() + micro_copts(), + copts = tflite_copts(), deps = [ "//tensorflow/lite/c:common", "//tensorflow/lite/kernels/internal:quantization_util", diff --git a/tensorflow/lite/kernels/internal/BUILD b/tensorflow/lite/kernels/internal/BUILD index 3ff83934db7..8f64a8534ec 100644 --- a/tensorflow/lite/kernels/internal/BUILD +++ b/tensorflow/lite/kernels/internal/BUILD @@ -1,6 +1,6 @@ load("//tensorflow:tensorflow.bzl", "transitive_hdrs") load("//tensorflow/lite:build_def.bzl", "tflite_copts") -load("//tensorflow/lite/micro:build_def.bzl", "cc_library", "micro_copts") +load("//tensorflow/lite/micro:build_def.bzl", "cc_library") load("//tensorflow/lite:special_rules.bzl", "tflite_portable_test_suite_combined") package( @@ -353,7 +353,7 @@ cc_library( name = "quantization_util", srcs = ["quantization_util.cc"], hdrs = ["quantization_util.h"], - copts = tflite_copts() + micro_copts(), + copts = 
tflite_copts(), deps = [ ":compatibility", ":round", @@ -645,7 +645,7 @@ cc_library( name = "kernel_utils", srcs = ["kernel_utils.cc"], hdrs = ["kernel_utils.h"], - copts = tflite_copts() + micro_copts(), + copts = tflite_copts(), deps = [ ":tensor_utils", "//tensorflow/lite/c:common", diff --git a/tensorflow/lite/kernels/internal/quantization_util.cc b/tensorflow/lite/kernels/internal/quantization_util.cc index d94ca5beba9..d982859b7e4 100644 --- a/tensorflow/lite/kernels/internal/quantization_util.cc +++ b/tensorflow/lite/kernels/internal/quantization_util.cc @@ -183,11 +183,11 @@ double DoubleFromFractionAndShift(int64_t fraction, int shift) { // Detect NaNs and infinities. if (shift == std::numeric_limits<int>::max()) { if (fraction == 0) { - return std::numeric_limits<double>::quiet_NaN(); + return NAN; } else if (fraction > 0) { - return std::numeric_limits<double>::infinity(); + return INFINITY; } else { - return -std::numeric_limits<double>::infinity(); + return -INFINITY; } } @@ -229,7 +229,7 @@ double IntegerDoubleMultiply(double a, double b) { // Detect NaNs and infinities. 
if (a_shift == std::numeric_limits<int>::max() || (b_shift == std::numeric_limits<int>::max())) { - return std::numeric_limits<double>::quiet_NaN(); + return NAN; } const int result_shift = a_shift + b_shift + 1; const int64_t result_fraction = (a_fraction * b_fraction) >> 32; @@ -379,7 +379,7 @@ bool CheckedLog2(const float x, int* log2_result) { const float x_log2_fracpart = x_log2 - x_log2_rounded; *log2_result = static_cast<int>(x_log2_rounded); - return std::abs(x_log2_fracpart) < 1e-3f; + return std::abs(x_log2_fracpart) < 1e-3; } void QuantizeMultiplierArray(const double* effective_scales, size_t size, diff --git a/tensorflow/lite/kernels/internal/reference/quantize.h b/tensorflow/lite/kernels/internal/reference/quantize.h index 807eccb5851..37e2bea253d 100644 --- a/tensorflow/lite/kernels/internal/reference/quantize.h +++ b/tensorflow/lite/kernels/internal/reference/quantize.h @@ -36,9 +36,7 @@ inline void AffineQuantize(const tflite::QuantizationParams& op_params, for (int i = 0; i < flat_size; i++) { const float val = input_data[i]; - int32 unclamped = - static_cast<int32>(TfLiteRound(val / static_cast<float>(scale))) + - zero_point; + int32 unclamped = static_cast<int32>(TfLiteRound(val / scale)) + zero_point; int32 clamped = std::min(std::max(unclamped, min_val), max_val); output_data[i] = clamped; } diff --git a/tensorflow/lite/kernels/internal/reference/softmax.h b/tensorflow/lite/kernels/internal/reference/softmax.h index 790f4d28ddb..45a18cdb47f 100644 --- a/tensorflow/lite/kernels/internal/reference/softmax.h +++ b/tensorflow/lite/kernels/internal/reference/softmax.h @@ -43,20 +43,16 @@ inline void Softmax(const SoftmaxParams& params, max = std::max(max, input_data[i * depth + c]); } - // TODO(b/148114827): Improve this code. // Compute sum. float sum = 0.f; for (int c = 0; c < depth; ++c) { - sum += std::exp(static_cast<double>(input_data[i * depth + c] - max) * - params.beta); + sum += std::exp((input_data[i * depth + c] - max) * params.beta); } // Compute result. 
for (int c = 0; c < depth; ++c) { output_data[i * depth + c] = - std::exp(static_cast<double>(input_data[i * depth + c] - max) * - params.beta) / - static_cast<double>(sum); + std::exp((input_data[i * depth + c] - max) * params.beta) / sum; } } } diff --git a/tensorflow/lite/kernels/kernel_util.cc b/tensorflow/lite/kernels/kernel_util.cc index 9e26d38a96f..26190a75568 100644 --- a/tensorflow/lite/kernels/kernel_util.cc +++ b/tensorflow/lite/kernels/kernel_util.cc @@ -118,12 +118,11 @@ TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context, const TfLiteTensor* bias, TfLiteTensor* output, double* multiplier) { - const double input_product_scale = static_cast<double>(input->params.scale) * - static_cast<double>(filter->params.scale); + const double input_product_scale = input->params.scale * filter->params.scale; // TODO(ahentz): The following conditions must be guaranteed by the training // pipeline. if (bias) { - const double bias_scale = static_cast<double>(bias->params.scale); + const double bias_scale = bias->params.scale; TF_LITE_ENSURE(context, std::abs(input_product_scale - bias_scale) <= 1e-6 * std::min(input_product_scale, bias_scale)); @@ -137,10 +136,9 @@ TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context, const TfLiteTensor* filter, TfLiteTensor* output, double* multiplier) { - const double input_product_scale = static_cast<double>(input->params.scale) * - static_cast<double>(filter->params.scale); + const double input_product_scale = input->params.scale * filter->params.scale; TF_LITE_ENSURE(context, input_product_scale >= 0); - *multiplier = input_product_scale / static_cast<double>(output->params.scale); + *multiplier = input_product_scale / output->params.scale; return kTfLiteOk; } diff --git a/tensorflow/lite/micro/build_def.bzl b/tensorflow/lite/micro/build_def.bzl index ce5beef1181..c29eb92a626 100644 --- a/tensorflow/lite/micro/build_def.bzl +++ b/tensorflow/lite/micro/build_def.bzl @@ -10,9 +10,10 @@ load( def micro_copts(): # TODO(b/139024129): include the followings as well: 
# -Wmissing-field-initializers + # -Wdouble-promotion # -Wunused-const-variable # -Wshadow - copts = ["-Werror", "-Wsign-compare", "-Wdouble-promotion"] + copts = ["-Werror", "-Wsign-compare"] return copts def cc_library(**kwargs): diff --git a/tensorflow/lite/micro/examples/hello_world/output_handler.cc b/tensorflow/lite/micro/examples/hello_world/output_handler.cc index b1c8898904c..466653c6534 100644 --- a/tensorflow/lite/micro/examples/hello_world/output_handler.cc +++ b/tensorflow/lite/micro/examples/hello_world/output_handler.cc @@ -18,7 +18,5 @@ limitations under the License. void HandleOutput(tflite::ErrorReporter* error_reporter, float x_value, float y_value) { // Log the current X and Y values - error_reporter->Report("x_value: %f, y_value: %f\n", - static_cast<double>(x_value), - static_cast<double>(y_value)); + error_reporter->Report("x_value: %f, y_value: %f\n", x_value, y_value); } diff --git a/tensorflow/lite/micro/kernels/add.cc b/tensorflow/lite/micro/kernels/add.cc index cf0f139d084..e100cb7ca47 100644 --- a/tensorflow/lite/micro/kernels/add.cc +++ b/tensorflow/lite/micro/kernels/add.cc @@ -77,15 +77,14 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params, data->output_offset = output->params.zero_point; data->left_shift = 20; const double twice_max_input_scale = - 2 * static_cast<double>( - std::max(input1->params.scale, input2->params.scale)); + 2 * std::max(input1->params.scale, input2->params.scale); const double real_input1_multiplier = - static_cast<double>(input1->params.scale) / twice_max_input_scale; + input1->params.scale / twice_max_input_scale; const double real_input2_multiplier = - static_cast<double>(input2->params.scale) / twice_max_input_scale; + input2->params.scale / twice_max_input_scale; const double real_output_multiplier = twice_max_input_scale / - ((1 << data->left_shift) * static_cast<double>(output->params.scale)); + ((1 << data->left_shift) * output->params.scale); QuantizeMultiplierSmallerThanOneExp( real_input1_multiplier, 
&data->input1_multiplier, &data->input1_shift); diff --git a/tensorflow/lite/micro/kernels/comparisons.cc b/tensorflow/lite/micro/kernels/comparisons.cc index 83fe9da51d0..c1801d5f731 100644 --- a/tensorflow/lite/micro/kernels/comparisons.cc +++ b/tensorflow/lite/micro/kernels/comparisons.cc @@ -43,14 +43,12 @@ constexpr int kOutputTensor = 0; \ int32 input1_multiplier; \ int input1_shift; \ - QuantizeMultiplierSmallerThanOneExp( \ - static_cast<double>(input1->params.scale), &input1_multiplier, \ - &input1_shift); \ + QuantizeMultiplierSmallerThanOneExp(input1->params.scale, \ + &input1_multiplier, &input1_shift); \ int32 input2_multiplier; \ int input2_shift; \ - QuantizeMultiplierSmallerThanOneExp( \ - static_cast<double>(input2->params.scale), &input2_multiplier, \ - &input2_shift); \ + QuantizeMultiplierSmallerThanOneExp(input2->params.scale, \ + &input2_multiplier, &input2_shift); \ \ ComparisonParams op_params; \ op_params.left_shift = left_shift; \ diff --git a/tensorflow/lite/micro/kernels/dequantize.cc b/tensorflow/lite/micro/kernels/dequantize.cc index fca4b95babb..58c3e1e5cdc 100644 --- a/tensorflow/lite/micro/kernels/dequantize.cc +++ b/tensorflow/lite/micro/kernels/dequantize.cc @@ -46,7 +46,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::DequantizationParams op_params; op_params.zero_point = input->params.zero_point; - op_params.scale = static_cast<double>(input->params.scale); + op_params.scale = input->params.scale; switch (input->type) { case kTfLiteUInt8: reference_ops::Dequantize( diff --git a/tensorflow/lite/micro/kernels/mul.cc b/tensorflow/lite/micro/kernels/mul.cc index 7483e546be9..2dae837a28f 100644 --- a/tensorflow/lite/micro/kernels/mul.cc +++ b/tensorflow/lite/micro/kernels/mul.cc @@ -55,9 +55,8 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, &data->output_activation_max)); if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - double real_multiplier = static_cast<double>(input1->params.scale) * - 
static_cast<double>(input2->params.scale) / - static_cast<double>(output->params.scale); + double real_multiplier = + input1->params.scale * input2->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } diff --git a/tensorflow/lite/micro/kernels/pad.cc b/tensorflow/lite/micro/kernels/pad.cc index c3316f49aec..916725dc2a0 100644 --- a/tensorflow/lite/micro/kernels/pad.cc +++ b/tensorflow/lite/micro/kernels/pad.cc @@ -152,9 +152,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { // same quantized range as the input and output tensors. TF_LITE_ENSURE_EQ(context, op_context.output->params.zero_point, op_context.constant_values->params.zero_point); - TF_LITE_ENSURE_EQ( - context, static_cast<double>(op_context.output->params.scale), - static_cast<double>(op_context.constant_values->params.scale)); + TF_LITE_ENSURE_EQ(context, op_context.output->params.scale, + op_context.constant_values->params.scale); pad_value = *GetTensorData<uint8_t>(op_context.constant_values); } if (op_context.resizing_category == ResizingCategory::kImageStyle) { diff --git a/tensorflow/lite/micro/kernels/prelu.cc b/tensorflow/lite/micro/kernels/prelu.cc index c8dea5e43e2..74d7d793d7e 100644 --- a/tensorflow/lite/micro/kernels/prelu.cc +++ b/tensorflow/lite/micro/kernels/prelu.cc @@ -53,7 +53,7 @@ inline void BroadcastPrelu4DSlowFloat( auto in2_idx = SubscriptToIndex(desc2, b, y, x, c); auto in1_val = input1_data[in1_idx]; auto in2_val = input2_data[in2_idx]; - output_data[out_idx] = in1_val >= 0.0f ? in1_val : in1_val * in2_val; + output_data[out_idx] = in1_val >= 0.0 ? 
in1_val : in1_val * in2_val; } } } @@ -67,9 +67,8 @@ TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { int32_t output_multiplier = 0; int output_shift = 0; if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt16) { - double real_multiplier = static_cast<double>(input->params.scale) * - static_cast<double>(alpha->params.scale) / - static_cast<double>(output->params.scale); + double real_multiplier = + input->params.scale * alpha->params.scale / output->params.scale; QuantizeMultiplierSmallerThanOneExp(real_multiplier, &output_multiplier, &output_shift); } diff --git a/tensorflow/lite/micro/kernels/quantize.cc b/tensorflow/lite/micro/kernels/quantize.cc index 3a99562e803..66883b1561a 100644 --- a/tensorflow/lite/micro/kernels/quantize.cc +++ b/tensorflow/lite/micro/kernels/quantize.cc @@ -60,7 +60,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::QuantizationParams op_params; op_params.zero_point = output->params.zero_point; - op_params.scale = static_cast<double>(output->params.scale); + op_params.scale = output->params.scale; switch (output->type) { case kTfLiteInt8: reference_ops::AffineQuantize( diff --git a/tensorflow/lite/micro/kernels/softmax.cc b/tensorflow/lite/micro/kernels/softmax.cc index fdfb259b48a..a7b1c80fc2f 100644 --- a/tensorflow/lite/micro/kernels/softmax.cc +++ b/tensorflow/lite/micro/kernels/softmax.cc @@ -53,8 +53,7 @@ TfLiteStatus CalculateSoftmaxOpData(TfLiteContext* context, static const int kScaledDiffIntegerBits = 5; tflite::PreprocessSoftmaxScaling( - static_cast<double>(params->beta), - static_cast<double>(input->params.scale), kScaledDiffIntegerBits, + params->beta, input->params.scale, kScaledDiffIntegerBits, &data->input_multiplier, &data->input_left_shift); data->diff_min = -1.0 * tflite::CalculateInputRadius( kScaledDiffIntegerBits, data->input_left_shift); @@ -144,7 +143,7 @@ void Softmax2DQuantized(const TfLiteTensor* input, TfLiteTensor* output, void Softmax4DFloat(const TfLiteTensor* input, TfLiteTensor* output, 
TfLiteSoftmaxParams* params) { SoftmaxParams op_params; - op_params.beta = static_cast<double>(params->beta); + op_params.beta = params->beta; tflite::reference_ops::Softmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); diff --git a/tensorflow/lite/micro/kernels/svdf.cc b/tensorflow/lite/micro/kernels/svdf.cc index f0574045bc1..59004014dae 100644 --- a/tensorflow/lite/micro/kernels/svdf.cc +++ b/tensorflow/lite/micro/kernels/svdf.cc @@ -526,12 +526,12 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* output_params = reinterpret_cast<TfLiteAffineQuantization*>( output->quantization.params); const double effective_scale_1 = - static_cast<double>(input_params->scale->data[0] * - weights_feature_params->scale->data[0] / - state_params->scale->data[0]); - const double effective_scale_2 = static_cast<double>( - state_params->scale->data[0] * weight_time_params->scale->data[0] / - output_params->scale->data[0]); + input_params->scale->data[0] * + weights_feature_params->scale->data[0] / + state_params->scale->data[0]; + const double effective_scale_2 = state_params->scale->data[0] * + weight_time_params->scale->data[0] / + output_params->scale->data[0]; QuantizeMultiplier(effective_scale_1, &op_data.effective_scale_1_a, &op_data.effective_scale_1_b); QuantizeMultiplier(effective_scale_2, &op_data.effective_scale_2_a, diff --git a/tensorflow/lite/micro/tools/make/targets/apollo3evb_makefile.inc b/tensorflow/lite/micro/tools/make/targets/apollo3evb_makefile.inc index 4d2eb5f227b..86837ce3a4a 100644 --- a/tensorflow/lite/micro/tools/make/targets/apollo3evb_makefile.inc +++ b/tensorflow/lite/micro/tools/make/targets/apollo3evb_makefile.inc @@ -54,7 +54,6 @@ $(MAKEFILE_DIR)/downloads/$(AM_SDK_DEST)/$(SF_BSPS_DEST): $(MAKEFILE_DIR)/downlo -Wall \ -Wextra \ -Wsign-compare \ - -Wdouble-promotion \ -Wno-unused-parameter \ -Wno-missing-field-initializers \ -Wno-write-strings \ diff --git a/tensorflow/lite/micro/tools/make/targets/bluepill_makefile.inc 
b/tensorflow/lite/micro/tools/make/targets/bluepill_makefile.inc index c02154233d5..bb01340ab51 100644 --- a/tensorflow/lite/micro/tools/make/targets/bluepill_makefile.inc +++ b/tensorflow/lite/micro/tools/make/targets/bluepill_makefile.inc @@ -28,7 +28,6 @@ ifeq ($(TARGET), bluepill) -Wall \ -Wextra \ -Wsign-compare \ - -Wdouble-promotion \ -Wno-unused-parameter \ -Wno-missing-field-initializers \ -Wno-write-strings \ diff --git a/tensorflow/lite/micro/tools/make/targets/ecm3531_makefile.inc b/tensorflow/lite/micro/tools/make/targets/ecm3531_makefile.inc index 3490ee0d2e5..0e87535b129 100644 --- a/tensorflow/lite/micro/tools/make/targets/ecm3531_makefile.inc +++ b/tensorflow/lite/micro/tools/make/targets/ecm3531_makefile.inc @@ -41,7 +41,6 @@ ifeq ($(TARGET), ecm3531) -Wall \ -Wextra \ -Wsign-compare \ - -Wdouble-promotion \ -Wno-unused-parameter \ -Wno-missing-field-initializers \ -Wno-write-strings \ diff --git a/tensorflow/lite/micro/tools/make/targets/mcu_riscv_makefile.inc b/tensorflow/lite/micro/tools/make/targets/mcu_riscv_makefile.inc index cc1b1466e8c..1ec91cdca82 100644 --- a/tensorflow/lite/micro/tools/make/targets/mcu_riscv_makefile.inc +++ b/tensorflow/lite/micro/tools/make/targets/mcu_riscv_makefile.inc @@ -24,7 +24,6 @@ ifeq ($(TARGET), riscv32_mcu) -Wall \ -Wextra \ -Wsign-compare \ - -Wdouble-promotion \ -Wno-unused-parameter \ -Wno-missing-field-initializers \ -Wno-write-strings \