From 41aea74ea2b693ab772ccdd181c546f4c96c3f4e Mon Sep 17 00:00:00 2001 From: Robert David Date: Tue, 2 Jun 2020 10:39:47 -0700 Subject: [PATCH] Delete duplicate definition of DequantizeConstantTensor in model_builder.cc: it already exists in model_builder_helper.h. It was not generating a compiler error for multiple definition because this is a function template. It actually had a slightly different implementation, which is undefined behavior. Keep the version that had recent updates. PiperOrigin-RevId: 314359564 Change-Id: I5f81fcdadedf6c3b2174ecc2fa9a8baa58fa1fc3 --- .../delegates/gpu/common/model_builder.cc | 24 ------------------- .../gpu/common/model_builder_helper.h | 7 +++--- 2 files changed, 4 insertions(+), 27 deletions(-) diff --git a/tensorflow/lite/delegates/gpu/common/model_builder.cc b/tensorflow/lite/delegates/gpu/common/model_builder.cc index c110d4640c8..9cb738cf935 100644 --- a/tensorflow/lite/delegates/gpu/common/model_builder.cc +++ b/tensorflow/lite/delegates/gpu/common/model_builder.cc @@ -77,30 +77,6 @@ absl::Status NewPassthroughNode(GraphFloat32* graph, Node* node, return absl::OkStatus(); } -template -inline void DequantizeConstantTensor(const TfLiteTensor& tensor, - const T* source_data, - float* dequantized_data) { - TfLiteAffineQuantization* quant_params = - static_cast(tensor.quantization.params); - if (quant_params->scale->size > 1) { - // Tensor is per-channel quantized. 
- PerChannelDequantizationParams op_params; - op_params.zero_point = quant_params->zero_point->data; - op_params.scale = quant_params->scale->data; - op_params.quantized_dimension = quant_params->quantized_dimension; - reference_ops::PerChannelDequantize(op_params, GetTensorShape(&tensor), - source_data, GetTensorShape(&tensor), - dequantized_data); - } else { - DequantizationParams op_params; - op_params.zero_point = tensor.params.zero_point; - op_params.scale = tensor.params.scale; - reference_ops::Dequantize(op_params, GetTensorShape(&tensor), source_data, - GetTensorShape(&tensor), dequantized_data); - } -} - absl::Status CheckTensorIsAvailable(const TfLiteContext* context, const TfLiteNode* tflite_node, int idx) { // If tensor id is in range, it's guaranteed that it'll be available. diff --git a/tensorflow/lite/delegates/gpu/common/model_builder_helper.h b/tensorflow/lite/delegates/gpu/common/model_builder_helper.h index 6cbfcd9e7d6..991520c0a3b 100644 --- a/tensorflow/lite/delegates/gpu/common/model_builder_helper.h +++ b/tensorflow/lite/delegates/gpu/common/model_builder_helper.h @@ -69,10 +69,11 @@ void ConvertFloat16ToFloat32(size_t num_elements, const uint16_t* src, float* dst); template -void DequantizeConstantTensor(const TfLiteTensor& tensor, const T* source_data, - float* dequantized_data) { +inline void DequantizeConstantTensor(const TfLiteTensor& tensor, + const T* source_data, + float* dequantized_data) { TfLiteAffineQuantization* quant_params = - reinterpret_cast(tensor.quantization.params); + static_cast(tensor.quantization.params); if (quant_params->scale->size > 1) { // Tensor is per-channel quantized. PerChannelDequantizationParams op_params;