Delete duplicate definition of DequantizeConstantTensor in model_builder.cc: it already exists in model_builder_helper.h. It was not generating a compiler error for multiple definition because this is a function template.

It actually had a slightly different implementation, which is undefined behavior (two definitions of the same template that are not token-for-token identical violate the ODR). Keep the version that had recent updates.

PiperOrigin-RevId: 314359564
Change-Id: I5f81fcdadedf6c3b2174ecc2fa9a8baa58fa1fc3
This commit is contained in:
Robert David 2020-06-02 10:39:47 -07:00 committed by TensorFlower Gardener
parent cf30d41ded
commit 41aea74ea2
2 changed files with 4 additions and 27 deletions

View File

@@ -77,30 +77,6 @@ absl::Status NewPassthroughNode(GraphFloat32* graph, Node* node,
return absl::OkStatus();
}
template <typename T>
inline void DequantizeConstantTensor(const TfLiteTensor& tensor,
const T* source_data,
float* dequantized_data) {
TfLiteAffineQuantization* quant_params =
static_cast<TfLiteAffineQuantization*>(tensor.quantization.params);
if (quant_params->scale->size > 1) {
// Tensor is per-channel quantized.
PerChannelDequantizationParams op_params;
op_params.zero_point = quant_params->zero_point->data;
op_params.scale = quant_params->scale->data;
op_params.quantized_dimension = quant_params->quantized_dimension;
reference_ops::PerChannelDequantize(op_params, GetTensorShape(&tensor),
source_data, GetTensorShape(&tensor),
dequantized_data);
} else {
DequantizationParams op_params;
op_params.zero_point = tensor.params.zero_point;
op_params.scale = tensor.params.scale;
reference_ops::Dequantize(op_params, GetTensorShape(&tensor), source_data,
GetTensorShape(&tensor), dequantized_data);
}
}
absl::Status CheckTensorIsAvailable(const TfLiteContext* context,
const TfLiteNode* tflite_node, int idx) {
// If tensor id is in range, it's guaranteed that it'll be available.

View File

@@ -69,10 +69,11 @@ void ConvertFloat16ToFloat32(size_t num_elements, const uint16_t* src,
float* dst);
template <typename T>
void DequantizeConstantTensor(const TfLiteTensor& tensor, const T* source_data,
inline void DequantizeConstantTensor(const TfLiteTensor& tensor,
const T* source_data,
float* dequantized_data) {
TfLiteAffineQuantization* quant_params =
reinterpret_cast<TfLiteAffineQuantization*>(tensor.quantization.params);
static_cast<TfLiteAffineQuantization*>(tensor.quantization.params);
if (quant_params->scale->size > 1) {
// Tensor is per-channel quantized.
PerChannelDequantizationParams op_params;