Delete duplicate definition of DequantizeConstantTensor in model_builder.cc: it already exists in model_builder_helper.h. It was not generating a compiler error for multiple definition because this is a function template.
It actually had a slightly different implementation, which is undefined behavior (two differing definitions of the same function template violate the One Definition Rule). Keep the version that had recent updates. PiperOrigin-RevId: 314359564 Change-Id: I5f81fcdadedf6c3b2174ecc2fa9a8baa58fa1fc3
This commit is contained in:
parent
cf30d41ded
commit
41aea74ea2
@ -77,30 +77,6 @@ absl::Status NewPassthroughNode(GraphFloat32* graph, Node* node,
|
|||||||
return absl::OkStatus();
|
return absl::OkStatus();
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename T>
|
|
||||||
inline void DequantizeConstantTensor(const TfLiteTensor& tensor,
|
|
||||||
const T* source_data,
|
|
||||||
float* dequantized_data) {
|
|
||||||
TfLiteAffineQuantization* quant_params =
|
|
||||||
static_cast<TfLiteAffineQuantization*>(tensor.quantization.params);
|
|
||||||
if (quant_params->scale->size > 1) {
|
|
||||||
// Tensor is per-channel quantized.
|
|
||||||
PerChannelDequantizationParams op_params;
|
|
||||||
op_params.zero_point = quant_params->zero_point->data;
|
|
||||||
op_params.scale = quant_params->scale->data;
|
|
||||||
op_params.quantized_dimension = quant_params->quantized_dimension;
|
|
||||||
reference_ops::PerChannelDequantize(op_params, GetTensorShape(&tensor),
|
|
||||||
source_data, GetTensorShape(&tensor),
|
|
||||||
dequantized_data);
|
|
||||||
} else {
|
|
||||||
DequantizationParams op_params;
|
|
||||||
op_params.zero_point = tensor.params.zero_point;
|
|
||||||
op_params.scale = tensor.params.scale;
|
|
||||||
reference_ops::Dequantize(op_params, GetTensorShape(&tensor), source_data,
|
|
||||||
GetTensorShape(&tensor), dequantized_data);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
absl::Status CheckTensorIsAvailable(const TfLiteContext* context,
|
absl::Status CheckTensorIsAvailable(const TfLiteContext* context,
|
||||||
const TfLiteNode* tflite_node, int idx) {
|
const TfLiteNode* tflite_node, int idx) {
|
||||||
// If tensor id is in range, it's guaranteed that it'll be available.
|
// If tensor id is in range, it's guaranteed that it'll be available.
|
||||||
|
@ -69,10 +69,11 @@ void ConvertFloat16ToFloat32(size_t num_elements, const uint16_t* src,
|
|||||||
float* dst);
|
float* dst);
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
void DequantizeConstantTensor(const TfLiteTensor& tensor, const T* source_data,
|
inline void DequantizeConstantTensor(const TfLiteTensor& tensor,
|
||||||
|
const T* source_data,
|
||||||
float* dequantized_data) {
|
float* dequantized_data) {
|
||||||
TfLiteAffineQuantization* quant_params =
|
TfLiteAffineQuantization* quant_params =
|
||||||
reinterpret_cast<TfLiteAffineQuantization*>(tensor.quantization.params);
|
static_cast<TfLiteAffineQuantization*>(tensor.quantization.params);
|
||||||
if (quant_params->scale->size > 1) {
|
if (quant_params->scale->size > 1) {
|
||||||
// Tensor is per-channel quantized.
|
// Tensor is per-channel quantized.
|
||||||
PerChannelDequantizationParams op_params;
|
PerChannelDequantizationParams op_params;
|
||||||
|
Loading…
Reference in New Issue
Block a user