From 78688104bc118097a7968c864197a3c328f1c00b Mon Sep 17 00:00:00 2001
From: Nat Jeffries
Date: Thu, 23 Jul 2020 11:37:40 -0700
Subject: [PATCH] Fix allocator build errors in xtensa softmax, conv +
 depthwise conv kernels.

PiperOrigin-RevId: 322830325
Change-Id: I22eb3d1259db1390e6ad2c3caa588279b50fd674
---
 tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc          | 4 ++--
 .../lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc       | 4 ++--
 tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc       | 7 +++----
 3 files changed, 7 insertions(+), 8 deletions(-)

diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc
index dc39cc44e61..0e71bfbcb26 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/conv.cc
@@ -329,10 +329,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   const int num_channels = filter->dims->data[kConvQuantizedDimension];
   // Dynimically allocate per-channel quantization parameters.
   op_data->per_channel_output_multiplier =
-      reinterpret_cast<int32_t>(context->AllocatePersistentBuffer(
+      reinterpret_cast<int32_t*>(context->AllocatePersistentBuffer(
           context, num_channels * sizeof(int32_t)));
   op_data->per_channel_output_shift =
-      reinterpret_cast<int32_t>(context->AllocatePersistentBuffer(
+      reinterpret_cast<int32_t*>(context->AllocatePersistentBuffer(
           context, num_channels * sizeof(int32_t)));
   // All per-channel quantized tensors need valid zero point and scale arrays.
 
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc
index e7a37b6901d..656fb1b04cb 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/depthwise_conv.cc
@@ -377,10 +377,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];
   // Dynimically allocate per-channel quantization parameters.
   op_data->per_channel_output_multiplier =
-      reinterpret_cast<int32_t>(context->AllocatePersistentBuffer(
+      reinterpret_cast<int32_t*>(context->AllocatePersistentBuffer(
           context, num_channels * sizeof(int32_t)));
   op_data->per_channel_output_shift =
-      reinterpret_cast<int32_t>(context->AllocatePersistentBuffer(
+      reinterpret_cast<int32_t*>(context->AllocatePersistentBuffer(
           context, num_channels * sizeof(int32_t)));
   // All per-channel quantized tensors need valid zero point and scale arrays.
 
diff --git a/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc b/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
index 0fb3646e3e8..f222387c831 100644
--- a/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
+++ b/tensorflow/lite/micro/kernels/xtensa_hifimini/softmax.cc
@@ -167,10 +167,9 @@ TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
   // the scale and beta before calculating exp. It is mandatory to apply beta
   // and scale here, since each softmax op may have different beta and scale
   // values. Beta and scale will remain constant for a given softmax op.
-  void* allocated_ptr;
-  TF_LITE_ENSURE_STATUS(context->AllocatePersistentBuffer(
-      context, kInt8Range * sizeof(int16_t), &allocated_ptr));
-  op_data->exp_lut = static_cast<uint16_t*>(allocated_ptr);
+  op_data->exp_lut = static_cast<uint16_t*>(context->AllocatePersistentBuffer(
+      context, kInt8Range * sizeof(uint16_t)));
+  TF_LITE_ENSURE(context, op_data->exp_lut != nullptr);
 
   TF_LITE_ENSURE_STATUS(
       CalculateSoftmaxOpData(context, input, output, params, op_data));