Fix allocator build errors in xtensa softmax, conv + depthwise conv kernels.

PiperOrigin-RevId: 322830325
Change-Id: I22eb3d1259db1390e6ad2c3caa588279b50fd674
Nat Jeffries, 2020-07-23 11:37:40 -07:00, committed by TensorFlower Gardener
commit 78688104bc (parent 6bfea7624a)
3 changed files with 7 additions and 8 deletions


@@ -329,10 +329,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   const int num_channels = filter->dims->data[kConvQuantizedDimension];
   // Dynimically allocate per-channel quantization parameters.
   op_data->per_channel_output_multiplier =
-      reinterpret_cast<int32_t>(context->AllocatePersistentBuffer(
+      reinterpret_cast<int32_t*>(context->AllocatePersistentBuffer(
           context, num_channels * sizeof(int32_t)));
   op_data->per_channel_output_shift =
-      reinterpret_cast<int32_t>(context->AllocatePersistentBuffer(
+      reinterpret_cast<int32_t*>(context->AllocatePersistentBuffer(
           context, num_channels * sizeof(int32_t)));
   // All per-channel quantized tensors need valid zero point and scale arrays.
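The conv and depthwise conv hunks are the same one-character fix: AllocatePersistentBuffer returns void*, and the old code cast that result to the integer type int32_t instead of the pointer type int32_t*, so the assignment to the int32_t* fields failed to compile. A minimal standalone sketch of the corrected pattern follows; FakeContext, MallocAllocator, and PreparePerChannelBuffers are hypothetical stand-ins for TfLiteContext, its persistent arena, and the kernel's Prepare step, not the real TFLM API.

#include <cstdint>
#include <cstdlib>

// Hypothetical stand-in for TfLiteContext; the real TFLM context at this
// revision exposes an arena allocator with the shape
//   void* AllocatePersistentBuffer(TfLiteContext* ctx, size_t bytes);
struct FakeContext {
  void* (*AllocatePersistentBuffer)(FakeContext* ctx, size_t bytes);
};

// Mirrors the per-channel fields touched by the diff.
struct OpData {
  int32_t* per_channel_output_multiplier;
  int32_t* per_channel_output_shift;
};

bool PreparePerChannelBuffers(FakeContext* context, OpData* op_data,
                              int num_channels) {
  // The build error being fixed: reinterpret_cast<int32_t>(...) yields an
  // integer, which cannot be assigned to an int32_t* member. The allocator
  // returns void*, so the cast must be to the pointer type int32_t*.
  op_data->per_channel_output_multiplier =
      reinterpret_cast<int32_t*>(context->AllocatePersistentBuffer(
          context, num_channels * sizeof(int32_t)));
  op_data->per_channel_output_shift =
      reinterpret_cast<int32_t*>(context->AllocatePersistentBuffer(
          context, num_channels * sizeof(int32_t)));
  return op_data->per_channel_output_multiplier != nullptr &&
         op_data->per_channel_output_shift != nullptr;
}

// Usage with a malloc-backed allocator standing in for the persistent arena.
static void* MallocAllocator(FakeContext*, size_t bytes) {
  return std::malloc(bytes);
}

int main() {
  FakeContext ctx{MallocAllocator};
  OpData data{};
  return PreparePerChannelBuffers(&ctx, &data, /*num_channels=*/8) ? 0 : 1;
}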


@@ -377,10 +377,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
   const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension];
   // Dynimically allocate per-channel quantization parameters.
   op_data->per_channel_output_multiplier =
-      reinterpret_cast<int32_t>(context->AllocatePersistentBuffer(
+      reinterpret_cast<int32_t*>(context->AllocatePersistentBuffer(
           context, num_channels * sizeof(int32_t)));
   op_data->per_channel_output_shift =
-      reinterpret_cast<int32_t>(context->AllocatePersistentBuffer(
+      reinterpret_cast<int32_t*>(context->AllocatePersistentBuffer(
           context, num_channels * sizeof(int32_t)));
   // All per-channel quantized tensors need valid zero point and scale arrays.


@@ -167,10 +167,9 @@ TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
   // the scale and beta before calculating exp. It is mandatory to apply beta
   // and scale here, since each softmax op may have different beta and scale
   // values. Beta and scale will remain constant for a given softmax op.
-  void* allocated_ptr;
-  TF_LITE_ENSURE_STATUS(context->AllocatePersistentBuffer(
-      context, kInt8Range * sizeof(int16_t), &allocated_ptr));
-  op_data->exp_lut = static_cast<uint16_t*>(allocated_ptr);
+  op_data->exp_lut = static_cast<uint16_t*>(context->AllocatePersistentBuffer(
+      context, kInt8Range * sizeof(uint16_t)));
+  TF_LITE_ENSURE(context, op_data->exp_lut != nullptr);
   TF_LITE_ENSURE_STATUS(
       CalculateSoftmaxOpData(context, input, output, params, op_data));
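The softmax hunk is a different failure: this kernel was still calling the older two-step allocator (TfLiteStatus-returning, with a void** out-parameter), which no longer matched the context's API. A hedged sketch of the migrated pattern follows, again with hypothetical stand-ins (FakeContext, kInt8Range, SoftmaxPrepareBuffers) rather than the real TFLM types.

#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Stand-ins, not the real TFLM API: kInt8Range covers the 256 possible
// int8 inputs, and FakeContext mimics the newer allocator shape
//   void* AllocatePersistentBuffer(TfLiteContext* ctx, size_t bytes);
constexpr int kInt8Range = 256;

struct FakeContext {
  void* (*AllocatePersistentBuffer)(FakeContext* ctx, size_t bytes);
};

struct SoftmaxOpData {
  uint16_t* exp_lut;  // exp() lookup table, filled in later by the kernel
};

bool SoftmaxPrepareBuffers(FakeContext* context, SoftmaxOpData* op_data) {
  // Newer API shape: the allocator returns the buffer (or nullptr) directly,
  // so the old out-parameter plus TfLiteStatus sequence is replaced by a
  // cast and an explicit null check, as TF_LITE_ENSURE does in the diff.
  op_data->exp_lut = static_cast<uint16_t*>(context->AllocatePersistentBuffer(
      context, kInt8Range * sizeof(uint16_t)));
  return op_data->exp_lut != nullptr;
}

int main() {
  // Captureless lambda decays to the allocator function pointer.
  FakeContext ctx{
      [](FakeContext*, size_t bytes) { return std::malloc(bytes); }};
  SoftmaxOpData data{};
  return SoftmaxPrepareBuffers(&ctx, &data) ? 0 : 1;
}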