Eliminate VLAs
VLAs complicate static analysis and bloat stack size. Replace VLA allocation with STL containers. PiperOrigin-RevId: 286101915 Change-Id: I2f6a934824fdbd7c54f4c2618de3f24ce9094205
This commit is contained in:
parent
c69bd15c41
commit
d24004cf48
@@ -322,9 +322,9 @@ string Env::GetExecutablePath() {
|
|||||||
#ifdef __APPLE__
|
#ifdef __APPLE__
|
||||||
uint32_t buffer_size(0U);
|
uint32_t buffer_size(0U);
|
||||||
_NSGetExecutablePath(nullptr, &buffer_size);
|
_NSGetExecutablePath(nullptr, &buffer_size);
|
||||||
char unresolved_path[buffer_size];
|
std::vector<char> unresolved_path(buffer_size);
|
||||||
_NSGetExecutablePath(unresolved_path, &buffer_size);
|
_NSGetExecutablePath(unresolved_path.data(), &buffer_size);
|
||||||
CHECK(realpath(unresolved_path, exe_path));
|
CHECK(realpath(unresolved_path.data(), exe_path));
|
||||||
#elif defined(__FreeBSD__)
|
#elif defined(__FreeBSD__)
|
||||||
int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1};
|
int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1};
|
||||||
size_t exe_path_size = PATH_MAX;
|
size_t exe_path_size = PATH_MAX;
|
||||||
|
@@ -22,6 +22,7 @@
|
|||||||
#include <fstream>
|
#include <fstream>
|
||||||
#include <iostream>
|
#include <iostream>
|
||||||
#include <queue>
|
#include <queue>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
#include "tensorflow/lite/kernels/register.h"
|
#include "tensorflow/lite/kernels/register.h"
|
||||||
#include "tensorflow/lite/model.h"
|
#include "tensorflow/lite/model.h"
|
||||||
@@ -357,11 +358,11 @@ void ProcessInputWithQuantizedModel(
|
|||||||
uint8_t* quantized_output = interpreter->typed_output_tensor<uint8_t>(0);
|
uint8_t* quantized_output = interpreter->typed_output_tensor<uint8_t>(0);
|
||||||
int32_t zero_point = input_tensor->params.zero_point;
|
int32_t zero_point = input_tensor->params.zero_point;
|
||||||
float scale = input_tensor->params.scale;
|
float scale = input_tensor->params.scale;
|
||||||
float output[output_size];
|
std::vector<float> output(output_size);
|
||||||
for (int i = 0; i < output_size; ++i) {
|
for (int i = 0; i < output_size; ++i) {
|
||||||
output[i] = (quantized_output[i] - zero_point) * scale;
|
output[i] = (quantized_output[i] - zero_point) * scale;
|
||||||
}
|
}
|
||||||
GetTopN(output, output_size, kNumResults, kThreshold, &top_results);
|
GetTopN(output.data(), output_size, kNumResults, kThreshold, &top_results);
|
||||||
} else {
|
} else {
|
||||||
float* output = interpreter->typed_output_tensor<float>(0);
|
float* output = interpreter->typed_output_tensor<float>(0);
|
||||||
GetTopN(output, output_size, kNumResults, kThreshold, &top_results);
|
GetTopN(output, output_size, kNumResults, kThreshold, &top_results);
|
||||||
|
@@ -14,6 +14,8 @@
|
|||||||
|
|
||||||
#import "tensorflow/lite/experimental/objc/apis/TFLInterpreter.h"
|
#import "tensorflow/lite/experimental/objc/apis/TFLInterpreter.h"
|
||||||
|
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
#import "TFLErrorUtil.h"
|
#import "TFLErrorUtil.h"
|
||||||
#import "TFLQuantizationParameters+Internal.h"
|
#import "TFLQuantizationParameters+Internal.h"
|
||||||
#import "TFLTensor+Internal.h"
|
#import "TFLTensor+Internal.h"
|
||||||
@@ -168,7 +170,7 @@ static void TFLInterpreterErrorReporter(void *user_data, const char *format, va_
|
|||||||
return NO;
|
return NO;
|
||||||
}
|
}
|
||||||
|
|
||||||
int cDimensions[self.inputTensorCount];
|
std::vector<int> cDimensions(self.inputTensorCount);
|
||||||
for (int dimIndex = 0; dimIndex < shape.count; ++dimIndex) {
|
for (int dimIndex = 0; dimIndex < shape.count; ++dimIndex) {
|
||||||
int dimension = shape[dimIndex].intValue;
|
int dimension = shape[dimIndex].intValue;
|
||||||
if (dimension <= 0) {
|
if (dimension <= 0) {
|
||||||
@@ -181,7 +183,7 @@ static void TFLInterpreterErrorReporter(void *user_data, const char *format, va_
|
|||||||
cDimensions[dimIndex] = dimension;
|
cDimensions[dimIndex] = dimension;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (TfLiteInterpreterResizeInputTensor(self.interpreter, (int32_t)index, cDimensions,
|
if (TfLiteInterpreterResizeInputTensor(self.interpreter, (int32_t)index, cDimensions.data(),
|
||||||
(int32_t)shape.count) != kTfLiteOk) {
|
(int32_t)shape.count) != kTfLiteOk) {
|
||||||
NSString *errorDescription = [NSString
|
NSString *errorDescription = [NSString
|
||||||
stringWithFormat:@"Failed to resize input tensor at index (%lu).", (unsigned long)index];
|
stringWithFormat:@"Failed to resize input tensor at index (%lu).", (unsigned long)index];
|
||||||
|
Loading…
Reference in New Issue
Block a user