Switch int32_t to int to be consistent with other usages and function callers
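
This relies on int and int32_t having the same width on every toolchain that
builds the Hexagon delegate. A compile-time guard along these lines (a sketch,
not part of this change) would make the assumption explicit:

    #include <cstdint>

    // Hypothetical guard: the const nodes and outputs below are sized with
    // sizeof(int), so fail the build if int ever differs from int32_t.
    static_assert(sizeof(int) == sizeof(int32_t),
                  "Hexagon delegate assumes int is exactly 32 bits");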

PiperOrigin-RevId: 314620643
Change-Id: I9e48cd27f72f4ad771e8448f2b93f4dcdaba9ecb
commit 35875e5b96
parent 21e60ac4e8
Karim Nosir, 2020-06-03 15:52:18 -07:00, committed by TensorFlower Gardener

13 changed files with 28 additions and 28 deletions


@@ -50,7 +50,7 @@ TfLiteStatus ArgMinMaxOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
     axis_value += input_tensor.dims->size;
   }
   auto* input_axis_const = graph_builder_->AddConstNodeWithData(
-      kScalarShape, reinterpret_cast<char*>(&axis_value), sizeof(int32_t));
+      kScalarShape, reinterpret_cast<char*>(&axis_value), sizeof(int));
   AddInput(TensorID(input_axis_const->GetID(), 0));
   // Compute Min/Max


@@ -75,7 +75,7 @@ TfLiteStatus ArithmeticOpBuilder::PopulateSubGraph(
   }
   if (op_node_.op_type == OP_QuantizedMul_8x8to32) {
-    const auto& math_out = AddOutput(sizeof(int32_t), 4,
+    const auto& math_out = AddOutput(sizeof(int), 4,
                                      {output_batch_size, output_height_size,
                                       output_width_size, output_depth_size});
     const auto& math_out_min = AddOutput(sizeof(float), 4, {1, 1, 1, 1});


@@ -29,10 +29,10 @@ TfLiteStatus BatchSeqBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
   // Options is currently 0 or 1, 0 is default and batches
   // will run in increasing order, this behavior can be disabled by setting 1.
   // Refer to Hexagon NN docs for more details.
-  int32_t config[] = {max_size_for_batch_, 1, 0};
+  int config[] = {max_size_for_batch_, 1, 0};
   auto* input_config = graph_builder_->AddConstNodeWithData(
-      config_shape, reinterpret_cast<char*>(&config), sizeof(int32_t) * 3);
+      config_shape, reinterpret_cast<char*>(&config), sizeof(int) * 3);
   AddInput(TensorID(input_config->GetID(), 0));
   // Add Input batch details.
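
Since config is a local array, sizeof(config) already equals sizeof(int) * 3,
so a variant of the call above (a sketch, not part of this commit) could avoid
hard-coding the element count:

    int config[] = {max_size_for_batch_, 1, 0};
    auto* input_config = graph_builder_->AddConstNodeWithData(
        config_shape, reinterpret_cast<char*>(&config), sizeof(config));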


@@ -199,7 +199,7 @@ TfLiteStatus Conv2dOpBuilder::ProcessPerChannelQuantizedBias(
   const float input_scale = input_quant_params->scale->data[0];
   // Now dequantize bias values to float first, to adjust for the
   // normalization of channel scales.
-  int32_t* bias_data = bias_tensor.data.i32;
+  int* bias_data = bias_tensor.data.i32;
   const int bias_size = NumElements(&bias_tensor);
   if (bias_size != num_scale_values_) {
     TF_LITE_KERNEL_LOG(
@@ -221,10 +221,10 @@ TfLiteStatus Conv2dOpBuilder::ProcessPerChannelQuantizedBias(
   *bias_max = *bias_max * 8;
   *bias_min = -1 * *bias_max;
   // Now requantize the bias values to the new min/max values.
-  std::vector<int32_t> preprocessed_bias_data;
+  std::vector<int> preprocessed_bias_data;
   preprocessed_bias_data.reserve(num_scale_values_);
   for (int i = 0; i < bias_size; ++i) {
-    preprocessed_bias_data.push_back(static_cast<int32_t>(
+    preprocessed_bias_data.push_back(static_cast<int>(
         std::round(std::pow(2, 31) * (dequantized_bias[i] / *bias_max))));
   }
   // Add nodes for bias.
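
The loop above maps each dequantized bias value onto the 32-bit integer range
relative to the widened symmetric [-bias_max, bias_max] interval. The same
mapping as a standalone helper (a sketch; the function name is hypothetical):

    #include <cmath>
    #include <vector>

    // Requantize float biases into 32-bit ints over a symmetric range
    // [-bias_max, bias_max]: 2^31 scales a ratio in (-1, 1) onto the
    // int32 range, mirroring the delegate's loop.
    std::vector<int> RequantizeBias(const std::vector<float>& bias,
                                    float bias_max) {
      std::vector<int> out;
      out.reserve(bias.size());
      for (float b : bias) {
        out.push_back(static_cast<int>(
            std::round(std::pow(2, 31) * (b / bias_max))));
      }
      return out;
    }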


@@ -76,7 +76,7 @@ TfLiteStatus AddFullyConnectedHelper(const TfLiteIntArray* inputs,
   GetDims(&output_batch_size, &output_height_size, &output_width_size,
           &output_depth_size, context->tensors[outputs->data[0]].dims);
   const auto& matmul_out =
-      matmul_op->AddOutput(sizeof(int32_t), 4,
+      matmul_op->AddOutput(sizeof(int), 4,
                            {output_batch_size, output_height_size,
                             output_width_size, output_depth_size});
   const auto& matmul_out_min =
@@ -108,7 +108,7 @@ TfLiteStatus AddFullyConnectedHelper(const TfLiteIntArray* inputs,
   bias_add_op->AddInput(OpBuilder::TensorID(bias_min_const->GetID(), 0));
   bias_add_op->AddInput(OpBuilder::TensorID(bias_max_const->GetID(), 0));
   matmul_and_bias_out =
-      bias_add_op->AddOutput(sizeof(int32_t), 4,
+      bias_add_op->AddOutput(sizeof(int), 4,
                              {output_batch_size, output_height_size,
                               output_width_size, output_depth_size});
   matmul_and_bias_out_min =


@@ -137,8 +137,8 @@ class OpBuilder {
                                          std::numeric_limits<int8_t>::max());
     } else if (tensor.type == kTfLiteInt32) {
       return ComputeMinAndMaxQuantValues(tensor, min, max,
-                                         std::numeric_limits<int32_t>::min(),
-                                         std::numeric_limits<int32_t>::max());
+                                         std::numeric_limits<int>::min(),
+                                         std::numeric_limits<int>::max());
     }
     return kTfLiteError;
   }
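
The rewritten limits are equivalent wherever int32_t is a typedef for int,
which holds for the 32- and 64-bit toolchains TFLite targets; a quick
compile-time check (a standalone sketch, not part of this change):

    #include <cstdint>
    #include <limits>

    // Where int32_t aliases int, both specializations report the same range,
    // so the quantization bounds computed above are unchanged.
    static_assert(std::numeric_limits<int>::min() ==
                          std::numeric_limits<std::int32_t>::min() &&
                      std::numeric_limits<int>::max() ==
                          std::numeric_limits<std::int32_t>::max(),
                  "int and int32_t share the same value range");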


@@ -43,23 +43,23 @@ TfLiteStatus SliceOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
   // Start / Size
   const auto& begin_tensor = context->tensors[inputs->data[1]];
   const auto& size_tensor = context->tensors[inputs->data[2]];
-  std::vector<int32_t> begins, sizes;
+  std::vector<int> begins, sizes;
   if (begin_tensor.type == kTfLiteInt32) {
-    GetBeginAndSizeVectors<int32_t>(input_tensor.dims->size, &begin_tensor,
-                                    &size_tensor, &begins, &sizes);
+    GetBeginAndSizeVectors<int>(input_tensor.dims->size, &begin_tensor,
+                                &size_tensor, &begins, &sizes);
   } else if (begin_tensor.type == kTfLiteInt64) {
     GetBeginAndSizeVectors<int64_t>(input_tensor.dims->size, &begin_tensor,
                                     &size_tensor, &begins, &sizes);
   } else {
     return kTfLiteError;
   }
-  const int32_t begins_shape[] = {1, 1, 1, static_cast<int32_t>(begins.size())};
+  const int begins_shape[] = {1, 1, 1, static_cast<int>(begins.size())};
   auto begins_node = graph_builder_->AddConstNodeWithData(
       begins_shape, reinterpret_cast<char*>(begins.data()),
-      sizeof(int32_t) * begins.size());
+      sizeof(int) * begins.size());
   auto sizes_node = graph_builder_->AddConstNodeWithData(
       begins_shape, reinterpret_cast<char*>(sizes.data()),
-      sizeof(int32_t) * begins.size());
+      sizeof(int) * begins.size());
   AddInput(TensorID(begins_node->GetID(), 0));
   AddInput(TensorID(sizes_node->GetID(), 0));


@@ -48,7 +48,7 @@ TfLiteStatus SplitOpBuilder::PopulateSubGraph(const TfLiteIntArray* inputs,
     axis_value += input_tensor.dims->size;
   }
   auto* input_axis_const = graph_builder_->AddConstNodeWithData(
-      kScalarShape, reinterpret_cast<char*>(&axis_value), sizeof(int32_t));
+      kScalarShape, reinterpret_cast<char*>(&axis_value), sizeof(int));
   AddInput(TensorID(input_axis_const->GetID(), 0));
   // Input data tensor & min/max.


@@ -28,8 +28,8 @@ class ArgBaseOpModel : public SingleOpModelWithHexagon {
   int input() const { return input_; }
-  std::vector<int32_t> GetInt32Output() const {
-    return ExtractVector<int32_t>(output_);
+  std::vector<int> GetInt32Output() const {
+    return ExtractVector<int>(output_);
   }
   std::vector<int> GetOutputShape() { return GetTensorShape(output_); }


@@ -109,7 +109,7 @@ class QuantizedConvolutionOpModel : public SingleOpModelWithHexagon {
   }
   void SetBias(std::initializer_list<float> data) {
-    QuantizeAndPopulate<int32_t>(bias_, data);
+    QuantizeAndPopulate<int>(bias_, data);
   }
   template <typename T>


@@ -69,7 +69,7 @@ class FullyConnectedOpModel : public SingleOpModelWithHexagon {
   }
   void SetBias(const std::vector<float>& data) {
-    QuantizeAndPopulate<int32_t>(bias_, data);
+    QuantizeAndPopulate<int>(bias_, data);
  }
   template <typename T>


@@ -55,10 +55,10 @@ class SliceOpModel : public SingleOpModelWithHexagon {
 };
 TEST(SliceOpTest, Input_1D_Uint8) {
-  SliceOpModel<int32_t> m(/*input=*/{TensorType_UINT8, {4}, -10, 10},
-                          /*output=*/{TensorType_UINT8, {2}, -10, 10},
-                          {TensorType_INT32, {1}}, {TensorType_INT32, {1}}, {1},
-                          {2});
+  SliceOpModel<int> m(/*input=*/{TensorType_UINT8, {4}, -10, 10},
+                      /*output=*/{TensorType_UINT8, {2}, -10, 10},
+                      {TensorType_INT32, {1}}, {TensorType_INT32, {1}}, {1},
+                      {2});
   m.SetInput<uint8_t>({1, 2, 3, 4});
   m.ApplyDelegateAndInvoke();
   EXPECT_THAT(m.GetOutputShape(), ElementsAreArray({2}));
@@ -67,7 +67,7 @@ TEST(SliceOpTest, Input_1D_Uint8) {
 }
 TEST(SliceOpTest, Input_2D_Uint8) {
-  SliceOpModel<int32_t> m(
+  SliceOpModel<int> m(
       /*input=*/{TensorType_UINT8, {2, 3}, -10, 10},
       /*output=*/{TensorType_UINT8, {1, 2}, -10, 10}, {TensorType_INT32, {2}},
       {TensorType_INT32, {2}}, {1, 0}, {1, 2});


@@ -35,7 +35,7 @@ class TransposeOpModel : public SingleOpModelWithHexagon {
                  CreateTransposeOptions(builder_).Union());
     BuildInterpreter({GetShape(input_)});
     if (!const_perm) {
-      PopulateTensor<int32_t>(perm_, perm);
+      PopulateTensor<int>(perm_, perm);
     }
   }