Separate AddVariableInput and AddInput functions in test_util.h/.cc

Some of the usages were wrong before; the bool input was invoked as {}.

PiperOrigin-RevId: 330623020
Change-Id: I9505c199297d6a1e7465c0610184652168f61430
This commit is contained in:
Robert David 2020-09-08 17:37:10 -07:00 committed by TensorFlower Gardener
parent 443caf8a5b
commit bec38f2829
14 changed files with 64 additions and 70 deletions

View File

@@ -2682,7 +2682,7 @@ class RNNOpModel : public SingleOpModelWithNNAPI {
weights_ = AddInput(weights);
recurrent_weights_ = AddInput(recurrent_weights);
bias_ = AddInput(TensorType_FLOAT32);
hidden_state_ = AddInput(TensorType_FLOAT32, true);
hidden_state_ = AddVariableInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(
BuiltinOperator_RNN, BuiltinOptions_RNNOptions,
@@ -2872,9 +2872,8 @@ class BaseSVDFOpModel : public SingleOpModelWithNNAPI {
// when using NNAPI delegate.
bias_ = AddInput(TensorType_FLOAT32);
const int num_filters = units * rank;
activation_state_ = AddInput(
TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}},
/*is_variable=*/true);
activation_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}});
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(
BuiltinOperator_SVDF, BuiltinOptions_SVDFOptions,
@@ -3098,8 +3097,8 @@ class LSTMOpModel : public SingleOpModelWithNNAPI {
}
// Adding the 2 input state tensors.
input_activation_state_ = AddInput(TensorType_FLOAT32, true);
input_cell_state_ = AddInput(TensorType_FLOAT32, true);
input_activation_state_ = AddVariableInput(TensorType_FLOAT32);
input_cell_state_ = AddVariableInput(TensorType_FLOAT32);
const bool use_layer_norm = input_shapes.size() > 20;
// Layer norm weights.

View File

@@ -37,7 +37,7 @@ class GRUOpModel : public SingleOpModel {
: n_batch_(n_batch), n_input_(n_input), n_output_(n_output) {
input_ = AddInput(TensorType_FLOAT32);
input_state_ =
AddInput(TensorData{TensorType_FLOAT32, {n_batch, n_output}}, true);
AddVariableInput(TensorData{TensorType_FLOAT32, {n_batch, n_output}});
gate_weight_ = AddInput(TensorType_FLOAT32);
gate_bias_ = AddInput(TensorType_FLOAT32);
candidate_weight_ = AddInput(TensorType_FLOAT32);

View File

@@ -179,7 +179,7 @@ class RNNOpModel : public SingleOpModel {
weights_ = AddInput(weights);
recurrent_weights_ = AddInput(recurrent_weights);
bias_ = AddInput(TensorType_FLOAT32);
hidden_state_ = AddInput(TensorType_FLOAT32, true);
hidden_state_ = AddVariableInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_RNN, BuiltinOptions_RNNOptions,
CreateRNNOptions(builder_, ActivationFunctionType_RELU,

View File

@@ -160,20 +160,16 @@ class BidirectionalLSTMOpModel : public SingleOpModel {
}
// Adding the 2 input state tensors.
fw_input_activation_state_ =
AddInput(TensorData{TensorType_FLOAT32, {n_fw_output_ * n_batch_}},
/*is_variable=*/true);
fw_input_cell_state_ =
AddInput(TensorData{TensorType_FLOAT32, {n_fw_cell_ * n_batch_}},
/*is_variable=*/true);
fw_input_activation_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {n_fw_output_ * n_batch_}});
fw_input_cell_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {n_fw_cell_ * n_batch_}});
// Adding the 2 input state tensors.
bw_input_activation_state_ =
AddInput(TensorData{TensorType_FLOAT32, {n_bw_output_ * n_batch_}},
/*is_variable=*/true);
bw_input_cell_state_ =
AddInput(TensorData{TensorType_FLOAT32, {n_bw_cell_ * n_batch_}},
/*is_variable=*/true);
bw_input_activation_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {n_bw_output_ * n_batch_}});
bw_input_cell_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {n_bw_cell_ * n_batch_}});
fw_output_ = AddOutput(TensorType_FLOAT32);

View File

@@ -680,11 +680,11 @@ class BidirectionalRNNOpModel : public SingleOpModel {
fw_weights_ = AddInput(tensor_type);
fw_recurrent_weights_ = AddInput(tensor_type);
fw_bias_ = AddInput(TensorType_FLOAT32);
fw_hidden_state_ = AddInput(TensorType_FLOAT32, true);
fw_hidden_state_ = AddVariableInput(TensorType_FLOAT32);
bw_weights_ = AddInput(tensor_type);
bw_recurrent_weights_ = AddInput(tensor_type);
bw_bias_ = AddInput(TensorType_FLOAT32);
bw_hidden_state_ = AddInput(TensorType_FLOAT32, true);
bw_hidden_state_ = AddVariableInput(TensorType_FLOAT32);
const auto input_shape =
(time_major) ? std::vector<int>({sequence_len_, batches_, input_size_})

View File

@@ -101,8 +101,8 @@ class LSTMOpModel : public SingleOpModel {
}
// Adding the 2 state tensors.
AddInput({TensorType_FLOAT32, {n_batch, n_output}}, true);
AddInput({TensorType_FLOAT32, {n_batch, n_cell}}, true);
AddVariableInput({TensorType_FLOAT32, {n_batch, n_output}});
AddVariableInput({TensorType_FLOAT32, {n_batch, n_cell}});
// Layer norm weights.
if (!model_has_legacy_20_inputs) {
@@ -1412,16 +1412,14 @@ class LSTMIntegerOpModel : public SingleOpModel {
}
// Adding the 2 state tensors.
AddInput({TensorType_INT16,
{n_batch, n_output},
ranges[18].first,
ranges[18].second},
true);
AddInput({TensorType_INT16,
{n_batch, n_cell},
ranges[19].first,
ranges[19].second},
true);
AddVariableInput({TensorType_INT16,
{n_batch, n_output},
ranges[18].first,
ranges[18].second});
AddVariableInput({TensorType_INT16,
{n_batch, n_cell},
ranges[19].first,
ranges[19].second});
// Layer norm weights.
if (use_layer_norm) {
@@ -2204,12 +2202,10 @@ class HybridSparseLSTMOpModel : public ::tflite::SingleOpModel {
}
// Adding the 2 state tensors.
output_state_ = AddInput(::tflite::TensorData{::tflite::TensorType_FLOAT32,
{n_output_ * n_batch_}},
true);
cell_state_ = AddInput(::tflite::TensorData{::tflite::TensorType_FLOAT32,
{n_cell_ * n_batch_}},
true);
output_state_ = AddVariableInput(::tflite::TensorData{
::tflite::TensorType_FLOAT32, {n_output_ * n_batch_}});
cell_state_ = AddVariableInput(::tflite::TensorData{
::tflite::TensorType_FLOAT32, {n_cell_ * n_batch_}});
if (use_cifg) {
input_layer_norm_weights_ = AddNullInput();

View File

@@ -77,7 +77,7 @@ class NonMaxSuppressionV4OpModel : public BaseNMSOp {
input_max_output_size_ =
AddConstInput(TensorType_INT32, {max_output_size});
} else {
input_max_output_size_ = AddInput(TensorType_INT32, {});
input_max_output_size_ = AddInput(TensorType_INT32);
}
input_iou_threshold_ = AddConstInput(TensorType_FLOAT32, {iou_threshold});
input_score_threshold_ = AddInput({TensorType_FLOAT32, {}});
@@ -168,7 +168,7 @@ class NonMaxSuppressionV5OpModel : public BaseNMSOp {
input_max_output_size_ =
AddConstInput(TensorType_INT32, {max_output_size});
} else {
input_max_output_size_ = AddInput(TensorType_INT32, {});
input_max_output_size_ = AddInput(TensorType_INT32);
}
input_iou_threshold_ = AddConstInput(TensorType_FLOAT32, {iou_threshold});
input_score_threshold_ = AddInput({TensorType_FLOAT32, {}});

View File

@@ -94,10 +94,10 @@ class LSTMOpModel : public SingleOpModel {
}
// Adding the 2 input state tensors.
input_activation_state_ =
AddInput(TensorData{TensorType_FLOAT32, {n_output_ * n_batch_}}, true);
input_activation_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {n_output_ * n_batch_}});
input_cell_state_ =
AddInput(TensorData{TensorType_FLOAT32, {n_cell_ * n_batch_}}, true);
AddVariableInput(TensorData{TensorType_FLOAT32, {n_cell_ * n_batch_}});
output_ = AddOutput(TensorType_FLOAT32);

View File

@@ -61,9 +61,8 @@ class QuantizedLSTMOpModel : public MultiOpModel {
input_ = AddInput(input_tensor_data);
prev_output_ =
AddInput({TensorType_UINT8, output_shape, 0.0f, 0.0f, 1. / 128., 128},
/*is_variable=*/true);
prev_output_ = AddVariableInput(
{TensorType_UINT8, output_shape, 0.0f, 0.0f, 1. / 128., 128});
// Biases and Weights have to be constant in order to allow NNAPI
// delegation
weights_ = AddConstInput<uint8_t>({TensorType_UINT8, weight_shape, 0.0f,
@@ -72,9 +71,8 @@ class QuantizedLSTMOpModel : public MultiOpModel {
biases_ = AddConstInput<int32_t>(
{TensorType_INT32, bias_shape, 0.0f, 0.0f, weightsScale / 128, 0},
biases);
prev_cell_state_ =
AddInput({TensorType_INT16, state_shape, 0.0f, 0.0f, 1. / 2048., 0},
/*is_variable=*/true);
prev_cell_state_ = AddVariableInput(
{TensorType_INT16, state_shape, 0.0f, 0.0f, 1. / 2048., 0});
sum_out_ = AddOutput(input_tensor_data);

View File

@@ -143,9 +143,8 @@ class BaseSVDFOpModel : public SingleOpModel {
weights_time_ = AddInput(weights_time_type);
bias_ = AddNullInput();
const int num_filters = units * rank;
activation_state_ = AddInput(
TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}},
/*is_variable=*/true);
activation_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {batches, memory_size * num_filters}});
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_SVDF, BuiltinOptions_SVDFOptions,
CreateSVDFOptions(builder_, rank, ActivationFunctionType_NONE,
@@ -482,9 +481,8 @@ class IntegerSVDFOpModel : public SingleOpModel {
weights_time_ =
AddInput({TensorType_INT16, {num_filters, memory_size}, -1, 1});
bias_ = AddInput({TensorType_INT32, {units}, -512, 512});
activation_state_ = AddInput(
{TensorType_INT16, {batches, memory_size * num_filters}, -16, 16},
/*is_variable=*/true);
activation_state_ = AddVariableInput(
{TensorType_INT16, {batches, memory_size * num_filters}, -16, 16});
output_ = AddOutput({TensorType_INT8, {batches, units}, -0.5, 0.5});
SetBuiltinOp(
BuiltinOperator_SVDF, BuiltinOptions_SVDFOptions,

View File

@@ -79,12 +79,23 @@ std::vector<Matcher<std::complex<float>>> ArrayComplex64Near(
return matchers;
}
int SingleOpModel::AddInput(const TensorData& t, bool is_variable) {
int SingleOpModel::AddInput(const TensorData& t) {
int id = 0;
if (t.per_channel_quantization) {
id = AddTensorPerChannelQuant(t);
} else {
id = AddTensor<float>(t, {}, is_variable);
id = AddTensor<float>(t, {});
}
inputs_.push_back(id);
return id;
}
int SingleOpModel::AddVariableInput(const TensorData& t) {
int id = 0;
if (t.per_channel_quantization) {
id = AddTensorPerChannelQuant(t);
} else {
id = AddTensor<float>(t, {}, true);
}
inputs_.push_back(id);
return id;

View File

@@ -188,10 +188,8 @@ class SingleOpModel {
SingleOpModel& operator=(const SingleOpModel&) = delete;
// Add a TensorType input tensor and return its index.
int AddInput(TensorType type, bool is_variable = false) {
return AddInput(TensorData{type}, is_variable);
}
int AddInput(const TensorData& t, bool is_variable = false);
int AddInput(const TensorData& t);
int AddVariableInput(const TensorData& t);
int AddIntermediate(TensorType type, const std::vector<float>& scale,
const std::vector<int64_t>& zero_point);
@@ -378,7 +376,6 @@ class SingleOpModel {
int AddNullInput();
// Add a TensorType output tensor and return its index.
int AddOutput(TensorType type) { return AddOutput(TensorData{type}); }
int AddOutput(const TensorData& t);
template <typename T>

View File

@@ -101,11 +101,10 @@ class UnidirectionalLSTMOpModel : public SingleOpModel {
}
// Adding the 2 state tensors.
output_state_ =
AddInput(TensorData{TensorType_FLOAT32, {n_output_ * n_batch_}},
/*is_variable=*/true);
cell_state_ = AddInput(TensorData{TensorType_FLOAT32, {n_cell_ * n_batch_}},
/*is_variable=*/true);
output_state_ = AddVariableInput(
TensorData{TensorType_FLOAT32, {n_output_ * n_batch_}});
cell_state_ =
AddVariableInput(TensorData{TensorType_FLOAT32, {n_cell_ * n_batch_}});
// Layer norm weights.
if (is_layer_norm) {

View File

@@ -183,7 +183,7 @@ class UnidirectionalRNNOpModel : public SingleOpModel {
weights_ = AddInput(weights);
recurrent_weights_ = AddInput(recurrent_weights);
bias_ = AddInput(TensorType_FLOAT32);
hidden_state_ = AddInput(TensorType_FLOAT32, true);
hidden_state_ = AddVariableInput(TensorType_FLOAT32);
output_ = AddOutput(TensorType_FLOAT32);
SetBuiltinOp(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN,
BuiltinOptions_SequenceRNNOptions,