minor spelling tweaks
parent efff893c70
commit 8938220c67
tensorflow/lite/testing
@@ -33,7 +33,7 @@ def _tflite_convert_verify_num_ops(tflite_convert_function, *args, **kwargs):
   tflite_model_binary = result[0]
   if not result[0]:
     tf.compat.v1.logging.error(result[1])  # stderr from running tflite_convert.
-    raise RuntimeError("Failed to bulid model: \n\n" + result[1])
+    raise RuntimeError("Failed to build model: \n\n" + result[1])
   interpreter = tf.lite.Interpreter(model_content=tflite_model_binary)
   interpreter.allocate_tensors()
   if len(interpreter.get_tensor_details()) != num_ops:
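
Note: the check above only needs tf.lite.Interpreter to introspect the converted flatbuffer, not to run it. A minimal standalone sketch of the same idea; the model path and expected count are invented for illustration:

import tensorflow as tf

def count_tensors(tflite_model_binary):
  # Build an interpreter directly from the serialized flatbuffer bytes.
  interpreter = tf.lite.Interpreter(model_content=tflite_model_binary)
  interpreter.allocate_tensors()
  # One entry per tensor in the graph; the helper above compares this
  # count against an expected num_ops value.
  return len(interpreter.get_tensor_details())

with open("/tmp/converted_model.tflite", "rb") as f:  # hypothetical path
  assert count_tensors(f.read()) == 2
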
@@ -63,7 +63,7 @@ std::vector<std::string> ParseLine(const std::string& line) {
 
 }  // namespace
 
-// Given a `filename`, produce a vector of Examples corresopnding
+// Given a `filename`, produce a vector of Examples corresponding
 // to test cases that can be applied to a tflite model.
 TfLiteStatus ParseExamples(const char* filename,
                            std::vector<Example>* examples) {
@@ -42,7 +42,7 @@ class TestDriver : public TfDriver {
   }
 };
 
-TEST(TfDriverTest, ReadingAndWrintingValues) {
+TEST(TfDriverTest, ReadingAndWritingValues) {
   TestDriver driver;
   ASSERT_EQ(driver.WriteAndReadBack(tensorflow::DT_FLOAT, {1, 2, 2},
                                     "0.10,0.20,0.30,0.40"),
@@ -55,7 +55,7 @@ TEST(TfDriverTest, ReadingAndWrintingValues) {
             "0,1,y,z");
 }
 
-TEST(TfDriverTest, ReadingAndWrintingValuesStrings) {
+TEST(TfDriverTest, ReadingAndWritingValuesStrings) {
   TestDriver driver;
 
   auto set_buffer = [](const std::vector<string>& values, string* buffer) {
@@ -94,7 +94,7 @@ class ExtraTocoOptions(object):
     self.allow_custom_ops = False
     # Rnn states that are used to support rnn / lstm cells.
     self.rnn_states = None
-    # Split the LSTM inputs from 5 inoputs to 18 inputs for TFLite.
+    # Split the LSTM inputs from 5 inputs to 18 inputs for TFLite.
     self.split_tflite_lstm_inputs = None
     # The inference input type passed to TFLiteConvert.
     self.inference_input_type = None
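
For readers skimming the diff, a hypothetical way the options object above might be populated; only attributes visible in this hunk are used, and how the converter wrapper consumes them is assumed:

extra_options = ExtraTocoOptions()
extra_options.allow_custom_ops = False         # custom ops disabled for conversion
extra_options.rnn_states = None                # no extra RNN / LSTM state wiring
extra_options.split_tflite_lstm_inputs = True  # split fused LSTM inputs (5 -> 18) for TFLite
extra_options.inference_input_type = None      # let TFLiteConvert use its default input type
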
@@ -168,7 +168,7 @@ def write_examples(fp, examples):
 
   Args:
     fp: File-like object to write to.
-    examples: Example dictionary consiting of keys "inputs" and "outputs"
+    examples: Example dictionary consisting of keys "inputs" and "outputs"
   """
 
   def write_tensor(fp, x):
@@ -196,7 +196,7 @@ def write_test_cases(fp, model_name, examples):
   Args:
     fp: File-like object to write to.
     model_name: Filename where the model was written to, relative to filename.
-    examples: Example dictionary consiting of keys "inputs" and "outputs"
+    examples: Example dictionary consisting of keys "inputs" and "outputs"
   """
 
   fp.write("load_model: %s\n" % os.path.basename(model_name))
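
Both write_examples and write_test_cases consume the examples structure described in the docstrings above. A hypothetical call site; the list wrapper, the per-tensor value format, and the file names are assumptions, since only the docstrings are visible in this diff:

import numpy as np

examples = [{
    "inputs": [np.array([1.0, 2.0], dtype=np.float32)],   # one value set per input tensor (assumed)
    "outputs": [np.array([2.0, 4.0], dtype=np.float32)],  # expected outputs for that input
}]

with open("/tmp/foo.examples", "w") as fp:   # hypothetical output path
  write_examples(fp, examples)

with open("/tmp/foo.spec", "w") as fp:       # hypothetical test-spec path
  write_test_cases(fp, "/tmp/foo.tflite", examples)
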
@@ -262,8 +262,8 @@ def make_zip_of_tests(options,
                       expected_tf_failures=0):
   """Helper to make a zip file of a bunch of TensorFlow models.
 
-  This does a cartestian product of the dictionary of test_parameters and
-  calls make_graph() for each item in the cartestian product set.
+  This does a cartesian product of the dictionary of test_parameters and
+  calls make_graph() for each item in the cartesian product set.
   If the graph is built successfully, then make_test_inputs() is called to
   build expected input/output value pairs. The model is then converted to tflite
   with toco, and the examples are serialized with the tflite model into a zip
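
The "cartesian product of the dictionary of test_parameters" in the docstring is the usual expansion of every combination of parameter values. A small sketch of that expansion with invented parameter names; the real library builds a graph for each combination instead of printing it:

import itertools

test_parameters = {
    "dtype": ["float32", "int32"],
    "input_shape": [[1, 3], [2, 4]],
}

keys = sorted(test_parameters)
# One test case per combination of values: 2 dtypes x 2 shapes = 4 cases.
for values in itertools.product(*(test_parameters[k] for k in keys)):
  case = dict(zip(keys, values))
  print(case)  # e.g. {'dtype': 'float32', 'input_shape': [1, 3]}
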