diff --git a/tensorflow/examples/speech_commands/BUILD b/tensorflow/examples/speech_commands/BUILD
index ea8a7d89ed9..b4b07c368b4 100644
--- a/tensorflow/examples/speech_commands/BUILD
+++ b/tensorflow/examples/speech_commands/BUILD
@@ -48,7 +48,6 @@ py_binary(
         ":recognize_commands_py",
         "//tensorflow:tensorflow_py",
         "//third_party/py/numpy",
-        "@six_archive//:six",
     ],
 )
 
@@ -172,8 +171,6 @@ py_library(
         ":input_data",
         ":models",
         "//tensorflow:tensorflow_py",
-        "//third_party/py/numpy",
-        "@six_archive//:six",
     ],
 )
 
@@ -216,8 +213,6 @@ py_library(
         ":input_data",
         ":models",
         "//tensorflow:tensorflow_py",
-        "//third_party/py/numpy",
-        "@six_archive//:six",
     ],
 )
 
@@ -261,7 +256,6 @@ py_library(
         ":models",
         "//tensorflow:tensorflow_py",
         "//third_party/py/numpy",
-        "@six_archive//:six",
     ],
 )
diff --git a/tensorflow/examples/speech_commands/freeze.py b/tensorflow/examples/speech_commands/freeze.py
index 1cc7138cebf..4a48a440b6e 100644
--- a/tensorflow/examples/speech_commands/freeze.py
+++ b/tensorflow/examples/speech_commands/freeze.py
@@ -44,10 +44,10 @@ import sys
 
 import tensorflow as tf
 
-from tensorflow.python.ops import gen_audio_ops as audio_ops
 import input_data
 import models
 from tensorflow.python.framework import graph_util
+from tensorflow.python.ops import gen_audio_ops as audio_ops
 
 # If it's available, load the specialized feature generator. If this doesn't
 # work, try building with bazel instead of running the Python script directly.
diff --git a/tensorflow/examples/speech_commands/input_data.py b/tensorflow/examples/speech_commands/input_data.py
index 2f104b9eaea..fbbbc6fdc50 100644
--- a/tensorflow/examples/speech_commands/input_data.py
+++ b/tensorflow/examples/speech_commands/input_data.py
@@ -233,15 +233,15 @@ class AudioProcessor(object):
         filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
       except:
         tf.compat.v1.logging.error(
-            'Failed to download URL: %s to folder: %s', data_url, filepath)
-        tf.compat.v1.logging.error(
-            'Please make sure you have enough free space and'
-            ' an internet connection')
+            'Failed to download URL: {0} to folder: {1}. Please make sure you '
+            'have enough free space and an internet connection'.format(
+                data_url, filepath))
         raise
       print()
       statinfo = os.stat(filepath)
-      tf.compat.v1.logging.info('Successfully downloaded %s (%d bytes)',
-                                filename, statinfo.st_size)
+      tf.compat.v1.logging.info(
+          'Successfully downloaded {0} ({1} bytes)'.format(
+              filename, statinfo.st_size))
       tarfile.open(filepath, 'r:gz').extractall(dest_directory)
 
   def prepare_data_index(self, silence_percentage, unknown_percentage,
diff --git a/tensorflow/examples/speech_commands/input_data_test.py b/tensorflow/examples/speech_commands/input_data_test.py
index 274d33b333f..ad20911a284 100644
--- a/tensorflow/examples/speech_commands/input_data_test.py
+++ b/tensorflow/examples/speech_commands/input_data_test.py
@@ -33,7 +33,7 @@ from tensorflow.python.platform import test
 class InputDataTest(test.TestCase):
 
   def _getWavData(self):
-    with self.cached_session() as sess:
+    with self.cached_session():
       sample_data = tf.zeros([32000, 2])
       wav_encoder = tf.audio.encode_wav(sample_data, 16000)
       wav_data = self.evaluate(wav_encoder)
@@ -105,11 +105,11 @@ class InputDataTest(test.TestCase):
                                                 ["a", "b"], 10, 10,
                                                 self._model_settings(), tmp_dir)
     self.assertLess(0, audio_processor.set_size("training"))
-    self.assertTrue("training" in audio_processor.data_index)
-    self.assertTrue("validation" in audio_processor.data_index)
-    self.assertTrue("testing" in audio_processor.data_index)
-    self.assertEquals(input_data.UNKNOWN_WORD_INDEX,
-                      audio_processor.word_to_index["c"])
+    self.assertIn("training", audio_processor.data_index)
+    self.assertIn("validation", audio_processor.data_index)
+    self.assertIn("testing", audio_processor.data_index)
+    self.assertEqual(input_data.UNKNOWN_WORD_INDEX,
+                     audio_processor.word_to_index["c"])
 
   def testPrepareDataIndexEmpty(self):
     tmp_dir = self.get_temp_dir()
@@ -117,7 +117,7 @@ class InputDataTest(test.TestCase):
     with self.assertRaises(Exception) as e:
       _ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"], 10, 10,
                                     self._model_settings(), tmp_dir)
-    self.assertTrue("No .wavs found" in str(e.exception))
+    self.assertIn("No .wavs found", str(e.exception))
 
   def testPrepareDataIndexMissing(self):
     tmp_dir = self.get_temp_dir()
@@ -125,7 +125,7 @@ class InputDataTest(test.TestCase):
     with self.assertRaises(Exception) as e:
       _ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b", "d"], 10,
                                     10, self._model_settings(), tmp_dir)
-    self.assertTrue("Expected to find" in str(e.exception))
+    self.assertIn("Expected to find", str(e.exception))
 
   @test_util.run_deprecated_v1
   def testPrepareBackgroundData(self):
diff --git a/tensorflow/examples/speech_commands/label_wav.py b/tensorflow/examples/speech_commands/label_wav.py
index 2a0190df616..3d8c3b67bf1 100644
--- a/tensorflow/examples/speech_commands/label_wav.py
+++ b/tensorflow/examples/speech_commands/label_wav.py
@@ -77,13 +77,12 @@ def run_graph(wav_data, labels, input_layer_name, output_layer_name,
 
 def label_wav(wav, labels, graph, input_name, output_name, how_many_labels):
   """Loads the model and labels, and runs the inference to print predictions."""
   if not wav or not tf.io.gfile.exists(wav):
-    tf.compat.v1.logging.fatal('Audio file does not exist %s', wav)
-
+    raise ValueError('Audio file does not exist at {0}'.format(wav))
   if not labels or not tf.io.gfile.exists(labels):
-    tf.compat.v1.logging.fatal('Labels file does not exist %s', labels)
+    raise ValueError('Labels file does not exist at {0}'.format(labels))
 
   if not graph or not tf.io.gfile.exists(graph):
-    tf.compat.v1.logging.fatal('Graph file does not exist %s', graph)
+    raise ValueError('Graph file does not exist at {0}'.format(graph))
 
   labels_list = load_labels(labels)
diff --git a/tensorflow/examples/speech_commands/label_wav_dir.py b/tensorflow/examples/speech_commands/label_wav_dir.py
index 313647b1ee7..d6016a06b62 100644
--- a/tensorflow/examples/speech_commands/label_wav_dir.py
+++ b/tensorflow/examples/speech_commands/label_wav_dir.py
@@ -64,8 +64,7 @@ def run_graph(wav_dir, labels, input_layer_name, output_layer_name,
     #   predictions per class
     for wav_path in glob.glob(wav_dir + '/*.wav'):
       if not wav_path or not tf.io.gfile.exists(wav_path):
-        tf.compat.v1.logging.fatal('Audio file does not exist %s', wav_path)
-
+        raise ValueError('Audio file does not exist at {0}'.format(wav_path))
       with open(wav_path, 'rb') as wav_file:
         wav_data = wav_file.read()
 
@@ -86,10 +85,10 @@ def run_graph(wav_dir, labels, input_layer_name, output_layer_name,
 
 def label_wav(wav_dir, labels, graph, input_name, output_name, how_many_labels):
   """Loads the model and labels, and runs the inference to print predictions."""
   if not labels or not tf.io.gfile.exists(labels):
-    tf.compat.v1.logging.fatal('Labels file does not exist %s', labels)
+    raise ValueError('Labels file does not exist at {0}'.format(labels))
 
   if not graph or not tf.io.gfile.exists(graph):
-    tf.compat.v1.logging.fatal('Graph file does not exist %s', graph)
+    raise ValueError('Graph file does not exist at {0}'.format(graph))
 
   labels_list = load_labels(labels)
diff --git a/tensorflow/examples/speech_commands/label_wav_test.py b/tensorflow/examples/speech_commands/label_wav_test.py
index 0e52d1b4388..67aec3dbece 100644
--- a/tensorflow/examples/speech_commands/label_wav_test.py
+++ b/tensorflow/examples/speech_commands/label_wav_test.py
@@ -29,7 +29,7 @@ from tensorflow.python.platform import test
 class LabelWavTest(test.TestCase):
 
   def _getWavData(self):
-    with self.cached_session() as sess:
+    with self.cached_session():
       sample_data = tf.zeros([1000, 2])
       wav_encoder = tf.audio.encode_wav(sample_data, 16000)
       wav_data = self.evaluate(wav_encoder)
diff --git a/tensorflow/examples/speech_commands/train.py b/tensorflow/examples/speech_commands/train.py
index e917a51d837..bce5e521092 100644
--- a/tensorflow/examples/speech_commands/train.py
+++ b/tensorflow/examples/speech_commands/train.py
@@ -251,12 +251,16 @@ def main(_):
             dropout_rate: 0.5
         })
     train_writer.add_summary(train_summary, training_step)
-    tf.compat.v1.logging.info(
+    tf.compat.v1.logging.debug(
         'Step #%d: rate %f, accuracy %.1f%%, cross entropy %f' %
         (training_step, learning_rate_value, train_accuracy * 100,
          cross_entropy_value))
     is_last_step = (training_step == training_steps_max)
     if (training_step % FLAGS.eval_step_interval) == 0 or is_last_step:
+      tf.compat.v1.logging.info(
+          'Step #%d: rate %f, accuracy %.1f%%, cross entropy %f' %
+          (training_step, learning_rate_value, train_accuracy * 100,
+           cross_entropy_value))
       set_size = audio_processor.set_size('validation')
       total_accuracy = 0
       total_conf_matrix = None
diff --git a/tensorflow/examples/speech_commands/wav_to_features_test.py b/tensorflow/examples/speech_commands/wav_to_features_test.py
index dfe6c657c2f..f7d617f14f8 100644
--- a/tensorflow/examples/speech_commands/wav_to_features_test.py
+++ b/tensorflow/examples/speech_commands/wav_to_features_test.py
@@ -30,7 +30,7 @@ from tensorflow.python.platform import test
 class WavToFeaturesTest(test.TestCase):
 
   def _getWavData(self):
-    with self.cached_session() as sess:
+    with self.cached_session():
       sample_data = tf.zeros([32000, 2])
       wav_encoder = tf.audio.encode_wav(sample_data, 16000)
       wav_data = self.evaluate(wav_encoder)
@@ -63,7 +63,7 @@ class WavToFeaturesTest(test.TestCase):
                                     input_file_path, output_file_path)
     with open(output_file_path, "rb") as f:
       content = f.read()
-    self.assertTrue(b"const unsigned char g_input_data" in content)
+    self.assertIn(b"const unsigned char g_input_data", content)
 
   @test_util.run_deprecated_v1
   def testWavToFeaturesMicro(self):
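
Note: the recurring pattern in this change is to raise an exception instead of calling tf.compat.v1.logging.fatal when a required file is missing, so the failure propagates to the caller as an ordinary error rather than relying on the logger to terminate the process. A minimal sketch of that pattern follows; the check_exists helper is hypothetical and not part of this change.

import tensorflow as tf


def check_exists(path, kind):
  # Raise instead of logging fatally so callers can catch and handle the error.
  if not path or not tf.io.gfile.exists(path):
    raise ValueError('{0} file does not exist at {1}'.format(kind, path))


# Example usage: the missing-file condition is now a catchable exception.
try:
  check_exists('/tmp/does_not_exist.pb', 'Graph')
except ValueError as e:
  print(e)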