Fix LSTM tests and reduce test time.

PiperOrigin-RevId: 283402882
Change-Id: I0a8a4e352586bedc06be2c79286efb04c8014f18
Author: Renjie Liu, 2019-12-02 13:18:19 -08:00 (committed by TensorFlower Gardener)
Parent: 60cc118870
Commit: c77c0f8176
5 changed files with 45 additions and 20 deletions
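Taken together, the changes follow one pattern: the tests stop downloading real MNIST data (fake_data=True), stop training the model (TRAIN_STEPS = 0), and are downgraded from Bazel size "large" to "medium". Below is a minimal sketch of that pattern, assuming the TF 1.x tutorial input_data module these tests use; the batch size of 16 is only illustrative.

from tensorflow.examples.tutorials.mnist import input_data

# Dialed to 0: the training loop body never runs, so the weights keep their
# initial values and the test does no training work.
TRAIN_STEPS = 0

# fake_data=True returns small synthetic examples instead of downloading MNIST,
# so the test has no network or large-download dependency.
mnist = input_data.read_data_sets("/tmp/data/", fake_data=True, one_hot=True)

for _ in range(TRAIN_STEPS):
  batch_x, batch_y = mnist.train.next_batch(batch_size=16, fake_data=True)
  # ... a real training step would go here; it is skipped when TRAIN_STEPS == 0.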

File: BUILD

@@ -35,7 +35,7 @@ py_library(
 py_test(
     name = "unidirectional_sequence_lstm_test",
-    size = "large",
+    size = "medium",
     srcs = ["unidirectional_sequence_lstm_test.py"],
     python_version = "PY3",
     srcs_version = "PY2AND3",
@@ -58,7 +58,7 @@ py_test(
 py_test(
     name = "unidirectional_sequence_rnn_test",
-    size = "large",
+    size = "medium",
     srcs = ["unidirectional_sequence_rnn_test.py"],
     python_version = "PY3",
     srcs_version = "PY2AND3",
@@ -81,7 +81,7 @@ py_test(
 py_test(
     name = "bidirectional_sequence_lstm_test",
-    size = "large",
+    size = "medium",
     srcs = ["bidirectional_sequence_lstm_test.py"],
     python_version = "PY3",
     srcs_version = "PY2AND3",
@@ -104,13 +104,14 @@ py_test(
 py_test(
     name = "bidirectional_sequence_rnn_test",
-    size = "large",
+    size = "medium",
     srcs = ["bidirectional_sequence_rnn_test.py"],
     python_version = "PY3",
     srcs_version = "PY2AND3",
     tags = [
         "no_oss",
         "no_pip",
+        "notap",  # b/141373014
     ],
     deps = [
         ":rnn",

File: bidirectional_sequence_lstm_test.py

@@ -27,7 +27,9 @@ from tensorflow.python.framework import test_util
 from tensorflow.python.platform import test
 # Number of steps to train model.
-TRAIN_STEPS = 1
+# Dial to 0 means no training at all; all the weights will just use their
+# initial values. This can help make the test smaller.
+TRAIN_STEPS = 0
 CONFIG = tf.ConfigProto(device_count={"GPU": 0})
@@ -37,7 +39,8 @@ class BidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
   def setUp(self):
     tf.reset_default_graph()
     # Import MNIST dataset
-    self.mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+    self.mnist = input_data.read_data_sets(
+        "/tmp/data/", fake_data=True, one_hot=True)
     # Define constants
     # Unrolled through 28 time steps
@@ -144,8 +147,10 @@ class BidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
     sess.run(init)
     for _ in range(TRAIN_STEPS):
       batch_x, batch_y = self.mnist.train.next_batch(
-          batch_size=self.batch_size, shuffle=False)
+          batch_size=self.batch_size, fake_data=True)
+      batch_x = np.array(batch_x)
+      batch_y = np.array(batch_y)
       batch_x = batch_x.reshape((self.batch_size, self.time_steps,
                                  self.n_input))
       sess.run(opt, feed_dict={x: batch_x, y: batch_y})
@@ -200,7 +205,8 @@ class BidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
       - Expected output.
     """
-    b1, _ = self.mnist.train.next_batch(batch_size=1)
+    b1, _ = self.mnist.train.next_batch(batch_size=1, fake_data=True)
+    b1 = np.array(b1, dtype=np.dtype("float32"))
     sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
     expected_output = sess.run(output_class, feed_dict={x: sample_input})
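One detail worth noting in the hunks above: with fake_data=True the tutorial DataSet appears to return plain Python lists rather than numpy arrays, which is presumably why the commit adds the np.array conversions before the reshape. A short sketch of that handling; the batch size of 16 is illustrative, and the 28x28 constants mirror the "unrolled through 28 time steps" comment in the test.

import numpy as np
from tensorflow.examples.tutorials.mnist import input_data

batch_size = 16                # illustrative; the test defines its own self.batch_size
time_steps, n_input = 28, 28   # MNIST images unrolled as 28 steps of 28 inputs
mnist = input_data.read_data_sets("/tmp/data/", fake_data=True, one_hot=True)

batch_x, batch_y = mnist.train.next_batch(batch_size=batch_size, fake_data=True)
# Convert the list-valued fake batch to arrays, then reshape the images into the
# (batch, time, feature) layout the sequence model expects: 784 = 28 * 28.
batch_x = np.array(batch_x).reshape((batch_size, time_steps, n_input))
batch_y = np.array(batch_y)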

File: bidirectional_sequence_rnn_test.py

@@ -31,7 +31,9 @@ from tensorflow.python.platform import test
 FLAGS = flags.FLAGS
 # Number of steps to train model.
-TRAIN_STEPS = 1
+# Dial to 0 means no training at all; all the weights will just use their
+# initial values. This can help make the test smaller.
+TRAIN_STEPS = 0
 CONFIG = tf.ConfigProto(device_count={"GPU": 0})
@@ -58,7 +60,8 @@ class BidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
     super(BidirectionalSequenceRnnTest, self).setUp()
     # Import MNIST dataset
     data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
-    self.mnist = input_data.read_data_sets(data_dir, one_hot=True)
+    self.mnist = input_data.read_data_sets(
+        data_dir, fake_data=True, one_hot=True)

   def buildRnnLayer(self):
     return tf.keras.layers.StackedRNNCells([
@@ -165,8 +168,10 @@ class BidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
     sess.run(init)
     for _ in range(TRAIN_STEPS):
       batch_x, batch_y = self.mnist.train.next_batch(
-          batch_size=self.batch_size, shuffle=False)
+          batch_size=self.batch_size, shuffle=False, fake_data=True)
+      batch_x = np.array(batch_x)
+      batch_y = np.array(batch_y)
       batch_x = batch_x.reshape((self.batch_size, self.time_steps,
                                  self.n_input))
       sess.run(opt, feed_dict={x: batch_x, y: batch_y})
@@ -228,7 +233,8 @@ class BidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
       - Expected output.
     """
-    b1, _ = self.mnist.train.next_batch(batch_size=1)
+    b1, _ = self.mnist.train.next_batch(batch_size=1, fake_data=True)
+    b1 = np.array(b1, dtype=np.dtype("float32"))
     sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
     expected_output = sess.run(output_class, feed_dict={x: sample_input})

File: unidirectional_sequence_lstm_test.py

@@ -27,7 +27,9 @@ from tensorflow.python.platform import test
 # Number of steps to train model.
-TRAIN_STEPS = 1
+# Dial to 0 means no training at all; all the weights will just use their
+# initial values. This can help make the test smaller.
+TRAIN_STEPS = 0
 CONFIG = tf.ConfigProto(device_count={"GPU": 0})
@@ -37,7 +39,8 @@ class UnidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
   def setUp(self):
     tf.reset_default_graph()
     # Import MNIST dataset
-    self.mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
+    self.mnist = input_data.read_data_sets(
+        "/tmp/data/", fake_data=True, one_hot=True)
     # Define constants
     # Unrolled through 28 time steps
@@ -133,8 +136,10 @@ class UnidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
     sess.run(init)
     for _ in range(TRAIN_STEPS):
       batch_x, batch_y = self.mnist.train.next_batch(
-          batch_size=self.batch_size, shuffle=False)
+          batch_size=self.batch_size, fake_data=True)
+      batch_x = np.array(batch_x)
+      batch_y = np.array(batch_y)
       batch_x = batch_x.reshape((self.batch_size, self.time_steps,
                                  self.n_input))
       sess.run(opt, feed_dict={x: batch_x, y: batch_y})
@@ -184,7 +189,8 @@ class UnidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
       - Expected output.
     """
-    b1, _ = self.mnist.train.next_batch(batch_size=1)
+    b1, _ = self.mnist.train.next_batch(batch_size=1, fake_data=True)
+    b1 = np.array(b1, dtype=np.dtype("float32"))
     sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
     expected_output = sess.run(output_class, feed_dict={x: sample_input})

File: unidirectional_sequence_rnn_test.py

@@ -30,7 +30,9 @@ from tensorflow.python.platform import test
 FLAGS = flags.FLAGS
 # Number of steps to train model.
-TRAIN_STEPS = 1
+# Dial to 0 means no training at all; all the weights will just use their
+# initial values. This can help make the test smaller.
+TRAIN_STEPS = 0
 CONFIG = tf.ConfigProto(device_count={"GPU": 0})
@@ -57,7 +59,8 @@ class UnidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
     super(UnidirectionalSequenceRnnTest, self).setUp()
     # Import MNIST dataset
     data_dir = tempfile.mkdtemp(dir=FLAGS.test_tmpdir)
-    self.mnist = input_data.read_data_sets(data_dir, one_hot=True)
+    self.mnist = input_data.read_data_sets(
+        data_dir, fake_data=True, one_hot=True)

   def buildRnnLayer(self):
     return tf.keras.layers.StackedRNNCells([
@@ -128,8 +131,10 @@ class UnidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
     sess.run(tf.global_variables_initializer())
     for _ in range(TRAIN_STEPS):
       batch_x, batch_y = self.mnist.train.next_batch(
-          batch_size=self.batch_size, shuffle=False)
+          batch_size=self.batch_size, fake_data=True)
+      batch_x = np.array(batch_x)
+      batch_y = np.array(batch_y)
       batch_x = batch_x.reshape((self.batch_size, self.time_steps,
                                  self.n_input))
       sess.run(opt, feed_dict={x: batch_x, y: batch_y})
@@ -179,7 +184,8 @@ class UnidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
      - Expected output.
     """
-    b1, _ = self.mnist.train.next_batch(batch_size=1)
+    b1, _ = self.mnist.train.next_batch(batch_size=1, fake_data=True)
+    b1 = np.array(b1, dtype=np.dtype("float32"))
     sample_input = np.reshape(b1, (1, self.time_steps, self.n_input))
     expected_output = sess.run(output_class, feed_dict={x: sample_input})
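After the (now skipped) training phase, each test still feeds one fake example through the TensorFlow graph and records expected_output, presumably for comparison with the converted TFLite model. A compressed sketch of that final step follows; the dense-plus-softmax graph is a hypothetical stand-in for the LSTM/RNN graphs the tests actually build, and only the data handling mirrors the diff.

import numpy as np
import tensorflow as tf  # TF 1.x API, matching these tests
from tensorflow.examples.tutorials.mnist import input_data

time_steps, n_input, n_classes = 28, 28, 10
mnist = input_data.read_data_sets("/tmp/data/", fake_data=True, one_hot=True)

# Hypothetical stand-in graph: one dense layer over the flattened image.
x = tf.placeholder(tf.float32, [None, time_steps, n_input])
logits = tf.layers.dense(tf.reshape(x, [-1, time_steps * n_input]), n_classes)
output_class = tf.nn.softmax(logits)

with tf.Session(config=tf.ConfigProto(device_count={"GPU": 0})) as sess:
  sess.run(tf.global_variables_initializer())
  # One fake sample, cast to float32 and shaped as a 28-step sequence of 28 inputs.
  b1, _ = mnist.train.next_batch(batch_size=1, fake_data=True)
  b1 = np.array(b1, dtype=np.dtype("float32"))
  sample_input = np.reshape(b1, (1, time_steps, n_input))
  expected_output = sess.run(output_class, feed_dict={x: sample_input})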