Merge pull request #30473 from siju-samuel:random_normal_depr_removed

PiperOrigin-RevId: 281682983
Change-Id: If026d1260a5c464a83021cdd14fa3926a77e6d4b
This commit is contained in:
TensorFlower Gardener 2019-11-20 23:55:50 -08:00
commit 6acdf87ae7
5 changed files with 10 additions and 10 deletions

View File

@@ -88,8 +88,8 @@ class BidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
- tf.random_normal([self.num_units * 2, self.n_classes]))
- out_bias = tf.Variable(tf.random_normal([self.n_classes]))
+ tf.random.normal([self.num_units * 2, self.n_classes]))
+ out_bias = tf.Variable(tf.random.normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(

View File

@@ -92,8 +92,8 @@ class BidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
- tf.random_normal([self.num_units * 2, self.n_classes]))
- out_bias = tf.Variable(tf.random_normal([self.n_classes]))
+ tf.random.normal([self.num_units * 2, self.n_classes]))
+ out_bias = tf.Variable(tf.random.normal([self.n_classes]))
batch_size = self.batch_size
if is_inference:

View File

@@ -71,7 +71,7 @@ tflite_model = converter.convert() # You got a tflite model!
+ tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_lstm_units, forget_bias=0))
# Weights and biases for output softmax layer.
- out_weights = tf.Variable(tf.random_normal([self.units, self.num_class]))
+ out_weights = tf.Variable(tf.random.normal([self.units, self.num_class]))
@@ -67,7 +67,7 @@ class MnistLstmModel(object):
lstm_cells = tf.nn.rnn_cell.MultiRNNCell(lstm_layers)
# Note here, we use `tf.lite.experimental.nn.dynamic_rnn` and `time_major`
@@ -170,7 +170,7 @@ class MnistLstmModel(object):
tf.lite.experimental.nn.TFLiteLSTMCell(
self.num_lstm_units, forget_bias=0))
# Weights and biases for output softmax layer.
- out_weights = tf.Variable(tf.random_normal([self.units, self.num_class]))
+ out_weights = tf.Variable(tf.random.normal([self.units, self.num_class]))
out_bias = tf.Variable(tf.zeros([self.num_class]))
# Transpose input x to make it time major.

View File

@@ -85,8 +85,8 @@ class UnidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
- tf.random_normal([self.num_units, self.n_classes]))
- out_bias = tf.Variable(tf.random_normal([self.n_classes]))
+ tf.random.normal([self.num_units, self.n_classes]))
+ out_bias = tf.Variable(tf.random.normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(

View File

@@ -81,8 +81,8 @@ class UnidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
"""
# Weights and biases for output softmax layer.
out_weights = tf.Variable(
- tf.random_normal([self.num_units, self.n_classes]))
- out_bias = tf.Variable(tf.random_normal([self.n_classes]))
+ tf.random.normal([self.num_units, self.n_classes]))
+ out_bias = tf.Variable(tf.random.normal([self.n_classes]))
# input image placeholder
x = tf.placeholder(