Run tf_upgrade_v2 on these to get the tests passing in v2 builds.

PiperOrigin-RevId: 261009001
Author:    A. Unique TensorFlower (2019-07-31 15:19:20 -07:00)
Committer: TensorFlower Gardener
parent 6b31ac341a
commit c9483fe392
3 changed files with 80 additions and 76 deletions
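For reference, the converter that produced these edits can be driven as a subprocess from Python. A minimal sketch, assuming a TensorFlow install that puts the tf_upgrade_v2 console script on PATH; the file and report names are illustrative, not from this commit:

import subprocess

# Convert one 1.x-style script into an upgraded copy and write a change report.
subprocess.run(
    ['tf_upgrade_v2',
     '--infile', 'mnist_v1.py',             # hypothetical input script
     '--outfile', 'mnist_v2.py',            # hypothetical upgraded copy
     '--reportfile', 'upgrade_report.txt'],  # hypothetical change report
    check=True)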

File 1 of 3:

@@ -50,9 +50,9 @@ def placeholder_inputs(batch_size):
   # Note that the shapes of the placeholders match the shapes of the full
   # image and label tensors, except the first dimension is now batch_size
   # rather than the full size of the train or test data sets.
-  images_placeholder = tf.placeholder(tf.float32, shape=(batch_size,
-                                                         mnist.IMAGE_PIXELS))
-  labels_placeholder = tf.placeholder(tf.int32, shape=(batch_size))
+  images_placeholder = tf.compat.v1.placeholder(
+      tf.float32, shape=(batch_size, mnist.IMAGE_PIXELS))
+  labels_placeholder = tf.compat.v1.placeholder(tf.int32, shape=(batch_size))
   return images_placeholder, labels_placeholder
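The rewritten placeholders only work while a graph is being built, not under the eager execution that a 2.x build enables by default. A minimal sketch of the pattern these tutorials rely on (shapes and data are illustrative, not from the commit):

import numpy as np
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
  # Placeholders are graph-mode constructs; creating them while eager
  # execution is active raises an error, so build them inside a graph scope.
  images = tf.compat.v1.placeholder(tf.float32, shape=(2, 784))
  total = tf.reduce_sum(input_tensor=images)

with tf.compat.v1.Session(graph=graph) as sess:
  print(sess.run(total, feed_dict={images: np.zeros((2, 784), np.float32)}))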
@@ -140,19 +140,19 @@ def run_training():
     eval_correct = mnist.evaluation(logits, labels_placeholder)

     # Build the summary Tensor based on the TF collection of Summaries.
-    summary = tf.summary.merge_all()
+    summary = tf.compat.v1.summary.merge_all()

     # Add the variable initializer Op.
-    init = tf.global_variables_initializer()
+    init = tf.compat.v1.global_variables_initializer()

     # Create a saver for writing training checkpoints.
-    saver = tf.train.Saver()
+    saver = tf.compat.v1.train.Saver()

     # Create a session for running Ops on the Graph.
     sess = tf.compat.v1.Session()

     # Instantiate a SummaryWriter to output summaries and the Graph.
-    summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
+    summary_writer = tf.compat.v1.summary.FileWriter(FLAGS.log_dir, sess.graph)

     # And then after everything is built:
@@ -216,9 +216,9 @@ def run_training():

 def main(_):
-  if tf.gfile.Exists(FLAGS.log_dir):
-    tf.gfile.DeleteRecursively(FLAGS.log_dir)
-  tf.gfile.MakeDirs(FLAGS.log_dir)
+  if tf.io.gfile.exists(FLAGS.log_dir):
+    tf.io.gfile.rmtree(FLAGS.log_dir)
+  tf.io.gfile.makedirs(FLAGS.log_dir)
   run_training()
@@ -276,4 +276,4 @@ if __name__ == '__main__':
   )
   FLAGS, unparsed = parser.parse_known_args()
-  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
+  tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)

File 2 of 3:

@@ -54,29 +54,29 @@ def inference(images, hidden1_units, hidden2_units):
     softmax_linear: Output tensor with the computed logits.
   """
   # Hidden 1
-  with tf.name_scope('hidden1'):
+  with tf.compat.v1.name_scope('hidden1'):
     weights = tf.Variable(
-        tf.truncated_normal([IMAGE_PIXELS, hidden1_units],
-                            stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))),
-        name='weights')
+        tf.random.truncated_normal(
+            [IMAGE_PIXELS, hidden1_units],
+            stddev=1.0 / math.sqrt(float(IMAGE_PIXELS))), name='weights')
     biases = tf.Variable(tf.zeros([hidden1_units]),
                          name='biases')
     hidden1 = tf.nn.relu(tf.matmul(images, weights) + biases)
   # Hidden 2
-  with tf.name_scope('hidden2'):
+  with tf.compat.v1.name_scope('hidden2'):
     weights = tf.Variable(
-        tf.truncated_normal([hidden1_units, hidden2_units],
-                            stddev=1.0 / math.sqrt(float(hidden1_units))),
-        name='weights')
+        tf.random.truncated_normal(
+            [hidden1_units, hidden2_units],
+            stddev=1.0 / math.sqrt(float(hidden1_units))), name='weights')
     biases = tf.Variable(tf.zeros([hidden2_units]),
                          name='biases')
     hidden2 = tf.nn.relu(tf.matmul(hidden1, weights) + biases)
   # Linear
-  with tf.name_scope('softmax_linear'):
+  with tf.compat.v1.name_scope('softmax_linear'):
     weights = tf.Variable(
-        tf.truncated_normal([hidden2_units, NUM_CLASSES],
-                            stddev=1.0 / math.sqrt(float(hidden2_units))),
-        name='weights')
+        tf.random.truncated_normal(
+            [hidden2_units, NUM_CLASSES],
+            stddev=1.0 / math.sqrt(float(hidden2_units))), name='weights')
     biases = tf.Variable(tf.zeros([NUM_CLASSES]),
                          name='biases')
     logits = tf.matmul(hidden2, weights) + biases
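tf.random.truncated_normal is the 2.x spelling of tf.truncated_normal, so the weight-initialization pattern above is otherwise unchanged. A standalone sketch with made-up layer sizes:

import math
import tensorflow as tf

# Same initializer shape as in inference(), just with illustrative dimensions.
weights = tf.Variable(
    tf.random.truncated_normal([784, 128], stddev=1.0 / math.sqrt(784.0)),
    name='weights')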
@@ -93,8 +93,9 @@ def loss(logits, labels):
   Returns:
     loss: Loss tensor of type float.
   """
-  labels = tf.to_int64(labels)
-  return tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
+  labels = tf.cast(labels, dtype=tf.int64)
+  return tf.compat.v1.losses.sparse_softmax_cross_entropy(
+      labels=labels, logits=logits)


 def training(loss, learning_rate):
@@ -115,9 +116,9 @@ def training(loss, learning_rate):
     train_op: The Op for training.
   """
   # Add a scalar summary for the snapshot loss.
-  tf.summary.scalar('loss', loss)
+  tf.compat.v1.summary.scalar('loss', loss)
   # Create the gradient descent optimizer with the given learning rate.
-  optimizer = tf.train.GradientDescentOptimizer(learning_rate)
+  optimizer = tf.compat.v1.train.GradientDescentOptimizer(learning_rate)
   # Create a variable to track the global step.
   global_step = tf.Variable(0, name='global_step', trainable=False)
   # Use the optimizer to apply the gradients that minimize the loss
@@ -142,6 +143,6 @@ def evaluation(logits, labels):
   # It returns a bool tensor with shape [batch_size] that is true for
   # the examples where the label is in the top k (here k=1)
   # of all logits for that example.
-  correct = tf.nn.in_top_k(logits, labels, 1)
+  correct = tf.nn.in_top_k(predictions=logits, targets=labels, k=1)
   # Return the number of true entries.
-  return tf.reduce_sum(tf.cast(correct, tf.int32))
+  return tf.reduce_sum(input_tensor=tf.cast(correct, tf.int32))
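The keyword arguments added here are defensive: the positional order of in_top_k is not the same across the 1.x and 2.x endpoints, so the converter spells out parameter names (for reduce_sum as well) to keep the calls unambiguous under either API. A small eager-mode check with made-up logits, not taken from the tutorial:

import tensorflow as tf

logits = tf.constant([[0.1, 0.9], [0.8, 0.2]])  # two examples, two classes
labels = tf.constant([1, 1])
correct = tf.nn.in_top_k(predictions=logits, targets=labels, k=1)
# Only the first example has its label in the top-1 prediction.
print(tf.reduce_sum(input_tensor=tf.cast(correct, tf.int32)))  # -> 1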

File 3 of 3:

@@ -40,22 +40,22 @@ def train():
   mnist = input_data.read_data_sets(FLAGS.data_dir,
                                     fake_data=FLAGS.fake_data)

-  sess = tf.InteractiveSession()
+  sess = tf.compat.v1.InteractiveSession()
   # Create a multilayer model.

   # Input placeholders
-  with tf.name_scope('input'):
-    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
-    y_ = tf.placeholder(tf.int64, [None], name='y-input')
+  with tf.compat.v1.name_scope('input'):
+    x = tf.compat.v1.placeholder(tf.float32, [None, 784], name='x-input')
+    y_ = tf.compat.v1.placeholder(tf.int64, [None], name='y-input')

-  with tf.name_scope('input_reshape'):
+  with tf.compat.v1.name_scope('input_reshape'):
     image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])
-    tf.summary.image('input', image_shaped_input, 10)
+    tf.compat.v1.summary.image('input', image_shaped_input, 10)

   # We can't initialize these variables to 0 - the network will get stuck.
   def weight_variable(shape):
     """Create a weight variable with appropriate initialization."""
-    initial = tf.truncated_normal(shape, stddev=0.1)
+    initial = tf.random.truncated_normal(shape, stddev=0.1)
     return tf.Variable(initial)

   def bias_variable(shape):
@@ -65,15 +65,15 @@ def train():

   def variable_summaries(var):
     """Attach a lot of summaries to a Tensor (for TensorBoard visualization)."""
-    with tf.name_scope('summaries'):
-      mean = tf.reduce_mean(var)
-      tf.summary.scalar('mean', mean)
-      with tf.name_scope('stddev'):
-        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
-      tf.summary.scalar('stddev', stddev)
-      tf.summary.scalar('max', tf.reduce_max(var))
-      tf.summary.scalar('min', tf.reduce_min(var))
-      tf.summary.histogram('histogram', var)
+    with tf.compat.v1.name_scope('summaries'):
+      mean = tf.reduce_mean(input_tensor=var)
+      tf.compat.v1.summary.scalar('mean', mean)
+      with tf.compat.v1.name_scope('stddev'):
+        stddev = tf.sqrt(tf.reduce_mean(input_tensor=tf.square(var - mean)))
+      tf.compat.v1.summary.scalar('stddev', stddev)
+      tf.compat.v1.summary.scalar('max', tf.reduce_max(input_tensor=var))
+      tf.compat.v1.summary.scalar('min', tf.reduce_min(input_tensor=var))
+      tf.compat.v1.summary.histogram('histogram', var)

   def nn_layer(input_tensor, input_dim, output_dim, layer_name, act=tf.nn.relu):
     """Reusable code for making a simple neural net layer.
@@ -83,32 +83,32 @@ def train():
     and adds a number of summary ops.
     """
    # Adding a name scope ensures logical grouping of the layers in the graph.
-    with tf.name_scope(layer_name):
+    with tf.compat.v1.name_scope(layer_name):
       # This Variable will hold the state of the weights for the layer
-      with tf.name_scope('weights'):
+      with tf.compat.v1.name_scope('weights'):
         weights = weight_variable([input_dim, output_dim])
         variable_summaries(weights)
-      with tf.name_scope('biases'):
+      with tf.compat.v1.name_scope('biases'):
         biases = bias_variable([output_dim])
         variable_summaries(biases)
-      with tf.name_scope('Wx_plus_b'):
+      with tf.compat.v1.name_scope('Wx_plus_b'):
         preactivate = tf.matmul(input_tensor, weights) + biases
-        tf.summary.histogram('pre_activations', preactivate)
+        tf.compat.v1.summary.histogram('pre_activations', preactivate)
       activations = act(preactivate, name='activation')
-      tf.summary.histogram('activations', activations)
+      tf.compat.v1.summary.histogram('activations', activations)
       return activations

   hidden1 = nn_layer(x, 784, 500, 'layer1')

-  with tf.name_scope('dropout'):
-    keep_prob = tf.placeholder(tf.float32)
-    tf.summary.scalar('dropout_keep_probability', keep_prob)
+  with tf.compat.v1.name_scope('dropout'):
+    keep_prob = tf.compat.v1.placeholder(tf.float32)
+    tf.compat.v1.summary.scalar('dropout_keep_probability', keep_prob)
     dropped = tf.nn.dropout(hidden1, rate=(1 - keep_prob))

   # Do not apply softmax activation yet, see below.
   y = nn_layer(dropped, 500, 10, 'layer2', act=tf.identity)

-  with tf.name_scope('cross_entropy'):
+  with tf.compat.v1.name_scope('cross_entropy'):
     # The raw formulation of cross-entropy,
     #
     #   tf.reduce_mean(-tf.reduce_sum(y_ * tf.math.log(tf.softmax(y)),
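The dropout line above is one of the few places where the upgrade rewrites an argument's value rather than just a name: 2.x's tf.nn.dropout takes the drop probability rate instead of keep_prob, hence rate=(1 - keep_prob). A standalone sketch with an illustrative tensor:

import tensorflow as tf

x = tf.ones([4, 4])
keep_prob = 0.9
# rate is the fraction of elements to drop, i.e. the complement of keep_prob;
# surviving elements are scaled by 1 / keep_prob.
dropped = tf.nn.dropout(x, rate=(1 - keep_prob))
print(dropped.shape)  # (4, 4)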
@@ -119,28 +119,30 @@ def train():
     # So here we use tf.compat.v1.losses.sparse_softmax_cross_entropy on the
     # raw logit outputs of the nn_layer above, and then average across
     # the batch.
-    with tf.name_scope('total'):
-      cross_entropy = tf.losses.sparse_softmax_cross_entropy(
+    with tf.compat.v1.name_scope('total'):
+      cross_entropy = tf.compat.v1.losses.sparse_softmax_cross_entropy(
           labels=y_, logits=y)
-  tf.summary.scalar('cross_entropy', cross_entropy)
+  tf.compat.v1.summary.scalar('cross_entropy', cross_entropy)

-  with tf.name_scope('train'):
-    train_step = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(
+  with tf.compat.v1.name_scope('train'):
+    train_step = tf.compat.v1.train.AdamOptimizer(FLAGS.learning_rate).minimize(
         cross_entropy)

-  with tf.name_scope('accuracy'):
-    with tf.name_scope('correct_prediction'):
-      correct_prediction = tf.equal(tf.argmax(y, 1), y_)
-    with tf.name_scope('accuracy'):
-      accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
-  tf.summary.scalar('accuracy', accuracy)
+  with tf.compat.v1.name_scope('accuracy'):
+    with tf.compat.v1.name_scope('correct_prediction'):
+      correct_prediction = tf.equal(tf.argmax(input=y, axis=1), y_)
+    with tf.compat.v1.name_scope('accuracy'):
+      accuracy = tf.reduce_mean(input_tensor=tf.cast(correct_prediction,
+                                                     tf.float32))
+  tf.compat.v1.summary.scalar('accuracy', accuracy)

   # Merge all the summaries and write them out to
   # /tmp/tensorflow/mnist/logs/mnist_with_summaries (by default)
-  merged = tf.summary.merge_all()
-  train_writer = tf.summary.FileWriter(FLAGS.log_dir + '/train', sess.graph)
-  test_writer = tf.summary.FileWriter(FLAGS.log_dir + '/test')
-  tf.global_variables_initializer().run()
+  merged = tf.compat.v1.summary.merge_all()
+  train_writer = tf.compat.v1.summary.FileWriter(FLAGS.log_dir + '/train',
+                                                 sess.graph)
+  test_writer = tf.compat.v1.summary.FileWriter(FLAGS.log_dir + '/test')
+  tf.compat.v1.global_variables_initializer().run()

   # Train the model, and also write summaries.
   # Every 10th step, measure test-set accuracy, and write test summaries
@@ -163,8 +165,9 @@ def train():
       print('Accuracy at step %s: %s' % (i, acc))
     else:  # Record train set summaries, and train
       if i % 100 == 99:  # Record execution stats
-        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
-        run_metadata = tf.RunMetadata()
+        run_options = tf.compat.v1.RunOptions(
+            trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
+        run_metadata = tf.compat.v1.RunMetadata()
         summary, _ = sess.run([merged, train_step],
                               feed_dict=feed_dict(True),
                               options=run_options,
@@ -180,9 +183,9 @@ def train():

 def main(_):
-  if tf.gfile.Exists(FLAGS.log_dir):
-    tf.gfile.DeleteRecursively(FLAGS.log_dir)
-  tf.gfile.MakeDirs(FLAGS.log_dir)
+  if tf.io.gfile.exists(FLAGS.log_dir):
+    tf.io.gfile.rmtree(FLAGS.log_dir)
+  tf.io.gfile.makedirs(FLAGS.log_dir)
   with tf.Graph().as_default():
     train()
@@ -211,4 +214,4 @@ if __name__ == '__main__':
                           'tensorflow/mnist/logs/mnist_with_summaries'),
       help='Summaries log directory')
   FLAGS, unparsed = parser.parse_known_args()
-  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
+  tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
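A quick way to see why these edits are needed under a 2.x install (a sanity check, not part of the commit): the bare 1.x names are gone from the top-level namespace, while the compat.v1 and renamed spellings remain.

import tensorflow as tf

# In a 2.x build the 1.x endpoints only survive under tf.compat.v1 or
# their renamed v2 locations.
assert not hasattr(tf, 'placeholder')
assert hasattr(tf.compat.v1, 'placeholder')
assert not hasattr(tf, 'truncated_normal')
assert hasattr(tf.random, 'truncated_normal')
print('v2 namespace checks passed')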