Apply tf1-tf2 renames to tensorflow/lite docstrings and comments.

No code changes, only doc-strings and comments.

PiperOrigin-RevId: 243841407

Author: Mark Daoust, 2019-04-16 11:05:29 -07:00 (committed by TensorFlower Gardener)
Parent: 05392b422d
Commit: 70dfecf6a1

9 changed files with 19 additions and 19 deletions
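The renames below all follow one pattern: TF1 symbols that are moved or renamed in the TF2 namespace are rewritten to their `tf.compat.v1.*` spellings, or to their new canonical names. A summary sketch of the mappings this commit touches (assuming TensorFlow 1.13+, where both spellings resolve to the same objects):

```python
# Mapping applied by this commit (old TF1 name -> replacement):
#   tf.train.Saver          -> tf.compat.v1.train.Saver
#   tf.nn.rnn_cell.*        -> tf.compat.v1.nn.rnn_cell.*
#   tf.nn.dynamic_rnn       -> tf.compat.v1.nn.dynamic_rnn
#   tf.placeholder          -> tf.compat.v1.placeholder
#   tf.Session              -> tf.compat.v1.Session
#   tf.lite.toco_convert    -> tf.compat.v1.lite.toco_convert
#   tf.ConfigProto          -> tf.compat.v1.ConfigProto
#   tf.nn.avg_pool          -> tf.nn.avg_pool2d
#   tf.matrix_diag          -> tf.linalg.diag
#   tf.matrix_set_diag      -> tf.linalg.set_diag
import tensorflow as tf

# In TF 1.x the compat alias points at the very same object, which is why
# a docstring-only rename like this one is behavior-neutral:
assert tf.compat.v1.train.Saver is tf.train.Saver
```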

View File

@@ -158,7 +158,7 @@ class BidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
       bw_lstm_layer: The backward lstm layer either a single lstm cell or a
         multi lstm cell.
       sess: Old session.
-      saver: saver created by tf.train.Saver()
+      saver: saver created by tf.compat.v1.train.Saver()
       is_dynamic_rnn: use dynamic_rnn or not.
     Returns:
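The same `tf.train.Saver` → `tf.compat.v1.train.Saver` docstring rename recurs in the other RNN/LSTM test helpers below. For context, a minimal sketch of the saver/session pair these helpers expect (graph mode; the variable name and checkpoint path are illustrative):

```python
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
  # Any variable will do; the tests persist a trained RNN's weights this way.
  weight = tf.compat.v1.get_variable("weight", shape=[2, 2])
  saver = tf.compat.v1.train.Saver()  # the renamed symbol from this hunk

with tf.compat.v1.Session(graph=graph) as sess:
  sess.run(tf.compat.v1.global_variables_initializer())
  saver.save(sess, "/tmp/model.ckpt")     # write a checkpoint
  saver.restore(sess, "/tmp/model.ckpt")  # read it back into the session
```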

View File

@@ -184,7 +184,7 @@ class BidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
       bw_rnn_layer: The backward rnn layer either a single rnn cell or a multi
         rnn cell.
       sess: Old session.
-      saver: Saver created by tf.train.Saver()
+      saver: Saver created by tf.compat.v1.train.Saver()
       is_dynamic_rnn: Use dynamic_rnn or not.
       use_sequence_length: Whether to use sequence length or not. Default to
         False.

View File

@@ -56,7 +56,7 @@ def dynamic_rnn(cell,
   ```python
   # create a BasicRNNCell
-  rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)
+  rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
   # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]
@@ -64,22 +64,22 @@ def dynamic_rnn(cell,
   initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)
   # 'state' is a tensor of shape [batch_size, cell_state_size]
-  outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data,
+  outputs, state = tf.compat.v1.nn.dynamic_rnn(rnn_cell, input_data,
                                      initial_state=initial_state,
                                      dtype=tf.float32)
   ```
   ```python
   # create 2 LSTMCells
-  rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]
+  rnn_layers = [tf.compat.v1.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]
   # create a RNN cell composed sequentially of a number of RNNCells
-  multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)
+  multi_rnn_cell = tf.compat.v1.nn.rnn_cell.MultiRNNCell(rnn_layers)
   # 'outputs' is a tensor of shape [batch_size, max_time, 256]
   # 'state' is a N-tuple where N is the number of LSTMCells containing a
-  # tf.contrib.rnn.LSTMStateTuple for each cell
-  outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
+  # tf.nn.rnn_cell.LSTMStateTuple for each cell
+  outputs, state = tf.compat.v1.nn.dynamic_rnn(cell=multi_rnn_cell,
                                      inputs=data,
                                      dtype=tf.float32)
   ```
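The docstring snippets above are fragments. A self-contained version of the first one, runnable under TF2's compat layer (the shapes and sizes below are illustrative), might look like:

```python
import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # dynamic_rnn needs graph mode in TF2

batch_size, max_time, input_size, hidden_size = 4, 10, 8, 16
input_data = tf.compat.v1.placeholder(
    tf.float32, [batch_size, max_time, input_size])

rnn_cell = tf.compat.v1.nn.rnn_cell.BasicRNNCell(hidden_size)
initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)
outputs, state = tf.compat.v1.nn.dynamic_rnn(
    rnn_cell, input_data, initial_state=initial_state, dtype=tf.float32)

with tf.compat.v1.Session() as sess:
  sess.run(tf.compat.v1.global_variables_initializer())
  out = sess.run(outputs, {input_data: np.zeros(
      [batch_size, max_time, input_size], np.float32)})
  print(out.shape)  # (4, 10, 16)
```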

View File

@@ -143,7 +143,7 @@ class UnidirectionalSequenceLstmTest(test_util.TensorFlowTestCase):
     Args:
       lstm_layer: The lstm layer either a single lstm cell or a multi lstm cell.
       sess: Old session.
-      saver: Saver created by tf.train.Saver()
+      saver: Saver created by tf.compat.v1.train.Saver()
       is_dynamic_rnn: Use dynamic_rnn or not.
     Returns:

View File

@@ -138,7 +138,7 @@ class UnidirectionalSequenceRnnTest(test_util.TensorFlowTestCase):
     Args:
       rnn_layer: The rnn layer either a single rnn cell or a multi rnn cell.
       sess: Old session.
-      saver: saver created by tf.train.Saver()
+      saver: saver created by tf.compat.v1.train.Saver()
       is_dynamic_rnn: use dynamic_rnn or not.
     Returns:

View File

@@ -99,7 +99,7 @@ def toco_convert_protos(model_flags_str, toco_flags_str, input_data_str):
   """Convert `input_data_str` according to model and toco parameters.
   Unless you know what you are doing consider using
-  the more friendly `tf.lite.toco_convert`.
+  the more friendly `tf.compat.v1.lite.toco_convert`.
   Args:
     model_flags_str: Serialized proto describing model properties, see
View File

@@ -30,13 +30,13 @@ Example:
     output, = custom.add_outputs(output)
     return output
-  image = tf.placeholder(tf.float32, (1, 16, 16, 1))
+  image = tf.compat.v1.placeholder(tf.float32, (1, 16, 16, 1))
   output = tf.identity(tflite_cool_activation(image))
-  session = tf.Session()
+  session = tf.compat.v1.Session()
   graphdef_to_convert = tf.lite.convert_op_hints_to_stubs(session)
-  tflite_graph = tf.lite.toco_convert(graphdef_to_convert, [image], [output])
+  tflite_graph = tf.compat.v1.lite.toco_convert(graphdef_to_convert, [image], [output])
   with open("/tmp/graph.fb", "wb") as fp:
     fp.write(tflite_graph)

View File

@@ -149,7 +149,7 @@ def set_tensor_shapes(tensors, shapes):
 def get_grappler_config(enable_layout_optimizer=False, function_only=False):
-  """Creates a tf.ConfigProto for configuring Grappler.
+  """Creates a tf.compat.v1.ConfigProto for configuring Grappler.
   Args:
     enable_layout_optimizer: Bool indicating whether to run the layout
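For reference, a hedged sketch of what a Grappler-configuring `tf.compat.v1.ConfigProto` looks like; the actual `get_grappler_config()` may set different fields:

```python
import tensorflow as tf
from tensorflow.core.protobuf import rewriter_config_pb2

config = tf.compat.v1.ConfigProto()
rewriter = config.graph_options.rewrite_options
# Individual Grappler passes are toggled through the RewriterConfig proto.
rewriter.layout_optimizer = rewriter_config_pb2.RewriterConfig.ON
# Run the meta-optimizer a single time instead of to a fixed point.
rewriter.meta_optimizer_iterations = rewriter_config_pb2.RewriterConfig.ONE
```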

View File

@@ -625,7 +625,7 @@ def make_pool_tests(pool_op_in):
   """Make a set of tests to do average pooling.
   Args:
-    pool_op_in: TensorFlow pooling operation to test i.e. `tf.nn.avg_pool`.
+    pool_op_in: TensorFlow pooling operation to test i.e. `tf.nn.avg_pool2d`.
   Returns:
     A function representing the true generator (after curried pool_op_in).
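`tf.nn.avg_pool2d` is the TF2 spelling of the op this generator tests; a quick eager-mode sketch:

```python
import tensorflow as tf

x = tf.ones([1, 4, 4, 1])  # NHWC input
y = tf.nn.avg_pool2d(x, ksize=2, strides=2, padding="VALID")
print(y.shape)  # (1, 2, 2, 1); every output value is 1.0
```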
@@ -3859,7 +3859,7 @@ def make_conv2d_transpose_tests(options):
   # Since compute output_shape is fairly complicated for
-  # tf.nn.conv2d_backprop_input input_sizes argument, so we here first perform a
+  # tf.nn.conv2d_transpose input_sizes argument, so we here first perform a
   # "conv2d" operation to get the output, then we use the output to feed in
   # tf.nn.conv2d_backprop_input.
   # This test will depend on the "conv2d" operation's correctness.
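A hedged sketch of the strategy that comment describes: run a forward `conv2d` first, then reuse the input's shape as `output_shape` for the transpose so nothing has to be computed by hand (all shapes below are illustrative):

```python
import tensorflow as tf

x = tf.random.normal([1, 8, 8, 3])      # NHWC input
filt = tf.random.normal([3, 3, 3, 16])  # HWIO for conv2d: in=3, out=16
y = tf.nn.conv2d(x, filt, strides=[1, 2, 2, 1], padding="SAME")

# conv2d_transpose (the gradient of conv2d w.r.t. its input) maps y back
# to x's spatial size; output_shape is simply taken from x.
x_back = tf.nn.conv2d_transpose(
    y, filt, output_shape=tf.shape(x), strides=[1, 2, 2, 1], padding="SAME")
print(x_back.shape)  # (1, 8, 8, 3)
```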
@@ -4567,7 +4567,7 @@ def make_reverse_sequence_tests(options):
 @register_make_test_function()
 def make_matrix_diag_tests(options):
-  """Make a set of tests for tf.matrix_diag op."""
+  """Make a set of tests for tf.linalg.diag op."""
   test_parameters = [
       {
@@ -4595,7 +4595,7 @@ def make_matrix_diag_tests(options):
 @register_make_test_function()
 def make_matrix_set_diag_tests(options):
-  """Make a set of tests for tf.matrix_set_diag op."""
+  """Make a set of tests for tf.linalg.set_diag op."""
   test_parameters = [
       {
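`tf.linalg.diag` and `tf.linalg.set_diag` are the TF2 spellings of `tf.matrix_diag` / `tf.matrix_set_diag`; a quick eager-mode sketch:

```python
import tensorflow as tf

m = tf.linalg.diag([1.0, 2.0, 3.0])  # 3x3 matrix with the given diagonal
z = tf.linalg.set_diag(tf.zeros([3, 3]), [4.0, 5.0, 6.0])
print(m[0, 0].numpy(), z[1, 1].numpy())  # 1.0 5.0
```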