Don't convert batch_gather to gather with batch_dims=-1 as they're not equivalent.

PiperOrigin-RevId: 231983764
Alexandre Passos 2019-02-01 09:27:02 -08:00 committed by TensorFlower Gardener
parent de87e628e6
commit 1501b31d25
4 changed files with 19 additions and 53 deletions
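For context: `tf.batch_gather(params, indices)` gathers per batch, treating the leading dimensions of `indices` as batch dimensions. The sketch below is my own illustration (not part of the commit) of that behaviour using an explicit `batch_dims=1`; the rolled-back upgrade rule instead hard-coded the literal `batch_dims=-1`, which this commit states is not equivalent in general, so the automatic rewrite is being dropped.

    # Illustration only. Assumes a TF 1.x build where tf.batch_gather still
    # exists and a tf.gather that accepts batch_dims (as the new deprecation
    # message instructs).
    import tensorflow as tf

    params = tf.constant([[10., 11., 12.],
                          [20., 21., 22.]])   # shape [2, 3]
    indices = tf.constant([[2, 0],
                           [1, 1]])           # shape [2, 2]

    # Per batch row b, pick params[b, indices[b, j]]:
    #   row 0 -> [12., 10.], row 1 -> [21., 21.]
    batched = tf.batch_gather(params, indices)

    # For this rank-2 case an explicit batch_dims reproduces the result; a
    # blanket batch_dims=-1 rewrite is what this commit rolls back as unsafe.
    explicit = tf.gather(params, indices, batch_dims=1)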


@@ -3375,7 +3375,7 @@ gather.__doc__ = gather_v2.__doc__ = gen_array_ops.gather_v2.__doc__
 @dispatch.add_dispatch_support
 @deprecation.deprecated(
     "2017-10-25", "`tf.batch_gather` is deprecated, please use `tf.gather` "
-    "with `batch_dims=-1` instead.")  # pylint: disable=missing-docstring
+    "with `batch_dims` instead.")  # pylint: disable=missing-docstring
 def batch_gather(params, indices, name=None):
   """Gather slices from params according to indices with leading batch dims."""
   with ops.name_scope(name, "BatchGather", [params, indices]):
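Only the deprecation message changes in the op library itself: it now recommends `tf.gather` with `batch_dims` in general rather than the inequivalent literal `batch_dims=-1`. For readers unfamiliar with the pattern, a minimal stand-in for what a date-plus-instructions decorator like `deprecation.deprecated` does is sketched below; this is my sketch, not TensorFlow's internal helper, which has more features (e.g. warn-once behaviour).

    # Minimal sketch of a deprecation decorator (stand-in, not TensorFlow's
    # actual implementation in python/util/deprecation.py).
    import functools
    import warnings

    def deprecated(date, instructions):
      def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
          # Warn every call here; the real helper can warn only once.
          warnings.warn("%s is deprecated after %s: %s"
                        % (func.__name__, date, instructions),
                        DeprecationWarning, stacklevel=2)
          return func(*args, **kwargs)
        return wrapper
      return decorator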


@@ -28,12 +28,11 @@ from __future__ import print_function
 reorders = {
     'tf.argmax': ['input', 'axis', 'name', 'dimension', 'output_type'],
     'tf.argmin': ['input', 'axis', 'name', 'dimension', 'output_type'],
-    'tf.batch_gather': ['params', 'indices', 'name'],
-    'tf.batch_to_space': ['input', 'crops', 'block_size', 'name'],
+    'tf.batch_to_space': ['input', 'crops', 'block_size', 'name', 'block_shape'],
     'tf.boolean_mask': ['tensor', 'mask', 'name', 'axis'],
     'tf.cond': ['pred', 'true_fn', 'false_fn', 'strict', 'name', 'fn1', 'fn2'],
     'tf.confusion_matrix': ['labels', 'predictions', 'num_classes', 'dtype', 'name', 'weights'],
-    'tf.convert_to_tensor': ['value', 'dtype', 'name', 'preferred_dtype'],
+    'tf.convert_to_tensor': ['value', 'dtype', 'name', 'preferred_dtype', 'dtype_hint'],
     'tf.decode_csv': ['records', 'record_defaults', 'field_delim', 'use_quote_delim', 'name', 'na_value', 'select_cols'],
     'tf.depth_to_space': ['input', 'block_size', 'name', 'data_format'],
     'tf.feature_column.categorical_column_with_vocabulary_file': ['key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'default_value', 'dtype'],
@@ -59,27 +58,27 @@ reorders = {
     'tf.math.reduce_prod': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
     'tf.math.reduce_sum': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
     'tf.multinomial': ['logits', 'num_samples', 'seed', 'name', 'output_dtype'],
-    'tf.nn.conv1d': ['value', 'filters', 'stride', 'padding', 'use_cudnn_on_gpu', 'data_format', 'name'],
-    'tf.nn.conv2d': ['input', 'filter', 'strides', 'padding', 'use_cudnn_on_gpu', 'data_format', 'dilations', 'name'],
+    'tf.nn.conv1d': ['value', 'filters', 'stride', 'padding', 'use_cudnn_on_gpu', 'data_format', 'name', 'input'],
+    'tf.nn.conv2d': ['input', 'filter', 'strides', 'padding', 'use_cudnn_on_gpu', 'data_format', 'dilations', 'name', 'filters'],
     'tf.nn.conv2d_backprop_filter': ['input', 'filter_sizes', 'out_backprop', 'strides', 'padding', 'use_cudnn_on_gpu', 'data_format', 'dilations', 'name'],
-    'tf.nn.conv2d_backprop_input': ['input_sizes', 'filter', 'out_backprop', 'strides', 'padding', 'use_cudnn_on_gpu', 'data_format', 'dilations', 'name'],
-    'tf.nn.convolution': ['input', 'filter', 'padding', 'strides', 'dilation_rate', 'name', 'data_format'],
+    'tf.nn.conv2d_backprop_input': ['input_sizes', 'filter', 'out_backprop', 'strides', 'padding', 'use_cudnn_on_gpu', 'data_format', 'dilations', 'name', 'filters'],
+    'tf.nn.convolution': ['input', 'filter', 'padding', 'strides', 'dilation_rate', 'name', 'data_format', 'filters', 'dilations'],
     'tf.nn.crelu': ['features', 'name', 'axis'],
     'tf.nn.ctc_beam_search_decoder': ['inputs', 'sequence_length', 'beam_width', 'top_paths', 'merge_repeated'],
     'tf.nn.depth_to_space': ['input', 'block_size', 'name', 'data_format'],
-    'tf.nn.depthwise_conv2d': ['input', 'filter', 'strides', 'padding', 'rate', 'name', 'data_format'],
+    'tf.nn.depthwise_conv2d': ['input', 'filter', 'strides', 'padding', 'rate', 'name', 'data_format', 'dilations'],
     'tf.nn.embedding_lookup': ['params', 'ids', 'partition_strategy', 'name', 'validate_indices', 'max_norm'],
     'tf.nn.embedding_lookup_sparse': ['params', 'sp_ids', 'sp_weights', 'partition_strategy', 'name', 'combiner', 'max_norm'],
     'tf.nn.fractional_avg_pool': ['value', 'pooling_ratio', 'pseudo_random', 'overlapping', 'deterministic', 'seed', 'seed2', 'name'],
     'tf.nn.fractional_max_pool': ['value', 'pooling_ratio', 'pseudo_random', 'overlapping', 'deterministic', 'seed', 'seed2', 'name'],
     'tf.nn.in_top_k': ['predictions', 'targets', 'k', 'name'],
-    'tf.nn.moments': ['x', 'axes', 'shift', 'name', 'keep_dims'],
-    'tf.nn.pool': ['input', 'window_shape', 'pooling_type', 'padding', 'dilation_rate', 'strides', 'name', 'data_format'],
-    'tf.nn.separable_conv2d': ['input', 'depthwise_filter', 'pointwise_filter', 'strides', 'padding', 'rate', 'name', 'data_format'],
-    'tf.nn.softmax_cross_entropy_with_logits': ['_sentinel', 'labels', 'logits', 'dim', 'name'],
-    'tf.nn.space_to_batch': ['input', 'paddings', 'block_size', 'name'],
+    'tf.nn.moments': ['x', 'axes', 'shift', 'name', 'keep_dims', 'keepdims'],
+    'tf.nn.pool': ['input', 'window_shape', 'pooling_type', 'padding', 'dilation_rate', 'strides', 'name', 'data_format', 'dilations'],
+    'tf.nn.separable_conv2d': ['input', 'depthwise_filter', 'pointwise_filter', 'strides', 'padding', 'rate', 'name', 'data_format', 'dilations'],
+    'tf.nn.softmax_cross_entropy_with_logits': ['_sentinel', 'labels', 'logits', 'dim', 'name', 'axis'],
+    'tf.nn.space_to_batch': ['input', 'paddings', 'block_size', 'name', 'block_shape'],
     'tf.nn.space_to_depth': ['input', 'block_size', 'name', 'data_format'],
-    'tf.nn.weighted_moments': ['x', 'axes', 'frequency_weights', 'name', 'keep_dims'],
+    'tf.nn.weighted_moments': ['x', 'axes', 'frequency_weights', 'name', 'keep_dims', 'keepdims'],
     'tf.norm': ['tensor', 'ord', 'axis', 'keepdims', 'name', 'keep_dims'],
     'tf.pad': ['tensor', 'paddings', 'mode', 'name', 'constant_values'],
     'tf.parse_example': ['serialized', 'features', 'name', 'example_names'],
@@ -90,7 +89,7 @@ reorders = {
     'tf.random_poisson': ['lam', 'shape', 'dtype', 'seed', 'name'],
     'tf.reduce_all': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
     'tf.reduce_any': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
-    'tf.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices'],
+    'tf.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices', 'keepdims'],
     'tf.reduce_logsumexp': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
     'tf.reduce_max': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
     'tf.reduce_mean': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
@@ -102,17 +101,17 @@ reorders = {
     'tf.serialize_sparse': ['sp_input', 'name', 'out_type'],
     'tf.shape': ['input', 'name', 'out_type'],
     'tf.size': ['input', 'name', 'out_type'],
-    'tf.space_to_batch': ['input', 'paddings', 'block_size', 'name'],
+    'tf.space_to_batch': ['input', 'paddings', 'block_size', 'name', 'block_shape'],
     'tf.space_to_depth': ['input', 'block_size', 'name', 'data_format'],
     'tf.sparse.add': ['a', 'b', 'threshold', 'thresh'],
-    'tf.sparse.concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim'],
+    'tf.sparse.concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim', 'expand_nonconcat_dims'],
     'tf.sparse.reduce_max': ['sp_input', 'axis', 'keepdims', 'reduction_axes', 'keep_dims'],
     'tf.sparse.segment_mean': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
     'tf.sparse.segment_sqrt_n': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
     'tf.sparse.segment_sum': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
     'tf.sparse.split': ['keyword_required', 'sp_input', 'num_split', 'axis', 'name', 'split_dim'],
     'tf.sparse_add': ['a', 'b', 'threshold', 'thresh'],
-    'tf.sparse_concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim'],
+    'tf.sparse_concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim', 'expand_nonconcat_dims'],
     'tf.sparse_matmul': ['a', 'b', 'transpose_a', 'transpose_b', 'a_is_sparse', 'b_is_sparse', 'name'],
     'tf.sparse_reduce_max': ['sp_input', 'axis', 'keepdims', 'reduction_axes', 'keep_dims'],
     'tf.sparse_segment_mean': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
@@ -120,7 +119,7 @@ reorders = {
     'tf.sparse_segment_sum': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
     'tf.sparse_split': ['keyword_required', 'sp_input', 'num_split', 'axis', 'name', 'split_dim'],
     'tf.strings.length': ['input', 'name', 'unit'],
-    'tf.strings.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices'],
+    'tf.strings.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices', 'keepdims'],
     'tf.strings.substr': ['input', 'pos', 'len', 'name', 'unit'],
     'tf.substr': ['input', 'pos', 'len', 'name', 'unit'],
     'tf.test.assert_equal_graph_def': ['actual', 'expected', 'checkpoint_v2'],
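The table above (the compatibility tool's generated reorders map, judging by its contents) records each v1 endpoint's old positional argument order. Because argument positions moved in the v2 signatures, the upgrade script uses this order to rewrite positional arguments as keyword arguments. Deleting the 'tf.batch_gather' entry means the tool no longer recognizes, and therefore no longer rewrites, batch_gather calls. Roughly how such a table gets applied, as a sketch of my own rather than the tool's actual code:

    # Sketch: keywordize positional args using the recorded v1 order
    # (illustrative only, not the upgrade tool's implementation).
    reorders = {
        'tf.argmax': ['input', 'axis', 'name', 'dimension', 'output_type'],
    }

    def keywordize(func_name, positional_args):
      """Render a call with positional args turned into keyword args."""
      names = reorders[func_name]
      pairs = ', '.join('%s=%s' % (name, arg)
                        for name, arg in zip(names, positional_args))
      return '%s(%s)' % (func_name, pairs)

    # keywordize('tf.argmax', ['logits', '1'])
    #   -> "tf.argmax(input=logits, axis=1)"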


@@ -723,7 +723,6 @@ class TFAPIChangeSpec(ast_edits.APIChangeSpec):
         "tf.io.serialize_many_sparse",
         "tf.argmax",
         "tf.argmin",
-        "tf.batch_gather",
         "tf.batch_to_space",
         "tf.cond",
         "tf.nn.space_to_batch",
@@ -1264,7 +1263,6 @@ class TFAPIChangeSpec(ast_edits.APIChangeSpec):
         "*.make_initializable_iterator": _iterator_transformer,
         "*.make_one_shot_iterator": _iterator_transformer,
         "tf.nn.dropout": _dropout_transformer,
-        "tf.batch_gather": _batch_gather_transformer,
         "tf.to_bfloat16": _cast_transformer,
         "tf.to_complex128": _cast_transformer,
         "tf.to_complex64": _cast_transformer,
@@ -1574,24 +1572,6 @@ def _softmax_cross_entropy_with_logits_transformer(
   return node


-def _batch_gather_transformer(parent, node, full_name, name, logs):
-  """Add batch_dims argument for gather calls."""
-  # Check if the call already has a batch_dims argument
-  if any([kw.arg == "batch_dims" for kw in node.keywords]):
-    logs.append((ast_edits.INFO, node.lineno, node.col_offset,
-                 "tf.batch_gather already has batch_dims argument. Neat."))
-    return None
-
-  minus_one = ast.Num(n=-1)
-  minus_one.lineno = 0
-  minus_one.col_offset = 0
-  new_arg = ast.keyword("batch_dims", minus_one)
-  node.keywords.append(new_arg)
-  logs.append((ast_edits.INFO, node.lineno, node.col_offset,
-               "Added keyword argument batch_dims=-1 to tf.batch_gather."))
-  return node
-
-
 def _image_resize_transformer(parent, node, full_name, name, logs):
   """Transforms image.resize_* to image.resize(..., method=*, ...)."""
   resize_method = name[7:].upper()
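With `tf.batch_gather` removed from the reordered-names set, unregistered from the transformer map, and `_batch_gather_transformer` itself deleted, batch_gather calls now pass through the converter untouched; users see only the runtime deprecation warning and migrate by hand. A hedged manual rewrite, assuming `indices` has a statically known rank — precisely the kind of assumption the automatic `batch_dims=-1` rewrite could not make safely — might look like this:

    # Manual migration sketch; params and indices as in the earlier example.
    # Verify the result against tf.batch_gather for your shapes: per this
    # commit, no single batch_dims literal is a safe blanket replacement.
    out_v1 = tf.batch_gather(params, indices)            # TF 1.x
    out_v2 = tf.gather(params, indices,
                       batch_dims=indices.shape.ndims - 1)  # candidate rewrite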


@@ -994,19 +994,6 @@ tf.print('abc')
     _, unused_report, unused_errors, new_text = self._upgrade(text)
     self.assertEqual(new_text, expected_text)

-  def testBatchGather(self):
-    text = "tf.batch_gather(foo, bar)"
-    expected_text1 = "tf.gather(params=foo, indices=bar, batch_dims=-1)"
-    expected_text2 = "tf.gather(batch_dims=-1, params=foo, indices=bar)"
-    _, unused_report, unused_errors, new_text = self._upgrade(text)
-    self.assertIn(new_text, [expected_text1, expected_text2])
-
-    text = "tf.batch_gather(params=foo, indices=bar)"
-    expected_text1 = "tf.gather(params=foo, indices=bar, batch_dims=-1)"
-    expected_text2 = "tf.gather(batch_dims=-1, params=foo, indices=bar)"
-    _, unused_report, unused_errors, new_text = self._upgrade(text)
-    self.assertIn(new_text, [expected_text1, expected_text2])
-
   def testIterators(self):
     for (text, expected) in [
         ("(expr + yielding(data)).make_one_shot_iterator()",