diff --git a/tensorflow/tools/compatibility/BUILD b/tensorflow/tools/compatibility/BUILD
index 05d924c092c..a9902d77f5e 100644
--- a/tensorflow/tools/compatibility/BUILD
+++ b/tensorflow/tools/compatibility/BUILD
@@ -52,13 +52,20 @@ py_library(
 )
 
 py_library(
-    name = "tf_upgrade_v2_lib",
-    srcs = [
-        "renames_v2.py",
-        "tf_upgrade_v2.py",
-    ],
+    name = "reorders_v2",
+    srcs = ["reorders_v2.py"],
     srcs_version = "PY2AND3",
-    deps = [":ast_edits"],
+)
+
+py_library(
+    name = "tf_upgrade_v2_lib",
+    srcs = ["tf_upgrade_v2.py"],
+    srcs_version = "PY2AND3",
+    deps = [
+        ":ast_edits",
+        ":renames_v2",
+        ":reorders_v2",
+    ],
 )
 
 py_binary(
diff --git a/tensorflow/tools/compatibility/reorders_v2.py b/tensorflow/tools/compatibility/reorders_v2.py
new file mode 100644
index 00000000000..4152d37db9c
--- /dev/null
+++ b/tensorflow/tools/compatibility/reorders_v2.py
@@ -0,0 +1,98 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# pylint: disable=line-too-long
+"""List of argument reorders to apply when converting from TF 1.0 to TF 2.0.
+
+THIS FILE IS AUTOGENERATED: To update, please run:
+  bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
+  bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
+This file should be updated whenever a function is added to
+self.reordered_function_names in tf_upgrade_v2.py.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+reorders = {
+    'tf.argmax': ['input', 'axis', 'name', 'dimension', 'output_type'],
+    'tf.argmin': ['input', 'axis', 'name', 'dimension', 'output_type'],
+    'tf.batch_to_space': ['input', 'crops', 'block_size', 'name'],
+    'tf.boolean_mask': ['tensor', 'mask', 'name', 'axis'],
+    'tf.confusion_matrix': ['labels', 'predictions', 'num_classes', 'dtype', 'name', 'weights'],
+    'tf.convert_to_tensor': ['value', 'dtype', 'name', 'preferred_dtype'],
+    'tf.decode_csv': ['records', 'record_defaults', 'field_delim', 'use_quote_delim', 'name', 'na_value', 'select_cols'],
+    'tf.feature_column.categorical_column_with_vocabulary_file': ['key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets', 'default_value', 'dtype'],
+    'tf.io.decode_csv': ['records', 'record_defaults', 'field_delim', 'use_quote_delim', 'name', 'na_value', 'select_cols'],
+    'tf.io.parse_example': ['serialized', 'features', 'name', 'example_names'],
+    'tf.io.parse_single_example': ['serialized', 'features', 'name', 'example_names'],
+    'tf.io.serialize_many_sparse': ['sp_input', 'name', 'out_type'],
+    'tf.io.serialize_sparse': ['sp_input', 'name', 'out_type'],
+    'tf.math.argmax': ['input', 'axis', 'name', 'dimension', 'output_type'],
+    'tf.math.argmin': ['input', 'axis', 'name', 'dimension', 'output_type'],
+    'tf.math.confusion_matrix': ['labels', 'predictions', 'num_classes', 'dtype', 'name', 'weights'],
+    'tf.math.reduce_all': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.math.reduce_any': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.math.reduce_logsumexp': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.math.reduce_max': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.math.reduce_mean': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.math.reduce_min': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.math.reduce_prod': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.math.reduce_sum': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.multinomial': ['logits', 'num_samples', 'seed', 'name', 'output_dtype'],
+    'tf.nn.convolution': ['input', 'filter', 'padding', 'strides', 'dilation_rate', 'name', 'data_format'],
+    'tf.nn.crelu': ['features', 'name', 'axis'],
+    'tf.nn.depthwise_conv2d': ['input', 'filter', 'strides', 'padding', 'rate', 'name', 'data_format'],
+    'tf.nn.moments': ['x', 'axes', 'shift', 'name', 'keep_dims'],
+    'tf.nn.pool': ['input', 'window_shape', 'pooling_type', 'padding', 'dilation_rate', 'strides', 'name', 'data_format'],
+    'tf.nn.separable_conv2d': ['input', 'depthwise_filter', 'pointwise_filter', 'strides', 'padding', 'rate', 'name', 'data_format'],
+    'tf.nn.weighted_moments': ['x', 'axes', 'frequency_weights', 'name', 'keep_dims'],
+    'tf.pad': ['tensor', 'paddings', 'mode', 'name', 'constant_values'],
+    'tf.parse_example': ['serialized', 'features', 'name', 'example_names'],
+    'tf.parse_single_example': ['serialized', 'features', 'name', 'example_names'],
+    'tf.quantize_v2': ['input', 'min_range', 'max_range', 'T', 'mode', 'name', 'round_mode'],
+    'tf.random.multinomial': ['logits', 'num_samples', 'seed', 'name', 'output_dtype'],
+    'tf.random.poisson': ['lam', 'shape', 'dtype', 'seed', 'name'],
+    'tf.random_poisson': ['lam', 'shape', 'dtype', 'seed', 'name'],
+    'tf.reduce_all': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.reduce_any': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices'],
+    'tf.reduce_logsumexp': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.reduce_max': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.reduce_mean': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.reduce_min': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.reduce_prod': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.reduce_sum': ['input_tensor', 'axis', 'keepdims', 'name', 'reduction_indices', 'keep_dims'],
+    'tf.serialize_many_sparse': ['sp_input', 'name', 'out_type'],
+    'tf.serialize_sparse': ['sp_input', 'name', 'out_type'],
+    'tf.shape': ['input', 'name', 'out_type'],
+    'tf.size': ['input', 'name', 'out_type'],
+    'tf.sparse.add': ['a', 'b', 'threshold', 'thresh'],
+    'tf.sparse.concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim'],
+    'tf.sparse.segment_mean': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
+    'tf.sparse.segment_sqrt_n': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
+    'tf.sparse.segment_sum': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
+    'tf.sparse_add': ['a', 'b', 'threshold', 'thresh'],
+    'tf.sparse_concat': ['axis', 'sp_inputs', 'name', 'expand_nonconcat_dim', 'concat_dim'],
+    'tf.sparse_matmul': ['a', 'b', 'transpose_a', 'transpose_b', 'a_is_sparse', 'b_is_sparse', 'name'],
+    'tf.sparse_segment_mean': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
+    'tf.sparse_segment_sqrt_n': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
+    'tf.sparse_segment_sum': ['data', 'indices', 'segment_ids', 'name', 'num_segments'],
+    'tf.strings.length': ['input', 'name', 'unit'],
+    'tf.strings.reduce_join': ['inputs', 'axis', 'keep_dims', 'separator', 'name', 'reduction_indices'],
+    'tf.strings.substr': ['input', 'pos', 'len', 'name', 'unit'],
+    'tf.transpose': ['a', 'perm', 'name', 'conjugate'],
+    'tf.tuple': ['tensors', 'name', 'control_inputs'],
+    'tf.while_loop': ['cond', 'body', 'loop_vars', 'shape_invariants', 'parallel_iterations', 'back_prop', 'swap_memory', 'name', 'maximum_iterations', 'return_same_structure']
+}
diff --git a/tensorflow/tools/compatibility/tf_upgrade_v2.py b/tensorflow/tools/compatibility/tf_upgrade_v2.py
index d19eb133365..edfa2f88c1c 100644
--- a/tensorflow/tools/compatibility/tf_upgrade_v2.py
+++ b/tensorflow/tools/compatibility/tf_upgrade_v2.py
@@ -20,6 +20,7 @@ from __future__ import print_function
 
 from tensorflow.tools.compatibility import ast_edits
 from tensorflow.tools.compatibility import renames_v2
+from tensorflow.tools.compatibility import reorders_v2
 
 
 class TFAPIChangeSpec(ast_edits.APIChangeSpec):
@@ -483,187 +484,80 @@ class TFAPIChangeSpec(ast_edits.APIChangeSpec):
     # Variables that should be changed to functions.
     self.change_to_function = {}
 
+    # pylint: disable=line-too-long
+    # This list should just contain names of functions that had
+    # their arguments reordered. After adding a function name to the list
+    # run the following to update reorders_v2.py:
+    # bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
+    # bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
+    # pylint: enable=line-too-long
+    self.reordered_function_names = {
+        "tf.io.serialize_sparse",
+        "tf.io.serialize_many_sparse",
+        "tf.argmax",
+        "tf.argmin",
+        "tf.batch_to_space",
+        "tf.boolean_mask",
+        "tf.convert_to_tensor",
+        "tf.nn.moments",
+        "tf.nn.convolution",
+        "tf.nn.crelu",
+        "tf.nn.weighted_moments",
+        "tf.nn.pool",
+        "tf.nn.separable_conv2d",
+        "tf.nn.depthwise_conv2d",
+        "tf.multinomial",
+        "tf.random.multinomial",
+        "tf.pad",
+        "tf.quantize_v2",
+        "tf.feature_column.categorical_column_with_vocabulary_file",
+        "tf.shape",
+        "tf.size",
+        "tf.random.poisson",
+        "tf.sparse.add",
+        "tf.sparse_add",
+        "tf.sparse.concat",
+        "tf.sparse_concat",
+        "tf.sparse.segment_mean",
+        "tf.sparse.segment_sqrt_n",
+        "tf.sparse.segment_sum",
+        "tf.sparse_matmul",
+        "tf.io.decode_csv",
+        "tf.strings.substr",
+        "tf.strings.reduce_join",
+        "tf.strings.length",
+        "tf.transpose",
+        "tf.tuple",
+        "tf.parse_example",
+        "tf.parse_single_example",
+        "tf.io.parse_example",
+        "tf.io.parse_single_example",
+        "tf.while_loop",
+        "tf.reduce_all",
+        "tf.math.reduce_all",
+        "tf.reduce_any",
+        "tf.math.reduce_any",
+        "tf.reduce_min",
+        "tf.math.reduce_min",
+        "tf.reduce_max",
+        "tf.math.reduce_max",
+        "tf.reduce_sum",
+        "tf.math.reduce_sum",
+        "tf.reduce_mean",
+        "tf.math.reduce_mean",
+        "tf.reduce_prod",
+        "tf.math.reduce_prod",
+        "tf.reduce_logsumexp",
+        "tf.math.reduce_logsumexp",
+        "tf.reduce_join",
+        "tf.confusion_matrix",
+        "tf.math.confusion_matrix",
+    }
+
     # Functions that were reordered should be changed to the new keyword args
     # for safety, if positional arguments are used. If you have reversed the
     # positional arguments yourself, this could do the wrong thing.
-    # IMPORTANT: order here should correspond to OLD argument order.
-    # We just prepend "arg_name=" to all arguments in function calls.
-    self.function_reorders = {
-        "tf.io.serialize_sparse": ["sp_input", "name", "out_type"],
-        "tf.io.serialize_many_sparse": ["sp_input", "name", "out_type"],
-        "tf.argmax": ["input", "axis", "name", "axis", "output_type"],
-        "tf.argmin": ["input", "axis", "name", "axis", "output_type"],
-        "tf.batch_to_space": ["input", "crops", "block_size", "name"],
-        "tf.boolean_mask": ["tensor", "mask", "name", "axis"],
-        "tf.convert_to_tensor": ["value", "dtype", "name", "preferred_dtype"],
-        "tf.nn.moments": ["x", "axes", "shift", "keepdims", "name"],
-        "tf.nn.convolution": [
-            "input", "filter", "padding", "strides", "dilation_rate", "name",
-            "data_format"
-        ],
-        "tf.nn.crelu": ["features", "name", "axis"],
-        "tf.nn.weighted_moments": [
-            "x", "axes", "frequency_weights", "name", "keep_dims"
-        ],
-        "tf.nn.pool": [
-            "input", "window_shape", "pooling_type", "padding", "dilation_rate",
-            "strides", "name", "data_format"
-        ],
-        "tf.nn.separable_conv2d": [
-            "input", "depthwise_filter", "pointwise_filter", "strides",
-            "padding", "rate", "name", "data_format"
-        ],
-        "tf.nn.depthwise_conv2d": [
-            "input", "filter", "strides", "padding", "rate", "name",
-            "data_format"
-        ],
-        "tf.multinomial": [
-            "logits", "num_samples", "seed", "name", "output_dtype"
-        ],
-        "tf.random.multinomial": [
-            "logits", "num_samples", "seed", "name", "output_dtype"
-        ],
-        "tf.pad": ["tensor", "paddings", "mode", "name", "constant_values"],
-        "tf.quantize_v2": [
-            "input", "min_range", "max_range", "T", "mode", "name", "round_mode"
-        ],
-        "tf.feature_column.categorical_column_with_vocabulary_file": [
-            "key", "vocabulary_file", "vocabulary_size", "num_oov_buckets",
-            "default_value", "dtype"
-        ],
-        "tf.shape": ["input", "name", "out_type"],
-        "tf.size": ["input", "name", "out_type"],
-        "tf.random.poisson": ["lam", "shape", "dtype", "seed", "name"],
-        "tf.sparse.add": ["a", "b", "thresh"],
-        "tf.sparse_add": ["a", "b", "thresh"],
-        "tf.sparse.concat": [
-            "axis", "sp_inputs", "name", "expand_nonconcat_dim", "concat_dim"
-        ],
-        "tf.sparse_concat": [
-            "axis", "sp_inputs", "name", "expand_nonconcat_dim", "concat_dim"
-        ],
-        "tf.sparse.segment_mean": [
-            "data", "indices", "segment_ids", "name", "num_segments"
-        ],
-        "tf.sparse.segment_sqrt_n": [
-            "data", "indices", "segment_ids", "name", "num_segments"
-        ],
-        "tf.sparse.segment_sum": [
-            "data", "indices", "segment_ids", "name", "num_segments"
-        ],
-        "tf.sparse_matmul": [
-            "a", "b", "transpose_a", "transpose_b", "a_is_sparse",
-            "b_is_sparse", "name"
-        ],
-        "tf.io.decode_csv": [
-            "records",
-            "record_defaults",
-            "field_delim",
-            "use_quote_delim",
-            "name",
-            "na_value",
-            "select_cols",
-        ],
-        "tf.strings.substr": ["input", "pos", "len", "name", "unit"],
-        "tf.strings.reduce_join": [
-            "input", "axis", "keep_dims", "separator", "name",
-            "reduction_indices"
-        ],
-        "tf.strings.length": ["input", "name", "unit"],
-        "tf.transpose": ["a", "perm", "name", "conjugate"],
-        "tf.tuple": ["tensors", "name", "control_inputs"],
-        "tf.parse_example": [
-            "serialized", "features", "name", "example_names"
-        ],
-        "tf.parse_single_example": [
-            "serialized", "features", "name", "example_names"
-        ],
-        "tf.io.parse_example": [
-            "serialized", "features", "name", "example_names"
-        ],
-        "tf.io.parse_single_example": [
-            "serialized", "features", "name", "example_names"
-        ],
-        "tf.while_loop": [
-            "cond", "body", "loop_vars", "shape_invariants",
-            "parallel_iterations", "back_prop", "swap_memory", "name",
-            "maximum_iterations", "return_same_structure"
-        ],
-        "tf.reduce_all": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.math.reduce_all": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.reduce_any": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.math.reduce_any": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.reduce_min": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.math.reduce_min": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.reduce_max": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.math.reduce_max": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.reduce_sum": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.math.reduce_sum": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.reduce_mean": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.math.reduce_mean": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.reduce_prod": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.math.reduce_prod": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.reduce_logsumexp": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.math.reduce_logsumexp": [
-            "input_tensor", "axis", "keepdims", "name", "reduction_indices",
-            "keep_dims"
-        ],
-        "tf.reduce_join": [
-            "input", "axis", "keep_dims", "separator", "name",
-            "reduction_indices"
-        ],
-        "tf.confusion_matrix": [
-            "labels", "predictions", "num_classes", "dtype", "name", "weights"
-        ],
-        "tf.math.confusion_matrix": [
-            "labels", "predictions", "num_classes", "dtype", "name", "weights"
-        ]
-    }
+    self.function_reorders = reorders_v2.reorders
 
     # Specially handled functions.
     self.function_handle = {
diff --git a/tensorflow/tools/compatibility/tf_upgrade_v2_test.py b/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
index 7c26718edba..57700c07d6b 100644
--- a/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
+++ b/tensorflow/tools/compatibility/tf_upgrade_v2_test.py
@@ -63,6 +63,15 @@ def get_v2_names(symbol):
   return list(names_v2)
 
 
+def get_symbol_for_name(root, name):
+  name_parts = name.split(".")
+  symbol = root
+  # Iterate starting with second item since 1st item is "tf.".
+  for part in name_parts[1:]:
+    symbol = getattr(symbol, part)
+  return symbol
+
+
 def get_func_and_args_from_str(call_str):
   """Parse call string to get function and argument names.
 
@@ -246,6 +255,41 @@ class TestUpgrade(test_util.TensorFlowTestCase):
     visitor.private_map["tf.compat"] = ["v1", "v2"]
     traverse.traverse(tf.compat.v1, visitor)
 
+  def testReorderFileNeedsUpdate(self):
+    reordered_function_names = (
+        tf_upgrade_v2.TFAPIChangeSpec().reordered_function_names)
+    function_reorders = (
+        tf_upgrade_v2.TFAPIChangeSpec().function_reorders)
+
+    added_names_message = """Some function names in
+self.reordered_function_names are not in reorders_v2.py.
+Please run the following commands to update reorders_v2.py:
+bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
+bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
+"""
+    removed_names_message = """%s in reorders_v2.py does not match
+any name in self.reordered_function_names.
+Please run the following commands to update reorders_v2.py:
+bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
+bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
+"""
+    self.assertTrue(
+        reordered_function_names.issubset(function_reorders),
+        added_names_message)
+    # function_reorders should contain reordered_function_names
+    # and their TensorFlow V1 aliases.
+    for name in function_reorders:
+      # Get the other names (aliases) for this function.
+      attr = get_symbol_for_name(tf.compat.v1, name)
+      _, attr = tf_decorator.unwrap(attr)
+      v1_names = get_v1_names(attr)
+      self.assertTrue(v1_names)
+      v1_names = ["tf.%s" % n for n in v1_names]
+      # Check if any of the v1 aliases is in reordered_function_names.
+      self.assertTrue(
+          any(n in reordered_function_names for n in v1_names),
+          removed_names_message % name)
+
   def testRenameConstant(self):
     text = "tf.MONOLITHIC_BUILD\n"
     _, unused_report, unused_errors, new_text = self._upgrade(text)
diff --git a/tensorflow/tools/compatibility/update/BUILD b/tensorflow/tools/compatibility/update/BUILD
index b9725a74ee5..75bb0cfd2b7 100644
--- a/tensorflow/tools/compatibility/update/BUILD
+++ b/tensorflow/tools/compatibility/update/BUILD
@@ -15,3 +15,17 @@ py_binary(
         "//tensorflow/tools/compatibility:tf_upgrade_v2_lib",
     ],
 )
+
+py_binary(
+    name = "generate_v2_reorders_map",
+    srcs = ["generate_v2_reorders_map.py"],
+    srcs_version = "PY2AND3",
+    deps = [
+        "//tensorflow:tensorflow_py",
+        "//tensorflow/python:lib",
+        "//tensorflow/python:no_contrib",
+        "//tensorflow/tools/common:public_api",
+        "//tensorflow/tools/common:traverse",
+        "//tensorflow/tools/compatibility:tf_upgrade_v2_lib",
+    ],
+)
diff --git a/tensorflow/tools/compatibility/update/generate_v2_reorders_map.py b/tensorflow/tools/compatibility/update/generate_v2_reorders_map.py
new file mode 100644
index 00000000000..63541771bf3
--- /dev/null
+++ b/tensorflow/tools/compatibility/update/generate_v2_reorders_map.py
@@ -0,0 +1,166 @@
+# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# pylint: disable=line-too-long
+"""Script for updating tensorflow/tools/compatibility/reorders_v2.py.
+
+To update reorders_v2.py, run:
+  bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
+  bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
+"""
+# pylint: enable=line-too-long
+import tensorflow as tf
+
+# This import is needed so that TensorFlow python modules are in sys.modules.
+from tensorflow import python as tf_python  # pylint: disable=unused-import
+from tensorflow.python.lib.io import file_io
+from tensorflow.python.platform import app
+from tensorflow.python.util import tf_decorator
+from tensorflow.python.util import tf_export
+from tensorflow.python.util import tf_inspect
+from tensorflow.tools.common import public_api
+from tensorflow.tools.common import traverse
+from tensorflow.tools.compatibility import tf_upgrade_v2
+
+
+_OUTPUT_FILE_PATH = 'third_party/tensorflow/tools/compatibility/reorders_v2.py'
+_FILE_HEADER = """# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================
+# pylint: disable=line-too-long
+\"\"\"List of argument reorders to apply when converting from TF 1.0 to TF 2.0.
+
+THIS FILE IS AUTOGENERATED: To update, please run:
+  bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
+  bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
+This file should be updated whenever a function is added to
+self.reordered_function_names in tf_upgrade_v2.py.
+\"\"\"
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+"""
+
+_TENSORFLOW_API_ATTR_V1 = (
+    tf_export.API_ATTRS_V1[tf_export.TENSORFLOW_API_NAME].names)
+_TENSORFLOW_API_ATTR = tf_export.API_ATTRS[tf_export.TENSORFLOW_API_NAME].names
+_TENSORFLOW_CONSTANTS_ATTR_V1 = (
+    tf_export.API_ATTRS_V1[tf_export.TENSORFLOW_API_NAME].constants)
+_TENSORFLOW_CONSTANTS_ATTR = (
+    tf_export.API_ATTRS[tf_export.TENSORFLOW_API_NAME].constants)
+
+_ESTIMATOR_API_ATTR_V1 = (
+    tf_export.API_ATTRS_V1[tf_export.ESTIMATOR_API_NAME].names)
+_ESTIMATOR_API_ATTR = tf_export.API_ATTRS[tf_export.ESTIMATOR_API_NAME].names
+_ESTIMATOR_CONSTANTS_ATTR_V1 = (
+    tf_export.API_ATTRS_V1[tf_export.ESTIMATOR_API_NAME].constants)
+_ESTIMATOR_CONSTANTS_ATTR = (
+    tf_export.API_ATTRS[tf_export.ESTIMATOR_API_NAME].constants)
+
+
+def get_v1_names(symbol):
+  names_v1 = []
+  if hasattr(symbol, _TENSORFLOW_API_ATTR_V1):
+    names_v1.extend(getattr(symbol, _TENSORFLOW_API_ATTR_V1))
+  if hasattr(symbol, _ESTIMATOR_API_ATTR_V1):
+    names_v1.extend(getattr(symbol, _ESTIMATOR_API_ATTR_V1))
+  return names_v1
+
+
+def get_v2_names(symbol):
+  names_v2 = []
+  if hasattr(symbol, _TENSORFLOW_API_ATTR):
+    names_v2.extend(getattr(symbol, _TENSORFLOW_API_ATTR))
+  if hasattr(symbol, _ESTIMATOR_API_ATTR):
+    names_v2.extend(getattr(symbol, _ESTIMATOR_API_ATTR))
+  return list(names_v2)
+
+
+def collect_function_arg_names(function_names):
+  """Determines argument names for reordered function signatures.
+
+  Args:
+    function_names: Functions to collect arguments for.
+
+  Returns:
+    Dictionary mapping function name to its arguments.
+  """
+  # Map from reordered function name to its arguments.
+  function_to_args = {}
+
+  def visit(unused_path, unused_parent, children):
+    """Visitor that collects arguments for reordered functions."""
+    for child in children:
+      _, attr = tf_decorator.unwrap(child[1])
+      api_names_v1 = get_v1_names(attr)
+      api_names_v1 = ['tf.%s' % name for name in api_names_v1]
+      matches_function_names = any(
+          name in function_names for name in api_names_v1)
+      if matches_function_names:
+        arg_list = tf_inspect.getargspec(attr)[0]
+        for name in api_names_v1:
+          function_to_args[name] = arg_list
+
+  visitor = public_api.PublicAPIVisitor(visit)
+  visitor.do_not_descend_map['tf'].append('contrib')
+  visitor.do_not_descend_map['tf.compat'] = ['v1', 'v2']
+  traverse.traverse(tf, visitor)
+
+  return function_to_args
+
+
+def get_reorder_line(name, arg_list):
+  return '    \'%s\': %s' % (name, str(arg_list))
+
+
+def update_reorders_v2(output_file_path):
+  """Writes a Python dictionary mapping function name to argument order.
+
+  Args:
+    output_file_path: File path to write output to. Any existing contents
+      would be replaced.
+  """
+  reordered_function_names = (
+      tf_upgrade_v2.TFAPIChangeSpec().reordered_function_names)
+
+  all_reorders = collect_function_arg_names(reordered_function_names)
+
+  # List of reorder lines to write to output file in the form:
+  #   'tf.function_name': ['arg1', 'arg2', ...]
+  rename_lines = [
+      get_reorder_line(name, arg_names)
+      for name, arg_names in all_reorders.items()]
+  renames_file_text = '%sreorders = {\n%s\n}\n' % (
+      _FILE_HEADER, ',\n'.join(sorted(rename_lines)))
+  file_io.write_string_to_file(output_file_path, renames_file_text)
+
+
+def main(unused_argv):
+  update_reorders_v2(_OUTPUT_FILE_PATH)
+
+
+if __name__ == '__main__':
+  app.run(main=main)
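
A note on the map's semantics: each entry records the OLD (TF 1.x) positional argument order. Several of these functions changed their parameter order in TF 2.0, so a positional call that is correct under 1.x can silently bind values to the wrong parameters after upgrading. As an illustration, assuming the usual signatures (tf.nn.moments(x, axes, shift, name, keep_dims) in 1.x versus tf.nn.moments(x, axes, shift, keepdims, name) in 2.0; the 2.0 order is an assumption here, only the 1.x order is taken from this diff):

  # TF 1.x binds the fourth positional argument to `name`.
  mean, variance = tf.nn.moments(x, [0], None, 'moments')

  # After the rewrite, keywords follow the old positional order, so the call
  # no longer depends on where `name` sits in the TF 2.0 signature.
  mean, variance = tf.nn.moments(x=x, axes=[0], shift=None, name='moments')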
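On the consumer side, tf_upgrade_v2.py now just points self.function_reorders at reorders_v2.reorders; the actual source rewrite is performed by ast_edits.ASTCodeUpgrader. The helper below is only a minimal sketch of the transformation that the map drives, not the ast_edits implementation; keywordize_call and its parameters are hypothetical names used for illustration:

  def keywordize_call(func_name, positional_args, reorders):
    """Sketch: turn positional args into keyword args using the OLD order.

    Args:
      func_name: e.g. 'tf.argmax'.
      positional_args: source strings of the positional arguments, in the
        order they appear in the original TF 1.x call.
      reorders: a dict like reorders_v2.reorders, mapping each function name
        to its old (TF 1.x) positional argument order.
    """
    old_order = reorders[func_name]
    keyworded = ['%s=%s' % (name, value)
                 for name, value in zip(old_order, positional_args)]
    return '%s(%s)' % (func_name, ', '.join(keyworded))

  # keywordize_call('tf.argmax', ['logits', '1'], reorders_v2.reorders)
  # returns "tf.argmax(input=logits, axis=1)"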
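On the generator side, generate_v2_reorders_map.py walks the public TF 1.x API with public_api.PublicAPIVisitor and records tf_inspect.getargspec(...)[0] for every v1 alias of each listed function, which is why names such as tf.decode_csv appear in the generated map even though only tf.io.decode_csv is listed in reordered_function_names. For a single plain function, the per-symbol step reduces to something like the sketch below, which uses the standard library instead of tf_inspect and tf_decorator; reorder_entry and argmax_v1 are made-up names used only for illustration:

  import inspect

  def reorder_entry(name, func):
    # Argument names come back in declaration order, which for a TF 1.x
    # endpoint is exactly the old positional order the upgrade script needs.
    # (The real script uses tf_inspect.getargspec and unwraps tf_decorator
    # wrappers first.)
    arg_names = inspect.getfullargspec(func).args
    return "    '%s': %s" % (name, arg_names)

  def argmax_v1(input, axis=None, name=None, dimension=None, output_type=None):  # pylint: disable=redefined-builtin
    del input, axis, name, dimension, output_type

  print(reorder_entry('tf.argmax', argmax_v1))
  # prints:     'tf.argmax': ['input', 'axis', 'name', 'dimension', 'output_type']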