Don't create an automatic rename rule for any symbol with a
manually-specified rename.

PiperOrigin-RevId: 223243781

Commit 36325c5234 (parent a7b3f17a16)

tensorflow/tools/compatibility
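
In effect, the renames generator now skips any symbol that already has a manual rename; a minimal Python sketch of that filter, with illustrative values (only manual_symbol_renames and the "tf." + name check come from the diff below, the collected pairs are made up for the example):

# Sketch only: how the generator now drops symbols that already have a manual rename.
manual_renames = {"tf.batch_to_space_nd", "tf.decode_csv"}   # keys of manual_symbol_renames
collected = [("batch_to_space_nd", "compat.v1.batch_to_space_nd"),
             ("cholesky", "linalg.cholesky")]                # names collected without the "tf." prefix
generated = [(name, new_name) for name, new_name in collected
             if "tf." + name not in manual_renames]
# generated == [("cholesky", "linalg.cholesky")]: the manually-renamed symbol is no longer
# written into renames_v2.py, so only the manual mapping applies to it.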
@@ -51,8 +51,8 @@ py_library(
    srcs_version = "PY2AND3",
)

py_binary(
    name = "tf_upgrade_v2",
py_library(
    name = "tf_upgrade_v2_lib",
    srcs = [
        "renames_v2.py",
        "tf_upgrade_v2.py",
@@ -61,6 +61,17 @@ py_binary(
    deps = [":ast_edits"],
)

py_binary(
    name = "tf_upgrade_v2",
    srcs = ["tf_upgrade_v2_main.py"],
    main = "tf_upgrade_v2_main.py",
    srcs_version = "PY2AND3",
    deps = [
        ":ast_edits",
        ":tf_upgrade_v2_lib",
    ],
)

py_test(
    name = "tf_upgrade_v2_test",
    srcs = ["tf_upgrade_v2_test.py"],
@@ -92,15 +92,12 @@ renames = {
    'tf.assign': 'tf.compat.v1.assign',
    'tf.assign_add': 'tf.compat.v1.assign_add',
    'tf.assign_sub': 'tf.compat.v1.assign_sub',
    'tf.batch_to_space_nd': 'tf.compat.v1.batch_to_space_nd',
    'tf.betainc': 'tf.math.betainc',
    'tf.bincount': 'tf.compat.v1.bincount',
    'tf.ceil': 'tf.math.ceil',
    'tf.check_numerics': 'tf.debugging.check_numerics',
    'tf.cholesky': 'tf.linalg.cholesky',
    'tf.cholesky_solve': 'tf.linalg.cholesky_solve',
    'tf.colocate_with': 'tf.compat.v1.colocate_with',
    'tf.confusion_matrix': 'tf.compat.v1.confusion_matrix',
    'tf.conj': 'tf.math.conj',
    'tf.container': 'tf.compat.v1.container',
    'tf.convert_to_tensor_or_indexed_slices': 'tf.compat.v1.convert_to_tensor_or_indexed_slices',
@@ -110,6 +107,7 @@ renames = {
    'tf.create_partitioned_variables': 'tf.compat.v1.create_partitioned_variables',
    'tf.cross': 'tf.linalg.cross',
    'tf.cumprod': 'tf.math.cumprod',
    'tf.data.Iterator': 'tf.compat.v1.data.Iterator',
    'tf.debugging.is_finite': 'tf.math.is_finite',
    'tf.debugging.is_inf': 'tf.math.is_inf',
    'tf.debugging.is_nan': 'tf.math.is_nan',
@@ -117,7 +115,6 @@ renames = {
    'tf.debugging.is_strictly_increasing': 'tf.math.is_strictly_increasing',
    'tf.decode_base64': 'tf.io.decode_base64',
    'tf.decode_compressed': 'tf.io.decode_compressed',
    'tf.decode_csv': 'tf.compat.v1.decode_csv',
    'tf.decode_json_example': 'tf.io.decode_json_example',
    'tf.decode_raw': 'tf.io.decode_raw',
    'tf.delete_session_tensor': 'tf.compat.v1.delete_session_tensor',
@@ -180,21 +177,9 @@ renames = {
    'tf.get_session_tensor': 'tf.compat.v1.get_session_tensor',
    'tf.get_variable': 'tf.compat.v1.get_variable',
    'tf.get_variable_scope': 'tf.compat.v1.get_variable_scope',
    'tf.gfile.Copy': 'tf.compat.v1.gfile.Copy',
    'tf.gfile.DeleteRecursively': 'tf.compat.v1.gfile.DeleteRecursively',
    'tf.gfile.Exists': 'tf.compat.v1.gfile.Exists',
    'tf.gfile.FastGFile': 'tf.compat.v1.gfile.FastGFile',
    'tf.gfile.GFile': 'tf.compat.v1.gfile.GFile',
    'tf.gfile.Glob': 'tf.compat.v1.gfile.Glob',
    'tf.gfile.IsDirectory': 'tf.compat.v1.gfile.IsDirectory',
    'tf.gfile.ListDirectory': 'tf.compat.v1.gfile.ListDirectory',
    'tf.gfile.MakeDirs': 'tf.compat.v1.gfile.MakeDirs',
    'tf.gfile.MkDir': 'tf.compat.v1.gfile.MkDir',
    'tf.gfile.Open': 'tf.compat.v1.gfile.Open',
    'tf.gfile.Remove': 'tf.compat.v1.gfile.Remove',
    'tf.gfile.Rename': 'tf.compat.v1.gfile.Rename',
    'tf.gfile.Stat': 'tf.compat.v1.gfile.Stat',
    'tf.gfile.Walk': 'tf.compat.v1.gfile.Walk',
    'tf.global_norm': 'tf.linalg.global_norm',
    'tf.global_variables': 'tf.compat.v1.global_variables',
    'tf.global_variables_initializer': 'tf.compat.v1.global_variables_initializer',
@@ -274,7 +259,6 @@ renames = {
    'tf.layers.separable_conv2d': 'tf.compat.v1.layers.separable_conv2d',
    'tf.lbeta': 'tf.math.lbeta',
    'tf.lgamma': 'tf.math.lgamma',
    'tf.load_file_system_library': 'tf.compat.v1.load_file_system_library',
    'tf.local_variables': 'tf.compat.v1.local_variables',
    'tf.local_variables_initializer': 'tf.compat.v1.local_variables_initializer',
    'tf.log_sigmoid': 'tf.math.log_sigmoid',
@@ -312,7 +296,6 @@ renames = {
    'tf.losses.sparse_softmax_cross_entropy': 'tf.compat.v1.losses.sparse_softmax_cross_entropy',
    'tf.make_template': 'tf.compat.v1.make_template',
    'tf.make_tensor_proto': 'tf.compat.v1.make_tensor_proto',
    'tf.manip.batch_to_space_nd': 'tf.compat.v1.manip.batch_to_space_nd',
    'tf.manip.gather_nd': 'tf.gather_nd',
    'tf.manip.reshape': 'tf.reshape',
    'tf.manip.reverse': 'tf.reverse',
@@ -367,7 +350,6 @@ renames = {
    'tf.min_max_variable_partitioner': 'tf.compat.v1.min_max_variable_partitioner',
    'tf.model_variables': 'tf.compat.v1.model_variables',
    'tf.moving_average_variables': 'tf.compat.v1.moving_average_variables',
    'tf.multinomial': 'tf.compat.v1.multinomial',
    'tf.nn.bidirectional_dynamic_rnn': 'tf.compat.v1.nn.bidirectional_dynamic_rnn',
    'tf.nn.conv3d_backprop_filter_v2': 'tf.nn.conv3d_backprop_filter',
    'tf.nn.ctc_beam_search_decoder_v2': 'tf.nn.ctc_beam_search_decoder',
@@ -376,6 +358,7 @@ renames = {
    'tf.nn.depthwise_conv2d_native_backprop_filter': 'tf.nn.depthwise_conv2d_backprop_filter',
    'tf.nn.depthwise_conv2d_native_backprop_input': 'tf.nn.depthwise_conv2d_backprop_input',
    'tf.nn.dynamic_rnn': 'tf.compat.v1.nn.dynamic_rnn',
    'tf.nn.fused_batch_norm': 'tf.compat.v1.nn.fused_batch_norm',
    'tf.nn.log_uniform_candidate_sampler': 'tf.random.log_uniform_candidate_sampler',
    'tf.nn.quantized_avg_pool': 'tf.compat.v1.nn.quantized_avg_pool',
    'tf.nn.quantized_conv2d': 'tf.compat.v1.nn.quantized_conv2d',
@@ -417,10 +400,8 @@ renames = {
    'tf.python_io.tf_record_iterator': 'tf.compat.v1.python_io.tf_record_iterator',
    'tf.qr': 'tf.linalg.qr',
    'tf.quantize': 'tf.quantization.quantize',
    'tf.quantize_v2': 'tf.compat.v1.quantize_v2',
    'tf.quantized_concat': 'tf.quantization.quantized_concat',
    'tf.random.get_seed': 'tf.compat.v1.random.get_seed',
    'tf.random.multinomial': 'tf.compat.v1.random.multinomial',
    'tf.random.set_random_seed': 'tf.compat.v1.random.set_random_seed',
    'tf.random.stateless_multinomial': 'tf.compat.v1.random.stateless_multinomial',
    'tf.random_crop': 'tf.image.random_crop',
@@ -540,7 +521,6 @@ renames = {
    'tf.sparse_segment_sum': 'tf.compat.v1.sparse_segment_sum',
    'tf.sparse_slice': 'tf.sparse.slice',
    'tf.sparse_softmax': 'tf.sparse.softmax',
    'tf.sparse_split': 'tf.compat.v1.sparse_split',
    'tf.sparse_tensor_dense_matmul': 'tf.sparse.sparse_dense_matmul',
    'tf.sparse_tensor_to_dense': 'tf.sparse.to_dense',
    'tf.sparse_to_dense': 'tf.compat.v1.sparse_to_dense',
@@ -563,10 +543,8 @@ renames = {
    'tf.squared_difference': 'tf.math.squared_difference',
    'tf.string_join': 'tf.strings.join',
    'tf.string_strip': 'tf.strings.strip',
    'tf.string_to_hash_bucket': 'tf.compat.v1.string_to_hash_bucket',
    'tf.string_to_hash_bucket_fast': 'tf.strings.to_hash_bucket_fast',
    'tf.string_to_hash_bucket_strong': 'tf.strings.to_hash_bucket_strong',
    'tf.string_to_number': 'tf.compat.v1.string_to_number',
    'tf.summary.audio': 'tf.compat.v1.summary.audio',
    'tf.summary.get_summary_description': 'tf.compat.v1.summary.get_summary_description',
    'tf.summary.histogram': 'tf.compat.v1.summary.histogram',
@@ -591,16 +569,16 @@ renames = {
    'tf.to_int32': 'tf.compat.v1.to_int32',
    'tf.to_int64': 'tf.compat.v1.to_int64',
    'tf.trace': 'tf.linalg.trace',
    'tf.train.ChiefSessionCreator': 'tf.compat.v1.train.ChiefSessionCreator',
    'tf.train.MonitoredSession': 'tf.compat.v1.train.MonitoredSession',
    'tf.train.LooperThread': 'tf.compat.v1.train.LooperThread',
    'tf.train.AdadeltaOptimizer': 'tf.compat.v1.train.AdadeltaOptimizer',
    'tf.train.AdagradDAOptimizer': 'tf.compat.v1.train.AdagradDAOptimizer',
    'tf.train.AdagradOptimizer': 'tf.compat.v1.train.AdagradOptimizer',
    'tf.train.AdamOptimizer': 'tf.compat.v1.train.AdamOptimizer',
    'tf.train.ChiefSessionCreator': 'tf.compat.v1.train.ChiefSessionCreator',
    'tf.train.FtrlOptimizer': 'tf.compat.v1.train.FtrlOptimizer',
    'tf.train.GradientDescentOptimizer': 'tf.compat.v1.train.GradientDescentOptimizer',
    'tf.train.LooperThread': 'tf.compat.v1.train.LooperThread',
    'tf.train.MomentumOptimizer': 'tf.compat.v1.train.MomentumOptimizer',
    'tf.train.MonitoredSession': 'tf.compat.v1.train.MonitoredSession',
    'tf.train.MonitoredTrainingSession': 'tf.compat.v1.train.MonitoredTrainingSession',
    'tf.train.NewCheckpointReader': 'tf.compat.v1.train.NewCheckpointReader',
    'tf.train.Optimizer': 'tf.compat.v1.train.Optimizer',
@@ -615,8 +593,8 @@ renames = {
    'tf.train.SingularMonitoredSession': 'tf.compat.v1.train.SingularMonitoredSession',
    'tf.train.Supervisor': 'tf.compat.v1.train.Supervisor',
    'tf.train.SyncReplicasOptimizer': 'tf.compat.v1.train.SyncReplicasOptimizer',
    'tf.train.WorkerSessionCreator': 'tf.compat.v1.train.WorkerSessionCreator',
    'tf.train.VocabInfo': 'tf.compat.v1.train.VocabInfo',
    'tf.train.WorkerSessionCreator': 'tf.compat.v1.train.WorkerSessionCreator',
    'tf.train.add_queue_runner': 'tf.compat.v1.train.add_queue_runner',
    'tf.train.assert_global_step': 'tf.compat.v1.train.assert_global_step',
    'tf.train.basic_train_loop': 'tf.compat.v1.train.basic_train_loop',
@@ -18,8 +18,6 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse

from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import renames_v2

@@ -275,14 +273,12 @@ class TFAPIChangeSpec(ast_edits.APIChangeSpec):
        },
    }

    # Mapping from function to the new name of the function
    self.symbol_renames = renames_v2.renames
    # pylint: disable=line-too-long
    # Add additional renames not in renames_v2.py here.
    # IMPORTANT: For the renames in here, if you also need to add to
    # function_reorders or function_keyword_renames, use the OLD function name.
    # These renames happen after the arguments have been processed.
    self.symbol_renames.update({
    self.manual_symbol_renames = {
        "tf.batch_to_space_nd":
            "tf.batch_to_space",
        "tf.gfile.Copy":
@@ -425,13 +421,12 @@ class TFAPIChangeSpec(ast_edits.APIChangeSpec):
            "tf.math.confusion_matrix",
        "tf.decode_csv":
            "tf.io.decode_csv",
    })
    }
    # pylint: enable=line-too-long

    # For custom behavior and if auto-generate rename in renames_v2.py
    # is incorrect, add the op name here to exclude it from renames_v2.py.
    excluded_renames = [
    ]
    # Mapping from function to the new name of the function
    self.symbol_renames = renames_v2.renames
    self.symbol_renames.update(self.manual_symbol_renames)

    # Variables that should be changed to functions.
    self.change_to_function = {}
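
For context, a minimal sketch of how the two rename tables combine in TFAPIChangeSpec after this hunk (the dict contents are illustrative stand-ins, one entry each):

# Sketch only: renames_v2.renames supplies the generated table, and manual_symbol_renames
# is applied on top via dict.update(), so a manual entry always wins on collision.
automatic = {"tf.cholesky": "tf.linalg.cholesky"}        # stand-in for renames_v2.renames
manual = {"tf.batch_to_space_nd": "tf.batch_to_space"}   # stand-in for manual_symbol_renames
symbol_renames = dict(automatic)
symbol_renames.update(manual)
# With the generator change further below, keys of `manual` are no longer emitted into
# renames_v2.py at all, so update() no longer needs to override anything.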
@@ -711,7 +706,7 @@ class TFAPIChangeSpec(ast_edits.APIChangeSpec):
    self.symbol_renames = {
        name: new_name
        for name, new_name in self.symbol_renames.items()
        if name not in self.function_warnings and name not in excluded_renames
        if name not in self.function_warnings
    }

    export_saved_model_renamed = (
@@ -772,79 +767,3 @@ class TFAPIChangeSpec(ast_edits.APIChangeSpec):
        "",
        error="{} requires manual check.".format(name))
    return _helper


if __name__ == "__main__":
  parser = argparse.ArgumentParser(
      formatter_class=argparse.RawDescriptionHelpFormatter,
      description="""Convert a TensorFlow Python file to 2.0

Simple usage:
  tf_upgrade_v2.py --infile foo.py --outfile bar.py
  tf_upgrade_v2.py --intree ~/code/old --outtree ~/code/new
""")
  parser.add_argument(
      "--infile",
      dest="input_file",
      help="If converting a single file, the name of the file "
      "to convert")
  parser.add_argument(
      "--outfile",
      dest="output_file",
      help="If converting a single file, the output filename.")
  parser.add_argument(
      "--intree",
      dest="input_tree",
      help="If converting a whole tree of files, the directory "
      "to read from (relative or absolute).")
  parser.add_argument(
      "--outtree",
      dest="output_tree",
      help="If converting a whole tree of files, the output "
      "directory (relative or absolute).")
  parser.add_argument(
      "--copyotherfiles",
      dest="copy_other_files",
      help=("If converting a whole tree of files, whether to "
            "copy the other files."),
      type=bool,
      default=False)
  parser.add_argument(
      "--reportfile",
      dest="report_filename",
      help=("The name of the file where the report log is "
            "stored."
            "(default: %(default)s)"),
      default="report.txt")
  args = parser.parse_args()

  upgrade = ast_edits.ASTCodeUpgrader(TFAPIChangeSpec())
  report_text = None
  report_filename = args.report_filename
  files_processed = 0
  if args.input_file:
    if not args.output_file:
      raise ValueError(
          "--outfile=<output file> argument is required when converting a "
          "single file.")
    files_processed, report_text, errors = upgrade.process_file(
        args.input_file, args.output_file)
    files_processed = 1
  elif args.input_tree:
    if not args.output_tree:
      raise ValueError(
          "--outtree=<output directory> argument is required when converting a "
          "file tree.")
    files_processed, report_text, errors = upgrade.process_tree(
        args.input_tree, args.output_tree, args.copy_other_files)
  else:
    parser.print_help()
  if report_text:
    open(report_filename, "w").write(report_text)
  print("TensorFlow 2.0 Upgrade Script")
  print("-----------------------------")
  print("Converted %d files\n" % files_processed)
  print("Detected %d errors that require attention" % len(errors))
  print("-" * 80)
  print("\n".join(errors))
  print("\nMake sure to read the detailed log %r\n" % report_filename)
tensorflow/tools/compatibility/tf_upgrade_v2_main.py (new file, 100 lines)
@@ -0,0 +1,100 @@
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Upgrader for Python scripts from 1.* TensorFlow to 2.0 TensorFlow."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse

from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2


if __name__ == "__main__":
  parser = argparse.ArgumentParser(
      formatter_class=argparse.RawDescriptionHelpFormatter,
      description="""Convert a TensorFlow Python file to 2.0

Simple usage:
  tf_upgrade_v2.py --infile foo.py --outfile bar.py
  tf_upgrade_v2.py --intree ~/code/old --outtree ~/code/new
""")
  parser.add_argument(
      "--infile",
      dest="input_file",
      help="If converting a single file, the name of the file "
      "to convert")
  parser.add_argument(
      "--outfile",
      dest="output_file",
      help="If converting a single file, the output filename.")
  parser.add_argument(
      "--intree",
      dest="input_tree",
      help="If converting a whole tree of files, the directory "
      "to read from (relative or absolute).")
  parser.add_argument(
      "--outtree",
      dest="output_tree",
      help="If converting a whole tree of files, the output "
      "directory (relative or absolute).")
  parser.add_argument(
      "--copyotherfiles",
      dest="copy_other_files",
      help=("If converting a whole tree of files, whether to "
            "copy the other files."),
      type=bool,
      default=False)
  parser.add_argument(
      "--reportfile",
      dest="report_filename",
      help=("The name of the file where the report log is "
            "stored."
            "(default: %(default)s)"),
      default="report.txt")
  args = parser.parse_args()

  upgrade = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
  report_text = None
  report_filename = args.report_filename
  files_processed = 0
  if args.input_file:
    if not args.output_file:
      raise ValueError(
          "--outfile=<output file> argument is required when converting a "
          "single file.")
    files_processed, report_text, errors = upgrade.process_file(
        args.input_file, args.output_file)
    files_processed = 1
  elif args.input_tree:
    if not args.output_tree:
      raise ValueError(
          "--outtree=<output directory> argument is required when converting a "
          "file tree.")
    files_processed, report_text, errors = upgrade.process_tree(
        args.input_tree, args.output_tree, args.copy_other_files)
  else:
    parser.print_help()
  if report_text:
    open(report_filename, "w").write(report_text)
  print("TensorFlow 2.0 Upgrade Script")
  print("-----------------------------")
  print("Converted %d files\n" % files_processed)
  print("Detected %d errors that require attention" % len(errors))
  print("-" * 80)
  print("\n".join(errors))
  print("\nMake sure to read the detailed log %r\n" % report_filename)
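
With the py_binary target added in the BUILD file above, the converter can be run roughly as follows (the exact invocation is an assumption, not part of this commit):

  bazel run //tensorflow/tools/compatibility:tf_upgrade_v2 -- --infile foo.py --outfile bar.py

or directly, as in the script's own usage text:

  python tf_upgrade_v2_main.py --intree ~/code/old --outtree ~/code/new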
@@ -12,5 +12,6 @@ py_binary(
        "//tensorflow/python:no_contrib",
        "//tensorflow/tools/common:public_api",
        "//tensorflow/tools/common:traverse",
        "//tensorflow/tools/compatibility:tf_upgrade_v2_lib",
    ],
)
@@ -32,6 +32,7 @@ from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import tf_upgrade_v2


_OUTPUT_FILE_PATH = 'third_party/tensorflow/tools/compatibility/renames_v2.py'
@@ -102,7 +103,7 @@ def collect_constant_renames():
  """Looks for constants that need to be renamed in TF 2.0.

  Returns:
    List of tuples of the form (current name, new name).
    Set of tuples of the form (current name, new name).
  """
  renames = set()
  for module in sys.modules.values():
@@ -135,7 +136,7 @@ def collect_function_renames():
  """Looks for functions/classes that need to be renamed in TF 2.0.

  Returns:
    List of tuples of the form (current name, new name).
    Set of tuples of the form (current name, new name).
  """
  # Set of rename lines to write to output file in the form:
  # 'tf.deprecated_name': 'tf.canonical_name'
@@ -181,12 +182,15 @@ def update_renames_v2(output_file_path):
  function_renames = collect_function_renames()
  constant_renames = collect_constant_renames()
  all_renames = function_renames.union(constant_renames)
  manual_renames = set(
      tf_upgrade_v2.TFAPIChangeSpec().manual_symbol_renames.keys())

  # List of rename lines to write to output file in the form:
  # 'tf.deprecated_name': 'tf.canonical_name'
  rename_lines = [
      get_rename_line(name, canonical_name)
      for name, canonical_name in all_renames]
      for name, canonical_name in all_renames
      if 'tf.' + name not in manual_renames]
  renames_file_text = '%srenames = {\n%s\n}\n' % (
      _FILE_HEADER, ',\n'.join(sorted(rename_lines)))
  file_io.write_string_to_file(output_file_path, renames_file_text)