Remove deprecated SessionConfig in favor of using TensorFlow's default session configuration

This commit is contained in:
Reuben Morais 2019-06-28 10:25:04 -03:00
parent d52ab9df0b
commit dc78f8d1e6
4 changed files with 3 additions and 13 deletions

View File

@ -425,7 +425,7 @@ def train():
initializer = tf.global_variables_initializer()
with tf.Session(config=Config.session_config) as session:
with tf.Session() as session:
log_debug('Session opened.')
tf.get_default_graph().finalize()
@ -745,7 +745,7 @@ def export():
def do_single_file_inference(input_file_path):
with tf.Session(config=Config.session_config) as session:
with tf.Session() as session:
inputs, outputs, _ = create_inference_graph(batch_size=1, n_steps=-1)
# Create a saver using variables from the above newly created graph

View File

@ -77,7 +77,7 @@ def evaluate(test_csvs, create_model, try_loading):
# Create a saver using variables from the above newly created graph
saver = tf.train.Saver()
with tf.Session(config=Config.session_config) as session:
with tf.Session() as session:
# Restore variables from training checkpoint
loaded = try_loading(session, saver, 'best_dev_checkpoint', 'best validation')
if not loaded:

View File

@ -56,11 +56,6 @@ def initialize_globals():
if not FLAGS.summary_dir:
FLAGS.summary_dir = xdg.save_data_path(os.path.join('deepspeech', 'summaries'))
# Standard session configuration that'll be used for all new sessions.
c.session_config = tf.ConfigProto(allow_soft_placement=True, log_device_placement=FLAGS.log_placement,
inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,
intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads)
c.alphabet = Alphabet(os.path.abspath(FLAGS.alphabet_config_path))
# Geometric Constants

View File

@ -51,10 +51,6 @@ def create_flags():
f.DEFINE_integer('export_batch_size', 1, 'number of elements per batch on the exported graph')
# Performance(UNSUPPORTED)
f.DEFINE_integer('inter_op_parallelism_threads', 0, 'number of inter-op parallelism threads - see tf.ConfigProto for more details')
f.DEFINE_integer('intra_op_parallelism_threads', 0, 'number of intra-op parallelism threads - see tf.ConfigProto for more details')
# Sample limits
f.DEFINE_integer('limit_train', 0, 'maximum number of elements to use from train set - 0 means no limit')
@ -81,7 +77,6 @@ def create_flags():
f.DEFINE_integer('log_level', 1, 'log level for console logs - 0: INFO, 1: WARN, 2: ERROR, 3: FATAL')
f.DEFINE_boolean('show_progressbar', True, 'Show progress for training, validation and testing processes. Log level should be > 0.')
f.DEFINE_boolean('log_placement', False, 'whether to log device placement of the operators to the console')
f.DEFINE_integer('report_count', 10, 'number of phrases with lowest WER(best matching) to print out during a WER report')
f.DEFINE_string('summary_dir', '', 'target directory for TensorBoard summaries - defaults to directory "deepspeech/summaries" within user\'s data home specified by the XDG Base Directory Specification')