diff --git a/tensorflow/compiler/tests/xla_test.py b/tensorflow/compiler/tests/xla_test.py
index 98a41981cf3..8f63c1a48cb 100644
--- a/tensorflow/compiler/tests/xla_test.py
+++ b/tensorflow/compiler/tests/xla_test.py
@@ -66,7 +66,7 @@ def parse_disabled_manifest(manifest_content):
       raise ValueError('Bad entry in manifest file.')
 
   disabled_regex = '|'.join(disabled_tests)
-  method_types_filter = dict()
+  method_types_filter = {}
   for method, types in disabled_method_types:
     method_types_filter[method] = set([
         dtypes.as_dtype(types_pb2.DataType.Value(name)).as_numpy_dtype
diff --git a/tensorflow/contrib/autograph/examples/benchmarks/benchmark_base.py b/tensorflow/contrib/autograph/examples/benchmarks/benchmark_base.py
index 93c694849c4..25414fbda62 100644
--- a/tensorflow/contrib/autograph/examples/benchmarks/benchmark_base.py
+++ b/tensorflow/contrib/autograph/examples/benchmarks/benchmark_base.py
@@ -47,7 +47,7 @@ class ReportingBenchmark(tf.test.Benchmark):
 
     avg_time = np.average(all_times)
 
-    extras = dict()
+    extras = {}
     extras['all_times'] = all_times
 
     if isinstance(name, tuple):
diff --git a/tensorflow/contrib/boosted_trees/estimator_batch/estimator_utils.py b/tensorflow/contrib/boosted_trees/estimator_batch/estimator_utils.py
index 48a7f85eada..c4f94a6554a 100644
--- a/tensorflow/contrib/boosted_trees/estimator_batch/estimator_utils.py
+++ b/tensorflow/contrib/boosted_trees/estimator_batch/estimator_utils.py
@@ -44,7 +44,7 @@ def _export_outputs_to_output_alternatives(export_outputs):
   Returns:
     converted output_alternatives.
   """
-  output = dict()
+  output = {}
   if export_outputs is not None:
     for key, value in export_outputs.items():
       if isinstance(value, export_output.ClassificationOutput):
diff --git a/tensorflow/contrib/distribute/python/multi_worker_test_base.py b/tensorflow/contrib/distribute/python/multi_worker_test_base.py
index ce448840f14..3f8fa608f8e 100644
--- a/tensorflow/contrib/distribute/python/multi_worker_test_base.py
+++ b/tensorflow/contrib/distribute/python/multi_worker_test_base.py
@@ -316,13 +316,13 @@ class MockOsEnv(collections.Mapping):
   """A class that allows per-thread TF_CONFIG."""
 
   def __init__(self, *args):
-    self._dict = dict()
+    self._dict = {}
     self._thread_local = threading.local()
     super(MockOsEnv, self).__init__(*args)
 
   def get(self, key, default=None):
     if not hasattr(self._thread_local, 'dict'):
-      self._thread_local.dict = dict()
+      self._thread_local.dict = {}
     if key == 'TF_CONFIG':
       return dict.get(self._thread_local.dict, key, default)
     else:
@@ -330,7 +330,7 @@ class MockOsEnv(collections.Mapping):
 
   def __getitem__(self, key):
     if not hasattr(self._thread_local, 'dict'):
-      self._thread_local.dict = dict()
+      self._thread_local.dict = {}
     if key == 'TF_CONFIG':
       return dict.__getitem__(self._thread_local.dict, key)
     else:
@@ -338,7 +338,7 @@ class MockOsEnv(collections.Mapping):
 
   def __setitem__(self, key, val):
     if not hasattr(self._thread_local, 'dict'):
-      self._thread_local.dict = dict()
+      self._thread_local.dict = {}
     if key == 'TF_CONFIG':
       return dict.__setitem__(self._thread_local.dict, key, val)
     else:
@@ -346,7 +346,7 @@ class MockOsEnv(collections.Mapping):
 
   def __iter__(self):
     if not hasattr(self._thread_local, 'dict'):
-      self._thread_local.dict = dict()
+      self._thread_local.dict = {}
     for x in self._thread_local.dict:
       yield x
     for x in self._dict:
@@ -354,7 +354,7 @@ class MockOsEnv(collections.Mapping):
 
   def __len__(self):
     if not hasattr(self._thread_local, 'dict'):
-      self._thread_local.dict = dict()
+      self._thread_local.dict = {}
     return self._thread_local.dict.__len__() + self._dict.__len__()
 
 
diff --git a/tensorflow/contrib/eager/python/examples/spinn/data.py b/tensorflow/contrib/eager/python/examples/spinn/data.py
index 3bc3bb49bcb..72d23630cd9 100644
--- a/tensorflow/contrib/eager/python/examples/spinn/data.py
+++ b/tensorflow/contrib/eager/python/examples/spinn/data.py
@@ -179,7 +179,7 @@ def load_word_vectors(data_root, vocab):
 
   print("Loading word vectors...")
 
-  word2index = dict()
+  word2index = {}
   embed = []
 
   embed.append([0] * WORD_VECTOR_LEN)  # <unk>
diff --git a/tensorflow/contrib/eager/python/saver.py b/tensorflow/contrib/eager/python/saver.py
index 1d0d6c6c14c..7e1ed195d10 100644
--- a/tensorflow/contrib/eager/python/saver.py
+++ b/tensorflow/contrib/eager/python/saver.py
@@ -85,7 +85,7 @@ def restore_variables_on_create(save_path, map_func=None):
         raise ValueError("map_func must be callable.")
       map_func_wrapper = lambda self, x: map_func(x)
 
-    ckpt_var_cache = dict()
+    ckpt_var_cache = {}
     reader = checkpoint_utils.load_checkpoint(save_path)
     for k, _ in checkpoint_utils.list_variables(save_path):
       ckpt_var_cache[k] = reader.get_tensor(k)
diff --git a/tensorflow/contrib/kernel_methods/python/kernel_estimators.py b/tensorflow/contrib/kernel_methods/python/kernel_estimators.py
index 1626e55b9b3..0f863c5a906 100644
--- a/tensorflow/contrib/kernel_methods/python/kernel_estimators.py
+++ b/tensorflow/contrib/kernel_methods/python/kernel_estimators.py
@@ -77,7 +77,7 @@ def _update_features_and_columns(features, feature_columns,
     return features, feature_columns
 
   # First construct new columns and features affected by kernel_mappers_dict.
-  mapped_features = dict()
+  mapped_features = {}
   mapped_columns = set()
   for feature_column in kernel_mappers_dict:
     column_name = feature_column.name
diff --git a/tensorflow/contrib/layers/python/layers/feature_column_ops.py b/tensorflow/contrib/layers/python/layers/feature_column_ops.py
index a85cff4f709..f1a302bc69a 100644
--- a/tensorflow/contrib/layers/python/layers/feature_column_ops.py
+++ b/tensorflow/contrib/layers/python/layers/feature_column_ops.py
@@ -488,7 +488,7 @@ def weighted_sum_from_feature_columns(columns_to_tensors,
       default_name='weighted_sum_from_feature_columns',
       values=columns_to_tensors.values()):
     output_tensors = []
-    column_to_variable = dict()
+    column_to_variable = {}
     transformer = _Transformer(columns_to_tensors)
     # pylint: disable=protected-access
     for column in sorted(set(feature_columns), key=lambda x: x.key):
diff --git a/tensorflow/contrib/learn/python/learn/estimators/_sklearn.py b/tensorflow/contrib/learn/python/learn/estimators/_sklearn.py
index 4c206839300..99f22d182cd 100644
--- a/tensorflow/contrib/learn/python/learn/estimators/_sklearn.py
+++ b/tensorflow/contrib/learn/python/learn/estimators/_sklearn.py
@@ -50,7 +50,7 @@ class _BaseEstimator(object):
       params : mapping of string to any
       Parameter names mapped to their values.
     """
-    out = dict()
+    out = {}
     param_names = [name for name in self.__dict__ if not name.startswith('_')]
     for key in param_names:
       value = getattr(self, key, None)
diff --git a/tensorflow/contrib/training/python/training/hparam.py b/tensorflow/contrib/training/python/training/hparam.py
index cb0a25f333b..2368df5bd2f 100644
--- a/tensorflow/contrib/training/python/training/hparam.py
+++ b/tensorflow/contrib/training/python/training/hparam.py
@@ -545,7 +545,7 @@ class HParams(object):
       ValueError: If `values` cannot be parsed or a hyperparameter in `values`
       doesn't exist.
     """
-    type_map = dict()
+    type_map = {}
     for name, t in self._hparam_types.items():
       param_type, _ = t
       type_map[name] = param_type
diff --git a/tensorflow/contrib/training/python/training/sequence_queueing_state_saver_test.py b/tensorflow/contrib/training/python/training/sequence_queueing_state_saver_test.py
index 8932b905c91..15dc1622054 100644
--- a/tensorflow/contrib/training/python/training/sequence_queueing_state_saver_test.py
+++ b/tensorflow/contrib/training/python/training/sequence_queueing_state_saver_test.py
@@ -366,7 +366,7 @@ class SequenceQueueingStateSaverTest(test.TestCase):
       update_2 = next_batch.save_state("state2",
                                        -1 + next_batch.state("state2"))
 
-      original_values = dict()
+      original_values = {}
 
       def insert(which):
         for i in range(20):
diff --git a/tensorflow/examples/tutorials/word2vec/word2vec_basic.py b/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
index 20f83b7f755..b503709ee2a 100644
--- a/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
+++ b/tensorflow/examples/tutorials/word2vec/word2vec_basic.py
@@ -81,7 +81,7 @@ def word2vec_basic(log_dir):
     """Process raw inputs into a dataset."""
     count = [['UNK', -1]]
     count.extend(collections.Counter(words).most_common(n_words - 1))
-    dictionary = dict()
+    dictionary = {}
     for word, _ in count:
       dictionary[word] = len(dictionary)
     data = []
diff --git a/tensorflow/examples/udacity/5_word2vec.ipynb b/tensorflow/examples/udacity/5_word2vec.ipynb
index 9728b6c2206..93f0b9e162d 100644
--- a/tensorflow/examples/udacity/5_word2vec.ipynb
+++ b/tensorflow/examples/udacity/5_word2vec.ipynb
@@ -251,7 +251,7 @@
         "def build_dataset(words):\n",
         "  count = [['UNK', -1]]\n",
         "  count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n",
-        "  dictionary = dict()\n",
+        "  dictionary = {}\n",
         "  for word, _ in count:\n",
         "    dictionary[word] = len(dictionary)\n",
         "  data = []\n",
diff --git a/tensorflow/examples/udacity/6_lstm.ipynb b/tensorflow/examples/udacity/6_lstm.ipynb
index 67fc7b083ae..fa348ed9d27 100644
--- a/tensorflow/examples/udacity/6_lstm.ipynb
+++ b/tensorflow/examples/udacity/6_lstm.ipynb
@@ -661,7 +661,7 @@
         "  mean_loss = 0\n",
         "  for step in range(num_steps):\n",
         "    batches = train_batches.next()\n",
-        "    feed_dict = dict()\n",
+        "    feed_dict = {}\n",
         "    for i in range(num_unrollings + 1):\n",
         "      feed_dict[train_data[i]] = batches[i]\n",
         "    _, l, predictions, lr = session.run(\n",
diff --git a/tensorflow/python/autograph/utils/testing.py b/tensorflow/python/autograph/utils/testing.py
index dd6bdc8931e..a59642c9577 100644
--- a/tensorflow/python/autograph/utils/testing.py
+++ b/tensorflow/python/autograph/utils/testing.py
@@ -28,7 +28,7 @@ from tensorflow.python.ops import math_ops
 def fake_tf():
   """Creates a fake module that looks like TensorFlow, for testing."""
   mod = imp.new_module('tensorflow')
-  mod_contents = dict()
+  mod_contents = {}
   mod_contents.update(gen_math_ops.__dict__)
   mod_contents.update(math_ops.__dict__)
   mod_contents.update(ops.__dict__)
diff --git a/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py b/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py
index 4839bc8e49b..d9c463d744d 100644
--- a/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py
+++ b/tensorflow/python/data/experimental/kernel_tests/bucket_by_sequence_length_test.py
@@ -95,11 +95,11 @@ class BucketBySequenceLengthTest(test_base.DatasetTestBase,
 
     # Expected sum of all batches with an equal sequence length.
     # <seq-length>: <expected-total-sum>
-    expected_sums = dict()
+    expected_sums = {}
 
     # Expected batch sizes of batches depending on the sequence length.
     # <seq-length>: [batch1_size, ..., batchN_size]
-    expected_batch_sizes = dict()
+    expected_batch_sizes = {}
 
     for length, batch_size, bucket_elements in zip(lengths, batch_sizes,
                                                    n_bucket_elements):
@@ -155,10 +155,10 @@ class BucketBySequenceLengthTest(test_base.DatasetTestBase,
       generated_lengths = []
 
       # <seq-length>: <total-sum>
-      generated_sums = dict()
+      generated_sums = {}
 
       # <seq-length>: [<batch_size>, ...]
-      generated_batch_sizes = dict()
+      generated_batch_sizes = {}
 
       for length, batch_size, bucket_elements in zip(lengths, batch_sizes,
                                                      n_bucket_elements):
diff --git a/tensorflow/python/debug/cli/cli_config.py b/tensorflow/python/debug/cli/cli_config.py
index beed4f0a82a..a3c312945d2 100644
--- a/tensorflow/python/debug/cli/cli_config.py
+++ b/tensorflow/python/debug/cli/cli_config.py
@@ -47,7 +47,7 @@ class CLIConfig(object):
         self._config[key] = value
     self._save_to_file()
 
-    self._set_callbacks = dict()
+    self._set_callbacks = {}
 
   def get(self, property_name):
     if property_name not in self._config:
diff --git a/tensorflow/python/debug/cli/evaluator.py b/tensorflow/python/debug/cli/evaluator.py
index 6f528646f99..e474332f74c 100644
--- a/tensorflow/python/debug/cli/evaluator.py
+++ b/tensorflow/python/debug/cli/evaluator.py
@@ -113,7 +113,7 @@ class ExpressionEvaluator(object):
       dump: an instance of `DebugDumpDir`.
     """
     self._dump = dump
-    self._cached_tensor_values = dict()
+    self._cached_tensor_values = {}
 
   def evaluate(self, expression):
     """Parse an expression.
diff --git a/tensorflow/python/debug/lib/debug_data.py b/tensorflow/python/debug/lib/debug_data.py
index 7c96c2878c7..8951b013bbe 100644
--- a/tensorflow/python/debug/lib/debug_data.py
+++ b/tensorflow/python/debug/lib/debug_data.py
@@ -949,7 +949,7 @@ class DebugDumpDir(object):
     Returns:
       A dict mapping device names (`str`s) to reconstructed `tf.GraphDef`s.
     """
-    non_debug_graphs = dict()
+    non_debug_graphs = {}
     for key in self._debug_graphs:
       non_debug_graphs[key] = self._debug_graphs[key].non_debug_graph_def
     return non_debug_graphs
diff --git a/tensorflow/python/debug/lib/grpc_debug_test_server.py b/tensorflow/python/debug/lib/grpc_debug_test_server.py
index a7be20948df..9c0e5390bd8 100644
--- a/tensorflow/python/debug/lib/grpc_debug_test_server.py
+++ b/tensorflow/python/debug/lib/grpc_debug_test_server.py
@@ -249,7 +249,7 @@ class EventListenerTestServicer(grpc_debug_server.EventListenerBaseServicer):
 
   def _initialize_toggle_watch_state(self, toggle_watches):
     self._toggle_watches = toggle_watches
-    self._toggle_watch_state = dict()
+    self._toggle_watch_state = {}
     if self._toggle_watches:
       for watch_key in self._toggle_watches:
         self._toggle_watch_state[watch_key] = False
diff --git a/tensorflow/python/debug/lib/source_remote.py b/tensorflow/python/debug/lib/source_remote.py
index 4afae41bc9a..df2dc05387b 100644
--- a/tensorflow/python/debug/lib/source_remote.py
+++ b/tensorflow/python/debug/lib/source_remote.py
@@ -59,7 +59,7 @@ def _format_origin_stack(origin_stack, call_traceback_proto):
     call_traceback_proto: A `CallTraceback` proto whose fields are to be
       populated.
   """
-  string_to_id = dict()
+  string_to_id = {}
   string_to_id[None] = 0
   for frame in origin_stack:
     file_path, lineno, func_name, line_text = frame
diff --git a/tensorflow/python/debug/lib/stepper.py b/tensorflow/python/debug/lib/stepper.py
index c27b3f51cdd..ac47315f161 100644
--- a/tensorflow/python/debug/lib/stepper.py
+++ b/tensorflow/python/debug/lib/stepper.py
@@ -243,7 +243,7 @@ class NodeStepper(object):
     done = set()  # Keep track of visited graph elements.
 
     # A list of str: Names of the topologically-sorted graph elements.
-    node_inputs = dict()  # New: Input map of nodes in the transitive closure.
+    node_inputs = {}  # Input map of nodes in the transitive closure.
 
     elem_stack = copy.copy(elem_list)
 
diff --git a/tensorflow/python/debug/wrappers/framework.py b/tensorflow/python/debug/wrappers/framework.py
index ae403205b7c..6cc9c67f2f0 100644
--- a/tensorflow/python/debug/wrappers/framework.py
+++ b/tensorflow/python/debug/wrappers/framework.py
@@ -396,7 +396,7 @@ class BaseDebugWrapperSession(session.SessionInterface):
     self._default_session_context_manager = None
 
     # A cache for callables created from CallableOptions.
-    self._cached_callables_from_options = dict()
+    self._cached_callables_from_options = {}
 
   @property
   def graph(self):
diff --git a/tensorflow/python/distribute/cross_device_utils.py b/tensorflow/python/distribute/cross_device_utils.py
index 612a958ebba..027e0a4c35a 100644
--- a/tensorflow/python/distribute/cross_device_utils.py
+++ b/tensorflow/python/distribute/cross_device_utils.py
@@ -264,10 +264,10 @@ class CollectiveKeys(object):
         recorded with an id.
     """
     self._group_key = group_key_start
-    self._group_key_table = dict()
+    self._group_key_table = {}
 
     # For instance keys with ids
-    self._instance_key_id_to_key_table = dict()
+    self._instance_key_id_to_key_table = {}
     self._instance_key_with_id_counter = instance_key_with_id_start
 
     # For instance keys without ids
diff --git a/tensorflow/python/feature_column/feature_column.py b/tensorflow/python/feature_column/feature_column.py
index 8c0300d204f..420bfcd4d0e 100644
--- a/tensorflow/python/feature_column/feature_column.py
+++ b/tensorflow/python/feature_column/feature_column.py
@@ -2251,7 +2251,7 @@ def _normalize_feature_columns(feature_columns):
                        'Given (type {}): {}.'.format(type(column), column))
   if not feature_columns:
     raise ValueError('feature_columns must not be empty.')
-  name_to_column = dict()
+  name_to_column = {}
   for column in feature_columns:
     if column.name in name_to_column:
       raise ValueError('Duplicate feature column name found for columns: {} '
diff --git a/tensorflow/python/feature_column/feature_column_v2.py b/tensorflow/python/feature_column/feature_column_v2.py
index d8bcea29487..292342287d9 100644
--- a/tensorflow/python/feature_column/feature_column_v2.py
+++ b/tensorflow/python/feature_column/feature_column_v2.py
@@ -2691,7 +2691,7 @@ def _normalize_feature_columns(feature_columns):
                        'Given (type {}): {}.'.format(type(column), column))
   if not feature_columns:
     raise ValueError('feature_columns must not be empty.')
-  name_to_column = dict()
+  name_to_column = {}
   for column in feature_columns:
     if column.name in name_to_column:
       raise ValueError('Duplicate feature column name found for columns: {} '
diff --git a/tensorflow/python/framework/function.py b/tensorflow/python/framework/function.py
index d7d069872ba..3c660fbc48c 100644
--- a/tensorflow/python/framework/function.py
+++ b/tensorflow/python/framework/function.py
@@ -262,7 +262,7 @@ class _DefinedFunction(object):
     self._definition = None
     # Constructed only when C API is enabled, lazily
     self._c_func = None
-    self._sub_functions = dict()  # Constructed with _definition or _c_func
+    self._sub_functions = {}  # Constructed with _definition or _c_func
     # pylint: disable=protected-access
     device_funcs = ops.get_default_graph()._device_functions_outer_to_inner
     # pylint: enable=protected-access
diff --git a/tensorflow/python/framework/ops.py b/tensorflow/python/framework/ops.py
index 267b3873303..44aa8e1258c 100644
--- a/tensorflow/python/framework/ops.py
+++ b/tensorflow/python/framework/ops.py
@@ -2995,9 +2995,9 @@ class Graph(object):
     # Similarly, if one or more Session.run calls are going on, all mutate ops
     # have to wait until all Session.run calls have finished.
     self._group_lock = lock_util.GroupLock(num_groups=2)
-    self._nodes_by_id = dict()  # GUARDED_BY(self._lock)
+    self._nodes_by_id = {}  # GUARDED_BY(self._lock)
     self._next_id_counter = 0  # GUARDED_BY(self._lock)
-    self._nodes_by_name = dict()  # GUARDED_BY(self._lock)
+    self._nodes_by_name = {}  # GUARDED_BY(self._lock)
     self._version = 0  # GUARDED_BY(self._lock)
     # Maps a name used in the graph to the next id to use for that name.
     self._names_in_use = {}
diff --git a/tensorflow/python/framework/registry.py b/tensorflow/python/framework/registry.py
index 53c68b04619..a095b0c5756 100644
--- a/tensorflow/python/framework/registry.py
+++ b/tensorflow/python/framework/registry.py
@@ -39,7 +39,7 @@ class Registry(object):
   def __init__(self, name):
     """Creates a new registry."""
     self._name = name
-    self._registry = dict()
+    self._registry = {}
 
   def register(self, candidate, name=None):
     """Registers a Python object "candidate" for the given "name".
diff --git a/tensorflow/python/keras/backend.py b/tensorflow/python/keras/backend.py
index 29e967faaf4..bd73c4c0248 100644
--- a/tensorflow/python/keras/backend.py
+++ b/tensorflow/python/keras/backend.py
@@ -2989,7 +2989,7 @@ class GraphExecutionFunction(object):
     # output from a fetch in `fetches`: { fetch: function(fetch_output) }
     # A Callback can use this to register a function with access to the
     # output values for a fetch it added.
-    self.fetch_callbacks = dict()
+    self.fetch_callbacks = {}
 
     if session_kwargs:
       raise ValueError('Some keys in session_kwargs are not supported at this '
diff --git a/tensorflow/python/ops/distributions/bijector_impl.py b/tensorflow/python/ops/distributions/bijector_impl.py
index a347cfdec15..b226232658f 100644
--- a/tensorflow/python/ops/distributions/bijector_impl.py
+++ b/tensorflow/python/ops/distributions/bijector_impl.py
@@ -104,8 +104,8 @@ class _Mapping(collections.namedtuple(
 
   def _merge_dicts(self, old=None, new=None):
     """Helper to merge two dictionaries."""
-    old = dict() if old is None else old
-    new = dict() if new is None else new
+    old = {} if old is None else old
+    new = {} if new is None else new
     for k, v in six.iteritems(new):
       val = old.get(k, None)
       if val is not None and val != v:
diff --git a/tensorflow/python/profiler/internal/run_metadata_test.py b/tensorflow/python/profiler/internal/run_metadata_test.py
index 88392ff3f08..18bc7a33dfc 100644
--- a/tensorflow/python/profiler/internal/run_metadata_test.py
+++ b/tensorflow/python/profiler/internal/run_metadata_test.py
@@ -201,7 +201,7 @@ class RunMetadataTest(test.TestCase):
     graph = ops.get_default_graph()
     forward_op = set()
     backward_op = set()
-    back_to_forward = dict()
+    back_to_forward = {}
     for op in graph.get_operations():
       if op.name.find('gradients/') > 0 and op.name.find('_grad/') > 0:
         backward_op.add(op.name)
diff --git a/tensorflow/python/profiler/tfprof_logger.py b/tensorflow/python/profiler/tfprof_logger.py
index 6ccd0e0ff3b..745e1c0c50f 100644
--- a/tensorflow/python/profiler/tfprof_logger.py
+++ b/tensorflow/python/profiler/tfprof_logger.py
@@ -93,7 +93,7 @@ def _get_logged_ops(graph, run_meta=None, add_trace=True,
 
   op_missing_shape = 0
   logged_ops = {}
-  string_to_id = dict()
+  string_to_id = {}
   string_to_id['none'] = len(string_to_id)
   # TODO(xpan): Work with Profiler more efficiently.
   for op in graph.get_operations():
@@ -169,7 +169,7 @@ def merge_default_with_oplog(graph, op_log=None, run_meta=None,
   if not op_log:
     tmp_op_log.log_entries.extend(logged_ops.values())
   else:
-    all_ops = dict()
+    all_ops = {}
     for entry in op_log.log_entries:
       all_ops[entry.name] = entry
     for op_name, entry in six.iteritems(logged_ops):
diff --git a/tensorflow/python/saved_model/signature_def_utils_test.py b/tensorflow/python/saved_model/signature_def_utils_test.py
index d1347eb0178..9a18f185d0b 100644
--- a/tensorflow/python/saved_model/signature_def_utils_test.py
+++ b/tensorflow/python/saved_model/signature_def_utils_test.py
@@ -64,12 +64,12 @@ class SignatureDefUtilsTest(test.TestCase):
   def testBuildSignatureDef(self):
     x = array_ops.placeholder(dtypes.float32, 1, name="x")
     x_tensor_info = utils.build_tensor_info(x)
-    inputs = dict()
+    inputs = {}
     inputs["foo-input"] = x_tensor_info
 
     y = array_ops.placeholder(dtypes.float32, name="y")
     y_tensor_info = utils.build_tensor_info(y)
-    outputs = dict()
+    outputs = {}
     outputs["foo-output"] = y_tensor_info
 
     signature_def = signature_def_utils_impl.build_signature_def(
diff --git a/tensorflow/python/training/tracking/tracking.py b/tensorflow/python/training/tracking/tracking.py
index 877b300ff37..58f657602c4 100644
--- a/tensorflow/python/training/tracking/tracking.py
+++ b/tensorflow/python/training/tracking/tracking.py
@@ -93,7 +93,7 @@ class AutoTrackable(base.Trackable):
 
   def _list_functions_for_serialization(self):
     """Return a dict of `Function`s of a trackable."""
-    functions = dict()
+    functions = {}
     for attribute_name in dir(self):
       try:
         attribute_value = getattr(self, attribute_name, None)
diff --git a/tensorflow/python/util/tf_should_use.py b/tensorflow/python/util/tf_should_use.py
index 63de4a7a96c..5ce7cf52bbe 100644
--- a/tensorflow/python/util/tf_should_use.py
+++ b/tensorflow/python/util/tf_should_use.py
@@ -104,7 +104,7 @@ def _new_mark_used(self, *args, **kwargs):
     pass
 
 
-_WRAPPERS = dict()
+_WRAPPERS = {}
 
 
 def _get_wrapper(x, tf_should_use_helper):
diff --git a/tensorflow/tools/dockerfiles/assembler.py b/tensorflow/tools/dockerfiles/assembler.py
index 83b72cb5bb8..5e84f36a2e6 100644
--- a/tensorflow/tools/dockerfiles/assembler.py
+++ b/tensorflow/tools/dockerfiles/assembler.py
@@ -339,7 +339,7 @@ def get_slice_sets_and_required_args(slice_sets, tag_spec):
 
 def gather_tag_args(slices, cli_input_args, required_args):
   """Build a dictionary of all the CLI and slice-specified args for a tag."""
-  args = dict()
+  args = {}
 
   for s in slices:
     args = update_args_dict(args, s['args'])
@@ -452,7 +452,7 @@ def gather_existing_partials(partial_path):
     Dict[string, string] of partial short names (like "ubuntu/python" or
       "bazel") to the full contents of that partial.
   """
-  partials = dict()
+  partials = {}
   for path, _, files in os.walk(partial_path):
     for name in files:
       fullpath = os.path.join(path, name)
diff --git a/third_party/repo.bzl b/third_party/repo.bzl
index bad6d20a08c..353ccf69fa3 100644
--- a/third_party/repo.bzl
+++ b/third_party/repo.bzl
@@ -185,7 +185,7 @@ def _third_party_http_archive(ctx):
             _apply_patch(ctx, ctx.attr.patch_file)
         ctx.symlink(Label(ctx.attr.build_file), buildfile_path)
 
-    link_dict = dict()
+    link_dict = {}
     if use_syslib:
         link_dict.update(ctx.attr.system_link_files)
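
The rewrite above is purely mechanical: every empty-dict construction through the dict() builtin becomes the literal {}. For an empty dict the two are interchangeable, but the literal is the more robust and slightly faster spelling: dict() performs a global name lookup followed by a call (and the name dict can be shadowed), while {} compiles directly to a single map-building opcode. A minimal sketch to verify both claims — exact opcode names and timings vary by CPython version, and the numbers below are illustrative, not taken from this commit:

import dis
import timeit

# Both expressions build an equal, empty dictionary.
assert {} == dict()

# The literal compiles to a single map-building instruction; the builtin
# needs a name lookup followed by a call (opcode names vary by version).
dis.dis(lambda: {})
dis.dis(lambda: dict())

# The literal therefore wins a micro-benchmark on empty-dict creation.
print(timeit.timeit('{}', number=1000000))
print(timeit.timeit('dict()', number=1000000))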